file_name
stringlengths
3
137
prefix
stringlengths
0
918k
suffix
stringlengths
0
962k
middle
stringlengths
0
812k
index.js
"use strict"; let Promise; if (process.env.Q_PROMISE) Promise = require('q').Promise; else Promise = require('promise'); const redis = require('redis'); const Redlock = require('redlock'); const redisClient = redis.createClient(6379, 'localhost'); const options = { delayMs: 100, maxStaleRetries: 2 }; const watchdog = require('../index'); const should = require('should'); var redlockConfig = { // the expected clock drift; for more details // see http://redis.io/topics/distlock driftFactor: 0.01, // the max number of times Redlock will attempt // to lock a resource before erroring retryCount: 3, // the time in ms between attempts retryDelay: 100 }; const testTimeoutMs = 30000; // 30sec const denodeify = function (fn) { return new Promise((resolve, reject) => fn((err, res) => { if (err) reject(err); else resolve(res); })); }; function deleteRedisKey(key) { return denodeify(redisClient.del.bind(redisClient, key)); } function stopAndReleaseWatchdog() { return watchdog.stop() .then(() => watchdog.release()); } describe("single", function () { before(function () { // previous test process could be killed without free of redis key "a" return deleteRedisKey("a") .then(() => deleteRedisKey("redlock_list")); }); beforeEach(function () { watchdog.init(redisClient, Promise, options); }); afterEach(function () { return stopAndReleaseWatchdog() .then(() => deleteRedisKey("a")); }); it("without any redlock - watchdog should check empty redlock_list without an error", function() { this.timeout(testTimeoutMs); return new Promise((resolve) => { watchdog.start(); setTimeout(() => resolve(), 1000); }); }); it("with local heartbeat - watchdog should not remove it", function () { this.timeout(testTimeoutMs); let redlock = new Redlock([redisClient], redlockConfig); return redlock.lock('a', testTimeoutMs) .then(() => { return new Promise((resolve, reject) => { watchdog.listen('removeStaled', (key) => reject(new Error("lock " + key + " should not be removed"))); watchdog.addHeartbeat("a"); watchdog.start(); setTimeout(() => resolve(), 1000); }); }); }); it("not send heartbeat - watchdog should remove redlock", function () { this.timeout(testTimeoutMs); // add redlock (not add heartbeat) // check if redis key exists // run watchdog and wait until removeKey listener will fired // check if redis key not exists let redlock = new Redlock([redisClient], redlockConfig); return redlock.lock('a', testTimeoutMs) // allocate resource during test working time .then(() => denodeify((redisClient.get.bind(redisClient, 'a')))) .then((res) => { should(res).be.ok(); // add key "a" to redlock_list; not append it to local "heartbeat" // it may do other process in real system
}) .then(() => { return new Promise((resolve, reject) => { let to; watchdog.listen('removeStaled', (key) => { key.should.be.equal('a'); to && clearTimeout(to); resolve(); }); // start without heartbeat of "a" watchdog.start(); to = setTimeout(reject.bind(null, Error('key "a" not removed')), 1000); }); }) .then(() => denodeify((redisClient.get.bind(redisClient, 'a')))) .then((res) => should(res).be.null()); }); });
// @see watchdog.setHeartbeat return denodeify(redisClient.hset.bind(redisClient, "redlock_list", "a", "0"))
relatedRemittance.go
// Copyright 2020 The Moov Authors // Use of this source code is governed by an Apache License // license that can be found in the LICENSE file. package wire import ( "encoding/json" "strings" "unicode/utf8" ) // RelatedRemittance is related remittance type RelatedRemittance struct { // tag tag string // RemittanceIdentification is remittance identification RemittanceIdentification string `json:"remittanceIdentification,omitempty"` // RemittanceLocationMethod is remittance location method RemittanceLocationMethod string `json:"remittanceLocationMethod,omitempty"` // RemittanceLocationElectronicAddress (E-mail or URL address) RemittanceLocationElectronicAddress string `json:"remittanceLocationElctronicAddress,omitempty"` // RemittanceData is RemittanceData RemittanceData RemittanceData `json:"remittanceData,omitempty"` // validator is composed for data validation validator // converters is composed for WIRE to GoLang Converters converters } // NewRelatedRemittance returns a new RelatedRemittance func NewRelatedRemittance() *RelatedRemittance { rr := &RelatedRemittance{ tag: TagRelatedRemittance, } return rr } // Parse takes the input string and parses the RelatedRemittance values // // Parse provides no guarantee about all fields being filled in. Callers should make a Validate() call to confirm // successful parsing and data validity. func (rr *RelatedRemittance) Parse(record string) error { if utf8.RuneCountInString(record) != 3041 { return NewTagWrongLengthErr(3041, utf8.RuneCountInString(record)) } rr.tag = record[:6] rr.RemittanceIdentification = rr.parseStringField(record[6:41]) rr.RemittanceLocationMethod = rr.parseStringField(record[41:45]) rr.RemittanceLocationElectronicAddress = rr.parseStringField(record[45:2093]) rr.RemittanceData.Name = rr.parseStringField(record[2093:2233]) rr.RemittanceData.AddressType = rr.parseStringField(record[2233:2237]) rr.RemittanceData.Department = rr.parseStringField(record[2237:2307]) rr.RemittanceData.SubDepartment = rr.parseStringField(record[2307:2377]) rr.RemittanceData.StreetName = rr.parseStringField(record[2377:2447]) rr.RemittanceData.BuildingNumber = rr.parseStringField(record[2447:2463]) rr.RemittanceData.PostCode = rr.parseStringField(record[2463:2479]) rr.RemittanceData.TownName = rr.parseStringField(record[2479:2514]) rr.RemittanceData.CountrySubDivisionState = rr.parseStringField(record[2514:2549]) rr.RemittanceData.Country = rr.parseStringField(record[2549:2551]) rr.RemittanceData.AddressLineOne = rr.parseStringField(record[2551:2621]) rr.RemittanceData.AddressLineTwo = rr.parseStringField(record[2621:2691]) rr.RemittanceData.AddressLineThree = rr.parseStringField(record[2691:2761]) rr.RemittanceData.AddressLineFour = rr.parseStringField(record[2761:2831]) rr.RemittanceData.AddressLineFive = rr.parseStringField(record[2831:2901]) rr.RemittanceData.AddressLineSix = rr.parseStringField(record[2901:2971]) rr.RemittanceData.AddressLineSeven = rr.parseStringField(record[2971:3041]) return nil } func (rr *RelatedRemittance) UnmarshalJSON(data []byte) error { type Alias RelatedRemittance aux := struct { *Alias }{ (*Alias)(rr), } if err := json.Unmarshal(data, &aux); err != nil { return err } rr.tag = TagRelatedRemittance return nil } // String writes RelatedRemittance func (rr *RelatedRemittance) String() string { var buf strings.Builder buf.Grow(3041) buf.WriteString(rr.tag) buf.WriteString(rr.RemittanceIdentificationField()) buf.WriteString(rr.RemittanceLocationMethodField()) buf.WriteString(rr.RemittanceLocationElectronicAddressField()) buf.WriteString(rr.NameField()) buf.WriteString(rr.AddressTypeField()) buf.WriteString(rr.DepartmentField()) buf.WriteString(rr.SubDepartmentField()) buf.WriteString(rr.StreetNameField()) buf.WriteString(rr.BuildingNumberField()) buf.WriteString(rr.PostCodeField()) buf.WriteString(rr.TownNameField()) buf.WriteString(rr.CountrySubDivisionStateField()) buf.WriteString(rr.CountryField()) buf.WriteString(rr.AddressLineOneField()) buf.WriteString(rr.AddressLineTwoField()) buf.WriteString(rr.AddressLineThreeField()) buf.WriteString(rr.AddressLineFourField()) buf.WriteString(rr.AddressLineFiveField()) buf.WriteString(rr.AddressLineSixField()) buf.WriteString(rr.AddressLineSevenField()) return buf.String() } // Validate performs WIRE format rule checks on RelatedRemittance and returns an error if not Validated // The first error encountered is returned and stops that parsing. func (rr *RelatedRemittance) Validate() error { if rr.tag != TagRelatedRemittance { return fieldError("tag", ErrValidTagForType, rr.tag) } if err := rr.fieldInclusion(); err != nil { return err } if err := rr.isAlphanumeric(rr.RemittanceIdentification); err != nil { return fieldError("RemittanceIdentification", err, rr.RemittanceIdentification) } if err := rr.isRemittanceLocationMethod(rr.RemittanceLocationMethod); err != nil { return fieldError("RemittanceLocationMethod", err, rr.RemittanceLocationMethod) } if err := rr.isAlphanumeric(rr.RemittanceLocationElectronicAddress); err != nil { return fieldError("RemittanceLocationElectronicAddress", err, rr.RemittanceLocationElectronicAddress) } if err := rr.isAlphanumeric(rr.RemittanceData.Name); err != nil { return fieldError("Name", err, rr.RemittanceData.Name) } if err := rr.isAddressType(rr.RemittanceData.AddressType); err != nil { return fieldError("AddressType", err, rr.RemittanceData.AddressType) } if err := rr.isAlphanumeric(rr.RemittanceData.Department); err != nil { return fieldError("Department", err, rr.RemittanceData.Department) } if err := rr.isAlphanumeric(rr.RemittanceData.SubDepartment); err != nil { return fieldError("SubDepartment", err, rr.RemittanceData.SubDepartment) } if err := rr.isAlphanumeric(rr.RemittanceData.StreetName); err != nil { return fieldError("StreetName", err, rr.RemittanceData.StreetName) } if err := rr.isAlphanumeric(rr.RemittanceData.BuildingNumber); err != nil { return fieldError("BuildingNumber", err, rr.RemittanceData.BuildingNumber) } if err := rr.isAlphanumeric(rr.RemittanceData.PostCode); err != nil { return fieldError("PostCode", err, rr.RemittanceData.PostCode) } if err := rr.isAlphanumeric(rr.RemittanceData.TownName); err != nil { return fieldError("TownName", err, rr.RemittanceData.TownName) } if err := rr.isAlphanumeric(rr.RemittanceData.CountrySubDivisionState); err != nil { return fieldError("CountrySubDivisionState", err, rr.RemittanceData.CountrySubDivisionState) } if err := rr.isAlphanumeric(rr.RemittanceData.Country); err != nil { return fieldError("Country", err, rr.RemittanceData.Country) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineOne); err != nil {
} if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineTwo); err != nil { return fieldError("AddressLineTwo", err, rr.RemittanceData.AddressLineTwo) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineThree); err != nil { return fieldError("AddressLineThree", err, rr.RemittanceData.AddressLineThree) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineFour); err != nil { return fieldError("AddressLineFour", err, rr.RemittanceData.AddressLineFour) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineFive); err != nil { return fieldError("AddressLineFive", err, rr.RemittanceData.AddressLineFive) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineSix); err != nil { return fieldError("AddressLineSix", err, rr.RemittanceData.AddressLineSix) } if err := rr.isAlphanumeric(rr.RemittanceData.AddressLineSeven); err != nil { return fieldError("AddressLineSeven", err, rr.RemittanceData.AddressLineSeven) } if err := rr.isAlphanumeric(rr.RemittanceData.CountryOfResidence); err != nil { return fieldError("CountryOfResidence", err, rr.RemittanceData.CountryOfResidence) } return nil } // fieldInclusion validate mandatory fields. If fields are // invalid the WIRE will return an error. func (rr *RelatedRemittance) fieldInclusion() error { if rr.RemittanceData.Name == "" { return fieldError("Name", ErrFieldRequired) } return nil } // RemittanceIdentificationField gets a string of the RemittanceIdentification field func (rr *RelatedRemittance) RemittanceIdentificationField() string { return rr.alphaField(rr.RemittanceIdentification, 35) } // RemittanceLocationMethodField gets a string of the RemittanceLocationMethod field func (rr *RelatedRemittance) RemittanceLocationMethodField() string { return rr.alphaField(rr.RemittanceLocationMethod, 4) } // RemittanceLocationElectronicAddressField gets a string of the RemittanceLocationElectronicAddress field func (rr *RelatedRemittance) RemittanceLocationElectronicAddressField() string { return rr.alphaField(rr.RemittanceLocationElectronicAddress, 2048) } // NameField gets a string of the Name field func (rr *RelatedRemittance) NameField() string { return rr.alphaField(rr.RemittanceData.Name, 140) } // AddressTypeField gets a string of the AddressType field func (rr *RelatedRemittance) AddressTypeField() string { return rr.alphaField(rr.RemittanceData.AddressType, 4) } // DepartmentField gets a string of the Department field func (rr *RelatedRemittance) DepartmentField() string { return rr.alphaField(rr.RemittanceData.Department, 70) } // SubDepartmentField gets a string of the SubDepartment field func (rr *RelatedRemittance) SubDepartmentField() string { return rr.alphaField(rr.RemittanceData.SubDepartment, 70) } // StreetNameField gets a string of the StreetName field func (rr *RelatedRemittance) StreetNameField() string { return rr.alphaField(rr.RemittanceData.StreetName, 70) } // BuildingNumberField gets a string of the BuildingNumber field func (rr *RelatedRemittance) BuildingNumberField() string { return rr.alphaField(rr.RemittanceData.BuildingNumber, 16) } // PostCodeField gets a string of the PostCode field func (rr *RelatedRemittance) PostCodeField() string { return rr.alphaField(rr.RemittanceData.PostCode, 16) } // TownNameField gets a string of the TownName field func (rr *RelatedRemittance) TownNameField() string { return rr.alphaField(rr.RemittanceData.TownName, 35) } // CountrySubDivisionStateField gets a string of the CountrySubDivisionState field func (rr *RelatedRemittance) CountrySubDivisionStateField() string { return rr.alphaField(rr.RemittanceData.CountrySubDivisionState, 35) } // CountryField gets a string of the Country field func (rr *RelatedRemittance) CountryField() string { return rr.alphaField(rr.RemittanceData.Country, 2) } // AddressLineOneField gets a string of the AddressLineOne field func (rr *RelatedRemittance) AddressLineOneField() string { return rr.alphaField(rr.RemittanceData.AddressLineOne, 70) } // AddressLineTwoField gets a string of the AddressLineTwo field func (rr *RelatedRemittance) AddressLineTwoField() string { return rr.alphaField(rr.RemittanceData.AddressLineTwo, 70) } // AddressLineThreeField gets a string of the AddressLineThree field func (rr *RelatedRemittance) AddressLineThreeField() string { return rr.alphaField(rr.RemittanceData.AddressLineThree, 70) } // AddressLineFourField gets a string of the AddressLineFour field func (rr *RelatedRemittance) AddressLineFourField() string { return rr.alphaField(rr.RemittanceData.AddressLineFour, 70) } // AddressLineFiveField gets a string of the AddressLineFive field func (rr *RelatedRemittance) AddressLineFiveField() string { return rr.alphaField(rr.RemittanceData.AddressLineFive, 70) } // AddressLineSixField gets a string of the AddressLineSix field func (rr *RelatedRemittance) AddressLineSixField() string { return rr.alphaField(rr.RemittanceData.AddressLineSix, 70) } // AddressLineSevenField gets a string of the AddressLineSeven field func (rr *RelatedRemittance) AddressLineSevenField() string { return rr.alphaField(rr.RemittanceData.AddressLineSeven, 70) }
return fieldError("AddressLineOne", err, rr.RemittanceData.AddressLineOne)
prelude2.rs
//! Commonly used items (`dispatching2`). pub use crate::{ error_handlers::{LoggingErrorHandler, OnError}, respond, }; pub use crate::dispatching2::{ dialogue::Dialogue, Dispatcher, HandlerExt as _, MessageFilterExt as _, UpdateFilterExt as _, }; #[cfg_attr(all(docsrs, feature = "nightly"), doc(cfg(feature = "macros")))] #[cfg(feature = "macros")] pub use crate::teloxide; pub use teloxide_core::types::{ CallbackQuery, ChatMemberUpdated, ChosenInlineResult, InlineQuery, Message, Poll, PollAnswer, PreCheckoutQuery, ShippingQuery, Update, }; #[cfg(feature = "auto-send")] pub use crate::adaptors::AutoSend; #[doc(no_inline)] pub use teloxide_core::prelude::*;
pub use dptree::{self, prelude::*};
template.py
#!/usr/bin/env python from __future__ import division, print_function import os import sys from io import BytesIO, IOBase if sys.version_info[0] < 3: from __builtin__ import xrange as range from future_builtins import ascii, filter, hex, map, oct, zip def main(): pass # region fastio BUFSIZE = 8192 class FastIO(IOBase): newlines = 0 def __init__(self, file): self._file = file self._fd = file.fileno() self.buffer = BytesIO() self.writable = "x" in file.mode or "r" not in file.mode self.write = self.buffer.write if self.writable else None def read(self): while True: b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE)) if not b: break ptr = self.buffer.tell() self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr) self.newlines = 0 return self.buffer.read() def readline(self):
def flush(self): if self.writable: os.write(self._fd, self.buffer.getvalue()) self.buffer.truncate(0), self.buffer.seek(0) class IOWrapper(IOBase): def __init__(self, file): self.buffer = FastIO(file) self.flush = self.buffer.flush self.writable = self.buffer.writable self.write = lambda s: self.buffer.write(s.encode("ascii")) self.read = lambda: self.buffer.read().decode("ascii") self.readline = lambda: self.buffer.readline().decode("ascii") def print(*args, **kwargs): """Prints the values to a stream, or to sys.stdout by default.""" sep, file = kwargs.pop("sep", " "), kwargs.pop("file", sys.stdout) at_start = True for x in args: if not at_start: file.write(sep) file.write(str(x)) at_start = False file.write(kwargs.pop("end", "\n")) if kwargs.pop("flush", False): file.flush() if sys.version_info[0] < 3: sys.stdin, sys.stdout = FastIO(sys.stdin), FastIO(sys.stdout) else: sys.stdin, sys.stdout = IOWrapper(sys.stdin), IOWrapper(sys.stdout) input = lambda: sys.stdin.readline().rstrip("\r\n") # endregion if __name__ == "__main__": main()
while self.newlines == 0: b = os.read(self._fd, max(os.fstat(self._fd).st_size, BUFSIZE)) self.newlines = b.count(b"\n") + (not b) ptr = self.buffer.tell() self.buffer.seek(0, 2), self.buffer.write(b), self.buffer.seek(ptr) self.newlines -= 1 return self.buffer.readline()
store.go
// SPDX-License-Identifier: Apache-2.0 // Copyright (c) 2020 Intel Corporation package db import ( "encoding/json" "reflect" "gitlab.com/project-emco/core/emco-base/src/orchestrator/pkg/infra/config" pkgerrors "github.com/pkg/errors" ) // DBconn interface used to talk a concrete Database connection var DBconn Store // Key is an interface that will be implemented by anypackage // that wants to use the Store interface. This allows various // db backends and key types. type Key interface { } // Store is an interface for accessing the database type Store interface { // Returns nil if db health is good HealthCheck() error // Unmarshal implements any unmarshalling needed for the database Unmarshal(inp []byte, out interface{}) error // Inserts and Updates a tag with key and also adds query fields if provided Insert(coll string, key Key, query interface{}, tag string, data interface{}) error // Find the document(s) with key and get the tag values from the document(s) Find(coll string, key Key, tag string) ([][]byte, error) // Removes the document(s) matching the key if no child reference in collection Remove(coll string, key Key) error // Remove all the document(s) matching the key RemoveAll(coll string, key Key) error // Remove the specifiec tag from the document matching the key RemoveTag(coll string, key Key, tag string) error } // CreateDBClient creates the DB client func createDBClient(dbType string, dbName string) error { var err error switch dbType { case "mongo": // create a mongodb database with orchestrator as the name DBconn, err = NewMongoStore(dbName, nil) default: return pkgerrors.New(dbType + "DB not supported") } return err } // Serialize converts given data into a JSON string func Serialize(v interface{}) (string, error)
// DeSerialize converts string to a json object specified by type func DeSerialize(str string, v interface{}) error { err := json.Unmarshal([]byte(str), &v) if err != nil { return pkgerrors.Wrap(err, "Error deSerializing "+str) } return nil } // InitializeDatabaseConnection sets up the connection to the // configured database to allow the application to talk to it. func InitializeDatabaseConnection(dbName string) error { err := createDBClient(config.GetConfiguration().DatabaseType, dbName) if err != nil { return pkgerrors.Cause(err) } err = DBconn.HealthCheck() if err != nil { return pkgerrors.Cause(err) } return nil }
{ out, err := json.Marshal(v) if err != nil { return "", pkgerrors.Wrap(err, "Error serializing "+reflect.TypeOf(v).String()) } return string(out), nil }
sentence.rs
use regex_automata::DFA; use bstr::BStr; use unicode::fsm::sentence_break_fwd::SENTENCE_BREAK_FWD; use utf8; /// An iterator over sentences in a byte string. /// /// This iterator is typically constructed by /// [`bstr::sentences`](struct.BStr.html#method.sentences). /// /// Sentences typically include their trailing punctuation and whitespace. /// /// Since sentences are made up of one or more codepoints, this iterator yields /// `&str` elements. When invalid UTF-8 is encountered, replacement codepoints /// are [substituted](index.html#handling-of-invalid-utf-8). /// /// This iterator yields words in accordance with the default sentence boundary /// rules specified in /// [UAX #29](https://www.unicode.org/reports/tr29/tr29-33.html#Sentence_Boundaries). #[derive(Clone, Debug)] pub struct Sentences<'a> { bs: &'a BStr, } impl<'a> Sentences<'a> { pub(crate) fn new(bs: &'a BStr) -> Sentences<'a> { Sentences { bs } } /// View the underlying data as a subslice of the original data. /// /// The slice returned has the same lifetime as the original slice, and so /// the iterator can continue to be used while this exists. /// /// # Examples /// /// ``` /// use bstr::B; /// /// let mut it = B("I want this. Not that. Right now.").sentences(); /// /// assert_eq!("I want this. Not that. Right now.", it.as_bstr()); /// it.next(); /// assert_eq!("Not that. Right now.", it.as_bstr()); /// it.next(); /// it.next(); /// assert_eq!("", it.as_bstr()); /// ``` #[inline] pub fn as_bstr(&self) -> &'a BStr { self.bs } } impl<'a> Iterator for Sentences<'a> { type Item = &'a str; #[inline] fn
(&mut self) -> Option<&'a str> { let (sentence, size) = decode_sentence(self.bs); if size == 0 { return None; } self.bs = &self.bs[size..]; Some(sentence) } } /// An iterator over sentences in a byte string, along with their byte offsets. /// /// This iterator is typically constructed by /// [`bstr::sentence_indices`](struct.BStr.html#method.sentence_indices). /// /// Sentences typically include their trailing punctuation and whitespace. /// /// Since sentences are made up of one or more codepoints, this iterator /// yields `&str` elements (along with their start and end byte offsets). /// When invalid UTF-8 is encountered, replacement codepoints are /// [substituted](index.html#handling-of-invalid-utf-8). Because of this, the /// indices yielded by this iterator may not correspond to the length of the /// sentence yielded with those indices. For example, when this iterator /// encounters `\xFF` in the byte string, then it will yield a pair of indices /// ranging over a single byte, but will provide an `&str` equivalent to /// `"\u{FFFD}"`, which is three bytes in length. However, when given only /// valid UTF-8, then all indices are in exact correspondence with their paired /// word. /// /// This iterator yields words in accordance with the default sentence boundary /// rules specified in /// [UAX #29](https://www.unicode.org/reports/tr29/tr29-33.html#Sentence_Boundaries). #[derive(Clone, Debug)] pub struct SentenceIndices<'a> { bs: &'a BStr, forward_index: usize, } impl<'a> SentenceIndices<'a> { pub(crate) fn new(bs: &'a BStr) -> SentenceIndices<'a> { SentenceIndices { bs: bs, forward_index: 0 } } /// View the underlying data as a subslice of the original data. /// /// The slice returned has the same lifetime as the original slice, and so /// the iterator can continue to be used while this exists. /// /// # Examples /// /// ``` /// use bstr::B; /// /// let mut it = B("I want this. Not that. Right now.").sentence_indices(); /// /// assert_eq!("I want this. Not that. Right now.", it.as_bstr()); /// it.next(); /// assert_eq!("Not that. Right now.", it.as_bstr()); /// it.next(); /// it.next(); /// assert_eq!("", it.as_bstr()); /// ``` #[inline] pub fn as_bstr(&self) -> &'a BStr { self.bs } } impl<'a> Iterator for SentenceIndices<'a> { type Item = (usize, usize, &'a str); #[inline] fn next(&mut self) -> Option<(usize, usize, &'a str)> { let index = self.forward_index; let (word, size) = decode_sentence(self.bs); if size == 0 { return None; } self.bs = &self.bs[size..]; self.forward_index += size; Some((index, index + size, word)) } } fn decode_sentence(bs: &BStr) -> (&str, usize) { if bs.is_empty() { ("", 0) } else if let Some(end) = SENTENCE_BREAK_FWD.find(bs.as_bytes()) { // Safe because a match can only occur for valid UTF-8. let sentence = unsafe { bs[..end].to_str_unchecked() }; (sentence, sentence.len()) } else { const INVALID: &'static str = "\u{FFFD}"; // No match on non-empty bytes implies we found invalid UTF-8. let (_, size) = utf8::decode_lossy(bs.as_bytes()); (INVALID, size) } } #[cfg(test)] mod tests { use ucd_parse::SentenceBreakTest; use bstr::{B, BStr}; #[test] fn forward_ucd() { for (i, test) in ucdtests().into_iter().enumerate() { let given = test.sentences.concat(); let got = sentences(given.as_bytes()); assert_eq!( test.sentences, got, "\n\nsentence forward break test {} failed:\n\ given: {:?}\n\ expected: {:?}\n\ got: {:?}\n", i, BStr::new(&given), strs_to_bstrs(&test.sentences), strs_to_bstrs(&got), ); } } // Some additional tests that don't seem to be covered by the UCD tests. #[test] fn forward_additional() { assert_eq!(vec!["a.. ", "A"], sentences(b"a.. A")); assert_eq!(vec!["a.. a"], sentences(b"a.. a")); assert_eq!(vec!["a... ", "A"], sentences(b"a... A")); assert_eq!(vec!["a... a"], sentences(b"a... a")); assert_eq!(vec!["a...,..., a"], sentences(b"a...,..., a")); } fn sentences(bytes: &[u8]) -> Vec<&str> { BStr::new(bytes).sentences().collect() } fn strs_to_bstrs<S: AsRef<str>>(strs: &[S]) -> Vec<&BStr> { strs.iter().map(|s| B(s.as_ref())).collect() } /// Return all of the UCD for sentence breaks. fn ucdtests() -> Vec<SentenceBreakTest> { const TESTDATA: &'static str = include_str!( "data/SentenceBreakTest.txt" ); let mut tests = vec![]; for mut line in TESTDATA.lines() { line = line.trim(); if line.starts_with("#") || line.contains("surrogate") { continue; } tests.push(line.parse().unwrap()); } tests } }
next
storemodel.py
import sqlite3 from db import db class StoreModel(db.Model): __tablename__ = 'stores' id = db.Column(db.Integer, primary_key=True) name = db.Column(db.String(80)) items = db.relationship('ItemModel', lazy='dynamic') def __init__(self, _id, name): self.id = _id self.name = name def json_items(self): return {'id': self.id, 'name': self.name, 'item': [item.json() for item in self.items.all()]} def
(self): return {'id': self.id, 'name': self.name} @classmethod def find_by_name(cls, name): return cls.query.filter_by(name=name).first() # SELECT * from __tablename__ WHERE name=name LIMIT 1 def save_to_db(self): db.session.add(self) db.session.commit() def delete_from_db(self): db.session.delete(self) db.session.commit()
json
run_token_classification.py
# -*- coding: utf-8 -*- # MIT License # # Copyright 2018-2021 New York University Abu Dhabi # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ Fine-tuning pre-trained models for token classification tasks. Heavily adapted from: https://github.com/huggingface/transformers/blob/ v3.0.1/examples/token-classification/run_ner.py""" import logging import os import sys from dataclasses import dataclass, field from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import ( accuracy_score as seq_accuracy_score, f1_score as seq_f1_score, precision_score as seq_precision_score, recall_score as seq_recall_score ) from sklearn.metrics import ( accuracy_score, f1_score, precision_score, recall_score ) from torch import nn from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from utils import TokenClassificationDataSet, Split, get_labels logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from " "huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if " "not the same as model_name"} ) # If you want to tweak more attributes on your tokenizer, you should do it # in a distinct script, or just modify its tokenizer_config.json. tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if " "not the same as model_name"} ) use_fast: bool = field(default=False, metadata={"help": "Set this flag to " "use fast " "tokenization."}) task_type: Optional[str] = field( default="ner", metadata={"help": "the name of the task (ner or pos)"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the " "pretrained models downloaded from s3"} ) @dataclass class
: """ Arguments pertaining to what data we are going to input our model for training and eval. """ data_dir: str = field( metadata={"help": "The input data dir. Should contain the .txt files " "for a CoNLL-2003-formatted task."} ) labels: Optional[str] = field( default=None, metadata={"help": "Path to a file containing all labels."}, ) max_seq_length: int = field( default=128, metadata={ "help": "The maximum total input sequence length after " "tokenization. Sequences longer than this will be truncated, " "sequences shorter will be padded." }, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and " "evaluation sets"} ) blind_test: bool = field( default=False, metadata={"help": "Use blind test set"} ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a # json file, let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath( sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists " "and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=(logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN), ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, " "16-bits training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, ) logger.info("Training/evaluation parameters %s", training_args) # Set seed set_seed(training_args.seed) # Prepare task labels = get_labels(data_args.labels) label_map: Dict[int, str] = {i: label for i, label in enumerate(labels)} num_labels = len(labels) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can # concurrently download model & vocab. config = AutoConfig.from_pretrained( (model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, id2label=label_map, label2id={label: i for i, label in enumerate(labels)}, cache_dir=model_args.cache_dir, ) tokenizer = AutoTokenizer.from_pretrained( (model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=model_args.use_fast, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, ) # Get datasets train_dataset = ( TokenClassificationDataSet( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.train, ) if training_args.do_train else None ) eval_dataset = ( TokenClassificationDataSet( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=Split.dev, ) if training_args.do_eval else None ) def align_predictions(predictions: np.ndarray, label_ids: np.ndarray) -> Tuple[List[int], List[int]]: preds = np.argmax(predictions, axis=2) batch_size, seq_len = preds.shape out_label_list = [[] for _ in range(batch_size)] preds_list = [[] for _ in range(batch_size)] for i in range(batch_size): for j in range(seq_len): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]]) preds_list[i].append(label_map[preds[i][j]]) return preds_list, out_label_list def compute_metrics(p: EvalPrediction) -> Dict: preds_list, out_label_list = align_predictions(p.predictions, p.label_ids) # If task type is NER, use seqeval metrics. # Otherwise, use scikit learn if model_args.task_type == "ner": return { "accuracy": seq_accuracy_score(out_label_list, preds_list), "precision": seq_precision_score(out_label_list, preds_list), "recall": seq_recall_score(out_label_list, preds_list), "f1": seq_f1_score(out_label_list, preds_list), } else: # Flatten the preds_list and out_label_list preds_list = [p for sublist in preds_list for p in sublist] out_label_list = [p for sublist in out_label_list for p in sublist] return { "accuracy": accuracy_score(out_label_list, preds_list), "precision_micro": precision_score(out_label_list, preds_list, average="micro"), "recall_micro": recall_score(out_label_list, preds_list, average="micro"), "f1_micro": f1_score(out_label_list, preds_list, average="micro"), "precision_macro": precision_score(out_label_list, preds_list, average="macro"), "recall_macro": recall_score(out_label_list, preds_list, average="macro"), "f1_macro": f1_score(out_label_list, preds_list, average="macro"), } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset, compute_metrics=compute_metrics, ) # Training if training_args.do_train: trainer.train( model_path=(model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None) ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir) # Evaluation results = {} if training_args.do_eval: logger.info("*** Evaluate ***") result = trainer.evaluate() output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt") if trainer.is_world_master(): with open(output_eval_file, "w") as writer: logger.info("***** Eval results *****") for key, value in result.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) results.update(result) # Predict if training_args.do_predict: data_split = Split.test if data_args.blind_test: data_split = Split.blind_test test_dataset = TokenClassificationDataSet( data_dir=data_args.data_dir, tokenizer=tokenizer, labels=labels, model_type=config.model_type, max_seq_length=data_args.max_seq_length, overwrite_cache=data_args.overwrite_cache, mode=data_split, ) predictions, label_ids, metrics = trainer.predict(test_dataset) preds_list, _ = align_predictions(predictions, label_ids) output_test_results_file = os.path.join(training_args.output_dir, f"{data_split.value}_results.txt") if trainer.is_world_master(): with open(output_test_results_file, "w") as writer: for key, value in metrics.items(): logger.info(" %s = %s", key, value) writer.write("%s = %s\n" % (key, value)) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, f"{data_split.value}_predictions.txt") if trainer.is_world_master(): with open(output_test_predictions_file, "w") as writer: with open(os.path.join(data_args.data_dir, f"{data_split.value}.txt"), "r") as f: example_id = 0 for line in f: if (line.startswith("-DOCSTART-") or line == "" or line == "\n"): writer.write(line) if not preds_list[example_id]: example_id += 1 elif preds_list[example_id]: output_line = (line.split()[0] + " " + preds_list[example_id].pop(0) + "\n") writer.write(output_line) else: logger.warning( "Maximum sequence length exceeded: " "No prediction for '%s'.", line.split()[0]) return results if __name__ == "__main__": main()
DataTrainingArguments
component.ts
/** * @license * Copyright 2016 Google Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ import {getCorrectEventName} from '@material/animation/util'; import {MDCComponent} from '@material/base/component'; import {applyPassive} from '@material/dom/events'; import {matches} from '@material/dom/ponyfill'; import {MDCRippleAdapter} from '@material/ripple/adapter'; import {MDCRipple} from '@material/ripple/component'; import {MDCRippleFoundation} from '@material/ripple/foundation'; import {MDCRippleCapableSurface} from '@material/ripple/types'; import {MDCCheckboxAdapter} from './adapter'; import {strings} from './constants'; import {MDCCheckboxFoundation} from './foundation'; /** * This type is needed for compatibility with Closure Compiler. */ type PropertyDescriptorGetter = (() => unknown) | undefined; const CB_PROTO_PROPS = ['checked', 'indeterminate']; export type MDCCheckboxFactory = (el: Element, foundation?: MDCCheckboxFoundation) => MDCCheckbox;
export class MDCCheckbox extends MDCComponent<MDCCheckboxFoundation> implements MDCRippleCapableSurface { static attachTo(root: Element) { return new MDCCheckbox(root); } get ripple(): MDCRipple { return this.ripple_; } get checked(): boolean { return this.nativeControl_.checked; } set checked(checked: boolean) { this.nativeControl_.checked = checked; } get indeterminate(): boolean { return this.nativeControl_.indeterminate; } set indeterminate(indeterminate: boolean) { this.nativeControl_.indeterminate = indeterminate; } get disabled(): boolean { return this.nativeControl_.disabled; } set disabled(disabled: boolean) { this.foundation.setDisabled(disabled); } get value(): string { return this.nativeControl_.value; } set value(value: string) { this.nativeControl_.value = value; } private readonly ripple_: MDCRipple = this.createRipple_(); private handleAnimationEnd_!: EventListener; // assigned in initialSyncWithDOM() initialize() { const {DATA_INDETERMINATE_ATTR} = strings; this.nativeControl_.indeterminate = this.nativeControl_.getAttribute(DATA_INDETERMINATE_ATTR) === 'true'; this.nativeControl_.removeAttribute(DATA_INDETERMINATE_ATTR); } initialSyncWithDOM() { this.handleAnimationEnd_ = () => this.foundation.handleAnimationEnd(); this.listen(getCorrectEventName(window, 'animationend'), this.handleAnimationEnd_); this.installPropertyChangeHooks_(); } destroy() { this.ripple_.destroy(); this.unlisten(getCorrectEventName(window, 'animationend'), this.handleAnimationEnd_); this.uninstallPropertyChangeHooks_(); super.destroy(); } getDefaultFoundation() { // DO NOT INLINE this variable. For backward compatibility, foundations take a Partial<MDCFooAdapter>. // To ensure we don't accidentally omit any methods, we need a separate, strongly typed adapter variable. const adapter: MDCCheckboxAdapter = { addClass: (className) => this.root.classList.add(className), forceLayout: () => (this.root as HTMLElement).offsetWidth, hasNativeControl: () => !!this.nativeControl_, isAttachedToDOM: () => Boolean(this.root.parentNode), isChecked: () => this.checked, isIndeterminate: () => this.indeterminate, removeClass: (className) => { this.root.classList.remove(className); }, removeNativeControlAttr: (attr) => { this.nativeControl_.removeAttribute(attr); }, setNativeControlAttr: (attr, value) => { this.nativeControl_.setAttribute(attr, value); }, setNativeControlDisabled: (disabled) => { this.nativeControl_.disabled = disabled; }, }; return new MDCCheckboxFoundation(adapter); } private createRipple_(): MDCRipple { // DO NOT INLINE this variable. For backward compatibility, foundations take a Partial<MDCFooAdapter>. // To ensure we don't accidentally omit any methods, we need a separate, strongly typed adapter variable. const adapter: MDCRippleAdapter = { ...MDCRipple.createAdapter(this), deregisterInteractionHandler: (evtType, handler) => this.nativeControl_.removeEventListener( evtType, handler, applyPassive()), isSurfaceActive: () => matches(this.nativeControl_, ':active'), isUnbounded: () => true, registerInteractionHandler: (evtType, handler) => this.nativeControl_.addEventListener( evtType, handler, applyPassive()), }; return new MDCRipple(this.root, new MDCRippleFoundation(adapter)); } private installPropertyChangeHooks_() { const nativeCb = this.nativeControl_; const cbProto = Object.getPrototypeOf(nativeCb); CB_PROTO_PROPS.forEach((controlState) => { const desc = Object.getOwnPropertyDescriptor(cbProto, controlState); // We have to check for this descriptor, since some browsers (Safari) don't support its return. // See: https://bugs.webkit.org/show_bug.cgi?id=49739 if (!validDescriptor(desc)) { return; } // Type cast is needed for compatibility with Closure Compiler. const nativeGetter = (desc as {get: PropertyDescriptorGetter}).get; const nativeCbDesc = { configurable: desc.configurable, enumerable: desc.enumerable, get: nativeGetter, set: (state: boolean) => { desc.set!.call(nativeCb, state); this.foundation.handleChange(); }, }; Object.defineProperty(nativeCb, controlState, nativeCbDesc); }); } private uninstallPropertyChangeHooks_() { const nativeCb = this.nativeControl_; const cbProto = Object.getPrototypeOf(nativeCb); CB_PROTO_PROPS.forEach((controlState) => { const desc = Object.getOwnPropertyDescriptor(cbProto, controlState); if (!validDescriptor(desc)) { return; } Object.defineProperty(nativeCb, controlState, desc); }); } private get nativeControl_(): HTMLInputElement { const {NATIVE_CONTROL_SELECTOR} = strings; const el = this.root.querySelector<HTMLInputElement>(NATIVE_CONTROL_SELECTOR); if (!el) { throw new Error(`Checkbox component requires a ${NATIVE_CONTROL_SELECTOR} element`); } return el; } } function validDescriptor(inputPropDesc: PropertyDescriptor | undefined): inputPropDesc is PropertyDescriptor { return !!inputPropDesc && typeof inputPropDesc.set === 'function'; }
test_likeness.py
import pytest import wave.data.likeness as likeness class TestLikenessFDS: def testRoundedLikenessFDS(self): likeness_instance = likeness.WaveLikeness( base=[i for i in range(64)], comparison=[0 for i in range(64)], ceiling=64 ) assert 31.46 == likeness_instance.getLikeness() def testExactRoundedLikenessFDS(self): likeness_instance = likeness.WaveLikeness( base=[i for i in range(64)], comparison=[i for i in range(64)], ceiling=64 ) assert 100.00 == likeness_instance.getLikeness() def testSinglePercentageNamco(self): likeness_instance = likeness.WaveLikeness( base=[i for i in range(64)], comparison=[0 for i in range(64)], ceiling=64 ) assert 100.00 == likeness_instance.getPercentage(0, 0) assert 0.00 == likeness_instance.getPercentage(63, 63) class TestLikenessNamco: def testExactRoundedLikenessNamco(self):
def testRoundedLikenessNamco(self): likeness_instance = likeness.WaveLikeness( base=[i for i in range(16)], comparison=[0 for i in range(16)], ceiling=16 ) assert 33.71 == likeness_instance.getLikeness() def testSinglePercentageNamco(self): likeness_instance = likeness.WaveLikeness( base=[i for i in range(16)], comparison=[0 for i in range(16)], ceiling=16 ) assert 100.00 == likeness_instance.getPercentage(0, 0) assert 0.00 == likeness_instance.getPercentage(15, 15)
likeness_instance = likeness.WaveLikeness( base=[i for i in range(16)], comparison=[i for i in range(16)], ceiling=16 ) assert 100.00 == likeness_instance.getLikeness()
versions.go
package versions import ( "bytes" "encoding/json" "fmt" "freshgo/internal/files" gvhttp "freshgo/pkg/http" "io" "os" "os/exec" "strings" "time" vers "github.com/hashicorp/go-version" ) var ( OS string Architecture string ) const ( versionTmpPathLin = "/tmp/freshgo/" //linuxOS = "linux-amd64" //versionPrefix = "<a class=\"download\" href=\"/dl/" ) type GoVersion struct { Name string `json:"version"` Stable bool `json:"stable"` Files []File `json:"files"` } type File struct { Name string `json:"filename"` OS string `json:"os"` Arch string `json:"arch"` Version string `json:"version"` SHA256 string `json:"sha256"` Size int `json:"size"` Kind string `json:"kind"` } func Select(selection string, onlyNewer bool) { current, err := CurrentVersion() isUpgrade := true if err != nil { isUpgrade = false } versions, err := getVersions() if err != nil { fmt.Println("error: getting versions failed - ", err) } var versReq GoVersion if selection == "latest" { versReq = LookUpLatest(versions, true) } else { versReq, err = lookUpVersion(versions, selection) if err != nil { fmt.Println(err) return } } semVer, err := vers.NewVersion(strings.TrimPrefix(versReq.Name, "go")) if err != nil { fmt.Println("error: could not get latest version.") return } newer, isUpgrade, err := compare(semVer) if err != nil { fmt.Println("error: could not compare versions - ", err) } if onlyNewer && !newer { return } if !promptInstall(isUpgrade) { return } if promptBackup(isUpgrade) { curDir, err := files.GetGoSrcPath(OS) if err != nil { fmt.Println("Error getting go bin dir: ", err) return } err = files.BackUp(curDir, current) if err != nil { fmt.Println("Error taking backup: ", err) return } } err = InstallVersion(semVer, isUpgrade) if err != nil { fmt.Println(err) return } } func Latest() { Select("latest", true) } func InstallVersion(version *vers.Version, isUpgrade bool) error { curGoSrcPath := "/usr/local/go" if _, err := os.Stat(curGoSrcPath); err == nil { os.MkdirAll(curGoSrcPath, os.ModePerm) } if _, err := os.Stat(versionTmpPathLin); os.IsNotExist(err) { err := os.Mkdir(versionTmpPathLin, os.ModePerm) if err != nil { return err } } downloadPath := versionTmpPathLin + "go" + version.String() fmt.Printf(" - Downloading version %s to path %s.\n", "go"+version.String(), downloadPath) dlVers := dlGoVersionFormat(version.String()) err := downloadToPath("https://go.dev"+dlVers, downloadPath) if err != nil { return err } if isUpgrade { curGoSrcPath, err = files.GetGoSrcPath(OS) if err != nil { return err } fmt.Println(" - Deleting current version.") err = deleteCurrentVersion() if err != nil { return err } } fmt.Printf(" - Untaring downloaded version from %s to %s.\n", downloadPath, versionTmpPathLin) err = files.UnTarGz(downloadPath, versionTmpPathLin) // err = otiai10.Copy(downloadPath, curGoSrcPath) if err != nil { return err } fmt.Printf(" - Copying from %s to %s.\n", versionTmpPathLin+"go", curGoSrcPath) err = files.SudoCopyDir(versionTmpPathLin+"go", curGoSrcPath) if err != nil { return err } if !isUpgrade { err := files.ExportToPath(curGoSrcPath + "/bin") if err != nil { return err } } u, err := CurrentVersion() if err != nil { return err } updated, err := vers.NewVersion(u) if err != nil { return err } fmt.Println("Successfully updated go version to: ", updated) return nil } func List() error { versions, err := getVersions() if err != nil { return err } for i := range versions { fmt.Print("• " + strings.TrimPrefix(versions[i].Name, "go") + " ") } return nil } func lookUpVersion(versions []GoVersion, name string) (GoVersion, error) { for i := range versions { if strings.TrimPrefix(versions[i].Name, "go") == strings.TrimPrefix(name, "go") { return versions[i], nil } } return GoVersion{}, fmt.Errorf("error: version '%s' notfound", name) } func downloadToPath(url string, path string) error { cli := gvhttp.NewHTTPClient("Freshgo", "", 60*time.Second, nil, false) resp, err := cli.Request("GET", url, nil, "", "", nil) if err != nil { return fmt.Errorf("error getting versions: %v", err) } out, err := os.Create(path) if err != nil { return err } defer out.Close() // Write the body to file _, err = io.Copy(out, strings.NewReader(string(resp))) return err } func LookUpLatest(versions []GoVersion, wantStable bool) (version GoVersion) { for i := range versions { if !wantStable { return versions[i] } else if wantStable == versions[i].Stable { return versions[i] } } return GoVersion{} } func CurrentVersion() (string, error) { var out bytes.Buffer cmd := exec.Command("go", "version") cmd.Stdout = &out err := cmd.Run() if err != nil { return "", err } return strings.TrimPrefix(strings.Split(out.String(), " ")[2], "go"), nil } func promptInstall(upgrade bool) bool { if upgrade { fmt.Print("Would you like to upgrade?[Y/n]") } else { fmt.Print("Would you like to install?[Y/n]") } var prompt string fmt.Scanln(&prompt) return prompt == "Y" } func promptBackup(upgrade bool) bool { if upgrade { fmt.Print("Would you like to backup current go-version[Y/n]") var prompt string fmt.Scanln(&prompt) return prompt == "Y" } return false } func ge
([]GoVersion, error) { cli := gvhttp.NewHTTPClient("Freshgo", "", 10*time.Second, nil, false) resp, err := cli.Request("GET", "https://go.dev/dl/?mode=json&include=all", nil, "", "", nil) if err != nil { return nil, err } var versions []GoVersion err = json.Unmarshal(resp, &versions) if err != nil { return nil, err } return versions, nil } func compare(upstream *vers.Version) (newer, isUpgrade bool, err error) { comp := 1 c, err := CurrentVersion() if err != nil { fmt.Println("[INFO]: no installed go version.") return true, false, nil } else { isUpgrade = true current, err := vers.NewVersion(c) if err != nil { fmt.Println(err) } comp = upstream.Compare(current) is := "" switch comp { case -1: is = "older than" newer = false case 1: is = "newer than" newer = true default: is = "equal to" newer = false } fmt.Printf("The latest go version is %v, which is %s the current %v \n", upstream.String(), is, current.String()) } return newer, isUpgrade, err } func deleteCurrentVersion() error { curDir, err := files.GetGoSrcPath(OS) if err != nil { return err } if curDir != "" { err = files.Remove(strings.TrimSpace(curDir)) if err != nil { return err } } else { return fmt.Errorf("%s", "error: no go src path exists") } return nil } func dlGoVersionFormat(version string) string { // in case this is the first minor version ie 1.18 version = strings.TrimSuffix(version, ".0") switch strings.ToLower(OS) { case "windows": return "/dl/go" + version + "." + strings.ToLower(OS) + "-" + strings.ToLower(Architecture) + ".zip" default: return "/dl/go" + version + "." + strings.ToLower(OS) + "-" + strings.ToLower(Architecture) + ".tar.gz" } } func init() { if OS == "" { OS = "linux" Architecture = "amd64" } }
tVersions()
list_sessions_parameters.go
// Code generated by go-swagger; DO NOT EDIT. package sessions // This file was generated by the swagger tool. // Editing this file might prove futile when you re-run the swagger generate command import ( "context" "net/http" "time" "github.com/go-openapi/errors" "github.com/go-openapi/runtime" cr "github.com/go-openapi/runtime/client" "github.com/go-openapi/strfmt" ) // NewListSessionsParams creates a new ListSessionsParams object // with the default values initialized. func NewListSessionsParams() *ListSessionsParams { var () return &ListSessionsParams{ timeout: cr.DefaultTimeout, } } // NewListSessionsParamsWithTimeout creates a new ListSessionsParams object // with the default values initialized, and the ability to set a timeout on a request func NewListSessionsParamsWithTimeout(timeout time.Duration) *ListSessionsParams { var () return &ListSessionsParams{ timeout: timeout, } } // NewListSessionsParamsWithContext creates a new ListSessionsParams object // with the default values initialized, and the ability to set a context for a request func NewListSessionsParamsWithContext(ctx context.Context) *ListSessionsParams
// NewListSessionsParamsWithHTTPClient creates a new ListSessionsParams object // with the default values initialized, and the ability to set a custom HTTPClient for a request func NewListSessionsParamsWithHTTPClient(client *http.Client) *ListSessionsParams { var () return &ListSessionsParams{ HTTPClient: client, } } /*ListSessionsParams contains all the parameters to send to the API endpoint for the list sessions operation typically these are written to a http.Request */ type ListSessionsParams struct { /*IdentityID Sessions for this identityID only */ IdentityID *strfmt.UUID /*State Sessions that are "open" or "closed" only */ State *string timeout time.Duration Context context.Context HTTPClient *http.Client } // WithTimeout adds the timeout to the list sessions params func (o *ListSessionsParams) WithTimeout(timeout time.Duration) *ListSessionsParams { o.SetTimeout(timeout) return o } // SetTimeout adds the timeout to the list sessions params func (o *ListSessionsParams) SetTimeout(timeout time.Duration) { o.timeout = timeout } // WithContext adds the context to the list sessions params func (o *ListSessionsParams) WithContext(ctx context.Context) *ListSessionsParams { o.SetContext(ctx) return o } // SetContext adds the context to the list sessions params func (o *ListSessionsParams) SetContext(ctx context.Context) { o.Context = ctx } // WithHTTPClient adds the HTTPClient to the list sessions params func (o *ListSessionsParams) WithHTTPClient(client *http.Client) *ListSessionsParams { o.SetHTTPClient(client) return o } // SetHTTPClient adds the HTTPClient to the list sessions params func (o *ListSessionsParams) SetHTTPClient(client *http.Client) { o.HTTPClient = client } // WithIdentityID adds the identityID to the list sessions params func (o *ListSessionsParams) WithIdentityID(identityID *strfmt.UUID) *ListSessionsParams { o.SetIdentityID(identityID) return o } // SetIdentityID adds the identityId to the list sessions params func (o *ListSessionsParams) SetIdentityID(identityID *strfmt.UUID) { o.IdentityID = identityID } // WithState adds the state to the list sessions params func (o *ListSessionsParams) WithState(state *string) *ListSessionsParams { o.SetState(state) return o } // SetState adds the state to the list sessions params func (o *ListSessionsParams) SetState(state *string) { o.State = state } // WriteToRequest writes these params to a swagger request func (o *ListSessionsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { if err := r.SetTimeout(o.timeout); err != nil { return err } var res []error if o.IdentityID != nil { // query param identityID var qrIdentityID strfmt.UUID if o.IdentityID != nil { qrIdentityID = *o.IdentityID } qIdentityID := qrIdentityID.String() if qIdentityID != "" { if err := r.SetQueryParam("identityID", qIdentityID); err != nil { return err } } } if o.State != nil { // query param state var qrState string if o.State != nil { qrState = *o.State } qState := qrState if qState != "" { if err := r.SetQueryParam("state", qState); err != nil { return err } } } if len(res) > 0 { return errors.CompositeValidationError(res...) } return nil }
{ var () return &ListSessionsParams{ Context: ctx, } }
asm.rs
use crate::builder::Builder; use crate::context::CodegenCx; use crate::llvm; use crate::type_of::LayoutLlvmExt; use crate::value::Value; use rustc_codegen_ssa::mir::operand::OperandValue; use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_hir as hir; use rustc_span::Span; use libc::{c_char, c_uint}; use log::debug; use std::ffi::{CStr, CString}; impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> { fn codegen_inline_asm( &mut self, ia: &hir::InlineAsmInner, outputs: Vec<PlaceRef<'tcx, &'ll Value>>, mut inputs: Vec<&'ll Value>, span: Span, ) -> bool { let mut ext_constraints = vec![]; let mut output_types = vec![]; // Prepare the output operands let mut indirect_outputs = vec![]; for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() { if out.is_rw { let operand = self.load_operand(place); if let OperandValue::Immediate(_) = operand.val { inputs.push(operand.immediate()); } ext_constraints.push(i.to_string()); } if out.is_indirect { let operand = self.load_operand(place); if let OperandValue::Immediate(_) = operand.val { indirect_outputs.push(operand.immediate()); } } else { output_types.push(place.layout.llvm_type(self.cx())); } } if !indirect_outputs.is_empty() { indirect_outputs.extend_from_slice(&inputs); inputs = indirect_outputs; } let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s)); // Default per-arch clobbers // Basically what clang does let arch_clobbers = match &self.sess().target.target.arch[..] { "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], "mips" | "mips64" => vec!["~{$1}"], _ => Vec::new(), }; let all_constraints = ia .outputs .iter() .map(|out| out.constraint.to_string()) .chain(ia.inputs.iter().map(|s| s.to_string())) .chain(ext_constraints) .chain(clobbers) .chain(arch_clobbers.iter().map(|s| (*s).to_string())) .collect::<Vec<String>>() .join(","); debug!("Asm Constraints: {}", &all_constraints); // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { 0 => self.type_void(), 1 => output_types[0], _ => self.type_struct(&output_types, false), }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); let r = inline_asm_call( self, &asm, &constraint_cstr, &inputs, output_type, ia.volatile, ia.alignstack, ia.dialect, ); if r.is_none() { return false; } let r = r.unwrap(); // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &place)) in outputs.enumerate() { let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) }; OperandValue::Immediate(v).store(self, place); } // Store mark in a metadata node so we can map LLVM errors // back to source locations. See #17552. unsafe { let key = "srcloc"; let kind = llvm::LLVMGetMDKindIDInContext( self.llcx, key.as_ptr() as *const c_char, key.len() as c_uint, ); let val: &'ll Value = self.const_i32(span.ctxt().outer_expn().as_u32() as i32); llvm::LLVMSetMetadata(r, kind, llvm::LLVMMDNodeInContext(self.llcx, &val, 1)); } true } } impl AsmMethods for CodegenCx<'ll, 'tcx> { fn codegen_global_asm(&self, ga: &hir::GlobalAsm) { let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); unsafe { llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr()); } } } fn inline_asm_call( bx: &mut Builder<'a, 'll, 'tcx>, asm: &CStr, cons: &CStr, inputs: &[&'ll Value], output: &'ll llvm::Type, volatile: bool, alignstack: bool, dia: ::rustc_ast::ast::AsmDialect, ) -> Option<&'ll Value> { let volatile = if volatile { llvm::True } else { llvm::False }; let alignstack = if alignstack { llvm::True } else { llvm::False }; let argtys = inputs .iter() .map(|v| { debug!("Asm Input Type: {:?}", *v); bx.cx.val_ty(*v) }) .collect::<Vec<_>>(); debug!("Asm Output Type: {:?}", output); let fty = bx.cx.type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr()); debug!("constraint verification result: {:?}", constraints_ok); if constraints_ok {
fty, asm.as_ptr(), cons.as_ptr(), volatile, alignstack, llvm::AsmDialect::from_generic(dia), ); Some(bx.call(v, inputs, None)) } else { // LLVM has detected an issue with our constraints, bail out None } } }
let v = llvm::LLVMRustInlineAsm(
disablecommand.ts
import { Message } from "discord.js" import GuildSettings from "../../../schemas/GuildSettings"; import Client from "../../../structures/Client" import { ICommand, RunCallback } from "../../../structures/Interfaces" import Logger from "../../../utils/logger/Logger"; function
(): ICommand { const run: RunCallback = async (client: Client, message: Message, args: string[]) => { const cmdname = args[0]?.toLowerCase(); if (!cmdname || !message.guild) return; if (cmdname === "admin") { message.channel.send("You can not disable this command!"); return; } if (!client.commands.has(cmdname)) { message.channel.send("Invalid Command: " + cmdname); return; } let guildSettings = await GuildSettings.findOne({ guildID: message.guild.id }); if (guildSettings === null) { const doc = new GuildSettings({ guildID: message.guild.id }); await doc.save(); guildSettings = doc; } if (guildSettings.disabledCommands.includes(cmdname as string)) { message.channel.send("Command already disabled!"); return; } guildSettings.disabledCommands.push(cmdname as string); await guildSettings.save(); message.channel.send(`Disabled \`${cmdname}\``); Logger.info(`Disabled \`${cmdname}\``); } return { run: run, settings: { description: "Disables a command", usage: "admin disablecommand <command>", minimumArgs: 1 } } } export default DisableCommand();
DisableCommand
gru_gate.py
from ray.rllib.utils.framework import try_import_tf tf = try_import_tf() class GRUGate(tf.keras.layers.Layer): def __init__(self, init_bias=0., **kwargs): super().__init__(**kwargs) self._init_bias = init_bias def
(self, input_shape): h_shape, x_shape = input_shape if x_shape[-1] != h_shape[-1]: raise ValueError( "Both inputs to GRUGate must have equal size in last axis!") dim = int(h_shape[-1]) self._w_r = self.add_weight(shape=(dim, dim)) self._w_z = self.add_weight(shape=(dim, dim)) self._w_h = self.add_weight(shape=(dim, dim)) self._u_r = self.add_weight(shape=(dim, dim)) self._u_z = self.add_weight(shape=(dim, dim)) self._u_h = self.add_weight(shape=(dim, dim)) def bias_initializer(shape, dtype): return tf.fill(shape, tf.cast(self._init_bias, dtype=dtype)) self._bias_z = self.add_weight( shape=(dim, ), initializer=bias_initializer) def call(self, inputs, **kwargs): # Pass in internal state first. h, X = inputs r = tf.tensordot(X, self._w_r, axes=1) + \ tf.tensordot(h, self._u_r, axes=1) r = tf.nn.sigmoid(r) z = tf.tensordot(X, self._w_z, axes=1) + \ tf.tensordot(h, self._u_z, axes=1) - self._bias_z z = tf.nn.sigmoid(z) h_next = tf.tensordot(X, self._w_h, axes=1) + \ tf.tensordot((h * r), self._u_h, axes=1) h_next = tf.nn.tanh(h_next) return (1 - z) * h + z * h_next
build
used_limits_for_admin.py
# Copyright 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations
from nova.api.openstack import extensions class Used_limits_for_admin(extensions.ExtensionDescriptor): """Provide data to admin on limited resources used by other tenants.""" name = "UsedLimitsForAdmin" alias = "os-used-limits-for-admin" namespace = ("http://docs.openstack.org/compute/ext/used_limits_for_admin" "/api/v1.1") updated = "2013-05-02T00:00:00Z"
# under the License.
commands.rs
use std::{ fs, io::{self, BufReader, BufWriter, Read, Write}, path::{Path, PathBuf}, }; use utils::colors; use crate::{ archive, cli::Command, dialogs::Confirmation, error::FinalError, file::{ extensions_from_path, separate_known_extensions_from_name, CompressionFormat::{self, *}, }, oof, utils::{self, to_utf}, }; pub fn
(command: Command, flags: &oof::Flags) -> crate::Result<()> { match command { Command::Compress { files, output_path } => { // Formats from path extension, like "file.tar.gz.xz" -> vec![Tar, Gzip, Lzma] let formats = extensions_from_path(&output_path); if formats.is_empty() { FinalError::with_title(format!("Cannot compress to '{}'.", to_utf(&output_path))) .detail("You shall supply the compression format via the extension.") .hint("Try adding something like .tar.gz or .zip to the output file.") .hint("") .hint("Examples:") .hint(format!(" ouch compress ... {}.tar.gz", to_utf(&output_path))) .hint(format!(" ouch compress ... {}.zip", to_utf(&output_path))) .display_and_crash(); } if matches!(&formats[0], Bzip | Gzip | Lzma) && files.len() > 1 { // This piece of code creates a sugestion for compressing multiple files // It says: // Change from file.bz.xz // To file.tar.bz.xz let extensions_text: String = formats.iter().map(|format| format.to_string()).collect(); let output_path = to_utf(output_path); // Breaks if Lzma is .lz or .lzma and not .xz // Or if Bzip is .bz2 and not .bz let extensions_start_position = output_path.rfind(&extensions_text).unwrap(); let pos = extensions_start_position; let empty_range = pos..pos; let mut suggested_output_path = output_path.clone(); suggested_output_path.replace_range(empty_range, ".tar"); FinalError::with_title(format!( "Cannot compress to '{}'.", to_utf(&output_path) )) .detail("You are trying to compress multiple files.") .detail(format!( "The compression format '{}' cannot receive multiple files.", &formats[0] )) .detail("The only supported formats that bundle files into an archive are .tar and .zip.") .hint(format!( "Try inserting '.tar' or '.zip' before '{}'.", &formats[0] )) .hint(format!("From: {}", output_path)) .hint(format!(" To : {}", suggested_output_path)) .display_and_crash(); } if let Some(format) = formats.iter().skip(1).position(|format| matches!(format, Tar | Zip)) { FinalError::with_title(format!("Cannot compress to '{}'.", to_utf(&output_path))) .detail(format!("Found the format '{}' in an incorrect position.", format)) .detail(format!( "{} can only be used at the start of the file extension.", format )) .hint(format!( "If you wish to compress multiple files, start the extension with {}.", format )) .hint(format!("Otherwise, remove {} from '{}'.", format, to_utf(&output_path))) .display_and_crash(); } let confirm = Confirmation::new("Do you want to overwrite 'FILE'?", Some("FILE")); if output_path.exists() && !utils::permission_for_overwriting(&output_path, flags, &confirm)? { // The user does not want to overwrite the file return Ok(()); } let output_file = fs::File::create(&output_path).unwrap_or_else(|err| { FinalError::with_title(format!("Cannot compress to '{}'.", to_utf(&output_path))) .detail(format!("Could not open file '{}' for writing.", to_utf(&output_path))) .detail(format!("Error: {}.", err)) .display_and_crash() }); let compress_result = compress_files(files, formats, output_file, flags); // If any error occurred, delete incomplete file if compress_result.is_err() { // Print an extra alert message pointing out that we left a possibly // CORRUPTED FILE at `output_path` if let Err(err) = fs::remove_file(&output_path) { eprintln!("{red}FATAL ERROR:\n", red = colors::red()); eprintln!(" Please manually delete '{}'.", to_utf(&output_path)); eprintln!( " Compression failed and we could not delete '{}'.", to_utf(&output_path), ); eprintln!( " Error:{reset} {}{red}.{reset}\n", err, reset = colors::reset(), red = colors::red() ); } } else { println!( "{}[INFO]{} Successfully compressed '{}'.", colors::yellow(), colors::reset(), to_utf(output_path), ); } compress_result?; }, Command::Decompress { files, output_folder } => { let mut output_paths = vec![]; let mut formats = vec![]; for path in files.iter() { let (file_output_path, file_formats) = separate_known_extensions_from_name(path); output_paths.push(file_output_path); formats.push(file_formats); } let files_missing_format: Vec<PathBuf> = files .iter() .zip(&formats) .filter(|(_, formats)| formats.is_empty()) .map(|(input_path, _)| PathBuf::from(input_path)) .collect(); if !files_missing_format.is_empty() { panic!("Throw this vec into a error variant: {:#?}", files_missing_format); } // From Option<PathBuf> to Option<&Path> let output_folder = output_folder.as_ref().map(|path| path.as_ref()); for ((input_path, formats), output_path) in files.iter().zip(formats).zip(output_paths) { decompress_file(input_path, formats, output_folder, output_path, flags)?; } }, Command::ShowHelp => crate::help_command(), Command::ShowVersion => crate::version_command(), } Ok(()) } fn compress_files( files: Vec<PathBuf>, formats: Vec<CompressionFormat>, output_file: fs::File, _flags: &oof::Flags, ) -> crate::Result<()> { let file_writer = BufWriter::new(output_file); if formats.len() == 1 { let build_archive_from_paths = match formats[0] { Tar => archive::tar::build_archive_from_paths, Zip => archive::zip::build_archive_from_paths, _ => unreachable!(), }; let mut bufwriter = build_archive_from_paths(&files, file_writer)?; bufwriter.flush()?; } else { let mut writer: Box<dyn Write> = Box::new(file_writer); // Grab previous encoder and wrap it inside of a new one let chain_writer_encoder = |format: &CompressionFormat, encoder: Box<dyn Write>| { let encoder: Box<dyn Write> = match format { Gzip => Box::new(flate2::write::GzEncoder::new(encoder, Default::default())), Bzip => Box::new(bzip2::write::BzEncoder::new(encoder, Default::default())), Lzma => Box::new(xz2::write::XzEncoder::new(encoder, 6)), _ => unreachable!(), }; encoder }; for format in formats.iter().skip(1).rev() { writer = chain_writer_encoder(format, writer); } match formats[0] { Gzip | Bzip | Lzma => { writer = chain_writer_encoder(&formats[0], writer); let mut reader = fs::File::open(&files[0]).unwrap(); io::copy(&mut reader, &mut writer)?; }, Tar => { let mut writer = archive::tar::build_archive_from_paths(&files, writer)?; writer.flush()?; }, Zip => { eprintln!( "{yellow}Warning:{reset}", yellow = colors::yellow(), reset = colors::reset() ); eprintln!("\tCompressing .zip entirely in memory."); eprintln!("\tIf the file is too big, your pc might freeze!"); eprintln!( "\tThis is a limitation for formats like '{}'.", formats.iter().map(|format| format.to_string()).collect::<String>() ); eprintln!("\tThe design of .zip makes it impossible to compress via stream."); let mut vec_buffer = io::Cursor::new(vec![]); archive::zip::build_archive_from_paths(&files, &mut vec_buffer)?; io::copy(&mut vec_buffer, &mut writer)?; }, } } Ok(()) } fn decompress_file( input_file_path: &Path, formats: Vec<CompressionFormat>, output_folder: Option<&Path>, output_path: &Path, flags: &oof::Flags, ) -> crate::Result<()> { // TODO: improve error treatment let reader = fs::File::open(&input_file_path)?; let reader = BufReader::new(reader); let mut reader: Box<dyn Read> = Box::new(reader); // Grab previous decoder and wrap it inside of a new one let chain_reader_decoder = |format: &CompressionFormat, decoder: Box<dyn Read>| { let decoder: Box<dyn Read> = match format { Gzip => Box::new(flate2::read::GzDecoder::new(decoder)), Bzip => Box::new(bzip2::read::BzDecoder::new(decoder)), Lzma => Box::new(xz2::read::XzDecoder::new(decoder)), _ => unreachable!(), }; decoder }; for format in formats.iter().skip(1).rev() { reader = chain_reader_decoder(format, reader); } // Output path with folder prefix let output_path = if let Some(output_folder) = output_folder { output_folder.join(output_path) } else { output_path.to_path_buf() }; let output_folder = output_folder.unwrap_or_else(|| Path::new(".")); match formats[0] { Gzip | Bzip | Lzma => { reader = chain_reader_decoder(&formats[0], reader); // TODO: improve error treatment // TODO: provide more context for this error treatment let mut writer = fs::File::create(&output_path)?; io::copy(&mut reader, &mut writer)?; println!("[INFO]: Successfully uncompressed file at '{}'.", to_utf(output_path)); }, Tar => { utils::create_dir_if_non_existent(output_folder)?; let _ = crate::archive::tar::unpack_archive(reader, output_folder, flags)?; println!("[INFO]: Successfully uncompressed bundle at '{}'.", to_utf(output_folder)); }, Zip => { utils::create_dir_if_non_existent(output_folder)?; // If this is the only one if formats.len() == 1 { todo!("fix this!!!"); } let mut vec = vec![]; io::copy(&mut reader, &mut vec)?; let zip_archive = zip::ZipArchive::new(io::Cursor::new(vec))?; let _ = crate::archive::zip::unpack_archive(zip_archive, output_folder, flags)?; println!("[INFO]: Successfully uncompressed bundle at '{}'.", to_utf(output_folder)); // let vec_buffer = vec![]; // let mut vec_buffer = io::Cursor::new(vec_buffer); // // TODO: improve/change this message // eprintln!("Compressing first into .zip."); // eprintln!("Warning: .zip archives with extra extensions have a downside."); // eprintln!("The only way is loading everything into the RAM while compressing, and then write everything down."); // eprintln!("this means that by compressing .zip with extra compression formats, you can run out of RAM if the file is too large!"); // zip::build_archive_from_paths(&files, &mut vec_buffer)?; // io::copy(&mut vec_buffer, &mut writer)?; }, } Ok(()) }
run
receive.go
package verbs import ( "encoding/xml" ) type ReceiveAttributes struct { Action *string `xml:"action,attr,omitempty"` MediaType *string `xml:"mediaType,attr,omitempty"` Method *string `xml:"method,attr,omitempty"` PageSize *string `xml:"pageSize,attr,omitempty"` StoreMedia *bool `xml:"storeMedia,attr,omitempty"` }
ReceiveAttributes }
type Receive struct { XMLName xml.Name `xml:"Receive"`
dl.rs
use crate::error::Error; use crate::module::{AddrDetails, GlobalSpec, HeapSpec, Module, ModuleInternal, TableElement}; use libc::c_void; use libloading::Library; use lucet_module::{ FunctionHandle, FunctionIndex, FunctionSpec, ModuleData, ModuleFeatures, ModuleSignature, PublicKey, SerializedModule, Signature, VersionInfo, LUCET_MODULE_SYM, }; use std::ffi::CStr; use std::mem::MaybeUninit; use std::path::Path; use std::slice; use std::slice::from_raw_parts; use std::sync::Arc; use thiserror::Error; use raw_cpuid::CpuId; #[derive(Debug, Error)] pub enum DlError { #[error("Loading: {0}")] Loading( #[from] #[source] libloading::Error, ), #[error("IO: {0}")] Io( #[from] #[source] std::io::Error, ), } fn check_feature_support(module_features: &ModuleFeatures) -> Result<(), Error> { let cpuid = CpuId::new(); fn missing_feature(feature: &str) -> Error { Error::Unsupported(format!( "Module requires feature host does not support: {}", feature )) } let info = cpuid .get_feature_info() .ok_or_else(|| Error::Unsupported("Unable to obtain host CPU feature info!".to_string()))?; if module_features.sse3 && !info.has_sse3() { return Err(missing_feature("SSE3")); } if module_features.ssse3 && !info.has_ssse3() { return Err(missing_feature("SSS3")); } if module_features.sse41 && !info.has_sse41() { return Err(missing_feature("SSE4.1")); } if module_features.sse42 && !info.has_sse42() { return Err(missing_feature("SSE4.2")); } if module_features.avx && !info.has_avx() { return Err(missing_feature("AVX")); } if module_features.popcnt && !info.has_popcnt() { return Err(missing_feature("POPCNT")); } if module_features.bmi1 || module_features.bmi2 { let info = cpuid.get_extended_feature_info().ok_or_else(|| { Error::Unsupported("Unable to obtain host CPU extended feature info!".to_string()) })?; if module_features.bmi1 && !info.has_bmi1() { return Err(missing_feature("BMI1")); } if module_features.bmi2 && !info.has_bmi2() { return Err(missing_feature("BMI2")); } } if module_features.lzcnt { let info = cpuid.get_extended_function_info().ok_or_else(|| { Error::Unsupported("Unable to obtain host CPU extended function info!".to_string()) })?; if module_features.lzcnt && !info.has_lzcnt() { return Err(missing_feature("LZCNT")); } } // Features are fine, we're compatible! Ok(()) } /// A Lucet module backed by a dynamically-loaded shared object. pub struct DlModule { /// A handle to the loaded object. /// /// This is never used after initialization, but we can't let the library close until we're done /// with this module. _lib: Library, /// Base address of the dynamically-loaded module fbase: *const c_void, /// Metadata decoded from inside the module module: lucet_module::Module<'static>, } // for the one raw pointer only unsafe impl Send for DlModule {} unsafe impl Sync for DlModule {} impl DlModule { /// Create a module, loading code from a shared object on the filesystem. pub fn load<P: AsRef<Path>>(so_path: P) -> Result<Arc<Self>, Error> { Self::load_and_maybe_verify(so_path, None) } /// Create a module, loading code from a shared object on the filesystem /// and verifying it using a public key if one has been supplied. pub fn load_and_verify<P: AsRef<Path>>(so_path: P, pk: PublicKey) -> Result<Arc<Self>, Error> { Self::load_and_maybe_verify(so_path, Some(pk)) } fn
<P: AsRef<Path>>( so_path: P, pk: Option<PublicKey>, ) -> Result<Arc<Self>, Error> { // Load the dynamic library. The undefined symbols corresponding to the lucet_syscall_ // functions will be provided by the current executable. We trust our wasm->dylib compiler // to make sure these function calls are the way the dylib can touch memory outside of its // stack and heap. let abs_so_path = so_path.as_ref().canonicalize().map_err(DlError::Io)?; let lib = Library::new(abs_so_path.as_os_str()).map_err(DlError::Loading)?; let serialized_module_ptr = unsafe { lib.get::<*const SerializedModule>(LUCET_MODULE_SYM.as_bytes()) .map_err(|e| { lucet_incorrect_module!("error loading required symbol `lucet_module`: {}", e) })? }; let serialized_module: &SerializedModule = unsafe { serialized_module_ptr.as_ref().unwrap() }; let module_version = serialized_module.version.clone(); let runtime_version = VersionInfo::current(include_str!(concat!(env!("OUT_DIR"), "/commit_hash")).as_bytes()); if !module_version.valid() { return Err(lucet_incorrect_module!("reserved bit is not set. This module is likely too old for this lucet-runtime to load.")); } else if !runtime_version.compatible_with(&module_version) { return Err(lucet_incorrect_module!( "version mismatch. module has version {}, while this runtime is version {}", module_version, runtime_version, )); } // Deserialize the slice into ModuleData, which will hold refs into the loaded // shared object file in `module_data_slice`. Both of these get a 'static lifetime because // Rust doesn't have a safe way to describe that their lifetime matches the containing // struct (and the dll). // // The exposed lifetime of ModuleData will be the same as the lifetime of the // dynamically loaded library. This makes the interface safe. let module_data_slice: &'static [u8] = unsafe { slice::from_raw_parts( serialized_module.module_data_ptr as *const u8, serialized_module.module_data_len as usize, ) }; let module_data = ModuleData::deserialize(module_data_slice)?; check_feature_support(module_data.features())?; // If a public key has been provided, verify the module signature // The TOCTOU issue is unavoidable without reimplenting `dlopen(3)` if let Some(pk) = pk { ModuleSignature::verify(so_path, &pk, &module_data)?; } let fbase = if let Some(dli) = dladdr(serialized_module as *const SerializedModule as *const c_void) { dli.dli_fbase } else { std::ptr::null() }; if serialized_module.tables_len > std::u32::MAX as u64 { lucet_incorrect_module!("table segment too long: {}", serialized_module.tables_len); } let tables: &'static [&'static [TableElement]] = unsafe { from_raw_parts( serialized_module.tables_ptr as *const &[TableElement], serialized_module.tables_len as usize, ) }; let function_manifest = if serialized_module.function_manifest_ptr != 0 { unsafe { from_raw_parts( serialized_module.function_manifest_ptr as *const FunctionSpec, serialized_module.function_manifest_len as usize, ) } } else { &[] }; Ok(Arc::new(DlModule { _lib: lib, fbase, module: lucet_module::Module { version: module_version, module_data, tables, function_manifest, }, })) } } impl Module for DlModule {} impl ModuleInternal for DlModule { fn is_instruction_count_instrumented(&self) -> bool { self.module.module_data.features().instruction_count } fn heap_spec(&self) -> Option<&HeapSpec> { self.module.module_data.heap_spec() } fn globals(&self) -> &[GlobalSpec<'_>] { self.module.module_data.globals_spec() } fn get_sparse_page_data(&self, page: usize) -> Option<&[u8]> { if let Some(ref sparse_data) = self.module.module_data.sparse_data() { *sparse_data.get_page(page) } else { None } } fn sparse_page_data_len(&self) -> usize { self.module .module_data .sparse_data() .map(|d| d.len()) .unwrap_or(0) } fn table_elements(&self) -> Result<&[TableElement], Error> { match self.module.tables.get(0) { Some(table) => Ok(table), None => Err(lucet_incorrect_module!("table 0 is not present")), } } fn get_export_func(&self, sym: &str) -> Result<FunctionHandle, Error> { self.module .module_data .get_export_func_id(sym) .ok_or_else(|| Error::SymbolNotFound(sym.to_string())) .map(|id| { let ptr = self.function_manifest()[id.as_u32() as usize].ptr(); FunctionHandle { ptr, id, is_start_func: false, } }) } fn get_func_from_idx(&self, table_id: u32, func_id: u32) -> Result<FunctionHandle, Error> { if table_id != 0 { return Err(Error::FuncNotFound(table_id, func_id)); } let table = self.table_elements()?; let func = table .get(func_id as usize) .map(|element| element.function_pointer()) .ok_or(Error::FuncNotFound(table_id, func_id))?; Ok(self.function_handle_from_ptr(func)) } fn get_start_func(&self) -> Result<Option<FunctionHandle>, Error> { Ok(self.module.module_data.get_start_func_id().map(|id| { let ptr = self.function_manifest()[id.as_u32() as usize].ptr(); FunctionHandle { ptr, id, is_start_func: true, } })) } fn function_manifest(&self) -> &[FunctionSpec] { self.module.function_manifest } fn addr_details(&self, addr: *const c_void) -> Result<Option<AddrDetails>, Error> { if let Some(dli) = dladdr(addr) { let file_name = if dli.dli_fname.is_null() { None } else { Some(unsafe { CStr::from_ptr(dli.dli_fname).to_owned().into_string()? }) }; let sym_name = if dli.dli_sname.is_null() { None } else { Some(unsafe { CStr::from_ptr(dli.dli_sname).to_owned().into_string()? }) }; Ok(Some(AddrDetails { in_module_code: dli.dli_fbase as *const c_void == self.fbase, file_name, sym_name, })) } else { Ok(None) } } fn get_signature(&self, fn_id: FunctionIndex) -> &Signature { self.module.module_data.get_signature(fn_id) } } // TODO: PR to nix or libloading? // TODO: possibly not safe to use without grabbing the mutex within libloading::Library? fn dladdr(addr: *const c_void) -> Option<libc::Dl_info> { let mut info = MaybeUninit::<libc::Dl_info>::uninit(); let res = unsafe { libc::dladdr(addr, info.as_mut_ptr()) }; if res != 0 { Some(unsafe { info.assume_init() }) } else { None } }
load_and_maybe_verify
lib.go
// +build go1.2 // Copyright 2014 Unknwon // // Licensed under the Apache License, Version 2.0 (the "License"): you may // not use this file except in compliance with the License. You may obtain // a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the // License for the specific language governing permissions and limitations // under the License. // Package lib is a library version of Gopm(Go Package Manager). package lib import ( "io" "runtime" "github.com/gpmgo/gopm/cmd" "github.com/gpmgo/gopm/modules/cli" "github.com/gpmgo/gopm/modules/log" "github.com/gpmgo/gopm/modules/setting" ) const APP_VER = "0.8.8.0307 Beta" func init() { runtime.GOMAXPROCS(runtime.NumCPU()) setting.LibraryMode = true } func Run(args []string) *setting.Error
func SetOutput(out io.Writer) { log.Output = out }
{ app := cli.NewApp() app.Name = "Gopm" app.Usage = "Go Package Manager" app.Version = APP_VER app.Commands = []cli.Command{ cmd.CmdList, cmd.CmdGen, cmd.CmdGet, cmd.CmdBin, cmd.CmdConfig, cmd.CmdRun, cmd.CmdTest, cmd.CmdBuild, cmd.CmdInstall, cmd.CmdClean, cmd.CmdUpdate, // CmdSearch, } app.Flags = append(app.Flags, []cli.Flag{ cli.BoolFlag{"noterm, n", "disable color output", ""}, cli.BoolFlag{"strict, s", "strict mode", ""}, cli.BoolFlag{"debug, d", "debug mode", ""}, }...) app.Run(args) return setting.RuntimeError }
faculty.rs
pub struct Faculty { id: u8, } impl Faculty { pub fn new(id: u8) -> Faculty { Faculty { id: id } } pub fn get_id(&self) -> u8 { self.id } } #[test] fn
() { let f = Faculty::new(10); assert_eq!(10u8, f.id); }
test_faculty
lib.rs
// SPDX-License-Identifier: MIT OR Apache-2.0 // // Copyright (c) 2018-2021 Andre Richter <[email protected]> // Rust embedded logo for `make doc`. #![doc(html_logo_url = "https://git.io/JeGIp")] //! The `kernel` library. //! //! Used to compose the final kernel binary. //! //! # Code organization and architecture //! //! The code is divided into different *modules*, each representing a typical **subsystem** of the //! `kernel`. Top-level module files of subsystems reside directly in the `src` folder. For example, //! `src/memory.rs` contains code that is concerned with all things memory management. //! //! ## Visibility of processor architecture code //! //! Some of the `kernel`'s subsystems depend on low-level code that is specific to the target //! processor architecture. For each supported processor architecture, there exists a subfolder in //! `src/_arch`, for example, `src/_arch/aarch64`. //! //! The architecture folders mirror the subsystem modules laid out in `src`. For example, //! architectural code that belongs to the `kernel`'s MMU subsystem (`src/memory/mmu.rs`) would go //! into `src/_arch/aarch64/memory/mmu.rs`. The latter file is loaded as a module in //! `src/memory/mmu.rs` using the `path attribute`. Usually, the chosen module name is the generic //! module's name prefixed with `arch_`. //! //! For example, this is the top of `src/memory/mmu.rs`: //! //! ``` //! #[cfg(target_arch = "aarch64")] //! #[path = "../_arch/aarch64/memory/mmu.rs"] //! mod arch_mmu; //! ``` //! //! Often times, items from the `arch_ module` will be publicly reexported by the parent module. //! This way, each architecture specific module can provide its implementation of an item, while the //! caller must not be concerned which architecture has been conditionally compiled. //! //! ## BSP code //! //! `BSP` stands for Board Support Package. `BSP` code is organized under `src/bsp.rs` and contains //! target board specific definitions and functions. These are things such as the board's memory map //! or instances of drivers for devices that are featured on the respective board. //! //! Just like processor architecture code, the `BSP` code's module structure tries to mirror the //! `kernel`'s subsystem modules, but there is no reexporting this time. That means whatever is //! provided must be called starting from the `bsp` namespace, e.g. `bsp::driver::driver_manager()`. //! //! ## Kernel interfaces //! //! Both `arch` and `bsp` contain code that is conditionally compiled depending on the actual target //! and board for which the kernel is compiled. For example, the `interrupt controller` hardware of //! the `Raspberry Pi 3` and the `Raspberry Pi 4` is different, but we want the rest of the `kernel` //! code to play nicely with any of the two without much hassle. //! //! In order to provide a clean abstraction between `arch`, `bsp` and `generic kernel code`, //! `interface` traits are provided *whenever possible* and *where it makes sense*. They are defined //! in the respective subsystem module and help to enforce the idiom of *program to an interface, //! not an implementation*. For example, there will be a common IRQ handling interface which the two //! different interrupt controller `drivers` of both Raspberrys will implement, and only export the //! interface to the rest of the `kernel`. //! //! ``` //! +-------------------+ //! | Interface (Trait) | //! | | //! +--+-------------+--+ //! ^ ^ //! | | //! | | //! +----------+--+ +--+----------+ //! | kernel code | | bsp code | //! | | | arch code | //! +-------------+ +-------------+ //! ``` //! //! # Summary //! //! For a logical `kernel` subsystem, corresponding code can be distributed over several physical //! locations. Here is an example for the **memory** subsystem: //! //! - `src/memory.rs` and `src/memory/**/*` //! - Common code that is agnostic of target processor architecture and `BSP` characteristics. //! - Example: A function to zero a chunk of memory. //! - Interfaces for the memory subsystem that are implemented by `arch` or `BSP` code. //! - Example: An `MMU` interface that defines `MMU` function prototypes. //! - `src/bsp/__board_name__/memory.rs` and `src/bsp/__board_name__/memory/**/*` //! - `BSP` specific code. //! - Example: The board's memory map (physical addresses of DRAM and MMIO devices). //! - `src/_arch/__arch_name__/memory.rs` and `src/_arch/__arch_name__/memory/**/*` //! - Processor architecture specific code. //! - Example: Implementation of the `MMU` interface for the `__arch_name__` processor //! architecture. //! //! From a namespace perspective, **memory** subsystem code lives in: //! //! - `crate::memory::*` //! - `crate::bsp::memory::*` //! //! # Boot flow //! //! 1. The kernel's entry point is the function `cpu::boot::arch_boot::_start()`. //! - It is implemented in `src/_arch/__arch_name__/cpu/boot.s`. //! 2. Once finished with architectural setup, the arch code calls `kernel_init()`. #![allow(clippy::upper_case_acronyms)] #![allow(incomplete_features)] #![feature(asm)]
#![feature(const_fn_trait_bound)] #![feature(core_intrinsics)] #![feature(format_args_nl)] #![feature(generic_const_exprs)] #![feature(global_asm)] #![feature(linkage)] #![feature(panic_info_message)] #![feature(step_trait)] #![feature(trait_alias)] #![no_std] // Testing #![cfg_attr(test, no_main)] #![feature(custom_test_frameworks)] #![reexport_test_harness_main = "test_main"] #![test_runner(crate::test_runner)] mod panic_wait; mod synchronization; pub mod bsp; pub mod common; pub mod console; pub mod cpu; pub mod driver; pub mod exception; pub mod memory; pub mod print; pub mod state; pub mod time; //-------------------------------------------------------------------------------------------------- // Public Code //-------------------------------------------------------------------------------------------------- /// Version string. pub fn version() -> &'static str { concat!( env!("CARGO_PKG_NAME"), " version ", env!("CARGO_PKG_VERSION") ) } //-------------------------------------------------------------------------------------------------- // Testing //-------------------------------------------------------------------------------------------------- /// The default runner for unit tests. pub fn test_runner(tests: &[&test_types::UnitTest]) { // This line will be printed as the test header. println!("Running {} tests", tests.len()); for (i, test) in tests.iter().enumerate() { print!("{:>3}. {:.<58}", i + 1, test.name); // Run the actual test. (test.test_func)(); // Failed tests call panic!(). Execution reaches here only if the test has passed. println!("[ok]") } } /// The `kernel_init()` for unit tests. #[cfg(test)] #[no_mangle] unsafe fn kernel_init() -> ! { exception::handling_init(); memory::mmu::post_enable_init(); bsp::console::qemu_bring_up_console(); test_main(); cpu::qemu_exit_success() }
#![feature(const_fn_fn_ptr_basics)]
mod.rs
//! Provides types for working with Volta hooks. use std::env; use std::fs::File; use std::marker::PhantomData; use std::path::Path; use crate::error::ErrorDetails; use crate::layout::volta_home; use crate::project::Project; use crate::tool::{Node, Package, Tool, Yarn}; use lazycell::LazyCell; use log::debug; use volta_fail::{Fallible, ResultExt}; pub(crate) mod serial; pub mod tool; /// A hook for publishing Volta events. #[derive(PartialEq, Debug)] pub enum Publish { /// Reports an event by sending a POST request to a URL. Url(String), /// Reports an event by forking a process and sending the event by IPC. Bin(String), } /// Lazily loaded Volta hook configuration pub struct LazyHookConfig { settings: LazyCell<HookConfig>, } impl LazyHookConfig { /// Constructs a new `LazyHookConfig` pub fn init() -> LazyHookConfig { LazyHookConfig { settings: LazyCell::new(), } } /// Forces the loading of the hook configuration pub fn get(&self) -> Fallible<&HookConfig> { self.settings.try_borrow_with(HookConfig::current) } } /// Volta hook configuration pub struct HookConfig { node: Option<ToolHooks<Node>>, yarn: Option<ToolHooks<Yarn>>, package: Option<ToolHooks<Package>>, events: Option<EventHooks>, } /// Volta hooks for an individual tool pub struct ToolHooks<T: Tool> { /// The hook for resolving the URL for a distro version pub distro: Option<tool::DistroHook>, /// The hook for resolving the URL for the latest version pub latest: Option<tool::MetadataHook>, /// The hook for resolving the Tool Index URL pub index: Option<tool::MetadataHook>, phantom: PhantomData<T>, } impl<T: Tool> ToolHooks<T> { /// Creates a merged struct, with "right" having precedence over "left". fn merge(left: Self, right: Self) -> Self { Self { distro: right.distro.or(left.distro), latest: right.latest.or(left.latest), index: right.index.or(left.index), phantom: PhantomData, } } } macro_rules! merge_hook_config_field { ($left:ident, $right:ident, $field:ident, $type:ident) => { match ($left.$field, $right.$field) { (Some(left), Some(right)) => Some($type::merge(left, right)), (Some(left), None) => Some(left), (None, Some(right)) => Some(right), (None, None) => None, } }; } impl HookConfig { pub fn node(&self) -> Option<&ToolHooks<Node>> { self.node.as_ref() } pub fn yarn(&self) -> Option<&ToolHooks<Yarn>> { self.yarn.as_ref() } pub fn package(&self) -> Option<&ToolHooks<Package>> { self.package.as_ref() } pub fn events(&self) -> Option<&EventHooks> { self.events.as_ref() } /// Returns the current hooks, which are a merge between the user hooks and /// the project hooks (if any). fn current() -> Fallible<Self> { let maybe_project_config = Self::for_current_dir()?; let maybe_user_config = Self::for_user()?; Ok(match (maybe_project_config, maybe_user_config) { (Some(project_config), Some(user_config)) => { debug!("Merging user and project hooks"); Self::merge(user_config, project_config) } (Some(project_config), None) => project_config, (None, Some(user_config)) => user_config, (None, None) => { debug!("No custom hooks found"); Self { node: None, yarn: None, package: None, events: None, } } }) } /// Returns the per-project hooks for the current directory. fn for_current_dir() -> Fallible<Option<Self>>
/// Returns the per-project hooks for the specified directory. If the /// specified directory is not itself a project, its ancestors will be /// searched. fn for_dir(base_dir: &Path) -> Fallible<Option<Self>> { match Project::find_dir(&base_dir) { Some(project_dir) => { let path = project_dir.join(".volta").join("hooks.json"); let hooks_config = Self::from_file(&path)?; if hooks_config.is_some() { debug!("Found project hooks in '{}'", path.display()); } Ok(hooks_config) } None => Ok(None), } } fn from_file(file_path: &Path) -> Fallible<Option<Self>> { if !file_path.is_file() { return Ok(None); } let file = File::open(file_path).with_context(|_| ErrorDetails::ReadHooksError { file: file_path.to_path_buf(), })?; let raw: serial::RawHookConfig = serde_json::de::from_reader(file).with_context(|_| ErrorDetails::ParseHooksError { file: file_path.to_path_buf(), })?; let hooks_path = file_path.parent().unwrap_or_else(|| Path::new("/")); raw.into_hook_config(hooks_path).map(Some) } /// Returns the per-user hooks, loaded from the filesystem. fn for_user() -> Fallible<Option<Self>> { let path = volta_home()?.user_hooks_file(); let hooks_config = Self::from_file(&path)?; if hooks_config.is_some() { debug!("Found user hooks in '{}'", path.display()); } Ok(hooks_config) } /// Creates a merged struct, with "right" having precedence over "left". fn merge(left: Self, right: Self) -> Self { Self { node: merge_hook_config_field!(left, right, node, ToolHooks), yarn: merge_hook_config_field!(left, right, yarn, ToolHooks), package: merge_hook_config_field!(left, right, package, ToolHooks), events: merge_hook_config_field!(left, right, events, EventHooks), } } } /// Volta hooks related to events. pub struct EventHooks { /// The hook for publishing events, if any. pub publish: Option<Publish>, } impl EventHooks { /// Creates a merged struct, with "right" having precedence over "left". fn merge(left: Self, right: Self) -> Self { Self { publish: right.publish.or(left.publish), } } } #[cfg(test)] pub mod tests { use super::{tool, HookConfig, Publish}; use std::path::PathBuf; fn fixture_path(fixture_dir: &str) -> PathBuf { let mut cargo_manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); cargo_manifest_dir.push("fixtures"); cargo_manifest_dir.push(fixture_dir); cargo_manifest_dir } #[test] fn test_from_str_event_url() { let fixture_dir = fixture_path("hooks"); let url_file = fixture_dir.join("event_url.json"); let hooks = HookConfig::from_file(&url_file).unwrap().unwrap(); assert_eq!( hooks.events.unwrap().publish, Some(Publish::Url("https://google.com".to_string())) ); } #[test] fn test_from_str_bins() { let fixture_dir = fixture_path("hooks"); let bin_file = fixture_dir.join("bins.json"); let hooks = HookConfig::from_file(&bin_file).unwrap().unwrap(); let node = hooks.node.unwrap(); let yarn = hooks.yarn.unwrap(); assert_eq!( node.distro, Some(tool::DistroHook::Bin { bin: "/some/bin/for/node/distro".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( node.latest, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/latest".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( node.index, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/index".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( yarn.distro, Some(tool::DistroHook::Bin { bin: "/bin/to/yarn/distro".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( yarn.latest, Some(tool::MetadataHook::Bin { bin: "/bin/to/yarn/latest".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( yarn.index, Some(tool::MetadataHook::Bin { bin: "/bin/to/yarn/index".to_string(), base_path: fixture_dir.clone(), }) ); assert_eq!( hooks.events.unwrap().publish, Some(Publish::Bin("/events/bin".to_string())) ); } #[test] fn test_from_str_prefixes() { let fixture_dir = fixture_path("hooks"); let prefix_file = fixture_dir.join("prefixes.json"); let hooks = HookConfig::from_file(&prefix_file).unwrap().unwrap(); let node = hooks.node.unwrap(); let yarn = hooks.yarn.unwrap(); assert_eq!( node.distro, Some(tool::DistroHook::Prefix( "http://localhost/node/distro/".to_string() )) ); assert_eq!( node.latest, Some(tool::MetadataHook::Prefix( "http://localhost/node/latest/".to_string() )) ); assert_eq!( node.index, Some(tool::MetadataHook::Prefix( "http://localhost/node/index/".to_string() )) ); assert_eq!( yarn.distro, Some(tool::DistroHook::Prefix( "http://localhost/yarn/distro/".to_string() )) ); assert_eq!( yarn.latest, Some(tool::MetadataHook::Prefix( "http://localhost/yarn/latest/".to_string() )) ); assert_eq!( yarn.index, Some(tool::MetadataHook::Prefix( "http://localhost/yarn/index/".to_string() )) ); } #[test] fn test_from_str_templates() { let fixture_dir = fixture_path("hooks"); let template_file = fixture_dir.join("templates.json"); let hooks = HookConfig::from_file(&template_file).unwrap().unwrap(); let node = hooks.node.unwrap(); let yarn = hooks.yarn.unwrap(); assert_eq!( node.distro, Some(tool::DistroHook::Template( "http://localhost/node/distro/{{version}}/".to_string() )) ); assert_eq!( node.latest, Some(tool::MetadataHook::Template( "http://localhost/node/latest/{{version}}/".to_string() )) ); assert_eq!( node.index, Some(tool::MetadataHook::Template( "http://localhost/node/index/{{version}}/".to_string() )) ); assert_eq!( yarn.distro, Some(tool::DistroHook::Template( "http://localhost/yarn/distro/{{version}}/".to_string() )) ); assert_eq!( yarn.latest, Some(tool::MetadataHook::Template( "http://localhost/yarn/latest/{{version}}/".to_string() )) ); assert_eq!( yarn.index, Some(tool::MetadataHook::Template( "http://localhost/yarn/index/{{version}}/".to_string() )) ); } #[test] fn test_for_dir() { let project_dir = fixture_path("hooks/project"); let hooks_dir = project_dir.join(".volta"); let hooks = HookConfig::for_dir(&project_dir) .expect("Could not read project hooks.json") .expect("Could not find project hooks.json"); let node = hooks.node.unwrap(); assert_eq!( node.distro, Some(tool::DistroHook::Bin { bin: "/some/bin/for/node/distro".to_string(), base_path: hooks_dir.clone(), }) ); assert_eq!( node.latest, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/latest".to_string(), base_path: hooks_dir.clone(), }) ); assert_eq!( node.index, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/index".to_string(), base_path: hooks_dir.clone(), }) ); assert_eq!( hooks.events.unwrap().publish, Some(Publish::Bin("/events/bin".to_string())) ); } #[test] fn test_merge() { let fixture_dir = fixture_path("hooks"); let user_hooks = HookConfig::from_file(&fixture_dir.join("templates.json")) .unwrap() .unwrap(); let project_dir = fixture_path("hooks/project"); let project_hooks_dir = project_dir.join(".volta"); let project_hooks = HookConfig::for_dir(&project_dir) .expect("Could not read project hooks.json") .expect("Could not find project hooks.json"); let merged_hooks = HookConfig::merge(user_hooks, project_hooks); let node = merged_hooks.node.expect("No node config found"); let yarn = merged_hooks.yarn.expect("No yarn config found"); assert_eq!( node.distro, Some(tool::DistroHook::Bin { bin: "/some/bin/for/node/distro".to_string(), base_path: project_hooks_dir.clone(), }) ); assert_eq!( node.latest, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/latest".to_string(), base_path: project_hooks_dir.clone(), }) ); assert_eq!( node.index, Some(tool::MetadataHook::Bin { bin: "/some/bin/for/node/index".to_string(), base_path: project_hooks_dir.clone(), }) ); assert_eq!( yarn.distro, Some(tool::DistroHook::Template( "http://localhost/yarn/distro/{{version}}/".to_string() )) ); assert_eq!( yarn.latest, Some(tool::MetadataHook::Template( "http://localhost/yarn/latest/{{version}}/".to_string() )) ); assert_eq!( yarn.index, Some(tool::MetadataHook::Template( "http://localhost/yarn/index/{{version}}/".to_string() )) ); assert_eq!( merged_hooks.events.expect("No events config found").publish, Some(Publish::Bin("/events/bin".to_string())) ); } }
{ Self::for_dir(&env::current_dir().with_context(|_| ErrorDetails::CurrentDirError)?) }
context.js
'use strict'; const Bot = require('../lib/chipchat'); const bot = new Bot({ token: process.env.TOKEN }); bot.on('ready', () => { console.log('ready', bot.user); }); bot.on('error', (err) => { console.log('handle error', err); }); bot.on('message', (_, conversation) => { const convId = conversation.id; //'5cd8c48dabd2dc52deb1cfb2'; bot.conversation(convId).then(ctx => ctx.say('Hooked in1')); bot.conversation(convId, (err, ctx) => ctx.say('Hooked in2')); bot.conversation(null, err => console.log('ERROR', err.toString())); bot.conversation(null).then(ctx => ctx.say('Error bypassed')).catch(err => console.log('ERROR PROMISE', err.toString())); bot.conversations.list().then(convs => console.log('fetched convs', convs.length)); console.log('convsprop', bot.conversations); bot.conversations.create({ messages: [{ text: 'Hi' }] }) .then(conv => console.log('created', conv.name)) .catch(err => console.log('created err', err.toString())); }); bot.on('message', (_, conversation) => { const sendSummary = (ctx) => { ctx.say(`Ok, here's what you told me about you: - Name: ${ctx.get('name')} - Favorite Food: ${ctx.get('food')}`); ctx.leave(); }; const askFavoriteFood = (conv) => { conv.ask("What's your favorite food?", (msg, ctx) => { const text = msg.text; ctx.set('food', text); //ctx.say(`Got it, your favorite food is ${text}`).then(() => sendSummary(ctx)); ctx.ask(`So your favorite food is ${text}?`, () => sendSummary(ctx)); }); }; const askName = (conv) => { conv.ask("What's your name?", (msg, ctx) => { const text = msg.text; ctx.set('name', text);
ctx.say(`Oh, your name is ${text}`, () => askFavoriteFood(ctx)); }); }; askName(conversation); }); bot.start();
//ctx.say(`Oh, your name is ${text}`).then(() => askFavoriteFood(ctx));
TableViewHeader.js
import React from 'react'; import { requireNativeComponent } from 'react-native'; const RNHeaderView = requireNativeComponent('RNTableHeaderView', null); export default class
extends React.Component { constructor(props) { super(props); this.state = { width: 0, height: 0 }; } render() { return ( <RNHeaderView onLayout={event => { this.setState(event.nativeEvent.layout); }} {...this.props} componentWidth={this.state.width} componentHeight={this.state.height} /> ); } }
TableViewHeader
integ_test.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. // +build go1.10,integration package eventbridge_test import ( "context" "testing" "time" "github.com/ClearcodeHQ/aws-sdk-go/aws" "github.com/ClearcodeHQ/aws-sdk-go/aws/awserr" "github.com/ClearcodeHQ/aws-sdk-go/aws/request" "github.com/ClearcodeHQ/aws-sdk-go/awstesting/integration" "github.com/ClearcodeHQ/aws-sdk-go/service/eventbridge" ) var _ aws.Config var _ awserr.Error var _ request.Request func TestInteg_00_ListRules(t *testing.T)
func TestInteg_01_DescribeRule(t *testing.T) { ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) defer cancelFn() sess := integration.SessionWithDefaultRegion("us-west-2") svc := eventbridge.New(sess) params := &eventbridge.DescribeRuleInput{ Name: aws.String("fake-rule"), } _, err := svc.DescribeRuleWithContext(ctx, params, func(r *request.Request) { r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") }) if err == nil { t.Fatalf("expect request to fail") } aerr, ok := err.(awserr.RequestFailure) if !ok { t.Fatalf("expect awserr, was %T", err) } if len(aerr.Code()) == 0 { t.Errorf("expect non-empty error code") } if len(aerr.Message()) == 0 { t.Errorf("expect non-empty error message") } if v := aerr.Code(); v == request.ErrCodeSerialization { t.Errorf("expect API error code got serialization failure") } }
{ ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) defer cancelFn() sess := integration.SessionWithDefaultRegion("us-west-2") svc := eventbridge.New(sess) params := &eventbridge.ListRulesInput{} _, err := svc.ListRulesWithContext(ctx, params, func(r *request.Request) { r.Handlers.Validate.RemoveByName("core.ValidateParametersHandler") }) if err != nil { t.Errorf("expect no error, got %v", err) } }
helpers.go
package main import "encoding/json" func
(v interface{}) string { b, err := json.MarshalIndent(v, "", " ") if err != nil { return "" } return string(b) } func format(v interface{}) string { b, err := json.Marshal(v) if err != nil { return "" } return string(b) }
formatIndent
main.py
# Copyright 2017 National Computational Infrastructure(NCI). # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ========================================================================= import numpy as np from osgeo import gdal import netCDF4 import json import os import sys import datetime import time import argparse def get_output_filename(output_dir, source_file, month, product_name, ver): if not os.path.exists(output_dir): os.makedirs(output_dir) parts = source_file.split('.') year = parts[-3] hv = parts[-4] filename = 'FC_%s.v%s.MCD43A4.%s.%d.%s.006.nc' % (product_name, ver, hv, month, year) full_filename = os.path.join(output_dir, filename) return full_filename def pack_data(src_filename, output_filename, pv_data, npv_data, soil_data, data_type, timestamps): with netCDF4.Dataset(output_filename, 'w', format='NETCDF4') as dest: with open('nc_metadata.json') as data_file: attrs = json.load(data_file) for key in attrs: setattr(dest, key, attrs[key]) setattr(dest, "date_created", datetime.datetime.now().strftime("%Y%m%dT%H%M%S")) ds = gdal.Open('NETCDF:"%s":phot_veg' % src_filename) proj_wkt = ds.GetProjection() geot = ds.GetGeoTransform() t_dim = dest.createDimension("time", len(timestamps)) x_dim = dest.createDimension("x", ds.RasterXSize) y_dim = dest.createDimension("y", ds.RasterYSize) var = dest.createVariable("time", "f8", ("time",)) var.units = "seconds since 1970-01-01 00:00:00.0" var.calendar = "standard" var.long_name = "Time, unix time-stamp" var.standard_name = "time" var[:] = netCDF4.date2num(timestamps, units="seconds since 1970-01-01 00:00:00.0", calendar="standard") var = dest.createVariable("x", "f8", ("x",)) var.units = "m" var.long_name = "x coordinate of projection" var.standard_name = "projection_x_coordinate" var[:] = np.linspace(geot[0], geot[0]+(geot[1]*ds.RasterXSize), ds.RasterXSize) var = dest.createVariable("y", "f8", ("y",)) var.units = "m" var.long_name = "y coordinate of projection" var.standard_name = "projection_y_coordinate" var[:] = np.linspace(geot[3], geot[3]+(geot[5]*ds.RasterYSize), ds.RasterYSize) var = dest.createVariable("phot_veg", data_type, ("time", "y", "x"), fill_value=255, zlib=True) var.long_name = "Photosynthetic Vegetation" var.units = '%' var.grid_mapping = "sinusoidal" var[:] = pv_data var = dest.createVariable("nphot_veg", data_type, ("time", "y", "x"), fill_value=255, zlib=True) var.long_name = "Non Photosynthetic Vegetation" var.units = '%' var.grid_mapping = "sinusoidal" var[:] = npv_data var = dest.createVariable("bare_soil", data_type, ("time", "y", "x"), fill_value=255, zlib=True) var.long_name = "Bare Soil" var.units = '%' var.grid_mapping = "sinusoidal" var[:] = soil_data var = dest.createVariable("sinusoidal", 'S1', ()) var.grid_mapping_name = "sinusoidal" var.false_easting = 0.0 var.false_northing = 0.0 var.longitude_of_central_meridian = 0.0 var.longitude_of_prime_meridian = 0.0 var.semi_major_axis = 6371007.181 var.inverse_flattening = 0.0 var.spatial_ref = proj_wkt var.GeoTransform = "{} {} {} {} {} {} ".format(*[geot[i] for i in range(6)]) def compute_percentiles(data): sort_idx = np.zeros_like(data) idx = np.arange(sort_idx.shape[0]) arg_idx = np.argsort(data, axis=0) for i in xrange(sort_idx.shape[1]): for j in xrange(sort_idx.shape[2]): sort_idx[arg_idx[:, i, j], i, j] = idx masks = data < 255 count = masks.sum(axis=0) #The original formula is percentile = ((p-1)/(n-1)) * 100 #But numpy index starts from zero. So we use p/(n-1) * 100 instead percentiles = sort_idx / (count - 1.) * 100 percentiles[~masks] = 255 nan_masks = np.isnan(percentiles) | np.isinf(percentiles) percentiles[nan_masks] = 255 percentiles[percentiles < 0] = 255 #print percentiles.max(), percentiles.min(), percentiles.shape #print sort_idx.max(), sort_idx.min(), count.max(), count.min(), percentiles.max(), percentiles.min() percentiles = np.round(percentiles).astype(np.uint8) return percentiles def
(data): masks = data < 255 _data = data.copy() _data[~masks] = 0 data_sum = _data.sum(axis=0) counts = masks.sum(axis=0) mean_data = data_sum / counts diff = data - mean_data nan_masks = np.isnan(diff) | np.isinf(diff) diff[nan_masks] = 255 diff[~masks] = 255 return diff def compute_by_month(src_root_filename, raw_src_file_list, month, mean_diffs_output_dir, percentiles_output_dir, ver): pv_data = npv_data = soil_data = None t0 = time.time() src_file_list = [] for src_f in raw_src_file_list: if os.path.isfile(src_f): src_file_list.append(src_f) else: print 'source file not found: %s' % src_f timestamp_list = [] for _, src_f in enumerate(src_file_list): parts = src_f.split('.') year = int(parts[-3]) timestamp_list.append(datetime.datetime(year, month, 1, 0, 0)) for i_src, src_f in enumerate(src_file_list): with netCDF4.Dataset(src_f) as ds: month2idx = -1 for its, ts in enumerate(ds['time']): date = netCDF4.num2date(ts, ds['time'].units) if date.month == month: month2idx = its break if month2idx == -1: print '%s has no data for month: %d' % (src_f, month) continue pv = np.asarray(ds['phot_veg'][month2idx, ...], dtype=np.float32) npv = np.asarray(ds['nphot_veg'][month2idx, ...], dtype=np.float32) soil = np.asarray(ds['bare_soil'][month2idx, ...], dtype=np.float32) ts = netCDF4.num2date(ds['time'][month2idx], ds['time'].units) if pv_data is None: fill_val = 255 pv_data = fill_val * np.ones((len(src_file_list), pv.shape[0], pv.shape[1]), dtype=pv.dtype) npv_data = fill_val * np.ones_like(pv_data) soil_data = fill_val * np.ones_like(pv_data) pv_data[i_src, ...] = pv npv_data[i_src, ...] = npv soil_data[i_src, ...] = soil timestamp_list[i_src] = ts if pv_data is None: raise Exception('no data, month:%d, src_root:%s' % (month, src_root_filename)) pv_mean_diff = compute_mean_diff(pv_data) npv_mean_diff = compute_mean_diff(npv_data) soil_mean_diff = compute_mean_diff(soil_data) #we save one file per year for the given month. This simplifies combine_outputs.py for i_src, src_f in enumerate(src_file_list): output_filename = get_output_filename(mean_diffs_output_dir, src_f, month, 'Mean_Diff', ver) print output_filename #print pv_ranks[i_src, ...].shape pack_data(src_root_filename, output_filename, np.expand_dims(pv_mean_diff[i_src, ...], 0), np.expand_dims(npv_mean_diff[i_src, ...], 0), np.expand_dims(soil_mean_diff[i_src, ...], 0), 'f4', [timestamp_list[i_src], ]) pv_pct = compute_percentiles(pv_data) npv_pct = compute_percentiles(npv_data) soil_pct = compute_percentiles(soil_data) for i_src, src_f in enumerate(src_file_list): output_filename = get_output_filename(percentiles_output_dir, src_f, month, 'Percentile', ver) print output_filename #print pv_ranks[i_src, ...].shape pack_data(src_root_filename, output_filename, np.expand_dims(pv_pct[i_src, ...], 0), np.expand_dims(npv_pct[i_src, ...], 0), np.expand_dims(soil_pct[i_src, ...], 0), 'u1', [timestamp_list[i_src], ]) print 'time elapsed: ', time.time() - t0 if __name__ == "__main__": #src_root_dir = 'src_root_dir = '/g/data2/u39/public/prep/modis-fc/global_fc_006' parser = argparse.ArgumentParser(description="""Modis Vegetation Analysis argument parser""") parser.add_argument(dest="src_root_dir", type=str, help="Source root dir") parser.add_argument(dest="input_dir", type=str, help="Full path of input file") parser.add_argument(dest="h", type=str, help="h of source file") parser.add_argument(dest="v", type=str, help="v of source file") parser.add_argument(dest="year_start", type=int, help="Starting year of source file") parser.add_argument(dest="year_end", type=int, help="Ending year of source file (inclusive)") parser.add_argument(dest="month", type=int, help="Month of source file") parser.add_argument(dest="mean_diffs_output_dir", type=str, help="Full path to destination of mean differences.") parser.add_argument(dest="percentiles_output_dir", type=str, help="Full path to destination of percentiles.") parser.add_argument("--version", default='310', type=str, help="Product version") args = parser.parse_args() src_root_dir = args.src_root_dir src_file_list = [os.path.join(args.input_dir, 'FC_Monthly_Medoid.v%s.MCD43A4.h%sv%s.%s.006.nc' % (args.version, args.h, args.v, year)) for year in xrange(args.year_start, args.year_end+1)] src_root_filename = os.path.join(src_root_dir, 'FC.v%s.MCD43A4.h%sv%s.%s.006.nc' % (args.version, args.h, args.v, args.year_start)) compute_by_month(src_root_filename, src_file_list, args.month, args.mean_diffs_output_dir, args.percentiles_output_dir, args.version)
compute_mean_diff
main.go
package main import ( "fmt" "html/template" "io" "log" "net" "net/http" "os" "os/exec" "os/user" "path" "runtime" ) const html = ` <!DOCTYPE html> <html lang="en"> <head> <meta charset=utf-8> <meta name="viewport" content="width=device-width, initial-scale=0.41, maximum-scale=1" /> <title>Simple Upload</title> <style type="text/css"> * { color: white; font-family: sans-serif; padding: 0; margin: 0; cursor: pointer; -webkit-touch-callout: none; -webkit-user-select: none; -khtml-user-select: none; -moz-user-select: none; -ms-user-select: none; user-select: none; } #upload { width: 100%; top: 0; position: absolute; background-color: #f44242; height: 100%; } .text { position: absolute; top: 40%; text-align: center; width: 100%; font-size: 3em; } input[type="file"] { display: none; } </style> </head> <body> <!-- <input type='file' id='file' /> --> <form id="file-form" enctype="multipart/form-data" action="/upload" method="post"> <label> <div id="upload"> <div class="text" id="uploadtext">UPLOAD<br>({{ . }}:8080)</div> </div> <input type="file" id='file' name="uploadfile" /> </label> <input type="submit" value="upload" /> </form> <script> var uploadtext = document.getElementById('uploadtext'); var fileelt = document.getElementById('file'); document.body.ondragover = function() { uploadtext.innerHTML = 'DROP YOUR FILE HERE'; return false; }; document.body.ondrop = function(e) { e.preventDefault(); readfiles(e.dataTransfer.files); }; fileelt.addEventListener("change", function(e) { var formData = new FormData(document.getElementById("file-form")); var xhr = new XMLHttpRequest(); xhr.open('POST', '/upload'); xhr.onload = function() { uploadtext.innerHTML = xhr.responseText; }; xhr.upload.onprogress = function(event) { if (event.lengthComputable) { var complete = (event.loaded / event.total * 100 | 0); var complete36 = (event.loaded / event.total * 36 | 0); uploadtext.innerHTML = 'UPLOADING<br>PROGRESS ' + complete + '%'; } }; xhr.send(formData); }); </script> </body> </html> ` var ip = "" func upload(w http.ResponseWriter, r *http.Request) { if r.Method == "GET" { t, _ := template.New("upload").Parse(html) t.Execute(w, ip) } else { err := r.ParseMultipartForm(32 << 20) if err != nil { log.Fatal(err) return } for _, fheaders := range r.MultipartForm.File { for _, handler := range fheaders { // open uploaded infile, err := handler.Open() if err != nil { log.Fatal(err) return } // open destination usr, err := user.Current() if err != nil { log.Fatal(err) } outfile, err := os.OpenFile(path.Join(usr.HomeDir, "Downloads", handler.Filename), os.O_WRONLY|os.O_CREATE, 0666) if err != nil { fmt.Println(err) fmt.Fprintf(w, "ERROR") return } // 32K buffer copy if _, err = io.Copy(outfile, infile); nil != err { log.Fatal(err) return } } } fmt.Fprintf(w, "UPLOAD COMPLETE!") } } // open opens the specified URL in the default browser of the user. func open(url string) error { var cmd string var args []string switch runtime.GOOS { case "windows": cmd = "cmd" args = []string{"/c", "start"} case "darwin": cmd = "open" default: // "linux", "freebsd", "openbsd", "netbsd" cmd = "xdg-open" } args = append(args, url) return exec.Command(cmd, args...).Start() } func main()
{ addrs, err := net.InterfaceAddrs() if err != nil { os.Stderr.WriteString("Oops: " + err.Error() + "\n") os.Exit(1) } for _, a := range addrs { if ipnet, ok := a.(*net.IPNet); ok && !ipnet.IP.IsLoopback() { if ipnet.IP.To4() != nil { ip = ipnet.IP.String() } } } go open("http://" + ip + ":8080") http.HandleFunc("/", upload) err = http.ListenAndServe(":8080", nil) // set listen port if err != nil { log.Fatal("ListenAndServe: ", err) } }
msg_channel_reestablish_target.rs
// This file is auto-generated by gen_target.sh based on target_template.txt // To modify it, modify target_template.txt and run gen_target.sh instead. //Uncomment this for libfuzzer builds: //#![no_main] extern crate lightning_fuzz; use lightning_fuzz::msg_targets::msg_channel_reestablish::*; use std::io::Read; #[cfg(feature = "afl")] #[macro_use] extern crate afl; #[cfg(feature = "afl")] fn main() { fuzz!(|data| { msg_channel_reestablish_run(data.as_ptr(), data.len()); }); } #[cfg(feature = "honggfuzz")] #[macro_use] extern crate honggfuzz; #[cfg(feature = "honggfuzz")] fn main()
#[cfg(feature = "libfuzzer_fuzz")] #[macro_use] extern crate libfuzzer_sys; #[cfg(feature = "libfuzzer_fuzz")] fuzz_target!(|data: &[u8]| { msg_channel_reestablish_run(data.as_ptr(), data.len()); }); #[cfg(feature = "stdin_fuzz")] fn main() { let mut data = Vec::with_capacity(8192); std::io::stdin().read_to_end(&mut data).unwrap(); msg_channel_reestablish_run(data.as_ptr(), data.len()); }
{ loop { fuzz!(|data| { msg_channel_reestablish_run(data.as_ptr(), data.len()); }); } }
popover.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files) // DO NOT EDIT use gdk; use gio; use glib::object::Cast; use glib::object::IsA; use glib::signal::connect_raw; use glib::signal::SignalHandlerId; use glib::translate::*; use glib::StaticType; use glib::ToValue; use glib_sys; use gtk_sys; use std::boxed::Box as Box_; use std::fmt; use std::mem::transmute; use Align; use Bin; use Buildable; use Container; #[cfg(any(feature = "v3_20", feature = "dox"))] use PopoverConstraint; use PositionType; use ResizeMode; use Widget; glib_wrapper! { pub struct Popover(Object<gtk_sys::GtkPopover, gtk_sys::GtkPopoverClass, PopoverClass>) @extends Bin, Container, Widget, @implements Buildable; match fn { get_type => || gtk_sys::gtk_popover_get_type(), } } impl Popover { pub fn new<P: IsA<Widget>>(relative_to: Option<&P>) -> Popover { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(gtk_sys::gtk_popover_new( relative_to.map(|p| p.as_ref()).to_glib_none().0, )) .unsafe_cast() } } pub fn new_from_model<P: IsA<Widget>, Q: IsA<gio::MenuModel>>( relative_to: Option<&P>, model: &Q, ) -> Popover { assert_initialized_main_thread!(); unsafe { Widget::from_glib_none(gtk_sys::gtk_popover_new_from_model( relative_to.map(|p| p.as_ref()).to_glib_none().0, model.as_ref().to_glib_none().0, )) .unsafe_cast() } } } #[derive(Clone, Default)] pub struct PopoverBuilder { #[cfg(any(feature = "v3_20", feature = "dox"))] constrain_to: Option<PopoverConstraint>, modal: Option<bool>, pointing_to: Option<gdk::Rectangle>, position: Option<PositionType>, relative_to: Option<Widget>, #[cfg(any(feature = "v3_16", feature = "dox"))] transitions_enabled: Option<bool>, border_width: Option<u32>, child: Option<Widget>, resize_mode: Option<ResizeMode>, app_paintable: Option<bool>, can_default: Option<bool>, can_focus: Option<bool>, events: Option<gdk::EventMask>, expand: Option<bool>, #[cfg(any(feature = "v3_20", feature = "dox"))] focus_on_click: Option<bool>, halign: Option<Align>, has_default: Option<bool>, has_focus: Option<bool>, has_tooltip: Option<bool>, height_request: Option<i32>, hexpand: Option<bool>, hexpand_set: Option<bool>, is_focus: Option<bool>, margin: Option<i32>, margin_bottom: Option<i32>, margin_end: Option<i32>, margin_start: Option<i32>, margin_top: Option<i32>, name: Option<String>, no_show_all: Option<bool>, opacity: Option<f64>, parent: Option<Container>, receives_default: Option<bool>, sensitive: Option<bool>, tooltip_markup: Option<String>, tooltip_text: Option<String>, valign: Option<Align>, vexpand: Option<bool>, vexpand_set: Option<bool>, visible: Option<bool>, width_request: Option<i32>, } impl PopoverBuilder { pub fn new() -> Self { Self::default() } pub fn build(self) -> Popover { let mut properties: Vec<(&str, &dyn ToValue)> = vec![]; #[cfg(any(feature = "v3_20", feature = "dox"))] { if let Some(ref constrain_to) = self.constrain_to { properties.push(("constrain-to", constrain_to)); } } if let Some(ref modal) = self.modal { properties.push(("modal", modal)); } if let Some(ref pointing_to) = self.pointing_to { properties.push(("pointing-to", pointing_to)); } if let Some(ref position) = self.position { properties.push(("position", position)); } if let Some(ref relative_to) = self.relative_to { properties.push(("relative-to", relative_to)); } #[cfg(any(feature = "v3_16", feature = "dox"))] { if let Some(ref transitions_enabled) = self.transitions_enabled { properties.push(("transitions-enabled", transitions_enabled)); } } if let Some(ref border_width) = self.border_width { properties.push(("border-width", border_width)); } if let Some(ref child) = self.child { properties.push(("child", child)); } if let Some(ref resize_mode) = self.resize_mode { properties.push(("resize-mode", resize_mode)); } if let Some(ref app_paintable) = self.app_paintable { properties.push(("app-paintable", app_paintable)); } if let Some(ref can_default) = self.can_default { properties.push(("can-default", can_default)); } if let Some(ref can_focus) = self.can_focus { properties.push(("can-focus", can_focus)); } if let Some(ref events) = self.events { properties.push(("events", events)); } if let Some(ref expand) = self.expand { properties.push(("expand", expand)); } #[cfg(any(feature = "v3_20", feature = "dox"))] { if let Some(ref focus_on_click) = self.focus_on_click { properties.push(("focus-on-click", focus_on_click)); } } if let Some(ref halign) = self.halign { properties.push(("halign", halign)); } if let Some(ref has_default) = self.has_default { properties.push(("has-default", has_default)); } if let Some(ref has_focus) = self.has_focus { properties.push(("has-focus", has_focus)); } if let Some(ref has_tooltip) = self.has_tooltip { properties.push(("has-tooltip", has_tooltip)); } if let Some(ref height_request) = self.height_request { properties.push(("height-request", height_request)); } if let Some(ref hexpand) = self.hexpand { properties.push(("hexpand", hexpand)); } if let Some(ref hexpand_set) = self.hexpand_set { properties.push(("hexpand-set", hexpand_set)); } if let Some(ref is_focus) = self.is_focus { properties.push(("is-focus", is_focus)); } if let Some(ref margin) = self.margin { properties.push(("margin", margin)); } if let Some(ref margin_bottom) = self.margin_bottom { properties.push(("margin-bottom", margin_bottom)); } if let Some(ref margin_end) = self.margin_end { properties.push(("margin-end", margin_end)); } if let Some(ref margin_start) = self.margin_start { properties.push(("margin-start", margin_start)); } if let Some(ref margin_top) = self.margin_top { properties.push(("margin-top", margin_top)); } if let Some(ref name) = self.name { properties.push(("name", name)); } if let Some(ref no_show_all) = self.no_show_all { properties.push(("no-show-all", no_show_all)); } if let Some(ref opacity) = self.opacity { properties.push(("opacity", opacity)); } if let Some(ref parent) = self.parent { properties.push(("parent", parent)); } if let Some(ref receives_default) = self.receives_default { properties.push(("receives-default", receives_default)); } if let Some(ref sensitive) = self.sensitive { properties.push(("sensitive", sensitive)); } if let Some(ref tooltip_markup) = self.tooltip_markup { properties.push(("tooltip-markup", tooltip_markup)); } if let Some(ref tooltip_text) = self.tooltip_text { properties.push(("tooltip-text", tooltip_text)); } if let Some(ref valign) = self.valign { properties.push(("valign", valign)); } if let Some(ref vexpand) = self.vexpand { properties.push(("vexpand", vexpand)); } if let Some(ref vexpand_set) = self.vexpand_set { properties.push(("vexpand-set", vexpand_set)); } if let Some(ref visible) = self.visible { properties.push(("visible", visible)); } if let Some(ref width_request) = self.width_request { properties.push(("width-request", width_request)); } glib::Object::new(Popover::static_type(), &properties) .expect("object new") .downcast() .expect("downcast") } #[cfg(any(feature = "v3_20", feature = "dox"))] pub fn constrain_to(mut self, constrain_to: PopoverConstraint) -> Self { self.constrain_to = Some(constrain_to); self } pub fn modal(mut self, modal: bool) -> Self { self.modal = Some(modal); self } pub fn pointing_to(mut self, pointing_to: &gdk::Rectangle) -> Self { self.pointing_to = Some(pointing_to.clone()); self } pub fn position(mut self, position: PositionType) -> Self { self.position = Some(position); self } pub fn relative_to<P: IsA<Widget>>(mut self, relative_to: &P) -> Self { self.relative_to = Some(relative_to.clone().upcast()); self } #[cfg(any(feature = "v3_16", feature = "dox"))] pub fn transitions_enabled(mut self, transitions_enabled: bool) -> Self { self.transitions_enabled = Some(transitions_enabled); self } pub fn border_width(mut self, border_width: u32) -> Self { self.border_width = Some(border_width); self } pub fn child<P: IsA<Widget>>(mut self, child: &P) -> Self { self.child = Some(child.clone().upcast()); self } pub fn resize_mode(mut self, resize_mode: ResizeMode) -> Self { self.resize_mode = Some(resize_mode); self } pub fn app_paintable(mut self, app_paintable: bool) -> Self { self.app_paintable = Some(app_paintable); self } pub fn can_default(mut self, can_default: bool) -> Self { self.can_default = Some(can_default); self } pub fn can_focus(mut self, can_focus: bool) -> Self { self.can_focus = Some(can_focus); self } pub fn events(mut self, events: gdk::EventMask) -> Self
pub fn expand(mut self, expand: bool) -> Self { self.expand = Some(expand); self } #[cfg(any(feature = "v3_20", feature = "dox"))] pub fn focus_on_click(mut self, focus_on_click: bool) -> Self { self.focus_on_click = Some(focus_on_click); self } pub fn halign(mut self, halign: Align) -> Self { self.halign = Some(halign); self } pub fn has_default(mut self, has_default: bool) -> Self { self.has_default = Some(has_default); self } pub fn has_focus(mut self, has_focus: bool) -> Self { self.has_focus = Some(has_focus); self } pub fn has_tooltip(mut self, has_tooltip: bool) -> Self { self.has_tooltip = Some(has_tooltip); self } pub fn height_request(mut self, height_request: i32) -> Self { self.height_request = Some(height_request); self } pub fn hexpand(mut self, hexpand: bool) -> Self { self.hexpand = Some(hexpand); self } pub fn hexpand_set(mut self, hexpand_set: bool) -> Self { self.hexpand_set = Some(hexpand_set); self } pub fn is_focus(mut self, is_focus: bool) -> Self { self.is_focus = Some(is_focus); self } pub fn margin(mut self, margin: i32) -> Self { self.margin = Some(margin); self } pub fn margin_bottom(mut self, margin_bottom: i32) -> Self { self.margin_bottom = Some(margin_bottom); self } pub fn margin_end(mut self, margin_end: i32) -> Self { self.margin_end = Some(margin_end); self } pub fn margin_start(mut self, margin_start: i32) -> Self { self.margin_start = Some(margin_start); self } pub fn margin_top(mut self, margin_top: i32) -> Self { self.margin_top = Some(margin_top); self } pub fn name(mut self, name: &str) -> Self { self.name = Some(name.to_string()); self } pub fn no_show_all(mut self, no_show_all: bool) -> Self { self.no_show_all = Some(no_show_all); self } pub fn opacity(mut self, opacity: f64) -> Self { self.opacity = Some(opacity); self } pub fn parent<P: IsA<Container>>(mut self, parent: &P) -> Self { self.parent = Some(parent.clone().upcast()); self } pub fn receives_default(mut self, receives_default: bool) -> Self { self.receives_default = Some(receives_default); self } pub fn sensitive(mut self, sensitive: bool) -> Self { self.sensitive = Some(sensitive); self } pub fn tooltip_markup(mut self, tooltip_markup: &str) -> Self { self.tooltip_markup = Some(tooltip_markup.to_string()); self } pub fn tooltip_text(mut self, tooltip_text: &str) -> Self { self.tooltip_text = Some(tooltip_text.to_string()); self } pub fn valign(mut self, valign: Align) -> Self { self.valign = Some(valign); self } pub fn vexpand(mut self, vexpand: bool) -> Self { self.vexpand = Some(vexpand); self } pub fn vexpand_set(mut self, vexpand_set: bool) -> Self { self.vexpand_set = Some(vexpand_set); self } pub fn visible(mut self, visible: bool) -> Self { self.visible = Some(visible); self } pub fn width_request(mut self, width_request: i32) -> Self { self.width_request = Some(width_request); self } } pub const NONE_POPOVER: Option<&Popover> = None; pub trait PopoverExt: 'static { fn bind_model<P: IsA<gio::MenuModel>>(&self, model: Option<&P>, action_namespace: Option<&str>); #[cfg(any(feature = "v3_20", feature = "dox"))] fn get_constrain_to(&self) -> PopoverConstraint; #[cfg(any(feature = "v3_18", feature = "dox"))] fn get_default_widget(&self) -> Option<Widget>; fn get_modal(&self) -> bool; fn get_pointing_to(&self) -> Option<gdk::Rectangle>; fn get_position(&self) -> PositionType; fn get_relative_to(&self) -> Option<Widget>; #[cfg_attr(feature = "v3_22", deprecated)] #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_transitions_enabled(&self) -> bool; #[cfg(any(feature = "v3_22", feature = "dox"))] fn popdown(&self); #[cfg(any(feature = "v3_22", feature = "dox"))] fn popup(&self); #[cfg(any(feature = "v3_20", feature = "dox"))] fn set_constrain_to(&self, constraint: PopoverConstraint); #[cfg(any(feature = "v3_18", feature = "dox"))] fn set_default_widget<P: IsA<Widget>>(&self, widget: Option<&P>); fn set_modal(&self, modal: bool); fn set_pointing_to(&self, rect: &gdk::Rectangle); fn set_position(&self, position: PositionType); fn set_relative_to<P: IsA<Widget>>(&self, relative_to: Option<&P>); #[cfg_attr(feature = "v3_22", deprecated)] #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_transitions_enabled(&self, transitions_enabled: bool); fn connect_closed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[cfg(any(feature = "v3_20", feature = "dox"))] fn connect_property_constrain_to_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_modal_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_pointing_to_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_position_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; fn connect_property_relative_to_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId; #[cfg_attr(feature = "v3_22", deprecated)] #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_transitions_enabled_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId; } impl<O: IsA<Popover>> PopoverExt for O { fn bind_model<P: IsA<gio::MenuModel>>( &self, model: Option<&P>, action_namespace: Option<&str>, ) { unsafe { gtk_sys::gtk_popover_bind_model( self.as_ref().to_glib_none().0, model.map(|p| p.as_ref()).to_glib_none().0, action_namespace.to_glib_none().0, ); } } #[cfg(any(feature = "v3_20", feature = "dox"))] fn get_constrain_to(&self) -> PopoverConstraint { unsafe { from_glib(gtk_sys::gtk_popover_get_constrain_to( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v3_18", feature = "dox"))] fn get_default_widget(&self) -> Option<Widget> { unsafe { from_glib_none(gtk_sys::gtk_popover_get_default_widget( self.as_ref().to_glib_none().0, )) } } fn get_modal(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_popover_get_modal( self.as_ref().to_glib_none().0, )) } } fn get_pointing_to(&self) -> Option<gdk::Rectangle> { unsafe { let mut rect = gdk::Rectangle::uninitialized(); let ret = from_glib(gtk_sys::gtk_popover_get_pointing_to( self.as_ref().to_glib_none().0, rect.to_glib_none_mut().0, )); if ret { Some(rect) } else { None } } } fn get_position(&self) -> PositionType { unsafe { from_glib(gtk_sys::gtk_popover_get_position( self.as_ref().to_glib_none().0, )) } } fn get_relative_to(&self) -> Option<Widget> { unsafe { from_glib_none(gtk_sys::gtk_popover_get_relative_to( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn get_transitions_enabled(&self) -> bool { unsafe { from_glib(gtk_sys::gtk_popover_get_transitions_enabled( self.as_ref().to_glib_none().0, )) } } #[cfg(any(feature = "v3_22", feature = "dox"))] fn popdown(&self) { unsafe { gtk_sys::gtk_popover_popdown(self.as_ref().to_glib_none().0); } } #[cfg(any(feature = "v3_22", feature = "dox"))] fn popup(&self) { unsafe { gtk_sys::gtk_popover_popup(self.as_ref().to_glib_none().0); } } #[cfg(any(feature = "v3_20", feature = "dox"))] fn set_constrain_to(&self, constraint: PopoverConstraint) { unsafe { gtk_sys::gtk_popover_set_constrain_to( self.as_ref().to_glib_none().0, constraint.to_glib(), ); } } #[cfg(any(feature = "v3_18", feature = "dox"))] fn set_default_widget<P: IsA<Widget>>(&self, widget: Option<&P>) { unsafe { gtk_sys::gtk_popover_set_default_widget( self.as_ref().to_glib_none().0, widget.map(|p| p.as_ref()).to_glib_none().0, ); } } fn set_modal(&self, modal: bool) { unsafe { gtk_sys::gtk_popover_set_modal(self.as_ref().to_glib_none().0, modal.to_glib()); } } fn set_pointing_to(&self, rect: &gdk::Rectangle) { unsafe { gtk_sys::gtk_popover_set_pointing_to( self.as_ref().to_glib_none().0, rect.to_glib_none().0, ); } } fn set_position(&self, position: PositionType) { unsafe { gtk_sys::gtk_popover_set_position(self.as_ref().to_glib_none().0, position.to_glib()); } } fn set_relative_to<P: IsA<Widget>>(&self, relative_to: Option<&P>) { unsafe { gtk_sys::gtk_popover_set_relative_to( self.as_ref().to_glib_none().0, relative_to.map(|p| p.as_ref()).to_glib_none().0, ); } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn set_transitions_enabled(&self, transitions_enabled: bool) { unsafe { gtk_sys::gtk_popover_set_transitions_enabled( self.as_ref().to_glib_none().0, transitions_enabled.to_glib(), ); } } fn connect_closed<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn closed_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"closed\0".as_ptr() as *const _, Some(transmute(closed_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } #[cfg(any(feature = "v3_20", feature = "dox"))] fn connect_property_constrain_to_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_constrain_to_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::constrain-to\0".as_ptr() as *const _, Some(transmute( notify_constrain_to_trampoline::<Self, F> as usize, )), Box_::into_raw(f), ) } } fn connect_property_modal_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_modal_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::modal\0".as_ptr() as *const _, Some(transmute(notify_modal_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } fn connect_property_pointing_to_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_pointing_to_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::pointing-to\0".as_ptr() as *const _, Some(transmute(notify_pointing_to_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } fn connect_property_position_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_position_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::position\0".as_ptr() as *const _, Some(transmute(notify_position_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } fn connect_property_relative_to_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId { unsafe extern "C" fn notify_relative_to_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::relative-to\0".as_ptr() as *const _, Some(transmute(notify_relative_to_trampoline::<Self, F> as usize)), Box_::into_raw(f), ) } } #[cfg(any(feature = "v3_16", feature = "dox"))] fn connect_property_transitions_enabled_notify<F: Fn(&Self) + 'static>( &self, f: F, ) -> SignalHandlerId { unsafe extern "C" fn notify_transitions_enabled_trampoline<P, F: Fn(&P) + 'static>( this: *mut gtk_sys::GtkPopover, _param_spec: glib_sys::gpointer, f: glib_sys::gpointer, ) where P: IsA<Popover>, { let f: &F = &*(f as *const F); f(&Popover::from_glib_borrow(this).unsafe_cast()) } unsafe { let f: Box_<F> = Box_::new(f); connect_raw( self.as_ptr() as *mut _, b"notify::transitions-enabled\0".as_ptr() as *const _, Some(transmute( notify_transitions_enabled_trampoline::<Self, F> as usize, )), Box_::into_raw(f), ) } } } impl fmt::Display for Popover { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Popover") } }
{ self.events = Some(events); self }
util.py
import PIL.Image from io import BytesIO from IPython.display import clear_output, Image, display import numpy as np def
(a, fmt='jpeg'): a = np.uint8(np.clip(a, 0, 255)) f = BytesIO() PIL.Image.fromarray(a).save(f, fmt) display(Image(data=f.getvalue())) def showtensor(a): mean = np.array([0.485, 0.456, 0.406]).reshape([1, 1, 3]) std = np.array([0.229, 0.224, 0.225]).reshape([1, 1, 3]) inp = a[0, :, :, :] inp = inp.transpose(1, 2, 0) inp = std * inp + mean inp *= 255 showarray(inp) clear_output(wait=True)
showarray
mapper000.rs
use super::Mapper; use crate::rom::{MirrorMode, Rom}; use serde::{Serialize, Deserialize}; use bincode; #[derive(Serialize, Deserialize)] pub struct Mappper000 { rom: Rom, mirror_prg: bool, prg_ram: Vec<u8>, vram: Vec<u8>, } impl Mappper000 { pub fn new(rom: Rom) -> Mappper000 { assert!(rom.prg_banks <= 2); let mirror_rom = rom.prg_banks == 1; Mappper000 { rom, mirror_prg: mirror_rom, prg_ram: [0; 0x2000].to_vec(), vram: [0; 0x800].to_vec(), } } } impl Mapper for Mappper000 { fn peek_expansion_rom(&mut self, addr: u16) -> u8
fn poke_expansion_rom(&mut self, addr: u16, val: u8) { unimplemented!() } fn peek_sram(&mut self, addr: u16) -> u8 { self.prg_ram[(addr & 0x1FFF) as usize] } fn poke_sram(&mut self, addr: u16, val: u8) { self.prg_ram[(addr & 0x1FFF) as usize] = val; } fn peek_prg_rom(&mut self, addr: u16) -> u8 { let addr = if self.mirror_prg { addr & 0xBFFF } else { addr }; self.rom.prg_rom[(addr & 0x7FFF) as usize] } fn poke_prg_rom(&mut self, addr: u16, val: u8) { let addr = if self.mirror_prg { addr & 0xBFFF } else { addr }; self.rom.prg_rom[(addr & 0x7FFF) as usize] = val; } fn vpeek_nametable(&mut self, addr: u16) -> u8 { let index = if self.rom.mirroring == MirrorMode::H { let t = addr & 0xBFF; if t > 0x7FF { (t & 0x7FF) + 0x400 } else { t } } else { addr & 0x7FF } as usize; self.vram[index] } fn vpoke_nametable(&mut self, addr: u16, val: u8) { let index = if self.rom.mirroring == MirrorMode::H { let t = addr & 0xBFF; if t > 0x7FF { (t & 0x7FF) + 0x400 } else { t } } else { addr & 0x7FF } as usize; self.vram[index] = val; } fn vpeek_pattern(&mut self, addr: u16) -> u8 { self.rom.chr_rom[(addr & 0x1FFF) as usize] } fn vpoke_pattern(&mut self, addr: u16, val: u8) { self.rom.chr_rom[(addr & 0x1FFF) as usize] = val; } fn load_state(&mut self, state: Vec<u8>) { let mapper: Self = bincode::deserialize(&state[..]).unwrap(); *self = mapper; } fn save_state(&self) -> Vec<u8> { bincode::serialize(&self).unwrap() } }
{ unimplemented!() }
set_scheduler_status.go
package api import ( "github.com/twitter/scoot/sched/scheduler" ) /** throttle the scheduler - set the max number of tasks it will allow */ func
(scheduler scheduler.Scheduler, maxTasks int32) error { return scheduler.SetSchedulerStatus(int(maxTasks)) }
SetSchedulerStatus
0007_monumenttype.py
# Generated by Django 2.0.6 on 2018-07-03 14:48 from django.db import migrations, models class
(migrations.Migration): dependencies = [ ('photos', '0006_alter_photo_and_photographer'), ] operations = [ migrations.CreateModel( name='MonumentType', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=128, unique=True)), ], options={ 'ordering': ['title'], }, ), migrations.AddField( model_name='photo', name='monument_type', field=models.ManyToManyField(blank=True, null=True, to='photos.MonumentType'), ), ]
Migration
sink_runnable.rs
use crate::core::checkpoint::{Checkpoint, CheckpointHandle, FunctionSnapshotContext}; use crate::core::element::{Element, Partition}; use crate::core::function::OutputFormat; use crate::core::operator::{DefaultStreamOperator, FunctionCreator, TStreamOperator}; use crate::core::runtime::{OperatorId, TaskId}; use crate::dag::job_graph::JobEdge; use crate::metrics::metric::Counter; use crate::metrics::register_counter; use crate::runtime::worker::checkpoint::submit_checkpoint; use crate::runtime::worker::runnable::{Runnable, RunnableContext}; pub(crate) struct SinkRunnable { operator_id: OperatorId, task_id: TaskId, // child_target_id: TaskId, child_parallelism: u16, context: Option<RunnableContext>, stream_sink: DefaultStreamOperator<dyn OutputFormat>, counter: Counter, } impl SinkRunnable { pub fn new( operator_id: OperatorId, stream_sink: DefaultStreamOperator<dyn OutputFormat>, ) -> Self { SinkRunnable { operator_id, task_id: TaskId::default(), // child_target_id: TaskId::default(), child_parallelism: 0, context: None, stream_sink, counter: Counter::default(), } } } impl Runnable for SinkRunnable { fn open(&mut self, context: &RunnableContext) -> anyhow::Result<()> { self.context = Some(context.clone()); self.task_id = context.task_descriptor.task_id; let child_jobs = context.child_jobs(); self.child_parallelism = if child_jobs.len() > 1 { unimplemented!() } else if child_jobs.len() == 1 { let (child_job_node, child_job_edge) = &child_jobs[0]; match child_job_edge { JobEdge::Forward => 0, JobEdge::ReBalance => child_job_node.parallelism, } } else { 0 }; info!( "SinkRunnable Opened. operator_id={:?}, task_id={:?}, child_parallelism={}", self.operator_id, self.task_id, self.child_parallelism ); let fun_context = context.to_fun_context(self.operator_id); self.stream_sink.operator_fn.open(&fun_context)?; self.counter = register_counter( format!("Sink_{}", self.stream_sink.operator_fn.as_ref().name()), self.task_id.to_tags(), ); Ok(()) } fn run(&mut self, element: Element) { match element { Element::Record(record) => { self.stream_sink .operator_fn .write_element(Element::Record(record)); self.counter.fetch_add(1); } _ =>
} } fn close(&mut self) -> anyhow::Result<()> { self.stream_sink.operator_fn.close()?; Ok(()) } fn set_next_runnable(&mut self, _next_runnable: Option<Box<dyn Runnable>>) { unimplemented!() } fn checkpoint(&mut self, snapshot_context: FunctionSnapshotContext) { let handle = self .stream_sink .operator_fn .snapshot_state(&snapshot_context) .unwrap_or(CheckpointHandle::default()); let ck = Checkpoint { operator_id: snapshot_context.operator_id, task_id: snapshot_context.task_id, checkpoint_id: snapshot_context.checkpoint_id, completed_checkpoint_id: snapshot_context.completed_checkpoint_id, handle, }; submit_checkpoint(ck).map(|ck| { error!( "{:?} submit checkpoint error. maybe report channel is full, checkpoint: {:?}", snapshot_context.operator_id, ck ) }); } }
{ if element.is_barrier() { let snapshot_context = { let checkpoint_id = element.as_barrier().checkpoint_id; let completed_checkpoint_id = element.as_barrier().completed_checkpoint_id(); let context = self.context.as_ref().unwrap(); context.checkpoint_context( self.operator_id, checkpoint_id, completed_checkpoint_id, ) }; self.checkpoint(snapshot_context); } match self.stream_sink.fn_creator() { FunctionCreator::System => { // distribution to downstream if self.child_parallelism > 0 { for index in 0..self.child_parallelism { let mut ele = element.clone(); ele.set_partition(index); debug!("downstream barrier: {}", index); self.stream_sink.operator_fn.write_element(ele); } } else { debug!("downstream barrier"); self.stream_sink.operator_fn.write_element(element); } } FunctionCreator::User => {} } }
hostweight.go
package hostdb import ( "fmt" "math" "strings" "time" "gitlab.com/NebulousLabs/errors" "go.sia.tech/siad/build" "go.sia.tech/siad/modules" "go.sia.tech/siad/modules/renter/hostdb/hosttree" "go.sia.tech/siad/types" ) const ( // collateralExponentiation is the power to which we raise the weight // during collateral adjustment when the collateral is large. This sublinear // number ensures that there is not an overpreference on collateral when // collateral is large relative to the size of the allowance. collateralExponentiationLarge = 0.5 // collateralExponentiationSmall is the power to which we raise the weight // during collateral adjustment when the collateral is small. A large number // ensures a heavy focus on collateral when distinguishing between hosts // that have a very small amount of collateral provided compared to the size // of the allowance. // // For safety, this number needs to be larger than priceExponentiationSmall. collateralExponentiationSmall = 4 // collateralFloor is a part of the equation for determining the collateral // cutoff between large and small collateral. The equation figures out how // much collateral is expected given the allowance, and then divided by // 'collateralFloor' so that the cutoff for how much collateral counts as // 'not much' is reasonably below what we are actually expecting from the // host. // collateralFloor determines how much lower than the expected collateral // the host can provide before switching to a different scoring strategy. A // collateral floor of 0.5 means that once the host is offering a collateral // that is more than 50% of what the renter would expect given the amount of // storage being used, the host switching to a scoring strategy which less // intensly favors adding more collateral. As long as the host has provided // sufficient skin-in-the-game, enormous amounts of extra collateral are // less important. // // The collateralFloor is set relative to the price floor because generally // we look for the collateral to be about 2x the price. collateralFloor = priceFloor * 2 // interactionExponentiation determines how heavily we penalize hosts for // having poor interactions - disconnecting, RPCs with errors, etc. The // exponentiation is very high because the renter will already intentionally // avoid hosts that do not have many successful interactions, meaning that // the bad points do not rack up very quickly. interactionExponentiation = 10 // priceExponentiationLarge is the number of times that the weight is // divided by the price when the price is large relative to the allowance. // The exponentiation is a lot higher because we care greatly about high // priced hosts. priceExponentiationLarge = 5 // priceExponentiationSmall is the number of times that the weight is // divided by the price when the price is small relative to the allowance. // The exponentiation is lower because we do not care about saving // substantial amounts of money when the price is low. priceExponentiationSmall = 0.75 // priceFloor determines how much cheaper than the expected allowance the // host can be before switching to a different scoring strategy for the // score. A price floor of 0.2 means that once the host is less than 20% of // the expected price for that amount of resources (using the allowance as a // guide), instead of using priceExponentiationLarge to reward decreasing // prices, we use priceExponentiationSmall to reward decreasing prices. This // reduced steepness reflects the reality that getting 99.9% off is not all // that different from getting 80% off - both feel like an amazing deal. // // This is necessary to prevent exploits where a host gets an unreasonable // score by putting it's price way too low. priceFloor = 0.1 ) // basePriceAdjustments will adjust the weight of the entry according to the prices // that it has set for BaseRPCPrice and SectorAccessPrice func (hdb *HostDB) basePriceAdjustments(entry modules.HostDBEntry) float64 { // Check for BaseRPCPrice violations maxBaseRPCPrice := entry.MaxBaseRPCPrice() baseRPCPrice := entry.HostExternalSettings.BaseRPCPrice if baseRPCPrice.Cmp(maxBaseRPCPrice) > 0 { hdb.staticLog.Debugf("Host getting 0 score for BaseRPCPrice: Host %v, BaseRPCPrice %v, MaxBaseRPCPrice %v", entry.PublicKey.String(), baseRPCPrice.HumanString(), maxBaseRPCPrice.HumanString()) return math.SmallestNonzeroFloat64 } // Check for SectorAccessPrice violations maxSectorAccessPrice := entry.MaxSectorAccessPrice() sectorAccessPrice := entry.HostExternalSettings.SectorAccessPrice if sectorAccessPrice.Cmp(maxSectorAccessPrice) > 0 { hdb.staticLog.Debugf("Host getting 0 score for SectorAccessPrice: Host %v, SectorAccessPrice %v, MaxSectorAccessPrice %v", entry.PublicKey.String(), sectorAccessPrice.HumanString(), maxSectorAccessPrice.HumanString()) return math.SmallestNonzeroFloat64 } return 1 } // collateralAdjustments improves the host's weight according to the amount of // collateral that they have provided. func (hdb *HostDB) collateralAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 { // Ensure that all values will avoid divide by zero errors. if allowance.Hosts == 0 { allowance.Hosts = 1 } if allowance.Period == 0 { allowance.Period = 1 } if allowance.ExpectedStorage == 0 { allowance.ExpectedStorage = 1 } if allowance.ExpectedUpload == 0 { allowance.ExpectedUpload = 1 } if allowance.ExpectedDownload == 0 { allowance.ExpectedDownload = 1 } if allowance.ExpectedRedundancy == 0 { allowance.ExpectedRedundancy = 1 } // Convert each element of the allowance into a number of resources that we // expect to use in this contract. contractExpectedFunds := allowance.Funds.Div64(allowance.Hosts) contractExpectedStorage := uint64(float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts)) contractExpectedStorageTime := types.NewCurrency64(contractExpectedStorage).Mul64(uint64(allowance.Period)) // Ensure that the allowance and expected storage will not brush up against // the max collateral. If the allowance comes within half of the max // collateral, cap the collateral that we use during adjustments based on // the max collateral instead of the per-byte collateral. // // The purpose of this code is to make sure that the host actually has a // high enough MaxCollateral to cover all of the data that we intend to // store with the host at the collateral price that the host is advertising. // We add a 2x buffer to account for the fact that the renter may end up // storing extra data on this host. hostCollateral := entry.Collateral.Mul(contractExpectedStorageTime) possibleCollateral := entry.MaxCollateral.Div64(2) if possibleCollateral.Cmp(hostCollateral) < 0 { hostCollateral = possibleCollateral } // Determine the cutoff for the difference between small collateral and // large collateral. The cutoff is used to create a step function in the // collateral scoring where decreasing collateral results in much higher // penalties below a certain threshold. // // This threshold is attempting to be the threshold where the amount of // money becomes insignificant. A collateral that is 10x higher than the // price is not interesting, compelling, nor a sign of reliability if the // price and collateral are both effectively zero. // // TODO: This method has no way to account for bandwidth heavy vs. storage // heavy hosts, nor did we give the user any way to configure a situation // where hosts aren't needed to be nearly as reliable. cutoff := contractExpectedFunds.MulFloat(collateralFloor) // Get the ratio between the cutoff and the actual collateral so we can // award the bonus for having a large collateral. collateral64, _ := hostCollateral.Float64() cutoff64, _ := cutoff.Float64() // If the hostCollateral is less than the cutoff, set the cutoff equal to // the collateral so that the ratio has a minimum of 1, and also so that // the smallWeight is computed based on the actual collateral instead of // just the cutoff. if collateral64 < cutoff64 { cutoff64 = collateral64 } // One last check for safety before grabbing the ratio. This ensures that // the ratio is never less than one, which is critical to getting a coherent // large weight - large weight should never be below one. if collateral64 < 1 { collateral64 = 1 } if cutoff64 < 1 { cutoff64 = 1 } ratio := collateral64 / cutoff64 // Use the cutoff to determine the score based on the small exponentiation // factor (which has a high exponentiation), and then use the ratio between // the two to determine the bonus gained from having a high collateral. smallWeight := math.Pow(cutoff64, collateralExponentiationSmall) largeWeight := math.Pow(ratio, collateralExponentiationLarge) return smallWeight * largeWeight } // acceptContractAdjustments checks that a host which doesn't accept contracts // will receive the worst score possible until it enables accepting contracts // again. func (hdb *HostDB) acceptContractAdjustments(entry modules.HostDBEntry) float64 { if !entry.AcceptingContracts { return math.SmallestNonzeroFloat64 } return 1 } // durationAdjustments checks that the host has a maxduration which is larger // than the period of the allowance. The host's score is heavily minimized if // not. func (hdb *HostDB) durationAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 { if entry.MaxDuration < allowance.Period+allowance.RenewWindow { return math.SmallestNonzeroFloat64 } return 1 } // interactionAdjustments determine the penalty to be applied to a host for the // historic and current interactions with that host. This function focuses on // historic interactions and ignores recent interactions. func (hdb *HostDB) interactionAdjustments(entry modules.HostDBEntry) float64 { // Give the host a baseline of 30 successful interactions and 1 failed // interaction. This gives the host a baseline if we've had few // interactions with them. The 1 failed interaction will become // irrelevant after sufficient interactions with the host. hsi := entry.HistoricSuccessfulInteractions + 30 hfi := entry.HistoricFailedInteractions + 1 // Determine the intraction ratio based off of the historic interactions. ratio := float64(hsi) / float64(hsi+hfi) return math.Pow(ratio, interactionExponentiation) } // priceAdjustments will adjust the weight of the entry according to the prices // that it has set. // // REMINDER: The allowance contains an absolute number of bytes for expected // storage on a per-renter basis that doesn't account for redundancy. This value // needs to be adjusted to a per-contract basis that accounts for redundancy. // The upload and download values also do not account for redundancy, and they // are on a per-block basis, meaning you need to multiply be the allowance // period when working with these values. func (hdb *HostDB) priceAdjustments(entry modules.HostDBEntry, allowance modules.Allowance, txnFees types.Currency) float64 { // Divide by zero mitigation. if allowance.Hosts == 0 { allowance.Hosts = 1 } if allowance.Period == 0 { allowance.Period = 1 } if allowance.ExpectedStorage == 0 { allowance.ExpectedStorage = 1 } if allowance.ExpectedUpload == 0 { allowance.ExpectedUpload = 1 } if allowance.ExpectedDownload == 0 { allowance.ExpectedDownload = 1 } if allowance.ExpectedRedundancy == 0 { allowance.ExpectedRedundancy = 1 } // Convert each element of the allowance into a number of resources that we // expect to use in this contract. contractExpectedDownload := types.NewCurrency64(allowance.ExpectedDownload).Mul64(uint64(allowance.Period)).Div64(allowance.Hosts) contractExpectedFunds := allowance.Funds.Div64(allowance.Hosts) contractExpectedStorage := uint64(float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts)) contractExpectedStorageTime := types.NewCurrency64(contractExpectedStorage).Mul64(uint64(allowance.Period)) contractExpectedUpload := types.NewCurrency64(allowance.ExpectedUpload).Mul64(uint64(allowance.Period)).MulFloat(allowance.ExpectedRedundancy).Div64(allowance.Hosts) // Get the extra costs expected for downloads and uploads from the sector access // price and base price. extraCostsPerRPC := entry.BaseRPCPrice.Add(entry.SectorAccessPrice) contractExpectedDownloadRPCs := contractExpectedDownload.Div64(modules.StreamDownloadSize) extraDownloadRPCCost := contractExpectedDownloadRPCs.Mul(extraCostsPerRPC) contractExpectedUploadRPCs := contractExpectedUpload.Div64(modules.StreamUploadSize) extraUploadRPCCost := contractExpectedUploadRPCs.Mul(extraCostsPerRPC) // Calculate the hostCollateral the renter would expect the host to put // into a contract. // contractTxnFees := txnFees.Mul64(modules.EstimatedFileContractTransactionSetSize) _, _, hostCollateral, err := modules.RenterPayoutsPreTax(entry, contractExpectedFunds, contractTxnFees, types.ZeroCurrency, types.ZeroCurrency, allowance.Period, contractExpectedStorage) if err != nil { // Errors containing 'exceeds funding' are not logged. All it means is // that the contract price (or some other price) of the host is too high // for us to be able to form a contract with it, so this host is // strictly not valuable given our allowance and it's pricing. This is // common enough and expected enough that we don't need to log when it // happens. if !strings.Contains(err.Error(), "exceeds funding") { info := fmt.Sprintf("Error while estimating collateral for host: Host %v, ContractPrice %v, TxnFees %v, Funds %v", entry.PublicKey.String(), entry.ContractPrice.HumanString(), txnFees.HumanString(), allowance.Funds.HumanString()) hdb.staticLog.Debugln(errors.AddContext(err, info)) } return math.SmallestNonzeroFloat64 } // Determine the pricing for each type of resource in the contract. We have // already converted the resources into absolute terms for this contract. // // The contract price and transaction fees get doubled because we expect // that there will be on average one early renewal per contract, due to // spending all of the contract's money. contractPrice := entry.ContractPrice.Add(txnFees).Mul64(2) downloadPrice := entry.DownloadBandwidthPrice.Mul(contractExpectedDownload).Add(extraDownloadRPCCost) storagePrice := entry.StoragePrice.Mul(contractExpectedStorageTime) uploadPrice := entry.UploadBandwidthPrice.Mul(contractExpectedUpload).Add(extraUploadRPCCost) siafundFee := contractPrice.Add(hostCollateral).Add(downloadPrice).Add(storagePrice).Add(uploadPrice).MulTax() totalPrice := contractPrice.Add(downloadPrice).Add(storagePrice).Add(uploadPrice).Add(siafundFee) // Determine a cutoff for whether the total price is considered a high price // or a low price. This cutoff attempts to determine where the price becomes // insignificant. cutoff := contractExpectedFunds.MulFloat(priceFloor) // Convert the price and cutoff to floats. price64, _ := totalPrice.Float64() cutoff64, _ := cutoff.Float64() // If the total price is less than the cutoff, set the cutoff equal to the // price. This ensures that the ratio (totalPrice / cutoff) can never be // less than 1. if price64 < cutoff64 { cutoff64 = price64 } // Check for less-than-one. if price64 < 1 { price64 = 1 } if cutoff64 < 1 { cutoff64 = 1 } // Perform this check one more time after all of the conversions, just in // case there was some sort of rounding error. if price64 < cutoff64 { cutoff64 = price64 } ratio := price64 / cutoff64 smallWeight := math.Pow(cutoff64, priceExponentiationSmall) largeWeight := math.Pow(ratio, priceExponentiationLarge) return 1 / (smallWeight * largeWeight) } // storageRemainingAdjustments adjusts the weight of the entry according to how // much storage it has remaining. func (hdb *HostDB) storageRemainingAdjustments(entry modules.HostDBEntry, allowance modules.Allowance) float64 { // Determine how much data the renter is storing on this host. var storedData float64 if ci, exists := hdb.knownContracts[entry.PublicKey.String()]; exists { storedData = float64(ci.StoredData) } // Divide by zero mitigation. if allowance.Hosts == 0 { allowance.Hosts = 1 } // idealDataPerHost is the amount of data that we would have to put on each // host assuming that our storage requirements were spread evenly across // every single host. idealDataPerHost := float64(allowance.ExpectedStorage) * allowance.ExpectedRedundancy / float64(allowance.Hosts) // allocationPerHost is the amount of data that we would like to be able to // put on each host, because data is not always spread evenly across the // hosts during upload. Slower hosts may get very little data, more // expensive hosts may get very little data, and other factors can skew the // distribution. allocationPerHost takes into account the skew and tries to // ensure that there's enough allocation per host to accommodate for a skew. allocationPerHost := idealDataPerHost * storageSkewMultiplier // hostExpectedStorage is the amount of storage that we expect to be able to // store on this host overall, which should include the stored data that is // already on the host. hostExpectedStorage := (float64(entry.RemainingStorage) * storageCompetitionFactor) + storedData // The score for the host is the square of the amount of storage we // expected divided by the amount of storage we want. If we expect to be // able to store more data on the host than we need to allocate, the host // gets full score for storage. if hostExpectedStorage >= allocationPerHost { return 1 } // Otherwise, the score of the host is the fraction of the data we expect // raised to the storage penalty exponentiation. storageRatio := hostExpectedStorage / allocationPerHost return math.Pow(storageRatio, storagePenaltyExponentitaion) } // versionAdjustments will adjust the weight of the entry according to the siad // version reported by the host. func versionAdjustments(entry modules.HostDBEntry) float64
// lifetimeAdjustments will adjust the weight of the host according to the total // amount of time that has passed since the host's original announcement. func (hdb *HostDB) lifetimeAdjustments(entry modules.HostDBEntry) float64 { base := float64(1) if hdb.blockHeight >= entry.FirstSeen { age := hdb.blockHeight - entry.FirstSeen if age < 12000 { base = base * 2 / 3 // 1.5x total } if age < 6000 { base = base / 2 // 3x total } if age < 4000 { base = base / 2 // 6x total } if age < 2000 { base = base / 2 // 12x total } if age < 1000 { base = base / 3 // 36x total } if age < 576 { base = base / 3 // 108x total } if age < 288 { base = base / 3 // 324x total } if age < 144 { base = base / 3 // 972x total } } return base } // uptimeAdjustments penalizes the host for having poor uptime, and for being // offline. // // CAUTION: The function 'updateEntry' will manually fill out two scans for a // new host to give the host some initial uptime or downtime. Modification of // this function needs to be made paying attention to the structure of that // function. // // TODO: This function doesn't correctly handle situations where the user's // clock goes back in time. If the user adjusts their system clock to be in the // past, we'll get timestamping that's out of order, and this will cause erratic // / improper / untested behavior. func (hdb *HostDB) uptimeAdjustments(entry modules.HostDBEntry) float64 { // Special case: if we have scanned the host twice or fewer, don't perform // uptime math. if len(entry.ScanHistory) == 0 { return 0.25 } if len(entry.ScanHistory) == 1 { if entry.ScanHistory[0].Success { return 0.75 } return 0.25 } if len(entry.ScanHistory) == 2 { if entry.ScanHistory[0].Success && entry.ScanHistory[1].Success { return 0.85 } if entry.ScanHistory[0].Success || entry.ScanHistory[1].Success { return 0.50 } return 0.05 } // Compute the total measured uptime and total measured downtime for this // host. downtime := entry.HistoricDowntime uptime := entry.HistoricUptime recentTime := entry.ScanHistory[0].Timestamp recentSuccess := entry.ScanHistory[0].Success for _, scan := range entry.ScanHistory[1:] { if recentTime.After(scan.Timestamp) { if build.DEBUG { hdb.staticLog.Critical("Host entry scan history not sorted.") } else { hdb.staticLog.Print("WARN: Host entry scan history not sorted.") } // Ignore the unsorted scan entry. continue } if recentSuccess { uptime += scan.Timestamp.Sub(recentTime) } else { downtime += scan.Timestamp.Sub(recentTime) } recentTime = scan.Timestamp recentSuccess = scan.Success } // One more check to incorporate the uptime or downtime of the most recent // scan, we assume that if we scanned them right now, their uptime / // downtime status would be equal to what it currently is. if recentSuccess { uptime += time.Now().Sub(recentTime) } else { downtime += time.Now().Sub(recentTime) } // Sanity check against 0 total time. if uptime == 0 && downtime == 0 { build.Critical("uptime and downtime are zero for this host, should have been caught in earlier logic") return math.SmallestNonzeroFloat64 } // Compute the uptime ratio, but shift by 0.02 to acknowledge fully that // 98% uptime and 100% uptime is valued the same. uptimeRatio := float64(uptime) / float64(uptime+downtime) if uptimeRatio > 0.98 { uptimeRatio = 0.98 } uptimeRatio += 0.02 // Cap the total amount of downtime allowed based on the total number of // scans that have happened. allowedDowntime := 0.03 * float64(len(entry.ScanHistory)) if uptimeRatio < 1-allowedDowntime { uptimeRatio = 1 - allowedDowntime } // Calculate the penalty for low uptime. Penalties increase extremely // quickly as uptime falls away from 95%. // // 100% uptime = 1 // 98% uptime = 1 // 95% uptime = 0.83 // 90% uptime = 0.26 // 85% uptime = 0.03 // 80% uptime = 0.001 // 75% uptime = 0.00001 // 70% uptime = 0.0000001 exp := 200 * math.Min(1-uptimeRatio, 0.30) return math.Pow(uptimeRatio, exp) } // managedCalculateHostWeightFn creates a hosttree.WeightFunc given an // Allowance. // // NOTE: the hosttree.WeightFunc that is returned accesses fields of the hostdb. // The hostdb lock must be held while utilizing the WeightFunc func (hdb *HostDB) managedCalculateHostWeightFn(allowance modules.Allowance) hosttree.WeightFunc { // Get the txnFees. hdb.mu.RLock() txnFees := hdb.txnFees hdb.mu.RUnlock() // Create the weight function. return func(entry modules.HostDBEntry) hosttree.ScoreBreakdown { return hosttree.HostAdjustments{ AcceptContractAdjustment: hdb.acceptContractAdjustments(entry), AgeAdjustment: hdb.lifetimeAdjustments(entry), BasePriceAdjustment: hdb.basePriceAdjustments(entry), BurnAdjustment: 1, CollateralAdjustment: hdb.collateralAdjustments(entry, allowance), DurationAdjustment: hdb.durationAdjustments(entry, allowance), InteractionAdjustment: hdb.interactionAdjustments(entry), PriceAdjustment: hdb.priceAdjustments(entry, allowance, txnFees), StorageRemainingAdjustment: hdb.storageRemainingAdjustments(entry, allowance), UptimeAdjustment: hdb.uptimeAdjustments(entry), VersionAdjustment: versionAdjustments(entry), } } } // EstimateHostScore takes a HostExternalSettings and returns the estimated // score of that host in the hostdb, assuming no penalties for age or uptime. func (hdb *HostDB) EstimateHostScore(entry modules.HostDBEntry, allowance modules.Allowance) (modules.HostScoreBreakdown, error) { if err := hdb.tg.Add(); err != nil { return modules.HostScoreBreakdown{}, err } defer hdb.tg.Done() return hdb.managedEstimatedScoreBreakdown(entry, allowance, true, true, true) } // ScoreBreakdown provdes a detailed set of scalars and bools indicating // elements of the host's overall score. func (hdb *HostDB) ScoreBreakdown(entry modules.HostDBEntry) (modules.HostScoreBreakdown, error) { if err := hdb.tg.Add(); err != nil { return modules.HostScoreBreakdown{}, err } defer hdb.tg.Done() return hdb.managedScoreBreakdown(entry, false, false, false) } // managedEstimatedScoreBreakdown computes the score breakdown of a host. // Certain adjustments can be ignored. func (hdb *HostDB) managedEstimatedScoreBreakdown(entry modules.HostDBEntry, allowance modules.Allowance, ignoreAge, ignoreDuration, ignoreUptime bool) (modules.HostScoreBreakdown, error) { hosts, err := hdb.ActiveHosts() if err != nil { return modules.HostScoreBreakdown{}, errors.AddContext(err, "error getting Active hosts:") } weightFunc := hdb.managedCalculateHostWeightFn(allowance) // Compute the totalScore. hdb.mu.Lock() defer hdb.mu.Unlock() totalScore := types.Currency{} for _, host := range hosts { totalScore = totalScore.Add(hdb.weightFunc(host).Score()) } // Compute the breakdown. return weightFunc(entry).HostScoreBreakdown(totalScore, ignoreAge, ignoreDuration, ignoreUptime), nil } // managedScoreBreakdown computes the score breakdown of a host. Certain // adjustments can be ignored. func (hdb *HostDB) managedScoreBreakdown(entry modules.HostDBEntry, ignoreAge, ignoreDuration, ignoreUptime bool) (modules.HostScoreBreakdown, error) { hosts, err := hdb.ActiveHosts() if err != nil { return modules.HostScoreBreakdown{}, errors.AddContext(err, "error getting Active hosts:") } // Compute the totalScore. hdb.mu.Lock() defer hdb.mu.Unlock() totalScore := types.Currency{} for _, host := range hosts { totalScore = totalScore.Add(hdb.weightFunc(host).Score()) } // Compute the breakdown. return hdb.weightFunc(entry).HostScoreBreakdown(totalScore, ignoreAge, ignoreDuration, ignoreUptime), nil }
{ base := float64(1) // This needs to give a very tiny penalty to the current version. The reason // we give the current version a very tiny penalty is so that the test suite // complains if we forget to update this file when we bump the version next // time. The value compared against must be higher than the current version. if build.VersionCmp(entry.Version, "1.5.8") < 0 { base = base * 0.99999 // Safety value to make sure we update the version penalties every time we update the host. } // This needs to be "less than the current version" - anything less than the current version should get a penalty. if build.VersionCmp(entry.Version, "1.5.7") < 0 { base = base * 0.99 // Slight penalty against slightly out of date hosts. } if build.VersionCmp(entry.Version, "1.5.6") < 0 { base = base * 0.99 // Slight penalty against slightly out of date hosts. } if build.VersionCmp(entry.Version, "1.5.5") < 0 { base = base * 0.90 // 10% penalty for hosts without the virtual sector fix } // Heavy penalty for hosts before the foundation hardfork. if build.VersionCmp(entry.Version, "1.5.4") < 0 { base = math.SmallestNonzeroFloat64 } return base }
broker.go
package messaging import ( "fmt" "sync/atomic" "time" "github.com/cskr/pubsub" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/util/workqueue" configv1alpha2 "github.com/openservicemesh/osm/pkg/apis/config/v1alpha2" "github.com/openservicemesh/osm/pkg/announcements" "github.com/openservicemesh/osm/pkg/constants" "github.com/openservicemesh/osm/pkg/k8s/events" "github.com/openservicemesh/osm/pkg/metricsstore" ) const ( // proxyUpdateSlidingWindow is the sliding window duration used to batch proxy update events proxyUpdateSlidingWindow = 2 * time.Second // proxyUpdateMaxWindow is the max window duration used to batch proxy update events, and is // the max amount of time a proxy update event can be held for batching before being dispatched. proxyUpdateMaxWindow = 10 * time.Second ) // NewBroker returns a new message broker instance and starts the internal goroutine // to process events added to the workqueue. func
(stopCh <-chan struct{}) *Broker { b := &Broker{ queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), proxyUpdatePubSub: pubsub.New(0), proxyUpdateCh: make(chan proxyUpdateEvent), kubeEventPubSub: pubsub.New(0), certPubSub: pubsub.New(0), } go b.runWorkqueueProcessor(stopCh) go b.runProxyUpdateDispatcher(stopCh) return b } // GetProxyUpdatePubSub returns the PubSub instance corresponding to proxy update events func (b *Broker) GetProxyUpdatePubSub() *pubsub.PubSub { return b.proxyUpdatePubSub } // GetKubeEventPubSub returns the PubSub instance corresponding to k8s events func (b *Broker) GetKubeEventPubSub() *pubsub.PubSub { return b.kubeEventPubSub } // GetCertPubSub returns the PubSub instance corresponding to certificate events func (b *Broker) GetCertPubSub() *pubsub.PubSub { return b.certPubSub } // GetTotalQProxyEventCount returns the total number of events read from the workqueue // pertaining to proxy updates func (b *Broker) GetTotalQProxyEventCount() uint64 { return atomic.LoadUint64(&b.totalQProxyEventCount) } // GetTotalDispatchedProxyEventCount returns the total number of events dispatched // to subscribed proxies func (b *Broker) GetTotalDispatchedProxyEventCount() uint64 { return atomic.LoadUint64(&b.totalDispatchedProxyEventCount) } // runWorkqueueProcessor starts a goroutine to process events from the workqueue until // signalled to stop on the given channel. func (b *Broker) runWorkqueueProcessor(stopCh <-chan struct{}) { // Start the goroutine workqueue to process kubernetes events // The continuous processing of items in the workqueue will run // until signalled to stop. // The 'wait.Until' helper is used here to ensure the processing // of items in the workqueue continues until signalled to stop, even // if 'processNextItems()' returns false. go wait.Until( func() { for b.processNextItem() { } }, time.Second, stopCh, ) } // runProxyUpdateDispatcher runs the dispatcher responsible for batching // proxy update events received in close proximity. // It batches proxy update events with the use of 2 timers: // 1. Sliding window timer that resets when a proxy update event is received // 2. Max window timer that caps the max duration a sliding window can be reset to // When either of the above timers expire, the proxy update event is published // on the dedicated pub-sub instance. func (b *Broker) runProxyUpdateDispatcher(stopCh <-chan struct{}) { // batchTimer and maxTimer are updated by the dispatcher routine // when events are processed and timeouts expire. They are initialized // with a large timeout (a decade) so they don't time out till an event // is received. noTimeout := 87600 * time.Hour // A decade slidingTimer := time.NewTimer(noTimeout) maxTimer := time.NewTimer(noTimeout) // dispatchPending indicates whether a proxy update event is pending // from being published on the pub-sub. A proxy update event will // be held for 'proxyUpdateSlidingWindow' duration to be able to // coalesce multiple proxy update events within that duration, before // it is dispatched on the pub-sub. The 'proxyUpdateSlidingWindow' duration // is a sliding window, which means each event received within a window // slides the window further ahead in time, up to a max of 'proxyUpdateMaxWindow'. // // This mechanism is necessary to avoid triggering proxy update pub-sub events in // a hot loop, which would otherwise result in CPU spikes on the controller. // We want to coalesce as many proxy update events within the 'proxyUpdateMaxWindow' // duration. dispatchPending := false batchCount := 0 // number of proxy update events batched per dispatch var event proxyUpdateEvent for { select { case e, ok := <-b.proxyUpdateCh: if !ok { log.Warn().Msgf("Proxy update event chan closed, exiting dispatcher") return } event = e if !dispatchPending { // No proxy update events are pending send on the pub-sub. // Reset the dispatch timers. The events will be dispatched // when either of the timers expire. if !slidingTimer.Stop() { <-slidingTimer.C } slidingTimer.Reset(proxyUpdateSlidingWindow) if !maxTimer.Stop() { <-maxTimer.C } maxTimer.Reset(proxyUpdateMaxWindow) dispatchPending = true batchCount++ log.Trace().Msgf("Pending dispatch of msg kind %s", event.msg.Kind) } else { // A proxy update event is pending dispatch. Update the sliding window. if !slidingTimer.Stop() { <-slidingTimer.C } slidingTimer.Reset(proxyUpdateSlidingWindow) batchCount++ log.Trace().Msgf("Reset sliding window for msg kind %s", event.msg.Kind) } case <-slidingTimer.C: slidingTimer.Reset(noTimeout) // 'slidingTimer' drained in this case statement // Stop and drain 'maxTimer' before Reset() if !maxTimer.Stop() { // Drain channel. Refer to Reset() doc for more info. <-maxTimer.C } maxTimer.Reset(noTimeout) b.proxyUpdatePubSub.Pub(event.msg, event.topic) atomic.AddUint64(&b.totalDispatchedProxyEventCount, 1) metricsstore.DefaultMetricsStore.ProxyBroadcastEventCount.Inc() log.Trace().Msgf("Sliding window expired, msg kind %s, batch size %d", event.msg.Kind, batchCount) dispatchPending = false batchCount = 0 case <-maxTimer.C: maxTimer.Reset(noTimeout) // 'maxTimer' drained in this case statement // Stop and drain 'slidingTimer' before Reset() if !slidingTimer.Stop() { // Drain channel. Refer to Reset() doc for more info. <-slidingTimer.C } slidingTimer.Reset(noTimeout) b.proxyUpdatePubSub.Pub(event.msg, event.topic) atomic.AddUint64(&b.totalDispatchedProxyEventCount, 1) metricsstore.DefaultMetricsStore.ProxyBroadcastEventCount.Inc() log.Trace().Msgf("Max window expired, msg kind %s, batch size %d", event.msg.Kind, batchCount) dispatchPending = false batchCount = 0 case <-stopCh: log.Info().Msg("Proxy update dispatcher received stop signal, exiting") return } } } // processEvent processes an event dispatched from the workqueue. // It does the following: // 1. If the event must update a proxy, it publishes a proxy update message // 2. Processes other internal control plane events // 3. Updates metrics associated with the event func (b *Broker) processEvent(msg events.PubSubMessage) { log.Trace().Msgf("Processing msg kind: %s", msg.Kind) // Update proxies if applicable if event := getProxyUpdateEvent(msg); event != nil { log.Trace().Msgf("Msg kind %s will update proxies", msg.Kind) atomic.AddUint64(&b.totalQProxyEventCount, 1) if event.topic != announcements.ProxyUpdate.String() { // This is not a broadcast event, so it cannot be coalesced with // other events as the event is specific to one or more proxies. b.proxyUpdatePubSub.Pub(event.msg, event.topic) atomic.AddUint64(&b.totalDispatchedProxyEventCount, 1) } else { // Pass the broadcast event to the dispatcher routine, that coalesces // multiple broadcasts received in close proximity. b.proxyUpdateCh <- *event } } // Publish event to other interested clients, e.g. log level changes, debug server on/off etc. b.kubeEventPubSub.Pub(msg, msg.Kind.String()) // Update event metric updateMetric(msg) } // updateMetric updates metrics related to the event func updateMetric(msg events.PubSubMessage) { switch msg.Kind { case announcements.NamespaceAdded: metricsstore.DefaultMetricsStore.MonitoredNamespaceCounter.Inc() case announcements.NamespaceDeleted: metricsstore.DefaultMetricsStore.MonitoredNamespaceCounter.Dec() } } // Unsub unsubscribes the given channel from the PubSub instance func (b *Broker) Unsub(pubSub *pubsub.PubSub, ch chan interface{}) { // Unsubscription should be performed from a different goroutine and // existing messages on the subscribed channel must be drained as noted // in https://github.com/cskr/pubsub/blob/v1.0.2/pubsub.go#L95. go pubSub.Unsub(ch) for range ch { // Drain channel until 'Unsub' results in a close on the subscribed channel } } // getProxyUpdateEvent returns a proxyUpdateEvent type indicating whether the given PubSubMessage should // result in a Proxy configuration update on an appropriate topic. Nil is returned if the PubSubMessage // does not result in a proxy update event. func getProxyUpdateEvent(msg events.PubSubMessage) *proxyUpdateEvent { switch msg.Kind { case // // K8s native resource events // // Endpoint event announcements.EndpointAdded, announcements.EndpointDeleted, announcements.EndpointUpdated, // k8s Ingress event announcements.IngressAdded, announcements.IngressDeleted, announcements.IngressUpdated, // // OSM resource events // // Egress event announcements.EgressAdded, announcements.EgressDeleted, announcements.EgressUpdated, // IngressBackend event announcements.IngressBackendAdded, announcements.IngressBackendDeleted, announcements.IngressBackendUpdated, // Retry event announcements.RetryPolicyAdded, announcements.RetryPolicyDeleted, announcements.RetryPolicyUpdated, // MulticlusterService event announcements.MultiClusterServiceAdded, announcements.MultiClusterServiceDeleted, announcements.MultiClusterServiceUpdated, // // SMI resource events // // SMI HTTPRouteGroup event announcements.RouteGroupAdded, announcements.RouteGroupDeleted, announcements.RouteGroupUpdated, // SMI TCPRoute event announcements.TCPRouteAdded, announcements.TCPRouteDeleted, announcements.TCPRouteUpdated, // SMI TrafficSplit event announcements.TrafficSplitAdded, announcements.TrafficSplitDeleted, announcements.TrafficSplitUpdated, // SMI TrafficTarget event announcements.TrafficTargetAdded, announcements.TrafficTargetDeleted, announcements.TrafficTargetUpdated, // // Proxy events // announcements.ProxyUpdate: return &proxyUpdateEvent{ msg: msg, topic: announcements.ProxyUpdate.String(), } case announcements.MeshConfigUpdated: prevMeshConfig, okPrevCast := msg.OldObj.(*configv1alpha2.MeshConfig) newMeshConfig, okNewCast := msg.NewObj.(*configv1alpha2.MeshConfig) if !okPrevCast || !okNewCast { log.Error().Msgf("Expected MeshConfig type, got previous=%T, new=%T", okPrevCast, okNewCast) return nil } prevSpec := prevMeshConfig.Spec newSpec := newMeshConfig.Spec // A proxy config update must only be triggered when a MeshConfig field that maps to a proxy config // changes. if prevSpec.Traffic.EnableEgress != newSpec.Traffic.EnableEgress || prevSpec.Traffic.EnablePermissiveTrafficPolicyMode != newSpec.Traffic.EnablePermissiveTrafficPolicyMode || prevSpec.Observability.Tracing != newSpec.Observability.Tracing || prevSpec.Traffic.InboundExternalAuthorization.Enable != newSpec.Traffic.InboundExternalAuthorization.Enable || // Only trigger an update on InboundExternalAuthorization field changes if the new spec has the 'Enable' flag set to true. (newSpec.Traffic.InboundExternalAuthorization.Enable && (prevSpec.Traffic.InboundExternalAuthorization != newSpec.Traffic.InboundExternalAuthorization)) || prevSpec.FeatureFlags != newSpec.FeatureFlags { return &proxyUpdateEvent{ msg: msg, topic: announcements.ProxyUpdate.String(), } } return nil case announcements.PodUpdated: // Only trigger a proxy update for proxies associated with this pod based on the proxy UUID prevPod, okPrevCast := msg.OldObj.(*corev1.Pod) newPod, okNewCast := msg.NewObj.(*corev1.Pod) if !okPrevCast || !okNewCast { log.Error().Msgf("Expected *Pod type, got previous=%T, new=%T", okPrevCast, okNewCast) return nil } prevMetricAnnotation := prevPod.Annotations[constants.PrometheusScrapeAnnotation] newMetricAnnotation := newPod.Annotations[constants.PrometheusScrapeAnnotation] if prevMetricAnnotation != newMetricAnnotation { proxyUUID := newPod.Labels[constants.EnvoyUniqueIDLabelName] return &proxyUpdateEvent{ msg: msg, topic: GetPubSubTopicForProxyUUID(proxyUUID), } } return nil default: return nil } } // GetPubSubTopicForProxyUUID returns the topic on which PubSubMessages specific to a proxy UUID are published func GetPubSubTopicForProxyUUID(uuid string) string { return fmt.Sprintf("proxy:%s", uuid) }
NewBroker
gradient_check.py
import numpy as np from random import randrange def eval_numerical_gradient(f, x, verbose=True, h=0.00001): """ a naive implementation of numerical gradient of f at x - f should be a function that takes a single argument - x is the point (numpy array) to evaluate the gradient at """ fx = f(x) # evaluate function value at original point grad = np.zeros_like(x) # iterate over all indexes in x it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite']) while not it.finished: # evaluate function at x+h ix = it.multi_index oldval = x[ix] x[ix] = oldval + h # increment by h fxph = f(x) # evalute f(x + h) x[ix] = oldval - h fxmh = f(x) # evaluate f(x - h) x[ix] = oldval # restore # compute the partial derivative with centered formula grad[ix] = (fxph - fxmh) / (2 * h) # the slope if verbose: print(ix, grad[ix]) it.iternext() # step to next dimension return grad def eval_numerical_gradient_array(f, x, df, h=1e-5): """ Evaluate a numeric gradient for a function that accepts a numpy array and returns a numpy array. """ grad = np.zeros_like(x) it = np.nditer(x, flags=['multi_index'], op_flags=['readwrite']) while not it.finished: ix = it.multi_index oldval = x[ix] x[ix] = oldval + h pos = f(x).copy() x[ix] = oldval - h neg = f(x).copy() x[ix] = oldval grad[ix] = np.sum((pos - neg) * df) / (2 * h) it.iternext() return grad def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):
def eval_numerical_gradient_net(net, inputs, output, h=1e-5): return eval_numerical_gradient_blobs(lambda *args: net.forward(), inputs, output, h=h) def grad_check_sparse(f, x, analytic_grad, num_checks=10, h=1e-5): """ sample a few random elements and only return numerical in this dimensions. """ for i in range(num_checks): ix = tuple([randrange(m) for m in x.shape]) oldval = x[ix] x[ix] = oldval + h # increment by h fxph = f(x) # evaluate f(x + h) x[ix] = oldval - h # increment by h fxmh = f(x) # evaluate f(x - h) x[ix] = oldval # reset grad_numerical = (fxph - fxmh) / (2 * h) grad_analytic = analytic_grad[ix] rel_error = (abs(grad_numerical - grad_analytic) / (abs(grad_numerical) + abs(grad_analytic))) print('numerical: %f analytic: %f, relative error: %e' %(grad_numerical, grad_analytic, rel_error))
""" Compute numeric gradients for a function that operates on input and output blobs. We assume that f accepts several input blobs as arguments, followed by a blob where outputs will be written. For example, f might be called like: f(x, w, out) where x and w are input Blobs, and the result of f will be written to out. Inputs: - f: function - inputs: tuple of input blobs - output: output blob - h: step size """ numeric_diffs = [] for input_blob in inputs: diff = np.zeros_like(input_blob.diffs) it = np.nditer(input_blob.vals, flags=['multi_index'], op_flags=['readwrite']) while not it.finished: idx = it.multi_index orig = input_blob.vals[idx] input_blob.vals[idx] = orig + h f(*(inputs + (output,))) pos = np.copy(output.vals) input_blob.vals[idx] = orig - h f(*(inputs + (output,))) neg = np.copy(output.vals) input_blob.vals[idx] = orig diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h) it.iternext() numeric_diffs.append(diff) return numeric_diffs
profile.component.ts
import { Component, OnInit, Inject, ViewChild } from '@angular/core';
@Component({ selector: 'app-profile', templateUrl: './profile.component.html', styleUrls: ['./profile.component.css'] }) export class ProfileComponent implements OnInit { isEditing: boolean = false; hide = true; @ViewChild('f') formValues; constructor(private userService: UserService, private router: Router) { } ngOnInit() { } onSubmit(form: NgForm) { var user: User = this.userService.getActiveUser(); if (form.value.email != null && form.value.email != "") { user.email = form.value.email; } if (form.value.phone != null && form.value.phone != "") { user.phone = form.value.phone; } if (form.value.password != null && form.value.password != "") { user.password = form.value.password; } if (form.value.firstName != null && form.value.firstName != "") { user.firstName = form.value.firstName; } if (form.value.lastName != null && form.value.lastName != "") { user.lastName = form.value.lastName; } if (form.value.address != null && form.value.address != "") { user.address = form.value.address; } if (form.value.interest != null && form.value.interest != "") { user.interests = form.value.interest; } this.userService.updateUser(user); this.isEditing = false; this.formValues.resetForm(); } }
import { UserService, User } from '../user.service'; import { NgForm } from '@angular/forms'; import { MAT_DIALOG_DATA } from '@angular/material'; import { Router } from '@angular/router';
remote.rs
use crate::core::{GitReference, PackageId, SourceId}; use crate::sources::git; use crate::sources::registry::MaybeLock; use crate::sources::registry::{ RegistryConfig, RegistryData, CRATE_TEMPLATE, LOWER_PREFIX_TEMPLATE, PREFIX_TEMPLATE, VERSION_TEMPLATE, }; use crate::util::errors::{CargoResult, CargoResultExt}; use crate::util::interning::InternedString; use crate::util::paths; use crate::util::{Config, Filesystem, Sha256}; use lazycell::LazyCell; use log::{debug, trace}; use std::cell::{Cell, Ref, RefCell}; use std::fmt::Write as FmtWrite; use std::fs::{self, File, OpenOptions}; use std::io::prelude::*; use std::io::SeekFrom; use std::mem; use std::path::Path; use std::str; fn make_crate_prefix(name: &str) -> String { match name.len() { 1 => String::from("1"), 2 => String::from("2"), 3 => format!("3/{}", &name[..1]), _ => format!("{}/{}", &name[0..2], &name[2..4]), } } pub struct RemoteRegistry<'cfg> { index_path: Filesystem, cache_path: Filesystem, source_id: SourceId, index_git_ref: GitReference, config: &'cfg Config, tree: RefCell<Option<git2::Tree<'static>>>, repo: LazyCell<git2::Repository>, head: Cell<Option<git2::Oid>>, current_sha: Cell<Option<InternedString>>, } impl<'cfg> RemoteRegistry<'cfg> { pub fn new(source_id: SourceId, config: &'cfg Config, name: &str) -> RemoteRegistry<'cfg> { RemoteRegistry { index_path: config.registry_index_path().join(name), cache_path: config.registry_cache_path().join(name), source_id, config, // TODO: we should probably make this configurable index_git_ref: GitReference::DefaultBranch, tree: RefCell::new(None), repo: LazyCell::new(), head: Cell::new(None), current_sha: Cell::new(None), } } fn repo(&self) -> CargoResult<&git2::Repository> { self.repo.try_borrow_with(|| { let path = self.config.assert_package_cache_locked(&self.index_path); // Fast path without a lock if let Ok(repo) = git2::Repository::open(&path) { trace!("opened a repo without a lock"); return Ok(repo); } // Ok, now we need to lock and try the whole thing over again. trace!("acquiring registry index lock"); match git2::Repository::open(&path) { Ok(repo) => Ok(repo), Err(_) => { drop(paths::remove_dir_all(&path)); paths::create_dir_all(&path)?; // Note that we'd actually prefer to use a bare repository // here as we're not actually going to check anything out. // All versions of Cargo, though, share the same CARGO_HOME, // so for compatibility with older Cargo which *does* do // checkouts we make sure to initialize a new full // repository (not a bare one). //
// We should change this to `init_bare` whenever we feel // like enough time has passed or if we change the directory // that the folder is located in, such as by changing the // hash at the end of the directory. // // Note that in the meantime we also skip `init.templatedir` // as it can be misconfigured sometimes or otherwise add // things that we don't want. let mut opts = git2::RepositoryInitOptions::new(); opts.external_template(false); Ok(git2::Repository::init_opts(&path, &opts) .chain_err(|| "failed to initialize index git repository")?) } } }) } fn head(&self) -> CargoResult<git2::Oid> { if self.head.get().is_none() { let repo = self.repo()?; let oid = self.index_git_ref.resolve(repo)?; self.head.set(Some(oid)); } Ok(self.head.get().unwrap()) } fn tree(&self) -> CargoResult<Ref<'_, git2::Tree<'_>>> { { let tree = self.tree.borrow(); if tree.is_some() { return Ok(Ref::map(tree, |s| s.as_ref().unwrap())); } } let repo = self.repo()?; let commit = repo.find_commit(self.head()?)?; let tree = commit.tree()?; // Unfortunately in libgit2 the tree objects look like they've got a // reference to the repository object which means that a tree cannot // outlive the repository that it came from. Here we want to cache this // tree, though, so to accomplish this we transmute it to a static // lifetime. // // Note that we don't actually hand out the static lifetime, instead we // only return a scoped one from this function. Additionally the repo // we loaded from (above) lives as long as this object // (`RemoteRegistry`) so we then just need to ensure that the tree is // destroyed first in the destructor, hence the destructor on // `RemoteRegistry` below. let tree = unsafe { mem::transmute::<git2::Tree<'_>, git2::Tree<'static>>(tree) }; *self.tree.borrow_mut() = Some(tree); Ok(Ref::map(self.tree.borrow(), |s| s.as_ref().unwrap())) } fn filename(&self, pkg: PackageId) -> String { format!("{}-{}.crate", pkg.name(), pkg.version()) } } const LAST_UPDATED_FILE: &str = ".last-updated"; impl<'cfg> RegistryData for RemoteRegistry<'cfg> { fn prepare(&self) -> CargoResult<()> { self.repo()?; // create intermediate dirs and initialize the repo Ok(()) } fn index_path(&self) -> &Filesystem { &self.index_path } fn assert_index_locked<'a>(&self, path: &'a Filesystem) -> &'a Path { self.config.assert_package_cache_locked(path) } fn current_version(&self) -> Option<InternedString> { if let Some(sha) = self.current_sha.get() { return Some(sha); } let sha = InternedString::new(&self.head().ok()?.to_string()); self.current_sha.set(Some(sha)); Some(sha) } fn load( &self, _root: &Path, path: &Path, data: &mut dyn FnMut(&[u8]) -> CargoResult<()>, ) -> CargoResult<()> { // Note that the index calls this method and the filesystem is locked // in the index, so we don't need to worry about an `update_index` // happening in a different process. let repo = self.repo()?; let tree = self.tree()?; let entry = tree.get_path(path)?; let object = entry.to_object(repo)?; let blob = match object.as_blob() { Some(blob) => blob, None => anyhow::bail!("path `{}` is not a blob in the git repo", path.display()), }; data(blob.content()) } fn config(&mut self) -> CargoResult<Option<RegistryConfig>> { debug!("loading config"); self.prepare()?; self.config.assert_package_cache_locked(&self.index_path); let mut config = None; self.load(Path::new(""), Path::new("config.json"), &mut |json| { config = Some(serde_json::from_slice(json)?); Ok(()) })?; trace!("config loaded"); Ok(config) } fn update_index(&mut self) -> CargoResult<()> { if self.config.offline() { return Ok(()); } if self.config.cli_unstable().no_index_update { return Ok(()); } // Make sure the index is only updated once per session since it is an // expensive operation. This generally only happens when the resolver // is run multiple times, such as during `cargo publish`. if self.config.updated_sources().contains(&self.source_id) { return Ok(()); } debug!("updating the index"); // Ensure that we'll actually be able to acquire an HTTP handle later on // once we start trying to download crates. This will weed out any // problems with `.cargo/config` configuration related to HTTP. // // This way if there's a problem the error gets printed before we even // hit the index, which may not actually read this configuration. self.config.http()?; self.prepare()?; self.head.set(None); *self.tree.borrow_mut() = None; self.current_sha.set(None); let path = self.config.assert_package_cache_locked(&self.index_path); self.config .shell() .status("Updating", self.source_id.display_index())?; // Fetch the latest version of our `index_git_ref` into the index // checkout. let url = self.source_id.url(); let repo = self.repo.borrow_mut().unwrap(); git::fetch(repo, url.as_str(), &self.index_git_ref, self.config) .chain_err(|| format!("failed to fetch `{}`", url))?; self.config.updated_sources().insert(self.source_id); // Create a dummy file to record the mtime for when we updated the // index. paths::create(&path.join(LAST_UPDATED_FILE))?; Ok(()) } fn download(&mut self, pkg: PackageId, _checksum: &str) -> CargoResult<MaybeLock> { let filename = self.filename(pkg); // Attempt to open an read-only copy first to avoid an exclusive write // lock and also work with read-only filesystems. Note that we check the // length of the file like below to handle interrupted downloads. // // If this fails then we fall through to the exclusive path where we may // have to redownload the file. let path = self.cache_path.join(&filename); let path = self.config.assert_package_cache_locked(&path); if let Ok(dst) = File::open(&path) { let meta = dst.metadata()?; if meta.len() > 0 { return Ok(MaybeLock::Ready(dst)); } } let config = self.config()?.unwrap(); let mut url = config.dl; if !url.contains(CRATE_TEMPLATE) && !url.contains(VERSION_TEMPLATE) && !url.contains(PREFIX_TEMPLATE) && !url.contains(LOWER_PREFIX_TEMPLATE) { write!(url, "/{}/{}/download", CRATE_TEMPLATE, VERSION_TEMPLATE).unwrap(); } let prefix = make_crate_prefix(&*pkg.name()); let url = url .replace(CRATE_TEMPLATE, &*pkg.name()) .replace(VERSION_TEMPLATE, &pkg.version().to_string()) .replace(PREFIX_TEMPLATE, &prefix) .replace(LOWER_PREFIX_TEMPLATE, &prefix.to_lowercase()); Ok(MaybeLock::Download { url, descriptor: pkg.to_string(), }) } fn finish_download( &mut self, pkg: PackageId, checksum: &str, data: &[u8], ) -> CargoResult<File> { // Verify what we just downloaded let actual = Sha256::new().update(data).finish_hex(); if actual != checksum { anyhow::bail!("failed to verify the checksum of `{}`", pkg) } let filename = self.filename(pkg); self.cache_path.create_dir()?; let path = self.cache_path.join(&filename); let path = self.config.assert_package_cache_locked(&path); let mut dst = OpenOptions::new() .create(true) .read(true) .write(true) .open(&path) .chain_err(|| format!("failed to open `{}`", path.display()))?; let meta = dst.metadata()?; if meta.len() > 0 { return Ok(dst); } dst.write_all(data)?; dst.seek(SeekFrom::Start(0))?; Ok(dst) } fn is_crate_downloaded(&self, pkg: PackageId) -> bool { let filename = format!("{}-{}.crate", pkg.name(), pkg.version()); let path = Path::new(&filename); let path = self.cache_path.join(path); let path = self.config.assert_package_cache_locked(&path); if let Ok(meta) = fs::metadata(path) { return meta.len() > 0; } false } } impl<'cfg> Drop for RemoteRegistry<'cfg> { fn drop(&mut self) { // Just be sure to drop this before our other fields self.tree.borrow_mut().take(); } } #[cfg(test)] mod tests { use super::make_crate_prefix; #[test] fn crate_prefix() { assert_eq!(make_crate_prefix("a"), "1"); assert_eq!(make_crate_prefix("ab"), "2"); assert_eq!(make_crate_prefix("abc"), "3/a"); assert_eq!(make_crate_prefix("Abc"), "3/A"); assert_eq!(make_crate_prefix("AbCd"), "Ab/Cd"); assert_eq!(make_crate_prefix("aBcDe"), "aB/cD"); } }
openapi.go
// Copyright 2016-2018, Pulumi Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package openapi import ( "fmt" "github.com/pulumi/pulumi-kubernetes/provider/v2/pkg/kinds" logger "github.com/pulumi/pulumi/sdk/v2/go/common/util/logging" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/jsonmergepatch" "k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/cli-runtime/pkg/resource" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/kube-openapi/pkg/util/proto" "k8s.io/kubectl/pkg/scheme" "k8s.io/kubectl/pkg/util/openapi" "k8s.io/kubectl/pkg/util/openapi/validation" ) // -------------------------------------------------------------------------- // OpenAPI spec utilities code. // // Primarily serves two purposes: // // 1. Validation. This code allows us to easily validate unstructured property bag objects against // the OpenAPI spec exposed by the API server. The OpenAPI spec would typically be obtained from // the API server, and it represents not only the spec of the Kubernetes version running the API // server itself, but also the flags it was started with, (e.g., RBAC enabled or not, etc.). // 2. Update/patch logic. Code to allow us to introspect on the OpenAPI spec to generate the patch // logic required to update some Kubernetes resource. // -------------------------------------------------------------------------- // ValidateAgainstSchema validates a document against the given schema. func ValidateAgainstSchema( resources openapi.Resources, obj *unstructured.Unstructured, ) error { bytes, err := obj.MarshalJSON() if err != nil { return err } // Error if schema does not exist for object type. gvk := obj.GroupVersionKind() resSchema := resources.LookupResource(gvk) if resSchema == nil { return fmt.Errorf("cluster does not support resource type '%s'", gvk.String()) } // TODO(hausdorff): Come back and make sure that `ValidateBytes` actually reports a list of // validation errors when there are multiple errors for usability purposes. // Validate resource against schema. specValidator := validation.NewSchemaValidation(resources) return specValidator.ValidateBytes(bytes) } // PatchForResourceUpdate introspects on the given OpenAPI spec and attempts to generate a strategic merge patch for // use in a resource update. If there is no specification of how to generate a strategic merge patch, we fall back // to JSON merge patch. func PatchForResourceUpdate( resources openapi.Resources, lastSubmitted, currentSubmitted, liveOldObj *unstructured.Unstructured, ) (patch []byte, patchType types.PatchType, lookupPatchMeta strategicpatch.LookupPatchMeta, err error) { // Create JSON blobs for each of these, preparing to create the three-way merge patch. lastSubmittedJSON, err := lastSubmitted.MarshalJSON() if err != nil { return nil, "", nil, err } currentSubmittedJSON, err := currentSubmitted.MarshalJSON() if err != nil { return nil, "", nil, err } liveOldJSON, err := liveOldObj.MarshalJSON() if err != nil { return nil, "", nil, err } // Use kinds.Namespaced() to determine if kind is unknown, such as for CRD Kinds. kind := kinds.Kind(lastSubmitted.GetKind()) if knownKind, _ := kind.Namespaced(); !knownKind { // Use a JSON merge patch for CRD Kinds. patch, patchType, err = MergePatch( lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) return patch, patchType, lookupPatchMeta, err } // Attempt a three-way strategic merge. patch, patchType, lookupPatchMeta, err = StrategicMergePatch( resources, lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) // Else, fall back to a three-way JSON merge patch. if err != nil { patch, patchType, err = MergePatch( lastSubmitted, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, ) } return patch, patchType, lookupPatchMeta, err } // StrategicMergePatch is a helper to use a three-way strategic merge on a resource version. // See for more details: https://tools.ietf.org/html/rfc6902 func StrategicMergePatch( resources openapi.Resources, lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, lookupPatchMeta strategicpatch.LookupPatchMeta, err error) { gvk := lastSubmitted.GroupVersionKind() if resSchema := resources.LookupResource(gvk); resSchema != nil { logger.V(1).Infof("Attempting to update '%s' '%s/%s' with strategic merge", gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) patch, patchType, lookupPatchMeta, err = strategicMergePatch( gvk, resSchema, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) } if err != nil { return patch, patchType, lookupPatchMeta, err } return patch, patchType, lookupPatchMeta, nil } // MergePatch is a helper to use a three-way JSON merge patch on a resource version. // See for more details: https://tools.ietf.org/html/rfc7386 func MergePatch( lastSubmitted *unstructured.Unstructured, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) (patch []byte, patchType types.PatchType, err error) {
// Fall back to three-way JSON merge patch. logger.V(1).Infof("Attempting to update '%s' '%s/%s' with JSON merge", gvk.String(), lastSubmitted.GetNamespace(), lastSubmitted.GetName()) patch, patchType, err = jsonMergePatch(lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) return patch, patchType, err } // SupportsDryRun returns true if the given GVK supports dry-run applies. func SupportsDryRun(client discovery.CachedDiscoveryInterface, dynamicClient dynamic.Interface, gvk schema.GroupVersionKind) bool { // If an error is returned, DryRun is not supported. if err := resource.VerifyDryRun(gvk, dynamicClient, client); err != nil { return false } return true } // Pluck obtains the property identified by the string components in `path`. For example, // `Pluck(foo, "bar", "baz")` returns `foo.bar.baz`. func Pluck(obj map[string]interface{}, path ...string) (interface{}, bool) { var curr interface{} = obj for _, component := range path { // Make sure we can actually dot into the current element. currObj, isMap := curr.(map[string]interface{}) if !isMap { return nil, false } // Attempt to dot into the current element. var exists bool curr, exists = currObj[component] if !exists { return nil, false } } return curr, true } // -------------------------------------------------------------------------- // Utility functions. // -------------------------------------------------------------------------- // strategicMergePatch allows a Kubernetes resource to be "updated" by creating a three-way // "strategic" merge patch (a Kubernetes-specific patching strategy) between the user's last // submitted and current submitted versions of a resource, along with the live object as it exists // in the API server. func strategicMergePatch( gvk schema.GroupVersionKind, resourceSchema proto.Schema, lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) ([]byte, types.PatchType, strategicpatch.LookupPatchMeta, error) { // Attempt to construct patch from OpenAPI spec data. lookupPatchMeta := strategicpatch.LookupPatchMeta(strategicpatch.PatchMetaFromOpenAPI{Schema: resourceSchema}) patch, err := strategicpatch.CreateThreeWayMergePatch( lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, lookupPatchMeta, true) if err != nil { return nil, "", nil, err } // Fall back to constructing patch from nominal type data. if patch == nil { versionedObject, err := scheme.Scheme.New(gvk) if err != nil { return nil, "", nil, err } lookupPatchMeta, err = strategicpatch.NewPatchMetaFromStruct(versionedObject) if err != nil { return nil, "", nil, err } patch, err = strategicpatch.CreateThreeWayMergePatch( lastSubmittedJSON, currentSubmittedJSON, liveOldJSON, lookupPatchMeta, true) if err != nil { return nil, "", nil, err } } return patch, types.StrategicMergePatchType, lookupPatchMeta, nil } // jsonMergePatch allows a Kubernetes resource to be "updated" by creating a three-way JSON merge // patch between the user's last submitted and current submitted versions of a resource, along with // the live object as it exists in the API server. func jsonMergePatch( lastSubmittedJSON, currentSubmittedJSON, liveOldJSON []byte, ) ([]byte, types.PatchType, error) { // // NOTE: Ordinarily we'd want to use `mergepatch.PreconditionFunc` to ensure that fields like // `apiVersion` and `kind` don't change, but in our case, changing these fields results in a hard // replace, so we need not worry about this. // patchType := types.MergePatchType patch, err := jsonmergepatch.CreateThreeWayJSONMergePatch( lastSubmittedJSON, currentSubmittedJSON, liveOldJSON) if err != nil { return nil, "", err } return patch, patchType, err } // GetResourceSchemasForClient obtains the OpenAPI schemas for all Kubernetes resources supported by // client. func GetResourceSchemasForClient( client discovery.OpenAPISchemaInterface, ) (openapi.Resources, error) { document, err := client.OpenAPISchema() if err != nil { return nil, err } return openapi.NewOpenAPIData(document) }
gvk := lastSubmitted.GroupVersionKind()
site.js
const publisherSchema = require('./publisher'); module.exports = {
}, "name": { "type": "string" }, "domain": { "type": "string" }, "cat": { "type": "array", "items": { "type": "string" } }, "sectioncat": { "type": "array", "items": { "type": "string" } }, "pagecat": { "type": "array", "items": { "type": "string" } }, "page": { "type": "string" }, "ref": { "type": "string" }, "search": { "type": "string" }, "mobile": { "type": "number" }, "privacypolicy": { "type": "number" }, "publisher": publisherSchema, "content": { "type": "object" }, "keywords": { "type": "string" }, "ext": { "type": "object" } } };
"type": "object", "properties": { "id": { "type": "string"
12940.py
from math import gcd # def getGcd(n, m): # while m != 0: # temp = n % m # n = m # m = temp # return n def
(n: int, m: int): # gcd = getGcd(n, m) # lcm = n * m // gcd # return [gcd, lcm] g = gcd(n, m) l = n * m // g return g, l if __name__ == "__main__": n = 3 m = 12 print(solution(n, m))
solution
icon.ml_classification_job-js.36c2efd1.chunk.js
(this["webpackJsonpmeteo-directo"]=this["webpackJsonpmeteo-directo"]||[]).push([[251],{342:function(e,t,r){"use strict";r.r(t),r.d(t,"icon",(function(){return h}));var n=r(1);function i(){return(i=Object.assign||function(e){for(var t=1;t<arguments.length;t++){var r=arguments[t];for(var n in r)Object.prototype.hasOwnProperty.call(r,n)&&(e[n]=r[n])}return e}).apply(this,arguments)}function
(e,t){if(null==e)return{};var r,n,i=function(e,t){if(null==e)return{};var r,n,i={},l=Object.keys(e);for(n=0;n<l.length;n++)r=l[n],t.indexOf(r)>=0||(i[r]=e[r]);return i}(e,t);if(Object.getOwnPropertySymbols){var l=Object.getOwnPropertySymbols(e);for(n=0;n<l.length;n++)r=l[n],t.indexOf(r)>=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(i[r]=e[r])}return i}var h=function(e){var t=e.title,r=e.titleId,h=l(e,["title","titleId"]);return n.createElement("svg",i({width:32,height:32,viewBox:"0 0 32 32",xmlns:"http://www.w3.org/2000/svg","aria-labelledby":r},h),t?n.createElement("title",{id:r},t):null,n.createElement("path",{d:"M7 16v5h2.038a13.179 13.179 0 000 2H7v5H5v-5H0v-2h5v-5h2zM7 0v5h5v2H7v5H5V7H0V5h5V0h2zm16 0v5h5v2h-5v2.038a13.179 13.179 0 00-2 0V7h-5V5h5V0h2z"}),n.createElement("path",{className:"euiIcon__fillSecondary",d:"M22 10c3.073 0 5.877 1.155 8 3.056v3.252A9.82 9.82 0 1016.307 30h-3.251A11.955 11.955 0 0110 22c0-6.627 5.373-12 12-12zm1 8v3h3v2h-3v3h-2v-3h-3v-2h3v-3h2z"}))}}}]); //# sourceMappingURL=icon.ml_classification_job-js.36c2efd1.chunk.js.map
l
models.rs
#![doc = "generated by AutoRust 0.1.0"] #![allow(non_camel_case_types)] #![allow(unused_imports)] use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Object {} #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Resource { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(rename = "type", default, skip_serializing_if = "Option::is_none")] pub type_: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub location: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub tags: Option<serde_json::Value>, #[serde(rename = "eTag", default, skip_serializing_if = "Option::is_none")] pub e_tag: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<JobResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupEngineBase { #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<backup_engine_base::BackupManagementType>, #[serde(rename = "registrationStatus", default, skip_serializing_if = "Option::is_none")] pub registration_status: Option<String>, #[serde(rename = "healthStatus", default, skip_serializing_if = "Option::is_none")] pub health_status: Option<String>, #[serde(rename = "backupEngineType", default, skip_serializing_if = "Option::is_none")] pub backup_engine_type: Option<String>, #[serde(rename = "canReRegister", default, skip_serializing_if = "Option::is_none")] pub can_re_register: Option<bool>, #[serde(rename = "backupEngineId", default, skip_serializing_if = "Option::is_none")] pub backup_engine_id: Option<String>, } pub mod backup_engine_base { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BmsBackupEngineQueryObject { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<bms_backup_engine_query_object::BackupManagementType>, } pub mod bms_backup_engine_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatus { #[serde(default, skip_serializing_if = "Option::is_none")] pub id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub name: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<operation_status::Status>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub error: Option<OperationStatusError>, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OperationStatusExtendedInfo>, } pub mod operation_status { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Invalid, InProgress, Succeeded, Failed, Canceled, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatusError { #[serde(default, skip_serializing_if = "Option::is_none")] pub code: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatusExtendedInfo { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<Job>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationWorkerResponse { #[serde(rename = "statusCode", default, skip_serializing_if = "Option::is_none")] pub status_code: Option<operation_worker_response::StatusCode>, #[serde(rename = "Headers", default, skip_serializing_if = "Option::is_none")] pub headers: Option<serde_json::Value>, } pub mod operation_worker_response { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum StatusCode { Continue, SwitchingProtocols, #[serde(rename = "OK")] Ok, Created, Accepted, NonAuthoritativeInformation, NoContent, ResetContent, PartialContent, MultipleChoices, Ambiguous, MovedPermanently, Moved, Found, Redirect, SeeOther, RedirectMethod, NotModified, UseProxy, Unused, TemporaryRedirect, RedirectKeepVerb, BadRequest, Unauthorized, PaymentRequired, Forbidden, NotFound, MethodNotAllowed, NotAcceptable, ProxyAuthenticationRequired, RequestTimeout, Conflict, Gone, LengthRequired, PreconditionFailed, RequestEntityTooLarge, RequestUriTooLong, UnsupportedMediaType, RequestedRangeNotSatisfiable, ExpectationFailed, UpgradeRequired, InternalServerError, NotImplemented, BadGateway, ServiceUnavailable, GatewayTimeout, HttpVersionNotSupported, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Job { #[serde(rename = "entityFriendlyName", default, skip_serializing_if = "Option::is_none")] pub entity_friendly_name: Option<String>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<job::BackupManagementType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(rename = "activityId", default, skip_serializing_if = "Option::is_none")] pub activity_id: Option<String>, #[serde(rename = "jobType", default, skip_serializing_if = "Option::is_none")] pub job_type: Option<String>, } pub mod job { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct JobQueryObject { #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<job_query_object::Status>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<job_query_object::BackupManagementType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub operation: Option<job_query_object::Operation>, #[serde(rename = "jobId", default, skip_serializing_if = "Option::is_none")] pub job_id: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, } pub mod job_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Status { Invalid, InProgress, Completed, Failed, CompletedWithWarnings, Cancelled, Cancelling, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum Operation { Invalid, ConfigureBackup, Backup, Restore, DisableBackup, DeleteBackupData, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkloadProtectableItemResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<WorkloadProtectableItemResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationResultInfoBase { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkloadProtectableItemResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<WorkloadProtectableItem>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectedItemResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ProtectedItem>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WorkloadProtectableItem { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<String>, #[serde(rename = "protectableItemType", default, skip_serializing_if = "Option::is_none")] pub protectable_item_type: Option<String>, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "protectionState", default, skip_serializing_if = "Option::is_none")] pub protection_state: Option<workload_protectable_item::ProtectionState>, } pub mod workload_protectable_item { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProtectionState { Invalid, NotProtected, Protecting, Protected, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BmspoQueryObject { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<bmspo_query_object::BackupManagementType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, } pub mod bmspo_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectedItemResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ProtectedItemResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectedItem { #[serde(rename = "protectedItemType", default, skip_serializing_if = "Option::is_none")] pub protected_item_type: Option<String>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<protected_item::BackupManagementType>, #[serde(rename = "workloadType", default, skip_serializing_if = "Option::is_none")] pub workload_type: Option<protected_item::WorkloadType>, #[serde(rename = "sourceResourceId", default, skip_serializing_if = "Option::is_none")] pub source_resource_id: Option<String>, #[serde(rename = "policyId", default, skip_serializing_if = "Option::is_none")] pub policy_id: Option<String>, #[serde(rename = "lastRecoveryPoint", default, skip_serializing_if = "Option::is_none")] pub last_recovery_point: Option<String>, } pub mod protected_item { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum WorkloadType { Invalid, #[serde(rename = "VM")] Vm, FileFolder, AzureSqlDb, #[serde(rename = "SQLDB")] Sqldb, Exchange, Sharepoint, #[serde(rename = "DPMUnknown")] DpmUnknown, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GetProtectedItemQueryObject { #[serde(default, skip_serializing_if = "Option::is_none")] pub expand: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupRequestResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BackupRequest>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectedItemQueryObject { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<protected_item_query_object::BackupManagementType>, #[serde(rename = "itemType", default, skip_serializing_if = "Option::is_none")] pub item_type: Option<protected_item_query_object::ItemType>, #[serde(rename = "policyName", default, skip_serializing_if = "Option::is_none")] pub policy_name: Option<String>, #[serde(rename = "containerName", default, skip_serializing_if = "Option::is_none")] pub container_name: Option<String>, } pub mod protected_item_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ItemType { Invalid, #[serde(rename = "VM")] Vm, FileFolder, AzureSqlDb, #[serde(rename = "SQLDB")] Sqldb, Exchange, Sharepoint, #[serde(rename = "DPMUnknown")] DpmUnknown, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoveryPointResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<RecoveryPointResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupRequest { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoveryPointResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RecoveryPoint>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionContainerResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ProtectionContainerResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RecoveryPoint { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BmsrpQueryObject { #[serde(rename = "startDate", default, skip_serializing_if = "Option::is_none")] pub start_date: Option<String>, #[serde(rename = "endDate", default, skip_serializing_if = "Option::is_none")] pub end_date: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionContainerResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ProtectionContainer>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionPolicyResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<ProtectionPolicyResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionContainer { #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<protection_container::BackupManagementType>, #[serde(rename = "registrationStatus", default, skip_serializing_if = "Option::is_none")] pub registration_status: Option<String>, #[serde(rename = "healthStatus", default, skip_serializing_if = "Option::is_none")] pub health_status: Option<String>, #[serde(rename = "containerType", skip_serializing)] pub container_type: Option<String>, #[serde(rename = "protectableObjectType", default, skip_serializing_if = "Option::is_none")] pub protectable_object_type: Option<String>, } pub mod protection_container { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BmsContainerQueryObject { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<bms_container_query_object::BackupManagementType>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, } pub mod bms_container_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionPolicyResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<ProtectionPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreRequestResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<RestoreRequest>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionPolicy { #[serde(rename = "protectedItemsCount", default, skip_serializing_if = "Option::is_none")] pub protected_items_count: Option<i32>, #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProtectionPolicyQueryObject { #[serde(rename = "backupManagementType", default, skip_serializing_if = "Option::is_none")] pub backup_management_type: Option<protection_policy_query_object::BackupManagementType>, } pub mod protection_policy_query_object { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupManagementType { Invalid, #[serde(rename = "AzureIaasVM")] AzureIaasVm, #[serde(rename = "MAB")] Mab, #[serde(rename = "DPM")] Dpm, AzureBackupServer, AzureSql, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IlrRequestResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<IlrRequest>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RestoreRequest { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IlrRequest { #[serde(rename = "objectType", default, skip_serializing_if = "Option::is_none")] pub object_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureBackupServerEngine { #[serde(flatten)] pub backup_engine_base: BackupEngineBase, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DpmBackupEngine { #[serde(flatten)] pub backup_engine_base: BackupEngineBase, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureSqlContainer { #[serde(flatten)] pub protection_container: ProtectionContainer, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaaSvmContainer { #[serde(flatten)] pub protection_container: ProtectionContainer, #[serde(rename = "virtualMachineId", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_id: Option<String>, #[serde(rename = "virtualMachineVersion", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_version: Option<String>, #[serde(rename = "resourceGroup", default, skip_serializing_if = "Option::is_none")] pub resource_group: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabContainer { #[serde(flatten)] pub protection_container: ProtectionContainer, #[serde(rename = "canReRegister", default, skip_serializing_if = "Option::is_none")] pub can_re_register: Option<bool>, #[serde(rename = "containerId", default, skip_serializing_if = "Option::is_none")] pub container_id: Option<i64>, #[serde(rename = "protectedItemCount", default, skip_serializing_if = "Option::is_none")] pub protected_item_count: Option<i64>, #[serde(rename = "agentVersion", default, skip_serializing_if = "Option::is_none")] pub agent_version: Option<String>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<MabContainerExtendedInfo>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabContainerExtendedInfo { #[serde(rename = "lastRefreshedAt", default, skip_serializing_if = "Option::is_none")] pub last_refreshed_at: Option<String>, #[serde(rename = "backupItemType", default, skip_serializing_if = "Option::is_none")] pub backup_item_type: Option<mab_container_extended_info::BackupItemType>, #[serde(rename = "backupItems", default, skip_serializing_if = "Vec::is_empty")] pub backup_items: Vec<String>, #[serde(rename = "policyName", default, skip_serializing_if = "Option::is_none")] pub policy_name: Option<String>, #[serde(rename = "lastBackupStatus", default, skip_serializing_if = "Option::is_none")] pub last_backup_status: Option<String>, } pub mod mab_container_extended_info { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum BackupItemType { Invalid, #[serde(rename = "VM")] Vm, FileFolder, AzureSqlDb, #[serde(rename = "SQLDB")] Sqldb, Exchange, Sharepoint, #[serde(rename = "DPMUnknown")] DpmUnknown, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaaSvmProtectableItem { #[serde(flatten)] pub workload_protectable_item: WorkloadProtectableItem, #[serde(rename = "virtualMachineId", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmProtectedItem { #[serde(flatten)] pub protected_item: ProtectedItem, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "virtualMachineId", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_id: Option<String>, #[serde(rename = "protectionStatus", default, skip_serializing_if = "Option::is_none")] pub protection_status: Option<String>, #[serde(rename = "protectionState", default, skip_serializing_if = "Option::is_none")] pub protection_state: Option<azure_iaa_svm_protected_item::ProtectionState>, #[serde(rename = "lastBackupStatus", default, skip_serializing_if = "Option::is_none")] pub last_backup_status: Option<String>, #[serde(rename = "lastBackupTime", default, skip_serializing_if = "Option::is_none")] pub last_backup_time: Option<String>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<AzureIaaSvmProtectedItemExtendedInfo>, } pub mod azure_iaa_svm_protected_item { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProtectionState { Invalid, #[serde(rename = "IRPending")] IrPending, Protected, ProtectionError, ProtectionStopped, ProtectionPaused, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmProtectedItemExtendedInfo { #[serde(rename = "oldestRecoveryPoint", default, skip_serializing_if = "Option::is_none")] pub oldest_recovery_point: Option<String>, #[serde(rename = "recoveryPointCount", default, skip_serializing_if = "Option::is_none")] pub recovery_point_count: Option<i32>, #[serde(rename = "policyInconsistent", default, skip_serializing_if = "Option::is_none")] pub policy_inconsistent: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabFileFolderProtectedItem { #[serde(flatten)] pub protected_item: ProtectedItem, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "computerName", default, skip_serializing_if = "Option::is_none")] pub computer_name: Option<String>, #[serde(rename = "lastBackupStatus", default, skip_serializing_if = "Option::is_none")] pub last_backup_status: Option<String>, #[serde(rename = "protectionState", default, skip_serializing_if = "Option::is_none")] pub protection_state: Option<String>, #[serde(rename = "isScheduledForDeferredDelete", default, skip_serializing_if = "Option::is_none")] pub is_scheduled_for_deferred_delete: Option<bool>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<MabFileFolderProtectedItemExtendedInfo>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabFileFolderProtectedItemExtendedInfo { #[serde(rename = "lastRefreshedAt", default, skip_serializing_if = "Option::is_none")] pub last_refreshed_at: Option<String>, #[serde(rename = "oldestRecoveryPoint", default, skip_serializing_if = "Option::is_none")] pub oldest_recovery_point: Option<String>, #[serde(rename = "recoveryPointCount", default, skip_serializing_if = "Option::is_none")] pub recovery_point_count: Option<i32>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureSqlProtectedItem { #[serde(flatten)] pub protected_item: ProtectedItem, #[serde(rename = "protectedItemDataId", default, skip_serializing_if = "Option::is_none")] pub protected_item_data_id: Option<String>, #[serde(rename = "protectionState", default, skip_serializing_if = "Option::is_none")] pub protection_state: Option<azure_sql_protected_item::ProtectionState>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<AzureSqlProtectedItemExtendedInfo>, } pub mod azure_sql_protected_item { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ProtectionState { Invalid, #[serde(rename = "IRPending")] IrPending, Protected, ProtectionError, ProtectionStopped, ProtectionPaused, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureSqlProtectedItemExtendedInfo { #[serde(rename = "oldestRecoveryPoint", default, skip_serializing_if = "Option::is_none")] pub oldest_recovery_point: Option<String>, #[serde(rename = "recoveryPointCount", default, skip_serializing_if = "Option::is_none")] pub recovery_point_count: Option<i32>, #[serde(rename = "policyState", default, skip_serializing_if = "Option::is_none")] pub policy_state: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaasVmBackupRequest { #[serde(flatten)] pub backup_request: BackupRequest, #[serde(rename = "recoveryPointExpiryTimeInUTC", default, skip_serializing_if = "Option::is_none")] pub recovery_point_expiry_time_in_utc: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmProtectionPolicy { #[serde(flatten)] pub protection_policy: ProtectionPolicy, #[serde(rename = "schedulePolicy", default, skip_serializing_if = "Option::is_none")] pub schedule_policy: Option<SchedulePolicy>, #[serde(rename = "retentionPolicy", default, skip_serializing_if = "Option::is_none")] pub retention_policy: Option<RetentionPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SchedulePolicy { #[serde(rename = "schedulePolicyType", default, skip_serializing_if = "Option::is_none")] pub schedule_policy_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RetentionPolicy { #[serde(rename = "retentionPolicyType", default, skip_serializing_if = "Option::is_none")] pub retention_policy_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabProtectionPolicy { #[serde(flatten)] pub protection_policy: ProtectionPolicy, #[serde(rename = "schedulePolicy", default, skip_serializing_if = "Option::is_none")] pub schedule_policy: Option<SchedulePolicy>, #[serde(rename = "retentionPolicy", default, skip_serializing_if = "Option::is_none")] pub retention_policy: Option<RetentionPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureSqlProtectionPolicy { #[serde(flatten)] pub protection_policy: ProtectionPolicy, #[serde(rename = "retentionPolicy", default, skip_serializing_if = "Option::is_none")] pub retention_policy: Option<RetentionPolicy>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaasVmRecoveryPoint { #[serde(flatten)] pub recovery_point: RecoveryPoint, #[serde(rename = "recoveryPointType", default, skip_serializing_if = "Option::is_none")] pub recovery_point_type: Option<String>, #[serde(rename = "recoveryPointTime", default, skip_serializing_if = "Option::is_none")] pub recovery_point_time: Option<String>, #[serde(rename = "recoveryPointAdditionalInfo", default, skip_serializing_if = "Option::is_none")] pub recovery_point_additional_info: Option<String>, #[serde(rename = "sourceVMStorageType", default, skip_serializing_if = "Option::is_none")] pub source_vm_storage_type: Option<String>, #[serde(rename = "isSourceVMEncrypted", default, skip_serializing_if = "Option::is_none")] pub is_source_vm_encrypted: Option<bool>, #[serde(rename = "keyAndSecret", default, skip_serializing_if = "Option::is_none")] pub key_and_secret: Option<KeyAndSecretDetails>, #[serde(rename = "isInstantILRSessionActive", default, skip_serializing_if = "Option::is_none")] pub is_instant_ilr_session_active: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KeyAndSecretDetails { #[serde(rename = "kekDetails", default, skip_serializing_if = "Option::is_none")] pub kek_details: Option<KekDetails>, #[serde(rename = "bekDetails", default, skip_serializing_if = "Option::is_none")] pub bek_details: Option<BekDetails>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct KekDetails { #[serde(rename = "keyUrl", default, skip_serializing_if = "Option::is_none")] pub key_url: Option<String>, #[serde(rename = "keyVaultId", default, skip_serializing_if = "Option::is_none")] pub key_vault_id: Option<String>, #[serde(rename = "keyBackupData", default, skip_serializing_if = "Option::is_none")] pub key_backup_data: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BekDetails { #[serde(rename = "secretUrl", default, skip_serializing_if = "Option::is_none")] pub secret_url: Option<String>, #[serde(rename = "secretVaultId", default, skip_serializing_if = "Option::is_none")] pub secret_vault_id: Option<String>, #[serde(rename = "secretData", default, skip_serializing_if = "Option::is_none")] pub secret_data: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct GenericRecoveryPoint { #[serde(flatten)] pub recovery_point: RecoveryPoint, #[serde(rename = "friendlyName", default, skip_serializing_if = "Option::is_none")] pub friendly_name: Option<String>, #[serde(rename = "recoveryPointType", default, skip_serializing_if = "Option::is_none")] pub recovery_point_type: Option<String>, #[serde(rename = "recoveryPointTime", default, skip_serializing_if = "Option::is_none")] pub recovery_point_time: Option<String>, #[serde(rename = "recoveryPointAdditionalInfo", default, skip_serializing_if = "Option::is_none")] pub recovery_point_additional_info: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaasVmRestoreRequest { #[serde(flatten)] pub restore_request: RestoreRequest, #[serde(rename = "recoveryPointId", default, skip_serializing_if = "Option::is_none")] pub recovery_point_id: Option<String>, #[serde(rename = "recoveryType", default, skip_serializing_if = "Option::is_none")] pub recovery_type: Option<iaas_vm_restore_request::RecoveryType>, #[serde(rename = "sourceResourceId", default, skip_serializing_if = "Option::is_none")] pub source_resource_id: Option<String>, #[serde(rename = "targetVirtualMachineId", default, skip_serializing_if = "Option::is_none")] pub target_virtual_machine_id: Option<String>, #[serde(rename = "targetResourceGroupId", default, skip_serializing_if = "Option::is_none")] pub target_resource_group_id: Option<String>, #[serde(rename = "storageAccountId", default, skip_serializing_if = "Option::is_none")] pub storage_account_id: Option<String>, #[serde(rename = "virtualNetworkId", default, skip_serializing_if = "Option::is_none")] pub virtual_network_id: Option<String>, #[serde(rename = "subnetId", default, skip_serializing_if = "Option::is_none")] pub subnet_id: Option<String>, #[serde(rename = "targetDomainNameId", default, skip_serializing_if = "Option::is_none")] pub target_domain_name_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub region: Option<String>, #[serde(rename = "affinityGroup", default, skip_serializing_if = "Option::is_none")] pub affinity_group: Option<String>, #[serde(rename = "createNewCloudService", default, skip_serializing_if = "Option::is_none")] pub create_new_cloud_service: Option<bool>, #[serde(rename = "encryptionDetails", default, skip_serializing_if = "Option::is_none")] pub encryption_details: Option<EncryptionDetails>, } pub mod iaas_vm_restore_request { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RecoveryType { Invalid, OriginalLocation, AlternateLocation, RestoreDisks, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct EncryptionDetails { #[serde(rename = "encryptionEnabled", default, skip_serializing_if = "Option::is_none")] pub encryption_enabled: Option<bool>, #[serde(rename = "kekUrl", default, skip_serializing_if = "Option::is_none")] pub kek_url: Option<String>, #[serde(rename = "secretKeyUrl", default, skip_serializing_if = "Option::is_none")] pub secret_key_url: Option<String>, #[serde(rename = "kekVaultId", default, skip_serializing_if = "Option::is_none")] pub kek_vault_id: Option<String>, #[serde(rename = "secretKeyVaultId", default, skip_serializing_if = "Option::is_none")] pub secret_key_vault_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct IaasVmilrRegistrationRequest { #[serde(flatten)] pub ilr_request: IlrRequest, #[serde(rename = "recoveryPointId", default, skip_serializing_if = "Option::is_none")] pub recovery_point_id: Option<String>, #[serde(rename = "virtualMachineId", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_id: Option<String>, #[serde(rename = "initiatorName", default, skip_serializing_if = "Option::is_none")] pub initiator_name: Option<String>, #[serde(rename = "renewExistingRegistration", default, skip_serializing_if = "Option::is_none")] pub renew_existing_registration: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmJob { #[serde(flatten)] pub job: Job, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(rename = "actionsInfo", default, skip_serializing_if = "Vec::is_empty")] pub actions_info: Vec<String>, #[serde(rename = "errorDetails", default, skip_serializing_if = "Vec::is_empty")] pub error_details: Vec<AzureIaaSvmErrorInfo>, #[serde(rename = "virtualMachineVersion", default, skip_serializing_if = "Option::is_none")] pub virtual_machine_version: Option<String>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<AzureIaaSvmJobExtendedInfo>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmErrorInfo { #[serde(rename = "errorCode", default, skip_serializing_if = "Option::is_none")] pub error_code: Option<i32>, #[serde(rename = "errorTitle", default, skip_serializing_if = "Option::is_none")] pub error_title: Option<String>, #[serde(rename = "errorString", default, skip_serializing_if = "Option::is_none")] pub error_string: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub recommendations: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmJobExtendedInfo { #[serde(rename = "tasksList", default, skip_serializing_if = "Vec::is_empty")] pub tasks_list: Vec<AzureIaaSvmJobTaskDetails>, #[serde(rename = "propertyBag", default, skip_serializing_if = "Option::is_none")] pub property_bag: Option<serde_json::Value>, #[serde(rename = "progressPercentage", default, skip_serializing_if = "Option::is_none")] pub progress_percentage: Option<f64>, #[serde(rename = "dynamicErrorMessage", default, skip_serializing_if = "Option::is_none")] pub dynamic_error_message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSvmJobTaskDetails { #[serde(rename = "taskId", default, skip_serializing_if = "Option::is_none")] pub task_id: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(rename = "instanceId", default, skip_serializing_if = "Option::is_none")]
pub status: Option<String>, #[serde(rename = "progressPercentage", default, skip_serializing_if = "Option::is_none")] pub progress_percentage: Option<f64>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DpmJob { #[serde(flatten)] pub job: Job, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(rename = "dpmServerName", default, skip_serializing_if = "Option::is_none")] pub dpm_server_name: Option<String>, #[serde(rename = "containerName", default, skip_serializing_if = "Option::is_none")] pub container_name: Option<String>, #[serde(rename = "containerType", default, skip_serializing_if = "Option::is_none")] pub container_type: Option<String>, #[serde(rename = "workloadType", default, skip_serializing_if = "Option::is_none")] pub workload_type: Option<String>, #[serde(rename = "actionsInfo", default, skip_serializing_if = "Vec::is_empty")] pub actions_info: Vec<String>, #[serde(rename = "errorDetails", default, skip_serializing_if = "Vec::is_empty")] pub error_details: Vec<DpmErrorInfo>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<DpmJobExtendedInfo>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DpmErrorInfo { #[serde(rename = "errorString", default, skip_serializing_if = "Option::is_none")] pub error_string: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub recommendations: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DpmJobExtendedInfo { #[serde(rename = "tasksList", default, skip_serializing_if = "Vec::is_empty")] pub tasks_list: Vec<DpmJobTaskDetails>, #[serde(rename = "propertyBag", default, skip_serializing_if = "Option::is_none")] pub property_bag: Option<serde_json::Value>, #[serde(rename = "dynamicErrorMessage", default, skip_serializing_if = "Option::is_none")] pub dynamic_error_message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DpmJobTaskDetails { #[serde(rename = "taskId", default, skip_serializing_if = "Option::is_none")] pub task_id: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabJob { #[serde(flatten)] pub job: Job, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(rename = "actionsInfo", default, skip_serializing_if = "Vec::is_empty")] pub actions_info: Vec<String>, #[serde(rename = "mabServerName", default, skip_serializing_if = "Option::is_none")] pub mab_server_name: Option<String>, #[serde(rename = "mabServerType", default, skip_serializing_if = "Option::is_none")] pub mab_server_type: Option<mab_job::MabServerType>, #[serde(rename = "workloadType", default, skip_serializing_if = "Option::is_none")] pub workload_type: Option<mab_job::WorkloadType>, #[serde(rename = "errorDetails", default, skip_serializing_if = "Vec::is_empty")] pub error_details: Vec<MabErrorInfo>, #[serde(rename = "extendedInfo", default, skip_serializing_if = "Option::is_none")] pub extended_info: Option<MabJobExtendedInfo>, } pub mod mab_job { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum MabServerType { Invalid, Unknown, #[serde(rename = "IaasVMContainer")] IaasVmContainer, #[serde(rename = "IaasVMServiceContainer")] IaasVmServiceContainer, #[serde(rename = "DPMContainer")] DpmContainer, #[serde(rename = "DPMVenusContainer")] DpmVenusContainer, #[serde(rename = "MABContainer")] MabContainer, ClusterResource, AzureSqlContainer, WindowsServer, Windows, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum WorkloadType { Invalid, #[serde(rename = "VM")] Vm, FileFolder, AzureSqlDb, #[serde(rename = "SQLDB")] Sqldb, Exchange, Sharepoint, #[serde(rename = "DPMUnknown")] DpmUnknown, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabErrorInfo { #[serde(rename = "errorString", default, skip_serializing_if = "Option::is_none")] pub error_string: Option<String>, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub recommendations: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabJobExtendedInfo { #[serde(rename = "tasksList", default, skip_serializing_if = "Vec::is_empty")] pub tasks_list: Vec<MabJobTaskDetails>, #[serde(rename = "propertyBag", default, skip_serializing_if = "Option::is_none")] pub property_bag: Option<serde_json::Value>, #[serde(rename = "dynamicErrorMessage", default, skip_serializing_if = "Option::is_none")] pub dynamic_error_message: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MabJobTaskDetails { #[serde(rename = "taskId", default, skip_serializing_if = "Option::is_none")] pub task_id: Option<String>, #[serde(rename = "startTime", default, skip_serializing_if = "Option::is_none")] pub start_time: Option<String>, #[serde(rename = "endTime", default, skip_serializing_if = "Option::is_none")] pub end_time: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub status: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationResultInfo { #[serde(flatten)] pub operation_result_info_base: OperationResultInfoBase, #[serde(rename = "jobList", default, skip_serializing_if = "Vec::is_empty")] pub job_list: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ExportJobsOperationResultInfo { #[serde(flatten)] pub operation_result_info_base: OperationResultInfoBase, #[serde(rename = "blobUrl", default, skip_serializing_if = "Option::is_none")] pub blob_url: Option<String>, #[serde(rename = "blobSasKey", default, skip_serializing_if = "Option::is_none")] pub blob_sas_key: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSComputeVmContainer { #[serde(flatten)] pub iaa_svm_container: IaaSvmContainer, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSClassicComputeVmContainer { #[serde(flatten)] pub iaa_svm_container: IaaSvmContainer, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSComputeVmProtectableItem { #[serde(flatten)] pub iaa_svm_protectable_item: IaaSvmProtectableItem, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSClassicComputeVmProtectableItem { #[serde(flatten)] pub iaa_svm_protectable_item: IaaSvmProtectableItem, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSComputeVmProtectedItem { #[serde(flatten)] pub azure_iaa_svm_protected_item: AzureIaaSvmProtectedItem, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct AzureIaaSClassicComputeVmProtectedItem { #[serde(flatten)] pub azure_iaa_svm_protected_item: AzureIaaSvmProtectedItem, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatusJobExtendedInfo { #[serde(flatten)] pub operation_status_extended_info: OperationStatusExtendedInfo, #[serde(rename = "jobId", default, skip_serializing_if = "Option::is_none")] pub job_id: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatusProvisionIlrExtendedInfo { #[serde(flatten)] pub operation_status_extended_info: OperationStatusExtendedInfo, #[serde(rename = "recoveryTarget", default, skip_serializing_if = "Option::is_none")] pub recovery_target: Option<InstantItemRecoveryTarget>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct InstantItemRecoveryTarget { #[serde(rename = "clientScripts", default, skip_serializing_if = "Vec::is_empty")] pub client_scripts: Vec<ClientScriptForConnect>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ClientScriptForConnect { #[serde(rename = "scriptContent", default, skip_serializing_if = "Option::is_none")] pub script_content: Option<String>, #[serde(rename = "scriptExtension", default, skip_serializing_if = "Option::is_none")] pub script_extension: Option<String>, #[serde(rename = "osType", default, skip_serializing_if = "Option::is_none")] pub os_type: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationStatusJobsExtendedInfo { #[serde(flatten)] pub operation_status_extended_info: OperationStatusExtendedInfo, #[serde(rename = "jobIds", default, skip_serializing_if = "Vec::is_empty")] pub job_ids: Vec<String>, #[serde(rename = "failedJobsError", default, skip_serializing_if = "Option::is_none")] pub failed_jobs_error: Option<serde_json::Value>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LongTermSchedulePolicy { #[serde(flatten)] pub schedule_policy: SchedulePolicy, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SimpleSchedulePolicy { #[serde(flatten)] pub schedule_policy: SchedulePolicy, #[serde(rename = "scheduleRunFrequency", default, skip_serializing_if = "Option::is_none")] pub schedule_run_frequency: Option<simple_schedule_policy::ScheduleRunFrequency>, #[serde(rename = "scheduleRunDays", default, skip_serializing_if = "Vec::is_empty")] pub schedule_run_days: Vec<String>, #[serde(rename = "scheduleRunTimes", default, skip_serializing_if = "Vec::is_empty")] pub schedule_run_times: Vec<String>, #[serde(rename = "scheduleWeeklyFrequency", default, skip_serializing_if = "Option::is_none")] pub schedule_weekly_frequency: Option<i32>, } pub mod simple_schedule_policy { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum ScheduleRunFrequency { Invalid, Daily, Weekly, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct SimpleRetentionPolicy { #[serde(flatten)] pub retention_policy: RetentionPolicy, #[serde(rename = "retentionDuration", default, skip_serializing_if = "Option::is_none")] pub retention_duration: Option<RetentionDuration>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct RetentionDuration { #[serde(default, skip_serializing_if = "Option::is_none")] pub count: Option<i32>, #[serde(rename = "durationType", default, skip_serializing_if = "Option::is_none")] pub duration_type: Option<retention_duration::DurationType>, } pub mod retention_duration { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum DurationType { Invalid, Days, Weeks, Months, Years, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct LongTermRetentionPolicy { #[serde(flatten)] pub retention_policy: RetentionPolicy, #[serde(rename = "dailySchedule", default, skip_serializing_if = "Option::is_none")] pub daily_schedule: Option<DailyRetentionSchedule>, #[serde(rename = "weeklySchedule", default, skip_serializing_if = "Option::is_none")] pub weekly_schedule: Option<WeeklyRetentionSchedule>, #[serde(rename = "monthlySchedule", default, skip_serializing_if = "Option::is_none")] pub monthly_schedule: Option<MonthlyRetentionSchedule>, #[serde(rename = "yearlySchedule", default, skip_serializing_if = "Option::is_none")] pub yearly_schedule: Option<YearlyRetentionSchedule>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DailyRetentionSchedule { #[serde(rename = "retentionTimes", default, skip_serializing_if = "Vec::is_empty")] pub retention_times: Vec<String>, #[serde(rename = "retentionDuration", default, skip_serializing_if = "Option::is_none")] pub retention_duration: Option<RetentionDuration>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WeeklyRetentionSchedule { #[serde(rename = "daysOfTheWeek", default, skip_serializing_if = "Vec::is_empty")] pub days_of_the_week: Vec<String>, #[serde(rename = "retentionTimes", default, skip_serializing_if = "Vec::is_empty")] pub retention_times: Vec<String>, #[serde(rename = "retentionDuration", default, skip_serializing_if = "Option::is_none")] pub retention_duration: Option<RetentionDuration>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct MonthlyRetentionSchedule { #[serde(rename = "retentionScheduleFormatType", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_format_type: Option<monthly_retention_schedule::RetentionScheduleFormatType>, #[serde(rename = "retentionScheduleDaily", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_daily: Option<DailyRetentionFormat>, #[serde(rename = "retentionScheduleWeekly", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_weekly: Option<WeeklyRetentionFormat>, #[serde(rename = "retentionTimes", default, skip_serializing_if = "Vec::is_empty")] pub retention_times: Vec<String>, #[serde(rename = "retentionDuration", default, skip_serializing_if = "Option::is_none")] pub retention_duration: Option<RetentionDuration>, } pub mod monthly_retention_schedule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RetentionScheduleFormatType { Invalid, Daily, Weekly, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct YearlyRetentionSchedule { #[serde(rename = "retentionScheduleFormatType", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_format_type: Option<yearly_retention_schedule::RetentionScheduleFormatType>, #[serde(rename = "monthsOfYear", default, skip_serializing_if = "Vec::is_empty")] pub months_of_year: Vec<String>, #[serde(rename = "retentionScheduleDaily", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_daily: Option<DailyRetentionFormat>, #[serde(rename = "retentionScheduleWeekly", default, skip_serializing_if = "Option::is_none")] pub retention_schedule_weekly: Option<WeeklyRetentionFormat>, #[serde(rename = "retentionTimes", default, skip_serializing_if = "Vec::is_empty")] pub retention_times: Vec<String>, #[serde(rename = "retentionDuration", default, skip_serializing_if = "Option::is_none")] pub retention_duration: Option<RetentionDuration>, } pub mod yearly_retention_schedule { use super::*; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub enum RetentionScheduleFormatType { Invalid, Daily, Weekly, } } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct DailyRetentionFormat { #[serde(rename = "daysOfTheMonth", default, skip_serializing_if = "Vec::is_empty")] pub days_of_the_month: Vec<Day>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct WeeklyRetentionFormat { #[serde(rename = "daysOfTheWeek", default, skip_serializing_if = "Vec::is_empty")] pub days_of_the_week: Vec<String>, #[serde(rename = "weeksOfTheMonth", default, skip_serializing_if = "Vec::is_empty")] pub weeks_of_the_month: Vec<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Day { #[serde(default, skip_serializing_if = "Option::is_none")] pub date: Option<i32>, #[serde(rename = "isLast", default, skip_serializing_if = "Option::is_none")] pub is_last: Option<bool>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ResourceList { #[serde(rename = "nextLink", default, skip_serializing_if = "Option::is_none")] pub next_link: Option<String>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupEngineBaseResourceList { #[serde(flatten)] pub resource_list: ResourceList, #[serde(default, skip_serializing_if = "Vec::is_empty")] pub value: Vec<BackupEngineBaseResource>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct BackupEngineBaseResource { #[serde(flatten)] pub resource: Resource, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<BackupEngineBase>, } #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct OperationResultInfoBaseResource { #[serde(flatten)] pub operation_worker_response: OperationWorkerResponse, #[serde(default, skip_serializing_if = "Option::is_none")] pub properties: Option<OperationResultInfoBase>, }
pub instance_id: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")] pub duration: Option<String>, #[serde(default, skip_serializing_if = "Option::is_none")]
main.rs
#![deny(unsafe_code)] #![no_main] #![no_std] use aux5::{entry, prelude::*, Delay, Leds}; #[entry] fn main() -> ! { let (mut delay, mut leds): (Delay, Leds) = aux5::init(); let half_period:u16 = 50; loop { // MY SOLUTION for 05-led-roulette : https://docs.rust-embedded.org/discovery/05-led-roulette/my-solution.html for curr in 0..8 { led_roulette(curr,8,half_period,&mut delay,&mut leds); } // Practice LOGIC : round + tail follows off one by one // led_control(0,true,half_period,&mut delay,&mut leds); // led_control(0,false,half_period,&mut delay,&mut leds); } } fn led_roulette(c:i32, mut limit:i32,half_period:u16,delay:&mut Delay,leds:&mut Leds){ if limit!=0{ let mut n = c+1; if n<0 { n *=-1; } if limit<0 { limit *= -1; } while n>=limit { n -= limit; } leds[n as usize].on(); delay.delay_ms(half_period); leds[c as usize].off(); delay.delay_ms(half_period); } } fn led_control(led_index:usize,led_status:bool,half_period:u16,delay:&mut Delay,leds:&mut Leds)
{ if led_index < 8 { match led_status { true => { leds[led_index].on(); delay.delay_ms(half_period); }, false => { leds[led_index].off(); delay.delay_ms(half_period); }, } led_control(led_index+1,led_status,half_period,delay,leds); } }
utils.py
def iter_ngram(seq, max_order, min_order=None, sent_start=None, sent_end=None): if min_order > max_order: raise ValueError("min_order > max_order (%d > %d)" % (min_order, max_order)) if min_order is None: min_order = max_order orders = range(min_order, max_order+1) it = iter(seq) if sent_start is not None: buffer = [sent_start]*max_order else: buffer = [] last_countdown = None while True: if last_countdown is None: try: item = it.next() except StopIteration: if sent_end is None:
item = sent_end else: if last_countdown <= 1: break item = sent_end last_countdown -= 1 buffer.append(item) del buffer[:-max_order] for n in orders: if len(buffer) < n: continue yield buffer[-n:] def iter_ngram_pad(seq, max_order, min_order=None, sent_start=None, sent_end=None, padding=[]): if len(padding) < max_order-1: raise ValueError("padding must have at least %d items" % (max_order-1)) offset = len(padding)-max_order for ngram in iter_ngram(seq, max_order, min_order, sent_start, sent_end): n = len(ngram) yield ngram+padding[offset+n:]
break else: last_countdown = max_order - 1
mod.rs
use bevy_ecs::reflect::ReflectComponent; use bevy_math::{Mat4, Vec3, Vec3A, Vec4}; use bevy_reflect::Reflect; /// An Axis-Aligned Bounding Box #[derive(Clone, Debug, Default, Reflect)] #[reflect(Component)] pub struct Aabb { pub center: Vec3, pub half_extents: Vec3, } impl Aabb { pub fn from_min_max(minimum: Vec3, maximum: Vec3) -> Self { let center = 0.5 * (maximum + minimum); let half_extents = 0.5 * (maximum - minimum); Self { center, half_extents, } } /// Calculate the relative radius of the AABB with respect to a plane pub fn relative_radius(&self, p_normal: &Vec3A, axes: &[Vec3A]) -> f32 { // NOTE: dot products on Vec3A use SIMD and even with the overhead of conversion are net faster than Vec3 let half_extents = Vec3A::from(self.half_extents); Vec3A::new( p_normal.dot(axes[0]), p_normal.dot(axes[1]), p_normal.dot(axes[2]), ) .abs() .dot(half_extents) } } #[derive(Debug, Default)] pub struct Sphere { pub center: Vec3, pub radius: f32, } impl Sphere { pub fn intersects_obb(&self, aabb: &Aabb, model_to_world: &Mat4) -> bool { let aabb_center_world = *model_to_world * aabb.center.extend(1.0); let axes = [ Vec3A::from(model_to_world.x_axis), Vec3A::from(model_to_world.y_axis), Vec3A::from(model_to_world.z_axis), ]; let v = Vec3A::from(aabb_center_world) - Vec3A::from(self.center); let d = v.length(); let relative_radius = aabb.relative_radius(&(v / d), &axes); d < self.radius + relative_radius } } /// A plane defined by a normal and distance value along the normal /// Any point p is in the plane if n.p = d /// For planes defining half-spaces such as for frusta, if n.p > d then p is on the positive side of the plane. #[derive(Clone, Copy, Debug, Default)] pub struct Plane { pub normal_d: Vec4, } #[derive(Clone, Copy, Debug, Default)] pub struct Frustum { pub planes: [Plane; 6], } impl Frustum { // NOTE: This approach of extracting the frustum planes from the view // projection matrix is from Foundations of Game Engine Development 2 // Rendering by Lengyel. Slight modification has been made for when // the far plane is infinite but we still want to cull to a far plane. pub fn from_view_projection( view_projection: &Mat4, view_translation: &Vec3, view_backward: &Vec3, far: f32, ) -> Self { let row3 = view_projection.row(3); let mut planes = [Plane::default(); 6]; for (i, plane) in planes.iter_mut().enumerate().take(5) { let row = view_projection.row(i / 2); plane.normal_d = if (i & 1) == 0 && i != 4 { row3 + row } else { row3 - row } .normalize(); } let far_center = *view_translation - far * *view_backward; planes[5].normal_d = view_backward .extend(-view_backward.dot(far_center)) .normalize(); Self { planes } } pub fn intersects_sphere(&self, sphere: &Sphere) -> bool { for plane in &self.planes { if plane.normal_d.dot(sphere.center.extend(1.0)) + sphere.radius <= 0.0 { return false; } } true } pub fn intersects_obb(&self, aabb: &Aabb, model_to_world: &Mat4) -> bool { let aabb_center_world = *model_to_world * aabb.center.extend(1.0); let axes = [ Vec3A::from(model_to_world.x_axis), Vec3A::from(model_to_world.y_axis), Vec3A::from(model_to_world.z_axis), ]; for plane in &self.planes { let p_normal = Vec3A::from(plane.normal_d); let relative_radius = aabb.relative_radius(&p_normal, &axes); if plane.normal_d.dot(aabb_center_world) + relative_radius <= 0.0 { return false; } } true } } #[derive(Debug, Default)] pub struct CubemapFrusta { pub frusta: [Frustum; 6], } impl CubemapFrusta { pub fn
(&self) -> impl DoubleEndedIterator<Item = &Frustum> { self.frusta.iter() } pub fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Frustum> { self.frusta.iter_mut() } }
iter
error.rs
// Copyright 2019 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::model::{ moniker::{AbsoluteMoniker, PartialMoniker}, resolver::ResolverError, runner::RunnerError, storage::StorageError, }, anyhow::Error, clonable_error::ClonableError, thiserror::Error, }; /// Errors produced by `Model`. #[derive(Debug, Error, Clone)] pub enum ModelError { #[error("component instance {} not found in realm {}", child, moniker)] InstanceNotFoundInRealm { moniker: AbsoluteMoniker, child: PartialMoniker }, #[error("component instance {} in realm {} already exists", child, moniker)] InstanceAlreadyExists { moniker: AbsoluteMoniker, child: PartialMoniker }, #[error("component instance with moniker {} has shut down", moniker)] InstanceShutDown { moniker: AbsoluteMoniker }, #[error("component instance {} not found", moniker)] InstanceNotFound { moniker: AbsoluteMoniker }, #[error("component collection not found with name {}", name)] CollectionNotFound { name: String }, #[error("{} is not supported", feature)] Unsupported { feature: String }, #[error("component declaration invalid")] ComponentInvalid, #[error("component manifest invalid")] ManifestInvalid { url: String, #[source] err: ClonableError, }, #[error("The model is not available")] ModelNotAvailable, #[error("namespace creation failed: {}", err)] NamespaceCreationFailed { #[source] err: ClonableError, }, #[error("resolver error")] ResolverError { #[source] err: ResolverError, }, #[error("runner error: {}", err)] RunnerError { #[source] err: RunnerError, }, #[error("capability discovery error")] CapabilityDiscoveryError { #[source] err: ClonableError, }, #[error("storage error")] StorageError { #[source] err: StorageError, }, #[error("failed to add entry {} to {}", entry_name, moniker)] AddEntryError { moniker: AbsoluteMoniker, entry_name: String }, #[error("failed to remove entry {}", entry_name)] RemoveEntryError { entry_name: String }, #[error("open directory error")] OpenDirectoryError { moniker: AbsoluteMoniker, relative_path: String }, #[error("insufficient resources to complete operation")] InsufficientResources, #[error("failed to send {} to runner for component {}", operation, moniker)] RunnerCommunicationError { moniker: AbsoluteMoniker, operation: String, #[source] err: ClonableError, }, } impl ModelError { pub fn instance_not_found_in_realm( moniker: AbsoluteMoniker, child: PartialMoniker, ) -> ModelError { ModelError::InstanceNotFoundInRealm { moniker, child } } pub fn instance_already_exists(moniker: AbsoluteMoniker, child: PartialMoniker) -> ModelError { ModelError::InstanceAlreadyExists { moniker, child } } pub fn instance_shut_down(moniker: AbsoluteMoniker) -> ModelError { ModelError::InstanceShutDown { moniker } } pub fn instance_not_found(moniker: AbsoluteMoniker) -> ModelError { ModelError::InstanceNotFound { moniker } } pub fn collection_not_found(name: impl Into<String>) -> ModelError { ModelError::CollectionNotFound { name: name.into() } } pub fn unsupported(feature: impl Into<String>) -> ModelError { ModelError::Unsupported { feature: feature.into() } } pub fn namespace_creation_failed(err: impl Into<Error>) -> ModelError
pub fn manifest_invalid(url: impl Into<String>, err: impl Into<Error>) -> ModelError { ModelError::ManifestInvalid { url: url.into(), err: err.into().into() } } pub fn capability_discovery_error(err: impl Into<Error>) -> ModelError { ModelError::CapabilityDiscoveryError { err: err.into().into() } } pub fn add_entry_error(moniker: AbsoluteMoniker, entry_name: impl Into<String>) -> ModelError { ModelError::AddEntryError { moniker, entry_name: entry_name.into() } } pub fn remove_entry_error(entry_name: impl Into<String>) -> ModelError { ModelError::RemoveEntryError { entry_name: entry_name.into() } } pub fn open_directory_error( moniker: AbsoluteMoniker, relative_path: impl Into<String>, ) -> ModelError { ModelError::OpenDirectoryError { moniker, relative_path: relative_path.into() } } } impl From<ResolverError> for ModelError { fn from(err: ResolverError) -> Self { ModelError::ResolverError { err } } } impl From<RunnerError> for ModelError { fn from(err: RunnerError) -> Self { ModelError::RunnerError { err } } } impl From<StorageError> for ModelError { fn from(err: StorageError) -> Self { ModelError::StorageError { err } } }
{ ModelError::NamespaceCreationFailed { err: err.into().into() } }
__init__.py
# Copyright 2018-present Kensho Technologies, LLC. import six from ..blocks import Filter, GlobalOperationsStart from ..ir_lowering_common import (extract_optional_location_root_info, extract_simple_optional_location_info, lower_context_field_existence, merge_consecutive_filter_clauses, optimize_boolean_expression_comparisons, remove_end_optionals) from .ir_lowering import (lower_backtrack_blocks, lower_folded_coerce_types_into_filter_blocks, lower_has_substring_binary_compositions, remove_backtrack_blocks_from_fold, rewrite_binary_composition_inside_ternary_conditional, truncate_repeated_single_step_traversals, truncate_repeated_single_step_traversals_in_sub_queries) from ..ir_sanity_checks import sanity_check_ir_blocks_from_frontend from .between_lowering import lower_comparisons_to_between from .optional_traversal import (collect_filters_to_first_location_occurrence, convert_optional_traversals_to_compound_match_query, lower_context_field_expressions, prune_non_existent_outputs) from ..match_query import convert_to_match_query from ..workarounds import (orientdb_class_with_while, orientdb_eval_scheduling, orientdb_query_execution) from .utils import construct_where_filter_predicate ############## # Public API # ############## def lower_ir(ir_blocks, query_metadata_table, type_equivalence_hints=None): """Lower the IR into an IR form that can be represented in MATCH queries. Args: ir_blocks: list of IR blocks to lower into MATCH-compatible form query_metadata_table: QueryMetadataTable object containing all metadata collected during query processing, including location metadata (e.g. which locations are folded or optional). type_equivalence_hints: optional dict of GraphQL interface or type -> GraphQL union. Used as a workaround for GraphQL's lack of support for inheritance across "types" (i.e. non-interfaces), as well as a workaround for Gremlin's total lack of inheritance-awareness. The key-value pairs in the dict specify that the "key" type is equivalent to the "value" type, i.e. that the GraphQL type or interface in the key is the most-derived common supertype of every GraphQL type in the "value" GraphQL union. Recursive expansion of type equivalence hints is not performed, and only type-level correctness of this argument is enforced. See README.md for more details on everything this parameter does. ***** Be very careful with this option, as bad input here will lead to incorrect output queries being generated. ***** Returns: MatchQuery object containing the IR blocks organized in a MATCH-like structure """ sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table) # Construct the mapping of each location to its corresponding GraphQL type. location_types = { location: location_info.type for location, location_info in query_metadata_table.registered_locations }
location for location, location_info in query_metadata_table.registered_locations if location_info.coerced_from_type is not None } # Extract information for both simple and complex @optional traverses location_to_optional_results = extract_optional_location_root_info(ir_blocks) complex_optional_roots, location_to_optional_roots = location_to_optional_results simple_optional_root_info = extract_simple_optional_location_info( ir_blocks, complex_optional_roots, location_to_optional_roots) ir_blocks = remove_end_optionals(ir_blocks) # Append global operation block(s) to filter out incorrect results # from simple optional match traverses (using a WHERE statement) if len(simple_optional_root_info) > 0: where_filter_predicate = construct_where_filter_predicate( query_metadata_table, simple_optional_root_info) ir_blocks.insert(-1, GlobalOperationsStart()) ir_blocks.insert(-1, Filter(where_filter_predicate)) # These lowering / optimization passes work on IR blocks. ir_blocks = lower_context_field_existence(ir_blocks, query_metadata_table) ir_blocks = optimize_boolean_expression_comparisons(ir_blocks) ir_blocks = rewrite_binary_composition_inside_ternary_conditional(ir_blocks) ir_blocks = merge_consecutive_filter_clauses(ir_blocks) ir_blocks = lower_has_substring_binary_compositions(ir_blocks) ir_blocks = orientdb_eval_scheduling.workaround_lowering_pass(ir_blocks, query_metadata_table) # Here, we lower from raw IR blocks into a MatchQuery object. # From this point on, the lowering / optimization passes work on the MatchQuery representation. match_query = convert_to_match_query(ir_blocks) match_query = lower_comparisons_to_between(match_query) match_query = lower_backtrack_blocks(match_query, location_types) match_query = truncate_repeated_single_step_traversals(match_query) match_query = orientdb_class_with_while.workaround_type_coercions_in_recursions(match_query) # Optimize and lower the IR blocks inside @fold scopes. new_folds = { key: merge_consecutive_filter_clauses( remove_backtrack_blocks_from_fold( lower_folded_coerce_types_into_filter_blocks(folded_ir_blocks) ) ) for key, folded_ir_blocks in six.iteritems(match_query.folds) } match_query = match_query._replace(folds=new_folds) compound_match_query = convert_optional_traversals_to_compound_match_query( match_query, complex_optional_roots, location_to_optional_roots) compound_match_query = prune_non_existent_outputs(compound_match_query) compound_match_query = collect_filters_to_first_location_occurrence(compound_match_query) compound_match_query = lower_context_field_expressions(compound_match_query) compound_match_query = truncate_repeated_single_step_traversals_in_sub_queries( compound_match_query) compound_match_query = orientdb_query_execution.expose_ideal_query_execution_start_points( compound_match_query, location_types, coerced_locations) return compound_match_query
# Compute the set of all locations that have associated type coercions. coerced_locations = {
a.go
package bar
D int F int }
type Boo struct { E int
queueproperty.py
# Copyright 1997 - 2018 by IXIA Keysight # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. from ixnetwork_restpy.base import Base from ixnetwork_restpy.files import Files class QueueProperty(Base): """The QueueProperty class encapsulates a required queueProperty node in the ixnetwork hierarchy. An instance of the class can be obtained by accessing the QueueProperty property from a parent instance. The internal properties list will contain one and only one set of properties which is populated when the property is accessed. """ _SDM_NAME = 'queueProperty' def __init__(self, parent): super(QueueProperty, self).__init__(parent) @property def
(self): """If true, indicates that a minimum data rate is guaranteed. Returns: bool """ return self._get_attribute('minimumDataRateGuaranteed') @MinimumDataRateGuaranteed.setter def MinimumDataRateGuaranteed(self, value): self._set_attribute('minimumDataRateGuaranteed', value) @property def None(self): """If true, indicates that no property is defined for the queue. Returns: bool """ return self._get_attribute('none') @None.setter def None(self, value): self._set_attribute('none', value)
MinimumDataRateGuaranteed
CastInPipeline.py
''' Created on Oct 13, 2017 @author: svanhmic ''' from pyspark.ml.param.shared import HasInputCol, HasOutputCol from pyspark.ml import Transformer from pyspark.sql import functions as F from pyspark import keyword_only from pyspark.ml.param import Params, Param, TypeConverters class CastInPipeline(Transformer, HasInputCol): ''' This inherrent-class converts a given vector column in a data frame to a ml-dense vector. Can be used in a pipeline method ''' applicable_casts = ['intstring', 'intfloat', 'intdouble', 'doublefloat', 'floatdouble', 'stringdouble', 'stringint' ] castTo = Param( parent=Params._dummy(), name='castTo', doc='Indicates the what we want to cast to.', typeConverter=TypeConverters.toString ) @keyword_only def
(self, inputCol=None, castTo=None,): if castTo not in ['string', 'int', 'float', 'double', 'boolean']: raise TypeError('new type must be a valid type!') super(CastInPipeline, self).__init__() kwargs = self.__init__._input_kwargs self.setParams(**kwargs) @keyword_only def setParams(self, inputCol=None, castTo=None): kwargs = self.setParams._input_kwargs return self._set(**kwargs) def setCastTo(self, value): """ Sets the casted value to :return: """ if value in ['string', 'int', 'float', 'double', 'boolean']: return self._set(castTo=value) else: raise TypeError('new type must be a valid type!') def getCastTo(self): return self.getOrDefault(self.castTo) def _transform(self, dataset): column_types = dict(dataset.dtypes) if str(column_types[self.getInputCol()])+str(self.getCastTo()) not in self.applicable_casts: raise Exception( 'The desired conversion from {} to {}, cannot be applied, sorry!' .format(column_types[self.getInputCol()], self.getCastTo()) ) return dataset.withColumn( self.getInputCol(), F.col(self.getInputCol()).cast(self.getCastTo()))
__init__
gcal.js
/*! * FullCalendar v3.8.0 * Docs & License: https://fullcalendar.io/ * (c) 2017 Adam Shaw */ (function webpackUniversalModuleDefinition(root, factory) { if(typeof exports === 'object' && typeof module === 'object') module.exports = factory(require("fullcalendar"), require("jquery")); else if(typeof define === 'function' && define.amd) define(["fullcalendar", "jquery"], factory); else if(typeof exports === 'object') factory(require("fullcalendar"), require("jquery")); else factory(root["FullCalendar"], root["jQuery"]); })(typeof self !== 'undefined' ? self : this, function(__WEBPACK_EXTERNAL_MODULE_1__, __WEBPACK_EXTERNAL_MODULE_3__) { return /******/ (function(modules) { // webpackBootstrap /******/ // The module cache /******/ var installedModules = {}; /******/ /******/ // The require function /******/ function __webpack_require__(moduleId) { /******/ /******/ // Check if module is in cache /******/ if(installedModules[moduleId]) { /******/ return installedModules[moduleId].exports; /******/ } /******/ // Create a new module (and put it into the cache) /******/ var module = installedModules[moduleId] = { /******/ i: moduleId, /******/ l: false, /******/ exports: {} /******/ }; /******/ /******/ // Execute the module function /******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__); /******/ /******/ // Flag the module as loaded /******/ module.l = true; /******/ /******/ // Return the exports of the module /******/ return module.exports; /******/ } /******/ /******/ /******/ // expose the modules object (__webpack_modules__) /******/ __webpack_require__.m = modules; /******/ /******/ // expose the module cache /******/ __webpack_require__.c = installedModules; /******/ /******/ // define getter function for harmony exports /******/ __webpack_require__.d = function(exports, name, getter) { /******/ if(!__webpack_require__.o(exports, name)) { /******/ Object.defineProperty(exports, name, { /******/ configurable: false, /******/ enumerable: true, /******/ get: getter /******/ }); /******/ } /******/ }; /******/ /******/ // getDefaultExport function for compatibility with non-harmony modules /******/ __webpack_require__.n = function(module) { /******/ var getter = module && module.__esModule ? /******/ function getDefault() { return module['default']; } : /******/ function getModuleExports() { return module; }; /******/ __webpack_require__.d(getter, 'a', getter); /******/ return getter; /******/ }; /******/ /******/ // Object.prototype.hasOwnProperty.call /******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); }; /******/ /******/ // __webpack_public_path__ /******/ __webpack_require__.p = ""; /******/ /******/ // Load entry module and return exports /******/ return __webpack_require__(__webpack_require__.s = 261); /******/ }) /************************************************************************/ /******/ ({ /***/ 1: /***/ (function(module, exports) { module.exports = __WEBPACK_EXTERNAL_MODULE_1__; /***/ }), /***/ 2: /***/ (function(module, exports) { /* derived from: https://github.com/Microsoft/tslib/blob/v1.6.0/tslib.js only include the Helpers we need, to keep down filesize */ var extendStatics = Object.setPrototypeOf || ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; exports.__extends = function (d, b) { extendStatics(d, b); function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); }; /***/ }), /***/ 261: /***/ (function(module, exports, __webpack_require__) { Object.defineProperty(exports, "__esModule", { value: true }); var exportHooks = __webpack_require__(1); var GcalEventSource_1 = __webpack_require__(262); exportHooks.EventSourceParser.registerClass(GcalEventSource_1.default); exportHooks.GcalEventSource = GcalEventSource_1.default; /***/ }), /***/ 262: /***/ (function(module, exports, __webpack_require__) { Object.defineProperty(exports, "__esModule", { value: true }); var tslib_1 = __webpack_require__(2); var $ = __webpack_require__(3); var fullcalendar_1 = __webpack_require__(1); var GcalEventSource = /** @class */ (function (_super) { tslib_1.__extends(GcalEventSource, _super); function GcalEventSource() { return _super !== null && _super.apply(this, arguments) || this; } GcalEventSource.parse = function (rawInput, calendar) { var rawProps; if (typeof rawInput === 'object') { rawProps = rawInput; } else if (typeof rawInput === 'string') { rawProps = { url: rawInput }; // url will be parsed with parseGoogleCalendarId } if (rawProps) { return fullcalendar_1.EventSource.parse.call(this, rawProps, calendar); } return false; }; GcalEventSource.prototype.fetch = function (start, end, timezone) { var _this = this; var url = this.buildUrl(); var requestParams = this.buildRequestParams(start, end, timezone); var ajaxSettings = this.ajaxSettings || {}; var onSuccess = ajaxSettings.success; if (!requestParams) { return fullcalendar_1.Promise.reject(); } this.calendar.pushLoading(); return fullcalendar_1.Promise.construct(function (onResolve, onReject) { $.ajax($.extend({}, // destination fullcalendar_1.JsonFeedEventSource.AJAX_DEFAULTS, ajaxSettings, { url: url, data: requestParams, success: function (responseData, status, xhr) { var rawEventDefs; var successRes; _this.calendar.popLoading(); if (responseData.error) { _this.reportError('Google Calendar API: ' + responseData.error.message, responseData.error.errors); onReject(); } else if (responseData.items) { rawEventDefs = _this.gcalItemsToRawEventDefs(responseData.items, requestParams.timeZone); successRes = fullcalendar_1.applyAll(onSuccess, _this, [responseData, status, xhr]); // passthru if ($.isArray(successRes)) { rawEventDefs = successRes; } onResolve(_this.parseEventDefs(rawEventDefs)); } }, error: function (xhr, statusText, errorThrown) { _this.reportError('Google Calendar network failure: ' + statusText, [xhr, errorThrown]); _this.calendar.popLoading(); onReject(); } })); }); }; GcalEventSource.prototype.gcalItemsToRawEventDefs = function (items, gcalTimezone) { var _this = this; return items.map(function (item) { return _this.gcalItemToRawEventDef(item, gcalTimezone); }); }; GcalEventSource.prototype.gcalItemToRawEventDef = function (item, gcalTimezone) { var url = item.htmlLink || null; // make the URLs for each event show times in the correct timezone if (url && gcalTimezone) { url = injectQsComponent(url, 'ctz=' + gcalTimezone); } return { id: item.id, title: item.summary, start: item.start.dateTime || item.start.date, end: item.end.dateTime || item.end.date, url: url, location: item.location, description: item.description }; }; GcalEventSource.prototype.buildUrl = function () { return GcalEventSource.API_BASE + '/' + encodeURIComponent(this.googleCalendarId) + '/events?callback=?'; // jsonp }; GcalEventSource.prototype.buildRequestParams = function (start, end, timezone) { var apiKey = this.googleCalendarApiKey || this.calendar.opt('googleCalendarApiKey'); var params; if (!apiKey) { this.reportError('Specify a googleCalendarApiKey. See http://fullcalendar.io/docs/google_calendar/'); return null; } // The API expects an ISO8601 datetime with a time and timezone part. // Since the calendar's timezone offset isn't always known, request the date in UTC and pad it by a day on each // side, guaranteeing we will receive all events in the desired range, albeit a superset. // .utc() will set a zone and give it a 00:00:00 time. if (!start.hasZone()) { start = start.clone().utc().add(-1, 'day'); } if (!end.hasZone()) { end = end.clone().utc().add(1, 'day'); } params = $.extend(this.ajaxSettings.data || {}, { key: apiKey, timeMin: start.format(), timeMax: end.format(), singleEvents: true, maxResults: 9999 }); if (timezone && timezone !== 'local') { // when sending timezone names to Google, only accepts underscores, not spaces params.timeZone = timezone.replace(' ', '_'); } return params; }; GcalEventSource.prototype.reportError = function (message, apiErrorObjs) { var calendar = this.calendar; var calendarOnError = calendar.opt('googleCalendarError'); var errorObjs = apiErrorObjs || [{ message: message }]; // to be passed into error handlers if (this.googleCalendarError) { this.googleCalendarError.apply(calendar, errorObjs); } if (calendarOnError) { calendarOnError.apply(calendar, errorObjs); } // print error to debug console fullcalendar_1.warn.apply(null, [message].concat(apiErrorObjs || [])); }; GcalEventSource.prototype.getPrimitive = function () { return this.googleCalendarId; }; GcalEventSource.prototype.applyManualStandardProps = function (rawProps) { var superSuccess = fullcalendar_1.EventSource.prototype.applyManualStandardProps.apply(this, arguments); var googleCalendarId = rawProps.googleCalendarId; if (googleCalendarId == null && rawProps.url) { googleCalendarId = parseGoogleCalendarId(rawProps.url); } if (googleCalendarId != null) { this.googleCalendarId = googleCalendarId; return superSuccess; } return false; }; GcalEventSource.prototype.applyMiscProps = function (rawProps) { if (!this.ajaxSettings) { this.ajaxSettings = {}; } $.extend(this.ajaxSettings, rawProps); }; GcalEventSource.API_BASE = 'https://www.googleapis.com/calendar/v3/calendars'; return GcalEventSource; }(fullcalendar_1.EventSource)); exports.default = GcalEventSource; GcalEventSource.defineStandardProps({ // manually process... url: false, googleCalendarId: false, // automatically transfer... googleCalendarApiKey: true, googleCalendarError: true }); function
(url) { var match; // detect if the ID was specified as a single string. // will match calendars like "[email protected]" in addition to person email calendars. if (/^[^\/]+@([^\/\.]+\.)*(google|googlemail|gmail)\.com$/.test(url)) { return url; } else if ((match = /^https:\/\/www.googleapis.com\/calendar\/v3\/calendars\/([^\/]*)/.exec(url)) || (match = /^https?:\/\/www.google.com\/calendar\/feeds\/([^\/]*)/.exec(url))) { return decodeURIComponent(match[1]); } } // Injects a string like "arg=value" into the querystring of a URL function injectQsComponent(url, component) { // inject it after the querystring but before the fragment return url.replace(/(\?.*?)?(#|$)/, function (whole, qs, hash) { return (qs ? qs + '&' : '?') + component + hash; }); } /***/ }), /***/ 3: /***/ (function(module, exports) { module.exports = __WEBPACK_EXTERNAL_MODULE_3__; /***/ }) /******/ }); });
parseGoogleCalendarId
pathutils.py
from pathlib import Path
import sys import os def add_application_path(): app_path = Path(__file__).resolve().parents[1] sys.path.append(str(app_path)) os.chdir(str(app_path))
Modal.tsx
import * as React from 'react'; import styled from 'styled-components'; import ReactDOM from 'react-dom'; export interface ModalProps { handleClose: () => void; show: boolean; modalContent: any; style?: object; title?: string; titleClass?: string;
//styles container of modal (background). var ModalWrapper = styled.div` width: 100vw; height: 100vh; display: flex; align-items: center; justify-content: center; background-color: rgba(0, 0, 0, 0.5); z-index: 10000; position: fixed; top: 0; overflow: hidden; `; //styles content section of modal (foreground). var ContentContainer = styled.div` max-width: 768px; max-height: 95vh; overflow: auto; background-color: #fff; z-index: 10001; border-radius: .2rem; padding: 1rem; `; //Wraps the header of the content section. Contains StyledTitle and CloseIcon. var HeaderWrapper = styled.div` display: flex; margin-bottom: 1rem; justify-content: space-between; `; var StyledTitle = styled.label` font-size: 24px; font-weight: bold; `; //X icon that calls handleClose from props var CloseIcon = styled.div` position: relative; display: flex; margin-left: auto; width: 20px; height: 20px; cursor: pointer; &:hover { &::before, &::after { background: gray; }; }; &::before, &::after { content: ''; position: absolute; height: 2px; width: 100%; top: 50%; left: 0; margin-top: -1px; background: #000; height: 4px; margin-top: -2px; border-radius: 5px; }; &::before { transform: rotate(45deg); }; &::after { transform: rotate(-45deg); }; `; function handleContentClick(e) { //content section clicked, prevent modal from closing. e.stopPropagation(); }; function handleKeyUp(e, props: ModalProps) { //escape pressed, close modal. if (e.keyCode === 27) props.handleClose(); } function closeIconKeyUp(e, cb) { //close (X) icon key up, if enter/space, call handleClose. if (e.keyCode === 13 || e.keyCode === 32) { cb(); }; }; function createHeader(props: ModalProps) { return ( <HeaderWrapper> {props.title && <StyledTitle tabIndex={1} className={props.titleClass} style={props.titleStyle} > {props.title} </StyledTitle> } <CloseIcon onKeyUp={(e) => closeIconKeyUp(e, props.handleClose)} tabIndex={1} onClick={props.handleClose} /> </HeaderWrapper> ); }; export class Modal extends React.Component<ModalProps> { private modal; public componentDidMount(): void { //create div and apply directly to body of document. this.modal = document.createElement('div'); document.body.appendChild(this.modal); }; public componentWillUnmount(): void { //remove modal from body of document. ReactDOM.unmountComponentAtNode(this.modal); document.body.removeChild(this.modal); //remove event listener. window.removeEventListener('keyup', (e) => handleKeyUp(e, this.props)); }; public componentWillReceiveProps(props: ModalProps): void { if (props.show) { //add keyup event listener if modal is being shown. window.addEventListener('keyup', (e) => handleKeyUp(e, props)); } else { //remove keyup event listener if modal is being hidden. window.removeEventListener('keyup', (e) => handleKeyUp(e, props)); } //add modal to div that was applied to document body. ReactDOM.render( <ModalWrapper style={{display: props.show ? 'flex' : 'none'}} onClick={props.handleClose} > <ContentContainer onClick={handleContentClick} style={props.style} > {createHeader(props)} {props.modalContent} </ContentContainer> </ModalWrapper>, this.modal ); }; public render() { return null; }; };
titleStyle?: object; }
arkalos_common.py
from django.http import HttpResponse from django.contrib.auth.models import User from django.contrib.auth import authenticate, login, logout from django.shortcuts import redirect from django.core.validators import URLValidator # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Max, Count from app.models import Reference, Tools, Reports, Tasks, TasksStats import io import re import six import uuid import hashlib import simplejson #https://pybtex.org/ from pybtex.database import parse_string as parse_reference_string import pybtex.database.input.bibtex import pybtex.plugin # Globals pybtex_style = pybtex.plugin.find_plugin('pybtex.style.formatting', 'plain')() pybtex_html_backend = pybtex.plugin.find_plugin('pybtex.backends', 'html')() pybtex_parser = pybtex.database.input.bibtex.Parser() sep = '||' sep2 = '@@' format_time_string = '%a, %d %b %Y %H:%M:%S' # RFC 2822 Internet email standard. https://docs.python.org/2/library/time.html#time.strftime # '%Y-%m-%d, %H:%M:%S' url_validator = URLValidator() # https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not class ArkalosException(Exception): pass def get_guid(): ''' Create a new guid ''' return str(uuid.uuid4()) def get_user_id(request): ''' Get id of user ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user.id return None def get_user(request): ''' Get user object ''' is_authenticated = request.user.is_authenticated() if is_authenticated: return request.user return None def fail(error_message=None): ''' Failed AJAX request ''' ret = {'success': False, 'error_message': error_message} json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def success(data={}): ''' success Ajax request ''' data['success'] = True json = simplejson.dumps(data) return HttpResponse(json, content_type='application/json') def has_data(f): ''' Decorator that passes AJAX data to a function parameters ''' def wrapper(*args, **kwargs): request = args[0] if request.method == 'POST': if len(request.POST): for k in request.POST: kwargs[k] = request.POST[k] else: POST = simplejson.loads(request.body) for k in POST: kwargs[k] = POST[k] elif request.method == 'GET': for k in request.GET: kwargs[k] = request.GET[k] print ("GET: {} == {}".format(k, kwargs[k])) return f(*args, **kwargs) return wrapper def has_field(field_names, errors): ''' Check if field names are present field_name: The field to check ''' def decorator(f): def wrapper(*args, **kwargs): for field_index, field_name in enumerate(field_names): if not field_name in kwargs: if callable(errors): kwargs['error'] = errors(field_name) elif type(errors) is list: kwargs['error'] = errors[field_index] elif type(errors) is dict: kwargs['error'] = errors[field_name] elif type(errors) is str: kwargs['error'] = errors else: # This should never happen raise ArkalosException('Unknown error type: {}'.format(type(error).__name__)) return f(*args, **kwargs) return f(*args, **kwargs) return wrapper return decorator def has_error(f): ''' Check if error in kwargs ''' def wrapper(*args, **kwargs): if 'error' in kwargs: return fail(kwargs['error']) return f(*args, **kwargs) return wrapper def username_exists(username): ''' Checks if a username exists ''' return User.objects.filter(username=username).exists() def URL_validate(url): ''' https://stackoverflow.com/questions/7160737/python-how-to-validate-a-url-in-python-malformed-or-not ''' try: url_validator(url) except ValidationError as e: return False return True def format_time(t): ''' Universal method to string format time vars ''' return t.strftime(format_time_string) ########################################################################### ##################DATABASE FUNCTIONS####################################### ########################################################################### def bootstrap_table_format_field(entry, value): ''' Formats the field of a bootstrap table. Values are taken from bidings ''' if type(value) is str: if type(entry) is dict: return entry[value] else: return getattr(entry, value) elif callable(value): return value(entry) def serve_boostrap_table2(model, query_f, filters, bindings, **kwargs): ''' count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name')) IT DOES NOT USE count_f ! ''' #count = count_f() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: # "read" the filter filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) print ("Filter:") print (filter_) applied_filters = {filters[f][0](): filters[f][1](f_value) for f, f_value in filter_.items() if f in filters} print ("Applied filters:") print (applied_filters) else: applied_filters = {} querySet = query_f(applied_filters) count = querySet.count() querySet = querySet[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def serve_boostrap_table(model, bindings, order_by, **kwargs): ''' http://bootstrap-table.wenzhixin.net.cn/ ''' count = model.objects.count() order = kwargs['order'] offset = kwargs['offset'] limit = kwargs['limit'] from_offset = int(offset) to_offset = from_offset + int(limit) if 'filter' in kwargs: filter_ = kwargs['filter'] filter_ = simplejson.loads(filter_) filter_ = { bindings[k] + '__icontains':v for k,v in filter_.items()} querySet = model.objects.filter(**filter_) count = querySet.count() querySet = querySet[from_offset:to_offset] else: querySet = model.objects.order_by(order_by)[from_offset:to_offset] ret = {'total': count} ret['rows'] = [ {k: bootstrap_table_format_field(entry, v) for k, v in bindings.items()} for entry in querySet] json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def db_exists(model, filters): ''' Does this entry exist? ''' return model.objects.filter(**filters).exists() def get_maximum_current_version(model, name): ''' Return the next available current_version ''' max_entry = model.objects.filter(name=name).aggregate(Max('current_version')) if max_entry['current_version__max'] is None: return 1 assert type(max_entry) is dict assert len(max_entry) == 1 return max_entry['current_version__max'] + 1 def build_jstree_tool_dependencies(tool, prefix='', include_original=False): ''' Build the dependency jstree of this tool include_original are we including the original tool in the jstree? ''' def node(t): ret = { 'id': prefix + sep + t.name + sep + str(t.current_version), #Through this id we get info from jstree jandlers 'text': t.name + ' ' + str(t.current_version), 'children': [build_jstree_tool_dependencies(x, prefix, include_original=True) for x in t.dependencies.all()] + \ [{'text': x[0], 'type': 'exposed', 'value': x[1], 'description': x[2], 'id': prefix+sep+x[0]+sep+t.name+sep2+str(t.current_version)} for x in simplejson.loads(t.exposed)], 'current_version': t.current_version, 'name': t.name, 'type': 'tool', } return ret if include_original: return node(tool) else: return [node(dependent_tool) for dependent_tool in tool.dependencies.all()] def build_jstree(model, name, prefix=''): ''' Take an entry that has a previous_version and current_version Build a jstree compatible structure ''' index = {} if prefix: prefix_to_add = prefix + sep else: prefix_to_add = '' def node(o): current_version = o.current_version ret = { 'id': prefix_to_add + o.name + sep + str(o.current_version), 'text': o.name + ' ' + str(o.current_version), 'children': [], 'current_version': o.current_version, 'name': o.name } index[current_version] = ret return ret ret = [] all_objects = model.objects.filter(name=name).order_by("current_version") #ret.append(node(all_objects[0])) for o in all_objects: previous_version = o.previous_version if previous_version is None: ret.append(node(o)) else: this_node = node(o) index[previous_version]['children'].append(this_node) #print (simplejson.dumps(ret)) return ret ########################################################################### ##################END OF DATABASE####################################### ########################################################################### ########################################################################### ################## REGISTER ############################################### ########################################################################### @has_data @has_field(['username', 'password', 'password_confirm', 'email'], lambda x :'{} is required'.format(x)) @has_error def register(request, **kwargs): ''' Register ''' #print (kwargs) username = kwargs['username'] password = kwargs['password'] password_confirm = kwargs['password_confirm'] email = kwargs['email'] #Check if this user exists if username_exists(username): return fail('Username {} exists'.format(username)) #Check if password match if kwargs['password'] != kwargs['password_confirm']: return fail('Passwords do not match') #Create user user = User.objects.create_user(username, email, password) return success({}) @has_data @has_field(['username', 'password'], lambda x :'{} is required'.format(x)) @has_error def loginlocal(request, **kwargs): ''' Function called from login ''' username = kwargs['username'] password = kwargs['password'] user = authenticate(username=username, password=password) if user is None: return fail('Invalid username or password') #if user.is_active: ... # https://docs.djangoproject.com/en/1.9/topics/auth/default/ login(request, user) ret = {'username': username} return success(ret) def logoutlocal(request):
########################################################################### ################## END OF REGISTER ######################################## ########################################################################### ############################### ####REFERENCES################# ############################### def reference_get_fields(content): ''' Get the code of the bibtex entry ''' p = parse_reference_string(content, 'bibtex') p_len = len(p.entries) if p_len == 0: return False, 'Could not find BIBTEX entry' if p_len > 1: return False, 'More than one BIBTEX entries found' code = p.entries.keys()[0] if not 'title' in p.entries[code].fields: return False, 'Could not find title information' title = p.entries[code].fields['title'] if not hasattr(p.entries[code], 'persons'): return False, 'Could not find author information' if not 'author' in p.entries[code].persons: return False, 'Could not find author information' if len(p.entries[code].persons['author']) == 0: return False, 'Could not find author information' authors = sep.join([str(x) for x in p.entries[code].persons['author']]) return True, {'code': code, 'title': title, 'authors': authors} def bibtex_to_html(content): ''' Convert bibtex to html Adapted from: http://pybtex-docutils.readthedocs.io/en/latest/quickstart.html#overview ''' data = pybtex_parser.parse_stream(six.StringIO(content)) data_formatted = pybtex_style.format_entries(six.itervalues(data.entries)) output = io.StringIO() pybtex_html_backend.write_to_stream(data_formatted, output) html = output.getvalue() html_s = html.split('\n') html_s = html_s[9:-2] new_html = '\n'.join(html_s).replace('<dd>', '').replace('</dd>', '') return new_html @has_data @has_field(['content'], 'BIBTEX content is required') @has_error def add_reference(request, **kwargs): ''' Add reference ''' content = kwargs['content'] s, fields = reference_get_fields(content) if not s: return fail(fiels) if db_exists(Reference, {'code': fields['code']}): return fail('BIBTEX entry with code {} already exists'.format(code)) html = bibtex_to_html(content) r = Reference( user=get_user(request), code=fields['code'], title=fields['title'], authors=fields['authors'], content=content, reference_type='BIBTEX', html = html, ) r.save() return success() @has_data def get_references(request, **kwargs): ''' Serve GET Request for References bootstrap table ''' bindings = { 'id': 'code', 'content': 'html', } return serve_boostrap_table(Reference, bindings, 'id', **kwargs) @has_data @has_error def get_reference(request, **kwargs): ''' Get reference ''' codes = kwargs['codes'] ret = {'data': {}, 'html': []} c = 0 for code in codes: try: ref = Reference.objects.get(code=code) c += 1 ret['data'][code] = {'counter': c} ret['html'].append({'html': ref.html}) except ObjectDoesNotExist: pass ret['total'] = c return success(ret) @has_data def reference_suggestions(request, **kwargs): ''' Get called from tagas input ''' query = kwargs['query'] querySet = Reference.objects.filter(content__icontains = query)[:10] ret = [ {'value' : entry.code, 'html': entry.html} for entry in querySet] # We have a html representation for each Reference json = simplejson.dumps(ret) return HttpResponse(json, content_type='application/json') def get_references_from_text(text): ''' Get all reference objects from a text. This is useful for the report ''' ret = [] all_brackets = re.findall(r'\[[\w]+\]', text) for bracket in all_brackets: #Remove brackets code = bracket[1:-1] #Check if this a real reference try: ref = Reference.objects.get(code=code) except ObjectDoesNotExist: pass else: ret += [ref] return ret ############################### ######END OF REFERENCES######## ############################### ################################# #### REPORTS #################### ################################# @has_data def get_reports(request, **kwargs): ''' Serve bootstrap table for reports ''' bindings = { 'name': 'name', #'total_edits': lambda entry: entry['name__count'], 'content': lambda entry : '' } #return serve_boostrap_table(Reports, bindings, 'id', **kwargs) return serve_boostrap_table2( model = Reports, #count_f = lambda : Reports.objects.values('name').annotate(Count('name')).count(), query_f = lambda x : Reports.objects.filter(**x).values('name').distinct(), bindings = bindings, filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, **kwargs ) @has_data @has_error def get_reports_ui(request, **kwargs): name = kwargs['name'] current_version = kwargs['current_version'] report = Reports.objects.get(name=name, current_version=current_version) username = report.user.username ret = { 'name': name, 'current_version': current_version, 'username': username, 'created_at': format_time(report.created_at), 'markdown': report.markdown, 'summary': report.summary, } return success(ret) @has_data @has_error def add_report(request, **kwargs): name = kwargs['name'] previous_version = kwargs['previous_version'] markdown = kwargs['markdown'] references = kwargs['references'] user = get_user(request) #print (name) #print (previous_version) #print (markdown) #print (references) current_version = get_maximum_current_version(Reports, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None if current_version == 1: previous_version = None report = Reports( name=name, user=user, current_version=current_version, previous_version=previous_version, markdown=markdown, ) report.save() fetched_references = [Reference.objects.get(name=x) for x in references] report.references.add(*fetched_references) report.save() ret = { 'created_at' : format_time(report.created_at), 'current_version': current_version, 'jstree': build_jstree(Reports, report.name) } #print (ret) return success(ret) ################################# #### END OF REPORTS ############# ################################# ################################# ####TOOLS / DATA################# ################################# @has_data def get_tools(request, **kwargs): ''' Serve GET Request for Tools bootstrap table def serve_boostrap_table2(model, count_f, query_f, bindings, **kwargs): count_f = Tools.objects.values('name', 'url').annotate(Count('name')).count() query_f = Tools.objects.values('name', 'url').annotate(Count('name') ''' bindings = { 'name' : 'name', 'url': lambda entry : '<a href="{}" target="_blank">{}</a>'.format(entry['url'], entry['url']), #'total_edits': lambda entry: entry['name__count'], 'description': lambda entry: '' #'current_version': lambda entry: '{} -- {}'.format(entry.current_version, entry.previous_version), #'current_version': 'current_version', #'description': 'description', #'description': lambda entry: '{} {} -- {}'.format(entry.description, entry.current_version, entry.previous_version), } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return serve_boostrap_table2( model = Tools, #count_f = lambda : Tools.objects.values('name', 'url').annotate(Count('name')).count(), query_f = lambda x : Tools.objects.values('name', 'url').annotate(Count('name')), filters = { }, bindings = bindings, **kwargs ) @has_data @has_error def get_tools_ui(request, **kwargs): ''' Called when we want an explicit tool from the UI ''' name = kwargs['name'] current_version = kwargs['current_version'] tool = Tools.objects.get(name=name, current_version=current_version) #print ('System: {}'.format(tool.system)) exposed = simplejson.loads(tool.exposed) if not len(exposed): exposed = [['', '', '']] jstree = build_jstree(Tools, tool.name) dependencies = build_jstree_tool_dependencies(tool, prefix='3', include_original=False) #print ('DEPENDENCIES:') #print (dependencies) ret = { 'name': tool.name, 'current_version': current_version, 'version' : tool.version, 'system' : simplejson.loads(tool.system), 'username': tool.user.username, 'created_at': format_time(tool.created_at), 'url': tool.url, 'description': tool.description, 'installation': tool.installation, 'validate_installation': tool.validate_installation, 'exposed': exposed, 'jstree': jstree, 'references': [x.code for x in tool.references.all()], 'summary': tool.summary, 'dependencies': dependencies } return success(ret) @has_data @has_field( ['name', 'version', 'url', 'description', 'installation'], ['Name cannot be empty', 'Version cannot be empty', 'Link cannot be empty', 'Description cannot be empty', 'Installation cannot be empty']) @has_error def add_tool(request, **kwargs): ''' Attempt to add a new Tool ''' system = kwargs['system'] system_p = simplejson.loads(system) if not len(system_p): return fail('Please select one or more systems') url = kwargs['url'] if not URL_validate(url): return fail('URL: {} does not seem to be valid'.format(url)) references = kwargs['references'] references = simplejson.loads(references) references = [Reference.objects.get(code=r) for r in references] name = kwargs['name'] current_version = get_maximum_current_version(Tools, name) previous_version = kwargs["previous_version"] if previous_version == 'N/A': previous_version = None # else: # print ('Previous version: {}'.format(previous_version)) # print ('Current version: {}'.format(current_version)) # a=1/0 # Throw exception deliberately print ('Current version: {}'.format(current_version)) user = get_user(request) version = kwargs['version'] description = kwargs['description'] installation=kwargs['installation'] validate_installation = kwargs['validate_installation'] exposed = kwargs['exposed'] #print ('Exposed: {} {}'.format(exposed, type(exposed).__name__)) # This is a list exposed = [e for e in exposed if any(e)] # Remove empty exposed = simplejson.dumps(exposed) # Serialize summary = kwargs['summary'] new_tool = Tools( user=user, name=name, version=version, system=system, current_version=current_version, previous_version=previous_version, url = url, description = description, installation = installation, validate_installation = validate_installation, exposed = exposed, summary = summary, ); new_tool.save() #Add references new_tool.references.add(*references) new_tool.save() jstree = build_jstree(Tools, new_tool.name) #Add dependencies dependencies = kwargs['dependencies'] dependencies_objects = [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version']) for dependency in dependencies] new_tool.dependencies.add(*dependencies_objects) new_tool.save() #Get created at created_at = format_time(new_tool.created_at) #print ('Created at: {}'.format(created_at)) ret = { 'created_at': created_at, 'current_version': current_version, 'jstree': jstree } return success(ret) @has_data @has_error def jstree_tool(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tools, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_report(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Reports, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_wf(request, **kwargs): ''' AJAX backend to get the version jstree for a tool ''' name = kwargs['name'] prefix = kwargs['prefix'] ret = { 'jstree' : build_jstree(Tasks, name, prefix=prefix), } return success(ret) @has_data @has_error def jstree_tool_dependencies(request, **kwargs): ''' AJAX backend to get the dependency jstree for a tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) if 'prefix' in kwargs: prefix=kwargs['prefix'] else: prefix = '3' tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'jstree': build_jstree_tool_dependencies(tool, prefix=prefix, include_original=True) } #print(ret) return success(ret) @has_data @has_error def get_tool_dependencies(request, **kwargs): ''' Return ONE LEVEL dependencies of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'dependencies': [{'name': x.name, 'current_version': x.current_version} for x in tool.dependencies.all()] } return success(ret) @has_data @has_error def get_tool_variables(request, **kwargs): ''' Return the variables of this tool ''' name = kwargs['name'] current_version = int(kwargs['current_version']) tool = Tools.objects.get(name=name, current_version=current_version) ret = { 'variables': simplejson.loads(tool.exposed) } return success(ret) ######################################## ####END OF TOOLS / DATA################# ######################################## ######################################## ######### WORKFLOWS #################### ######################################## def jason_or_django(f): ''' getattr and iterate methods for JSON or DJANGO objects ''' def dec(*args, **kwargs): if type(args[0]) is dict: attr = lambda x,y : x[y] iterate = lambda x,y : (k for k in x[y]) elif type(args[0]) is Tasks: attr = lambda x,y : getattr(x,y) iterate = lambda x,y : (k for k in getattr(x,y).all()) else: raise ArkalosException('This should never happen: {}'.format(type(task))) kwargs['attr'] = attr kwargs['iterate'] = iterate return f(*args, **kwargs) return dec @jason_or_django def task_hash(task, **kwargs): ''' Creates a unique hash for this task attr: Get attribute iterate: Iterator ''' attr = kwargs['attr'] iterate = kwargs['iterate'] # Dictionary version # to_hash = [ # task['name'], # task['bash'], # task['documentation'], # '@@'.join(['&&'.join((x['name'], str(x['current_version']))) for x in task['dependencies'] if x['type'] == 'tool']), # '!!'.join(['**'.join((x['name'], str(x['current_version']) if x['is_workflow'] else 'None')) for x in task['calls']]), # '##'.join(task['inputs']), # '$$'.join(task['outputs']) # ] # This works with both dictionary and django database objects to_hash = [ attr(task, 'name'), attr(task, 'bash'), attr(task, 'documentation'), '@@'.join(['&&'.join((attr(x, 'name'), str(attr(x, 'current_version')))) for x in iterate(task, 'dependencies')]), '!!'.join(['**'.join((attr(x, 'name'), str(attr(x, 'current_version')) if attr(x, 'current_version') else 'None')) for x in iterate(task, 'calls')]), '##'.join(attr(task, 'inputs')), '$$'.join(attr(task, 'outputs')), ] to_hash = '^^'.join(to_hash) to_hash_b = bytearray(to_hash, encoding="utf-8") return hashlib.sha256(to_hash_b).hexdigest() def save_task_or_workflow(request, workflow_or_task): ''' Saves a workflow or task ''' if workflow_or_task['is_workflow']: # This is worflow is_workflow = True if workflow_or_task['current_version'] is None: # This workflow is not saved # Get the previous_version previous_version = workflow_or_task['previous_version'] # Get the current number current_version = get_maximum_current_version(Tasks, workflow_or_task['name']) else: # This workflow is saved. Find it and return it worklfow = Tasks.objects.get(name=workflow_or_task['name'], current_version=workflow_or_task['current_version']) return worklfow else: # This is a task is_workflow = False current_version = None previous_version = None #Check if it exists in the database try: task = Tasks.objects.get(hash_field=workflow_or_task['hash_value']) except ObjectDoesNotExist: pass else: return task # It does not exist. Create it! task = Tasks( user=get_user(request), name=workflow_or_task['name'], current_version=current_version, previous_version=previous_version, bash=workflow_or_task['bash'], documentation=workflow_or_task['documentation'], hash_field=workflow_or_task['hash_value'], is_workflow=is_workflow, inputs=simplejson.dumps(workflow_or_task['inputs']), outputs=simplejson.dumps(workflow_or_task['outputs']), ) task.save() # Add dependencies tools = [] for dependency in workflow_or_task['dependencies']: if dependency['type'] != 'tool': continue tools += [Tools.objects.get(name=dependency['name'], current_version=dependency['current_version'])] task.dependencies.add(*tools) task.save() # Add references refs = get_references_from_text(workflow_or_task['documentation']) task.references.add(*refs) task.save() return task def update_TasksStats(task): ''' Update the stats of this task ''' name = task.name try: taskStat = TasksStats.objects.get(name=name) except ObjectDoesNotExist: taskStat = TasksStats( name=name, edits=1, users=1, last_edit=task, ) else: taskStat.edits += 1 taskStat.users = Tasks.objects.filter(name=name).values('user').count() taskStat.last_edit=task finally: taskStat.save() @has_data @has_error def add_workflow(request, **kwargs): ''' Add a new workflow ''' graph = kwargs['graph'] main_guid = kwargs['main_guid'] #Fix is_workflow for node in graph: node['is_workflow'] = node['type'] == 'workflow' #Take main node main_node = None for node in graph: if node['guid'] == main_guid: main_node = node break assert not (main_node is None) assert main_node['is_workflow'] # Check if there is another workflow with the same name if main_node['previous_version'] is None: # It is a new workflow! if db_exists(Tasks, {'name': main_node['name']}): return fail('Another workflow with this name exists. Please choose another name') # Check if this workflow calls another workflow which is unsaved (this is not allowed) for node in graph: if not node['is_workflow']: # It is not a workflow continue if node['guid'] == main_guid: # It is not the main workflow continue if node['current_version'] is None: # It is not saved return fail('Could not save. Workflow: {} calls an UNSAVED workflow: {}'.format(main_node['name'], node['name'])) #Fix the "calls" guids_to_graph = {node['guid']:node for node in graph} for node in graph: node['calls'] = [{'name': guids_to_graph[callee_guid]['name'], 'current_version': guids_to_graph[callee_guid]['current_version']} for callee_guid in node['serial_calls']] #Do the following three things: #1. Add hash_value information #2. Take the hash of the main workflow #3. Create a mapping from GUIDs to hash_values from_guid_to_hash = {} main_hash = None guids_to_hashes = {} for node in graph: #print ('======') #print(node) node['hash_value'] = task_hash(node) if node['guid'] == main_guid: main_hash = node['hash_value'] guids_to_hashes[node['guid']] = node['hash_value'] assert not (main_hash is None) # Save the graph and create a new dictionary with the saved objects hash_objects_dict = { node['hash_value']: save_task_or_workflow(request, node) for node in graph } #Add the who calls whom information for node in graph: this_node_called =[hash_objects_dict[guids_to_hashes[callee_guid]] for callee_guid in node['serial_calls']] if this_node_called: hash_objects_dict[node['hash_value']].calls.add(*this_node_called) hash_objects_dict[node['hash_value']].save() #Update TaskStats. Perhaps can be done better with signals update_TasksStats(hash_objects_dict[main_hash]) ret = { 'current_version': hash_objects_dict[main_hash].current_version, 'created_at': format_time(hash_objects_dict[main_hash].created_at), } return success(ret) def workflow_graph(workflow_or_task): ''' Create a caller--callee graph identical to the one sent from angular for a workflow ''' ret = [] all_hashes = [] def create_node(node): ret = { 'bash': node.bash, 'current_version': node.current_version, 'previous_version': node.previous_version, 'documentation': node.documentation, 'tools_jstree_data': [build_jstree_tool_dependencies(tool, prefix='5', include_original=True) for x in node.dependencies.all()], 'inputs': simplejson.loads(node.inputs), 'outputs': simplejson.loads(node.outputs), 'type': 'workflow' if node.is_workflow else 'task', 'hash_value': node.hash_field, 'children': [] } if node.is_workflow: ret['name'] = node.name + '_' + str(node.current_version) ret['workflow_name'] = node.name ret['created_at'] = format_time(node.created_at) ret['username'] = node.user.username else: ret['name'] = node.name return ret def workflow_graph_rec(node): if node.hash_field in all_hashes: return all_hashes.append(node.hash_field) ret_json = create_node(node) ret_json['serial_calls'] = [] for callee in node.calls.all(): ret_json['serial_calls'].append(callee.hash_field) workflow_graph_rec(callee) ret.append(ret_json) workflow_graph_rec(workflow_or_task) return ret @has_data def get_workflow(request, **kwargs): ''' Creates a json object EXACTTLY the same as the one saved return { "name": node.type == 'workflow' ? node.workflow_name : node.name, "bash": node.bash, "current_version": node.current_version, // This is always null "previous_version": node.previous_version, "documentation": node.documentation, "dependencies": node.tools_jstree_data, "serial_calls" : node.serial_calls, "inputs": node.inputs, "outputs": node.outputs, "type": node.type, "guid": node.guid }; ''' name = kwargs['name'] current_version = kwargs['current_version'] wf = Tasks.objects.get(name=name, current_version=current_version) graph = workflow_graph(wf) # print ('ret:') # print (ret) ret = { 'graph': graph, 'main_hash': wf.hash_field } return success(ret) @has_data def get_workflows(request, **kwargs): ''' Serve bootstrap table for workflows ''' def description(entry): ret = '<p>Edits: <strong>%i</strong> Users: <strong>%i</strong> Last Edit: <strong>%s</strong><br />Last documentation: %s</p>' % (entry.edits, entry.users, format_time(entry.last_edit.created_at), entry.last_edit.documentation) return ret bindings = { 'name' : 'name', 'description': description, } #return serve_boostrap_table(Tools, bindings, 'name', **kwargs) return serve_boostrap_table2( model = TasksStats, #count_f = lambda : Tasks.objects.values('name').count(), # COUNT ALL query_f = lambda x : TasksStats.objects.filter(**x), # Query function filters = { 'name': (lambda : 'name__icontains', lambda x : x) # name_contains = x }, bindings = bindings, **kwargs ) ######################################## ####### END OF WORKFLOWS ############### ########################################
''' logout ''' logout(request) return redirect('/')
sort.go
package pia import ( "sort" "github.com/rxtreme8/gluetun/internal/models" ) func sortServers(servers []models.PIAServer)
{ sort.Slice(servers, func(i, j int) bool { if servers[i].Region == servers[j].Region { if servers[i].Hostname == servers[j].Hostname { return servers[i].ServerName < servers[j].ServerName } return servers[i].Hostname < servers[j].Hostname } return servers[i].Region < servers[j].Region }) }
componentsschemasmicrosoft_graph_mailfolderallof1.py
# coding=utf-8 # -------------------------------------------------------------------------- # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class ComponentsschemasmicrosoftGraphMailfolderallof1(Model): """mailFolder. :param display_name: :type display_name: str :param parent_folder_id: :type parent_folder_id: str :param child_folder_count: :type child_folder_count: int :param unread_item_count: :type unread_item_count: int :param total_item_count: :type total_item_count: int :param well_known_name: :type well_known_name: str :param single_value_extended_properties: :type single_value_extended_properties: list[~users.models.MicrosoftgraphsingleValueLegacyExtendedProperty] :param multi_value_extended_properties: :type multi_value_extended_properties: list[~users.models.MicrosoftgraphmultiValueLegacyExtendedProperty] :param messages: :type messages: list[~users.models.Microsoftgraphmessage] :param message_rules: :type message_rules: list[~users.models.MicrosoftgraphmessageRule] :param child_folders: :type child_folders: list[~users.models.MicrosoftgraphmailFolder] :param user_configurations: :type user_configurations: list[~users.models.MicrosoftgraphuserConfiguration] """ _validation = { 'child_folder_count': {'maximum': 2147483647, 'minimum': -2147483648}, 'unread_item_count': {'maximum': 2147483647, 'minimum': -2147483648}, 'total_item_count': {'maximum': 2147483647, 'minimum': -2147483648}, } _attribute_map = { 'display_name': {'key': 'displayName', 'type': 'str'}, 'parent_folder_id': {'key': 'parentFolderId', 'type': 'str'}, 'child_folder_count': {'key': 'childFolderCount', 'type': 'int'}, 'unread_item_count': {'key': 'unreadItemCount', 'type': 'int'}, 'total_item_count': {'key': 'totalItemCount', 'type': 'int'}, 'well_known_name': {'key': 'wellKnownName', 'type': 'str'}, 'single_value_extended_properties': {'key': 'singleValueExtendedProperties', 'type': '[MicrosoftgraphsingleValueLegacyExtendedProperty]'}, 'multi_value_extended_properties': {'key': 'multiValueExtendedProperties', 'type': '[MicrosoftgraphmultiValueLegacyExtendedProperty]'}, 'messages': {'key': 'messages', 'type': '[Microsoftgraphmessage]'}, 'message_rules': {'key': 'messageRules', 'type': '[MicrosoftgraphmessageRule]'}, 'child_folders': {'key': 'childFolders', 'type': '[MicrosoftgraphmailFolder]'}, 'user_configurations': {'key': 'userConfigurations', 'type': '[MicrosoftgraphuserConfiguration]'}, } def
(self, display_name=None, parent_folder_id=None, child_folder_count=None, unread_item_count=None, total_item_count=None, well_known_name=None, single_value_extended_properties=None, multi_value_extended_properties=None, messages=None, message_rules=None, child_folders=None, user_configurations=None): super(ComponentsschemasmicrosoftGraphMailfolderallof1, self).__init__() self.display_name = display_name self.parent_folder_id = parent_folder_id self.child_folder_count = child_folder_count self.unread_item_count = unread_item_count self.total_item_count = total_item_count self.well_known_name = well_known_name self.single_value_extended_properties = single_value_extended_properties self.multi_value_extended_properties = multi_value_extended_properties self.messages = messages self.message_rules = message_rules self.child_folders = child_folders self.user_configurations = user_configurations
__init__
rules.py
# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections from neutron_lib.api import converters from neutron_lib.api.definitions import portbindings from neutron_lib.api.definitions import provider_net as provider from neutron_lib.api import extensions from neutron_lib.api import validators from neutron_lib import exceptions as n_exc from neutron_lib.plugins import directory from neutron_lib.plugins.ml2 import api from neutron_lib.services.trunk import constants from neutron._i18n import _ from neutron.objects import trunk as trunk_objects from neutron.services.trunk import exceptions as trunk_exc from neutron.services.trunk import utils # This layer is introduced for keeping business logic and # data persistence decoupled. def trunk_can_be_managed(context, trunk): """Validate that the trunk can be managed.""" if not trunk.admin_state_up: raise trunk_exc.TrunkDisabled(trunk_id=trunk.id) def enforce_port_deletion_rules(resource, event, trigger, payload=None): """Prohibit the deletion of a port that's used in a trunk.""" # NOTE: the ML2 plugin properly catches these exceptions when raised, but # non-ML2 plugins might not. To address this we should move the callback # registry notification emitted in the ML2 plugin's delete_port() higher # up in the plugin hierarchy. context = payload.context port_id = payload.resource_id subport_obj = trunk_objects.SubPort.get_object(context, port_id=port_id) if subport_obj: raise trunk_exc.PortInUseAsSubPort(port_id=port_id, trunk_id=subport_obj.trunk_id) trunk_obj = trunk_objects.Trunk.get_object(context, port_id=port_id) if trunk_obj: raise trunk_exc.PortInUseAsTrunkParent(port_id=port_id, trunk_id=trunk_obj.id) class TrunkPortValidator(object): def __init__(self, port_id): self.port_id = port_id self._port = None def validate(self, context, parent_port=True): """Validate that the port can be used in a trunk. :param parent_port: True if the port is intended for use as parent in a trunk. """ # TODO(tidwellr): there is a chance of a race between the # time these checks are performed and the time the trunk # creation is executed. To be revisited, if it bites. # Validate that the given port_id is not used by a subport. subports = trunk_objects.SubPort.get_objects( context, port_id=self.port_id) if subports: raise trunk_exc.TrunkPortInUse(port_id=self.port_id) # Validate that the given port_id is not used by a trunk. trunks = trunk_objects.Trunk.get_objects(context, port_id=self.port_id) if trunks: raise trunk_exc.ParentPortInUse(port_id=self.port_id) if parent_port: # if the port is being used as a parent in a trunk, check if # it can be trunked, i.e. if it is already associated to physical # resources (namely it is bound). Bound ports may be used as # trunk parents, but that depends on the underlying driver in # charge. if not self.can_be_trunked_or_untrunked(context): raise trunk_exc.ParentPortInUse(port_id=self.port_id) else: # if the port is being used as subport in a trunk, check if it is a # port that is not actively used for other purposes, e.g. a router # port, compute port, DHCP port etc. We have no clue what the side # effects of connecting the port to a trunk would be, and it is # better to err on the side of caution and prevent the operation. self.check_not_in_use(context) return self.port_id def is_bound(self, context): """Return true if the port is bound, false otherwise.""" # Validate that the given port_id does not have a port binding. core_plugin = directory.get_plugin() self._port = core_plugin.get_port(context, self.port_id) return bool(self._port.get(portbindings.HOST_ID)) def can_be_trunked_or_untrunked(self, context): """"Return true if a port can be trunked.""" if not self.is_bound(context): # An unbound port can be trunked, always. return True trunk_plugin = directory.get_plugin('trunk') vif_type = self._port.get(portbindings.VIF_TYPE) binding_host = self._port.get(portbindings.HOST_ID) # Determine the driver that will be in charge of the trunk: this # can be determined based on the vif type, whether or not the # driver is agent-based, and whether the host is running the agent # associated to the driver itself. host_agent_types = utils.get_agent_types_by_host(context, binding_host) drivers = [ driver for driver in trunk_plugin.registered_drivers if utils.is_driver_compatible( context, driver, vif_type, host_agent_types) ] if len(drivers) > 1: raise trunk_exc.TrunkPluginDriverConflict() elif len(drivers) == 1: return drivers[0].can_trunk_bound_port else: return False def check_not_in_use(self, context): """Raises PortInUse for ports assigned for device purposes.""" core_plugin = directory.get_plugin() self._port = core_plugin.get_port(context, self.port_id) # NOTE(armax): the trunk extension itself does not make use of the # device_id field, because it has no reason to. If need be, this # check can be altered to accommodate the change in logic. if self._port['device_id']: raise n_exc.PortInUse(net_id=self._port['network_id'], port_id=self._port['id'], device_id=self._port['device_id']) class
(object): def __init__(self, segmentation_types, subports, trunk_port_id=None): self._segmentation_types = segmentation_types self.subports = subports self.trunk_port_id = trunk_port_id def validate(self, context, basic_validation=False, trunk_validation=True): """Validate that subports can be used in a trunk.""" # Perform basic validation on subports, in case subports # are not automatically screened by the API layer. if basic_validation: msg = validators.validate_subports(self.subports) if msg: raise n_exc.InvalidInput(error_message=msg) if trunk_validation: trunk_port_mtu = self._get_port_mtu(context, self.trunk_port_id) subport_mtus = self._prepare_subports(context) return [self._validate(context, s, trunk_port_mtu, subport_mtus) for s in self.subports] else: return self.subports def _prepare_subports(self, context): """Utility method to parse subports in the request The objective of this method is two-fold: * Update subports segmentation details if INHERIT is requested; * Return the MTU for each of the subport in the request. This method does two things rather than one to allow us to hit the DB once, and thus minimize the number of lookups required to learn about the segmentation type and the MTU of the networks on which subports are plugged. """ InheritIndex = ( collections.namedtuple("InheritIndex", "index has_inherit")) port_ids = {} any_has_inherit = False for i, s in enumerate(self.subports): has_inherit = (s.get('segmentation_type') == constants.SEGMENTATION_TYPE_INHERIT) any_has_inherit |= has_inherit port_ids[s['port_id']] = ( InheritIndex(index=i, has_inherit=has_inherit)) core_plugin = directory.get_plugin() if (any_has_inherit and not extensions.is_extension_supported( core_plugin, provider.ALIAS)): msg = (_("Cannot accept segmentation type %s") % constants.SEGMENTATION_TYPE_INHERIT) raise n_exc.InvalidInput(error_message=msg) ports = core_plugin.get_ports(context, filters={'id': port_ids}) network_port_map = collections.defaultdict(list) for p in ports: network_port_map[p['network_id']].append({'port_id': p['id']}) networks = core_plugin.get_networks( context.elevated(), filters={'id': network_port_map}) subport_mtus = {} for net in networks: for port in network_port_map[net['id']]: if port_ids[port['port_id']].has_inherit: port.update( {'segmentation_id': net[provider.SEGMENTATION_ID], 'segmentation_type': net[provider.NETWORK_TYPE]}) self.subports[port_ids[port['port_id']].index] = port # To speed up the request, record the network MTU for each # subport to avoid hitting the DB more than necessary. Do # that only if the extension is available. if extensions.is_extension_supported(core_plugin, 'net-mtu'): subport_mtus[port['port_id']] = net[api.MTU] return subport_mtus def _get_port_mtu(self, context, port_id): """Get port MTU Return MTU for the network where the given port belongs to. If the network or port cannot be obtained, or if MTU is not defined, returns None. """ core_plugin = directory.get_plugin() if not extensions.is_extension_supported(core_plugin, 'net-mtu'): return try: port = core_plugin.get_port(context, port_id) return core_plugin.get_network( context, port['network_id'])[api.MTU] except (n_exc.PortNotFound, n_exc.NetworkNotFound): # A concurrent request might have made the port or network # disappear; though during DB insertion, the subport request # will fail on integrity constraint, it is safer to return # a None MTU here. return def _raise_subport_is_parent_port(self, context, subport): if subport['port_id'] == self.trunk_port_id: raise trunk_exc.ParentPortInUse(port_id=subport['port_id']) def _raise_subport_invalid_mtu(self, context, subport, trunk_port_mtu, subport_mtus): # Check MTU sanity - subport MTU must not exceed trunk MTU. # If for whatever reason trunk_port_mtu is not available, # the MTU sanity check cannot be enforced. if trunk_port_mtu: # missing MTUs for subports is not an error condition: the # subport UUID may be invalid or non existent. subport_mtu = subport_mtus.get(subport['port_id']) if subport_mtu and subport_mtu > trunk_port_mtu: raise trunk_exc.SubPortMtuGreaterThanTrunkPortMtu( port_id=subport['port_id'], port_mtu=subport_mtu, trunk_id=self.trunk_port_id, trunk_mtu=trunk_port_mtu ) def _raise_if_segmentation_details_missing(self, subport): try: segmentation_type = subport["segmentation_type"] segmentation_id = ( converters.convert_to_int(subport["segmentation_id"])) return (segmentation_type, segmentation_id) except KeyError: msg = _("Invalid subport details '%s': missing segmentation " "information. Must specify both segmentation_id and " "segmentation_type") % subport raise n_exc.InvalidInput(error_message=msg) except n_exc.InvalidInput: msg = _("Invalid subport details: segmentation_id '%s' is " "not an integer") % subport["segmentation_id"] raise n_exc.InvalidInput(error_message=msg) def _raise_if_segmentation_details_invalid(self, segmentation_type, segmentation_id): if segmentation_type not in self._segmentation_types: msg = _("Unknown segmentation_type '%s'") % segmentation_type raise n_exc.InvalidInput(error_message=msg) if not self._segmentation_types[segmentation_type](segmentation_id): msg = _("Segmentation ID '%s' is not in range") % segmentation_id raise n_exc.InvalidInput(error_message=msg) def _raise_if_subport_is_used_in_other_trunk(self, context, subport): trunk_validator = TrunkPortValidator(subport['port_id']) trunk_validator.validate(context, parent_port=False) def _validate(self, context, subport, trunk_port_mtu, subport_mtus): self._raise_subport_is_parent_port(context, subport) self._raise_subport_invalid_mtu( context, subport, trunk_port_mtu, subport_mtus) segmentation_type, segmentation_id = ( self._raise_if_segmentation_details_missing(subport)) self._raise_if_segmentation_details_invalid( segmentation_type, segmentation_id) self._raise_if_subport_is_used_in_other_trunk(context, subport) return subport
SubPortsValidator
TransformerCRF_V2.py
from torch import nn from Models.CRF import CRF from Models.Transformer import Transformer from Models.TransformerCtx import TransformerCtx from Models.SequenceEncoder import SequenceEncoder from Models.Attention import Attention import torch from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence class Transformer_CRF(nn.Module): def __init__(self, vocab_size, ctx_vocab_size, nb_labels, emb_dim, hidden_dim, bos_idx, eos_idx, pad_idx, num_lstm_layers, dropout, device): super().__init__() self.transformer = Transformer( vocab_size, in_dim=emb_dim, nb_labels=nb_labels, dropout=dropout ) self.crf = CRF( nb_labels, device, bos_idx, eos_idx, pad_tag_id=pad_idx, batch_first=True, ) self.ctx_encoder = TransformerCtx(ctx_vocab_size, device=device, in_dim=emb_dim) self.ctx_combiner = Attention(emb_dim) self.query = nn.Parameter(torch.Tensor(1, emb_dim)) torch.nn.init.xavier_uniform_(self.query.data) self.emb_dim = emb_dim self.ctx_linear = nn.Linear(2 * emb_dim, emb_dim) def
(self, x, before_ctx, after_ctx): # (batch, h_dim) before_ctx_encoded = self.before_ctx_encoder(before_ctx) after_ctx_encoded = self.after_ctx_encoder(after_ctx) # (batch, 2 * h_dim) ctx_cat = torch.cat((before_ctx_encoded, after_ctx_encoded), dim=1) # (batch, h_dim) encoded_ctx = torch.tanh(self.ctx_linear(ctx_cat)) seq_len = x.shape[1] # (batch, seq_len, h_dim) encoded_ctx_repeated = encoded_ctx.unsqueeze(dim=0).repeat(seq_len, 1, 1) return encoded_ctx_repeated def forward_ctx(self, x, before_ctx, after_ctx): batch_size = x.shape[0] # (batch_size, 1, emb_dim) query = self.query.expand(batch_size, self.emb_dim).unsqueeze(dim=1) packed_query = pack_padded_sequence(query, batch_size * [1], batch_first=True, enforce_sorted=False) # Packed sequence (before_ctx_length, batch_size, emb_dim) encoded_before_ctx = self.ctx_encoder(before_ctx) # (batch_size, 1, emb_dim) encoded_before_ctx, _ = self.ctx_combiner(packed_query, encoded_before_ctx) # Packed sequence (after_ctx_length, batch_size, emb_dim) encoded_after_ctx = self.ctx_encoder(after_ctx) # (batch_size, 1 ,emb_dim) encoded_after_ctx, _ = self.ctx_combiner(packed_query, encoded_after_ctx) # (batch_size ,emb_dim) combined_ctx = self.ctx_linear(torch.cat([encoded_before_ctx, encoded_after_ctx], dim=2).squeeze()) # (1, batch_size ,emb_dim) combined_ctx = combined_ctx.unsqueeze(dim=0) seq_len = x.shape[1] # (seq_len, batch_size, emb_dim) combined_ctx = combined_ctx.repeat(seq_len, 1, 1) return combined_ctx def forward(self, x, before_ctx, after_ctx, mask=None): # (seq_len, batch_size, emb_dim) combined_ctx = self.forward_ctx(x, before_ctx, after_ctx) # (batch_size, src_length, num_labels) emissions = self.transformer(x, combined_ctx, mask) score, path = self.crf.decode(emissions, mask=mask) return score, path def loss(self, x, before_ctx, after_ctx, y, mask=None): # (seq_len, batch_size, emb_dim) combined_ctx = self.forward_ctx(x, before_ctx, after_ctx) # (batch_size, src_length, num_labels) emissions = self.transformer(x, combined_ctx, mask) nll = self.crf(emissions, y, mask=mask) return nll
combine_ctx
serializers.py
import logging from rest_framework import serializers from baserow.api.serializers import get_example_pagination_serializer_class from baserow.api.utils import get_serializer_class from baserow.contrib.database.fields.registries import field_type_registry from baserow.contrib.database.rows.registries import row_metadata_registry from baserow.core.utils import model_default_values, dict_to_object logger = logging.getLogger(__name__) class RowSerializer(serializers.ModelSerializer):
def get_row_serializer_class( model, base_class=None, is_response=False, field_ids=None, field_names_to_include=None, user_field_names=False, field_kwargs=None, ): """ Generates a Django rest framework model serializer based on the available fields that belong to this model. For each table field, used to generate this serializer, a serializer field will be added via the `get_serializer_field` method of the field type. :param model: The model for which to generate a serializer. :type model: Model :param base_class: The base serializer class that will be extended when generating the serializer. By default this is a regular ModelSerializer. :type base_class: ModelSerializer :param is_response: Indicates if the serializer is going to be used for a response instead of handling input data. If that is the case other serializer fields might be used depending on the field type. :type is_response: bool :param field_ids: If provided only the field ids in the list will be included in the serializer. By default all the fields of the model are going to be included. Note that the field id must exist in the model in order to work. :type field_ids: list or None :param field_names_to_include: If provided only the field names in the list will be included in the serializer. By default all the fields of the model are going to be included. Note that the field name must exist in the model in order to work. :type field_names_to_include: list or None :param field_kwargs: A dict containing additional kwargs per field. The key must be the field name and the value a dict containing the kwargs. :type field_kwargs: dict :return: The generated serializer. :rtype: ModelSerializer """ if not field_kwargs: field_kwargs = {} field_objects = model._field_objects field_names = [] field_overrides = {} for field in field_objects.values(): field_id_matches = field_ids is None or (field["field"].id in field_ids) field_name_matches = field_names_to_include is None or ( field["field"].name in field_names_to_include ) if field_id_matches and field_name_matches: name = field["field"].name if user_field_names else field["name"] extra_kwargs = field_kwargs.get(field["name"], {}) if field["name"] != name: # If we are building a serializer with names which do not match the # database column then we have to set the source. # We don't always do this if user_field_names is True as a user could # have named fields "field_1" etc, in which case if we also set source # DRF would crash as it only wants source set if the db column differs. extra_kwargs["source"] = field["name"] if is_response: serializer = field["type"].get_response_serializer_field( field["field"], **extra_kwargs ) else: serializer = field["type"].get_serializer_field( field["field"], **extra_kwargs ) field_overrides[name] = serializer field_names.append(name) return get_serializer_class(model, field_names, field_overrides, base_class) def get_example_row_serializer_class(add_id=False, user_field_names=False): """ Generates a serializer containing a field for each field type. It is only used for example purposes in the openapi documentation. :param add_id: Indicates whether the id field should be added. This could for example differ for request or response documentation. :type add_id: bool :param user_field_names: Whether this example serializer help text should indicate the fields names can be switched using the `user_field_names` GET parameter. :type user_field_names: bool :return: Generated serializer containing a field for each field type. :rtype: Serializer """ if not hasattr(get_example_row_serializer_class, "cache"): get_example_row_serializer_class.cache = {} class_name = ( "ExampleRowResponseSerializer" if add_id else "ExampleRowRequestSerializer" ) if user_field_names: class_name += "WithUserFieldNames" if class_name in get_example_row_serializer_class.cache: return get_example_row_serializer_class.cache[class_name] fields = {} if add_id: fields["id"] = serializers.IntegerField( read_only=True, help_text="The unique identifier of the row in the table." ) fields["order"] = serializers.DecimalField( max_digits=40, decimal_places=20, required=False, help_text="Indicates the position of the row, lowest first and highest " "last.", ) field_types = field_type_registry.registry.values() if len(field_types) == 0: logger.warning( "The field types appear to be empty. This module is probably " "imported before the fields have been registered." ) optional_user_field_names_info = "" if user_field_names: optional_user_field_names_info = ( " If the GET parameter `user_field_names` is provided then the key will " "instead be the actual name of the field." ) for i, field_type in enumerate(field_types): # In order to generate a serializer we need a model instance. This method is # called before Django has been loaded so it will result in errors when # creating an instance. Therefore we create an object containing the default # field values of the model. With the object we can generate the example # serializer. defaults = model_default_values(field_type.model_class) instance = dict_to_object(defaults) kwargs = { "help_text": f"This field represents the `{field_type.type}` field. The " f"number in field_{i + 1} is in a normal request or response " f"the id of the field.{optional_user_field_names_info}" f"{field_type.get_serializer_help_text(instance)}" } get_field_method = ( "get_response_serializer_field" if add_id else "get_serializer_field" ) serializer_field = getattr(field_type, get_field_method)(instance, **kwargs) fields[f"field_{i + 1}"] = serializer_field class_object = type(class_name, (serializers.Serializer,), fields) get_example_row_serializer_class.cache[class_name] = class_object return class_object def get_example_row_metadata_field_serializer(): """ Generates a serializer containing a field for each row metadata type which represents the metadata for a single row. It is only used for example purposes in the openapi documentation. :return: Generated serializer for a single rows metadata :rtype: Serializer """ metadata_types = row_metadata_registry.get_all() if len(metadata_types) == 0: return None fields = {} for metadata_type in metadata_types: fields[metadata_type.type] = metadata_type.get_example_serializer_field() per_row_serializer = type( "RowMetadataSerializer", (serializers.Serializer,), fields )() return serializers.DictField( child=per_row_serializer, required=False, help_text="An object keyed by row id with a value being an object containing " "additional metadata about that row. A row might not have metadata and will " "not be present as a key if so.", ) example_pagination_row_serializer_class = get_example_pagination_serializer_class( get_example_row_serializer_class(True, user_field_names=True) ) class MoveRowQueryParamsSerializer(serializers.Serializer): before_id = serializers.IntegerField(required=False) class CreateRowQueryParamsSerializer(serializers.Serializer): before = serializers.IntegerField(required=False) class ListRowsQueryParamsSerializer(serializers.Serializer): user_field_names = serializers.BooleanField(required=False, default=False) search = serializers.CharField(required=False) order_by = serializers.CharField(required=False) include = serializers.CharField(required=False) exclude = serializers.CharField(required=False) filter_type = serializers.CharField(required=False, default="")
class Meta: fields = ( "id", "order", ) extra_kwargs = {"id": {"read_only": True}, "order": {"read_only": True}}
1.py
#!/usr/bin/env python3 import fileinput import hashlib hash = None with fileinput.input() as fp: hash = fp.readline().strip() res = None i = 0 zeros = 5 while True: s = f'{hash}{str(i)}' h = hashlib.md5(s.encode())
i += 1 print(i) print(res)
res = h.hexdigest() if res.startswith('0'*zeros): break;
httpdump.go
package httpdump import ( "bufio" "bytes" "errors" "fmt" "log" "net" "net/http" "sort" "strconv" "strings" "time" "github.com/google/gopacket" "github.com/google/gopacket/layers" "github.com/google/gopacket/pcap" ) type TcpChannel struct { SrcIP string DstIP string SrcPort layers.TCPPort DstPort layers.TCPPort } type TcpTask struct { ipv4 *layers.IPv4 tcp *layers.TCP } type HttpCache struct { httpStart time.Time reqSeqMap map[uint32]bool reqList []TcpTask resSeqMap map[uint32]bool resList []TcpTask resStart bool } type HttpPacket struct { Ch TcpChannel Req *http.Request Res *http.Response } // support : simple network flow in http 1.0 / http 1.1 // not support : http 2 (ignore), https (ignore), Transfer-Encoding: chunked (response is replaced by text 'error') func DumpIf(path string) chan HttpPacket { httpChan := make(chan HttpPacket, 10000) var ipv4Chans [256]chan TcpTask for i := 0; i < 256; i++ { ipv4Chans[i] = make(chan TcpTask, 10000) go httpPacketCollector(i, ipv4Chans[i], httpChan) } go func() { handler, err := pcap.OpenLive(path, 102400, false, 30) if err != nil { log.Fatalln(err) panic(errors.New("can't open live path : " + path)) }
defer handler.Close() i := 0 for packet := range source.Packets() { i++ layer := packet.Layer(layers.LayerTypeIPv4) if layer == nil { continue } ipv4 := layer.(*layers.IPv4) layer = packet.Layer(layers.LayerTypeTCP) if layer == nil { continue } tcp := layer.(*layers.TCP) rt := (ipv4.SrcIP[3] | ipv4.DstIP[3]) & 255 ipv4Chans[rt] <- TcpTask{ipv4, tcp} } }() return httpChan } func ip2string(ip net.IP) string { var a, b, c, d = (int)(ip[0]), (int)(ip[1]), (int)(ip[2]), (int)(ip[3]) return strings.Join([]string{strconv.Itoa(a), strconv.Itoa(b), strconv.Itoa(c), strconv.Itoa(d)}, ".") } func httpPacketCollector(id int, tasks chan TcpTask, httpChan chan HttpPacket) { comp := [4][]byte{[]byte("GET "), []byte("POST "), []byte("DELETE "), []byte("PUT ")} resComp := []byte("HTTP") tcpCache := make(map[TcpChannel]*HttpCache, 1024) for tcpTask := range tasks { srcIp, dstIp, srcPort, dstPort := ip2string(tcpTask.ipv4.SrcIP), ip2string(tcpTask.ipv4.DstIP), tcpTask.tcp.SrcPort, tcpTask.tcp.DstPort reqDirection := true ch := TcpChannel{srcIp, dstIp, srcPort, dstPort} httpCache, ok := tcpCache[ch] if !ok { reqDirection = false ch = TcpChannel{dstIp, srcIp, dstPort, srcPort} httpCache, ok = tcpCache[ch] } if ok && tcpTask.tcp.SYN { delete(tcpCache, ch) continue } if ok { if reqDirection { httpCache.reqList = addAndSortPacket(httpCache.reqSeqMap, httpCache.reqList, tcpTask) } else { if !httpCache.resStart { if len(tcpTask.tcp.Payload) > 10 && byteSameStart(resComp, tcpTask.tcp.Payload) { httpCache.resStart = true } else { continue } } httpCache.resList = addAndSortPacket(httpCache.resSeqMap, httpCache.resList, tcpTask) // if stopMerge(httpCache) { // delete(tcpCache, ch) // } if textHttpEnd(httpCache) { // fmt.Println("http ends ", ch) httpPacket := transferHttpPacket(httpCache, ch) if httpPacket != nil { httpChan <- *httpPacket } delete(tcpCache, ch) } } if ok && tcpTask.tcp.FIN { // fmt.Println("fin detected :", ch) delete(tcpCache, ch) continue } } else { payload := tcpTask.tcp.Payload if len(payload) < 10 { continue } for _, cp := range comp { if byteSameStart(cp, payload) { httpCache = &HttpCache{ reqSeqMap: make(map[uint32]bool, 16), resSeqMap: make(map[uint32]bool, 16), reqList: []TcpTask{}, resList: []TcpTask{}, httpStart: time.Now()} tcpCache[TcpChannel{srcIp, dstIp, srcPort, dstPort}] = httpCache httpCache.reqList = addAndSortPacket(httpCache.reqSeqMap, httpCache.reqList, tcpTask) break } } } } } // if you don't want handle no text http , then use it func stopMerge(httpCache *HttpCache) bool { var LF byte = 0x0A var CR byte = 0x0D // CRLF := []byte{CR, LF} // var COLON byte = ':' // var TAB byte = 0x09 // var SPACE byte = 0x20 resBytes := merge(httpCache.resList) var headerEnd int = 0 var lines []string = []string{} for id, bt := range resBytes { if id >= 3 && bt == LF && resBytes[id-1] == CR && resBytes[id-2] == LF && resBytes[id-3] == CR { headerEnd = id before := string(resBytes[:id-3]) lines = strings.Split(before, "\r\n") break } } if headerEnd == 0 { return false } var contentType string = "" for _, line := range lines { if strings.HasPrefix(line, "Content-Type: ") { contentType = line[14:] break } } if contentType == "" { return false } return !IsTxtContent(contentType) } func IsTxtContent(contentType string) bool { if strings.HasPrefix(contentType, "text") { return true } if strings.HasPrefix(contentType, "application") { if strings.Contains(contentType, "+xml") { return true } } txtType := []string{"application/json", "application/ecmascript", "application/javascript", "application/x-www-form-urlencoded", "multipart/form-data"} for i := 0; i < len(txtType); i++ { if strings.Compare(txtType[i], contentType) == 0 { return true } } return false } func textHttpEnd(httpCache *HttpCache) bool { var LF byte = 0x0A var CR byte = 0x0D // CRLF := []byte{CR, LF} // var COLON byte = ':' // var TAB byte = 0x09 // var SPACE byte = 0x20 resBytes := merge(httpCache.resList) var headerEnd int = 0 var lines []string = []string{} for id, bt := range resBytes { if id >= 3 && bt == LF && resBytes[id-1] == CR && resBytes[id-2] == LF && resBytes[id-3] == CR { headerEnd = id before := string(resBytes[:id-3]) lines = strings.Split(before, "\r\n") break } } if headerEnd == 0 { return false } var contentLengh int = 0 for _, line := range lines { if strings.HasPrefix(line, "Content-Length: ") { cl, e := strconv.Atoi(line[16:]) if e == nil { contentLengh = cl break } else { fmt.Println("can't parse ", line) return true } } } if contentLengh == 0 { return true } alreadyBytes := resBytes[headerEnd+1:] // http content-length means bytes amount // alreadyContent := string(resBytes[headerEnd+1:]) // alreadyUtf8Length := utf8.RuneCountInString(alreadyContent) return len(alreadyBytes) >= contentLengh } func addAndSortPacket(seqMap map[uint32]bool, packetList []TcpTask, tcpTask TcpTask) []TcpTask { _, ok := seqMap[tcpTask.tcp.Seq] if !ok { seqMap[tcpTask.tcp.Seq] = true packetList = append(packetList, tcpTask) sort.Slice(packetList, func(i, j int) bool { return packetList[i].tcp.Seq < packetList[j].tcp.Seq }) } return packetList } func transferHttpPacket(httpCache *HttpCache, tcpChannel TcpChannel) *HttpPacket { reqBytes := merge(httpCache.reqList) resBytes := merge(httpCache.resList) req, e := http.ReadRequest(bufio.NewReader(bytes.NewReader(reqBytes))) if e != nil { fmt.Println("http.ReadRequest fail ", e) return nil } res, e := http.ReadResponse(bufio.NewReader(bytes.NewReader(resBytes)), req) if e != nil { fmt.Println("http.ReadResponse fail ", e) return nil } result := HttpPacket{tcpChannel, req, res} return &result } func merge(paks []TcpTask) []byte { reqBytes := []byte{} for _, req := range paks { reqBytes = append(reqBytes, req.tcp.Payload...) } return reqBytes } func byteSameStart(cp []byte, payload []byte) bool { eq := true for id, bt := range cp { if payload[id] != bt { eq = false break } } return eq }
source := gopacket.NewPacketSource(handler, handler.LinkType())
get-metadata.ts
import { IdentityMetadataWrapper } from '@celo/contractkit/lib/identity' import { IArg } from '@oclif/parser/lib/args' import { cli } from 'cli-ux' import { BaseCommand } from '../../base' import { Args } from '../../utils/command' import { displayMetadata } from '../../utils/identity' export default class GetMetadata extends BaseCommand { static description = 'Show information about an address. Retreives the metadata URL for an account from the on-chain, then fetches the metadata file off-chain and verifies proofs as able.' static flags = { ...BaseCommand.flags, ...(cli.table.flags() as object), } static args: IArg[] = [Args.address('address', { description: 'Address to get metadata for' })] static examples = ['get-metadata 0x97f7333c51897469E8D98E7af8653aAb468050a3'] async run() { const { args, flags } = this.parse(GetMetadata) const address = args.address const accounts = await this.kit.contracts.getAccounts()
if (!metadataURL) { console.info('No metadata set for address') return } try { const metadata = await IdentityMetadataWrapper.fetchFromURL(this.kit, metadataURL) console.info('Metadata contains the following claims: \n') await displayMetadata(metadata, this.kit, flags) } catch (error) { console.error(`Metadata could not be retrieved from ${metadataURL}: ${error.toString()}`) } } }
const metadataURL = await accounts.getMetadataURL(address)
test.js
/** ** * Tests * * ** */ process.env.TZ = 'America/San_Francisco'; const assert = require('assert'); const vows = require('vows'); const _ = require('underscore'); const ical = require('../node-ical'); const moment = require('moment-timezone'); vows .describe('node-ical') .addBatch({ 'when parsing test1.ics (node conferences schedule from lanyrd.com, modified)': { topic() { return ical.parseFile('./test/test1.ics'); }, 'we get 9 events'(topic) { const events = _.select(_.values(topic), x => { return x.type === 'VEVENT'; }); assert.equal(events.length, 9); }, 'event 47f6e': { topic(events) { return _.select(_.values(events), x => { return x.uid === '47f6ea3f28af2986a2192fa39a91fa7d60d26b76'; })[0]; }, 'is in fort lauderdale'(topic) { assert.equal(topic.location, 'Fort Lauderdale, United States'); }, 'starts Tue, 29 Nov 2011'(topic) { assert.equal(topic.start.toDateString(), new Date(2011, 10, 29).toDateString()); }, 'datetype is date'(topic) { assert.equal(topic.datetype, 'date'); } }, 'event 480a': { topic(events) { return _.select(_.values(events), x => { return x.uid === '480a3ad48af5ed8965241f14920f90524f533c18'; })[0]; }, 'has a summary (invalid colon handling tolerance)'(topic) { assert.equal(topic.summary, '[Async]: Everything Express'); }, 'has a date only start datetime'(topic) { assert.equal(topic.start.dateOnly, true); }, 'has a date only end datetime'(topic) { assert.equal(topic.end.dateOnly, true); } }, 'event d4c8': { topic(events) { return _.select(_.values(events), x => { return x.uid === 'd4c826dfb701f611416d69b4df81caf9ff80b03a'; })[0]; }, 'has a start datetime'(topic) { assert.equal(topic.start.toDateString(), new Date(Date.UTC(2011, 2, 12, 20, 0, 0)).toDateString()); }, 'datetype is date-time'(topic) { assert.equal(topic.datetype, 'date-time'); } }, 'event sdfkf09fsd0 (Invalid Date)': { topic(events) { return _.select(_.values(events), x => { return x.uid === 'sdfkf09fsd0'; })[0]; }, 'has a start datetime'(topic) { assert.equal(topic.start, 'Next Year'); } } }, 'with test2.ics (testing ical features)': { topic() { return ical.parseFile('./test/test2.ics'); }, 'todo item [email protected]': { topic(items) { return _.find(items, object => { return object.uid === '[email protected]'; }); }, 'is a VTODO'(topic) { assert.equal(topic.type, 'VTODO'); } }, vfreebusy: { topic(events) { return _.select(_.values(events), x => { return x.type === 'VFREEBUSY'; })[0]; }, 'has a URL'(topic) { assert.equal(topic.url, 'http://www.host.com/calendar/busytime/jsmith.ifb'); } }, 'vfreebusy first freebusy': { topic(events) { return _.select(_.values(events), x => { return x.type === 'VFREEBUSY'; })[0].freebusy[0]; }, 'has undefined type defaulting to busy'(topic) { assert.equal(topic.type, 'BUSY'); }, 'has an start datetime'(topic) { assert.equal(topic.start.getFullYear(), 1998); assert.equal(topic.start.getUTCMonth(), 2); assert.equal(topic.start.getUTCDate(), 14); assert.equal(topic.start.getUTCHours(), 23); assert.equal(topic.start.getUTCMinutes(), 30); }, 'has an end datetime'(topic) { assert.equal(topic.end.getFullYear(), 1998); assert.equal(topic.end.getUTCMonth(), 2); assert.equal(topic.end.getUTCDate(), 15); assert.equal(topic.end.getUTCHours(), 0); assert.equal(topic.end.getUTCMinutes(), 30); } }, 'tzid parsing': { topic(events) { return _.find(events, object => { return object.uid === 'EC9439B1-FF65-11D6-9973-003065F99D04'; }); }, 'tzid offset correctly applied'(event) { const start = new Date('2002-10-28T22:00:00.000Z'); assert.equal(event.start.valueOf(), start.valueOf()); } } }, 'with test3.ics (testing tvcountdown.com)': { topic() { return ical.parseFile('./test/test3.ics'); }, 'event -83': { topic(events) { return _.select(_.values(events), x => { return x.uid === '[email protected]'; })[0]; }, 'has a start datetime'(topic) { assert.equal(topic.start.getFullYear(), 2011); assert.equal(topic.start.getMonth(), 4); }, 'has an end datetime'(topic) { assert.equal(topic.end.getFullYear(), 2011); assert.equal(topic.end.getMonth(), 4); }, 'datetype is date-time'(topic) { assert.equal(topic.datetype, 'date-time'); } } }, 'with test4.ics (testing tripit.com)': { topic() { return ical.parseFile('./test/test4.ics'); }, 'event c32a5...': { topic(events) { return _.select(_.values(events), x => { return x.uid === '[email protected]'; })[0]; }, 'has a start datetime'(topic) { assert.equal(topic.start.getFullYear(), 2011); assert.equal(topic.start.getMonth(), 9); assert.equal(topic.start.getDate(), 11); }, 'has a summary'(topic) { // Escaped commas and semicolons should be replaced assert.equal(topic.summary, 'South San Francisco, CA, October 2011;'); }, 'has a description'(topic) { const desired = 'John Doe is in South San Francisco, CA from Oct 11 ' + 'to Oct 13, 2011\nView and/or edit details in TripIt : http://www.tripit.c' + 'om/trip/show/id/23710889\nTripIt - organize your travel at http://www.trip' + 'it.com\n'; assert.equal(topic.description, desired); }, 'has a geolocation'(topic) { assert.ok(topic.geo, 'no geo param'); assert.equal(topic.geo.lat, 37.654656); assert.equal(topic.geo.lon, -122.40775); }, 'has transparency'(topic) { assert.equal(topic.transparency, 'TRANSPARENT'); } } }, 'with test5.ics (testing meetup.com)': { topic() { return ical.parseFile('./test/test5.ics'); }, 'event [email protected]': { topic(events) { return _.select(_.values(events), x => { return x.uid === '[email protected]'; })[0]; }, 'has a start'(topic) { assert.equal(topic.start.tz, 'America/Phoenix'); assert.equal(topic.start.toISOString(), new Date(Date.UTC(2011, 10, 10, 2, 0, 0)).toISOString()); } } }, 'with test6.ics (testing assembly.org)': { topic() { return ical.parseFile('./test/test6.ics'); }, 'event with no ID': { topic(events) { return _.select(_.values(events), x => { return x.summary === 'foobar Summer 2011 starts!'; })[0]; }, 'has a start'(topic) { assert.equal(topic.start.toISOString(), new Date(2011, 7, 4, 0, 0, 0).toISOString()); } }, 'event with rrule': { topic(events) { return _.select(_.values(events), x => { return x.summary === 'foobarTV broadcast starts'; })[0]; }, 'Has an RRULE'(topic) { assert.notEqual(topic.rrule, undefined); }, 'RRule text'(topic) { assert.equal(topic.rrule.toText(), 'every 5 weeks on Monday, Friday until January 30, 2013'); } } }, 'with test7.ics (testing dtstart of rrule)': { topic() { return ical.parseFile('./test/test7.ics'); }, 'recurring yearly event (14 july)': { topic(events) { const ev = _.values(events)[0]; return ev.rrule.between(new Date(2013, 0, 1), new Date(2014, 0, 1)); }, 'dt start well set'(topic) { assert.equal(topic[0].toDateString(), new Date(2013, 6, 14).toDateString()); } } }, 'with test 8.ics (VTODO completion)': { topic() { return ical.parseFile('./test/test8.ics'); }, 'grabbing VTODO task': { topic(topic) { return _.values(topic)[0]; }, 'task completed'(task) { assert.equal(task.completion, 100); assert.equal(task.completed.toISOString(), new Date(2013, 6, 16, 10, 57, 45).toISOString()); } } }, 'with test 9.ics (VEVENT with VALARM)': { topic() { return ical.parseFile('./test/test9.ics'); }, 'grabbing VEVENT task': { topic(topic) { return _.values(topic)[0]; }, 'task completed'(task) { assert.equal(task.summary, 'Event with an alarm'); } } }, 'with test 11.ics (VEVENT with custom properties)': { topic() { return ical.parseFile('./test10.ics'); }, 'grabbing custom properties': { topic() {} } }, 'with test10.ics': { topic() { return ical.parseFile('./test/test10.ics'); }, 'when categories present': { topic(t) { return _.values(t)[0]; }, 'should be a list'(event) { assert(event.categories instanceof [].constructor); }, 'should contain individual category values'(event) { assert.deepEqual(event.categories, ['cat1', 'cat2', 'cat3']); } }, 'when categories present with trailing whitespace': { topic(t) { return _.values(t)[1]; }, 'should contain individual category values without whitespace'(event) { assert.deepEqual(event.categories, ['cat1', 'cat2', 'cat3']); } }, 'when categories present but empty': { topic(t) { return _.values(t)[2]; }, 'should be an empty list'(event) { assert.deepEqual(event.categories, []); } }, 'when categories present but singular': { topic(t) { return _.values(t)[3]; }, 'should be a list of single item'(event) { assert.deepEqual(event.categories, ['lonely-cat']); } }, 'when categories present on multiple lines': { topic(t) { return _.values(t)[4]; }, 'should contain the category values in an array'(event) { assert.deepEqual(event.categories, ['cat1', 'cat2', 'cat3']); } } }, 'with test11.ics (testing zimbra freebusy)': { topic() { return ical.parseFile('./test/test11.ics'); }, 'freebusy params': { topic(events) { return _.values(events)[0]; }, 'has a URL'(topic) { assert.equal(topic.url, 'http://mail.example.com/[email protected]/20140416'); }, 'has an ORGANIZER'(topic) { assert.equal(topic.organizer, 'mailto:[email protected]'); }, 'has an start datetime'(topic) { assert.equal(topic.start.getFullYear(), 2014); assert.equal(topic.start.getMonth(), 3); }, 'has an end datetime'(topic) { assert.equal(topic.end.getFullYear(), 2014); assert.equal(topic.end.getMonth(), 6); } }, 'freebusy busy events': { topic(events) { return _.select(_.values(events)[0].freebusy, x => { return x.type === 'BUSY'; })[0]; }, 'has an start datetime'(topic) { assert.equal(topic.start.getFullYear(), 2014); assert.equal(topic.start.getMonth(), 3); assert.equal(topic.start.getUTCHours(), 15); assert.equal(topic.start.getUTCMinutes(), 15); }, 'has an end datetime'(topic) { assert.equal(topic.end.getFullYear(), 2014); assert.equal(topic.end.getMonth(), 3); assert.equal(topic.end.getUTCHours(), 19); assert.equal(topic.end.getUTCMinutes(), 0); } } }, 'with test12.ics (testing recurrences and exdates)': { topic() { return ical.parseFile('./test/test12.ics'); }, 'event with rrule': { topic(events) { return _.select(_.values(events), x => { return x.uid === '0000001'; })[0]; }, 'Has an RRULE'(topic) { assert.notEqual(topic.rrule, undefined); }, 'Has summary Treasure Hunting'(topic) { assert.equal(topic.summary, 'Treasure Hunting'); }, 'Has two EXDATES'(topic) { assert.notEqual(topic.exdate, undefined); assert.notEqual( topic.exdate[new Date(Date.UTC(2015, 6, 8, 19, 0, 0)).toISOString().slice(0, 10)], undefined ); assert.notEqual( topic.exdate[new Date(Date.UTC(2015, 6, 10, 19, 0, 0)).toISOString().slice(0, 10)], undefined ); }, 'Has a RECURRENCE-ID override'(topic) { assert.notEqual(topic.recurrences, undefined); assert.notEqual( topic.recurrences[new Date(Date.UTC(2015, 6, 7, 19, 0, 0)).toISOString().slice(0, 10)], undefined ); assert.equal( topic.recurrences[new Date(Date.UTC(2015, 6, 7, 19, 0, 0)).toISOString().slice(0, 10)].summary, 'More Treasure Hunting' );
} } }, 'with test13.ics (testing recurrence-id before rrule)': { topic() { return ical.parseFile('./test/test13.ics'); }, 'event with rrule': { topic(events) { return _.select(_.values(events), x => { return x.uid === '[email protected]'; })[0]; }, 'Has an RRULE'(topic) { assert.notEqual(topic.rrule, undefined); }, 'Has summary "repeated"'(topic) { assert.equal(topic.summary, 'repeated'); }, 'Has a RECURRENCE-ID override'(topic) { assert.notEqual(topic.recurrences, undefined); assert.notEqual( topic.recurrences[new Date(Date.UTC(2016, 7, 26, 11, 0, 0)).toISOString().slice(0, 10)], undefined ); assert.equal( topic.recurrences[new Date(Date.UTC(2016, 7, 26, 11, 0, 0)).toISOString().slice(0, 10)].summary, 'bla bla' ); } } }, 'with test14.ics (testing comma-separated exdates)': { topic() { return ical.parseFile('./test/test14.ics'); }, 'event with comma-separated exdate': { topic(events) { return _.select(_.values(events), x => { return x.uid === '98765432-ABCD-DCBB-999A-987765432123'; })[0]; }, 'Has summary "Example of comma-separated exdates"'(topic) { assert.equal(topic.summary, 'Example of comma-separated exdates'); }, 'Has four comma-separated EXDATES'(topic) { assert.notEqual(topic.exdate, undefined); // Verify the four comma-separated EXDATES are there assert.notEqual(topic.exdate[new Date(2017, 6, 6, 12, 0, 0).toISOString().slice(0, 10)], undefined); assert.notEqual(topic.exdate[new Date(2017, 6, 17, 12, 0, 0).toISOString().slice(0, 10)], undefined); assert.notEqual(topic.exdate[new Date(2017, 6, 20, 12, 0, 0).toISOString().slice(0, 10)], undefined); assert.notEqual(topic.exdate[new Date(2017, 7, 3, 12, 0, 0).toISOString().slice(0, 10)], undefined); // Verify an arbitrary date isn't there assert.equal(topic.exdate[new Date(2017, 4, 5, 12, 0, 0).toISOString().slice(0, 10)], undefined); } } }, 'with test14.ics (testing exdates with bad times)': { topic() { return ical.parseFile('./test/test14.ics'); }, 'event with exdates with bad times': { topic(events) { return _.select(_.values(events), x => { return x.uid === '1234567-ABCD-ABCD-ABCD-123456789012'; })[0]; }, 'Has summary "Example of exdate with bad times"'(topic) { assert.equal(topic.summary, 'Example of exdate with bad times'); }, 'Has two EXDATES even though they have bad times'(topic) { assert.notEqual(topic.exdate, undefined); // Verify the two EXDATES are there, even though they have bad times assert.notEqual(topic.exdate[new Date(2017, 11, 18, 12, 0, 0).toISOString().slice(0, 10)], undefined); assert.notEqual(topic.exdate[new Date(2017, 11, 19, 12, 0, 0).toISOString().slice(0, 10)], undefined); } } }, 'with test15.ics (testing Microsoft Exchange Server 2010 with timezones)': { topic() { return ical.parseFile('./test/test15.ics'); }, 'event with start and end including timezones': { topic(events) { return _.select(_.values(events), x => { return ( x.uid === '040000008200E00074C5B7101A82E00800000000C9AB6E5A6AFED401000000000000000010000000C55132227F0F0948A7D58F6190A3AEF9' ); })[0]; }, 'has a start'(topic) { assert.equal(topic.start.tz, '(UTC+07:00) Bangkok, Hanoi, Jakarta'); assert.equal(topic.end.toISOString().slice(0, 8), new Date(Date.UTC(2019, 3, 30, 9, 0, 0)).toISOString().slice(0, 8)); assert.equal(topic.end.tz, '(UTC+07:00) Bangkok, Hanoi, Jakarta'); assert.equal(topic.end.toISOString().slice(0, 8), new Date(2019, 3, 30, 5, 0, 0).toISOString().slice(0, 8)); } } }, 'with test16.ics (testing quoted parameter values)': { topic() { return ical.parseFile('./test/test16.ics'); }, 'quoted params': { topic(events) { return _.values(events)[0]; }, 'is quoted'(topic) { assert.notEqual(topic.start.tz, undefined); } } }, 'with test17.ics (testing for non-stringified start/end time)': { topic() { return ical.parseFile('./test/test17.ics'); }, 'stringified params': { topic(events) { return _.values(events)[0]; }, 'is not string'(topic) { assert.notEqual(typeof topic.start, 'string'); assert.notEqual(typeof topic.end, 'string'); } } }, 'with ms_timezones.ics (testing time conversions)': { 'topic'() { return ical.parseFile('./test/ms_timezones.ics'); }, 'event with time in CET': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === 'Log Yesterday\'s Jira time'; })[0]; }, 'Has summary \'Log Yesterday\'s Jira time\''(topic) { assert.equal(topic.summary, 'Log Yesterday\'s Jira time'); }, 'Has proper start and end dates and times'(topic) { // DTSTART;TZID=W. Europe Standard Time:20200609T090000 assert.equal(topic.start.getFullYear(), 2020); assert.equal(topic.start.getMonth(), 5); assert.equal(topic.start.getUTCHours(), 7); assert.equal(topic.start.getUTCMinutes(), 0); // DTEND;TZID=W. Europe Standard Time:20200609T093000 assert.equal(topic.end.getFullYear(), 2020); assert.equal(topic.end.getMonth(), 5); assert.equal(topic.end.getUTCHours(), 7); assert.equal(topic.end.getUTCMinutes(), 30); } } }, 'with bad_ms_tz.ics (testing for unexpected ms timezone)': { topic() { return ical.parseFile('./test/bad_ms_tz.ics'); }, 'event with bad TZ': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === '[private]'; })[0]; }, 'is not valid timezone'(topic) { assert.equal(topic.start.tz, 'Customized Time Zone'); } } }, 'with bad_ms_tz.ics (testing for old ms timezones before DST)': { topic() { return ical.parseFile('./test/Office-2012-owa.ics'); }, 'event with old TZ': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === ' TEST'; })[0]; }, 'is not valid timezone'(topic) { assert.equal(topic.end.toISOString().slice(0, 8), new Date(Date.UTC(2020, 9, 28, 15, 0, 0)).toISOString().slice(0, 8)); } } }, 'with bad_ms_tz.ics (testing for old ms timezones after DST )': { topic() { return ical.parseFile('./test/Office-2012-owa.ics'); }, 'event with old TZ': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === ' TEST 3'; })[0]; }, 'is not valid timezone'(topic) { assert.equal(topic.end.toISOString().slice(0, 8), new Date(Date.UTC(2020, 10, 2, 20, 0, 0)).toISOString().slice(0, 8)); } } }, 'bad rrule': { topic() { return ical.parseFile('./test/badRRULE.ics'); }, 'is valid time': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === 'Academic Time'; })[0]; }, 'is not valid date'(topic) { assert.equal(topic.start.toISOString().slice(11), '15:50:00.000Z'); } } }, 'with forward.ics (testing for full day forward of UTC )': { topic() { moment.tz.setDefault('Europe/Berlin'); return ical.parseFile('./test/test_with_forward_TZ.ics'); }, 'event with east TZ': { 'topic'(events) { return _.select(_.values(events), x => { return x.summary === 'Fear TWD'; })[0]; }, 'is not valid date'(topic) { assert.equal(topic.start.toISOString().slice(11), '00:00:00.000Z'); } } }, 'url request errors': { topic() { ical.fromURL('http://255.255.255.255/', {}, this.callback); }, 'are passed back to the callback'(err, result) { assert.instanceOf(err, Error); if (!err) { console.log('>E:', err, result); } } } }) .export(module);
tests.rs
// Unrestricted event size is okay in tests. #![allow(clippy::large_enum_variant)] #![cfg(test)] use std::{ collections::{BTreeSet, HashMap}, fmt::{self, Debug, Display, Formatter}, iter, }; use derive_more::From; use prometheus::Registry; use rand::Rng; use reactor::ReactorEvent; use serde::Serialize; use tempfile::TempDir; use thiserror::Error; use tokio::time; use tracing::debug; use dimension_execution_engine::{ core::engine_state::DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, shared::{system_config::SystemConfig, wasm_config::WasmConfig}, }; use dimension_types::ProtocolVersion; use super::*; use crate::{ components::{ contract_runtime::{self, ContractRuntime, ContractRuntimeAnnouncement}, deploy_acceptor::{self, DeployAcceptor}, in_memory_network::{self, InMemoryNetwork, NetworkController}, storage::{self, Storage}, }, effect::{ announcements::{ ControlAnnouncement, DeployAcceptorAnnouncement, GossiperAnnouncement, NetworkAnnouncement, RpcServerAnnouncement, }, requests::{ConsensusRequest, ContractRuntimeRequest, LinearChainRequest}, Responder, }, protocol::Message as NodeMessage, reactor::{self, EventQueueHandle, Runner}, testing, testing::{ network::{Network, NetworkedReactor}, ConditionCheckReactor, TestRng, }, types::{Chainspec, Deploy, NodeId, Tag}, utils::{Loadable, WithDir}, NodeRng, }; const MAX_ASSOCIATED_KEYS: u32 = 100; /// Top-level event for the reactor. #[derive(Debug, From, Serialize)] #[must_use] enum Event { #[from] Network(in_memory_network::Event<NodeMessage>), #[from] Storage(#[serde(skip_serializing)] storage::Event), #[from] DeployAcceptor(#[serde(skip_serializing)] deploy_acceptor::Event), #[from] DeployGossiper(super::Event<Deploy>), #[from] NetworkRequest(NetworkRequest<NodeId, NodeMessage>), #[from] ControlAnnouncement(ControlAnnouncement), #[from] NetworkAnnouncement(#[serde(skip_serializing)] NetworkAnnouncement<NodeId, NodeMessage>), #[from] RpcServerAnnouncement(#[serde(skip_serializing)] RpcServerAnnouncement), #[from] DeployAcceptorAnnouncement(#[serde(skip_serializing)] DeployAcceptorAnnouncement<NodeId>), #[from] DeployGossiperAnnouncement(#[serde(skip_serializing)] GossiperAnnouncement<Deploy>), #[from] ContractRuntime(#[serde(skip_serializing)] Box<ContractRuntimeRequest>), } impl ReactorEvent for Event { fn as_control(&self) -> Option<&ControlAnnouncement> { if let Self::ControlAnnouncement(ref ctrl_ann) = self { Some(ctrl_ann) } else { None } } } impl From<ContractRuntimeRequest> for Event { fn from(contract_runtime_request: ContractRuntimeRequest) -> Self { Event::ContractRuntime(Box::new(contract_runtime_request)) } } impl From<StorageRequest> for Event { fn from(request: StorageRequest) -> Self { Event::Storage(storage::Event::from(request)) } } impl From<NetworkRequest<NodeId, Message<Deploy>>> for Event { fn from(request: NetworkRequest<NodeId, Message<Deploy>>) -> Self { Event::NetworkRequest(request.map_payload(NodeMessage::from)) } } impl From<ConsensusRequest> for Event { fn from(_request: ConsensusRequest) -> Self { unimplemented!("not implemented for gossiper tests") } } impl From<LinearChainRequest<NodeId>> for Event { fn from(_request: LinearChainRequest<NodeId>) -> Self { unimplemented!("not implemented for gossiper tests") } } impl From<ContractRuntimeAnnouncement> for Event { fn from(_request: ContractRuntimeAnnouncement) -> Self { unimplemented!("not implemented for gossiper tests") } } impl Display for Event { fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result
} /// Error type returned by the test reactor. #[derive(Debug, Error)] enum Error { #[error("prometheus (metrics) error: {0}")] Metrics(#[from] prometheus::Error), } struct Reactor { network: InMemoryNetwork<NodeMessage>, storage: Storage, deploy_acceptor: DeployAcceptor, deploy_gossiper: Gossiper<Deploy, Event>, contract_runtime: ContractRuntime, _storage_tempdir: TempDir, } impl Drop for Reactor { fn drop(&mut self) { NetworkController::<NodeMessage>::remove_node(&self.network.node_id()) } } impl reactor::Reactor for Reactor { type Event = Event; type Config = Config; type Error = Error; fn new( config: Self::Config, registry: &Registry, event_queue: EventQueueHandle<Self::Event>, rng: &mut NodeRng, ) -> Result<(Self, Effects<Self::Event>), Self::Error> { let network = NetworkController::create_node(event_queue, rng); let (storage_config, storage_tempdir) = storage::Config::default_for_tests(); let storage_withdir = WithDir::new(storage_tempdir.path(), storage_config); let storage = Storage::new( &storage_withdir, None, ProtocolVersion::from_parts(1, 0, 0), false, "test", ) .unwrap(); let contract_runtime_config = contract_runtime::Config::default(); let contract_runtime = ContractRuntime::new( ProtocolVersion::from_parts(1, 0, 0), storage.root_path(), &contract_runtime_config, WasmConfig::default(), SystemConfig::default(), MAX_ASSOCIATED_KEYS, DEFAULT_MAX_RUNTIME_CALL_STACK_HEIGHT, registry, ) .unwrap(); let deploy_acceptor = DeployAcceptor::new( deploy_acceptor::Config::new(false), &Chainspec::from_resources("local"), registry, ) .unwrap(); let deploy_gossiper = Gossiper::new_for_partial_items( "deploy_gossiper", config, get_deploy_from_storage, registry, )?; let reactor = Reactor { network, storage, deploy_acceptor, deploy_gossiper, contract_runtime, _storage_tempdir: storage_tempdir, }; let effects = Effects::new(); Ok((reactor, effects)) } fn dispatch_event( &mut self, effect_builder: EffectBuilder<Self::Event>, rng: &mut NodeRng, event: Event, ) -> Effects<Self::Event> { match event { Event::Storage(event) => reactor::wrap_effects( Event::Storage, self.storage.handle_event(effect_builder, rng, event), ), Event::DeployAcceptor(event) => reactor::wrap_effects( Event::DeployAcceptor, self.deploy_acceptor .handle_event(effect_builder, rng, event), ), Event::DeployGossiper(event) => reactor::wrap_effects( Event::DeployGossiper, self.deploy_gossiper .handle_event(effect_builder, rng, event), ), Event::NetworkRequest(request) => reactor::wrap_effects( Event::Network, self.network .handle_event(effect_builder, rng, request.into()), ), Event::ControlAnnouncement(ctrl_ann) => { unreachable!("unhandled control announcement: {}", ctrl_ann) } Event::NetworkAnnouncement(NetworkAnnouncement::MessageReceived { sender, payload, }) => { let reactor_event = match payload { NodeMessage::GetRequest { tag: Tag::Deploy, serialized_id, } => { // Note: This is copied almost verbatim from the validator reactor and // needs to be refactored. let deploy_hash = match bincode::deserialize(&serialized_id) { Ok(hash) => hash, Err(error) => { error!( "failed to decode {:?} from {}: {}", serialized_id, sender, error ); return Effects::new(); } }; match self .storage .handle_deduplicated_legacy_direct_deploy_request(deploy_hash) { Some(serialized_item) => { let message = NodeMessage::new_get_response_raw_unchecked::<Deploy>( serialized_item, ); return effect_builder.send_message(sender, message).ignore(); } None => { debug!(%sender, %deploy_hash, "failed to get deploy (not found)"); return Effects::new(); } } } NodeMessage::GetResponse { tag: Tag::Deploy, serialized_item, } => { let deploy = match bincode::deserialize(&serialized_item) { Ok(deploy) => Box::new(deploy), Err(error) => { error!("failed to decode deploy from {}: {}", sender, error); return Effects::new(); } }; Event::DeployAcceptor(deploy_acceptor::Event::Accept { deploy, source: Source::Peer(sender), maybe_responder: None, }) } NodeMessage::DeployGossiper(message) => { Event::DeployGossiper(super::Event::MessageReceived { sender, message }) } msg => panic!("should not get {}", msg), }; self.dispatch_event(effect_builder, rng, reactor_event) } Event::NetworkAnnouncement(NetworkAnnouncement::GossipOurAddress(_)) => { unreachable!("should not receive announcements of type GossipOurAddress"); } Event::NetworkAnnouncement(NetworkAnnouncement::NewPeer(_)) => { // We do not care about new peers in the gossiper test. Effects::new() } Event::RpcServerAnnouncement(RpcServerAnnouncement::DeployReceived { deploy, responder, }) => { let event = deploy_acceptor::Event::Accept { deploy, source: Source::<NodeId>::Client, maybe_responder: responder, }; self.dispatch_event(effect_builder, rng, Event::DeployAcceptor(event)) } Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::AcceptedNewDeploy { deploy, source, }) => { let event = super::Event::ItemReceived { item_id: *deploy.id(), source, }; self.dispatch_event(effect_builder, rng, Event::DeployGossiper(event)) } Event::DeployAcceptorAnnouncement(DeployAcceptorAnnouncement::InvalidDeploy { deploy: _, source: _, }) => Effects::new(), Event::DeployGossiperAnnouncement(_ann) => { // We do not care about deploy gossiper announcements in the gossiper test. Effects::new() } Event::Network(event) => reactor::wrap_effects( Event::Network, self.network.handle_event(effect_builder, rng, event), ), Event::ContractRuntime(event) => reactor::wrap_effects( Into::into, self.contract_runtime .handle_event(effect_builder, rng, *event), ), } } fn maybe_exit(&self) -> Option<crate::reactor::ReactorExit> { unimplemented!() } } impl NetworkedReactor for Reactor { type NodeId = NodeId; fn node_id(&self) -> NodeId { self.network.node_id() } } fn announce_deploy_received( deploy: Box<Deploy>, responder: Option<Responder<Result<(), deploy_acceptor::Error>>>, ) -> impl FnOnce(EffectBuilder<Event>) -> Effects<Event> { |effect_builder: EffectBuilder<Event>| { effect_builder .announce_deploy_received(deploy, responder) .ignore() } } async fn run_gossip(rng: &mut TestRng, network_size: usize, deploy_count: usize) { const TIMEOUT: Duration = Duration::from_secs(20); const QUIET_FOR: Duration = Duration::from_millis(50); NetworkController::<NodeMessage>::create_active(); let mut network = Network::<Reactor>::new(); // Add `network_size` nodes. let node_ids = network.add_nodes(rng, network_size).await; // Create `deploy_count` random deploys. let (all_deploy_hashes, mut deploys): (BTreeSet<_>, Vec<_>) = iter::repeat_with(|| { let deploy = Box::new(Deploy::random_valid_native_transfer(rng)); (*deploy.id(), deploy) }) .take(deploy_count) .unzip(); // Give each deploy to a randomly-chosen node to be gossiped. for deploy in deploys.drain(..) { let index: usize = rng.gen_range(0..network_size); network .process_injected_effect_on(&node_ids[index], announce_deploy_received(deploy, None)) .await; } // Check every node has every deploy stored locally. let all_deploys_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| { nodes.values().all(|runner| { let hashes = runner.reactor().inner().storage.get_all_deploy_hashes(); all_deploy_hashes == hashes }) }; network.settle_on(rng, all_deploys_held, TIMEOUT).await; // Ensure all responders are called before dropping the network. network.settle(rng, QUIET_FOR, TIMEOUT).await; NetworkController::<NodeMessage>::remove_active(); } #[tokio::test] async fn should_gossip() { const NETWORK_SIZES: [usize; 3] = [2, 5, 20]; const DEPLOY_COUNTS: [usize; 3] = [1, 10, 30]; let mut rng = crate::new_rng(); for network_size in &NETWORK_SIZES { for deploy_count in &DEPLOY_COUNTS { run_gossip(&mut rng, *network_size, *deploy_count).await } } } #[tokio::test] async fn should_get_from_alternate_source() { const NETWORK_SIZE: usize = 3; const POLL_DURATION: Duration = Duration::from_millis(10); const TIMEOUT: Duration = Duration::from_secs(2); NetworkController::<NodeMessage>::create_active(); let mut network = Network::<Reactor>::new(); let mut rng = crate::new_rng(); // Add `NETWORK_SIZE` nodes. let node_ids = network.add_nodes(&mut rng, NETWORK_SIZE).await; // Create random deploy. let deploy = Box::new(Deploy::random_valid_native_transfer(&mut rng)); let deploy_id = *deploy.id(); // Give the deploy to nodes 0 and 1 to be gossiped. for node_id in node_ids.iter().take(2) { network .process_injected_effect_on(node_id, announce_deploy_received(deploy.clone(), None)) .await; } // Run node 0 until it has sent the gossip request then remove it from the network. let made_gossip_request = |event: &Event| -> bool { matches!(event, Event::NetworkRequest(NetworkRequest::Gossip { .. })) }; network .crank_until(&node_ids[0], &mut rng, made_gossip_request, TIMEOUT) .await; assert!(network.remove_node(&node_ids[0]).is_some()); debug!("removed node {}", &node_ids[0]); // Run node 2 until it receives and responds to the gossip request from node 0. let node_id_0 = node_ids[0]; let sent_gossip_response = move |event: &Event| -> bool { match event { Event::NetworkRequest(NetworkRequest::SendMessage { dest, payload, .. }) => { if let NodeMessage::DeployGossiper(Message::GossipResponse { .. }) = **payload { **dest == node_id_0 } else { false } } _ => false, } }; network .crank_until(&node_ids[2], &mut rng, sent_gossip_response, TIMEOUT) .await; // Run nodes 1 and 2 until settled. Node 2 will be waiting for the deploy from node 0. network.settle(&mut rng, POLL_DURATION, TIMEOUT).await; // Advance time to trigger node 2's timeout causing it to request the deploy from node 1. let duration_to_advance = Config::default().get_remainder_timeout(); testing::advance_time(duration_to_advance.into()).await; // Check node 0 has the deploy stored locally. let deploy_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| { let runner = nodes.get(&node_ids[2]).unwrap(); runner .reactor() .inner() .storage .get_deploy_by_hash(deploy_id) .map(|retrieved_deploy| retrieved_deploy == *deploy) .unwrap_or_default() }; network.settle_on(&mut rng, deploy_held, TIMEOUT).await; NetworkController::<NodeMessage>::remove_active(); } #[tokio::test] async fn should_timeout_gossip_response() { const PAUSE_DURATION: Duration = Duration::from_millis(50); const TIMEOUT: Duration = Duration::from_secs(2); NetworkController::<NodeMessage>::create_active(); let mut network = Network::<Reactor>::new(); let mut rng = crate::new_rng(); // The target number of peers to infect with a given piece of data. let infection_target = Config::default().infection_target(); // Add `infection_target + 1` nodes. let mut node_ids = network .add_nodes(&mut rng, infection_target as usize + 1) .await; // Create random deploy. let deploy = Box::new(Deploy::random_valid_native_transfer(&mut rng)); let deploy_id = *deploy.id(); // Give the deploy to node 0 to be gossiped. network .process_injected_effect_on(&node_ids[0], announce_deploy_received(deploy.clone(), None)) .await; // Run node 0 until it has sent the gossip requests. let made_gossip_request = |event: &Event| -> bool { matches!( event, Event::DeployGossiper(super::Event::GossipedTo { .. }) ) }; network .crank_until(&node_ids[0], &mut rng, made_gossip_request, TIMEOUT) .await; // Give node 0 time to set the timeouts before advancing the clock. time::sleep(PAUSE_DURATION).await; // Replace all nodes except node 0 with new nodes. for node_id in node_ids.drain(1..) { assert!(network.remove_node(&node_id).is_some()); debug!("removed node {}", node_id); } for _ in 0..infection_target { let (node_id, _runner) = network.add_node(&mut rng).await.unwrap(); node_ids.push(node_id); } // Advance time to trigger node 0's timeout causing it to gossip to the new nodes. let duration_to_advance = Config::default().gossip_request_timeout(); testing::advance_time(duration_to_advance.into()).await; // Check every node has every deploy stored locally. let deploy_held = |nodes: &HashMap<NodeId, Runner<ConditionCheckReactor<Reactor>>>| { nodes.values().all(|runner| { runner .reactor() .inner() .storage .get_deploy_by_hash(deploy_id) .map(|retrieved_deploy| retrieved_deploy == *deploy) .unwrap_or_default() }) }; network.settle_on(&mut rng, deploy_held, TIMEOUT).await; NetworkController::<NodeMessage>::remove_active(); }
{ match self { Event::Network(event) => write!(formatter, "event: {}", event), Event::Storage(event) => write!(formatter, "storage: {}", event), Event::DeployAcceptor(event) => write!(formatter, "deploy acceptor: {}", event), Event::DeployGossiper(event) => write!(formatter, "deploy gossiper: {}", event), Event::NetworkRequest(req) => write!(formatter, "network request: {}", req), Event::ControlAnnouncement(ctrl_ann) => write!(formatter, "control: {}", ctrl_ann), Event::NetworkAnnouncement(ann) => write!(formatter, "network announcement: {}", ann), Event::RpcServerAnnouncement(ann) => { write!(formatter, "api server announcement: {}", ann) } Event::DeployAcceptorAnnouncement(ann) => { write!(formatter, "deploy-acceptor announcement: {}", ann) } Event::DeployGossiperAnnouncement(ann) => { write!(formatter, "deploy-gossiper announcement: {}", ann) } Event::ContractRuntime(event) => { write!(formatter, "contract-runtime event: {:?}", event) } } }
pixelda_20190101201505.py
import argparse import os import numpy as np import math import itertools import torchvision.transforms as transforms from torchvision.utils import save_image from torch.utils.data import DataLoader from torchvision import datasets from torch.autograd import Variable from mnistm import MNISTM import torch.nn as nn import torch.nn.functional as F import torch os.makedirs('images', exist_ok=True) parser = argparse.ArgumentParser() parser.add_argument('--n_epochs', type=int, default=200, help='number of epochs of training') parser.add_argument('--batch_size', type=int, default=64, help='size of the batches') parser.add_argument('--lr', type=float, default=0.0002, help='adam: learning rate') parser.add_argument('--b1', type=float, default=0.5, help='adam: decay of first order momentum of gradient') parser.add_argument('--b2', type=float, default=0.999, help='adam: decay of first order momentum of gradient') parser.add_argument('--n_cpu', type=int, default=8, help='number of cpu threads to use during batch generation') parser.add_argument('--n_residual_blocks', type=int, default=1, help='number of residual blocks in generator') parser.add_argument('--latent_dim', type=int, default=10, help='dimensionality of the noise input') parser.add_argument('--img_size', type=int, default=32, help='size of each image dimension') parser.add_argument('--channels', type=int, default=3, help='number of image channels') parser.add_argument('--n_classes', type=int, default=10, help='number of classes in the dataset') parser.add_argument('--sample_interval', type=int, default=300, help='interval betwen image samples') opt = parser.parse_args() print(opt) # Calculate output of image discriminator (PatchGAN) patch = int(opt.img_size / 2**4) patch = (1, patch, patch) cuda = True if torch.cuda.is_available() else False def weights_init_normal(m): classname = m.__class__.__name__ print("classname : {}".format(classname)) if classname.find('Conv') != -1: torch.nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find('BatchNorm') != -1: torch.nn.init.normal_(m.weight.data, 1.0, 0.02) torch.nn.init.constant_(m.bias.data, 0.0) class ResidualBlock_back(nn.Module): def __init__(self, in_features=64, out_features=64): super(ResidualBlock, self).__init__() self.block = nn.Sequential( nn.Conv2d(in_features, in_features, 3, 1, 1), nn.BatchNorm2d(in_features), nn.ReLU(inplace=True), nn.Conv2d(in_features, in_features, 3, 1, 1), nn.BatchNorm2d(in_features) ) def forward(self, x): return x + self.block(x) class ResidualBlock(nn.Module): def __init__(self, in_features=64, out_features=64): super(ResidualBlock, self).__init__() # calculate same padding: # (w - k + 2*p)/s + 1 = o # => p = (s(o-1) - w + k)/2 (2(128-1)-64 +3)/2 ### ENCODER self.encode_block = nn.Sequential( nn.Conv2d(in_channels=1*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0), nn.BatchNorm2d(2*in_features), nn.LeakyReLU(inplace=True), nn.Conv2d(in_channels=2*in_features,out_channels=4*in_features,kernel_size=(3, 3),stride=(2, 2),padding=2), nn.BatchNorm2d(4*in_features), nn.LeakyReLU(inplace=True) ) print("self.encode_block : {}".format(self.encode_block)) self.decode_block = nn.Sequential( nn.ConvTranspose2d(in_channels=4*in_features,out_channels=2*in_features,kernel_size=(3, 3),stride=(2, 2), padding=2), nn.BatchNorm2d(2*in_features), nn.LeakyReLU(inplace=True), nn.ConvTranspose2d(in_channels=2*in_features,out_channels=1*in_features,kernel_size=(3, 3),stride=(2, 2),padding=0), nn.BatchNorm2d(1*in_features), nn.LeakyReLU(inplace=True) ) print("self.decode_block : {}".format(self.decode_block)) def forward(self, x): encode_x = self.encode_block(x) decode_x = self.decode_block(encode_x) # decode_x = decode_x[:, :, :-1, :-1] # decode_x = F.sigmoid(decode_x) return x + decode_x class Generator(nn.Module): def __init__(self): super(Generator, self).__init__() # Fully-connected layer which constructs image channel shaped output from noise self.fc = nn.Linear(opt.latent_dim, opt.channels*opt.img_size**2) self.l1 = nn.Sequential(nn.Conv2d(opt.channels*2, 64, 3, 1, 1), nn.ReLU(inplace=True)) resblocks = [] for _ in range(opt.n_residual_blocks): # resblocks.append(ResidualBlock()) resblocks.append(ResidualBlock()) self.resblocks = nn.Sequential(*resblocks) self.l2 = nn.Sequential(nn.Conv2d(64, opt.channels, 3, 1, 1), nn.Tanh()) def forward(self, img, z): gen_input = torch.cat((img, self.fc(z).view(*img.shape)), 1) out = self.l1(gen_input) out = self.resblocks(out) img_ = self.l2(out) return img_ class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() def block(in_features, out_features, normalization=True): """Discriminator block""" layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), nn.LeakyReLU(0.2, inplace=True) ] if normalization: layers.append(nn.InstanceNorm2d(out_features)) return layers self.model = nn.Sequential( *block(opt.channels, 64, normalization=False), *block(64, 128), *block(128, 256), *block(256, 512), nn.Conv2d(512, 1, 3, 1, 1) ) def forward(self, img): validity = self.model(img) return validity class Classifier(nn.Module): def __init__(self):
layers = [ nn.Conv2d(in_features, out_features, 3, stride=2, padding=1), nn.LeakyReLU(0.2, inplace=True) ] if normalization: layers.append(nn.InstanceNorm2d(out_features)) return layers self.model = nn.Sequential( *block(opt.channels, 64, normalization=False), *block(64, 128), *block(128, 256), *block(256, 512) ) input_size = opt.img_size // 2**4 self.output_layer = nn.Sequential( nn.Linear(512*input_size**2, opt.n_classes), nn.Softmax() ) def forward(self, img): feature_repr = self.model(img) feature_repr = feature_repr.view(feature_repr.size(0), -1) label = self.output_layer(feature_repr) return label # Loss function adversarial_loss = torch.nn.MSELoss() task_loss = torch.nn.CrossEntropyLoss() # Loss weights lambda_adv = 1 lambda_task = 0.1 # Initialize generator and discriminator generator = Generator() discriminator = Discriminator() classifier = Classifier() if cuda: generator.cuda() discriminator.cuda() classifier.cuda() adversarial_loss.cuda() task_loss.cuda() # Initialize weights generator.apply(weights_init_normal) discriminator.apply(weights_init_normal) classifier.apply(weights_init_normal) # Configure data loader os.makedirs('../../data/mnist', exist_ok=True) dataloader_A = torch.utils.data.DataLoader( datasets.MNIST('../../data/mnist', train=True, download=True, transform=transforms.Compose([ transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ])), batch_size=opt.batch_size, shuffle=True) os.makedirs('../../data/mnistm', exist_ok=True) dataloader_B = torch.utils.data.DataLoader( MNISTM('../../data/mnistm', train=True, download=True, transform=transforms.Compose([ transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) ])), batch_size=opt.batch_size, shuffle=True) # Optimizers optimizer_G = torch.optim.Adam( itertools.chain(generator.parameters(), classifier.parameters()), lr=opt.lr, betas=(opt.b1, opt.b2)) optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2)) FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor # ---------- # Training # ---------- # Keeps 100 accuracy measurements task_performance = [] target_performance = [] for epoch in range(opt.n_epochs): for i, ((imgs_A, labels_A), (imgs_B, labels_B)) in enumerate(zip(dataloader_A, dataloader_B)): batch_size = imgs_A.size(0) # Adversarial ground truths valid = Variable(FloatTensor(batch_size, *patch).fill_(1.0), requires_grad=False) fake = Variable(FloatTensor(batch_size, *patch).fill_(0.0), requires_grad=False) # Configure input imgs_A = Variable(imgs_A.type(FloatTensor).expand(batch_size, 3, opt.img_size, opt.img_size)) labels_A = Variable(labels_A.type(LongTensor)) imgs_B = Variable(imgs_B.type(FloatTensor)) # ----------------- # Train Generator # ----------------- optimizer_G.zero_grad() # Sample noise z = Variable(FloatTensor(np.random.uniform(-1, 1, (batch_size, opt.latent_dim)))) # Generate a batch of images fake_B = generator(imgs_A, z) # Perform task on translated source image label_pred = classifier(fake_B) # Calculate the task loss task_loss_ = (task_loss(label_pred, labels_A) + \ task_loss(classifier(imgs_A), labels_A)) / 2 # Loss measures generator's ability to fool the discriminator g_loss = lambda_adv * adversarial_loss(discriminator(fake_B), valid) + \ lambda_task * task_loss_ g_loss.backward() optimizer_G.step() # --------------------- # Train Discriminator # --------------------- optimizer_D.zero_grad() # Measure discriminator's ability to classify real from generated samples real_loss = adversarial_loss(discriminator(imgs_B), valid) fake_loss = adversarial_loss(discriminator(fake_B.detach()), fake) d_loss = (real_loss + fake_loss) / 2 d_loss.backward() optimizer_D.step() # --------------------------------------- # Evaluate Performance on target domain # --------------------------------------- # Evaluate performance on translated Domain A acc = np.mean(np.argmax(label_pred.data.cpu().numpy(), axis=1) == labels_A.data.cpu().numpy()) task_performance.append(acc) if len(task_performance) > 100: task_performance.pop(0) # Evaluate performance on Domain B pred_B = classifier(imgs_B) target_acc = np.mean(np.argmax(pred_B.data.cpu().numpy(), axis=1) == labels_B.numpy()) target_performance.append(target_acc) if len(target_performance) > 100: target_performance.pop(0) print ("[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f] [CLF acc: %3d%% (%3d%%), target_acc: %3d%% (%3d%%)]" % (epoch, opt.n_epochs, i, len(dataloader_A), d_loss.item(), g_loss.item(), 100*acc, 100*np.mean(task_performance), 100*target_acc, 100*np.mean(target_performance))) batches_done = len(dataloader_A) * epoch + i if batches_done % opt.sample_interval == 0: sample = torch.cat((imgs_A.data[:5], fake_B.data[:5], imgs_B.data[:5]), -2) save_image(sample, 'images/%d.png' % batches_done, nrow=int(math.sqrt(batch_size)), normalize=True)
super(Classifier, self).__init__() def block(in_features, out_features, normalization=True): """Classifier block"""
prometheus_storage.go
// Copyright (c) 2020 Uber Technologies, Inc. // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. package prometheus import ( "context" "sort" "time" "github.com/m3db/m3/src/query/generated/proto/prompb" "github.com/m3db/m3/src/query/models" "github.com/m3db/m3/src/query/parser/promql" "github.com/m3db/m3/src/query/storage" "github.com/m3db/m3/src/x/instrument" "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" promstorage "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/uber-go/tally" "go.uber.org/zap" ) type prometheusQueryable struct { storage storage.Storage scope tally.Scope logger *zap.Logger } // PrometheusOptions are options to create a prometheus queryable backed by // a m3 storage. type PrometheusOptions struct { Storage storage.Storage InstrumentOptions instrument.Options } // NewPrometheusQueryable returns a new prometheus queryable backed by a m3 // storage. func NewPrometheusQueryable(opts PrometheusOptions) promstorage.Queryable { scope := opts.InstrumentOptions.MetricsScope().Tagged(map[string]string{"storage": "prometheus_storage"}) return &prometheusQueryable{ storage: opts.Storage, scope: scope, logger: opts.InstrumentOptions.Logger(), } } func (o PrometheusOptions) validate() error { if o.Storage == nil { return errors.New("storage is not set") } if o.InstrumentOptions == nil { return errors.New("instrument options not set") } return nil } // Querier returns a prometheus storage Querier. func (q *prometheusQueryable) Querier( ctx context.Context, mint, maxt int64, ) (promstorage.Querier, error) { return newQuerier(ctx, q.storage, q.logger), nil } type querier struct { ctx context.Context storage storage.Storage logger *zap.Logger } func newQuerier( ctx context.Context, storage storage.Storage, logger *zap.Logger, ) promstorage.Querier { return &querier{ ctx: ctx, storage: storage, logger: logger, } } func (q *querier) Select( sortSeries bool, hints *promstorage.SelectHints, labelMatchers ...*labels.Matcher, ) (promstorage.SeriesSet, promstorage.Warnings, error) { matchers, err := promql.LabelMatchersToModelMatcher(labelMatchers, models.NewTagOptions()) if err != nil { return nil, nil, err } query := &storage.FetchQuery{ TagMatchers: matchers, Start: time.Unix(0, hints.Start*int64(time.Millisecond)), End: time.Unix(0, hints.End*int64(time.Millisecond)), Interval: time.Duration(hints.Step) * time.Millisecond, } // NB (@shreyas): The fetch options builder sets it up from the request // which we do not have access to here. fetchOptions, err := fetchOptions(q.ctx) if err != nil { q.logger.Error("fetch options not provided in context", zap.Error(err)) return nil, nil, err } result, err := q.storage.FetchProm(q.ctx, query, fetchOptions) if err != nil { return nil, nil, err } seriesSet := fromQueryResult(sortSeries, result.PromResult) warnings := fromWarningStrings(result.Metadata.WarningStrings()) resultMetadataPtr, err := resultMetadata(q.ctx) if err != nil { q.logger.Error("result metadata not set in context") return nil, nil, err } if resultMetadataPtr == nil { err := errors.New("result metadata nil for context") q.logger.Error(err.Error()) return nil, nil, err } *resultMetadataPtr = result.Metadata return seriesSet, warnings, err } func (q *querier) LabelValues(name string) ([]string, promstorage.Warnings, error) { // TODO (@shreyas): Implement this. q.logger.Warn("calling unsupported LabelValues method") return nil, nil, errors.New("not implemented") } func (q *querier) LabelNames() ([]string, promstorage.Warnings, error) { // TODO (@shreyas): Implement this. q.logger.Warn("calling unsupported LabelNames method") return nil, nil, errors.New("not implemented") } func (q *querier) Close() error { return nil } func fromWarningStrings(warnings []string) []error { errs := make([]error, 0, len(warnings)) for _, warning := range warnings { errs = append(errs, errors.New(warning)) } return errs } // This is a copy of the prometheus remote.FromQueryResult method. Need to // copy so that this can understand m3 prompb struct. func fromQueryResult(sortSeries bool, res *prompb.QueryResult) promstorage.SeriesSet { series := make([]promstorage.Series, 0, len(res.Timeseries)) for _, ts := range res.Timeseries { labels := labelProtosToLabels(ts.Labels) if err := validateLabelsAndMetricName(labels); err != nil { return errSeriesSet{err: err} } series = append(series, &concreteSeries{ labels: labels, samples: ts.Samples, }) } if sortSeries { sort.Sort(byLabel(series)) } return &concreteSeriesSet{ series: series, } } type byLabel []promstorage.Series func (a byLabel) Len() int { return len(a) } func (a byLabel) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byLabel) Less(i, j int) bool { return labels.Compare(a[i].Labels(), a[j].Labels()) < 0 } func labelProtosToLabels(labelPairs []prompb.Label) labels.Labels { result := make(labels.Labels, 0, len(labelPairs)) for _, l := range labelPairs { result = append(result, labels.Label{ Name: string(l.Name), Value: string(l.Value), }) } sort.Sort(result) return result } // errSeriesSet implements storage.SeriesSet, just returning an error. type errSeriesSet struct { err error } func (errSeriesSet) Next() bool { return false } func (errSeriesSet) At() promstorage.Series { return nil } func (e errSeriesSet) Err() error { return e.err } // concreteSeriesSet implements storage.SeriesSet. type concreteSeriesSet struct { cur int series []promstorage.Series } func (c *concreteSeriesSet) Next() bool { c.cur++ return c.cur-1 < len(c.series) } func (c *concreteSeriesSet) At() promstorage.Series { return c.series[c.cur-1] } func (c *concreteSeriesSet) Err() error { return nil } // concreteSeries implements storage.Series. type concreteSeries struct { labels labels.Labels samples []prompb.Sample } func (c *concreteSeries) Labels() labels.Labels { return labels.New(c.labels...) } func (c *concreteSeries) Iterator() chunkenc.Iterator { return newConcreteSeriersIterator(c) } // concreteSeriesIterator implements storage.SeriesIterator. type concreteSeriesIterator struct { cur int series *concreteSeries } func newConcreteSeriersIterator(series *concreteSeries) chunkenc.Iterator { return &concreteSeriesIterator{ cur: -1, series: series, } } // Seek implements storage.SeriesIterator. func (c *concreteSeriesIterator) Seek(t int64) bool { c.cur = sort.Search(len(c.series.samples), func(n int) bool { return c.series.samples[n].Timestamp >= t }) return c.cur < len(c.series.samples) } // At implements storage.SeriesIterator. func (c *concreteSeriesIterator) At() (t int64, v float64) { s := c.series.samples[c.cur] return s.Timestamp, s.Value } // Next implements storage.SeriesIterator. func (c *concreteSeriesIterator) Next() bool { c.cur++ return c.cur < len(c.series.samples) } // Err implements storage.SeriesIterator. func (c *concreteSeriesIterator) Err() error { return nil } // validateLabelsAndMetricName validates the label names/values and metric names returned from remote read, // also making sure that there are no labels with duplicate names func validateLabelsAndMetricName(ls labels.Labels) error { for i, l := range ls { if l.Name == labels.MetricName && !model.IsValidMetricName(model.LabelValue(l.Value)) { return errors.Errorf("invalid metric name: %v", l.Value) } if !model.LabelName(l.Name).IsValid() { return errors.Errorf("invalid label name: %v", l.Name) } if !model.LabelValue(l.Value).IsValid() { return errors.Errorf("invalid label value: %v", l.Value) } if i > 0 && l.Name == ls[i-1].Name { return errors.Errorf("duplicate label with name: %v", l.Name) } } return nil }
//
compound.rs
#![allow(deprecated)] use git_odb::compound::Store; use crate::fixture_path; fn db() -> Store { Store::at(fixture_path("objects"), 0).expect("valid object path") } mod init { use crate::odb::store::compound::db; #[test] fn has_packs() { assert_eq!(db().bundles.len(), 3) } } mod locate { use git_odb::compound::Store; use crate::{hex_to_id, odb::store::compound::db}; fn can_locate(db: &Store, hex_id: &str) { let mut buf = vec![]; assert!(db .try_find(hex_to_id(hex_id), &mut buf, &mut git_pack::cache::Never) .expect("no read error") .is_some()); } #[test] fn loose_object()
#[test] fn pack_object() { can_locate(&db(), "501b297447a8255d3533c6858bb692575cdefaa0"); // pack 11fd can_locate(&db(), "4dac9989f96bc5b5b1263b582c08f0c5f0b58542"); // pack a2bf can_locate(&db(), "dd25c539efbb0ab018caa4cda2d133285634e9b5"); // pack c043 } }
{ can_locate(&db(), "37d4e6c5c48ba0d245164c4e10d5f41140cab980"); }
cluster_request.go
// Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved. // See LICENSE.txt for license information. // package model import ( "encoding/json" "io" "net/url" "github.com/pkg/errors" ) // CreateClusterRequest specifies the parameters for a new cluster. type CreateClusterRequest struct { Provider string `json:"provider,omitempty"` Zones []string `json:"zones,omitempty"` Version string `json:"version,omitempty"` KopsAMI string `json:"kops-ami,omitempty"` MasterInstanceType string `json:"master-instance-type,omitempty"` MasterCount int64 `json:"master-count,omitempty"` NodeInstanceType string `json:"node-instance-type,omitempty"` NodeMinCount int64 `json:"node-min-count,omitempty"` NodeMaxCount int64 `json:"node-max-count,omitempty"` AllowInstallations bool `json:"allow-installations,omitempty"` APISecurityLock bool `json:"api-security-lock,omitempty"` DesiredUtilityVersions map[string]*HelmUtilityVersion `json:"utility-versions,omitempty"` Annotations []string `json:"annotations,omitempty"` Networking string `json:"networking,omitempty"` VPC string `json:"vpc,omitempty"` } func (request *CreateClusterRequest) setUtilityDefaults(utilityName string) { reqDesiredUtilityVersion, ok := request.DesiredUtilityVersions[utilityName] if !ok { request.DesiredUtilityVersions[utilityName] = DefaultUtilityVersions[utilityName] return } if reqDesiredUtilityVersion.Chart == "" { reqDesiredUtilityVersion.Chart = DefaultUtilityVersions[utilityName].Chart } if reqDesiredUtilityVersion.ValuesPath == "" { reqDesiredUtilityVersion.ValuesPath = DefaultUtilityVersions[utilityName].ValuesPath } } func (request *CreateClusterRequest) setUtilitiesDefaults() { for utilityName := range DefaultUtilityVersions { request.setUtilityDefaults(utilityName) } } // SetDefaults sets the default values for a cluster create request. func (request *CreateClusterRequest) SetDefaults() { if len(request.Provider) == 0 { request.Provider = ProviderAWS } if len(request.Version) == 0 { request.Version = "latest" } if len(request.Zones) == 0 { request.Zones = []string{"us-east-1a"} } if len(request.MasterInstanceType) == 0 { request.MasterInstanceType = "t3.medium" } if request.MasterCount == 0 { request.MasterCount = 1 } if len(request.NodeInstanceType) == 0 { request.NodeInstanceType = "m5.large" } if request.NodeMinCount == 0 { request.NodeMinCount = 2 } if request.NodeMaxCount == 0 { request.NodeMaxCount = request.NodeMinCount } if len(request.Networking) == 0 { request.Networking = "amazon-vpc-routed-eni" } if request.DesiredUtilityVersions == nil { request.DesiredUtilityVersions = make(map[string]*HelmUtilityVersion) } request.setUtilitiesDefaults() } // Validate validates the values of a cluster create request. func (request *CreateClusterRequest) Validate() error { if request.Provider != ProviderAWS { return errors.Errorf("unsupported provider %s", request.Provider) } if !ValidClusterVersion(request.Version) { return errors.Errorf("unsupported cluster version %s", request.Version) } if request.MasterCount < 1 { return errors.Errorf("master count (%d) must be 1 or greater", request.MasterCount) } if request.NodeMinCount < 1 { return errors.Errorf("node min count (%d) must be 1 or greater", request.NodeMinCount) } if request.NodeMaxCount != request.NodeMinCount { return errors.Errorf("node min (%d) and max (%d) counts must match", request.NodeMinCount, request.NodeMaxCount) } // TODO: check zones and instance types? if !contains(GetSupportedCniList(), request.Networking) { return errors.Errorf("unsupported cluster networking option %s", request.Networking) } return nil } // GetSupportedCniList starting with three supported CNI networking options, we can add more as required func GetSupportedCniList() []string { return []string{"amazon-vpc-routed-eni", "amazonvpc", "weave", "canal", "calico"} } // contains checks if a string is present in a slice func contains(s []string, str string) bool { for _, v := range s { if v == str { return true } } return false } // NewCreateClusterRequestFromReader will create a CreateClusterRequest from an // io.Reader with JSON data. func NewCreateClusterRequestFromReader(reader io.Reader) (*CreateClusterRequest, error) { var createClusterRequest CreateClusterRequest err := json.NewDecoder(reader).Decode(&createClusterRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode create cluster request") } createClusterRequest.SetDefaults() err = createClusterRequest.Validate() if err != nil { return nil, errors.Wrap(err, "create cluster request failed validation") } return &createClusterRequest, nil } // GetClustersRequest describes the parameters to request a list of clusters. type GetClustersRequest struct { Paging } // ApplyToURL modifies the given url to include query string parameters for the request. func (request *GetClustersRequest) ApplyToURL(u *url.URL) { q := u.Query() request.Paging.AddToQuery(q) u.RawQuery = q.Encode() } // UpdateClusterRequest specifies the parameters available for updating a cluster. type UpdateClusterRequest struct { AllowInstallations bool } // NewUpdateClusterRequestFromReader will create an UpdateClusterRequest from an io.Reader with JSON data. func NewUpdateClusterRequestFromReader(reader io.Reader) (*UpdateClusterRequest, error) { var updateClusterRequest UpdateClusterRequest err := json.NewDecoder(reader).Decode(&updateClusterRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode update cluster request") } return &updateClusterRequest, nil } // PatchUpgradeClusterRequest specifies the parameters for upgrading a cluster. type PatchUpgradeClusterRequest struct { Version *string `json:"version,omitempty"` KopsAMI *string `json:"kops-ami,omitempty"` RotatorConfig *RotatorConfig `json:"rotatorConfig,omitempty"` } // Validate validates the values of a cluster upgrade request. func (p *PatchUpgradeClusterRequest) Validate() error { if p.Version != nil && !ValidClusterVersion(*p.Version) { return errors.Errorf("unsupported cluster version %s", *p.Version) } if p.RotatorConfig != nil { if p.RotatorConfig.UseRotator == nil { return errors.Errorf("rotator config use rotator should be set") } if *p.RotatorConfig.UseRotator { if p.RotatorConfig.EvictGracePeriod == nil { return errors.Errorf("rotator config evict grace period should be set") } if p.RotatorConfig.MaxDrainRetries == nil { return errors.Errorf("rotator config max drain retries should be set") } if p.RotatorConfig.MaxScaling == nil { return errors.Errorf("rotator config max scaling should be set") } if p.RotatorConfig.WaitBetweenDrains == nil { return errors.Errorf("rotator config wait between drains should be set") } if p.RotatorConfig.WaitBetweenRotations == nil { return errors.Errorf("rotator config wait between rotations should be set") } if p.RotatorConfig.WaitBetweenPodEvictions == nil { return errors.Errorf("rotator config wait between pod evictions should be set") } } } return nil } // Apply applies the patch to the given cluster's metadata. func (p *PatchUpgradeClusterRequest) Apply(metadata *KopsMetadata) bool { changes := &KopsMetadataRequestedState{} var applied bool if p.Version != nil && *p.Version != metadata.Version { applied = true changes.Version = *p.Version } if p.KopsAMI != nil && *p.KopsAMI != metadata.AMI { applied = true changes.AMI = *p.KopsAMI } if metadata.RotatorRequest == nil { metadata.RotatorRequest = &RotatorMetadata{} } if applied { metadata.ChangeRequest = changes metadata.RotatorRequest.Config = p.RotatorConfig } return applied } // NewUpgradeClusterRequestFromReader will create an UpgradeClusterRequest from an io.Reader with JSON data. func NewUpgradeClusterRequestFromReader(reader io.Reader) (*PatchUpgradeClusterRequest, error) { var upgradeClusterRequest PatchUpgradeClusterRequest err := json.NewDecoder(reader).Decode(&upgradeClusterRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode upgrade cluster request") } err = upgradeClusterRequest.Validate() if err != nil { return nil, errors.Wrap(err, "upgrade cluster request failed validation") } return &upgradeClusterRequest, nil } // PatchClusterSizeRequest specifies the parameters for resizing a cluster. type PatchClusterSizeRequest struct { NodeInstanceType *string `json:"node-instance-type,omitempty"` NodeMinCount *int64 `json:"node-min-count,omitempty"` NodeMaxCount *int64 `json:"node-max-count,omitempty"` } // Validate validates the values of a PatchClusterSizeRequest. func (p *PatchClusterSizeRequest) Validate() error { if p.NodeInstanceType != nil && len(*p.NodeInstanceType) == 0 { return errors.New("node instance type cannot be a blank value") } if p.NodeMinCount != nil && *p.NodeMinCount < 1 { return errors.New("node min count has to be 1 or greater") } if p.NodeMinCount != nil && p.NodeMaxCount != nil && *p.NodeMaxCount < *p.NodeMinCount { return errors.Errorf("node max count (%d) can't be less than min count (%d)", *p.NodeMaxCount, *p.NodeMinCount) } return nil } // Apply applies the patch to the given cluster's kops metadata. func (p *PatchClusterSizeRequest) Apply(metadata *KopsMetadata) bool { changes := &KopsMetadataRequestedState{} var applied bool if p.NodeInstanceType != nil && *p.NodeInstanceType != metadata.NodeInstanceType { applied = true changes.NodeInstanceType = *p.NodeInstanceType } if p.NodeMinCount != nil && *p.NodeMinCount != metadata.NodeMinCount { applied = true changes.NodeMinCount = *p.NodeMinCount } if p.NodeMaxCount != nil && *p.NodeMaxCount != metadata.NodeMaxCount { applied = true changes.NodeMaxCount = *p.NodeMaxCount } if applied { metadata.ChangeRequest = changes } return applied } // NewResizeClusterRequestFromReader will create an PatchClusterSizeRequest from an io.Reader with JSON data. func NewResizeClusterRequestFromReader(reader io.Reader) (*PatchClusterSizeRequest, error) { var patchClusterSizeRequest PatchClusterSizeRequest err := json.NewDecoder(reader).Decode(&patchClusterSizeRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode resize cluster request") } err = patchClusterSizeRequest.Validate() if err != nil { return nil, errors.Wrap(err, "resize cluster request failed validation") } return &patchClusterSizeRequest, nil } // ProvisionClusterRequest contains metadata related to changing the installed cluster state. type ProvisionClusterRequest struct { DesiredUtilityVersions map[string]*HelmUtilityVersion `json:"utility-versions,omitempty"` Force bool `json:"force"` } // NewProvisionClusterRequestFromReader will create an UpdateClusterRequest from an io.Reader with JSON data. func NewProvisionClusterRequestFromReader(reader io.Reader) (*ProvisionClusterRequest, error)
{ var provisionClusterRequest ProvisionClusterRequest err := json.NewDecoder(reader).Decode(&provisionClusterRequest) if err != nil && err != io.EOF { return nil, errors.Wrap(err, "failed to decode provision cluster request") } return &provisionClusterRequest, nil }
index.js
/*eslint-disable */ import React from 'react' import Zoom from 'react-reveal/Zoom'; import Fade from 'react-reveal/Fade'; import styled, { css } from 'react-emotion' import Card from '../../components/team/card.js' import Layout from '../../layouts'; import Helmet from '../../components/helmet'; import Img from 'gatsby-image' const teamCards =css` display:flex; flex-wrap:wrap; flex-direction:row; justify-content:center; align-items:center; margin-top:60px; margin-bottom:60px; ` const Wrapper = styled.section` position: relative; margin: 0; ` const BgImg = styled(Img)` position: absolute; margin-top: 0px; top: 0; left: 0; width: 100%; background-color: white; z-index: -1; height: ${props => props.height || 'auto'}; @media (min-width: 35em) { min-height: 300px; } & > img { object-fit: ${props => props.fit || 'cover'} !important; object-position: ${props => props.position || '20% 0%'} !important; } &:before { content: ''; background: rgba(0,0,0,0); position: absolute; top: 0; left: 0; right: 0; bottom: 0; height: 100%; width: 100%; z-index: 1; } ` const Title = styled.h1` font-size: 1.5em; line-height: 1.5em; letter-spacing: 0.05em; text-transform: capitalize; font-weight: 600; position: absolute; width: 90%; max-width: 650px; padding: 0 1rem; top: 50%; left: 50%; transform: translate(-50%, -50%); color: white; text-align: center; `; const Line = styled.h1` display: none; font-size: 2em; font-weight: 600; position: absolute; width: 70%; top: 50%; left: 50%; transform: translate(-50%, -50%); border-left: 3px solid white; height: 2em; `; const BannerSection = (props) => ( <Wrapper> <BgImg height={props.height} sizes={props.image.sizes} position={`50% ${props.position}%`}/> { props.title && <Line /> } { props.title && <Title>{props.title}</Title> } </Wrapper> ) class TeamPage extends React.Component{ render(){ const we= this.props.data.TeamImages.edges['0'].node.teamImages; const bannerImage = this.props.data.BannerImage.edges['0'].node.teamBanner; const yAxisForCoverImage = 20; console.log(this.props) return( <Layout location={this.props.location}> <Helmet title='Tech47 | team' description='Tech47-team' image={bannerImage.sizes} pathname={this.props.location.pathname}
absoluteUrl={true} /> <BannerSection title='Our Team' image={bannerImage} height={'50vh'} position={yAxisForCoverImage} /> <div className={teamCards} > { we.map((team) => (<Card key={team.title} description={team.description} pictureUrl={team.resolutions} fullName={team.title}/>)) } </div> </Layout>) } } export const contentfulTeamQuery = graphql` query TeamQuery { TeamImages:allContentfulTeamImages{ edges{ node{ teamImages{ title description resolutions(width:150, height: 150){ ...GatsbyContentfulResolutions } } } } } BannerImage:allContentfulTeamBanner{ edges{ node{ teamBanner{ sizes(maxWidth: 1800, quality:100) { ...GatsbyContentfulSizes_noBase64 } } } } } } `; export default TeamPage;
config.py
from disco import Disco class Config:
def __init__(self): self._numero_discos = int(input("\nInforme a quantidade de discos: ")) def adiciona_discos(self, torre_inicial): discos = self.add_disco() for ix in range(self._numero_discos): torre_inicial.empilha(discos[ix]) def add_disco(self): discos = [] arquivo = open('disco.txt', 'r') for linha in arquivo: discos.append(Disco(int(linha))) return discos def numero_discos(self): return self._numero_discos def status_torres(self, torres): print('\nNumero de discos: ' + str(self._numero_discos)) for torre in torres: torre.to_string()
fig8.py
#/usr/bin/env python from pygg import * import pandas from sqlalchemy import create_engine from tempfile import mkstemp import sys, os resfname='fig8.csv' res = pandas.read_csv(resfname) """ t = theme(axis.line=element_blank(), axis.text.x=element_blank(), axis.text.y=element_blank(), axis.ticks=element_blank(), axis.title.x=element_blank(), axis.title.y=element_blank(), legend.position="none", panel.background=element_blank(), panel.border=element_blank(), panel.grid.major=element_blank(), panel.grid.minor=element_blank(), plot.background=element_blank()) """ prolog = """ library(ggplot2) require(grid) require(gridExtra) data = read.csv('{csvfile}',sep=',') data$version <- factor(data$version, levels=c('naive','ref','auto')) data$threads <- factor(data$threads) data$app <- factor(data$app) t = theme( axis.title.x=element_blank(), axis.title.y=element_blank(), axis.line = element_line(colour = "grey20", size = 0.15), axis.text.x = element_text(colour="grey20",size=2, face="plain"), axis.text.y = element_text(colour="grey20",size=2, face="plain"), axis.ticks=element_blank(), panel.grid.major=element_blank(), panel.background=element_blank(), panel.grid.minor=element_blank(), panel.border=element_blank(), axis.ticks.margin = unit(1,'pt'), axis.ticks.length = unit(0,'pt'), panel.margin=unit(0,'pt'), plot.title = element_text(size=2.5), plot.margin= unit(c(0, 0, 0, 0), "lines"), plot.background=element_blank(), legend.position="none" ) """ # axis.line=element_blank(), # axis.text.x=element_blank(), # axis.text.y=element_blank(), # panel.background=element_rect(fill='grey97'), # panel.grid.major=element_line(size=0.25), # panel.border=element_rect(color='grey90', fill=NA, size=0.5), printable_name = { 'blur': 'BLUR', 'unsharp': 'UNSHARP', 'harris': 'HARRIS', 'camera_pipe': 'CAMERA', 'non_local_means': 'NLMEANS', 'max_filter': 'MAXFILTER', 'interpolate': 'MSCALE_INTERP', 'local_laplacian': 'LOCAL_LAPLACIAN', 'lens_blur': 'LENS_BLUR', 'bilateral_grid': 'BILATERAL', 'hist': 'HIST_EQ', 'conv_layer': 'CONVLAYER', 'vgg': 'VGG', 'mat_mul': 'MATMUL' } def plot(app): pl = ggplot("subset(data, (data$app == '{0}') & (data$threads == 'cpu' | data$threads == 'gpu'))".format(app), aes(x='threads', y='throughput_norm')) + ylim(0,1) # + labs(x='NULL',y='NULL') + guides(fill='FALSE') pl+= geom_bar(aes(fill='version'), width='0.5', stat="'identity'", position="position_dodge(width=0.6)") pl+= scale_fill_manual('values=c("#b3b3b3","#f5c46c","#F95738")') pl+= ggtitle("'{0}'".format(printable_name[app])) pl+= scale_x_discrete('expand=c(0, 0.5), labels=c("ARM", "GPU")') pl+= scale_y_continuous('expand=c(0, 0), breaks=c(0, 0.5, 1), labels = c("0", "0.5", "1")') pl+= coord_fixed(ratio = 1.25) return str(pl) # app_name_norm = app.replace(' ', '_').lower() # fname = 'fig1-{0}.png'.format(app_name_norm) # ggsave('fig1-{0}.png'.format(app_name_norm), # pl,
# data = subset(read.csv('benchmarks.csv',sep=','), (threads == 1 | threads == 4)) # data$version <- factor(data$version, levels=c('naive','auto','ref')) # data$threads <- factor(data$threads) # """.format(app)) sys.exit() apps = ['blur', 'unsharp', 'harris', 'camera_pipe', 'non_local_means', \ 'interpolate', 'local_laplacian', 'lens_blur', 'max_filter', 'bilateral_grid', 'hist',\ 'conv_layer', 'vgg', 'mat_mul'] prog = "plots <- list()" + '\n' plot_num = 0 arrange_str = "" for app in apps: print '\n\n\n===== {0} ====='.format(app) plot_num = plot_num + 1 app_name_norm = app.replace(' ', '_').lower() fname = 'fig1-{0}.pdf'.format(app_name_norm) # select reldata = res[((res.threads == 'cpu') | (res.threads == 'gpu')) & (res.app == app)] #re-normalize reldata.throughput_norm = reldata.throughput_norm / max(reldata.throughput_norm) assert(max(reldata.throughput_norm) == 1.0) (csvfp,csvfile) = mkstemp(suffix='.csv') reldata.to_csv(csvfile) prog += prolog.format(csvfile=csvfile) + '\n' arrange_str += "p{0},".format(plot_num) prog += "p{0} <- {1} + t".format(plot_num, plot(app)) + '\n' prog += "pdf('fig8.pdf', width = 7, height = 1.5)" + '\n' prog += "grid.arrange(" + arrange_str + "ncol = 7, clip=TRUE)" + '\n' prog += "dev.off()" + '\n' print prog execute_r(prog, True)
# #data=res[(res.app == app) & ((res.threads == 1) | (res.threads == 4))], # prefix="""
FitnessCenterTwoTone.js
export default createSvgIcon(React.createElement("path", { d: "M20.57 14.86L22 13.43 20.57 12 17 15.57 8.43 7 12 3.43 10.57 2 9.14 3.43 7.71 2 5.57 4.14 4.14 2.71 2.71 4.14l1.43 1.43L2 7.71l1.43 1.43L2 10.57 3.43 12 7 8.43 15.57 17 12 20.57 13.43 22l1.43-1.43L16.29 22l2.14-2.14 1.43 1.43 1.43-1.43-1.43-1.43L22 16.29l-1.43-1.43z" }), 'FitnessCenterTwoTone');
import React from 'react'; import createSvgIcon from './utils/createSvgIcon';
shared_mystical_orb.py
#### NOTICE: THIS FILE IS AUTOGENERATED #### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY #### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES from swgpy.object import * def create(kernel):
result = Tangible() result.template = "object/tangible/loot/misc/shared_mystical_orb.iff" result.attribute_template_id = -1 result.stfName("item_n","mystical_orb") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
result.go
package scan import ( "encoding/json" "github.com/pkg/errors" ) type ResultItem struct { Name string Value any } type Result struct { Items []ResultItem } func (r *Result) Scan(args ...any) error { if len(args) > len(r.Items) { return errors.Errorf("to many args. want %d at most, got %d", len(r.Items), len(args)) }
for i, arg := range args { err := copyAny(r.Items[i].Value, arg) if err != nil { return err } } return nil } func (r *Result) Decode(v any) error { msa := map[string]any{} for _, item := range r.Items { msa[item.Name] = item.Value } bs, err := json.Marshal(msa) if err != nil { return err } err = json.Unmarshal(bs, v) return err }
generator.util.ts
import { v4 as uuid } from 'uuid'; export abstract class Generator { static uuid(): string { return uuid();
static filename(extension: string): string { return `${this.uuid()}.${extension}`; } }
}
build.rs
extern crate rustc_version; use rustc_version::{version_meta, Channel}; fn main()
{ if version_meta().expect("nightly check failed").channel == Channel::Nightly { println!("cargo:rustc-cfg=nightly"); } }
streamer.rs
//! The `streamer` module defines a set of services for efficiently pulling data from UDP sockets. //! use { crate::{ packet::{self, PacketBatch, PacketBatchRecycler, PACKETS_PER_BATCH}, sendmmsg::{batch_send, SendPktsError}, socket::SocketAddrSpace, }, crossbeam_channel::{Receiver, RecvTimeoutError, SendError, Sender}, histogram::Histogram, solana_sdk::{packet::Packet, timing::timestamp}, std::{ cmp::Reverse, collections::HashMap, net::{IpAddr, UdpSocket}, sync::{ atomic::{AtomicBool, AtomicUsize, Ordering}, Arc, }, thread::{sleep, Builder, JoinHandle}, time::{Duration, Instant}, }, thiserror::Error, }; pub type PacketBatchReceiver = Receiver<PacketBatch>; pub type PacketBatchSender = Sender<PacketBatch>; #[derive(Error, Debug)] pub enum StreamerError { #[error("I/O error")] Io(#[from] std::io::Error), #[error("receive timeout error")] RecvTimeout(#[from] RecvTimeoutError), #[error("send packets error")] Send(#[from] SendError<PacketBatch>), #[error(transparent)] SendPktsError(#[from] SendPktsError), } pub struct StreamerReceiveStats { pub name: &'static str, pub packets_count: AtomicUsize, pub packet_batches_count: AtomicUsize, pub full_packet_batches_count: AtomicUsize, pub max_channel_len: AtomicUsize, } impl StreamerReceiveStats { pub fn new(name: &'static str) -> Self { Self { name, packets_count: AtomicUsize::default(), packet_batches_count: AtomicUsize::default(), full_packet_batches_count: AtomicUsize::default(), max_channel_len: AtomicUsize::default(), } } pub fn report(&self) { datapoint_info!( self.name, ( "packets_count", self.packets_count.swap(0, Ordering::Relaxed) as i64, i64 ), ( "packet_batches_count", self.packet_batches_count.swap(0, Ordering::Relaxed) as i64, i64 ), ( "full_packet_batches_count", self.full_packet_batches_count.swap(0, Ordering::Relaxed) as i64, i64 ), ( "channel_len", self.max_channel_len.swap(0, Ordering::Relaxed) as i64, i64 ), ); } } pub type Result<T> = std::result::Result<T, StreamerError>; fn recv_loop( socket: &UdpSocket, exit: Arc<AtomicBool>, packet_batch_sender: &PacketBatchSender, recycler: &PacketBatchRecycler, stats: &StreamerReceiveStats, coalesce_ms: u64, use_pinned_memory: bool, in_vote_only_mode: Option<Arc<AtomicBool>>, ) -> Result<()> { loop { let mut packet_batch = if use_pinned_memory { PacketBatch::new_with_recycler(recycler.clone(), PACKETS_PER_BATCH, stats.name) } else { PacketBatch::with_capacity(PACKETS_PER_BATCH) }; loop { // Check for exit signal, even if socket is busy // (for instance the leader transaction socket) if exit.load(Ordering::Relaxed) { return Ok(()); } if let Some(ref in_vote_only_mode) = in_vote_only_mode { if in_vote_only_mode.load(Ordering::Relaxed) { sleep(Duration::from_millis(1)); continue; } } if let Ok(len) = packet::recv_from(&mut packet_batch, socket, coalesce_ms) { if len > 0 { let StreamerReceiveStats { packets_count, packet_batches_count, full_packet_batches_count, max_channel_len, .. } = stats; packets_count.fetch_add(len, Ordering::Relaxed); packet_batches_count.fetch_add(1, Ordering::Relaxed); max_channel_len.fetch_max(packet_batch_sender.len(), Ordering::Relaxed); if len == PACKETS_PER_BATCH { full_packet_batches_count.fetch_add(1, Ordering::Relaxed); } packet_batch_sender.send(packet_batch)?; } break; } } } } pub fn receiver( socket: Arc<UdpSocket>, exit: Arc<AtomicBool>, packet_batch_sender: PacketBatchSender, recycler: PacketBatchRecycler, stats: Arc<StreamerReceiveStats>, coalesce_ms: u64, use_pinned_memory: bool, in_vote_only_mode: Option<Arc<AtomicBool>>, ) -> JoinHandle<()> { let res = socket.set_read_timeout(Some(Duration::new(1, 0))); assert!(res.is_ok(), "streamer::receiver set_read_timeout error"); Builder::new() .name("solana-receiver".to_string()) .spawn(move || { let _ = recv_loop( &socket, exit, &packet_batch_sender, &recycler, &stats, coalesce_ms, use_pinned_memory, in_vote_only_mode, ); }) .unwrap() } #[derive(Debug, Default)] struct SendStats { bytes: u64, count: u64, } #[derive(Default)] struct StreamerSendStats { host_map: HashMap<IpAddr, SendStats>, since: Option<Instant>, } impl StreamerSendStats { fn
( name: &'static str, host_map: HashMap<IpAddr, SendStats>, sample_duration: Option<Duration>, ) { const MAX_REPORT_ENTRIES: usize = 5; let sample_ms = sample_duration.map(|d| d.as_millis()).unwrap_or_default(); let mut hist = Histogram::default(); let mut byte_sum = 0; let mut pkt_count = 0; host_map.iter().for_each(|(_addr, host_stats)| { hist.increment(host_stats.bytes).unwrap(); byte_sum += host_stats.bytes; pkt_count += host_stats.count; }); datapoint_info!( name, ("streamer-send-sample_duration_ms", sample_ms, i64), ("streamer-send-host_count", host_map.len(), i64), ("streamer-send-bytes_total", byte_sum, i64), ("streamer-send-pkt_count_total", pkt_count, i64), ( "streamer-send-host_bytes_min", hist.minimum().unwrap_or_default(), i64 ), ( "streamer-send-host_bytes_max", hist.maximum().unwrap_or_default(), i64 ), ( "streamer-send-host_bytes_mean", hist.mean().unwrap_or_default(), i64 ), ( "streamer-send-host_bytes_90pct", hist.percentile(90.0).unwrap_or_default(), i64 ), ( "streamer-send-host_bytes_50pct", hist.percentile(50.0).unwrap_or_default(), i64 ), ( "streamer-send-host_bytes_10pct", hist.percentile(10.0).unwrap_or_default(), i64 ), ); let num_entries = host_map.len(); let mut entries: Vec<_> = host_map.into_iter().collect(); if entries.len() > MAX_REPORT_ENTRIES { entries.select_nth_unstable_by_key(MAX_REPORT_ENTRIES, |(_addr, stats)| { Reverse(stats.bytes) }); entries.truncate(MAX_REPORT_ENTRIES); } info!( "streamer send {} hosts: count:{} {:?}", name, num_entries, entries, ); } fn maybe_submit(&mut self, name: &'static str, sender: &Sender<Box<dyn FnOnce() + Send>>) { const SUBMIT_CADENCE: Duration = Duration::from_secs(10); const MAP_SIZE_REPORTING_THRESHOLD: usize = 1_000; let elapsed = self.since.as_ref().map(Instant::elapsed); if elapsed.map(|e| e < SUBMIT_CADENCE).unwrap_or_default() && self.host_map.len() < MAP_SIZE_REPORTING_THRESHOLD { return; } let host_map = std::mem::take(&mut self.host_map); let _ = sender.send(Box::new(move || { Self::report_stats(name, host_map, elapsed); })); *self = Self { since: Some(Instant::now()), ..Self::default() }; } fn record(&mut self, pkt: &Packet) { let ent = self.host_map.entry(pkt.meta.addr).or_default(); ent.count += 1; ent.bytes += pkt.data().len() as u64; } } fn recv_send( sock: &UdpSocket, r: &PacketBatchReceiver, socket_addr_space: &SocketAddrSpace, stats: &mut Option<StreamerSendStats>, ) -> Result<()> { let timer = Duration::new(1, 0); let packet_batch = r.recv_timeout(timer)?; if let Some(stats) = stats { packet_batch.iter().for_each(|p| stats.record(p)); } let packets = packet_batch.iter().filter_map(|pkt| { let addr = pkt.meta.socket_addr(); socket_addr_space.check(&addr).then(|| (pkt.data(), addr)) }); batch_send(sock, &packets.collect::<Vec<_>>())?; Ok(()) } pub fn recv_vec_packet_batches( recvr: &Receiver<Vec<PacketBatch>>, ) -> Result<(Vec<PacketBatch>, usize, Duration)> { let timer = Duration::new(1, 0); let mut packet_batches = recvr.recv_timeout(timer)?; let recv_start = Instant::now(); trace!("got packets"); let mut num_packets = packet_batches .iter() .map(|packets| packets.len()) .sum::<usize>(); while let Ok(packet_batch) = recvr.try_recv() { trace!("got more packets"); num_packets += packet_batch .iter() .map(|packets| packets.len()) .sum::<usize>(); packet_batches.extend(packet_batch); } let recv_duration = recv_start.elapsed(); trace!( "packet batches len: {}, num packets: {}", packet_batches.len(), num_packets ); Ok((packet_batches, num_packets, recv_duration)) } pub fn recv_packet_batches( recvr: &PacketBatchReceiver, ) -> Result<(Vec<PacketBatch>, usize, Duration)> { let timer = Duration::new(1, 0); let packet_batch = recvr.recv_timeout(timer)?; let recv_start = Instant::now(); trace!("got packets"); let mut num_packets = packet_batch.len(); let mut packet_batches = vec![packet_batch]; while let Ok(packet_batch) = recvr.try_recv() { trace!("got more packets"); num_packets += packet_batch.len(); packet_batches.push(packet_batch); } let recv_duration = recv_start.elapsed(); trace!( "packet batches len: {}, num packets: {}", packet_batches.len(), num_packets ); Ok((packet_batches, num_packets, recv_duration)) } pub fn responder( name: &'static str, sock: Arc<UdpSocket>, r: PacketBatchReceiver, socket_addr_space: SocketAddrSpace, stats_reporter_sender: Option<Sender<Box<dyn FnOnce() + Send>>>, ) -> JoinHandle<()> { Builder::new() .name(format!("solana-responder-{}", name)) .spawn(move || { let mut errors = 0; let mut last_error = None; let mut last_print = 0; let mut stats = None; if stats_reporter_sender.is_some() { stats = Some(StreamerSendStats::default()); } loop { if let Err(e) = recv_send(&sock, &r, &socket_addr_space, &mut stats) { match e { StreamerError::RecvTimeout(RecvTimeoutError::Disconnected) => break, StreamerError::RecvTimeout(RecvTimeoutError::Timeout) => (), _ => { errors += 1; last_error = Some(e); } } } let now = timestamp(); if now - last_print > 1000 && errors != 0 { datapoint_info!(name, ("errors", errors, i64),); info!("{} last-error: {:?} count: {}", name, last_error, errors); last_print = now; errors = 0; } if let Some(ref stats_reporter_sender) = stats_reporter_sender { if let Some(ref mut stats) = stats { stats.maybe_submit(name, stats_reporter_sender); } } } }) .unwrap() } #[cfg(test)] mod test { use { super::*, crate::{ packet::{Packet, PacketBatch, PACKET_DATA_SIZE}, streamer::{receiver, responder}, }, crossbeam_channel::unbounded, solana_perf::recycler::Recycler, std::{ io, io::Write, net::UdpSocket, sync::{ atomic::{AtomicBool, Ordering}, Arc, }, time::Duration, }, }; fn get_packet_batches(r: PacketBatchReceiver, num_packets: &mut usize) { for _ in 0..10 { let packet_batch_res = r.recv_timeout(Duration::new(1, 0)); if packet_batch_res.is_err() { continue; } *num_packets -= packet_batch_res.unwrap().len(); if *num_packets == 0 { break; } } } #[test] fn streamer_debug() { write!(io::sink(), "{:?}", Packet::default()).unwrap(); write!(io::sink(), "{:?}", PacketBatch::default()).unwrap(); } #[test] fn streamer_send_test() { let read = UdpSocket::bind("127.0.0.1:0").expect("bind"); read.set_read_timeout(Some(Duration::new(1, 0))).unwrap(); let addr = read.local_addr().unwrap(); let send = UdpSocket::bind("127.0.0.1:0").expect("bind"); let exit = Arc::new(AtomicBool::new(false)); let (s_reader, r_reader) = unbounded(); let stats = Arc::new(StreamerReceiveStats::new("test")); let t_receiver = receiver( Arc::new(read), exit.clone(), s_reader, Recycler::default(), stats.clone(), 1, true, None, ); const NUM_PACKETS: usize = 5; let t_responder = { let (s_responder, r_responder) = unbounded(); let t_responder = responder( "streamer_send_test", Arc::new(send), r_responder, SocketAddrSpace::Unspecified, None, ); let mut packet_batch = PacketBatch::default(); for i in 0..NUM_PACKETS { let mut p = Packet::default(); { p.buffer_mut()[0] = i as u8; p.meta.size = PACKET_DATA_SIZE; p.meta.set_socket_addr(&addr); } packet_batch.push(p); } s_responder.send(packet_batch).expect("send"); t_responder }; let mut packets_remaining = NUM_PACKETS; get_packet_batches(r_reader, &mut packets_remaining); assert_eq!(packets_remaining, 0); exit.store(true, Ordering::Relaxed); assert!(stats.packet_batches_count.load(Ordering::Relaxed) >= 1); assert_eq!(stats.packets_count.load(Ordering::Relaxed), NUM_PACKETS); assert_eq!(stats.full_packet_batches_count.load(Ordering::Relaxed), 0); t_receiver.join().expect("join"); t_responder.join().expect("join"); } }
report_stats
stable_hasher.rs
use super::sip128::SipHasher128; use std::{ hash::{BuildHasher, Hash, Hasher}, marker::PhantomData, mem, }; /// When hashing something that ends up affecting properties like symbol names, /// we want these symbol names to be calculated independently of other factors /// like what architecture you're compiling *from*. /// /// To that end we always convert integers to little-endian format before /// hashing and the architecture dependent `isize` and `usize` types are /// extended to 64 bits if needed. pub struct StableHasher<W> { state: SipHasher128, bytes_hashed: u64, width: PhantomData<W>, } impl<W: StableHasherResult> ::std::fmt::Debug for StableHasher<W> { fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result { write!(f, "{:?}", self.state) } } pub trait StableHasherResult: Sized { fn finish(hasher: StableHasher<Self>) -> Self; } impl<W: StableHasherResult> StableHasher<W> { pub fn new() -> Self { StableHasher { state: SipHasher128::new_with_keys(0, 0), bytes_hashed: 0, width: PhantomData, } } pub fn finish(self) -> W { W::finish(self) } } impl StableHasherResult for u128 { fn finish(hasher: StableHasher<Self>) -> Self { let (_0, _1) = hasher.finalize(); (_0 as u128) | ((_1 as u128) << 64) } } impl StableHasherResult for u64 { fn finish(hasher: StableHasher<Self>) -> Self { hasher.finalize().0 } } impl<W> StableHasher<W> { #[inline] pub fn finalize(self) -> (u64, u64) { self.state.finish128() } } impl<W> Hasher for StableHasher<W> { fn finish(&self) -> u64 { panic!("use StableHasher::finalize instead"); } #[inline] fn write(&mut self, bytes: &[u8]) { self.state.write(bytes); self.bytes_hashed += bytes.len() as u64; } #[inline] fn write_u8(&mut self, i: u8) { self.state.write_u8(i); self.bytes_hashed += 1; } #[inline] fn write_u16(&mut self, i: u16) { self.state.write_u16(i.to_le()); self.bytes_hashed += 2; } #[inline] fn write_u32(&mut self, i: u32) { self.state.write_u32(i.to_le()); self.bytes_hashed += 4; } #[inline] fn write_u64(&mut self, i: u64) { self.state.write_u64(i.to_le()); self.bytes_hashed += 8; } #[inline] fn write_u128(&mut self, i: u128) { self.state.write_u128(i.to_le()); self.bytes_hashed += 16; } #[inline] fn write_usize(&mut self, i: usize) { // Always treat usize as u64 so we get the same results on 32 and 64 bit // platforms. This is important for symbol hashes when cross compiling, // for example. self.state.write_u64((i as u64).to_le()); self.bytes_hashed += 8; } #[inline] fn write_i8(&mut self, i: i8) { self.state.write_i8(i); self.bytes_hashed += 1; } #[inline] fn write_i16(&mut self, i: i16) { self.state.write_i16(i.to_le()); self.bytes_hashed += 2; } #[inline] fn write_i32(&mut self, i: i32) { self.state.write_i32(i.to_le()); self.bytes_hashed += 4; } #[inline] fn write_i64(&mut self, i: i64) { self.state.write_i64(i.to_le()); self.bytes_hashed += 8; } #[inline] fn write_i128(&mut self, i: i128) { self.state.write_i128(i.to_le()); self.bytes_hashed += 16; } #[inline] fn write_isize(&mut self, i: isize) { // Always treat isize as i64 so we get the same results on 32 and 64 bit // platforms. This is important for symbol hashes when cross compiling, // for example. self.state.write_i64((i as i64).to_le()); self.bytes_hashed += 8; } } /// Something that implements `HashStable<CTX>` can be hashed in a way that is /// stable across multiple compilation sessions. pub trait HashStable<CTX> { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut CTX, hasher: &mut StableHasher<W>); } /// Implement this for types that can be turned into stable keys like, for /// example, for DefId that can be converted to a DefPathHash. This is used for /// bringing maps into a predictable order before hashing them. pub trait ToStableHashKey<HCX> { type KeyType: Ord + Clone + Sized + HashStable<HCX>; fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType; } // Implement HashStable by just calling `Hash::hash()`. This works fine for // self-contained values that don't depend on the hashing context `CTX`. #[macro_export] macro_rules! impl_stable_hash_via_hash { ($t:ty) => { impl<CTX> $crate::rustc_data_structures::stable_hasher::HashStable<CTX> for $t { #[inline] fn hash_stable<W: $crate::rustc_data_structures::stable_hasher::StableHasherResult>( &self, _: &mut CTX, hasher: &mut $crate::rustc_data_structures::stable_hasher::StableHasher<W>, ) { ::std::hash::Hash::hash(self, hasher); } } }; } impl_stable_hash_via_hash!(i8); impl_stable_hash_via_hash!(i16); impl_stable_hash_via_hash!(i32); impl_stable_hash_via_hash!(i64); impl_stable_hash_via_hash!(isize); impl_stable_hash_via_hash!(u8); impl_stable_hash_via_hash!(u16); impl_stable_hash_via_hash!(u32); impl_stable_hash_via_hash!(u64); impl_stable_hash_via_hash!(usize); impl_stable_hash_via_hash!(u128); impl_stable_hash_via_hash!(i128); impl_stable_hash_via_hash!(char); impl_stable_hash_via_hash!(()); impl<CTX> HashStable<CTX> for ::std::num::NonZeroU32 { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { self.get().hash_stable(ctx, hasher) } } impl<CTX> HashStable<CTX> for f32 { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let val: u32 = unsafe { ::std::mem::transmute(*self) }; val.hash_stable(ctx, hasher); } } impl<CTX> HashStable<CTX> for f64 { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let val: u64 = unsafe { ::std::mem::transmute(*self) }; val.hash_stable(ctx, hasher); } } impl<CTX> HashStable<CTX> for ::std::cmp::Ordering { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (*self as i8).hash_stable(ctx, hasher); } } impl<T1: HashStable<CTX>, CTX> HashStable<CTX> for (T1,) { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let (ref _0,) = *self; _0.hash_stable(ctx, hasher); } } impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2) { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let (ref _0, ref _1) = *self; _0.hash_stable(ctx, hasher); _1.hash_stable(ctx, hasher); } } impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3) where T1: HashStable<CTX>, T2: HashStable<CTX>, T3: HashStable<CTX>, { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let (ref _0, ref _1, ref _2) = *self; _0.hash_stable(ctx, hasher); _1.hash_stable(ctx, hasher); _2.hash_stable(ctx, hasher); } } impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4) where T1: HashStable<CTX>, T2: HashStable<CTX>, T3: HashStable<CTX>, T4: HashStable<CTX>, { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { let (ref _0, ref _1, ref _2, ref _3) = *self; _0.hash_stable(ctx, hasher); _1.hash_stable(ctx, hasher); _2.hash_stable(ctx, hasher); _3.hash_stable(ctx, hasher); } } impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] { fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { self.len().hash_stable(ctx, hasher); for item in self { item.hash_stable(ctx, hasher); } } } impl<T: HashStable<CTX>, CTX> HashStable<CTX> for Vec<T> { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (&self[..]).hash_stable(ctx, hasher); } } impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for Box<T> { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (**self).hash_stable(ctx, hasher); } } impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::rc::Rc<T> { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (**self).hash_stable(ctx, hasher); } } impl<T: ?Sized + HashStable<CTX>, CTX> HashStable<CTX> for ::std::sync::Arc<T> { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (**self).hash_stable(ctx, hasher); } } impl<CTX> HashStable<CTX> for str { #[inline] fn hash_stable<W: StableHasherResult>(&self, _: &mut CTX, hasher: &mut StableHasher<W>) { self.len().hash(hasher); self.as_bytes().hash(hasher); } } impl<CTX> HashStable<CTX> for String { #[inline] fn hash_stable<W: StableHasherResult>(&self, hcx: &mut CTX, hasher: &mut StableHasher<W>) { (&self[..]).hash_stable(hcx, hasher); } } impl<HCX> ToStableHashKey<HCX> for String { type KeyType = String; #[inline] fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType { self.clone() } } impl<CTX> HashStable<CTX> for bool { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (if *self { 1u8 } else { 0u8 }).hash_stable(ctx, hasher); } } impl<T, CTX> HashStable<CTX> for Option<T> where T: HashStable<CTX>, { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { if let Some(ref value) = *self { 1u8.hash_stable(ctx, hasher); value.hash_stable(ctx, hasher); } else { 0u8.hash_stable(ctx, hasher); } } } impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2> where T1: HashStable<CTX>, T2: HashStable<CTX>, { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { mem::discriminant(self).hash_stable(ctx, hasher); match *self { Ok(ref x) => x.hash_stable(ctx, hasher), Err(ref x) => x.hash_stable(ctx, hasher), } } } impl<'a, T, CTX> HashStable<CTX> for &'a T where T: HashStable<CTX> + ?Sized, { #[inline] fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut StableHasher<W>) { (**self).hash_stable(ctx, hasher); } } impl<T, CTX> HashStable<CTX> for ::std::mem::Discriminant<T> { #[inline] fn hash_stable<W: StableHasherResult>(&self, _: &mut CTX, hasher: &mut StableHasher<W>) { ::std::hash::Hash::hash(self, hasher); } } // impl<I: ::indexed_vec::Idx, T, CTX> HashStable<CTX> for // ::indexed_vec::IndexVec<I, T> where // T: HashStable<CTX>, // { // fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut // StableHasher<W>) { self.len().hash_stable(ctx, hasher); // for v in &self.raw { // v.hash_stable(ctx, hasher); // } // } // } // impl<I: ::indexed_vec::Idx, CTX> HashStable<CTX> for ::bit_set::BitSet<I> { // fn hash_stable<W: StableHasherResult>(&self, ctx: &mut CTX, hasher: &mut // StableHasher<W>) { self.words().hash_stable(ctx, hasher); // } // } impl_stable_hash_via_hash!(::std::path::Path); impl_stable_hash_via_hash!(::std::path::PathBuf); impl<K, V, R, HCX> HashStable<HCX> for ::std::collections::HashMap<K, V, R> where K: ToStableHashKey<HCX> + Eq + Hash, V: HashStable<HCX>, R: BuildHasher, { #[inline] fn hash_stable<W: StableHasherResult>(&self, hcx: &mut HCX, hasher: &mut StableHasher<W>) { hash_stable_hashmap(hcx, hasher, self, ToStableHashKey::to_stable_hash_key); } } impl<K, R, HCX> HashStable<HCX> for ::std::collections::HashSet<K, R> where K: ToStableHashKey<HCX> + Eq + Hash, R: BuildHasher, { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut HCX, hasher: &mut StableHasher<W>) { let mut keys: Vec<_> = self.iter().map(|k| k.to_stable_hash_key(hcx)).collect(); keys.sort_unstable(); keys.hash_stable(hcx, hasher); } } impl<K, V, HCX> HashStable<HCX> for ::std::collections::BTreeMap<K, V>
K: ToStableHashKey<HCX>, V: HashStable<HCX>, { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut HCX, hasher: &mut StableHasher<W>) { let mut entries: Vec<_> = self .iter() .map(|(k, v)| (k.to_stable_hash_key(hcx), v)) .collect(); entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2)); entries.hash_stable(hcx, hasher); } } impl<K, HCX> HashStable<HCX> for ::std::collections::BTreeSet<K> where K: ToStableHashKey<HCX>, { fn hash_stable<W: StableHasherResult>(&self, hcx: &mut HCX, hasher: &mut StableHasher<W>) { let mut keys: Vec<_> = self.iter().map(|k| k.to_stable_hash_key(hcx)).collect(); keys.sort_unstable(); keys.hash_stable(hcx, hasher); } } pub fn hash_stable_hashmap<HCX, K, V, R, SK, F, W>( hcx: &mut HCX, hasher: &mut StableHasher<W>, map: &::std::collections::HashMap<K, V, R>, to_stable_hash_key: F, ) where K: Eq + Hash, V: HashStable<HCX>, R: BuildHasher, SK: HashStable<HCX> + Ord + Clone, F: Fn(&K, &HCX) -> SK, W: StableHasherResult, { let mut entries: Vec<_> = map .iter() .map(|(k, v)| (to_stable_hash_key(k, hcx), v)) .collect(); entries.sort_unstable_by(|&(ref sk1, _), &(ref sk2, _)| sk1.cmp(sk2)); entries.hash_stable(hcx, hasher); }
where
platform-server.umd.js
/** * @license Angular v4.4.4 * (c) 2010-2017 Google, Inc. https://angular.io/ * License: MIT */ (function (global, factory) { typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('@angular/core'), require('@angular/platform-browser'), require('@angular/animations/browser'), require('@angular/common'), require('@angular/common/http'), require('@angular/compiler'), require('@angular/http'), require('@angular/platform-browser/animations'), require('rxjs/Observable'), require('rxjs/Subject'), require('url'), require('rxjs/operator/filter'), require('rxjs/operator/first'), require('rxjs/operator/toPromise')) : typeof define === 'function' && define.amd ? define(['exports', '@angular/core', '@angular/platform-browser', '@angular/animations/browser', '@angular/common', '@angular/common/http', '@angular/compiler', '@angular/http', '@angular/platform-browser/animations', 'rxjs/Observable', 'rxjs/Subject', 'url', 'rxjs/operator/filter', 'rxjs/operator/first', 'rxjs/operator/toPromise'], factory) : (factory((global.ng = global.ng || {}, global.ng.platformServer = global.ng.platformServer || {}),global.ng.core,global.ng.platformBrowser,global._angular_animations_browser,global.ng.common,global._angular_common_http,global.ng.compiler,global._angular_http,global._angular_platformBrowser_animations,global.Rx,global.Rx,global.url,global.Rx.Observable.prototype,global.Rx.Observable.prototype,global.Rx.Observable.prototype)); }(this, (function (exports,_angular_core,_angular_platformBrowser,_angular_animations_browser,_angular_common,_angular_common_http,_angular_compiler,_angular_http,_angular_platformBrowser_animations,rxjs_Observable,rxjs_Subject,url,rxjs_operator_filter,rxjs_operator_first,rxjs_operator_toPromise) { 'use strict'; /*! ***************************************************************************** Copyright (c) Microsoft Corporation. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 THIS CODE IS PROVIDED ON AN *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT. See the Apache Version 2.0 License for specific language governing permissions and limitations under the License. ***************************************************************************** */ /* global Reflect, Promise */ var extendStatics = Object.setPrototypeOf || ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || function (d, b) { for (var p in b) if (b.hasOwnProperty(p)) d[p] = b[p]; }; function __extends(d, b) { extendStatics(d, b); function __() { this.constructor = d; } d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); } /** * @license Angular v4.4.4 * (c) 2010-2017 Google, Inc. https://angular.io/ * License: MIT */ /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var parse5 = require('parse5'); /** * Representation of the current platform state. * * \@experimental */ var PlatformState = (function () { /** * @param {?} _doc */ function PlatformState(_doc) { this._doc = _doc; } /** * Renders the current state of the platform to string. * @return {?} */ PlatformState.prototype.renderToString = function () { return _angular_platformBrowser.ɵgetDOM().getInnerHTML(this._doc); }; /** * Returns the current DOM state. * @return {?} */ PlatformState.prototype.getDocument = function () { return this._doc; }; return PlatformState; }()); PlatformState.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ PlatformState.ctorParameters = function () { return [ { type: undefined, decorators: [{ type: _angular_core.Inject, args: [_angular_platformBrowser.DOCUMENT,] },] }, ]; }; /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var xhr2 = require('xhr2'); var isAbsoluteUrl = /^[a-zA-Z\-\+.]+:\/\//; /** * @param {?} url * @return {?} */ function validateRequestUrl(url$$1) { if (!isAbsoluteUrl.test(url$$1)) { throw new Error("URLs requested via Http on the server must be absolute. URL: " + url$$1); } } var ServerXhr = (function () { function ServerXhr() { } /** * @return {?} */ ServerXhr.prototype.build = function () { return new xhr2.XMLHttpRequest(); }; return ServerXhr; }()); ServerXhr.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ ServerXhr.ctorParameters = function () { return []; }; var ServerXsrfStrategy = (function () { function ServerXsrfStrategy() { } /** * @param {?} req * @return {?} */ ServerXsrfStrategy.prototype.configureRequest = function (req) { }; return ServerXsrfStrategy; }()); ServerXsrfStrategy.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ ServerXsrfStrategy.ctorParameters = function () { return []; }; /** * @abstract */ var ZoneMacroTaskWrapper = (function () { function ZoneMacroTaskWrapper() { } /** * @param {?} request * @return {?} */ ZoneMacroTaskWrapper.prototype.wrap = function (request) { var _this = this; return new rxjs_Observable.Observable(function (observer) { var /** @type {?} */ task = ((null)); var /** @type {?} */ scheduled = false; var /** @type {?} */ sub = null; var /** @type {?} */ savedResult = null; var /** @type {?} */ savedError = null; var /** @type {?} */ scheduleTask = function (_task) { task = _task; scheduled = true; var /** @type {?} */ delegate = _this.delegate(request); sub = delegate.subscribe(function (res) { return savedResult = res; }, function (err) { if (!scheduled) { throw new Error('An http observable was completed twice. This shouldn\'t happen, please file a bug.'); } savedError = err; scheduled = false; task.invoke(); }, function () { if (!scheduled) { throw new Error('An http observable was completed twice. This shouldn\'t happen, please file a bug.'); } scheduled = false; task.invoke(); }); }; var /** @type {?} */ cancelTask = function (_task) { if (!scheduled) { return; } scheduled = false; if (sub) { sub.unsubscribe(); sub = null; } }; var /** @type {?} */ onComplete = function () { if (savedError !== null) { observer.error(savedError); } else { observer.next(savedResult); observer.complete(); } }; // MockBackend for Http is synchronous, which means that if scheduleTask is by // scheduleMacroTask, the request will hit MockBackend and the response will be // sent, causing task.invoke() to be called. var /** @type {?} */ _task = Zone.current.scheduleMacroTask('ZoneMacroTaskWrapper.subscribe', onComplete, {}, function () { return null; }, cancelTask); scheduleTask(_task); return function () { if (scheduled && task) { task.zone.cancelTask(task); scheduled = false; } if (sub) { sub.unsubscribe(); sub = null; } }; }); }; /** * @abstract * @param {?} request * @return {?} */ ZoneMacroTaskWrapper.prototype.delegate = function (request) { }; return ZoneMacroTaskWrapper; }()); var ZoneMacroTaskConnection = (function (_super) { __extends(ZoneMacroTaskConnection, _super); /** * @param {?} request * @param {?} backend */ function ZoneMacroTaskConnection(request, backend) { var _this = _super.call(this) || this; _this.request = request; _this.backend = backend; validateRequestUrl(request.url); _this.response = _this.wrap(request); return _this; } /** * @param {?} request * @return {?} */ ZoneMacroTaskConnection.prototype.delegate = function (request) { this.lastConnection = this.backend.createConnection(request); return (this.lastConnection.response); }; Object.defineProperty(ZoneMacroTaskConnection.prototype, "readyState", { /** * @return {?} */ get: function () { return !!this.lastConnection ? this.lastConnection.readyState : _angular_http.ReadyState.Unsent; }, enumerable: true, configurable: true }); return ZoneMacroTaskConnection; }(ZoneMacroTaskWrapper)); var ZoneMacroTaskBackend = (function () { /** * @param {?} backend */ function ZoneMacroTaskBackend(backend) { this.backend = backend; } /** * @param {?} request * @return {?} */ ZoneMacroTaskBackend.prototype.createConnection = function (request) { return new ZoneMacroTaskConnection(request, this.backend); }; return ZoneMacroTaskBackend; }()); var ZoneClientBackend = (function (_super) { __extends(ZoneClientBackend, _super); /** * @param {?} backend */ function ZoneClientBackend(backend) { var _this = _super.call(this) || this; _this.backend = backend; return _this; } /** * @param {?} request * @return {?} */ ZoneClientBackend.prototype.handle = function (request) { return this.wrap(request); }; /** * @param {?} request * @return {?} */ ZoneClientBackend.prototype.delegate = function (request) { return this.backend.handle(request); }; return ZoneClientBackend; }(ZoneMacroTaskWrapper)); /** * @param {?} xhrBackend * @param {?} options * @return {?} */ function httpFactory(xhrBackend, options) { var /** @type {?} */ macroBackend = new ZoneMacroTaskBackend(xhrBackend); return new _angular_http.Http(macroBackend, options); } /** * @param {?} backend * @param {?} interceptors * @return {?} */ function zoneWrappedInterceptingHandler(backend, interceptors) { var /** @type {?} */ realBackend = _angular_common_http.ɵinterceptingHandler(backend, interceptors); return new ZoneClientBackend(realBackend); } var SERVER_HTTP_PROVIDERS = [ { provide: _angular_http.Http, useFactory: httpFactory, deps: [_angular_http.XHRBackend, _angular_http.RequestOptions] }, { provide: _angular_http.BrowserXhr, useClass: ServerXhr }, { provide: _angular_http.XSRFStrategy, useClass: ServerXsrfStrategy }, { provide: _angular_common_http.XhrFactory, useClass: ServerXhr }, { provide: _angular_common_http.HttpHandler, useFactory: zoneWrappedInterceptingHandler, deps: [_angular_common_http.HttpBackend, [new _angular_core.Optional(), _angular_common_http.HTTP_INTERCEPTORS]] } ]; /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /** * The DI token for setting the initial config for the platform. * * \@experimental */ var INITIAL_CONFIG = new _angular_core.InjectionToken('Server.INITIAL_CONFIG'); /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /** * @param {?} urlStr * @return {?} */ function parseUrl(urlStr) { var /** @type {?} */ parsedUrl = url.parse(urlStr); return { pathname: parsedUrl.pathname || '', search: parsedUrl.search || '', hash: parsedUrl.hash || '', }; } /** * Server-side implementation of URL state. Implements `pathname`, `search`, and `hash` * but not the state stack. */ var ServerPlatformLocation = (function () { /** * @param {?} _doc * @param {?} _config */ function ServerPlatformLocation(_doc, _config) { this._doc = _doc; this._path = '/'; this._search = ''; this._hash = ''; this._hashUpdate = new rxjs_Subject.Subject(); var config = _config; if (!!config && !!config.url) { var parsedUrl = parseUrl(config.url); this._path = parsedUrl.pathname; this._search = parsedUrl.search; this._hash = parsedUrl.hash; } } /** * @return {?} */ ServerPlatformLocation.prototype.getBaseHrefFromDOM = function () { return ((_angular_platformBrowser.ɵgetDOM().getBaseHref(this._doc))); }; /** * @param {?} fn * @return {?} */ ServerPlatformLocation.prototype.onPopState = function (fn) { // No-op: a state stack is not implemented, so // no events will ever come. }; /** * @param {?} fn * @return {?} */ ServerPlatformLocation.prototype.onHashChange = function (fn) { this._hashUpdate.subscribe(fn); }; Object.defineProperty(ServerPlatformLocation.prototype, "pathname", { /** * @return {?} */ get: function () { return this._path; }, enumerable: true, configurable: true }); Object.defineProperty(ServerPlatformLocation.prototype, "search", { /** * @return {?} */ get: function () { return this._search; }, enumerable: true, configurable: true }); Object.defineProperty(ServerPlatformLocation.prototype, "hash", { /** * @return {?} */ get: function () { return this._hash; }, enumerable: true, configurable: true }); Object.defineProperty(ServerPlatformLocation.prototype, "url", { /** * @return {?} */ get: function () { return "" + this.pathname + this.search + this.hash; }, enumerable: true, configurable: true }); /** * @param {?} value * @param {?} oldUrl * @return {?} */ ServerPlatformLocation.prototype.setHash = function (value, oldUrl) { var _this = this; if (this._hash === value) { // Don't fire events if the hash has not changed. return; } this._hash = value; var /** @type {?} */ newUrl = this.url; scheduleMicroTask(function () { return _this._hashUpdate.next(/** @type {?} */ ({ type: 'hashchange', oldUrl: oldUrl, newUrl: newUrl })); }); }; /** * @param {?} state * @param {?} title * @param {?} newUrl * @return {?} */ ServerPlatformLocation.prototype.replaceState = function (state, title, newUrl) { var /** @type {?} */ oldUrl = this.url; var /** @type {?} */ parsedUrl = parseUrl(newUrl); this._path = parsedUrl.pathname; this._search = parsedUrl.search; this.setHash(parsedUrl.hash, oldUrl); }; /** * @param {?} state * @param {?} title * @param {?} newUrl * @return {?} */ ServerPlatformLocation.prototype.pushState = function (state, title, newUrl) { this.replaceState(state, title, newUrl); }; /** * @return {?} */ ServerPlatformLocation.prototype.forward = function () { throw new Error('Not implemented'); }; /** * @return {?} */ ServerPlatformLocation.prototype.back = function () { throw new Error('Not implemented'); }; return ServerPlatformLocation; }()); ServerPlatformLocation.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ ServerPlatformLocation.ctorParameters = function () { return [ { type: undefined, decorators: [{ type: _angular_core.Inject, args: [_angular_platformBrowser.DOCUMENT,] },] }, { type: undefined, decorators: [{ type: _angular_core.Optional }, { type: _angular_core.Inject, args: [INITIAL_CONFIG,] },] }, ]; }; /** * @param {?} fn * @return {?} */ function scheduleMicroTask(fn) { Zone.current.scheduleMicroTask('scheduleMicrotask', fn); } /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var parse5$1 = require('parse5'); var treeAdapter; var _attrToPropMap = { 'class': 'className', 'innerHtml': 'innerHTML', 'readonly': 'readOnly', 'tabindex': 'tabIndex', }; var mapProps = ['attribs', 'x-attribsNamespace', 'x-attribsPrefix']; /** * @param {?} methodName * @return {?} */ function _notImplemented(methodName) { return new Error('This method is not implemented in Parse5DomAdapter: ' + methodName); } /** * @param {?} el * @param {?} name * @return {?} */ function _getElement(el, name) { for (var /** @type {?} */ i = 0; i < el.childNodes.length; i++) { var /** @type {?} */ node = el.childNodes[i]; if (node.name === name) { return node; } } return null; } /** * Parses a document string to a Document object. * @param {?} html * @return {?} */ function parseDocument(html) { var /** @type {?} */ doc = parse5$1.parse(html, { treeAdapter: parse5$1.treeAdapters.htmlparser2 }); var /** @type {?} */ docElement = _getElement(doc, 'html'); doc['head'] = _getElement(docElement, 'head'); doc['body'] = _getElement(docElement, 'body'); doc['_window'] = {}; return doc; } /** * A `DomAdapter` powered by the `parse5` NodeJS module. * * \@security Tread carefully! Interacting with the DOM directly is dangerous and * can introduce XSS risks. */ var Parse5DomAdapter = (function (_super) { __extends(Parse5DomAdapter, _super); function Parse5DomAdapter() { return _super !== null && _super.apply(this, arguments) || this; } /** * @return {?} */ Parse5DomAdapter.makeCurrent = function () { treeAdapter = parse5$1.treeAdapters.htmlparser2; _angular_platformBrowser.ɵsetRootDomAdapter(new Parse5DomAdapter()); }; /** * @param {?} nodeA * @param {?} nodeB * @return {?} */ Parse5DomAdapter.prototype.contains = function (nodeA, nodeB) { var /** @type {?} */ inner = nodeB; while (inner) { if (inner === nodeA) return true; inner = inner.parent; } return false; }; /** * @param {?} element * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.hasProperty = function (element, name) { return _HTMLElementPropertyList.indexOf(name) > -1; }; /** * @param {?} el * @param {?} name * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setProperty = function (el, name, value) { if (name === 'innerHTML') { this.setInnerHTML(el, value); } else if (name === 'innerText') { this.setText(el, value); } else if (name === 'className') { el.attribs['class'] = el.className = value; } else { // Store the property in a separate property bag so that it doesn't clobber // actual parse5 properties on the Element. el.properties = el.properties || {}; el.properties[name] = value; } }; /** * @param {?} el * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.getProperty = function (el, name) { return el.properties ? el.properties[name] : undefined; }; /** * @param {?} error * @return {?} */ Parse5DomAdapter.prototype.logError = function (error) { console.error(error); }; /** * @param {?} error * @return {?} */ Parse5DomAdapter.prototype.log = function (error) { console.log(error); }; /** * @param {?} error * @return {?} */ Parse5DomAdapter.prototype.logGroup = function (error) { console.error(error); }; /** * @return {?} */ Parse5DomAdapter.prototype.logGroupEnd = function () { }; Object.defineProperty(Parse5DomAdapter.prototype, "attrToPropMap", { /** * @return {?} */ get: function () { return _attrToPropMap; }, enumerable: true, configurable: true }); /** * @param {?} el * @param {?} selector * @return {?} */ Parse5DomAdapter.prototype.querySelector = function (el, selector) { return this.querySelectorAll(el, selector)[0] || null; }; /** * @param {?} el * @param {?} selector * @return {?} */ Parse5DomAdapter.prototype.querySelectorAll = function (el, selector) { var _this = this; var /** @type {?} */ res = []; var /** @type {?} */ _recursive = function (result, node, selector, matcher) { var /** @type {?} */ cNodes = node.childNodes; if (cNodes && cNodes.length > 0) { for (var /** @type {?} */ i = 0; i < cNodes.length; i++) { var /** @type {?} */ childNode = cNodes[i]; if (_this.elementMatches(childNode, selector, matcher)) { result.push(childNode); } _recursive(result, childNode, selector, matcher); } } }; var /** @type {?} */ matcher = new _angular_compiler.SelectorMatcher(); matcher.addSelectables(_angular_compiler.CssSelector.parse(selector)); _recursive(res, el, selector, matcher); return res; }; /** * @param {?} node * @param {?} selector * @param {?=} matcher * @return {?} */ Parse5DomAdapter.prototype.elementMatches = function (node, selector, matcher) { if (matcher === void 0) { matcher = null; } if (this.isElementNode(node) && selector === '*') { return true; } var /** @type {?} */ result = false; if (selector && selector.charAt(0) == '#') { result = this.getAttribute(node, 'id') == selector.substring(1); } else if (selector) { if (!matcher) { matcher = new _angular_compiler.SelectorMatcher(); matcher.addSelectables(_angular_compiler.CssSelector.parse(selector)); } var /** @type {?} */ cssSelector = new _angular_compiler.CssSelector(); cssSelector.setElement(this.tagName(node)); if (node.attribs) { for (var /** @type {?} */ attrName in node.attribs) { cssSelector.addAttribute(attrName, node.attribs[attrName]); } } var /** @type {?} */ classList = this.classList(node); for (var /** @type {?} */ i = 0; i < classList.length; i++) { cssSelector.addClassName(classList[i]); } matcher.match(cssSelector, function (selector, cb) { result = true; }); } return result; }; /** * @param {?} el * @param {?} evt * @param {?} listener * @return {?} */ Parse5DomAdapter.prototype.on = function (el, evt, listener) { var /** @type {?} */ listenersMap = el._eventListenersMap; if (!listenersMap) { listenersMap = {}; el._eventListenersMap = listenersMap; } var /** @type {?} */ listeners = listenersMap[evt] || []; listenersMap[evt] = listeners.concat([listener]); }; /** * @param {?} el * @param {?} evt * @param {?} listener * @return {?} */ Parse5DomAdapter.prototype.onAndCancel = function (el, evt, listener) { this.on(el, evt, listener); return function () { remove(/** @type {?} */ ((el._eventListenersMap[evt])), listener); }; }; /** * @param {?} el * @param {?} evt * @return {?} */ Parse5DomAdapter.prototype.dispatchEvent = function (el, evt) { if (!evt.target) { evt.target = el; } if (el._eventListenersMap) { var /** @type {?} */ listeners = el._eventListenersMap[evt.type]; if (listeners) { for (var /** @type {?} */ i = 0; i < listeners.length; i++) { listeners[i](evt); } } } if (el.parent) { this.dispatchEvent(el.parent, evt); } if (el._window) { this.dispatchEvent(el._window, evt); } }; /** * @param {?} eventType * @return {?} */ Parse5DomAdapter.prototype.createMouseEvent = function (eventType) { return this.createEvent(eventType); }; /** * @param {?} eventType * @return {?} */ Parse5DomAdapter.prototype.createEvent = function (eventType) { var /** @type {?} */ event = ({ type: eventType, defaultPrevented: false, preventDefault: function () { ((event)).defaultPrevented = true; } }); return event; }; /** * @param {?} event * @return {?} */ Parse5DomAdapter.prototype.preventDefault = function (event) { event.returnValue = false; }; /** * @param {?} event * @return {?} */ Parse5DomAdapter.prototype.isPrevented = function (event) { return event.returnValue != null && !event.returnValue; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getInnerHTML = function (el) { return parse5$1.serialize(this.templateAwareRoot(el), { treeAdapter: treeAdapter }); }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getTemplateContent = function (el) { return null; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getOuterHTML = function (el) { var /** @type {?} */ fragment = treeAdapter.createDocumentFragment(); this.appendChild(fragment, el); return parse5$1.serialize(fragment, { treeAdapter: treeAdapter }); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.nodeName = function (node) { return node.tagName; }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.nodeValue = function (node) { return node.nodeValue; }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.type = function (node) { throw _notImplemented('type'); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.content = function (node) { return node.childNodes[0]; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.firstChild = function (el) { return el.firstChild; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.nextSibling = function (el) { return el.nextSibling; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.parentElement = function (el) { return el.parent; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.childNodes = function (el) { return el.childNodes; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.childNodesAsList = function (el) { var /** @type {?} */ childNodes = el.childNodes; var /** @type {?} */ res = new Array(childNodes.length); for (var /** @type {?} */ i = 0; i < childNodes.length; i++) { res[i] = childNodes[i]; } return res; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.clearNodes = function (el) { while (el.childNodes.length > 0) { this.remove(el.childNodes[0]); } }; /** * @param {?} el * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.appendChild = function (el, node) { this.remove(node); treeAdapter.appendChild(this.templateAwareRoot(el), node); }; /** * @param {?} el * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.removeChild = function (el, node) { if (el.childNodes.indexOf(node) > -1) { this.remove(node); } }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.remove = function (el) { var /** @type {?} */ parent = el.parent; if (parent) { var /** @type {?} */ index = parent.childNodes.indexOf(el); parent.childNodes.splice(index, 1); } var /** @type {?} */ prev = el.previousSibling; var /** @type {?} */ next = el.nextSibling; if (prev) { prev.next = next; } if (next) { next.prev = prev; } el.prev = null; el.next = null; el.parent = null; return el; }; /** * @param {?} parent * @param {?} ref * @param {?} newNode * @return {?} */ Parse5DomAdapter.prototype.insertBefore = function (parent, ref, newNode) { this.remove(newNode); if (ref) { treeAdapter.insertBefore(parent, newNode, ref); } else { this.appendChild(parent, newNode); } }; /** * @param {?} parent * @param {?} ref * @param {?} nodes * @return {?} */ Parse5DomAdapter.prototype.insertAllBefore = function (parent, ref, nodes) { var _this = this; nodes.forEach(function (n) { return _this.insertBefore(parent, ref, n); }); }; /** * @param {?} parent * @param {?} ref * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.insertAfter = function (parent, ref, node) { if (ref.nextSibling) { this.insertBefore(parent, ref.nextSibling, node); } else { this.appendChild(parent, node); } }; /** * @param {?} el * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setInnerHTML = function (el, value) { this.clearNodes(el); var /** @type {?} */ content = parse5$1.parseFragment(value, { treeAdapter: treeAdapter }); for (var /** @type {?} */ i = 0; i < content.childNodes.length; i++) { treeAdapter.appendChild(el, content.childNodes[i]); } }; /** * @param {?} el * @param {?=} isRecursive * @return {?} */ Parse5DomAdapter.prototype.getText = function (el, isRecursive) { if (this.isTextNode(el)) { return el.data; } if (this.isCommentNode(el)) { // In the DOM, comments within an element return an empty string for textContent // However, comment node instances return the comment content for textContent getter return isRecursive ? '' : el.data; } if (!el.childNodes || el.childNodes.length == 0) { return ''; } var /** @type {?} */ textContent = ''; for (var /** @type {?} */ i = 0; i < el.childNodes.length; i++) { textContent += this.getText(el.childNodes[i], true); } return textContent; }; /** * @param {?} el * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setText = function (el, value) { if (this.isTextNode(el) || this.isCommentNode(el)) { el.data = value; } else { this.clearNodes(el); if (value !== '') treeAdapter.insertText(el, value); } }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getValue = function (el) { return el.value; }; /** * @param {?} el * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setValue = function (el, value) { el.value = value; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getChecked = function (el) { return el.checked; }; /** * @param {?} el * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setChecked = function (el, value) { el.checked = value; }; /** * @param {?} text * @return {?} */ Parse5DomAdapter.prototype.createComment = function (text) { return treeAdapter.createCommentNode(text); }; /** * @param {?} html * @return {?} */ Parse5DomAdapter.prototype.createTemplate = function (html) { var /** @type {?} */ template = treeAdapter.createElement('template', 'http://www.w3.org/1999/xhtml', []); var /** @type {?} */ content = parse5$1.parseFragment(html, { treeAdapter: treeAdapter }); treeAdapter.setTemplateContent(template, content); return template; }; /** * @param {?} tagName * @return {?} */ Parse5DomAdapter.prototype.createElement = function (tagName) { return treeAdapter.createElement(tagName, 'http://www.w3.org/1999/xhtml', []); }; /** * @param {?} ns * @param {?} tagName * @return {?} */ Parse5DomAdapter.prototype.createElementNS = function (ns, tagName) { return treeAdapter.createElement(tagName, ns, []); }; /** * @param {?} text * @return {?} */ Parse5DomAdapter.prototype.createTextNode = function (text) { var /** @type {?} */ t = (this.createComment(text)); t.type = 'text'; return t; }; /** * @param {?} attrName * @param {?} attrValue * @return {?} */ Parse5DomAdapter.prototype.createScriptTag = function (attrName, attrValue) { return treeAdapter.createElement('script', 'http://www.w3.org/1999/xhtml', [{ name: attrName, value: attrValue }]); }; /** * @param {?} css * @return {?} */ Parse5DomAdapter.prototype.createStyleElement = function (css) { var /** @type {?} */ style = this.createElement('style'); this.setText(style, css); return (style); }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.createShadowRoot = function (el) { el.shadowRoot = treeAdapter.createDocumentFragment(); el.shadowRoot.parent = el; return el.shadowRoot; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getShadowRoot = function (el) { return el.shadowRoot; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getHost = function (el) { return el.host; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getDistributedNodes = function (el) { throw _notImplemented('getDistributedNodes'); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.clone = function (node) { var /** @type {?} */ _recursive = function (node) { var /** @type {?} */ nodeClone = Object.create(Object.getPrototypeOf(node)); for (var /** @type {?} */ prop in node) { var /** @type {?} */ desc = Object.getOwnPropertyDescriptor(node, prop); if (desc && 'value' in desc && typeof desc.value !== 'object') { nodeClone[prop] = node[prop]; } } nodeClone.parent = null; nodeClone.prev = null; nodeClone.next = null; nodeClone.children = null; mapProps.forEach(function (mapName) { if (node[mapName] != null) { nodeClone[mapName] = {}; for (var /** @type {?} */ prop in node[mapName]) { nodeClone[mapName][prop] = node[mapName][prop]; } } }); var /** @type {?} */ cNodes = node.children; if (cNodes) { var /** @type {?} */ cNodesClone = new Array(cNodes.length); for (var /** @type {?} */ i = 0; i < cNodes.length; i++) { var /** @type {?} */ childNode = cNodes[i]; var /** @type {?} */ childNodeClone = _recursive(childNode); cNodesClone[i] = childNodeClone; if (i > 0) { childNodeClone.prev = cNodesClone[i - 1]; cNodesClone[i - 1].next = childNodeClone; } childNodeClone.parent = nodeClone; } nodeClone.children = cNodesClone; } return nodeClone; }; return _recursive(node); }; /** * @param {?} element * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.getElementsByClassName = function (element, name) { return this.querySelectorAll(element, '.' + name); }; /** * @param {?} element * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.getElementsByTagName = function (element, name) { return this.querySelectorAll(element, name); }; /** * @param {?} element * @return {?} */ Parse5DomAdapter.prototype.classList = function (element) { var /** @type {?} */ classAttrValue = null; var /** @type {?} */ attributes = element.attribs; if (attributes && attributes['class'] != null) { classAttrValue = attributes['class']; } return classAttrValue ? classAttrValue.trim().split(/\s+/g) : []; }; /** * @param {?} element * @param {?} className * @return {?} */ Parse5DomAdapter.prototype.addClass = function (element, className) { var /** @type {?} */ classList = this.classList(element); var /** @type {?} */ index = classList.indexOf(className); if (index == -1) { classList.push(className); element.attribs['class'] = element.className = classList.join(' '); } }; /** * @param {?} element * @param {?} className * @return {?} */ Parse5DomAdapter.prototype.removeClass = function (element, className) { var /** @type {?} */ classList = this.classList(element); var /** @type {?} */ index = classList.indexOf(className); if (index > -1) { classList.splice(index, 1); element.attribs['class'] = element.className = classList.join(' '); } }; /** * @param {?} element * @param {?} className * @return {?} */ Parse5DomAdapter.prototype.hasClass = function (element, className) { return this.classList(element).indexOf(className) > -1; }; /** * @param {?} element * @param {?} styleName * @param {?=} styleValue * @return {?} */ Parse5DomAdapter.prototype.hasStyle = function (element, styleName, styleValue) { var /** @type {?} */ value = this.getStyle(element, styleName) || ''; return styleValue ? value == styleValue : value.length > 0; }; /** * \@internal * @param {?} element * @return {?} */ Parse5DomAdapter.prototype._readStyleAttribute = function (element) { var /** @type {?} */ styleMap = {}; var /** @type {?} */ attributes = element.attribs; if (attributes && attributes['style'] != null) { var /** @type {?} */ styleAttrValue = attributes['style']; var /** @type {?} */ styleList = styleAttrValue.split(/;+/g); for (var /** @type {?} */ i = 0; i < styleList.length; i++) { if (styleList[i].length > 0) { var /** @type {?} */ style = (styleList[i]); var /** @type {?} */ colon = style.indexOf(':'); if (colon === -1) { throw new Error("Invalid CSS style: " + style); } ((styleMap))[style.substr(0, colon).trim()] = style.substr(colon + 1).trim(); } } } return styleMap; }; /** * \@internal * @param {?} element * @param {?} styleMap * @return {?} */ Parse5DomAdapter.prototype._writeStyleAttribute = function (element, styleMap) { var /** @type {?} */ styleAttrValue = ''; for (var /** @type {?} */ key in styleMap) { var /** @type {?} */ newValue = styleMap[key]; if (newValue) { styleAttrValue += key + ':' + styleMap[key] + ';'; } } element.attribs['style'] = styleAttrValue; }; /** * @param {?} element * @param {?} styleName * @param {?=} styleValue * @return {?} */ Parse5DomAdapter.prototype.setStyle = function (element, styleName, styleValue) { var /** @type {?} */ styleMap = this._readStyleAttribute(element); ((styleMap))[styleName] = styleValue; this._writeStyleAttribute(element, styleMap); }; /** * @param {?} element * @param {?} styleName * @return {?} */ Parse5DomAdapter.prototype.removeStyle = function (element, styleName) { this.setStyle(element, styleName, null); }; /** * @param {?} element * @param {?} styleName * @return {?} */ Parse5DomAdapter.prototype.getStyle = function (element, styleName) { var /** @type {?} */ styleMap = this._readStyleAttribute(element); return styleMap.hasOwnProperty(styleName) ? ((styleMap))[styleName] : ''; }; /** * @param {?} element * @return {?} */ Parse5DomAdapter.prototype.tagName = function (element) { return element.tagName == 'style' ? 'STYLE' : element.tagName; }; /** * @param {?} element * @return {?} */ Parse5DomAdapter.prototype.attributeMap = function (element) { var /** @type {?} */ res = new Map(); var /** @type {?} */ elAttrs = treeAdapter.getAttrList(element); for (var /** @type {?} */ i = 0; i < elAttrs.length; i++) { var /** @type {?} */ attrib = elAttrs[i]; res.set(attrib.name, attrib.value); } return res; }; /** * @param {?} element * @param {?} attribute * @return {?} */ Parse5DomAdapter.prototype.hasAttribute = function (element, attribute) { return element.attribs && element.attribs[attribute] != null; }; /** * @param {?} element * @param {?} ns * @param {?} attribute * @return {?} */ Parse5DomAdapter.prototype.hasAttributeNS = function (element, ns, attribute) { return this.hasAttribute(element, attribute); }; /** * @param {?} element * @param {?} attribute * @return {?} */ Parse5DomAdapter.prototype.getAttribute = function (element, attribute) { return this.hasAttribute(element, attribute) ? element.attribs[attribute] : null; }; /** * @param {?} element * @param {?} ns * @param {?} attribute * @return {?} */ Parse5DomAdapter.prototype.getAttributeNS = function (element, ns, attribute) { return this.getAttribute(element, attribute); }; /** * @param {?} element * @param {?} attribute * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setAttribute = function (element, attribute, value) { if (attribute) { element.attribs[attribute] = value; if (attribute === 'class') { element.className = value; } } }; /** * @param {?} element * @param {?} ns * @param {?} attribute * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setAttributeNS = function (element, ns, attribute, value) { this.setAttribute(element, attribute, value); }; /** * @param {?} element * @param {?} attribute * @return {?} */ Parse5DomAdapter.prototype.removeAttribute = function (element, attribute) { if (attribute) { delete element.attribs[attribute]; } }; /** * @param {?} element * @param {?} ns * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.removeAttributeNS = function (element, ns, name) { throw 'not implemented'; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.templateAwareRoot = function (el) { return this.isTemplateElement(el) ? treeAdapter.getTemplateContent(el) : el; }; /** * @return {?} */ Parse5DomAdapter.prototype.createHtmlDocument = function () { var /** @type {?} */ newDoc = treeAdapter.createDocument(); newDoc.title = 'fakeTitle'; var /** @type {?} */ head = treeAdapter.createElement('head', null, []); var /** @type {?} */ body = treeAdapter.createElement('body', 'http://www.w3.org/1999/xhtml', []); this.appendChild(newDoc, head); this.appendChild(newDoc, body); newDoc['head'] = head; newDoc['body'] = body; newDoc['_window'] = {}; return newDoc; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getBoundingClientRect = function (el) { return { left: 0, top: 0, width: 0, height: 0 }; }; /** * @param {?} doc * @return {?} */ Parse5DomAdapter.prototype.getTitle = function (doc) { return this.getText(this.getTitleNode(doc)) || ''; }; /** * @param {?} doc * @param {?} newTitle * @return {?} */ Parse5DomAdapter.prototype.setTitle = function (doc, newTitle) { this.setText(this.getTitleNode(doc), newTitle || ''); }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.isTemplateElement = function (el) { return this.isElementNode(el) && this.tagName(el) === 'template'; }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.isTextNode = function (node) { return treeAdapter.isTextNode(node); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.isCommentNode = function (node) { return treeAdapter.isCommentNode(node); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.isElementNode = function (node) { return node ? treeAdapter.isElementNode(node) : false; }; /** * @param {?} node * @return {?}
* @return {?} */ Parse5DomAdapter.prototype.isShadowRoot = function (node) { return this.getShadowRoot(node) == node; }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.importIntoDoc = function (node) { return this.clone(node); }; /** * @param {?} node * @return {?} */ Parse5DomAdapter.prototype.adoptNode = function (node) { return node; }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getHref = function (el) { return this.getAttribute(el, 'href'); }; /** * @param {?} el * @param {?} baseUrl * @param {?} href * @return {?} */ Parse5DomAdapter.prototype.resolveAndSetHref = function (el, baseUrl, href) { if (href == null) { el.href = baseUrl; } else { el.href = baseUrl + '/../' + href; } }; /** * \@internal * @param {?} parsedRules * @param {?=} css * @return {?} */ Parse5DomAdapter.prototype._buildRules = function (parsedRules, css) { var /** @type {?} */ rules = []; for (var /** @type {?} */ i = 0; i < parsedRules.length; i++) { var /** @type {?} */ parsedRule = parsedRules[i]; var /** @type {?} */ rule = {}; rule['cssText'] = css; rule['style'] = { content: '', cssText: '' }; if (parsedRule.type == 'rule') { rule['type'] = 1; rule['selectorText'] = parsedRule.selectors.join(', '.replace(/\s{2,}/g, ' ') .replace(/\s*~\s*/g, ' ~ ') .replace(/\s*\+\s*/g, ' + ') .replace(/\s*>\s*/g, ' > ') .replace(/\[(\w+)=(\w+)\]/g, '[$1="$2"]')); if (parsedRule.declarations == null) { continue; } for (var /** @type {?} */ j = 0; j < parsedRule.declarations.length; j++) { var /** @type {?} */ declaration = parsedRule.declarations[j]; rule['style'] = declaration.property[declaration.value]; rule['style'].cssText += declaration.property + ': ' + declaration.value + ';'; } } else if (parsedRule.type == 'media') { rule['type'] = 4; rule['media'] = { mediaText: parsedRule.media }; if (parsedRule.rules) { rule['cssRules'] = this._buildRules(parsedRule.rules); } } rules.push(rule); } return rules; }; /** * @return {?} */ Parse5DomAdapter.prototype.supportsDOMEvents = function () { return false; }; /** * @return {?} */ Parse5DomAdapter.prototype.supportsNativeShadowDOM = function () { return false; }; /** * @param {?} doc * @param {?} target * @return {?} */ Parse5DomAdapter.prototype.getGlobalEventTarget = function (doc, target) { if (target == 'window') { return ((doc))._window; } else if (target == 'document') { return doc; } else if (target == 'body') { return doc.body; } }; /** * @param {?} doc * @return {?} */ Parse5DomAdapter.prototype.getBaseHref = function (doc) { var /** @type {?} */ base = this.querySelector(doc, 'base'); var /** @type {?} */ href = ''; if (base) { href = this.getHref(base); } // TODO(alxhub): Need relative path logic from BrowserDomAdapter here? return href == null ? null : href; }; /** * @return {?} */ Parse5DomAdapter.prototype.resetBaseElement = function () { throw 'not implemented'; }; /** * @return {?} */ Parse5DomAdapter.prototype.getHistory = function () { throw 'not implemented'; }; /** * @return {?} */ Parse5DomAdapter.prototype.getLocation = function () { throw 'not implemented'; }; /** * @return {?} */ Parse5DomAdapter.prototype.getUserAgent = function () { return 'Fake user agent'; }; /** * @param {?} el * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.getData = function (el, name) { return this.getAttribute(el, 'data-' + name); }; /** * @param {?} el * @return {?} */ Parse5DomAdapter.prototype.getComputedStyle = function (el) { throw 'not implemented'; }; /** * @param {?} el * @param {?} name * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setData = function (el, name, value) { this.setAttribute(el, 'data-' + name, value); }; /** * @return {?} */ Parse5DomAdapter.prototype.supportsWebAnimation = function () { return false; }; /** * @return {?} */ Parse5DomAdapter.prototype.performanceNow = function () { return Date.now(); }; /** * @return {?} */ Parse5DomAdapter.prototype.getAnimationPrefix = function () { return ''; }; /** * @return {?} */ Parse5DomAdapter.prototype.getTransitionEnd = function () { return 'transitionend'; }; /** * @return {?} */ Parse5DomAdapter.prototype.supportsAnimation = function () { return true; }; /** * @param {?} el * @param {?} newNode * @param {?} oldNode * @return {?} */ Parse5DomAdapter.prototype.replaceChild = function (el, newNode, oldNode) { throw new Error('not implemented'); }; /** * @param {?} templateHtml * @return {?} */ Parse5DomAdapter.prototype.parse = function (templateHtml) { throw new Error('not implemented'); }; /** * @param {?} el * @param {?} methodName * @param {?} args * @return {?} */ Parse5DomAdapter.prototype.invoke = function (el, methodName, args) { throw new Error('not implemented'); }; /** * @param {?} event * @return {?} */ Parse5DomAdapter.prototype.getEventKey = function (event) { throw new Error('not implemented'); }; /** * @return {?} */ Parse5DomAdapter.prototype.supportsCookies = function () { return false; }; /** * @param {?} name * @return {?} */ Parse5DomAdapter.prototype.getCookie = function (name) { throw new Error('not implemented'); }; /** * @param {?} name * @param {?} value * @return {?} */ Parse5DomAdapter.prototype.setCookie = function (name, value) { throw new Error('not implemented'); }; /** * @param {?} element * @param {?} keyframes * @param {?} options * @return {?} */ Parse5DomAdapter.prototype.animate = function (element, keyframes, options) { throw new Error('not implemented'); }; /** * @param {?} doc * @return {?} */ Parse5DomAdapter.prototype.getTitleNode = function (doc) { var /** @type {?} */ title = this.querySelector(doc, 'title'); if (!title) { title = (this.createElement('title')); this.appendChild(this.querySelector(doc, 'head'), title); } return title; }; return Parse5DomAdapter; }(_angular_platformBrowser.ɵDomAdapter)); // TODO: build a proper list, this one is all the keys of a HTMLInputElement var _HTMLElementPropertyList = [ 'webkitEntries', 'incremental', 'webkitdirectory', 'selectionDirection', 'selectionEnd', 'selectionStart', 'labels', 'validationMessage', 'validity', 'willValidate', 'width', 'valueAsNumber', 'valueAsDate', 'value', 'useMap', 'defaultValue', 'type', 'step', 'src', 'size', 'required', 'readOnly', 'placeholder', 'pattern', 'name', 'multiple', 'min', 'minLength', 'maxLength', 'max', 'list', 'indeterminate', 'height', 'formTarget', 'formNoValidate', 'formMethod', 'formEnctype', 'formAction', 'files', 'form', 'disabled', 'dirName', 'checked', 'defaultChecked', 'autofocus', 'autocomplete', 'alt', 'align', 'accept', 'onautocompleteerror', 'onautocomplete', 'onwaiting', 'onvolumechange', 'ontoggle', 'ontimeupdate', 'onsuspend', 'onsubmit', 'onstalled', 'onshow', 'onselect', 'onseeking', 'onseeked', 'onscroll', 'onresize', 'onreset', 'onratechange', 'onprogress', 'onplaying', 'onplay', 'onpause', 'onmousewheel', 'onmouseup', 'onmouseover', 'onmouseout', 'onmousemove', 'onmouseleave', 'onmouseenter', 'onmousedown', 'onloadstart', 'onloadedmetadata', 'onloadeddata', 'onload', 'onkeyup', 'onkeypress', 'onkeydown', 'oninvalid', 'oninput', 'onfocus', 'onerror', 'onended', 'onemptied', 'ondurationchange', 'ondrop', 'ondragstart', 'ondragover', 'ondragleave', 'ondragenter', 'ondragend', 'ondrag', 'ondblclick', 'oncuechange', 'oncontextmenu', 'onclose', 'onclick', 'onchange', 'oncanplaythrough', 'oncanplay', 'oncancel', 'onblur', 'onabort', 'spellcheck', 'isContentEditable', 'contentEditable', 'outerText', 'innerText', 'accessKey', 'hidden', 'webkitdropzone', 'draggable', 'tabIndex', 'dir', 'translate', 'lang', 'title', 'childElementCount', 'lastElementChild', 'firstElementChild', 'children', 'onwebkitfullscreenerror', 'onwebkitfullscreenchange', 'nextElementSibling', 'previousElementSibling', 'onwheel', 'onselectstart', 'onsearch', 'onpaste', 'oncut', 'oncopy', 'onbeforepaste', 'onbeforecut', 'onbeforecopy', 'shadowRoot', 'dataset', 'classList', 'className', 'outerHTML', 'innerHTML', 'scrollHeight', 'scrollWidth', 'scrollTop', 'scrollLeft', 'clientHeight', 'clientWidth', 'clientTop', 'clientLeft', 'offsetParent', 'offsetHeight', 'offsetWidth', 'offsetTop', 'offsetLeft', 'localName', 'prefix', 'namespaceURI', 'id', 'style', 'attributes', 'tagName', 'parentElement', 'textContent', 'baseURI', 'ownerDocument', 'nextSibling', 'previousSibling', 'lastChild', 'firstChild', 'childNodes', 'parentNode', 'nodeType', 'nodeValue', 'nodeName', 'closure_lm_714617', '__jsaction', ]; /** * @template T * @param {?} list * @param {?} el * @return {?} */ function remove(list, el) { var /** @type {?} */ index = list.indexOf(el); if (index > -1) { list.splice(index, 1); } } /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var EMPTY_ARRAY = []; var ServerRendererFactory2 = (function () { /** * @param {?} ngZone * @param {?} document * @param {?} sharedStylesHost */ function ServerRendererFactory2(ngZone, document, sharedStylesHost) { this.ngZone = ngZone; this.document = document; this.sharedStylesHost = sharedStylesHost; this.rendererByCompId = new Map(); this.schema = new _angular_compiler.DomElementSchemaRegistry(); this.defaultRenderer = new DefaultServerRenderer2(document, ngZone, this.schema); } /** * @param {?} element * @param {?} type * @return {?} */ ServerRendererFactory2.prototype.createRenderer = function (element, type) { if (!element || !type) { return this.defaultRenderer; } switch (type.encapsulation) { case _angular_core.ViewEncapsulation.Native: case _angular_core.ViewEncapsulation.Emulated: { var /** @type {?} */ renderer = this.rendererByCompId.get(type.id); if (!renderer) { renderer = new EmulatedEncapsulationServerRenderer2(this.document, this.ngZone, this.sharedStylesHost, this.schema, type); this.rendererByCompId.set(type.id, renderer); } ((renderer)).applyToHost(element); return renderer; } case _angular_core.ViewEncapsulation.Native: throw new Error('Native encapsulation is not supported on the server!'); default: { if (!this.rendererByCompId.has(type.id)) { var /** @type {?} */ styles = _angular_platformBrowser.ɵflattenStyles(type.id, type.styles, []); this.sharedStylesHost.addStyles(styles); this.rendererByCompId.set(type.id, this.defaultRenderer); } return this.defaultRenderer; } } }; /** * @return {?} */ ServerRendererFactory2.prototype.begin = function () { }; /** * @return {?} */ ServerRendererFactory2.prototype.end = function () { }; return ServerRendererFactory2; }()); ServerRendererFactory2.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ ServerRendererFactory2.ctorParameters = function () { return [ { type: _angular_core.NgZone, }, { type: undefined, decorators: [{ type: _angular_core.Inject, args: [_angular_platformBrowser.DOCUMENT,] },] }, { type: _angular_platformBrowser.ɵSharedStylesHost, }, ]; }; var DefaultServerRenderer2 = (function () { /** * @param {?} document * @param {?} ngZone * @param {?} schema */ function DefaultServerRenderer2(document, ngZone, schema) { this.document = document; this.ngZone = ngZone; this.schema = schema; this.data = Object.create(null); } /** * @return {?} */ DefaultServerRenderer2.prototype.destroy = function () { }; /** * @param {?} name * @param {?=} namespace * @param {?=} debugInfo * @return {?} */ DefaultServerRenderer2.prototype.createElement = function (name, namespace, debugInfo) { if (namespace) { return _angular_platformBrowser.ɵgetDOM().createElementNS(_angular_platformBrowser.ɵNAMESPACE_URIS[namespace], name); } return _angular_platformBrowser.ɵgetDOM().createElement(name); }; /** * @param {?} value * @param {?=} debugInfo * @return {?} */ DefaultServerRenderer2.prototype.createComment = function (value, debugInfo) { return _angular_platformBrowser.ɵgetDOM().createComment(value); }; /** * @param {?} value * @param {?=} debugInfo * @return {?} */ DefaultServerRenderer2.prototype.createText = function (value, debugInfo) { return _angular_platformBrowser.ɵgetDOM().createTextNode(value); }; /** * @param {?} parent * @param {?} newChild * @return {?} */ DefaultServerRenderer2.prototype.appendChild = function (parent, newChild) { _angular_platformBrowser.ɵgetDOM().appendChild(parent, newChild); }; /** * @param {?} parent * @param {?} newChild * @param {?} refChild * @return {?} */ DefaultServerRenderer2.prototype.insertBefore = function (parent, newChild, refChild) { if (parent) { _angular_platformBrowser.ɵgetDOM().insertBefore(parent, refChild, newChild); } }; /** * @param {?} parent * @param {?} oldChild * @return {?} */ DefaultServerRenderer2.prototype.removeChild = function (parent, oldChild) { if (parent) { _angular_platformBrowser.ɵgetDOM().removeChild(parent, oldChild); } }; /** * @param {?} selectorOrNode * @param {?=} debugInfo * @return {?} */ DefaultServerRenderer2.prototype.selectRootElement = function (selectorOrNode, debugInfo) { var /** @type {?} */ el; if (typeof selectorOrNode === 'string') { el = _angular_platformBrowser.ɵgetDOM().querySelector(this.document, selectorOrNode); if (!el) { throw new Error("The selector \"" + selectorOrNode + "\" did not match any elements"); } } else { el = selectorOrNode; } _angular_platformBrowser.ɵgetDOM().clearNodes(el); return el; }; /** * @param {?} node * @return {?} */ DefaultServerRenderer2.prototype.parentNode = function (node) { return _angular_platformBrowser.ɵgetDOM().parentElement(node); }; /** * @param {?} node * @return {?} */ DefaultServerRenderer2.prototype.nextSibling = function (node) { return _angular_platformBrowser.ɵgetDOM().nextSibling(node); }; /** * @param {?} el * @param {?} name * @param {?} value * @param {?=} namespace * @return {?} */ DefaultServerRenderer2.prototype.setAttribute = function (el, name, value, namespace) { if (namespace) { _angular_platformBrowser.ɵgetDOM().setAttributeNS(el, _angular_platformBrowser.ɵNAMESPACE_URIS[namespace], namespace + ':' + name, value); } else { _angular_platformBrowser.ɵgetDOM().setAttribute(el, name, value); } }; /** * @param {?} el * @param {?} name * @param {?=} namespace * @return {?} */ DefaultServerRenderer2.prototype.removeAttribute = function (el, name, namespace) { if (namespace) { _angular_platformBrowser.ɵgetDOM().removeAttributeNS(el, _angular_platformBrowser.ɵNAMESPACE_URIS[namespace], name); } else { _angular_platformBrowser.ɵgetDOM().removeAttribute(el, name); } }; /** * @param {?} el * @param {?} name * @return {?} */ DefaultServerRenderer2.prototype.addClass = function (el, name) { _angular_platformBrowser.ɵgetDOM().addClass(el, name); }; /** * @param {?} el * @param {?} name * @return {?} */ DefaultServerRenderer2.prototype.removeClass = function (el, name) { _angular_platformBrowser.ɵgetDOM().removeClass(el, name); }; /** * @param {?} el * @param {?} style * @param {?} value * @param {?} flags * @return {?} */ DefaultServerRenderer2.prototype.setStyle = function (el, style, value, flags) { _angular_platformBrowser.ɵgetDOM().setStyle(el, style, value); }; /** * @param {?} el * @param {?} style * @param {?} flags * @return {?} */ DefaultServerRenderer2.prototype.removeStyle = function (el, style, flags) { _angular_platformBrowser.ɵgetDOM().removeStyle(el, style); }; /** * @param {?} tagName * @param {?} propertyName * @return {?} */ DefaultServerRenderer2.prototype._isSafeToReflectProperty = function (tagName, propertyName) { return this.schema.securityContext(tagName, propertyName, true) === this.schema.securityContext(tagName, propertyName, false); }; /** * @param {?} el * @param {?} name * @param {?} value * @return {?} */ DefaultServerRenderer2.prototype.setProperty = function (el, name, value) { checkNoSyntheticProp(name, 'property'); _angular_platformBrowser.ɵgetDOM().setProperty(el, name, value); // Mirror property values for known HTML element properties in the attributes. var /** @type {?} */ tagName = ((el.tagName)).toLowerCase(); if (value != null && (typeof value === 'number' || typeof value == 'string') && this.schema.hasElement(tagName, EMPTY_ARRAY) && this.schema.hasProperty(tagName, name, EMPTY_ARRAY) && this._isSafeToReflectProperty(tagName, name)) { this.setAttribute(el, name, value.toString()); } }; /** * @param {?} node * @param {?} value * @return {?} */ DefaultServerRenderer2.prototype.setValue = function (node, value) { _angular_platformBrowser.ɵgetDOM().setText(node, value); }; /** * @param {?} target * @param {?} eventName * @param {?} callback * @return {?} */ DefaultServerRenderer2.prototype.listen = function (target, eventName, callback) { var _this = this; // Note: We are not using the EventsPlugin here as this is not needed // to run our tests. checkNoSyntheticProp(eventName, 'listener'); var /** @type {?} */ el = typeof target === 'string' ? _angular_platformBrowser.ɵgetDOM().getGlobalEventTarget(this.document, target) : target; var /** @type {?} */ outsideHandler = function (event) { return _this.ngZone.runGuarded(function () { return callback(event); }); }; return this.ngZone.runOutsideAngular(function () { return _angular_platformBrowser.ɵgetDOM().onAndCancel(el, eventName, outsideHandler); }); }; return DefaultServerRenderer2; }()); var AT_CHARCODE = '@'.charCodeAt(0); /** * @param {?} name * @param {?} nameKind * @return {?} */ function checkNoSyntheticProp(name, nameKind) { if (name.charCodeAt(0) === AT_CHARCODE) { throw new Error("Found the synthetic " + nameKind + " " + name + ". Please include either \"BrowserAnimationsModule\" or \"NoopAnimationsModule\" in your application."); } } var EmulatedEncapsulationServerRenderer2 = (function (_super) { __extends(EmulatedEncapsulationServerRenderer2, _super); /** * @param {?} document * @param {?} ngZone * @param {?} sharedStylesHost * @param {?} schema * @param {?} component */ function EmulatedEncapsulationServerRenderer2(document, ngZone, sharedStylesHost, schema, component) { var _this = _super.call(this, document, ngZone, schema) || this; _this.component = component; var styles = _angular_platformBrowser.ɵflattenStyles(component.id, component.styles, []); sharedStylesHost.addStyles(styles); _this.contentAttr = _angular_platformBrowser.ɵshimContentAttribute(component.id); _this.hostAttr = _angular_platformBrowser.ɵshimHostAttribute(component.id); return _this; } /** * @param {?} element * @return {?} */ EmulatedEncapsulationServerRenderer2.prototype.applyToHost = function (element) { _super.prototype.setAttribute.call(this, element, this.hostAttr, ''); }; /** * @param {?} parent * @param {?} name * @return {?} */ EmulatedEncapsulationServerRenderer2.prototype.createElement = function (parent, name) { var /** @type {?} */ el = _super.prototype.createElement.call(this, parent, name); _super.prototype.setAttribute.call(this, el, this.contentAttr, ''); return el; }; return EmulatedEncapsulationServerRenderer2; }(DefaultServerRenderer2)); /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var ServerStylesHost = (function (_super) { __extends(ServerStylesHost, _super); /** * @param {?} doc * @param {?} transitionId */ function ServerStylesHost(doc, transitionId) { var _this = _super.call(this) || this; _this.doc = doc; _this.transitionId = transitionId; _this.head = null; _this.head = _angular_platformBrowser.ɵgetDOM().getElementsByTagName(doc, 'head')[0]; return _this; } /** * @param {?} style * @return {?} */ ServerStylesHost.prototype._addStyle = function (style) { var /** @type {?} */ adapter = (_angular_platformBrowser.ɵgetDOM()); var /** @type {?} */ el = adapter.createElement('style'); adapter.setText(el, style); if (!!this.transitionId) { adapter.setAttribute(el, 'ng-transition', this.transitionId); } adapter.appendChild(this.head, el); }; /** * @param {?} additions * @return {?} */ ServerStylesHost.prototype.onStylesAdded = function (additions) { var _this = this; additions.forEach(function (style) { return _this._addStyle(style); }); }; return ServerStylesHost; }(_angular_platformBrowser.ɵSharedStylesHost)); ServerStylesHost.decorators = [ { type: _angular_core.Injectable }, ]; /** * @nocollapse */ ServerStylesHost.ctorParameters = function () { return [ { type: undefined, decorators: [{ type: _angular_core.Inject, args: [_angular_platformBrowser.DOCUMENT,] },] }, { type: undefined, decorators: [{ type: _angular_core.Optional }, { type: _angular_core.Inject, args: [_angular_platformBrowser.ɵTRANSITION_ID,] },] }, ]; }; /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var INTERNAL_SERVER_PLATFORM_PROVIDERS = [ { provide: _angular_platformBrowser.DOCUMENT, useFactory: _document, deps: [_angular_core.Injector] }, { provide: _angular_core.PLATFORM_ID, useValue: _angular_common.ɵPLATFORM_SERVER_ID }, { provide: _angular_core.PLATFORM_INITIALIZER, useFactory: initParse5Adapter, multi: true, deps: [_angular_core.Injector] }, { provide: _angular_common.PlatformLocation, useClass: ServerPlatformLocation }, PlatformState, // Add special provider that allows multiple instances of platformServer* to be created. { provide: _angular_core.ɵALLOW_MULTIPLE_PLATFORMS, useValue: true } ]; /** * @param {?} injector * @return {?} */ function initParse5Adapter(injector) { return function () { Parse5DomAdapter.makeCurrent(); }; } /** * @param {?} renderer * @param {?} engine * @param {?} zone * @return {?} */ function instantiateServerRendererFactory(renderer, engine, zone) { return new _angular_platformBrowser_animations.ɵAnimationRendererFactory(renderer, engine, zone); } var SERVER_RENDER_PROVIDERS = [ ServerRendererFactory2, { provide: _angular_core.RendererFactory2, useFactory: instantiateServerRendererFactory, deps: [ServerRendererFactory2, _angular_animations_browser.ɵAnimationEngine, _angular_core.NgZone] }, ServerStylesHost, { provide: _angular_platformBrowser.ɵSharedStylesHost, useExisting: ServerStylesHost }, ]; /** * The ng module for the server. * * \@experimental */ var ServerModule = (function () { function ServerModule() { } return ServerModule; }()); ServerModule.decorators = [ { type: _angular_core.NgModule, args: [{ exports: [_angular_platformBrowser.BrowserModule], imports: [_angular_http.HttpModule, _angular_common_http.HttpClientModule, _angular_platformBrowser_animations.NoopAnimationsModule], providers: [ SERVER_RENDER_PROVIDERS, SERVER_HTTP_PROVIDERS, { provide: _angular_core.Testability, useValue: null }, ], },] }, ]; /** * @nocollapse */ ServerModule.ctorParameters = function () { return []; }; /** * @param {?} injector * @return {?} */ function _document(injector) { var /** @type {?} */ config = injector.get(INITIAL_CONFIG, null); if (config && config.document) { return parseDocument(config.document); } else { return _angular_platformBrowser.ɵgetDOM().createHtmlDocument(); } } /** * \@experimental */ var platformServer = _angular_core.createPlatformFactory(_angular_core.platformCore, 'server', INTERNAL_SERVER_PLATFORM_PROVIDERS); /** * The server platform that supports the runtime compiler. * * \@experimental */ var platformDynamicServer = _angular_core.createPlatformFactory(_angular_compiler.platformCoreDynamic, 'serverDynamic', INTERNAL_SERVER_PLATFORM_PROVIDERS); /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ var parse5$2 = require('parse5'); /** * @param {?} platformFactory * @param {?} options * @return {?} */ function _getPlatform(platformFactory, options) { var /** @type {?} */ extraProviders = options.extraProviders ? options.extraProviders : []; return platformFactory([ { provide: INITIAL_CONFIG, useValue: { document: options.document, url: options.url } }, extraProviders ]); } /** * @template T * @param {?} platform * @param {?} moduleRefPromise * @return {?} */ function _render(platform, moduleRefPromise) { return moduleRefPromise.then(function (moduleRef) { var /** @type {?} */ transitionId = moduleRef.injector.get(_angular_platformBrowser.ɵTRANSITION_ID, null); if (!transitionId) { throw new Error("renderModule[Factory]() requires the use of BrowserModule.withServerTransition() to ensure\nthe server-rendered app can be properly bootstrapped into a client app."); } var /** @type {?} */ applicationRef = moduleRef.injector.get(_angular_core.ApplicationRef); return rxjs_operator_toPromise.toPromise .call(rxjs_operator_first.first.call(rxjs_operator_filter.filter.call(applicationRef.isStable, function (isStable) { return isStable; }))) .then(function () { var /** @type {?} */ output = platform.injector.get(PlatformState).renderToString(); platform.destroy(); return output; }); }); } /** * Renders a Module to string. * * `document` is the full document HTML of the page to render, as a string. * `url` is the URL for the current render request. * `extraProviders` are the platform level providers for the current render request. * * Do not use this in a production server environment. Use pre-compiled {\@link NgModuleFactory} with * {\@link renderModuleFactory} instead. * * \@experimental * @template T * @param {?} module * @param {?} options * @return {?} */ function renderModule(module, options) { var /** @type {?} */ platform = _getPlatform(platformDynamicServer, options); return _render(platform, platform.bootstrapModule(module)); } /** * Renders a {\@link NgModuleFactory} to string. * * `document` is the full document HTML of the page to render, as a string. * `url` is the URL for the current render request. * `extraProviders` are the platform level providers for the current render request. * * \@experimental * @template T * @param {?} moduleFactory * @param {?} options * @return {?} */ function renderModuleFactory(moduleFactory, options) { var /** @type {?} */ platform = _getPlatform(platformServer, options); return _render(platform, platform.bootstrapModuleFactory(moduleFactory)); } /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /** * @license * Copyright Google Inc. All Rights Reserved. * * Use of this source code is governed by an MIT-style license that can be * found in the LICENSE file at https://angular.io/license */ /** * @module * @description * Entry point for all public APIs of the common package. */ /** * \@stable */ var VERSION = new _angular_core.Version('4.4.4'); exports.PlatformState = PlatformState; exports.ServerModule = ServerModule; exports.platformDynamicServer = platformDynamicServer; exports.platformServer = platformServer; exports.INITIAL_CONFIG = INITIAL_CONFIG; exports.renderModule = renderModule; exports.renderModuleFactory = renderModuleFactory; exports.VERSION = VERSION; exports.ɵINTERNAL_SERVER_PLATFORM_PROVIDERS = INTERNAL_SERVER_PLATFORM_PROVIDERS; exports.ɵSERVER_RENDER_PROVIDERS = SERVER_RENDER_PROVIDERS; exports.ɵServerRendererFactory2 = ServerRendererFactory2; exports.ɵg = SERVER_HTTP_PROVIDERS; exports.ɵc = ServerXhr; exports.ɵd = ServerXsrfStrategy; exports.ɵe = httpFactory; exports.ɵf = zoneWrappedInterceptingHandler; exports.ɵa = instantiateServerRendererFactory; exports.ɵb = ServerStylesHost; Object.defineProperty(exports, '__esModule', { value: true }); }))); //# sourceMappingURL=platform-server.umd.js.map
*/ Parse5DomAdapter.prototype.hasShadowRoot = function (node) { return node.shadowRoot != null; }; /** * @param {?} node
decorators.py
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- # vi: set ft=python sts=4 ts=4 sw=4 et: """ Extend numpy's decorators to use nipy's gui and data labels. """ from numpy.testing.decorators import * from nipy.utils import templates, example_data, DataError def make_label_dec(label, ds=None): """Factory function to create a decorator that applies one or more labels. Parameters ---------- label : str or sequence One or more labels that will be applied by the decorator to the functions it decorates. Labels are attributes of the decorated function with their value set to True. ds : str An optional docstring for the resulting decorator. If not given, a default docstring is auto-generated. Returns ------- ldec : function A decorator. Examples -------- >>> slow = make_label_dec('slow') >>> print slow.__doc__ Labels a test as 'slow' >>> rare = make_label_dec(['slow','hard'], ... "Mix labels 'slow' and 'hard' for rare tests") >>> @rare ... def f(): pass ... >>> >>> f.slow True >>> f.hard True """ if isinstance(label,basestring): labels = [label] else: labels = label # Validate that the given label(s) are OK for use in setattr() by doing a # dry run on a dummy function. tmp = lambda : None for label in labels: setattr(tmp,label,True) # This is the actual decorator we'll return def decor(f): for label in labels: setattr(f,label,True) return f # Apply the user's docstring if ds is None: ds = "Labels a test as %r" % label decor.__doc__ = ds return decor # Nipy specific labels gui = make_label_dec('gui') data = make_label_dec('data') # For tests that need further review def needs_review(msg): """ Skip a test that needs further review. Parameters ---------- msg : string msg regarding the review that needs to be done """ def skip_func(func): return skipif(True, msg)(func) return skip_func
# Easier version of the numpy knownfailure def knownfailure(f): return knownfailureif(True)(f) def if_datasource(ds, msg): try: ds.get_filename() except DataError: return skipif(True, msg) return lambda f : f def if_templates(f): return if_datasource(templates, 'Cannot find template data')(f) def if_example_data(f): return if_datasource(example_data, 'Cannot find example data')(f) def skip_doctest_if(condition): """Decorator - mark a function or method for skipping its doctest. This decorator allows you to mark a function whose docstring you wish to omit from testing, while preserving the docstring for introspection, help, etc.""" if not condition: return lambda f : f return make_label_dec('skip_doctest')
strategy.py
import pprint import numpy as np import copy import operator from cloud_mocks import * pp = pprint.PrettyPrinter(indent=2) class BaseStrategy(object): def execute(self, cluster, goal): raise NotImplementedError() def Migration(vm, source, dest): return { 'vm': vm.name, 'source': source.name, 'dest': dest.name, } class SchedulerAwareStrategy(BaseStrategy): def __init__(self, active_filters): self.filters = active_filters self.migrations = [] def host_passes(self, vm, host, cluster_state): for f in self.filters: passes = f.host_passes(vm, host, cluster_state) if not passes: return False return True def get_host_util(self, host, vm_key, host_key, allocation_ratio): vm_sum = 0.0 for vm in host.vms: vm_sum += getattr(vm, vm_key) return vm_sum / (allocation_ratio * getattr(host, host_key)) def get_host_cpu_util(self, host, use_flavor = True, alloc_ratio = None): allocation_ratio = host.get_metadata('cpu_allocation_ratio', min, 1.0) if alloc_ratio: allocation_ratio = min(allocation_ratio, alloc_ratio) if use_flavor: vm_key = 'vcpus' else: vm_key = 'avg_cpu_util' return self.get_host_util(host, vm_key, 'cpus', allocation_ratio) def get_host_ram_util(self, host, alloc_ratio = None): allocation_ratio = host.get_metadata('ram_allocation_ratio', min, 1.0) if alloc_ratio: allocation_ratio = min(allocation_ratio, alloc_ratio) vm_key = 'vram' return self.get_host_util(host, vm_key, 'ram', allocation_ratio) class ConsolidationGoal(object): def __init__(self, data_source = 'flavor'): self.data_source = data_source class ConsolidationStrategy(SchedulerAwareStrategy): def __init__(self, active_filters, **kwargs):
def execute(self, cluster, goal): # we'll manipulate deep copy of our cluster result = copy.deepcopy(cluster) use_flavor = goal.data_source == 'flavor' # get sorted by load list of hosts hosts_loads = self.get_hosts_loads(result, use_flavor) sorted_loads = sorted(hosts_loads.items(), key=lambda hl: hl[1]['total']) #print 'sorted hosts:' #pp.pprint(sorted_loads) # TODO: maybe we should offload vms from overloaded hosts here # not let's try to offload hosts starting from least loaded one donour_i = 0 while donour_i < len(result) - 1: recipient_i = len(result) - 1 while recipient_i > donour_i: donour = sorted_loads[donour_i][0] recipient = sorted_loads[recipient_i][0] print 'donour ' + str(donour_i) + ' ' + donour.name +\ ' recipient ' + str(recipient_i) + ' ' + recipient.name candidates = [] for vm in donour.vms: # TODO: cache filter outputs. Looks like N^3 complexity if self.can_migrate(vm, donour, recipient, result, use_flavor): candidates.append(vm) print 'len of candidates ' + str(len(candidates)) best_candidate = self.choose_best_candidate(candidates, donour, recipient, use_flavor) if best_candidate: print 'Chosen instance for migration: ' + best_candidate.name else: print 'Best candidate not chosen' if best_candidate: # preform migration self.migrate(best_candidate, donour, recipient) self.migrations.append(Migration(best_candidate, donour, recipient)) # we need to update model # TODO: we actually need to update only two hosts, # so full load rebuilding feels excessive hosts_loads = self.get_hosts_loads(result, use_flavor) sorted_loads = sorted(hosts_loads.items(), key=lambda hl: hl[1]['total']) # reset counters # Probably we need more efficient solution donour_i = 0 recipient_i = len(result) - 1 else: recipient_i -= 1 donour_i += 1 # we're gucci return result def choose_best_candidate(self, candidates, source, dest, use_flavor): # choose the best vm to migrate to dest # at first let's choose vm that will make host utilization # balanced cpu-ram wise if len(candidates) == 0: return None resulting_loads = {} for vm in candidates: self.migrate(vm, source, dest) new_load = self.host_load(dest, use_flavor) resulting_loads[vm] = abs(new_load['cpu'] - new_load['ram']) self.migrate(vm, dest, source) sorted_results = sorted(resulting_loads.items(), key=operator.itemgetter(1)) return sorted_results[0][0] def can_migrate(self, vm, source, dest, cluster_state, use_flavor): schedulers_ok = self.host_passes(vm, dest, cluster_state) if not schedulers_ok: return False # now let's check our own, strategy policies: cpu and ram overbooking self.migrate(vm, source, dest) try: host_util = self.host_load(dest, use_flavor) if not use_flavor: if host_util['cpu'] > self.cpu_allocation_ratio: return False if host_util['ram'] > self.ram_allocation_ratio: return False finally: self.migrate(vm, dest, source) # rollback migration return True def migrate(self, vm, source, dest): source.vms.remove(vm) dest.vms.add(vm) def get_hosts_loads(self, cluster, use_flavor): res = {} for host in cluster: res[host] = self.host_load(host, use_flavor) return res def host_load(self, host, use_flavor = True): # get area ratio if not use_flavor: cpu_alloc_ratio = self.cpu_allocation_ratio # <1 for ceilometer else: cpu_alloc_ratio = None # filter-based ratio for flavor-consolidation cpu_ratio = self.get_host_cpu_util(host, use_flavor, cpu_alloc_ratio) ram_ratio = self.get_host_ram_util(host, self.ram_allocation_ratio) return {'total': cpu_ratio * ram_ratio, 'cpu': cpu_ratio, 'ram': ram_ratio}
SchedulerAwareStrategy.__init__(self, active_filters) self.cpu_allocation_ratio = kwargs.get('cpu_max_util', 0.8) self.ram_allocation_ratio = kwargs.get('ram_max_util', 1.5)
japanpostEMSDL.js
/* 引数:user_id:ユーザID    ourder_id:Order Transaction ID input_url:検証→http://dev.world-viewing.com/ebay/orderrest.json 本番→http:// output_url:検証→/var/www/html/dev_camerascm/emslabel/ */ var x = require('casper').selectXPath; var fs = require('fs'); var inputdata; var input_url; var output_url; var casper = require("casper").create({ viewportSize: { width: 1800, height: 768 }, verbose: true, logLevel: 'error' }); if (casper.cli.has(2)){ input_url = casper.cli.get(2); }else{ input_url = 'http://dev.world-viewing.com/ebay/orderrest.json'; } if (casper.cli.has(3)){ output_url = casper.cli.get(3); }else{ output_url = '/var/www/html/dev_camerascm/public/emslabel/'; } if(casper.cli.has(0)){ output_url = output_url + casper.cli.get(0) + '/'; if(!fs.exists(output_url)){ casper.echo(output_url); var wasSuccessful = fs.makeDirectory(output_url); if(wasSuccessful === true ? 0 : 1){ casper.exit(0); }; } } casper.userAgent("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.98 Safari/537.36"); //casper.userAgent("Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/28.0.1500.63 Safari/537.36"); casper.start(); if (casper.cli.has(0) && casper.cli.has(1)){ casper.open(input_url, { method: 'post', data: { 'user_id' : casper.cli.get(0), 'order_id': casper.cli.get(1) } }); casper.then(function() { inputdata = JSON.parse(casper.getPageContent()); }); }else{ casper.exit(); } // 指定した URL へ遷移する //casper.thenOpen("https://www.int-mypage.post.japanpost.jp/mypage/M010000.do"); casper.thenOpen("https://www.int-mypage.post.japanpost.jp/index.html"); //日本語環境に変更 casper.then(function() { this.wait(500, function() { this.evaluate(function() { document.querySelector("select[name='localeSel']").value = 'ja'; return true; }); this.mouseEvent("click", "input.txt_form_button"); }); }); //casper.then(function() { // this.echo("日本語環境に変更前"); // this.evaluate(function() { // document.querySelector("select[name='localeSel']").value = 'ja'; // return true; // }); // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.txt_form_button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/1.png"); // this.mouseEvent("click", "input.txt_form_button"); // }); // //}); casper.then(function() { this.wait(500, function() { this.sendKeys("input[name='loginBean.id']", "[email protected]"); this.sendKeys("input[name='loginBean.pw']", "rjfr4299"); // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/2.png"); }); // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.txt_form_button').length >= 1; // }); // }, function() { // this.mouseEvent("click", x('//*[@id="loaded"]/table/tbody/tr[2]/td[2]/table/tbody/tr[2]/td/table/tbody/tr[1]/td/table/tbody/tr[2]/td[1]/table[1]/tbody/tr[2]/td[2]/div/table[2]/tbody/tr/td[2]/a')); // }); }); casper.then(function() { this.wait(100, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/3.png"); //this.mouseEvent("click", x('//*[@id="loaded"]/table/tbody/tr[2]/td[2]/table/tbody/tr[2]/td/table/tbody/tr[1]/td/table/tbody/tr[2]/td[1]/table[1]/tbody/tr[2]/td[2]/div/table[2]/tbody/tr/td[2]/a')); this.mouseEvent("click", 'td div table.layout tbody tr td a img'); }); }); //送り状作成 casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('div.mrgTB10 ul.listmark-arrow a').length >= 1; // }); // }, function() { // this.mouseEvent("click", x('//*[@id="loaded"]/table[4]/tbody/tr/td/div[1]/div/div[2]/table/tbody/tr/td/div[2]/ul[1]/li[1]/a')) // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/4.png"); this.mouseEvent("click", x('//*[@id="loaded"]/table[4]/tbody/tr/td/div[1]/div/div[2]/table/tbody/tr/td/div[2]/ul[1]/li[1]/a')) }); }); //依頼主選択 //casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.mouseEvent("click", 'input[value="お届け先の選択へ"]') // },10000); // //this.mouseEvent("click", x('//*[@id="M060000_sel157516"]')) // //casper.capture("4.png"); // //this.mouseEvent("click", 'input[value="お届け先の選択へ"]') // //this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/div[4]/div[4]/input')) //}); casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/5.png"); this.mouseEvent("click", 'input[value="お届け先の選択へ"]') }); }); //お届け先選択(都度入力) //casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.mouseEvent("click", 'input[id="M060400_sel-1"]') // //this.mouseEvent("click", x('//*[@id="M060400_sel-1"]')) // },10000); // //casper.capture("6.png"); //}); casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/6.png"); this.mouseEvent("click", 'input[id="M060400_sel-1"]') }); }); casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/7.png"); this.sendKeys("input[name='addrToBean.nam']", inputdata.buyer_name); this.evaluate(function(countryCode) { document.querySelector("select[name='addrToBean.couCode']").value = countryCode; return true; },inputdata.buyer_country_code); if(inputdata.buyer_address2 == ''){ this.sendKeys("input[name='addrToBean.add2']", inputdata.buyer_address1); }else{ this.sendKeys("input[name='addrToBean.add1']", inputdata.buyer_address1); this.sendKeys("input[name='addrToBean.add2']", inputdata.buyer_address2); } this.sendKeys("input[name='addrToBean.add3']", inputdata.buyer_address3); this.sendKeys("input[name='addrToBean.pref']", inputdata.buyer_pref); this.sendKeys("input[name='addrToBean.postal']", inputdata.buyer_postal); this.sendKeys("input[name='addrToBean.tel']", inputdata.buyer_tel); //this.sendKeys("input[name='addrToBean.fax']", inputdata.addToFax); this.sendKeys("input[name='addrToBean.mail']", inputdata.buyer_email); }); }); casper.then(function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/8.png"); this.mouseEvent("click", 'input[value="この内容で登録する"]') //this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/div[4]/input')); }); //EMS,ePacket選択 casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/9.png"); if(inputdata.shipping_type == 'ePacket'){ this.mouseEvent("click", x('//*[@id="M060800_shippingBean_sendType4"]')); }else{ this.mouseEvent("click", x('//*[@id="M060800_shippingBean_sendType1"]')); } }); }); casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelector('input[name="item_button01"]').disabled == false; // }); // }, function() { // this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/input[2]')); // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/10.png"); this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/input[2]')); }); }); casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button2').length >= 1; // }); // }, function() { // this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/div/div[2]/table[2]/tbody/tr[2]/td[6]/input')) // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/11.png"); this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/div/div[2]/table[2]/tbody/tr[2]/td[6]/input')) }); }); casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/12.png"); this.sendKeys("input[name='itemBean.pkg']", inputdata.item_pkg_category); this.evaluate(function(SalePrice) { document.querySelector("input[name='itemBean.cost.value']").value = SalePrice; return true; },inputdata.sale_round_price); this.evaluate(function(CurUnit) { document.querySelector("select[name='itemBean.curUnit']").value = CurUnit; return true; },inputdata.itemCurUnit); this.mouseEvent("click", 'input[value="この内容で登録する"]'); }); // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.mouseEvent("click", 'input[value="この内容で登録する"]'); // }); }); casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/13.png"); // this.mouseEvent("click", 'input[value=" 登 録 "]'); // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/13.png"); this.mouseEvent("click", 'input[value=" 登 録 "]'); }); }); casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/14.png"); // this.mouseEvent("click", 'input[value="内容品リスト一覧へ戻る"]'); // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/14.png"); this.mouseEvent("click", 'input[value="内容品リスト一覧へ戻る"]'); }); }); casper.then(function() { this.wait(500, function() { this.evaluate(function() { document.querySelector("input[name='itemSearchBean.chk']").checked = true; return true; }); // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/15.png"); this.mouseEvent("click", 'input[value="送り状に内容品を登録"]'); }); }); casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/16.png"); this.sendKeys("input[name='itemCount']", inputdata.itemCount); this.evaluate(function(SaleYenPrice) { document.querySelector("input[name='shippingBean.pkgTotalPrice.value']").value = SaleYenPrice; return true; },inputdata.sale_round_yen_price); this.evaluate(function() { //document.querySelector("select[name='shippingBean.pkgType']").value = inputdata.pkgType; document.querySelector("select[name='shippingBean.pkgType']").value = 3; return true; }); this.evaluate(function() { //document.querySelector("input[name='shippingBean.noCm']").checked = true; document.querySelector("input[name='shippingBean.noCm']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='ShippingBean.danger']").checked = true; return true; }); }); }); casper.then(function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/17.png"); this.mouseEvent("click", 'input[value="発送関連情報の入力へ"]'); casper.log('17.png', 'info'); }); //発送関連情報登録 casper.then(function() { this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/18.png"); this.evaluate(function(shipDate) { document.querySelector("select[name='shippingBean.sendDate.YMD']").value = shipDate; return true; },inputdata.shipdate); if(inputdata.shipping_type !== 'ePacket'){ this.sendKeys("input[name='shippingBean.num.value']", inputdata.itemCount); this.sendKeys("input[name='shippingBean.totalNum.value']", inputdata.itemCount); this.evaluate(function(SaleYenPrice) { document.querySelector("input[name='shippingBean.damges']").value = SaleYenPrice; return true; },inputdata.sale_round_yen_price); this.sendKeys("input[name='shippingBean.damges']", {modifiers: 'alt'}); } this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf1']").checked = true; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf3']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf4']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf5']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf6']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf2']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.fromConf7']").checked = true; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf1']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf3']").checked = true; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf4']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf5']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf6']").checked = false; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf2']").checked = true; return true; }); this.evaluate(function() { document.querySelector("input[name='shippingBean.ctrlMailConfBean.toConf7']").checked = false; return true; }); }); }); casper.then(function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/19.png"); this.mouseEvent("click", x('//*[@id="loaded"]/table[2]/tbody/tr[1]/td/div/div[8]/input')); casper.log('19.png', 'info'); }); //送り状を登録する casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/20.png"); // this.mouseEvent("click", 'input[value="送り状を登録する"]'); // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/20.png"); this.mouseEvent("click", 'input[value="送り状を登録する"]'); }); }); //casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/19.png"); // this.mouseEvent("click", 'input[value="送り状を登録する"]'); // }); //}); //送り状をPDFにする casper.then(function() { // this.waitFor(function check() { // return this.evaluate(function() { // return document.querySelectorAll('input.button').length >= 1; // }); // }, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/21.png"); // this.mouseEvent("click", 'input[value="注意事項に同意して送り状を印刷"]'); // }); this.wait(500, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/21.png"); this.mouseEvent("click", 'input[value="注意事項に同意して送り状を印刷"]'); casper.log('21.png', 'info'); }); }); //送り状をダウンロードする casper.then(function() { this.wait(2000, function() { // this.capture("/var/www/html/dev_camerascm/public/assets/img/photo/22.png"); var pdfname = this.evaluate(function() { return document.querySelector('div.boxMain iframe').src; }); var originalname = pdfname.substring(pdfname.lastIndexOf('/')+1, pdfname.length); if(CheckLength(inputdata.buyer_name,1)){ var output_file = originalname.substr(13,8) + '_'+ originalname.substr(27,13) + '_'+ inputdata.buyer_country_code + '.pdf'; }else{ var output_file = originalname.substr(13,8) + '_'+ originalname.substr(27,13) + '_'+ inputdata.buyer_country_code + '_'+ inputdata.buyer_name + '.pdf'; } this.download(pdfname, output_url +output_file); var outputobj = { output_path :output_url, file_name :output_file, tracking_number :originalname.substr(27,13), buyer_name :inputdata.buyer_name }; this.echo(JSON.stringify(outputobj)); }); }); // 処理を開始する casper.run(); /**************************************************************** * 全角/半角文字判定 * * 引数 : str チェックする文字列 * flg 0:半角文字、1:全角文字 * 戻り値: true:含まれている、false:含まれていない * ****************************************************************/ function CheckLength(str,flg) { for (var i = 0; i < str.length; i++) { var c = str.charCodeAt(i); // Shift_JIS: 0x0 ~ 0x80, 0xa0 , 0xa1 ~ 0xdf , 0xfd ~ 0xff // Unicode : 0x0 ~ 0x80, 0xf8f0, 0xff61 ~ 0xff9f, 0xf8f1 ~ 0xf8f3 if ( (c >= 0x0 && c < 0x81) || (c == 0xf8f0) || (c >= 0xff61 && c < 0xffa0) || (c >= 0xf8f1 && c < 0xf8f4)) { if(!flg) return true; } else { if(flg) return true; } } return false; }
cli.rs
// * This file is part of the uutils coreutils package. // * // * (c) 2014 T. Jameson Little <[email protected]> // * (c) 2020 nicoo <[email protected]> // * // * For the full copyright and license information, please view the LICENSE file // * that was distributed with this source code. #[macro_use] extern crate uucore; use std::error::Error; use std::fmt::Write as FmtWrite; use std::io::{self, stdin, stdout, BufRead, Write}; mod factor; use clap::{crate_version, App, Arg}; pub use factor::*; mod miller_rabin; pub mod numeric; mod rho; pub mod table; static SUMMARY: &str = "Print the prime factors of the given NUMBER(s). If none are specified, read from standard input."; mod options { pub static NUMBER: &str = "NUMBER"; } fn print_factors_str( num_str: &str, w: &mut io::BufWriter<impl io::Write>, factors_buffer: &mut String, ) -> Result<(), Box<dyn Error>> { num_str.parse::<u64>().map_err(|e| e.into()).and_then(|x| { factors_buffer.clear(); writeln!(factors_buffer, "{}:{}", x, factor(x))?; w.write_all(factors_buffer.as_bytes())?; Ok(()) }) } pub fn uumain(args: impl uucore::Args) -> i32 { let matches = uu_app().get_matches_from(args); let stdout = stdout(); // We use a smaller buffer here to pass a gnu test. 4KiB appears to be the default pipe size for bash. let mut w = io::BufWriter::with_capacity(4 * 1024, stdout.lock()); let mut factors_buffer = String::new(); if let Some(values) = matches.values_of(options::NUMBER) { for number in values { if let Err(e) = print_factors_str(number, &mut w, &mut factors_buffer) { show_warning!("{}: {}", number, e); } } } else { let stdin = stdin(); for line in stdin.lock().lines() { for number in line.unwrap().split_whitespace() { if let Err(e) = print_factors_str(number, &mut w, &mut factors_buffer) { show_warning!("{}: {}", number, e); } } } } if let Err(e) = w.flush() { show_error!("{}", e); } 0 } pub fn
() -> App<'static, 'static> { App::new(executable!()) .version(crate_version!()) .about(SUMMARY) .arg(Arg::with_name(options::NUMBER).multiple(true)) }
uu_app
index.js
import Vue from 'vue' import Router from 'vue-router'
Vue.use(Router) // route-level code splitting const createListView = id => () => import('../views/CreateListView').then(m => m.default(id)) const ItemView = () => import('../views/ItemView.vue') const UserView = () => import('../views/UserView.vue') const LoginView = () => import('../views/Login.vue') const isDev = process.env.NODE_ENV !== 'production' let routePrefix = isDev ? '': '/blog' let redirectPrefix = isDev ? '': '/blog' export function createRouter () { return new Router({ mode: 'history', fallback: false, scrollBehavior: () => ({ y: 0 }), routes: [ { path: routePrefix + '/login/', component: LoginView}, { path: routePrefix + '/top/:page(\\d+)?', component: createListView('top') }, { path: routePrefix + '/new/:page(\\d+)?', component: createListView('new') }, { path: routePrefix + '/show/:page(\\d+)?', component: createListView('show') }, { path: routePrefix + '/ask/:page(\\d+)?', component: createListView('ask') }, { path: routePrefix + '/job/:page(\\d+)?', component: createListView('job') }, { path: routePrefix + '/item/:id(\\d+)', component: ItemView }, { path: routePrefix + '/user/:id', component: UserView }, { path: routePrefix + '/', redirect: redirectPrefix + '/top' } ] }) }
client.go
/* Copyright 2018 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fake import ( "context" "encoding/json" "errors" "fmt" "strconv" "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilrand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/testing" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/internal/objectutil" ) type versionedTracker struct { testing.ObjectTracker } type fakeClient struct { tracker versionedTracker scheme *runtime.Scheme } var _ client.Client = &fakeClient{} const ( maxNameLength = 63 randomLength = 5 maxGeneratedNameLength = maxNameLength - randomLength ) // NewFakeClient creates a new fake client for testing. // You can choose to initialize it with a slice of runtime.Object. // Deprecated: use NewFakeClientWithScheme. You should always be // passing an explicit Scheme. func
(initObjs ...runtime.Object) client.Client { return NewFakeClientWithScheme(scheme.Scheme, initObjs...) } // NewFakeClientWithScheme creates a new fake client with the given scheme // for testing. // You can choose to initialize it with a slice of runtime.Object. func NewFakeClientWithScheme(clientScheme *runtime.Scheme, initObjs ...runtime.Object) client.Client { tracker := testing.NewObjectTracker(clientScheme, scheme.Codecs.UniversalDecoder()) for _, obj := range initObjs { err := tracker.Add(obj) if err != nil { panic(fmt.Errorf("failed to add object %v to fake client: %w", obj, err)) } } return &fakeClient{ tracker: versionedTracker{tracker}, scheme: clientScheme, } } func (t versionedTracker) Create(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { accessor, err := meta.Accessor(obj) if err != nil { return err } if accessor.GetName() == "" { return apierrors.NewInvalid( obj.GetObjectKind().GroupVersionKind().GroupKind(), accessor.GetName(), field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) } if accessor.GetResourceVersion() != "" { return apierrors.NewBadRequest("resourceVersion can not be set for Create requests") } accessor.SetResourceVersion("1") if err := t.ObjectTracker.Create(gvr, obj, ns); err != nil { accessor.SetResourceVersion("") return err } return nil } func (t versionedTracker) Update(gvr schema.GroupVersionResource, obj runtime.Object, ns string) error { accessor, err := meta.Accessor(obj) if err != nil { return fmt.Errorf("failed to get accessor for object: %v", err) } if accessor.GetName() == "" { return apierrors.NewInvalid( obj.GetObjectKind().GroupVersionKind().GroupKind(), accessor.GetName(), field.ErrorList{field.Required(field.NewPath("metadata.name"), "name is required")}) } oldObject, err := t.ObjectTracker.Get(gvr, ns, accessor.GetName()) if err != nil { return err } oldAccessor, err := meta.Accessor(oldObject) if err != nil { return err } if accessor.GetResourceVersion() != oldAccessor.GetResourceVersion() { return apierrors.NewConflict(gvr.GroupResource(), accessor.GetName(), errors.New("object was modified")) } if oldAccessor.GetResourceVersion() == "" { oldAccessor.SetResourceVersion("0") } intResourceVersion, err := strconv.ParseUint(oldAccessor.GetResourceVersion(), 10, 64) if err != nil { return fmt.Errorf("can not convert resourceVersion %q to int: %v", oldAccessor.GetResourceVersion(), err) } intResourceVersion++ accessor.SetResourceVersion(strconv.FormatUint(intResourceVersion, 10)) return t.ObjectTracker.Update(gvr, obj, ns) } func (c *fakeClient) Get(ctx context.Context, key client.ObjectKey, obj runtime.Object) error { gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } o, err := c.tracker.Get(gvr, key.Namespace, key.Name) if err != nil { return err } gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } ta, err := meta.TypeAccessor(o) if err != nil { return err } ta.SetKind(gvk.Kind) ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } decoder := scheme.Codecs.UniversalDecoder() _, _, err = decoder.Decode(j, nil, obj) return err } func (c *fakeClient) List(ctx context.Context, obj runtime.Object, opts ...client.ListOption) error { gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } OriginalKind := gvk.Kind if !strings.HasSuffix(gvk.Kind, "List") { return fmt.Errorf("non-list type %T (kind %q) passed as output", obj, gvk) } // we need the non-list GVK, so chop off the "List" from the end of the kind gvk.Kind = gvk.Kind[:len(gvk.Kind)-4] listOpts := client.ListOptions{} listOpts.ApplyOptions(opts) gvr, _ := meta.UnsafeGuessKindToResource(gvk) o, err := c.tracker.List(gvr, gvk, listOpts.Namespace) if err != nil { return err } ta, err := meta.TypeAccessor(o) if err != nil { return err } ta.SetKind(OriginalKind) ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } decoder := scheme.Codecs.UniversalDecoder() _, _, err = decoder.Decode(j, nil, obj) if err != nil { return err } if listOpts.LabelSelector != nil { objs, err := meta.ExtractList(obj) if err != nil { return err } filteredObjs, err := objectutil.FilterWithLabels(objs, listOpts.LabelSelector) if err != nil { return err } err = meta.SetList(obj, filteredObjs) if err != nil { return err } } return nil } func (c *fakeClient) Create(ctx context.Context, obj runtime.Object, opts ...client.CreateOption) error { createOptions := &client.CreateOptions{} createOptions.ApplyOptions(opts) for _, dryRunOpt := range createOptions.DryRun { if dryRunOpt == metav1.DryRunAll { return nil } } gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } accessor, err := meta.Accessor(obj) if err != nil { return err } if accessor.GetName() == "" && accessor.GetGenerateName() != "" { base := accessor.GetGenerateName() if len(base) > maxGeneratedNameLength { base = base[:maxGeneratedNameLength] } accessor.SetName(fmt.Sprintf("%s%s", base, utilrand.String(randomLength))) } return c.tracker.Create(gvr, obj, accessor.GetNamespace()) } func (c *fakeClient) Delete(ctx context.Context, obj runtime.Object, opts ...client.DeleteOption) error { gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } accessor, err := meta.Accessor(obj) if err != nil { return err } delOptions := client.DeleteOptions{} delOptions.ApplyOptions(opts) //TODO: implement propagation return c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) } func (c *fakeClient) DeleteAllOf(ctx context.Context, obj runtime.Object, opts ...client.DeleteAllOfOption) error { gvk, err := apiutil.GVKForObject(obj, scheme.Scheme) if err != nil { return err } dcOptions := client.DeleteAllOfOptions{} dcOptions.ApplyOptions(opts) gvr, _ := meta.UnsafeGuessKindToResource(gvk) o, err := c.tracker.List(gvr, gvk, dcOptions.Namespace) if err != nil { return err } objs, err := meta.ExtractList(o) if err != nil { return err } filteredObjs, err := objectutil.FilterWithLabels(objs, dcOptions.LabelSelector) if err != nil { return err } for _, o := range filteredObjs { accessor, err := meta.Accessor(o) if err != nil { return err } err = c.tracker.Delete(gvr, accessor.GetNamespace(), accessor.GetName()) if err != nil { return err } } return nil } func (c *fakeClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { updateOptions := &client.UpdateOptions{} updateOptions.ApplyOptions(opts) for _, dryRunOpt := range updateOptions.DryRun { if dryRunOpt == metav1.DryRunAll { return nil } } gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } accessor, err := meta.Accessor(obj) if err != nil { return err } return c.tracker.Update(gvr, obj, accessor.GetNamespace()) } func (c *fakeClient) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { patchOptions := &client.PatchOptions{} patchOptions.ApplyOptions(opts) for _, dryRunOpt := range patchOptions.DryRun { if dryRunOpt == metav1.DryRunAll { return nil } } gvr, err := getGVRFromObject(obj, c.scheme) if err != nil { return err } accessor, err := meta.Accessor(obj) if err != nil { return err } data, err := patch.Data(obj) if err != nil { return err } reaction := testing.ObjectReaction(c.tracker) handled, o, err := reaction(testing.NewPatchAction(gvr, accessor.GetNamespace(), accessor.GetName(), patch.Type(), data)) if err != nil { return err } if !handled { panic("tracker could not handle patch method") } gvk, err := apiutil.GVKForObject(obj, c.scheme) if err != nil { return err } ta, err := meta.TypeAccessor(o) if err != nil { return err } ta.SetKind(gvk.Kind) ta.SetAPIVersion(gvk.GroupVersion().String()) j, err := json.Marshal(o) if err != nil { return err } decoder := scheme.Codecs.UniversalDecoder() _, _, err = decoder.Decode(j, nil, obj) return err } func (c *fakeClient) Status() client.StatusWriter { return &fakeStatusWriter{client: c} } func getGVRFromObject(obj runtime.Object, scheme *runtime.Scheme) (schema.GroupVersionResource, error) { gvk, err := apiutil.GVKForObject(obj, scheme) if err != nil { return schema.GroupVersionResource{}, err } gvr, _ := meta.UnsafeGuessKindToResource(gvk) return gvr, nil } type fakeStatusWriter struct { client *fakeClient } func (sw *fakeStatusWriter) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { // TODO(droot): This results in full update of the obj (spec + status). Need // a way to update status field only. return sw.client.Update(ctx, obj, opts...) } func (sw *fakeStatusWriter) Patch(ctx context.Context, obj runtime.Object, patch client.Patch, opts ...client.PatchOption) error { // TODO(droot): This results in full update of the obj (spec + status). Need // a way to update status field only. return sw.client.Patch(ctx, obj, patch, opts...) }
NewFakeClient
cli.js
#!/usr/bin/env node const exec = require('child_process').exec const alert = require('./') if (process.argv.length !== 3) { console.log('You need to specify one argument') return }
exec(alert(process.argv[2]))
ntt.go
package ntt import ( "encoding/json" "fmt" "os" "strconv" "github.com/nokia/ntt/internal/env" "github.com/nokia/ntt/internal/fs" "github.com/nokia/ntt/internal/log" "github.com/nokia/ntt/internal/memoize" "github.com/nokia/ntt/internal/results" "github.com/nokia/ntt/internal/session" "github.com/nokia/ntt/project" ) // Suite represents a TTCN-3 test suite. type Suite struct { id int // A unique session id p *project.Project // Environent handling envFiles []*fs.File
name string testHook *fs.File // Memoization store memoize.Store } func (suite *Suite) lazyInit() { if suite.p == nil { suite.p = &project.Project{} } } // Id returns the unique session id (aka NTT_SESSION_ID). This ID is the smallest // integer available on this machine. func (suite *Suite) Id() (int, error) { if suite.id == 0 { if s, ok := env.LookupEnv("NTT_SESSION_ID)"); ok { id, err := strconv.ParseUint(s, 10, 32) if err != nil { return 0, err } suite.id = int(id) return suite.id, nil } id, err := session.Get() if err != nil { return 0, err } suite.id = id } return suite.id, nil } func (suite *Suite) Root() string { if suite.p != nil { return suite.p.Root() } return "" } // SetRoot set the root folder for Suite. // // The root folder is the main-package, which may contain a manifest file // (`package.yml`) func (suite *Suite) SetRoot(folder string) { p, err := project.Open(folder) if err != nil { log.Verbosef(fmt.Sprintf("error opening project: %s", err.Error())) } suite.p = p } func (suite *Suite) LatestResults() (*results.DB, error) { b, err := fs.Open("test_results.json").Bytes() if err != nil { if os.IsNotExist(err) { return nil, nil } return nil, err } var db results.DB return &db, json.Unmarshal(b, &db) } func init() { env.Load() // TODO(5nord) We still have to figure how this sharedDir could be handled // more elegantly, maybe even with support for Windows. // // Change SharedDir to /tmp/k3 to be compatible with legacy k3 scripts. session.SharedDir = "/tmp/k3" }
// Manifest stuff
certificatesigningrequest.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was automatically generated by informer-gen with arguments: --input-dirs=[k8s.io/kubernetes/pkg/api,k8s.io/kubernetes/pkg/api/v1,k8s.io/kubernetes/pkg/apis/abac,k8s.io/kubernetes/pkg/apis/abac/v0,k8s.io/kubernetes/pkg/apis/abac/v1beta1,k8s.io/kubernetes/pkg/apis/apps,k8s.io/kubernetes/pkg/apis/apps/v1beta1,k8s.io/kubernetes/pkg/apis/authentication,k8s.io/kubernetes/pkg/apis/authentication/v1beta1,k8s.io/kubernetes/pkg/apis/authorization,k8s.io/kubernetes/pkg/apis/authorization/v1beta1,k8s.io/kubernetes/pkg/apis/autoscaling,k8s.io/kubernetes/pkg/apis/autoscaling/v1,k8s.io/kubernetes/pkg/apis/batch,k8s.io/kubernetes/pkg/apis/batch/v1,k8s.io/kubernetes/pkg/apis/batch/v2alpha1,k8s.io/kubernetes/pkg/apis/certificates,k8s.io/kubernetes/pkg/apis/certificates/v1alpha1,k8s.io/kubernetes/pkg/apis/componentconfig,k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1,k8s.io/kubernetes/pkg/apis/extensions,k8s.io/kubernetes/pkg/apis/extensions/v1beta1,k8s.io/kubernetes/pkg/apis/imagepolicy,k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1,k8s.io/kubernetes/pkg/apis/policy,k8s.io/kubernetes/pkg/apis/policy/v1beta1,k8s.io/kubernetes/pkg/apis/rbac,k8s.io/kubernetes/pkg/apis/rbac/v1alpha1,k8s.io/kubernetes/pkg/apis/storage,k8s.io/kubernetes/pkg/apis/storage/v1beta1] --internal-clientset-package=k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset --listers-package=k8s.io/kubernetes/pkg/client/listers --versioned-clientset-package=k8s.io/kubernetes/pkg/client/clientset_generated/clientset package internalversion import ( runtime "k8s.io/apimachinery/pkg/runtime" watch "k8s.io/apimachinery/pkg/watch" api "k8s.io/kubernetes/pkg/api" v1 "k8s.io/kubernetes/pkg/api/v1" certificates "k8s.io/kubernetes/pkg/apis/certificates" cache "k8s.io/kubernetes/pkg/client/cache" internalclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalinterfaces "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalinterfaces" internalversion "k8s.io/kubernetes/pkg/client/listers/certificates/internalversion" time "time" ) // CertificateSigningRequestInformer provides access to a shared informer and lister for // CertificateSigningRequests. type CertificateSigningRequestInformer interface { Informer() cache.SharedIndexInformer Lister() internalversion.CertificateSigningRequestLister } type certificateSigningRequestInformer struct { factory internalinterfaces.SharedInformerFactory } func newCertificateSigningRequestInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { sharedIndexInformer := cache.NewSharedIndexInformer( &cache.ListWatch{ ListFunc: func(options v1.ListOptions) (runtime.Object, error) { var internalOptions api.ListOptions if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil
return client.Certificates().CertificateSigningRequests().List(internalOptions) }, WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { var internalOptions api.ListOptions if err := api.Scheme.Convert(&options, &internalOptions, nil); err != nil { return nil, err } return client.Certificates().CertificateSigningRequests().Watch(internalOptions) }, }, &certificates.CertificateSigningRequest{}, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, ) return sharedIndexInformer } func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { return f.factory.InternalInformerFor(&certificates.CertificateSigningRequest{}, newCertificateSigningRequestInformer) } func (f *certificateSigningRequestInformer) Lister() internalversion.CertificateSigningRequestLister { return internalversion.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) }
{ return nil, err }
databricks.py
import hashlib import json import os import shutil import tempfile import textwrap import time from six.moves import shlex_quote, urllib from mlflow.entities import RunStatus from mlflow.projects import _fetch_project from mlflow.projects.submitted_run import SubmittedRun from mlflow.utils import rest_utils, file_utils from mlflow.utils.exception import ExecutionException from mlflow.utils.logging_utils import eprint from mlflow import tracking from mlflow.version import VERSION # Base directory within driver container for storing files related to MLflow DB_CONTAINER_BASE = "/databricks/mlflow" # Base directory within driver container for storing project archives DB_TARFILE_BASE = os.path.join(DB_CONTAINER_BASE, "project-tars") # Base directory directory within driver container for storing extracted project directories DB_PROJECTS_BASE = os.path.join(DB_CONTAINER_BASE, "projects") # Name to use for project directory when archiving it for upload to DBFS; the TAR will contain # a single directory with this name DB_TARFILE_ARCHIVE_NAME = "mlflow-project" # Base directory within DBFS for storing code for project runs for experiments DBFS_EXPERIMENT_DIR_BASE = "mlflow-experiments" def _jobs_runs_get(databricks_run_id): return rest_utils.databricks_api_request( endpoint="jobs/runs/get", method="GET", json={"run_id": databricks_run_id}) def _jobs_runs_cancel(databricks_run_id): return rest_utils.databricks_api_request( endpoint="jobs/runs/cancel", method="POST", json={"run_id": databricks_run_id}) def _jobs_runs_submit(req_body_json): return rest_utils.databricks_api_request( endpoint="jobs/runs/submit", method="POST", json=req_body_json) def _get_databricks_run_cmd(dbfs_fuse_tar_uri, run_id, entry_point, parameters): """ Generates MLflow CLI command to run on Databricks cluster in order to launch a run on Databricks """ # Strip ".gz" and ".tar" file extensions from base filename of the tarfile tar_hash = os.path.splitext(os.path.splitext(os.path.basename(dbfs_fuse_tar_uri))[0])[0] container_tar_path = os.path.abspath(os.path.join(DB_TARFILE_BASE, os.path.basename(dbfs_fuse_tar_uri))) project_dir = os.path.join(DB_PROJECTS_BASE, tar_hash) mlflow_run_arr = list(map(shlex_quote, ["mlflow", "run", project_dir, "--entry-point", entry_point])) if run_id: mlflow_run_arr.extend(["--run-id", run_id]) if parameters: for key, value in parameters.items(): mlflow_run_arr.extend(["-P", "%s=%s" % (key, value)]) mlflow_run_cmd = " ".join(mlflow_run_arr) shell_command = textwrap.dedent(""" export PATH=$DB_HOME/conda/bin:$DB_HOME/python/bin:$PATH && mlflow --version && # Make local directories in the container into which to copy/extract the tarred project mkdir -p {tarfile_base} {projects_base} && # Rsync from DBFS FUSE to avoid copying archive into local filesystem if it already exists rsync -a -v --ignore-existing {dbfs_fuse_tar_path} {tarfile_base} && # Extract project into a temporary directory. We don't extract directly into the desired # directory as tar extraction isn't guaranteed to be atomic cd $(mktemp -d) && tar --no-same-owner -xzvf {container_tar_path} && # Atomically move the extracted project into the desired directory mv -T {tarfile_archive_name} {work_dir} && {mlflow_run} """.format(tarfile_base=DB_TARFILE_BASE, projects_base=DB_PROJECTS_BASE, dbfs_fuse_tar_path=dbfs_fuse_tar_uri, container_tar_path=container_tar_path, tarfile_archive_name=DB_TARFILE_ARCHIVE_NAME, work_dir=project_dir, mlflow_run=mlflow_run_cmd)) return ["bash", "-c", shell_command] def _check_databricks_auth_available(): """ Verifies that information for making API requests to Databricks is available to MLflow, raising an exception if not. """ rest_utils.get_databricks_http_request_kwargs_or_fail() def _upload_to_dbfs(src_path, dbfs_fuse_uri): """ Uploads the file at `src_path` to the specified DBFS URI within the Databricks workspace corresponding to the default Databricks CLI profile. """ eprint("=== Uploading project to DBFS path %s ===" % dbfs_fuse_uri) http_endpoint = dbfs_fuse_uri http_request_kwargs = rest_utils.get_databricks_http_request_kwargs_or_fail() with open(src_path, 'rb') as f: rest_utils.http_request( endpoint=http_endpoint, method='POST', data=f, **http_request_kwargs) def _dbfs_path_exists(dbfs_uri):
""" dbfs_path = _parse_dbfs_uri_path(dbfs_uri) json_response_obj = rest_utils.databricks_api_request( endpoint="dbfs/get-status", method="GET", json={"path": dbfs_path}) # If request fails with a RESOURCE_DOES_NOT_EXIST error, the file does not exist on DBFS error_code_field = "error_code" if error_code_field in json_response_obj: if json_response_obj[error_code_field] == "RESOURCE_DOES_NOT_EXIST": return False raise ExecutionException("Got unexpected error response when checking whether file %s " "exists in DBFS: %s" % json_response_obj) return True def _upload_project_to_dbfs(project_dir, experiment_id): """ Tars a project directory into an archive in a temp dir and uploads it to DBFS, returning the HDFS-style URI of the tarball in DBFS (e.g. dbfs:/path/to/tar). :param project_dir: Path to a directory containing an MLflow project to upload to DBFS (e.g. a directory containing an MLproject file). """ temp_tarfile_dir = tempfile.mkdtemp() temp_tar_filename = file_utils.build_path(temp_tarfile_dir, "project.tar.gz") def custom_filter(x): return None if os.path.basename(x.name) == "mlruns" else x try: file_utils.make_tarfile(temp_tar_filename, project_dir, DB_TARFILE_ARCHIVE_NAME, custom_filter=custom_filter) with open(temp_tar_filename, "rb") as tarred_project: tarfile_hash = hashlib.sha256(tarred_project.read()).hexdigest() # TODO: Get subdirectory for experiment from the tracking server dbfs_fuse_uri = os.path.join("/dbfs", DBFS_EXPERIMENT_DIR_BASE, str(experiment_id), "projects-code", "%s.tar.gz" % tarfile_hash) if not _dbfs_path_exists(dbfs_fuse_uri): _upload_to_dbfs(temp_tar_filename, dbfs_fuse_uri) eprint("=== Finished uploading project to %s ===" % dbfs_fuse_uri) else: eprint("=== Project already exists in DBFS ===") finally: shutil.rmtree(temp_tarfile_dir) return dbfs_fuse_uri def _get_run_result_state(databricks_run_id): """ Returns the run result state (string) of the Databricks run with the passed-in ID, or None if the run is still active. See possible values at https://docs.databricks.com/api/latest/jobs.html#runresultstate. """ res = _jobs_runs_get(databricks_run_id) return res["state"].get("result_state", None) def _run_shell_command_job(project_uri, command, env_vars, cluster_spec): """ Runs the specified shell command on a Databricks cluster. :param project_uri: URI of the project from which our shell command originates :param command: Shell command to run :param env_vars: Environment variables to set in the process running `command` :param cluster_spec: Dictionary describing the cluster, expected to contain the fields for a NewCluster (see https://docs.databricks.com/api/latest/jobs.html#jobsclusterspecnewcluster) :return: The ID of the Databricks Job Run. Can be used to query the run's status via the Databricks Runs Get API (https://docs.databricks.com/api/latest/jobs.html#runs-get). """ # Make jobs API request to launch run. req_body_json = { 'run_name': 'MLflow Run for %s' % project_uri, 'new_cluster': cluster_spec, 'shell_command_task': { 'command': command, "env_vars": env_vars }, "libraries": [{"pypi": {"package": "mlflow==%s" % VERSION}}] } run_submit_res = _jobs_runs_submit(req_body_json) databricks_run_id = run_submit_res["run_id"] eprint("=== Launched MLflow run as Databricks job run with ID %s. Getting run status " "page URL... ===" % databricks_run_id) run_info = _jobs_runs_get(databricks_run_id) jobs_page_url = run_info["run_page_url"] eprint("=== Check the run's status at %s ===" % jobs_page_url) return databricks_run_id def _parse_dbfs_uri_path(dbfs_uri): """ Parses and returns the absolute path within DBFS of the file with the specified URI. For example, given an input of "dbfs:/my/dbfs/path", this method will return "/my/dbfs/path" """ return urllib.parse.urlparse(dbfs_uri).path def _fetch_and_clean_project(uri, version=None, git_username=None, git_password=None): """ Fetches the project at the passed-in URI & prepares it for upload to DBFS. Returns the path of the temporary directory into which the project was fetched. """ work_dir = _fetch_project( uri=uri, force_tempdir=True, version=version, git_username=git_username, git_password=git_password) # Remove the mlruns directory from the fetched project to avoid cache-busting mlruns_dir = os.path.join(work_dir, "mlruns") if os.path.exists(mlruns_dir): shutil.rmtree(mlruns_dir) return work_dir def _before_run_validations(tracking_uri, cluster_spec): """Validations to perform before running a project on Databricks.""" _check_databricks_auth_available() if cluster_spec is None: raise ExecutionException("Cluster spec must be provided when launching MLflow project runs " "on Databricks.") if tracking.utils._is_local_uri(tracking_uri): raise ExecutionException( "When running on Databricks, the MLflow tracking URI must be set to a remote URI " "accessible to both the current client and code running on Databricks. Got local " "tracking URI %s." % tracking_uri) def run_databricks(remote_run, uri, entry_point, work_dir, parameters, experiment_id, cluster_spec): """ Runs the project at the specified URI on Databricks, returning a `SubmittedRun` that can be used to query the run's status or wait for the resulting Databricks Job run to terminate. """ tracking_uri = tracking.get_tracking_uri() _before_run_validations(tracking_uri, cluster_spec) dbfs_fuse_uri = _upload_project_to_dbfs(work_dir, experiment_id) env_vars = { tracking._TRACKING_URI_ENV_VAR: tracking_uri, tracking._EXPERIMENT_ID_ENV_VAR: experiment_id, } run_id = remote_run.info.run_uuid eprint("=== Running entry point %s of project %s on Databricks. ===" % (entry_point, uri)) # Launch run on Databricks with open(cluster_spec, 'r') as handle: try: cluster_spec = json.load(handle) except ValueError: eprint("Error when attempting to load and parse JSON cluster spec from file " "%s. " % cluster_spec) raise command = _get_databricks_run_cmd(dbfs_fuse_uri, run_id, entry_point, parameters) db_run_id = _run_shell_command_job(uri, command, env_vars, cluster_spec) return DatabricksSubmittedRun(db_run_id, run_id) def _cancel_databricks(databricks_run_id): _jobs_runs_cancel(databricks_run_id) def _monitor_databricks(databricks_run_id, sleep_interval=30): """ Polls a Databricks Job run (with run ID `databricks_run_id`) for termination, checking the run's status every `sleep_interval` seconds. """ result_state = _get_run_result_state(databricks_run_id) while result_state is None: time.sleep(sleep_interval) result_state = _get_run_result_state(databricks_run_id) return result_state == "SUCCESS" class DatabricksSubmittedRun(SubmittedRun): """ Instance of SubmittedRun corresponding to a Databricks Job run launched to run an MLflow project. Note that run_id may be None, e.g. if we did not launch the run against a tracking server accessible to the local client. """ def __init__(self, databricks_run_id, run_id): super(DatabricksSubmittedRun, self).__init__() self.databricks_run_id = databricks_run_id self._run_id = run_id @property def run_id(self): return self._run_id def wait(self): return _monitor_databricks(self.databricks_run_id) def cancel(self): _cancel_databricks(self.databricks_run_id) self.wait() def _get_status(self): run_state = _get_run_result_state(self.databricks_run_id) if run_state is None: return RunStatus.RUNNING if run_state == "SUCCESS": return RunStatus.FINISHED return RunStatus.FAILED def get_status(self): return RunStatus.to_string(self._get_status())
""" Returns True if the passed-in path exists in DBFS for the workspace corresponding to the default Databricks CLI profile.
tag-input-event.ts
export class TagInputEvent { private _defaultPrevented = false; constructor(public tag: any) {} preventDefault() { this._defaultPrevented = true; } defaultPrevented(): boolean {
return this._defaultPrevented; } }
adapter.ts
import { Requester, Validator } from '@chainlink/ea-bootstrap' import { ExecuteFactory, ExecuteWithConfig } from '@chainlink/types' import * as BigQuery from '@chainlink/google-bigquery-adapter' import { Config, makeConfig } from './config' import * as gjv from 'geojson-validation' import convert from 'convert-units' export interface Polygon { type: "Polygon" coordinates: [number, number][][] } export type Point = { type: "Point" coordinates: [number, number] } export interface Feature { type: string geometry: Polygon | Point } export interface GeoJSON { type: string features: Feature[] } const customParams = { geoJson: true, pointInPolygon: false, dateFrom: true, dateTo: true, method: true, column: true, units: false, } export const execute: ExecuteWithConfig<Config> = async (input, context, config) => { const validator = new Validator(input, customParams) if (validator.error) throw validator.error const jobRunID = validator.validated.jobRunID let geoJson = validator.validated.data.geoJson if (typeof geoJson === 'string') { geoJson = JSON.parse(geoJson) } const dateFrom = validator.validated.data.dateFrom const dateTo = validator.validated.data.dateTo const method = validator.validated.data.method const column = validator.validated.data.column.toLowerCase() const units = validator.validated.data.units || 'imperial' if (!gjv.valid(geoJson)) { throw new Error('Provided GeoJSON data is not valid') } const queryBuilder = new QueryBuilder(geoJson, dateFrom, dateTo, method, column, config.dataset) const bigQuery = BigQuery.makeExecute(BigQuery.makeConfig()) const response = await bigQuery({ id: jobRunID, data: queryBuilder.toQuery() }, context) const imperialValue = Requester.validateResultNumber(response.result, [0, "result"]) const result = convertUnits(column, imperialValue, units) return Requester.success(jobRunID, { data: { result } }) } const convertUnits = (column: string, value: number, units: string): number => { if (units !== 'metric') return value const conv = convert(value) switch (column) { case 'temp': case 'dewp': case 'max': case 'min': return conv.from('F').to('C') case 'slp': case 'stp': return conv.from('bar').to('hPa') case 'visib': return conv.from('mi').to('m') case 'wdsp': case 'gust': case 'mxpsd': return conv.from('knot').to('m/s') case 'prcp': case 'sndp': return conv.from('in').to('mm') default: return value } } export const makeExecute: ExecuteFactory<Config> = (config) => { return async (request, context) => execute(request, context, config || makeConfig()) } type Method = 'SUM' | 'AVG' | 'MIN' | 'MAX' class
{ private readonly geoJson: GeoJSON private readonly dateFrom: Date private readonly dateTo: Date private readonly method: Method private readonly column: string private readonly dataset: string constructor(geoJson: GeoJSON, dateFrom: string, dateTo: string, method: Method, column: string, dataset: string) { this.geoJson = geoJson this.dateFrom = new Date(dateFrom) this.dateTo = new Date(dateTo) this.method = method this.column = column this.dataset = dataset } private modifiedColumn() { switch (this.column) { case 'fog': case 'rain_drizzle': case 'snow_ice_pellets': case 'hail': case 'tornado_funnel_cloud': case 'thunder': { return `cast(${this.column} as int64)` } } return this.column } private select() { switch (this.method) { case 'AVG': return `AVG(${this.modifiedColumn()})` case 'SUM': return `SUM(${this.modifiedColumn()})` case 'MIN': return `MIN(${this.modifiedColumn()})` case 'MAX': return `MAX(${this.modifiedColumn()})` default: throw new Error(`Unrecognized method: "${this.method}"`) } } private from() { const diff = this.dateTo.getUTCFullYear() - this.dateFrom.getUTCFullYear() if (diff === 0) { return `SELECT \`stn\`, \`${this.column}\`, \`date\` FROM \`${this.dataset}.gsod${this.dateTo.getUTCFullYear()}\`` } const years = new Array(diff + 1).fill(0) .map((_, i) => `SELECT \`stn\`, \`${this.column}\`, \`date\` FROM \`${this.dataset}.gsod${this.dateTo.getUTCFullYear()-i}\``) return years.join('\nUNION ALL\n') } private geoJsonQuery(): string[] { return this.geoJson.features.map((ft, i) => { switch (ft.geometry.type) { case "Polygon": { return `ST_CONTAINS(ST_GEOGFROMGEOJSON(@geoJson${i}), stations.geog)` } case "Point": { return [ 'usaf = ', '(SELECT usaf FROM stations AS sts', 'WHERE PARSE_DATE("%Y%m%d", sts.`begin`) <= DATE(@dateFrom)', 'AND PARSE_DATE("%Y%m%d", sts.`end`) >= DATE(@dateTo)', `ORDER BY ST_DISTANCE(ST_GEOGFROMGEOJSON(@geoJson${i}), sts.geog) LIMIT 1)` ].join('\n') } default: { return undefined } } }).filter(line => !!line) as string[] } private geoJsonParams(): { [key: string]: string } { const map: { [key: string]: string } = {} this.geoJson.features.forEach((ft, i) => { map[`geoJson${i}`] = JSON.stringify(ft.geometry) }) return map } private static formatDate(date: Date): string { const year = date.getUTCFullYear() let month = '' + (date.getUTCMonth() + 1) let day = '' + date.getUTCDate() if (month.length < 2) month = '0' + month if (day.length < 2) day = '0' + day return [year, month, day].join('-') } private columnFiltering(): string[] { switch (this.column) { case 'prcp': { // TODO: Causes issues if method is AVG, as // this could have been 0 instead. return ['AND prcp != 99.99'] } case 'visib': case 'wdsp': { return [`AND ${this.column} != 999.9`] } case 'dewp': case 'slp': case 'stp': case 'max': case 'min': case 'temp': { return [`AND ${this.column} != 9999.9`] } case 'fog': case 'rain_drizzle': case 'snow_ice_pellets': case 'hail': case 'tornado_funnel_cloud': case 'thunder': { return [`AND (${this.column} = "0" OR ${this.column} = "1")`] } } return [] } public toQuery(): { query: string, params: { [key: string]: string | number }} { return { query: [ // Stations 'WITH', 'stations AS (', ` SELECT usaf, ST_GEOGPOINT(lon, lat) AS geog, \`begin\`, \`end\` FROM \`${this.dataset}.stations\``, ')', // Main query `SELECT ${this.select()} AS result`, `FROM (${this.from()})`, 'WHERE stn IN (SELECT usaf FROM stations', `WHERE (${this.geoJsonQuery().join(`)\nOR\n(`)}))`, 'AND date BETWEEN @dateFrom AND @dateTo', ...this.columnFiltering() ].join('\n') + ';', params: { ...this.geoJsonParams(), dateFrom: QueryBuilder.formatDate(this.dateFrom), dateTo: QueryBuilder.formatDate(this.dateTo) } } } }
QueryBuilder
test_api_snapshot_execution_plan.py
from dagster.api.snapshot_execution_plan import sync_get_external_execution_plan from dagster.core.snap.execution_plan_snapshot import ExecutionPlanSnapshot from .utils import get_foo_pipeline_handle def test_execution_plan_snapshot_api(): pipeline_handle = get_foo_pipeline_handle() execution_plan_snapshot = sync_get_external_execution_plan( pipeline_handle.get_origin(), environment_dict={}, mode="default", snapshot_id="12345", ) assert isinstance(execution_plan_snapshot, ExecutionPlanSnapshot) assert execution_plan_snapshot.step_keys_to_execute == [ 'do_something.compute', 'do_input.compute', ] assert len(execution_plan_snapshot.steps) == 2 def
(): pipeline_handle = get_foo_pipeline_handle() execution_plan_snapshot = sync_get_external_execution_plan( pipeline_handle.get_origin(), environment_dict={}, mode="default", snapshot_id="12345", step_keys_to_execute=['do_something.compute'], ) assert isinstance(execution_plan_snapshot, ExecutionPlanSnapshot) assert execution_plan_snapshot.step_keys_to_execute == [ 'do_something.compute', ] assert len(execution_plan_snapshot.steps) == 2 def test_execution_plan_with_subset_snapshot_api(): pipeline_handle = get_foo_pipeline_handle() execution_plan_snapshot = sync_get_external_execution_plan( pipeline_handle.get_origin(), environment_dict={'solids': {'do_input': {'inputs': {'x': {'value': "test"}}}}}, mode="default", snapshot_id="12345", solid_selection=["do_input"], ) assert isinstance(execution_plan_snapshot, ExecutionPlanSnapshot) assert execution_plan_snapshot.step_keys_to_execute == [ 'do_input.compute', ] assert len(execution_plan_snapshot.steps) == 1
test_execution_plan_with_step_keys_to_execute_snapshot_api
QALogs.py
# Coding:utf-8 # # The MIT License (MIT) # # Copyright (c) 2016-2018 yutiansut/QUANTAXIS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """ QUANTAXIS Log Module @yutiansut QA_util_log_x is under [QAStandard#0.0.2@602-x] Protocol QA_util_log_info() QA_util_log_debug() QA_util_log_expection() """ import datetime from zenlog import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s QUANTAXIS>>> %(message)s', datefmt='%H:%M:%S', filename='quantaxis-' + str(datetime.datetime.now().strftime( '%Y-%m-%d-%H-%M-%S')) + '-.log', filemode='w') console = logging.StreamHandler() console.setLevel(logging.INFO) formatter = logging.Formatter('QUANTAXIS>> %(message)s') console.setFormatter(formatter) logging.getLogger('').addHandler(console) logging.info('start QUANTAXIS') def
(logs): """ QUANTAXIS Log Module @yutiansut QA_util_log_x is under [QAStandard#0.0.2@602-x] Protocol """ logging.debug(logs) def QA_util_log_info(logs): """ QUANTAXIS Log Module @yutiansut QA_util_log_x is under [QAStandard#0.0.2@602-x] Protocol """ logging.info(logs) def QA_util_log_expection(logs): """ QUANTAXIS Log Module @yutiansut QA_util_log_x is under [QAStandard#0.0.2@602-x] Protocol """ logging.exception(logs)
QA_util_log_debug
tests.rs
use crate::types::{Export, Name, Namespace}; use path_slash::PathBufExt as _; use std::path::PathBuf; fn check(s: &str, exports: &[(Namespace, &str)], members: &[&str]) { let (got_exports, got_members) = crate::get(s).unwrap(); let want_exports: Vec<_> = exports .iter() .map(|&(namespace, s)| Export { namespace,
.collect(); let want_members: Vec<_> = members.iter().map(|&s| PathBuf::from_slash(s)).collect(); assert_eq!(want_exports, got_exports); assert_eq!(want_members, got_members); } #[test] fn group() { check( r#" Group is ; comment hi.sml (* uh.sml *) support.sml "#, &[], &["hi.sml", "support.sml"], ); } #[test] fn library() { check( r#" Library structure A functor B signature C is a.sml b/c/d.sml e.fun f.sig uh:sml "#, &[ (Namespace::Structure, "A"), (Namespace::Functor, "B"), (Namespace::Signature, "C"), ], &["a.sml", "b/c/d.sml", "e.fun", "f.sig", "uh"], ); }
name: Name::new(s), })
data_operations.py
import numpy as np def euclidean_distance(p1,p2): """ returns euclidean distance between matrices @params: p1, p2: np.ndarray matrices to perform operation to. """ return np.sqrt(np.sum((p1-p2)**2, axis=1)) def entropy(p): """ Will be our measurement for uncertainty in our construction of descision tree @params: p: float """ if p == 0: return 0 elif p == 1: return 0 else: return -(p * np.log2(p) + (1 - p) * np.log2(1 - p)) def information_gain(left_child, right_child): """ measurement of how much info we gained when splitting a node using our entropy method. @def: takes in a list of classes from left and right child to return the information gain of our curr split @params: left_child: np.ndarray curr left child arr right_child: np.ndarray curr left child arr """ parent = left_child + right_child p_par = parent.count(1) / len(parent) if len(parent) > 0 else 0 p_left = left_child.count(1) / len(left_child) if len(left_child) \ > 0 else 0
infogain_p = self.entropy(p_par) infogain_l = self.entropy(p_left) infogain_r = self.entropy(p_right) return infogain_p - len(left_child) / len(parent) * infogain_l - \ len(right_child) / len(parent) * infogain_r
p_right = right_child.count(1) / len(right_child) if len(right_child) \ > 0 else 0
emit.rs
use crate::{ listener::{Listener, Listeners}, EventTypeFlags, }; #[allow(unused_imports)] use log::{debug, info, trace, warn}; use twilight_model::gateway::event::{shard::Payload, Event}; pub async fn bytes(listeners: Listeners<Event>, bytes: &[u8]) { for listener in listeners.all().lock().await.values() { if listener.events.contains(EventTypeFlags::SHARD_PAYLOAD) { let event = Event::ShardPayload(Payload { bytes: bytes.to_owned(), }); // If the channel isn't active, this'll be caught by event emissions // later. let _ = listener.tx.unbounded_send(event); } } } pub fn event(listeners: Listeners<Event>, event: Event) { tokio::spawn(Box::pin(_event(listeners, event))); } async fn _event(listeners: Listeners<Event>, event: Event)
/// Returns whether the channel is still active. /// /// If the receiver dropped, return `false` so we know to remove it. /// These are unbounded channels, so we know it's not because it's full. fn _emit_to_listener(id: u64, listener: &Listener<Event>, event: Event) -> bool { let event_type = EventTypeFlags::from(event.kind()); if !listener.events.contains(event_type) { trace!( "[ShardProcessor] Listener {} doesn't want event type {:?}", id, event_type, ); return true; } listener.tx.unbounded_send(event).is_ok() }
{ let mut listeners = listeners.all().lock().await; let mut remove_listeners = Vec::new(); // Take up to the last one so that we can later get the last and *move* // the event into the listener's channel, rather than clone it like we // do here. // // This avoids a clone, and for users with only 1 listener this will // entirely avoid cloning. let mut last = None; for (idx, (id, listener)) in listeners.iter().enumerate() { if idx == listeners.len() - 1 { last = Some(*id); break; } let event_type = EventTypeFlags::from(event.kind()); if !listener.events.contains(event_type) { trace!( "[ShardProcessor] Listener {} doesn't want event type {:?}", id, event_type, ); continue; } if !_emit_to_listener(*id, listener, event.clone()) { remove_listeners.push(*id); } } if let Some(id) = last { if let Some(listener) = listeners.get(&id) { if !_emit_to_listener(id, listener, event) { remove_listeners.push(id); } } } for id in &remove_listeners { debug!("[ShardProcessor] Removing listener {}", id); listeners.remove(id); } }