Column      String length range
file_name   3 – 137
prefix      0 – 918k
suffix      0 – 962k
middle      0 – 812k
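The four columns suggest a fill-in-the-middle style code dataset: each source file is split at two points, and the middle span sits between the prefix and suffix spans. The sketch below shows how a single row could be reassembled into the original file; it is a minimal illustration only, and the dict-based row layout and the sample values (loosely based on the encoder_test.go row further down, with whitespace restored by hand) are assumptions, not the dataset's actual storage format or API.

# Minimal sketch (not the dataset's actual API): reassemble one
# prefix/suffix/middle record into the original source text.

def reassemble(row: dict) -> str:
    """Concatenate the three spans back into the full file contents."""
    return row["prefix"] + row["middle"] + row["suffix"]

# Illustrative record, loosely based on the encoder_test.go row below;
# the newlines and trailing space are restored by hand (an assumption
# about the original formatting).
example_row = {
    "file_name": "encoder_test.go",
    "prefix": 'package bencode\n\nimport "testing"\n\nfunc ',
    "middle": "TestEncodeInt",
    "suffix": "(t *testing.T) {\n\t// ...\n}\n",
}

print(reassemble(example_row))  # prints the Go source with "TestEncodeInt" filled back in

In the row previews below, each record appears as its file_name followed by its prefix, suffix, and middle fields.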
testfiles.py
#!/usr/bin/env python
## setup testfiles for easier access
__base_dir = os.path.dirname(os.path.abspath(__file__))
__testfiles_dir = os.path.realpath(__base_dir + "/../testdata")
testfiles = {}
for file in os.listdir(__testfiles_dir):
    testfiles[file] = "%s/%s" % (__testfiles_dir, file)
import os
event.go
package avs import ( "time" ) const AlertFamily = "Alerts" const AlertEnteredBackgroundEvent = "AlertEnteredBackground" const AlertEnteredForegroundEvent = "AlertEnteredForeground" const AlertStartedEvent = "AlertStarted" const AlertStoppedEvent = "AlertStopped" const DeleteAlertFailedEvent = "DeleteAlertFailed" const DeleteAlertSucceededEvent = "DeleteAlertSucceeded" const SetAlertFailedEvent = "SetAlertFailed" const SetAlertSucceededEvent = "SetAlertSucceeded" const AudioPlayerFamily = "AudioPlayer" const PlaybackFailedEvent = "PlaybackFailed" const PlaybackFinishedEvent = "PlaybackFinished" const PlaybackNearlyFinishedEvent = "PlaybackNearlyFinished" const PlaybackPausedEvent = "PlaybackPaused" const PlaybackQueueClearedEvent = "PlaybackQueueCleared" const PlaybackResumedEvent = "PlaybackResumed" const PlaybackStartedEvent = "PlaybackStarted" const PlaybackStoppedEvent = "PlaybackStopped" const PlaybackStutterStartedEvent = "PlaybackStutterStarted" const PlaybackStutterFinishedEvent = "PlaybackStutterFinished" const ProgressReportDelayElapsedEvent = "ProgressReportDelayElapsed" const ProgressReportIntervalElapsedEvent = "ProgressReportIntervalElapsed" const StreamMetadataExtractedEvent = "StreamMetadataExtracted" const PlaybackControllerFamily = "PlaybackController" const NextCommandIssuedEvent = "NextCommandIssued" const PauseCommandIssuedEvent = "PauseCommandIssued" const PlayCommandIssuedEvent = "PlayCommandIssued" const PreviousCommandIssuedEvent = "PreviousCommandIssued" const SpeakerFamily = "Speaker" const MuteChangedEvent = "MuteChanged" const VolumeChangedEvent = "VolumeChanged" const SpeechRecognizerFamily = "SpeechRecognizer" const ExpectSpeechTimedOutEvent = "ExpectSpeechTimedOut" const RecognizeEvent = "Recognize" const SpeechSynthesizerFamily = "SpeechSynthesizer" const SpeechFinishedEvent = "SpeechFinished" const SpeechStartedEvent = "SpeechStarted" const SettingsFamily = "Settings" const SettingsUpdatedEvent = "SettingsUpdated" const SystemFamily = "System" const ExceptionEncounteredEvent = "ExceptionEncountered" const SynchronizeStateEvent = "SynchronizeState" const UserInactivityReportEvent = "UserInactivityReport" // newEvent creates a Message suited for being used as an event value. func newEvent(namespace, name, messageId, dialogRequestId string) *Message { m := &Message{ Header: map[string]string{ "namespace": namespace, "name": name, "messageId": messageId, }, Payload: nil, } if dialogRequestId != "" { m.Header["dialogRequestId"] = dialogRequestId } return m } /********** Alerts **********/ // The AlertEnteredBackground event. type AlertEnteredBackground struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewAlertEnteredBackground(messageId, token string) *AlertEnteredBackground { m := new(AlertEnteredBackground) m.Message = newEvent(AlertFamily, AlertEnteredBackgroundEvent, messageId, "") m.Payload.Token = token return m } // The AlertEnteredForeground event. type AlertEnteredForeground struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewAlertEnteredForeground(messageId, token string) *AlertEnteredForeground { m := new(AlertEnteredForeground) m.Message = newEvent(AlertFamily, AlertEnteredForegroundEvent, messageId, "") m.Payload.Token = token return m } // The AlertStarted event. 
type AlertStarted struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewAlertStarted(messageId, token string) *AlertStarted { m := new(AlertStarted) m.Message = newEvent(AlertFamily, AlertStartedEvent, messageId, "") m.Payload.Token = token return m } // The AlertStopped event. type AlertStopped struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewAlertStopped(messageId, token string) *AlertStopped { m := new(AlertStopped) m.Message = newEvent(AlertFamily, AlertStoppedEvent, messageId, "") m.Payload.Token = token return m } // The DeleteAlertFailed event. type DeleteAlertFailed struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewDeleteAlertFailed(messageId, token string) *DeleteAlertFailed { m := new(DeleteAlertFailed) m.Message = newEvent(AlertFamily, DeleteAlertFailedEvent, messageId, "") m.Payload.Token = token return m } // The DeleteAlertSucceeded event. type DeleteAlertSucceeded struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewDeleteAlertSucceeded(messageId, token string) *DeleteAlertSucceeded { m := new(DeleteAlertSucceeded) m.Message = newEvent(AlertFamily, DeleteAlertSucceededEvent, messageId, "") m.Payload.Token = token return m } // The SetAlertFailed event. type SetAlertFailed struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewSetAlertFailed(messageId, token string) *SetAlertFailed { m := new(SetAlertFailed) m.Message = newEvent(AlertFamily, SetAlertFailedEvent, messageId, "") m.Payload.Token = token return m } // The SetAlertSucceeded event. type SetAlertSucceeded struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewSetAlertSucceeded(messageId, token string) *SetAlertSucceeded { m := new(SetAlertSucceeded) m.Message = newEvent(AlertFamily, SetAlertSucceededEvent, messageId, "") m.Payload.Token = token return m } /********** AudioPlayer **********/ // Also used by the PlaybackState context. type playbackState struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` PlayerActivity PlayerActivity `json:"playerActivity"` } // The PlaybackFailed event. type PlaybackFailed struct { *Message Payload struct { Token string `json:"token"` CurrentPlaybackState playbackState `json:"currentPlaybackState"` Error struct { Type MediaErrorType `json:"type"` Message string `json:"message"` } `json:"error"` } `json:"payload"` } func NewPlaybackFailed(messageId, token string, errorType MediaErrorType, errorMessage string) *PlaybackFailed { m := new(PlaybackFailed) m.Message = newEvent(AudioPlayerFamily, PlaybackFailedEvent, messageId, "") m.Payload.Token = token m.Payload.Error.Type = errorType m.Payload.Error.Message = errorMessage return m } // The PlaybackFinished event. type PlaybackFinished struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackFinished(messageId, token string, offset time.Duration) *PlaybackFinished { m := new(PlaybackFinished) m.Message = newEvent(AudioPlayerFamily, PlaybackFinishedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackNearlyFinished event. 
type PlaybackNearlyFinished struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackNearlyFinished(messageId, token string, offset time.Duration) *PlaybackNearlyFinished { m := new(PlaybackNearlyFinished) m.Message = newEvent(AudioPlayerFamily, PlaybackNearlyFinishedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackPaused event. type PlaybackPaused struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackPaused(messageId, token string, offset time.Duration) *PlaybackPaused { m := new(PlaybackPaused) m.Message = newEvent(AudioPlayerFamily, PlaybackPausedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackQueueCleared event. type PlaybackQueueCleared struct { *Message Payload struct{} `json:"payload"` } func NewPlaybackQueueCleared(messageId string) *PlaybackQueueCleared { m := new(PlaybackQueueCleared) m.Message = newEvent(AudioPlayerFamily, PlaybackQueueClearedEvent, messageId, "") return m } // The PlaybackResumed event. type PlaybackResumed struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackResumed(messageId, token string, offset time.Duration) *PlaybackResumed { m := new(PlaybackResumed) m.Message = newEvent(AudioPlayerFamily, PlaybackResumedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackStarted event. type PlaybackStarted struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func
(messageId, token string, offset time.Duration) *PlaybackStarted { m := new(PlaybackStarted) m.Message = newEvent(AudioPlayerFamily, PlaybackStartedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackStopped event. type PlaybackStopped struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackStopped(messageId, token string, offset time.Duration) *PlaybackStopped { m := new(PlaybackStopped) m.Message = newEvent(AudioPlayerFamily, PlaybackStoppedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackStutterStarted event. type PlaybackStutterStarted struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewPlaybackStutterStarted(messageId, token string, offset time.Duration) *PlaybackStutterStarted { m := new(PlaybackStutterStarted) m.Message = newEvent(AudioPlayerFamily, PlaybackStutterStartedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The PlaybackStutterFinished event. type PlaybackStutterFinished struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` StutterDurationInMilliseconds int `json:"stutterDurationInMilliseconds"` } `json:"payload"` } func NewPlaybackStutterFinished(messageId, token string, offset, stutterDuration time.Duration) *PlaybackStutterFinished { m := new(PlaybackStutterFinished) m.Message = newEvent(AudioPlayerFamily, PlaybackStutterFinishedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) m.Payload.StutterDurationInMilliseconds = int(stutterDuration.Seconds() * 1000) return m } // The ProgressReportDelayElapsed event. type ProgressReportDelayElapsed struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewProgressReportDelayElapsed(messageId, token string, offset time.Duration) *ProgressReportDelayElapsed { m := new(ProgressReportDelayElapsed) m.Message = newEvent(AudioPlayerFamily, ProgressReportDelayElapsedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The ProgressReportIntervalElapsed event. type ProgressReportIntervalElapsed struct { *Message Payload struct { Token string `json:"token"` OffsetInMilliseconds int `json:"offsetInMilliseconds"` } `json:"payload"` } func NewProgressReportIntervalElapsed(messageId, token string, offset time.Duration) *ProgressReportIntervalElapsed { m := new(ProgressReportIntervalElapsed) m.Message = newEvent(AudioPlayerFamily, ProgressReportIntervalElapsedEvent, messageId, "") m.Payload.Token = token m.Payload.OffsetInMilliseconds = int(offset.Seconds() * 1000) return m } // The StreamMetadataExtracted event. 
type StreamMetadataExtracted struct { *Message Payload struct { Token string `json:"token"` Metadata map[string]interface{} `json:"metadata"` } `json:"payload"` } func NewStreamMetadataExtracted(messageId, token string, metadata map[string]interface{}) *StreamMetadataExtracted { m := new(StreamMetadataExtracted) m.Message = newEvent(AudioPlayerFamily, StreamMetadataExtractedEvent, messageId, "") m.Payload.Token = token m.Payload.Metadata = metadata return m } /********** PlaybackController **********/ // The NextCommandIssued event. type NextCommandIssued struct { *Message Payload struct{} `json:"payload"` } func NewNextCommandIssued(messageId string) *NextCommandIssued { m := new(NextCommandIssued) m.Message = newEvent(PlaybackControllerFamily, NextCommandIssuedEvent, messageId, "") return m } // The PauseCommandIssued event. type PauseCommandIssued struct { *Message Payload struct{} `json:"payload"` } func NewPauseCommandIssued(messageId string) *PauseCommandIssued { m := new(PauseCommandIssued) m.Message = newEvent(PlaybackControllerFamily, PauseCommandIssuedEvent, messageId, "") return m } // The PlayCommandIssued event. type PlayCommandIssued struct { *Message Payload struct{} `json:"payload"` } func NewPlayCommandIssued(messageId string) *PlayCommandIssued { m := new(PlayCommandIssued) m.Message = newEvent(PlaybackControllerFamily, PlayCommandIssuedEvent, messageId, "") return m } // The PreviousCommandIssued event. type PreviousCommandIssued struct { *Message Payload struct{} `json:"payload"` } func NewPreviousCommandIssued(messageId string) *PreviousCommandIssued { m := new(PreviousCommandIssued) m.Message = newEvent(PlaybackControllerFamily, PreviousCommandIssuedEvent, messageId, "") return m } /********** Speaker **********/ // The MuteChanged event. type MuteChanged struct { *Message Payload struct { Volume int `json:"volume"` Muted bool `json:"muted"` } `json:"payload"` } func NewMuteChanged(messageId string, volume int, muted bool) *MuteChanged { m := new(MuteChanged) m.Message = newEvent(SpeakerFamily, MuteChangedEvent, messageId, "") m.Payload.Volume = volume m.Payload.Muted = muted return m } // The VolumeChanged event. type VolumeChanged struct { *Message Payload struct { Volume int `json:"volume"` Muted bool `json:"muted"` } `json:"payload"` } func NewVolumeChanged(messageId string, volume int, muted bool) *VolumeChanged { m := new(VolumeChanged) m.Message = newEvent(SpeakerFamily, VolumeChangedEvent, messageId, "") m.Payload.Volume = volume m.Payload.Muted = muted return m } /********** SpeechRecognizer **********/ // The ExpectSpeechTimedOut event. type ExpectSpeechTimedOut struct { *Message Payload struct{} `json:"payload"` } func NewExpectSpeechTimedOut(messageId string) *ExpectSpeechTimedOut { m := new(ExpectSpeechTimedOut) m.Message = newEvent(SpeechRecognizerFamily, ExpectSpeechTimedOutEvent, messageId, "") return m } // RecognizeProfile identifies the ASR profile associated with your product. type RecognizeProfile string // Possible values for RecognizeProfile. // Supports three distinct profiles optimized for speech at varying distances. const ( RecognizeProfileCloseTalk = RecognizeProfile("CLOSE_TALK") RecognizeProfileNearField = RecognizeProfile("NEAR_FIELD") RecognizeProfileFarField = RecognizeProfile("FAR_FIELD") ) // The Recognize event. 
type Recognize struct { *Message Payload struct { Profile RecognizeProfile `json:"profile"` Format string `json:"format"` } `json:"payload"` } func NewRecognize(messageId, dialogRequestId string) *Recognize { return NewRecognizeWithProfile(messageId, dialogRequestId, RecognizeProfileCloseTalk) } func NewRecognizeWithProfile(messageId, dialogRequestId string, profile RecognizeProfile) *Recognize { m := new(Recognize) m.Message = newEvent(SpeechRecognizerFamily, RecognizeEvent, messageId, dialogRequestId) m.Payload.Format = "AUDIO_L16_RATE_16000_CHANNELS_1" m.Payload.Profile = profile return m } /********** SpeechSynthesizer **********/ // The SpeechFinished event. type SpeechFinished struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewSpeechFinished(messageId, token string) *SpeechFinished { m := new(SpeechFinished) m.Message = newEvent(SpeechSynthesizerFamily, SpeechFinishedEvent, messageId, "") m.Payload.Token = token return m } // The SpeechStarted event. type SpeechStarted struct { *Message Payload struct { Token string `json:"token"` } `json:"payload"` } func NewSpeechStarted(messageId, token string) *SpeechStarted { m := new(SpeechStarted) m.Message = newEvent(SpeechSynthesizerFamily, SpeechStartedEvent, messageId, "") m.Payload.Token = token return m } /********** Settings **********/ // The SettingsUpdated event. type Setting struct { Key string `json:"key"` Value string `json:"value"` } type SettingsUpdated struct { *Message Payload struct { Settings []Setting `json:"settings"` } `json:"payload"` } type SettingLocale string // Possible values for SettingLocale. const ( SettingLocaleUS = SettingLocale("en-US") SettingLocaleGB = SettingLocale("en-GB") SettingLocaleDE = SettingLocale("de-DE") ) func NewLocaleSettingsUpdated(messageId string, locale SettingLocale) *SettingsUpdated { m := new(SettingsUpdated) m.Message = newEvent(SettingsFamily, SettingsUpdatedEvent, messageId, "") m.Payload.Settings = append(m.Payload.Settings, Setting{ Key: "locale", Value: string(locale), }) return m } /********** System **********/ // The ExceptionEncountered event. type ExceptionEncountered struct { *Message Payload struct { UnparsedDirective string `json:"unparsedDirective"` Error struct { Type ErrorType `json:"type"` Message string `json:"message"` } `json:"error"` } `json:"payload"` } func NewExceptionEncountered(messageId, directive string, errorType ErrorType, errorMessage string) *ExceptionEncountered { m := new(ExceptionEncountered) m.Message = newEvent(SystemFamily, ExceptionEncounteredEvent, messageId, "") m.Payload.UnparsedDirective = directive m.Payload.Error.Type = errorType m.Payload.Error.Message = errorMessage return m } // The SynchronizeState event. type SynchronizeState struct { *Message Payload struct{} `json:"payload"` } func NewSynchronizeState(messageId string) *SynchronizeState { m := new(SynchronizeState) m.Message = newEvent(SystemFamily, SynchronizeStateEvent, messageId, "") return m } // The UserInactivityReport event. type UserInactivityReport struct { *Message Payload struct { InactiveTimeInSeconds int `json:"inactiveTimeInSeconds"` } `json:"payload"` } func NewUserInactivityReport(messageId string, inactiveTime time.Duration) *UserInactivityReport { m := new(UserInactivityReport) m.Message = newEvent(SystemFamily, UserInactivityReportEvent, messageId, "") m.Payload.InactiveTimeInSeconds = int(inactiveTime.Seconds()) return m }
NewPlaybackStarted
ordenamientoBurbuja.js
const array = [10, 4, 40, 32, 67, 12, 43, 31, 65, 1]

function
(array){
  // Traverse the array based on its total size
  for (let i = 0; i < array.length; i++) {
    for (let j = 0; j < array.length; j++) {
      // Compare the current value with the next value
      if (array[j] > array[j+1]) {
        let currentNumber = array[j] // auxiliary variable to hold the value so it is not lost
        // Now the current position takes the value of the next position
        array[j] = array[j+1]
        // Now the next position takes the value of the current position
        array[j+1] = currentNumber;
      }
    }
  }
  return array;
}

const result = bubbleSort(array)
console.log(result)
bubbleSort
tls.rs
use std::fmt::Debug; use std::net::SocketAddr; use std::pin::Pin; use async_std::net::TcpStream; use async_trait::async_trait; use deadpool::managed::{Manager, Object, RecycleResult}; use futures::io::{AsyncRead, AsyncWrite}; use futures::task::{Context, Poll}; cfg_if::cfg_if! { if #[cfg(feature = "rustls")] { use async_tls::client::TlsStream; } else if #[cfg(feature = "native-tls")] { use async_native_tls::TlsStream; } } use crate::Error; #[derive(Clone, Debug)] pub(crate) struct
{ host: String, addr: SocketAddr, } impl TlsConnection { pub(crate) fn new(host: String, addr: SocketAddr) -> Self { Self { host, addr } } } pub(crate) struct TlsConnWrapper { conn: Object<TlsStream<TcpStream>, Error>, } impl TlsConnWrapper { pub(crate) fn new(conn: Object<TlsStream<TcpStream>, Error>) -> Self { Self { conn } } } impl AsyncRead for TlsConnWrapper { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll<Result<usize, std::io::Error>> { Pin::new(&mut *self.conn).poll_read(cx, buf) } } impl AsyncWrite for TlsConnWrapper { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { Pin::new(&mut *self.conn).poll_write(cx, buf) } fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> { Pin::new(&mut *self.conn).poll_flush(cx) } fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> { Pin::new(&mut *self.conn).poll_close(cx) } } #[async_trait] impl Manager<TlsStream<TcpStream>, Error> for TlsConnection { async fn create(&self) -> Result<TlsStream<TcpStream>, Error> { let raw_stream = async_std::net::TcpStream::connect(self.addr).await?; let tls_stream = add_tls(&self.host, raw_stream).await?; Ok(tls_stream) } async fn recycle(&self, conn: &mut TlsStream<TcpStream>) -> RecycleResult<Error> { let mut buf = [0; 4]; let mut cx = Context::from_waker(futures::task::noop_waker_ref()); match Pin::new(conn).poll_read(&mut cx, &mut buf) { Poll::Ready(Err(error)) => Err(error), Poll::Ready(Ok(bytes)) if bytes == 0 => Err(std::io::Error::new( std::io::ErrorKind::UnexpectedEof, "connection appeared to be closed (EoF)", )), _ => Ok(()), } .map_err(Error::from)?; Ok(()) } } cfg_if::cfg_if! { if #[cfg(feature = "rustls")] { async fn add_tls(host: &str, stream: TcpStream) -> Result<TlsStream<TcpStream>, std::io::Error> { let connector = async_tls::TlsConnector::default(); connector.connect(host, stream).await } } else if #[cfg(feature = "native-tls")] { async fn add_tls( host: &str, stream: TcpStream, ) -> Result<TlsStream<TcpStream>, async_native_tls::Error> { async_native_tls::connect(host, stream).await } } }
TlsConnection
heavy_math_op.rs
use std::{thread, time}; #[derive(Clone, Copy)] struct HeavyNumber { value: f64, } impl HeavyNumber { fn new(value: f64) -> HeavyNumber { HeavyNumber { value } } fn get_value(&self) -> f64 { thread::sleep(time::Duration::from_millis(1)); self.value } } #[inline] pub fn log(a: f64) -> f64 { a.ln() } // #[inline] // pub fn f_dag_full(x: [f64; 5]) -> f64 {
// y // } #[inline] pub fn f_dag(x: [f64; 5]) -> f64 { let x_heavy = { let mut x_heavy = [HeavyNumber::new(0f64); 5]; for (i, v) in x.iter().enumerate() { x_heavy[i] = HeavyNumber::new(v.clone()); } x_heavy }; let a = x_heavy[0]; let b = x_heavy[1]; let c = x_heavy[2]; let d = x_heavy[3]; let e = x_heavy[4]; let y = a.get_value() * b.get_value() + a.get_value() * c.get_value() + a.get_value() * d.get_value() + a.get_value() * e.get_value(); y } #[inline] pub fn f_dag_long(x: [f64; 5]) -> f64 { let x_heavy = { let mut x_heavy = [HeavyNumber::new(0f64); 5]; for (i, v) in x.iter().enumerate() { x_heavy[i] = HeavyNumber::new(v.clone()); } x_heavy }; let a = x_heavy[0]; let b = x_heavy[1]; let c = x_heavy[2]; let d = x_heavy[3]; let e = x_heavy[4]; let y = a.get_value() * (b.get_value() + c.get_value() + d.get_value() + e.get_value()); y }
// let y1 = x[2] * (5.0 * x[0] + x[1]); // let y2 = log(y1); // let y = (y1 + x[3] * y2) * (y1 + y2);
deduced_test.go
/* Copyright 2019 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package merge_test import ( "testing" "sigs.k8s.io/structured-merge-diff/fieldpath" . "sigs.k8s.io/structured-merge-diff/internal/fixture" "sigs.k8s.io/structured-merge-diff/merge" "sigs.k8s.io/structured-merge-diff/typed" ) func TestDeduced(t *testing.T) { tests := map[string]TestCase{ "leaf_apply_twice": { Ops: []Operation{ Apply{ Manager: "default", Object: ` numeric: 1 string: "string" `, APIVersion: "v1", }, Apply{ Manager: "default", Object: ` numeric: 2 string: "string" bool: false `, APIVersion: "v1", }, }, Object: ` numeric: 2 string: "string" bool: false `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS( _P("numeric"), _P("string"), _P("bool"), ), APIVersion: "v1", }, }, }, "leaf_apply_update_apply_no_conflict": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 1 string: "string" `, }, Update{ Manager: "controller", APIVersion: "v1", Object: ` numeric: 1 string: "string" bool: true `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 2 string: "string" `, }, }, Object: ` numeric: 2 string: "string" bool: true `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS( _P("numeric"), _P("string"), ), APIVersion: "v1", }, "controller": &fieldpath.VersionedSet{ Set: _NS( _P("bool"), ), APIVersion: "v1", }, }, }, "leaf_apply_update_apply_with_conflict": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 1 string: "string" `, }, Update{ Manager: "controller", APIVersion: "v1", Object: ` numeric: 1 string: "controller string" bool: true `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 2 string: "user string" `, Conflicts: merge.Conflicts{ merge.Conflict{Manager: "controller", Path: _P("string")}, }, }, ForceApply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 2 string: "user string" `, }, }, Object: ` numeric: 2 string: "user string" bool: true `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS( _P("numeric"), _P("string"), ), APIVersion: "v1", }, "controller": &fieldpath.VersionedSet{ Set: _NS( _P("bool"), ), APIVersion: "v1", }, }, }, "leaf_apply_twice_dangling": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` numeric: 1 string: "string" bool: false `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ` string: "new string" `, }, }, Object: ` numeric: 1 string: "new string" bool: false `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS( _P("string"), ), APIVersion: "v1", }, }, }, "leaf_update_remove_empty_set": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` string: "string" `, }, Update{ Manager: "controller", APIVersion: "v1", Object: ` string: "new string" `, }, }, Object: ` string: "new string" `, Managed: fieldpath.ManagedFields{ "controller": &fieldpath.VersionedSet{ Set: _NS( _P("string"), ), APIVersion: "v1", }, }, }, 
"apply_twice_list_is_atomic": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` list: - a - c `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ` list: - a - d - c - b `, }, }, Object: ` list: - a - d - c - b `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS(_P("list")), APIVersion: "v1", }, }, }, "apply_update_apply_list": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` list: - a - c `, }, Update{ Manager: "controller", APIVersion: "v1", Object: ` list: - a - b - c - d `, }, ForceApply{ Manager: "default", APIVersion: "v1", Object: ` list: - a - b - c `, }, }, Object: ` list: - a - b - c `, Managed: fieldpath.ManagedFields{ "default": &fieldpath.VersionedSet{ Set: _NS(_P("list")), APIVersion: "v1", }, }, }, "leaf_apply_remove_empty_set": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` string: "string" `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ``, }, }, Object: ` string: "string" `, Managed: fieldpath.ManagedFields{}, }, "apply_update_apply_nested": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` a: 1 b: c: d: 2 e: - 1 - 2 - 3 f: - name: n value: 1 `, }, Update{ Manager: "controller", APIVersion: "v1", Object: ` a: 1 b: c: d: 3 e: - 1 - 2 - 3 - 4 f: - name: n value: 2 g: 5 `, }, Apply{ Manager: "default", APIVersion: "v1", Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 `, Conflicts: merge.Conflicts{ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")}, merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")}, merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")}, }, }, ForceApply{ Manager: "default", APIVersion: "v1", Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 `, }, }, Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 g: 5 `, }, "apply_update_apply_nested_different_version": { Ops: []Operation{ Apply{ Manager: "default", APIVersion: "v1", Object: ` a: 1 b: c: d: 2 e: - 1 - 2 - 3 f: - name: n value: 1 `, }, Update{ Manager: "controller", APIVersion: "v2", Object: ` a: 1 b: c: d: 3 e: - 1 - 2 - 3 - 4 f: - name: n value: 2 g: 5 `, }, Apply{ Manager: "default", APIVersion: "v3", Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 `, Conflicts: merge.Conflicts{ merge.Conflict{Manager: "controller", Path: _P("b", "c", "d")}, merge.Conflict{Manager: "controller", Path: _P("b", "c", "e")}, merge.Conflict{Manager: "controller", Path: _P("b", "c", "f")}, }, }, ForceApply{ Manager: "default", APIVersion: "v3", Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 `, }, }, Object: ` a: 2 b: c: d: 2 e: - 3 - 2 - 1 f: - name: n value: 1 g: 5 `, }, } for name, test := range tests { t.Run(name, func(t *testing.T) { if err := test.Test(typed.DeducedParseableType{}); err != nil
}) } }
{ t.Fatal(err) }
minNode.d.ts
  tree: ImmutableTree<T>,
): ImmutableTree<T>;
import type {ImmutableTree} from './types';

export default function minNode<T>(
main.js
import Vue from 'vue'; import Vuetify from 'vuetify'; import Vuelidate from 'vuelidate' import VueRouter from 'vue-router'; import axios from 'axios'; import App from './App.vue'; import store from './store/store'; import { routes } from './routes'; // import 'materialize-css/dist/css/materialize.css'; // import 'materialize-css/js/hammer.min.js'; // import 'materialize-css/dist/js/materialize.min.js'; import 'vuetify/dist/vuetify.min.css'; Vue.use(Vuetify); Vue.use(Vuelidate) Vue.use(VueRouter); Vue.prototype.$http = axios; axios.defaults.baseURL = 'http://localhost:3000'; axios.defaults.validateStatus = false; axios.interceptors.request.use(config => { if (store.getters.isAuthenicated) { config.headers['x-access-token'] = store.getters.getToken; } return config; }, error => { return Promise.reject(error); }); const router = new VueRouter({ mode: 'history',
}); router.beforeEach((to, from, next) => { if (to.meta.auth && !store.getters.isAuthenicated) { next({ path: '/login' }); } else { next(); } }); new Vue({ el: '#app', router, store, render: h => h(App) });
routes
storage.go
package storage import ( "strings" "github.com/rghiorghisor/basic-go-rest-api/config" "github.com/rghiorghisor/basic-go-rest-api/logger" property "github.com/rghiorghisor/basic-go-rest-api/property/gateway/storage" propertyset "github.com/rghiorghisor/basic-go-rest-api/propertyset/gateway/storage" ) // Storage structure contains all repositories. type Storage struct { factories []func() factory defaultFactory func() factory PropertyRepository property.Repository PropertySetRepository propertyset.Repository } type factory interface { id() string init(storage *Storage, config *config.StorageConfiguration) error } // New returns a bare-bone storage. func New() *Storage { return &Storage{ factories: []func() factory{newBoltFactory, newMongoFactory}, defaultFactory: newBoltFactory, } } // SetupStorage prepares the repository connections based on the provided // configuration and retrieves a handle for the database. // // Besides connecting to the database it also prepares repositories based on any // collection names. func (storage *Storage) SetupStorage(config *config.StorageConfiguration) error { var factory factory for _, ff := range storage.factories { f := ff() if !checkConfig(f, config.Type) { continue } factory = f
if factory == nil { factory = storage.defaultFactory() logger.Main.Infof("Unknown storage type '%s'. Using default '%s'.\n", config.Type, factory.id()) } err := factory.init(storage, config) return err } func checkConfig(f factory, storageType string) bool { return strings.EqualFold(f.id(), storageType) }
break }
mtext_complex.py
# Copyright (c) 2021, Manfred Moitzi # License: MIT License import copy import math from typing import Iterable, List, Optional, Tuple from ezdxf import colors from ezdxf.entities import MText from ezdxf.lldxf import const from ezdxf.math import Matrix44, Vec3 from ezdxf.render.abstract_mtext_renderer import AbstractMTextRenderer from ezdxf.tools import text_layout as tl, fonts from ezdxf.tools.text import MTextContext from .backend import BackendInterface from .properties import Properties, RenderContext, rgb_to_hex from .type_hints import Color __all__ = ["complex_mtext_renderer"] def corner_vertices( left: float, bottom: float, right: float, top: float, m: Matrix44 = None, ) -> Iterable[Vec3]: corners = [ # closed polygon: fist vertex == last vertex (left, top), (right, top), (right, bottom), (left, bottom), (left, top), ] if m is None: return Vec3.generate(corners) else: return m.transform_vertices(corners) class FrameRenderer(tl.ContentRenderer): def __init__(self, properties: Properties, backend: BackendInterface): self.properties = properties self.backend = backend def render( self, left: float, bottom: float, right: float, top: float, m: Matrix44 = None, ) -> None: self._render_outline(list(corner_vertices(left, bottom, right, top, m))) def _render_outline(self, vertices: List[Vec3]) -> None: backend = self.backend properties = self.properties prev = vertices.pop(0) for vertex in vertices: backend.draw_line(prev, vertex, properties) prev = vertex def line( self, x1: float, y1: float, x2: float, y2: float, m: Matrix44 = None ) -> None: points = [(x1, y1), (x2, y2)] if m is not None: p1, p2 = m.transform_vertices(points) else: p1, p2 = Vec3.generate(points) self.backend.draw_line(p1, p2, self.properties) class ColumnBackgroundRenderer(FrameRenderer): def __init__( self, properties: Properties, backend: BackendInterface, bg_properties: Properties = None, offset: float = 0, text_frame: bool = False, ): super().__init__(properties, backend) self.bg_properties = bg_properties self.offset = offset # background border offset self.has_text_frame = text_frame def render( self, left: float, bottom: float, right: float, top: float, m: Matrix44 = None, ) -> None: # Important: this is not a clipping box, it is possible to # render anything outside of the given borders! 
offset = self.offset vertices = list( corner_vertices( left - offset, bottom - offset, right + offset, top + offset, m ) ) if self.bg_properties is not None: self.backend.draw_filled_polygon(vertices, self.bg_properties) if self.has_text_frame: self._render_outline(vertices) class TextRenderer(FrameRenderer): """Text content renderer.""" def __init__( self, text: str, cap_height: float, width_factor: float, oblique: float, # angle in degrees properties: Properties, backend: BackendInterface, ): super().__init__(properties, backend) self.text = text self.cap_height = cap_height self.width_factor = width_factor self.oblique = oblique # angle in degrees def render( self, left: float, bottom: float, right: float, top: float, m: Matrix44 = None, ): """Create/render the text content""" sx = 1.0 tx = 0.0 if not math.isclose(self.width_factor, 1.0, rel_tol=1e-6): sx = self.width_factor if abs(self.oblique) > 1e-3: # degrees tx = math.tan(math.radians(self.oblique)) # fmt: off t = Matrix44(( sx, 0.0, 0.0, 0.0, tx, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, left, bottom, 0.0, 1.0 )) # fmt: on if m is not None: t *= m self.backend.draw_text(self.text, t, self.properties, self.cap_height) def complex_mtext_renderer( ctx: RenderContext, backend: BackendInterface, mtext: MText, properties: Properties ) -> None: cmr = ComplexMTextRenderer(ctx, backend, properties) align = tl.LayoutAlignment(mtext.dxf.attachment_point) layout_engine = cmr.layout_engine(mtext) layout_engine.place(align=align) layout_engine.render(mtext.ucs().matrix) class ComplexMTextRenderer(AbstractMTextRenderer): def __init__( self, ctx: RenderContext, backend: BackendInterface, properties: Properties, ): super().__init__() self._render_ctx = ctx self._backend = backend self._properties = properties # Implementation of required AbstractMTextRenderer methods: def word(self, text: str, ctx: MTextContext) -> tl.ContentCell:
def fraction( self, data: Tuple[str, str, str], ctx: MTextContext ) -> tl.ContentCell: upr, lwr, type_ = data if type_: return tl.Fraction( top=self.word(upr, ctx), bottom=self.word(lwr, ctx), stacking=self.get_stacking(type_), # renders just the divider line: renderer=FrameRenderer(self._properties, self._backend), ) else: return self.word(upr, ctx) def get_font_face(self, mtext: MText) -> fonts.FontFace: return self._properties.font # type: ignore def make_bg_renderer(self, mtext: MText) -> tl.ContentRenderer: dxf = mtext.dxf bg_fill = dxf.get("bg_fill", 0) bg_aci = None bg_true_color = None bg_properties: Optional[Properties] = None has_text_frame = False offset = 0 if bg_fill: # The fill scale is a multiple of the initial char height and # a scale of 1, fits exact the outer border # of the column -> offset = 0 offset = dxf.char_height * (dxf.get("box_fill_scale", 1.5) - 1) if bg_fill & const.MTEXT_BG_COLOR: if dxf.hasattr("bg_fill_color"): bg_aci = dxf.bg_fill_color if dxf.hasattr("bg_fill_true_color"): bg_aci = None bg_true_color = dxf.bg_fill_true_color if (bg_fill & 3) == 3: # canvas color = bit 0 and 1 set # can not detect canvas color from DXF document! # do not draw any background: bg_aci = None bg_true_color = None if bg_fill & const.MTEXT_TEXT_FRAME: has_text_frame = True bg_properties = self.new_bg_properties(bg_aci, bg_true_color) return ColumnBackgroundRenderer( self._properties, self._backend, bg_properties, offset=offset, text_frame=has_text_frame, ) # Implementation details of ComplexMTextRenderer: @property def backend(self) -> BackendInterface: return self._backend def resolve_aci_color(self, aci: int) -> Color: return self._render_ctx.resolve_aci_color(aci, self._properties.layer) def new_text_properties( self, properties: Properties, ctx: MTextContext ) -> Properties: new_properties = copy.copy(properties) if ctx.rgb is None: new_properties.color = self.resolve_aci_color(ctx.aci) else: new_properties.color = rgb_to_hex(ctx.rgb) new_properties.font = ctx.font_face return new_properties def new_bg_properties( self, aci: Optional[int], true_color: Optional[int] ) -> Properties: new_properties = copy.copy(self._properties) new_properties.color = ( # canvas background color self._render_ctx.current_layout_properties.background_color ) if true_color is None: if aci is not None: new_properties.color = self.resolve_aci_color(aci) # else canvas background color else: new_properties.color = rgb_to_hex(colors.int2rgb(true_color)) return new_properties
return tl.Text( width=self.get_font(ctx).text_width(text), height=ctx.cap_height, valign=tl.CellAlignment(ctx.align), stroke=self.get_stroke(ctx), renderer=TextRenderer( text, ctx.cap_height, ctx.width_factor, ctx.oblique, self.new_text_properties(self._properties, ctx), self._backend, ))
credentials.py
CLIENT_SECRET = "18f7ba3185ae43df90092e87aedf0b31"
REDIRECT_URI = "http://127.0.01:8000/spotify/redirect"
CLIENT_ID = "a126d5791c694dac84956d88bdeab74f"
__init__.py
"""Make everything from submodules appear at the top level. """ from pandana.utils.mpiutils import *
from pandana.utils.pandasutils import *
aescbc_asm.rs
use super::Encryptor; use aead::{ generic_array::{ typenum::{Unsigned, U0, U16, U32, U48, U64}, GenericArray, }, Aead, Error, NewAead, Payload, }; use openssl::{ hash::MessageDigest, memcmp, pkey::PKey, sign::Signer, symm::{decrypt as openssl_decrypt, encrypt as openssl_encrypt, Cipher as OpenSslCipher}, }; #[cfg(feature = "serde")] use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer}; use zeroize::Zeroize; macro_rules! aes_cbc_hmac_impl { ($name:ident, $cipherid:ident, $keysize:ident, $noncesize:ident, $tagsize:ident, $macid:ident, $visitor:ident) => { #[derive(Debug, Clone, Eq, PartialEq)] pub struct $name { key: GenericArray<u8, $keysize>, } impl Encryptor for $name { type MinSize = U48; } impl NewAead for $name { type KeySize = $keysize; fn new(key: GenericArray<u8, Self::KeySize>) -> Self { Self { key } } } impl Aead for $name { type NonceSize = $noncesize; type TagSize = $tagsize; type CiphertextOverhead = U0; fn encrypt<'msg, 'aad>( &self, nonce: &GenericArray<u8, Self::NonceSize>, plaintext: impl Into<Payload<'msg, 'aad>>, ) -> Result<Vec<u8>, Error> { let payload = plaintext.into(); let cipher = OpenSslCipher::$cipherid(); let mut ciphertext = openssl_encrypt( cipher, &self.key[..cipher.key_len()], Some(nonce.as_slice()), payload.msg, ) .map_err(|_| Error)?; let sslkey = PKey::hmac(&self.key[cipher.key_len()..]).map_err(|_| Error)?; let mut hmac = Signer::new(MessageDigest::$macid(), &sslkey).map_err(|_| Error)?; hmac.update(payload.aad).map_err(|_| Error)?; hmac.update(nonce.as_slice()).map_err(|_| Error)?; hmac.update(ciphertext.as_slice()).map_err(|_| Error)?; let mac = hmac.sign_to_vec().map_err(|_| Error)?; ciphertext.extend_from_slice(mac.as_slice()); Ok(ciphertext) } fn decrypt<'msg, 'aad>( &self, nonce: &GenericArray<u8, Self::NonceSize>, ciphertext: impl Into<Payload<'msg, 'aad>>, ) -> Result<Vec<u8>, Error> { let payload = ciphertext.into(); let cipher = OpenSslCipher::$cipherid(); if payload.msg.len() < Self::TagSize::to_usize() + cipher.key_len() { return Err(Error); } let tag_start = payload.msg.len() - Self::TagSize::to_usize(); let buffer = Vec::from(&payload.msg[..tag_start]); let tag = Vec::from(&payload.msg[tag_start..]); let sslkey = PKey::hmac(&self.key[cipher.key_len()..]).map_err(|_| Error)?; let mut hmac = Signer::new(MessageDigest::$macid(), &sslkey).map_err(|_| Error)?; hmac.update(payload.aad).map_err(|_| Error)?; hmac.update(nonce.as_slice()).map_err(|_| Error)?; hmac.update(buffer.as_slice()).map_err(|_| Error)?; let mac = hmac.sign_to_vec().map_err(|_| Error)?; if memcmp::eq(&mac, &tag) { let plaintext = openssl_decrypt( cipher, &self.key[..cipher.key_len()], Some(nonce.as_slice()), buffer.as_slice(), ) .map_err(|_| Error)?; Ok(plaintext) } else { Err(Error) } } // TODO fn encrypt_in_place_detached( &self, _nonce: &GenericArray<u8, Self::NonceSize>, _associated_data: &[u8], _buffer: &mut [u8], ) -> Result<GenericArray<u8, Self::TagSize>, Error> { unimplemented!(); } // TODO fn decrypt_in_place_detached( &self, _nonce: &GenericArray<u8, Self::NonceSize>, _associated_data: &[u8], _buffer: &mut [u8], _tag: &GenericArray<u8, Self::TagSize>, ) -> Result<(), Error> { unimplemented!(); } } default_impl!($name); drop_impl!($name); #[cfg(feature = "serde")] serialize_impl!($name, $visitor); }; } aes_cbc_hmac_impl!( Aes128CbcHmac256, aes_128_cbc, U32, U16,
aes_cbc_hmac_impl!( Aes256CbcHmac512, aes_256_cbc, U64, U16, U64, sha512, Aes256CbcHmac512Visitor ); #[cfg(test)] mod aes128_cbc_hmac256_tests { tests_impl!(Aes128CbcHmac256); } #[cfg(test)] mod aes256_cbc_hmac512_tests { tests_impl!(Aes256CbcHmac512); }
U32, sha256, Aes128CbcHmac256Visitor );
integration_test.go
package test import ( "fmt" "os" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tsuru/nginx-operator/pkg/apis/nginx/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( testingNamespace = "nginx-operator-integration" testingEnvironment = "NGINX_OPERATOR_INTEGRATION" ) func TestMain(m *testing.M) { if os.Getenv(testingEnvironment) == "" { os.Exit(0) } os.Exit(m.Run()) } func Test_Operator(t *testing.T) { cleanup, err := createNamespace(testingNamespace) if err != nil { t.Fatal(err) } defer cleanup() t.Run("simple.yaml", func(t *testing.T) { if err := apply("testdata/simple.yaml", testingNamespace); err != nil { t.Error(err) } nginx, err := getReadyNginx("simple", 2, 1) require.NoError(t, err) require.NotNil(t, nginx) assert.Equal(t, 2, len(nginx.Status.Pods)) assert.Equal(t, 1, len(nginx.Status.Services)) }) } func getReadyNginx(name string, expectedPods int, expectedSvcs int) (*v1alpha1.Nginx, error) { nginx := &v1alpha1.Nginx{TypeMeta: metav1.TypeMeta{Kind: "Nginx"}} timeout := time.After(60 * time.Second) for { err := get(nginx, name, testingNamespace) if err != nil { fmt.Printf("Err getting nginx %q: %v. Retrying...\n", name, err) } if len(nginx.Status.Pods) == expectedPods && len(nginx.Status.Services) == expectedSvcs { return nginx, nil } select { case <-timeout: return nil, fmt.Errorf("Timeout waiting for nginx status. Last nginx object: %#v. Last error: %v", nginx, err) case <-time.After(time.Millisecond * 100): } } }
payments.test.ts
import wireMockClient from '../../../wireMockClient'; test('createCustomerPayment', async () => { const { adapter, client } = wireMockClient();
createdAt: '2018-03-13T14:02:29+00:00', amount: { value: '20.00', currency: 'EUR', }, description: 'My first API payment', method: null, metadata: { order_id: '1234', }, status: 'open', isCancelable: false, expiresAt: '2018-03-13T14:17:29+00:00', details: null, profileId: 'pfl_2A1gacu42V', sequenceType: 'oneoff', redirectUrl: 'https://example.org/redirect', webhookUrl: 'https://example.org/webhook', _links: { self: { href: 'https://api.mollie.com/v2/payments/tr_44aKxzEbr8', type: 'application/hal+json', }, checkout: { href: 'https://www.mollie.com/payscreen/select-method/44aKxzEbr8', type: 'text/html', }, customer: { href: 'https://api.mollie.com/v2/customers/cst_FhQJRw4s2n', type: 'application/hal+json', }, documentation: { href: 'https://docs.mollie.com/reference/v2/customers-api/create-payment', type: 'text/html', }, }, }); const payment = await bluster(client.customers_payments.create.bind(client.customers_payments))({ amount: { currency: 'EUR', value: '20.00', }, customerId: 'cst_FhQJRw4s2n', description: 'My first API payment', redirectUrl: 'https://example.org/redirect', webhookUrl: 'https://example.org/webhook', metadata: { order_id: '1234', }, }); expect(payment.id).toBe('tr_44aKxzEbr8'); expect(payment.mode).toBe('test'); expect(payment.createdAt).toBe('2018-03-13T14:02:29+00:00'); expect(payment.amount).toEqual({ value: '20.00', currency: 'EUR' }); expect(payment.description).toBe('My first API payment'); expect(payment.method).toBeNull(); expect(payment.metadata).toEqual({ order_id: '1234' }); expect(payment.status).toBe('open'); expect(payment.isCancelable).toBe(false); expect(payment.expiresAt).toBe('2018-03-13T14:17:29+00:00'); expect(payment.details).toBeNull(); expect(payment.profileId).toBe('pfl_2A1gacu42V'); expect(payment.sequenceType).toBe('oneoff'); expect(payment.redirectUrl).toBe('https://example.org/redirect'); expect(payment.webhookUrl).toBe('https://example.org/webhook'); expect(payment._links.self).toEqual({ href: 'https://api.mollie.com/v2/payments/tr_44aKxzEbr8', type: 'application/hal+json' }); expect(payment._links.checkout).toEqual({ href: 'https://www.mollie.com/payscreen/select-method/44aKxzEbr8', type: 'text/html' }); expect(payment._links.customer).toEqual({ href: 'https://api.mollie.com/v2/customers/cst_FhQJRw4s2n', type: 'application/hal+json' }); expect(payment._links.documentation).toEqual({ href: 'https://docs.mollie.com/reference/v2/customers-api/create-payment', type: 'text/html' }); }); test('listCustomerPayouts', async () => { const { adapter, client } = wireMockClient(); adapter.onGet('/customers/cst_FhQJRw4s2n/payments').reply(200, { _embedded: { payments: [ { resource: 'payment', id: 'tr_admNa2tFfa', mode: 'test', createdAt: '2018-03-19T15:00:50+00:00', amount: { value: '100.00', currency: 'EUR', }, description: 'Payment no 1', method: null, metadata: null, status: 'open', isCancelable: false, expiresAt: '2018-03-19T15:15:50+00:00', details: null, locale: 'nl_NL', profileId: 'pfl_7N5qjbu42V', sequenceType: 'oneoff', redirectUrl: 'https://www.example.org/', _links: { self: { href: 'https://api.mollie.com/v2/payments/tr_admNa2tFfa', type: 'application/hal+json', }, checkout: { href: 'https://www.mollie.com/payscreen/select-method/admNa2tFfa', type: 'text/html', }, customer: { href: 'https://api.mollie.com/v2/customers/cst_FhQJRw4s2n', type: 'application/hal+json', }, }, }, { resource: 'payment', id: 'tr_bcaLc7hFfa', mode: 'test', createdAt: '2018-03-19T15:00:50+00:00', amount: { value: '100.00', currency: 'EUR', }, description: 'Payment no 2', 
method: null, metadata: null, status: 'open', isCancelable: false, expiresAt: '2018-03-19T15:15:50+00:00', details: null, locale: 'nl_NL', profileId: 'pfl_7N5qjbu42V', sequenceType: 'oneoff', redirectUrl: 'https://www.example.org/', _links: { self: { href: 'https://api.mollie.com/v2/payments/tr_bcaLc7hFfa', type: 'application/hal+json', }, checkout: { href: 'https://www.mollie.com/payscreen/select-method/bcaLc7hFfa', type: 'text/html', }, customer: { href: 'https://api.mollie.com/v2/customers/cst_FhQJRw4s2n', type: 'application/hal+json', }, }, }, { resource: 'payment', id: 'tr_pslHy1tFfa', mode: 'test', createdAt: '2018-03-19T15:00:50+00:00', amount: { value: '100.00', currency: 'EUR', }, description: 'Payment no 3', method: null, metadata: null, status: 'open', isCancelable: false, expiresAt: '2018-03-19T15:15:50+00:00', details: null, locale: 'nl_NL', profileId: 'pfl_7N5qjbu42V', sequenceType: 'oneoff', redirectUrl: 'https://www.example.org/', _links: { self: { href: 'https://api.mollie.com/v2/payments/tr_pslHy1tFfa', type: 'application/hal+json', }, checkout: { href: 'https://www.mollie.com/payscreen/select-method/pslHy1tFfa', type: 'text/html', }, customer: { href: 'https://api.mollie.com/v2/customers/cst_FhQJRw4s2n', type: 'application/hal+json', }, }, }, ], }, _links: { documentation: { href: 'https://docs.mollie.com/reference/v2/customers-api/list-customer-payments', type: 'text/html', }, self: { href: 'https://api.mollie.com/v2/customers/cst_TkNdP8yPrH/payments?limit=50', type: 'application/hal+json', }, previous: null, next: null, }, count: 3, }); const payments = await bluster(client.customers_payments.all.bind(client.customers_payments))({ customerId: 'cst_FhQJRw4s2n' }); expect(payments.length).toBe(3); expect(payments.links.documentation).toEqual({ href: 'https://docs.mollie.com/reference/v2/customers-api/list-customer-payments', type: 'text/html', }); expect(payments.links.self).toEqual({ href: 'https://api.mollie.com/v2/customers/cst_TkNdP8yPrH/payments?limit=50', type: 'application/hal+json', }); });
adapter.onPost('/customers/cst_FhQJRw4s2n/payments').reply(201, { resource: 'payment', id: 'tr_44aKxzEbr8', mode: 'test',
encoder_test.go
package bencode

import "testing"

func
(t *testing.T) {
	encoded, err := Encode(32)
	expected := "i32e"
	if err != nil {
		t.Errorf("Got error when encoding: %v", err)
	}
	if encoded != expected {
		t.Errorf("Expected encoded value to be %v but got %v", expected, encoded)
	}
}

func TestEncodeList(t *testing.T) {
	input := make([]interface{}, 3)
	input[0] = 1
	input[1] = 2
	input[2] = 3
	encoded, err := Encode(input)
	expected := "li1ei2ei3ee"
	if err != nil {
		t.Errorf("Got error when encoding: %v", err)
	}
	if encoded != expected {
		t.Errorf("Expected encoded value to be %v but got %v", expected, encoded)
	}
}

func TestEncodeString(t *testing.T) {
	input := "test"
	expected := "4:test"
	encoded, err := Encode(input)
	if err != nil {
		t.Errorf("Got error when encoding: %v", err)
	}
	if encoded != expected {
		t.Errorf("Expected encoded value to be %v but got %v", expected, encoded)
	}
}

func TestEncodeDict(t *testing.T) {
	input := make(map[string]interface{})
	input["a"] = 42
	input["b"] = "spam"
	arr := make([]interface{}, 2)
	arr[0] = 42
	arr[1] = "eggs"
	input["c"] = arr
	expected := "d1:ai42e1:b4:spam1:cli42e4:eggsee"
	encoded, err := Encode(input)
	if err != nil {
		t.Errorf("Got error when encoding: %v", err)
	}
	if encoded != expected {
		t.Errorf("Expected encoded value to be %v but got %v", expected, encoded)
	}
}
TestEncodeInt
index.js
import React from "react" import PropTypes from "prop-types" import reduxFormField from "./../../../utils/reduxFormField" import { Select } from "antd" const Option = Select.Option const Select1 = props => { return ( <div style={{ marginBottom: "25px" }}> <p className="label">{props.label}</p> <Select
mode={props.type} style={{ width: "100%" }} optionFilterProp="children" onChange={props.onChange} onFocus={props.onFocus} onBlur={props.handleBlur} filterOption={(input, option) => option.props.children.toLowerCase().indexOf(input.toLowerCase()) >= 0 } > {props.option.map((optionData, index) => { return ( <Option key={index} value={optionData.value}> {optionData.name} </Option> ) })} </Select> </div> ) } Select1.propTypes = { name: PropTypes.string, value: PropTypes.node, label: PropTypes.string, onChange: PropTypes.func, option: PropTypes.array, helperText: PropTypes.string } Select1.defaultProps = { helperText: "" } export default reduxFormField(Select1)
showSearch value={props.value} showSearch={props.showSearch}
mixins.py
from django.contrib.auth import REDIRECT_FIELD_NAME from django.contrib.auth.views import redirect_to_login from django.core.exceptions import PermissionDenied from django.template.response import TemplateResponse from django.urls import reverse from ..utils import default_device class OTPRequiredMixin: """ View mixin which verifies that the user logged in using OTP. .. note:: This mixin should be the left-most base class. """ raise_anonymous = False """ Whether to raise PermissionDenied if the user isn't logged in. """ login_url = None """ If :attr:`raise_anonymous` is set to `False`, this defines where the user will be redirected to. Defaults to ``two_factor:login``. """ redirect_field_name = REDIRECT_FIELD_NAME """
raise_unverified = False """ Whether to raise PermissionDenied if the user isn't verified. """ verification_url = None """ If :attr:`raise_unverified` is set to `False`, this defines where the user will be redirected to. If set to ``None``, an explanation will be shown to the user on why access was denied. """ def get_login_url(self): """ Returns login url to redirect to. """ return self.login_url and str(self.login_url) or reverse('two_factor:login') def get_verification_url(self): """ Returns verification url to redirect to. """ return self.verification_url and str(self.verification_url) def dispatch(self, request, *args, **kwargs): if not request.user or not request.user.is_authenticated or \ (not request.user.is_verified() and default_device(request.user)): # If the user has not authenticated raise or redirect to the login # page. Also if the user just enabled two-factor authentication and # has not yet logged in since should also have the same result. If # the user receives a 'you need to enable TFA' by now, he gets # confuses as TFA has just been enabled. So we either raise or # redirect to the login page. if self.raise_anonymous: raise PermissionDenied() else: return redirect_to_login(request.get_full_path(), self.get_login_url()) if not request.user.is_verified(): if self.raise_unverified: raise PermissionDenied() elif self.get_verification_url(): return redirect_to_login(request.get_full_path(), self.get_verification_url()) else: return TemplateResponse( request=request, template='two_factor/core/otp_required.html', status=403, ) return super().dispatch(request, *args, **kwargs)
URL query name to use for providing the destination URL.
"""
_client_factory.py
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------


def _compute_client_factory(cli_ctx):
    from azure.cli.core.commands.client_factory import get_mgmt_service_client
    from .vendored_sdks.azure_mgmt_compute._compute_management_client import ComputeManagementClient
    return get_mgmt_service_client(cli_ctx, ComputeManagementClient)


def cf_galleries(cli_ctx, _):
    return _compute_client_factory(cli_ctx).galleries


def cf_gallery_images(cli_ctx, _):
    return _compute_client_factory(cli_ctx).gallery_images


def cf_community_gallery(cli_ctx, *_):
    return _compute_client_factory(cli_ctx).community_galleries


def cf_community_gallery_image(cli_ctx, *_):
    return _compute_client_factory(cli_ctx).community_gallery_images


def cf_community_gallery_image_version(cli_ctx, *_):
    return _compute_client_factory(cli_ctx).community_gallery_image_versions


def cf_community_gallery_sharing_profile(cli_ctx, *_):
    return _compute_client_factory(cli_ctx).gallery_sharing_profile
# --------------------------------------------------------------------------------------------
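A hedged sketch of how client factories such as cf_galleries are typically referenced when registering commands in an Azure CLI module's commands.py; the command group, operations template, and command names below are illustrative, not taken from this module.

# Hedged example: wiring a client factory into command registration.
# The 'sig' group, operations_tmpl, and 'list' mapping are hypothetical here.
from azure.cli.core.commands import CliCommandType

def load_command_table(self, _):
    galleries_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.compute.operations#GalleriesOperations.{}',
        client_factory=cf_galleries,
    )
    with self.command_group('sig', galleries_sdk, client_factory=cf_galleries) as g:
        g.command('list', 'list_by_resource_group')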
quantize.py
# Copyright (c) 2016, The Bifrost Authors. All rights reserved. # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of The Bifrost Authors nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY # EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY # OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from libbifrost import _bf, _check, _get, _fast_call from ndarray import asarray def quantize(src, dst, scale=1.):
src_bf = asarray(src).as_BFarray() dst_bf = asarray(dst).as_BFarray() _fast_call(_bf.Quantize, src_bf, dst_bf, scale) return dst
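For intuition, a small NumPy sketch of the scale-and-cast step that a call like quantize(src, dst, scale) typically performs; this is conceptual only and does not use Bifrost's BFarray machinery.

# Hedged, conceptual example of float -> int8 quantization with a scale factor.
# It mirrors the intent of the wrapped BFQuantize call, not its implementation.
import numpy as np

def quantize_to_int8(src, scale=1.0):
    # Scale, round to nearest integer, and clip into the int8 range.
    scaled = np.rint(src * scale)
    return np.clip(scaled, -128, 127).astype(np.int8)

data = np.array([0.1, -0.5, 3.7], dtype=np.float32)
print(quantize_to_int8(data, scale=10.0))  # -> [ 1 -5 37]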
useAuth.ts
import { useCallback, useMemo } from "react" import { atom, useRecoilState } from "recoil" import { encode } from "js-base64" import { CreateTxOptions, Tx, isTxError } from "@terra-money/terra.js" import { AccAddress, SignDoc, PublicKey } from "@terra-money/terra.js" import { MnemonicKey, RawKey, SignatureV2 } from "@terra-money/terra.js" import { useChainID } from "data/wallet" import { useLCDClient } from "data/queries/lcdClient" import is from "../scripts/is" import { PasswordError } from "../scripts/keystore" import { getDecryptedKey, testPassword } from "../scripts/keystore" import { getWallet, storeWallet } from "../scripts/keystore" import { clearWallet, lockWallet } from "../scripts/keystore" import { getStoredWallet, getStoredWallets } from "../scripts/keystore" import encrypt from "../scripts/encrypt" import * as ledger from "../ledger/ledger" import LedgerKey from "../ledger/LedgerKey" import useAvailable from "./useAvailable" const walletState = atom({ key: "wallet", default: getWallet(), }) const useAuth = () => { const lcd = useLCDClient() const available = useAvailable() const [wallet, setWallet] = useRecoilState(walletState) const wallets = getStoredWallets() /* connect */ const connect = useCallback( (name: string) => { const storedWallet = getStoredWallet(name) const { address, lock } = storedWallet if (lock) throw new Error("Wallet is locked") const wallet = is.multisig(storedWallet) ? { name, address, multisig: true } : { name, address } storeWallet(wallet) setWallet(wallet) }, [setWallet] ) const connectPreconfigured = useCallback( (wallet: PreconfiguredWallet) => { storeWallet(wallet) setWallet(wallet) }, [setWallet] ) const connectLedger = useCallback( (address: AccAddress) => { const wallet = { address, ledger: true as const } storeWallet(wallet) setWallet(wallet) }, [setWallet] ) /* connected */ const connectedWallet = useMemo(() => { if (!is.local(wallet)) return return wallet }, [wallet]) const getConnectedWallet = useCallback(() => { if (!connectedWallet) throw new Error("Wallet is not defined") return connectedWallet }, [connectedWallet]) /* disconnected */ const disconnect = useCallback(() => { clearWallet() setWallet(undefined) }, [setWallet]) const lock = useCallback(() => { const { name } = getConnectedWallet() lockWallet(name) disconnect() }, [disconnect, getConnectedWallet]) /* helpers */ const getKey = (password: string) => { const { name } = getConnectedWallet() return getDecryptedKey({ name, password }) } const getLedgerKey = async () => { const pk = await ledger.getPubKey() if (!pk) throw new Error("Public key is not defined") const publicKey = PublicKey.fromAmino({ type: "tendermint/PubKeySecp256k1", value: pk.toString("base64"), }) const key = new LedgerKey(publicKey) return key } /* manage: export */ const encodeEncryptedWallet = (password: string) => { const { name, address } = getConnectedWallet() const key = getKey(password) const data = { name, address, encrypted_key: encrypt(key, password) } return encode(JSON.stringify(data)) } /* form */ const validatePassword = (password: string) => { try { const { name } = getConnectedWallet() return testPassword({ name, password }) } catch (error) { return "Incorrect password" } } /* tx */ const chainID = useChainID() const create = async (txOptions: CreateTxOptions) => { if (!wallet) throw new Error("Wallet is not defined") const { address } = wallet return await lcd.tx.create([{ address }], txOptions) } const createSignature = async ( tx: Tx, address: AccAddress, password = "" ) => { if (!wallet) throw 
new Error("Wallet is not defined") const accountInfo = await lcd.auth.accountInfo(address) const doc = new SignDoc( lcd.config.chainID, accountInfo.getAccountNumber(), accountInfo.getSequenceNumber(), tx.auth_info, tx.body ) if (is.ledger(wallet)) { const key = await getLedgerKey() return await key.createSignatureAmino(doc) } else { const pk = getKey(password) if (!pk) throw new PasswordError("Incorrect password") const key = new RawKey(Buffer.from(pk, "hex")) return await key.createSignatureAmino(doc) } } const sign = async (txOptions: CreateTxOptions, password = "") => { if (!wallet) throw new Error("Wallet is not defined") if (is.ledger(wallet)) { const key = await getLedgerKey() const wallet = lcd.wallet(key) const { account_number: accountNumber, sequence } = await wallet.accountNumberAndSequence() const signMode = SignatureV2.SignMode.SIGN_MODE_LEGACY_AMINO_JSON const unsignedTx = await create(txOptions) const options = { chainID, accountNumber, sequence, signMode } return await key.signTx(unsignedTx, options) } else if (is.preconfigured(wallet)) { const key = new MnemonicKey({ mnemonic: wallet.mnemonic }) return await lcd.wallet(key).createAndSignTx(txOptions) } else { const pk = getKey(password) if (!pk) throw new PasswordError("Incorrect password") const key = new RawKey(Buffer.from(pk, "hex")) const wallet = lcd.wallet(key) return await wallet.createAndSignTx(txOptions) } } const signBytes = (bytes: Buffer, password = "") => { if (!wallet) throw new Error("Wallet is not defined") if (is.ledger(wallet)) { throw new Error("Ledger can not sign arbitrary data") } else { const pk = getKey(password) if (!pk) throw new PasswordError("Incorrect password") const key = new RawKey(Buffer.from(pk, "hex")) const { signature, recid } = key.ecdsaSign(bytes) if (!signature) throw new Error("Signature is undefined") return { recid, signature: Buffer.from(signature).toString("base64"), public_key: key.publicKey?.toAmino().value as string, } } } const post = async (txOptions: CreateTxOptions, password = "") => { if (!wallet) throw new Error("Wallet is not defined")
const result = await lcd.tx.broadcastSync(signedTx) if (isTxError(result)) throw new Error(result.raw_log) return result } return { wallet, wallets, getConnectedWallet, connectedWallet, connect, connectPreconfigured, connectLedger, disconnect, lock, available, encodeEncryptedWallet, validatePassword, createSignature, create, signBytes, sign, post, } } export default useAuth
const signedTx = await sign(txOptions, password)
pyasstosrt.py
import os import re from os.path import isfile from pathlib import Path from typing import AnyStr, List, Union, Optional from .dialogue import Dialogue class Subtitle: """ Converting ASS to SRT. :param filepath: Path to a file that contains text in Advanced SubStation Alpha format """ dialog_mask = re.compile(r"Dialogue: \d+?,(\d:\d{2}:\d{2}.\d{2}),(\d:\d{2}:\d{2}.\d{2}),.*?,\d+,\d+,\d+,.*?,(.*)") def __init__(self, filepath: Union[str, os.PathLike]): if not isfile(filepath): raise FileNotFoundError('"{}" does not exist'.format(filepath)) if isinstance(filepath, os.PathLike): self.filepath: AnyStr = str(filepath) self.file: AnyStr = filepath.stem elif isinstance(filepath, str): self.filepath: AnyStr = filepath self.file: AnyStr = Path(filepath).stem else: raise TypeError('"{}" is not of type str or os.PathLike'.format(filepath)) self.raw_text: AnyStr = self.get_text() self.dialogues: List = [] def get_text(self) -> AnyStr: """ Reads the file and returns the complete contents :return: File contents """ return Path(self.filepath).read_text(encoding="utf8") def convert(self): """ Convert ASS format subtitles to SRT. :return: """ cleaning_old_format = re.compile(r"{.*?}") dialogs = re.findall(self.dialog_mask, re.sub(cleaning_old_format, "", self.raw_text)) dialogs = sorted(list(filter(lambda x: x[2], dialogs))) self.subtitle_formatting(dialogs) @staticmethod def
(raw_text: str) -> str: """ Clear the dialogue text of unnecessary tags. :param raw_text: Dialogue text with ASS whitespace markup :return: Dialogue text with whitespace normalized and line breaks applied """ text = raw_text.replace(r'\h', '\xa0').strip() line_text = text.split(r'\N') return '\n'.join(item.strip() for item in line_text).strip() def subtitle_formatting(self, dialogues: List): """ Format ASS dialogues into SRT dialogues. :param dialogues: Prepared dialogues :return: None; the prepared dialogues are appended to self.dialogues """ for index, values in enumerate(dialogues, start=1): start, end, text = values text = self.text_clearing(text.strip()) dialogue = Dialogue(index, start, end, text) self.dialogues.append(dialogue) def export( self, output_dir: AnyStr = None, encoding: AnyStr = "utf8", output_dialogues: bool = False ) -> Optional[List]: """ If the output_dialogues parameter is False, exports the subtitles to a file. :param output_dir: Directory to export the SubRip file to :param encoding: Encoding to use when saving the file :param output_dialogues: Whether to return the list of dialogues instead of creating a SubRip file :return: List of dialogues when output_dialogues is True, otherwise None """ self.convert() if output_dialogues: return self.dialogues path = Path(self.filepath) file = self.file + ".srt" if output_dir: Path(output_dir).mkdir(parents=True, exist_ok=True) out_path = os.path.join(output_dir, file) else: out_path = os.path.join(path.parent, file) with open(out_path, encoding=encoding, mode="w") as writer: for dialogue in self.dialogues: writer.write(str(dialogue))
text_clearing
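A short usage sketch of the Subtitle class defined above; the input file name and output directory are hypothetical.

# Hedged example: converting an .ass file to .srt with the Subtitle class above.
sub = Subtitle("movie.ass")
sub.export(output_dir="srt", encoding="utf8")  # writes srt/movie.srt

# Alternatively, get the parsed Dialogue objects instead of writing a file.
dialogues = Subtitle("movie.ass").export(output_dialogues=True)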
__init__.py
import os from shutil import copyfile from logic_bank.util import prt def setup_db():
""" copy db/database-gold.db over db/database.db""" print("\n" + prt("restoring database-gold\n")) basedir = os.path.abspath(os.path.dirname(__file__)) basedir = os.path.dirname(basedir) print("\n********************************\n" " IMPORTANT - create database.db from database-gold.db in " + basedir + "/nw/db/\n" + " - from -- " + prt("") + "\n********************************") nw_loc = os.path.join(basedir, "db/database.db") nw_source = os.path.join(basedir, "db/database-gold.db") copyfile(src=nw_source, dst=nw_loc)
trace.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: opentelemetry/proto/trace/v1/trace.proto package v1 import ( encoding_binary "encoding/binary" fmt "fmt" io "io" math "math" math_bits "math/bits" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" go_opentelemetry_io_collector_model_internal_data "go.opentelemetry.io/collector/model/internal/data" v11 "go.opentelemetry.io/collector/model/internal/data/protogen/common/v1" v1 "go.opentelemetry.io/collector/model/internal/data/protogen/resource/v1" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // SpanKind is the type of span. Can be used to specify additional relationships between spans // in addition to a parent/child relationship. type Span_SpanKind int32 const ( // Unspecified. Do NOT use as default. // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 // Indicates that the span represents an internal operation within an application, // as opposed to an operation happening at the boundaries. Default value. Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 // Indicates that the span covers server-side handling of an RPC or other // remote network request. Span_SPAN_KIND_SERVER Span_SpanKind = 2 // Indicates that the span describes a request to some remote service. Span_SPAN_KIND_CLIENT Span_SpanKind = 3 // Indicates that the span describes a producer sending a message to a broker. // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship // between producer and consumer spans. A PRODUCER span ends when the message was accepted // by the broker while the logical processing of the message might span a much longer time. Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 // Indicates that the span describes consumer receiving a message from a broker. // Like the PRODUCER kind, there is often no direct critical path latency relationship // between producer and consumer spans. Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 ) var Span_SpanKind_name = map[int32]string{ 0: "SPAN_KIND_UNSPECIFIED", 1: "SPAN_KIND_INTERNAL", 2: "SPAN_KIND_SERVER", 3: "SPAN_KIND_CLIENT", 4: "SPAN_KIND_PRODUCER", 5: "SPAN_KIND_CONSUMER", } var Span_SpanKind_value = map[string]int32{ "SPAN_KIND_UNSPECIFIED": 0, "SPAN_KIND_INTERNAL": 1, "SPAN_KIND_SERVER": 2, "SPAN_KIND_CLIENT": 3, "SPAN_KIND_PRODUCER": 4, "SPAN_KIND_CONSUMER": 5, } func (x Span_SpanKind) String() string { return proto.EnumName(Span_SpanKind_name, int32(x)) } func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{3, 0} } // For the semantics of status codes see // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status type Status_StatusCode int32 const ( // The default status. Status_STATUS_CODE_UNSET Status_StatusCode = 0 // The Span has been validated by an Application developers or Operator to have // completed successfully. Status_STATUS_CODE_OK Status_StatusCode = 1 // The Span contains an error. 
Status_STATUS_CODE_ERROR Status_StatusCode = 2 ) var Status_StatusCode_name = map[int32]string{ 0: "STATUS_CODE_UNSET", 1: "STATUS_CODE_OK", 2: "STATUS_CODE_ERROR", } var Status_StatusCode_value = map[string]int32{ "STATUS_CODE_UNSET": 0, "STATUS_CODE_OK": 1, "STATUS_CODE_ERROR": 2, } func (x Status_StatusCode) String() string { return proto.EnumName(Status_StatusCode_name, int32(x)) } func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{4, 0} } // TracesData represents the traces data that can be stored in a persistent storage, // OR can be embedded by other protocols that transfer OTLP traces data but do // not implement the OTLP protocol. // // The main difference between this message and collector protocol is that // in this message there will not be any "control" or "metadata" specific to // OTLP protocol. // // When new fields are added into this message, the OTLP request MUST be updated // as well. type TracesData struct { // An array of ResourceSpans. // For data coming from a single resource this array will typically contain // one element. Intermediary nodes that receive data from multiple origins // typically batch the data before forwarding further and in that case this // array will contain multiple elements. ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` } func (m *TracesData) Reset() { *m = TracesData{} } func (m *TracesData) String() string { return proto.CompactTextString(m) } func (*TracesData) ProtoMessage() {} func (*TracesData) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{0} } func (m *TracesData) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *TracesData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_TracesData.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *TracesData) XXX_Merge(src proto.Message) { xxx_messageInfo_TracesData.Merge(m, src) } func (m *TracesData) XXX_Size() int { return m.Size() } func (m *TracesData) XXX_DiscardUnknown() { xxx_messageInfo_TracesData.DiscardUnknown(m) } var xxx_messageInfo_TracesData proto.InternalMessageInfo func (m *TracesData) GetResourceSpans() []*ResourceSpans { if m != nil { return m.ResourceSpans } return nil } // A collection of InstrumentationLibrarySpans from a Resource. type ResourceSpans struct { // The resource for the spans in this message. // If this field is not set then no resource info is known. Resource v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource"` // A list of InstrumentationLibrarySpans that originate from a resource. InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` // This schema_url applies to the data in the "resource" field. It does not apply // to the data in the "instrumentation_library_spans" field which have their own // schema_url field. 
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } func (m *ResourceSpans) Reset() { *m = ResourceSpans{} } func (m *ResourceSpans) String() string { return proto.CompactTextString(m) } func (*ResourceSpans) ProtoMessage() {} func (*ResourceSpans) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{1} } func (m *ResourceSpans) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ResourceSpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ResourceSpans.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ResourceSpans) XXX_Merge(src proto.Message) { xxx_messageInfo_ResourceSpans.Merge(m, src) } func (m *ResourceSpans) XXX_Size() int { return m.Size() } func (m *ResourceSpans) XXX_DiscardUnknown() { xxx_messageInfo_ResourceSpans.DiscardUnknown(m) } var xxx_messageInfo_ResourceSpans proto.InternalMessageInfo func (m *ResourceSpans) GetResource() v1.Resource { if m != nil { return m.Resource } return v1.Resource{} } func (m *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { if m != nil { return m.InstrumentationLibrarySpans } return nil } func (m *ResourceSpans) GetSchemaUrl() string { if m != nil { return m.SchemaUrl } return "" } // A collection of Spans produced by an InstrumentationLibrary. type InstrumentationLibrarySpans struct { // The instrumentation library information for the spans in this message. // Semantically when InstrumentationLibrary isn't set, it is equivalent with // an empty instrumentation library name (unknown). InstrumentationLibrary v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library"` // A list of Spans that originate from an instrumentation library. Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` // This schema_url applies to all spans and span events in the "spans" field. 
SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` } func (m *InstrumentationLibrarySpans) Reset() { *m = InstrumentationLibrarySpans{} } func (m *InstrumentationLibrarySpans) String() string { return proto.CompactTextString(m) } func (*InstrumentationLibrarySpans) ProtoMessage() {} func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{2} } func (m *InstrumentationLibrarySpans) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *InstrumentationLibrarySpans) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_InstrumentationLibrarySpans.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *InstrumentationLibrarySpans) XXX_Merge(src proto.Message) { xxx_messageInfo_InstrumentationLibrarySpans.Merge(m, src) } func (m *InstrumentationLibrarySpans) XXX_Size() int { return m.Size() } func (m *InstrumentationLibrarySpans) XXX_DiscardUnknown() { xxx_messageInfo_InstrumentationLibrarySpans.DiscardUnknown(m) } var xxx_messageInfo_InstrumentationLibrarySpans proto.InternalMessageInfo func (m *InstrumentationLibrarySpans) GetInstrumentationLibrary() v11.InstrumentationLibrary { if m != nil { return m.InstrumentationLibrary } return v11.InstrumentationLibrary{} } func (m *InstrumentationLibrarySpans) GetSpans() []*Span { if m != nil { return m.Spans } return nil } func (m *InstrumentationLibrarySpans) GetSchemaUrl() string { if m != nil { return m.SchemaUrl } return "" } // Span represents a single operation within a trace. Spans can be // nested to form a trace tree. Spans may also be linked to other spans // from the same or different trace and form graphs. Often, a trace // contains a root span that describes the end-to-end latency, and one // or more subspans for its sub-operations. A trace can also contain // multiple root spans, or none at all. Spans do not need to be // contiguous - there may be gaps or overlaps between spans in a trace. // // The next available field id is 17. type Span struct { // A unique identifier for a trace. All spans from the same trace share // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes // is considered invalid. // // This field is semantically required. Receiver should generate new // random trace_id if empty or invalid trace_id was received. // // This field is required. TraceId go_opentelemetry_io_collector_model_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/model/internal/data.TraceID" json:"trace_id"` // A unique identifier for a span within a trace, assigned when the span // is created. The ID is an 8-byte array. An ID with all zeroes is considered // invalid. // // This field is semantically required. Receiver should generate new // random span_id if empty or invalid span_id was received. // // This field is required. SpanId go_opentelemetry_io_collector_model_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/model/internal/data.SpanID" json:"span_id"` // trace_state conveys information about request position in multiple distributed tracing graphs. 
// It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header // See also https://github.com/w3c/distributed-tracing for more details about this field. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` // The `span_id` of this span's parent span. If this is a root span, then this // field must be empty. The ID is an 8-byte array. ParentSpanId go_opentelemetry_io_collector_model_internal_data.SpanID `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3,customtype=go.opentelemetry.io/collector/model/internal/data.SpanID" json:"parent_span_id"` // A description of the span's operation. // // For example, the name can be a qualified method name or a file name // and a line number where the operation is called. A best practice is to use // the same display name at the same call point in an application. // This makes it easier to correlate spans in different traces. // // This field is semantically required to be set to non-empty string. // Empty value is equivalent to an unknown span name. // // This field is required. Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` // Distinguishes between spans generated in a particular context. For example, // two spans with the same name may be distinguished using `CLIENT` (caller) // and `SERVER` (callee) to identify queueing latency associated with the span. Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` // start_time_unix_nano is the start time of the span. On the client side, this is the time // kept by the local machine where the span execution starts. On the server side, this // is the time when the server's application handler starts running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` // end_time_unix_nano is the end time of the span. On the client side, this is the time // kept by the local machine where the span execution ends. On the server side, this // is the time when the server application handler stops running. // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. // // This field is semantically required and it is expected that end_time >= start_time. EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` // attributes is a collection of key/value pairs. Note, global attributes // like server name can be set using the resource API. Examples of attributes: // // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" // "/http/server_latency": 300 // "abc.com/myattribute": true // "abc.com/score": 10.239 // // The OpenTelemetry API specification further restricts the allowed value types: // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). 
Attributes []v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes"` // dropped_attributes_count is the number of attributes that were discarded. Attributes // can be discarded because their keys are too long or because there are too many // attributes. If this value is 0, then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` // events is a collection of Event items. Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` // dropped_events_count is the number of dropped events. If the value is 0, then no // events were dropped. DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` // links is a collection of Links, which are references from this span to a span // in the same or different trace. Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` // dropped_links_count is the number of dropped links after the maximum size was // enforced. If this value is 0, then no links were dropped. DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` // An optional final status for this span. Semantically when Status isn't set, it means // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). Status Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status"` } func (m *Span) Reset() { *m = Span{} } func (m *Span) String() string { return proto.CompactTextString(m) } func (*Span) ProtoMessage() {} func (*Span) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{3} } func (m *Span) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Span.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Span) XXX_Merge(src proto.Message) { xxx_messageInfo_Span.Merge(m, src) } func (m *Span) XXX_Size() int { return m.Size() } func (m *Span) XXX_DiscardUnknown() { xxx_messageInfo_Span.DiscardUnknown(m) } var xxx_messageInfo_Span proto.InternalMessageInfo func (m *Span) GetTraceState() string { if m != nil { return m.TraceState } return "" } func (m *Span) GetName() string { if m != nil { return m.Name } return "" } func (m *Span) GetKind() Span_SpanKind { if m != nil { return m.Kind } return Span_SPAN_KIND_UNSPECIFIED } func (m *Span) GetStartTimeUnixNano() uint64 { if m != nil { return m.StartTimeUnixNano } return 0 } func (m *Span) GetEndTimeUnixNano() uint64 { if m != nil { return m.EndTimeUnixNano } return 0 } func (m *Span) GetAttributes() []v11.KeyValue { if m != nil { return m.Attributes } return nil } func (m *Span) GetDroppedAttributesCount() uint32 { if m != nil { return m.DroppedAttributesCount } return 0 } func (m *Span) GetEvents() []*Span_Event { if m != nil { return m.Events } return nil } func (m *Span) GetDroppedEventsCount() uint32 { if m != nil { return m.DroppedEventsCount } return 0 } func (m *Span) GetLinks() []*Span_Link { if m != nil { return m.Links } return nil } func (m *Span) GetDroppedLinksCount() uint32 { if m != nil { return m.DroppedLinksCount } return 0 } func (m *Span) GetStatus() Status { if m != nil { return 
m.Status } return Status{} } // Event is a time-stamped annotation of the span, consisting of user-supplied // text description and key-value pairs. type Span_Event struct { // time_unix_nano is the time the event occurred. TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` // name of the event. // This field is semantically required to be set to non-empty string. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // attributes is a collection of attribute key/value pairs on the event. // Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). Attributes []v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (m *Span_Event) Reset() { *m = Span_Event{} } func (m *Span_Event) String() string { return proto.CompactTextString(m) } func (*Span_Event) ProtoMessage() {} func (*Span_Event) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{3, 0} } func (m *Span_Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Span_Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Span_Event.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Span_Event) XXX_Merge(src proto.Message) { xxx_messageInfo_Span_Event.Merge(m, src) } func (m *Span_Event) XXX_Size() int { return m.Size() } func (m *Span_Event) XXX_DiscardUnknown() { xxx_messageInfo_Span_Event.DiscardUnknown(m) } var xxx_messageInfo_Span_Event proto.InternalMessageInfo func (m *Span_Event) GetTimeUnixNano() uint64 { if m != nil { return m.TimeUnixNano } return 0 } func (m *Span_Event) GetName() string { if m != nil { return m.Name } return "" } func (m *Span_Event) GetAttributes() []v11.KeyValue { if m != nil { return m.Attributes } return nil } func (m *Span_Event) GetDroppedAttributesCount() uint32 { if m != nil { return m.DroppedAttributesCount } return 0 } // A pointer from the current span to another span in the same trace or in a // different trace. For example, this can be used in batching operations, // where a single batch handler processes multiple requests from different // traces or when the handler receives a request from a different project. type Span_Link struct { // A unique identifier of a trace that this linked span is part of. The ID is a // 16-byte array. TraceId go_opentelemetry_io_collector_model_internal_data.TraceID `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3,customtype=go.opentelemetry.io/collector/model/internal/data.TraceID" json:"trace_id"` // A unique identifier for the linked span. The ID is an 8-byte array. SpanId go_opentelemetry_io_collector_model_internal_data.SpanID `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3,customtype=go.opentelemetry.io/collector/model/internal/data.SpanID" json:"span_id"` // The trace_state associated with the link. TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` // attributes is a collection of attribute key/value pairs on the link. 
// Attribute keys MUST be unique (it is not allowed to have more than one // attribute with the same key). Attributes []v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes"` // dropped_attributes_count is the number of dropped attributes. If the value is 0, // then no attributes were dropped. DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` } func (m *Span_Link) Reset() { *m = Span_Link{} } func (m *Span_Link) String() string { return proto.CompactTextString(m) } func (*Span_Link) ProtoMessage() {} func (*Span_Link) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{3, 1} } func (m *Span_Link) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Span_Link) XXX_Merge(src proto.Message) { xxx_messageInfo_Span_Link.Merge(m, src) } func (m *Span_Link) XXX_Size() int { return m.Size() } func (m *Span_Link) XXX_DiscardUnknown() { xxx_messageInfo_Span_Link.DiscardUnknown(m) } var xxx_messageInfo_Span_Link proto.InternalMessageInfo func (m *Span_Link) GetTraceState() string { if m != nil { return m.TraceState } return "" } func (m *Span_Link) GetAttributes() []v11.KeyValue { if m != nil { return m.Attributes } return nil } func (m *Span_Link) GetDroppedAttributesCount() uint32 { if m != nil { return m.DroppedAttributesCount } return 0 } // The Status type defines a logical error model that is suitable for different // programming environments, including REST APIs and RPC APIs. type Status struct { // A developer-facing human readable error message. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // The status code. 
Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` } func (m *Status) Reset() { *m = Status{} } func (m *Status) String() string { return proto.CompactTextString(m) } func (*Status) ProtoMessage() {} func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor_5c407ac9c675a601, []int{4} } func (m *Status) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_Status.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *Status) XXX_Merge(src proto.Message) { xxx_messageInfo_Status.Merge(m, src) } func (m *Status) XXX_Size() int { return m.Size() } func (m *Status) XXX_DiscardUnknown() { xxx_messageInfo_Status.DiscardUnknown(m) } var xxx_messageInfo_Status proto.InternalMessageInfo func (m *Status) GetMessage() string { if m != nil { return m.Message } return "" } func (m *Status) GetCode() Status_StatusCode { if m != nil { return m.Code } return Status_STATUS_CODE_UNSET } func init() { proto.RegisterEnum("opentelemetry.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value) proto.RegisterEnum("opentelemetry.proto.trace.v1.Status_StatusCode", Status_StatusCode_name, Status_StatusCode_value) proto.RegisterType((*TracesData)(nil), "opentelemetry.proto.trace.v1.TracesData") proto.RegisterType((*ResourceSpans)(nil), "opentelemetry.proto.trace.v1.ResourceSpans") proto.RegisterType((*InstrumentationLibrarySpans)(nil), "opentelemetry.proto.trace.v1.InstrumentationLibrarySpans") proto.RegisterType((*Span)(nil), "opentelemetry.proto.trace.v1.Span") proto.RegisterType((*Span_Event)(nil), "opentelemetry.proto.trace.v1.Span.Event") proto.RegisterType((*Span_Link)(nil), "opentelemetry.proto.trace.v1.Span.Link") proto.RegisterType((*Status)(nil), "opentelemetry.proto.trace.v1.Status") } func init() { proto.RegisterFile("opentelemetry/proto/trace/v1/trace.proto", fileDescriptor_5c407ac9c675a601) } var fileDescriptor_5c407ac9c675a601 = []byte{ // 984 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, 0x14, 0xcf, 0xa4, 0x4e, 0xda, 0xbe, 0xb6, 0x59, 0x77, 0xe8, 0x16, 0xd3, 0x65, 0xd3, 0x28, 0x5a, 0x89, 0xc0, 0x4a, 0x09, 0x5b, 0x84, 0xd4, 0x3d, 0x20, 0xb6, 0x4d, 0x8c, 0x64, 0xd2, 0x4d, 0xa2, 0x49, 0xb2, 0x12, 0x08, 0xc9, 0xb8, 0xf1, 0x10, 0xac, 0xda, 0xe3, 0xc8, 0x9e, 0x54, 0xdb, 0x03, 0x12, 0x1f, 0x80, 0x03, 0x57, 0x3e, 0x0c, 0xf7, 0x15, 0xa7, 0x3d, 0x22, 0x0e, 0x2b, 0xd4, 0x9e, 0x90, 0xf8, 0x04, 0x9c, 0xd0, 0xcc, 0xd8, 0x6d, 0x5c, 0xa5, 0xe9, 0xae, 0xc4, 0x5e, 0xb8, 0x24, 0xe3, 0xdf, 0x7b, 0xef, 0xf7, 0x7e, 0xef, 0x8f, 0xad, 0x81, 0x5a, 0x38, 0xa1, 0x8c, 0x53, 0x9f, 0x06, 0x94, 0x47, 0x67, 0x8d, 0x49, 0x14, 0xf2, 0xb0, 0xc1, 0x23, 0x67, 0x44, 0x1b, 0xa7, 0x8f, 0xd4, 0xa1, 0x2e, 0x41, 0xfc, 0x7e, 0xc6, 0x53, 0x81, 0x75, 0xe5, 0x70, 0xfa, 0x68, 0x67, 0x6b, 0x1c, 0x8e, 0x43, 0x15, 0x2d, 0x4e, 0xca, 0xbc, 0xf3, 0xd1, 0x3c, 0xf6, 0x51, 0x18, 0x04, 0x21, 0x13, 0xf4, 0xea, 0x94, 0xf8, 0xd6, 0xe7, 0xf9, 0x46, 0x34, 0x0e, 0xa7, 0x91, 0x12, 0x93, 0x9e, 0x95, 0x7f, 0xf5, 0x5b, 0x80, 0x81, 0xc8, 0x1e, 0xb7, 0x1c, 0xee, 0x60, 0x02, 0xa5, 0xd4, 0x6e, 0xc7, 0x13, 0x87, 0xc5, 0x06, 0xaa, 0x2c, 0xd5, 0xd6, 0xf6, 0x1e, 0xd6, 0x17, 0xc9, 0xae, 0x93, 0x24, 0xa6, 0x2f, 0x42, 0xc8, 0x46, 0x34, 0xfb, 0x58, 0xfd, 0x07, 
0xc1, 0x46, 0xc6, 0x01, 0xb7, 0x61, 0x25, 0x75, 0x31, 0x50, 0x05, 0xd5, 0xd6, 0xf6, 0x3e, 0x9c, 0xcb, 0x7f, 0x29, 0x75, 0x26, 0xc5, 0xa1, 0xf6, 0xe2, 0xd5, 0x6e, 0x8e, 0x5c, 0x12, 0xe0, 0x1f, 0xe0, 0xbe, 0xc7, 0x62, 0x1e, 0x4d, 0x03, 0xca, 0xb8, 0xc3, 0xbd, 0x90, 0xd9, 0xbe, 0x77, 0x1c, 0x39, 0xd1, 0x59, 0x52, 0x41, 0x5e, 0x56, 0xf0, 0x78, 0x71, 0x05, 0x56, 0x96, 0xe2, 0x48, 0x31, 0xa8, 0x7a, 0xee, 0x79, 0x37, 0x1b, 0xf1, 0x7d, 0x80, 0x78, 0xf4, 0x3d, 0x0d, 0x1c, 0x7b, 0x1a, 0xf9, 0xc6, 0x52, 0x05, 0xd5, 0x56, 0xc9, 0xaa, 0x42, 0x86, 0x91, 0x5f, 0xfd, 0x1b, 0xc1, 0xbd, 0x05, 0xdc, 0x98, 0xc3, 0xbb, 0x37, 0xa8, 0x4f, 0x3a, 0xf3, 0xe9, 0x5c, 0xdd, 0xc9, 0xc8, 0x6f, 0x14, 0x9e, 0x74, 0x69, 0x7b, 0xbe, 0x72, 0xbc, 0x0f, 0x85, 0xd9, 0xde, 0x54, 0x17, 0xf7, 0x46, 0x28, 0x25, 0x2a, 0xe0, 0xb6, 0x72, 0x7f, 0xdc, 0x00, 0x4d, 0xb8, 0xe3, 0x6f, 0x60, 0x45, 0xc6, 0xdb, 0x9e, 0x2b, 0x0b, 0x59, 0x3f, 0x3c, 0x10, 0x8a, 0xfe, 0x78, 0xb5, 0xfb, 0x78, 0x1c, 0x5e, 0x4b, 0xe7, 0x89, 0x65, 0xf6, 0x7d, 0x3a, 0xe2, 0x61, 0xd4, 0x08, 0x42, 0x97, 0xfa, 0x0d, 0x8f, 0x71, 0x1a, 0x31, 0xc7, 0x6f, 0xb8, 0x0e, 0x77, 0xea, 0x72, 0x41, 0xad, 0x16, 0x59, 0x96, 0x94, 0x96, 0x8b, 0xbf, 0x82, 0x65, 0x21, 0x47, 0x90, 0xe7, 0x25, 0xf9, 0x93, 0x84, 0x7c, 0xff, 0xcd, 0xc9, 0x85, 0x5c, 0xab, 0x45, 0x8a, 0x82, 0xd0, 0x72, 0xf1, 0x2e, 0xac, 0x29, 0xe1, 0x31, 0x77, 0x38, 0x4d, 0x2a, 0x04, 0x09, 0xf5, 0x05, 0x82, 0xbf, 0x83, 0xd2, 0xc4, 0x89, 0x28, 0xe3, 0x76, 0x2a, 0x41, 0xfb, 0x8f, 0x24, 0xac, 0x2b, 0xde, 0xbe, 0x12, 0x82, 0x41, 0x63, 0x4e, 0x40, 0x8d, 0x82, 0x54, 0x20, 0xcf, 0xf8, 0x73, 0xd0, 0x4e, 0x3c, 0xe6, 0x1a, 0xc5, 0x0a, 0xaa, 0x95, 0x6e, 0x7b, 0x29, 0x05, 0x8f, 0xfc, 0x69, 0x7b, 0xcc, 0x25, 0x32, 0x10, 0x37, 0x60, 0x2b, 0xe6, 0x4e, 0xc4, 0x6d, 0xee, 0x05, 0xd4, 0x9e, 0x32, 0xef, 0xb9, 0xcd, 0x1c, 0x16, 0x1a, 0xcb, 0x15, 0x54, 0x2b, 0x92, 0x4d, 0x69, 0x1b, 0x78, 0x01, 0x1d, 0x32, 0xef, 0x79, 0xc7, 0x61, 0x21, 0x7e, 0x08, 0x98, 0x32, 0xf7, 0xba, 0xfb, 0x8a, 0x74, 0xbf, 0x43, 0x99, 0x9b, 0x71, 0x7e, 0x0a, 0xe0, 0x70, 0x1e, 0x79, 0xc7, 0x53, 0x4e, 0x63, 0x63, 0x55, 0xee, 0xd6, 0x07, 0xb7, 0xec, 0x6f, 0x9b, 0x9e, 0x3d, 0x73, 0xfc, 0x69, 0xfa, 0x5e, 0xcf, 0x10, 0xe0, 0x7d, 0x30, 0xdc, 0x28, 0x9c, 0x4c, 0xa8, 0x6b, 0x5f, 0xa1, 0xf6, 0x28, 0x9c, 0x32, 0x6e, 0x40, 0x05, 0xd5, 0x36, 0xc8, 0x76, 0x62, 0x3f, 0xb8, 0x34, 0x37, 0x85, 0x15, 0x3f, 0x81, 0x22, 0x3d, 0xa5, 0x8c, 0xc7, 0xc6, 0x9a, 0x14, 0x51, 0x7b, 0x8d, 0x4e, 0x99, 0x22, 0x80, 0x24, 0x71, 0xf8, 0x63, 0xd8, 0x4a, 0x73, 0x2b, 0x24, 0xc9, 0xbb, 0x2e, 0xf3, 0xe2, 0xc4, 0x26, 0x63, 0x92, 0x9c, 0x9f, 0x41, 0xc1, 0xf7, 0xd8, 0x49, 0x6c, 0x6c, 0x2c, 0xa8, 0x3b, 0x9b, 0xf2, 0xc8, 0x63, 0x27, 0x44, 0x45, 0xe1, 0x3a, 0xbc, 0x93, 0x26, 0x94, 0x40, 0x92, 0xaf, 0x24, 0xf3, 0x6d, 0x26, 0x26, 0x11, 0x90, 0xa4, 0x3b, 0x84, 0xa2, 0xd8, 0xd0, 0x69, 0x6c, 0xdc, 0x91, 0xdf, 0x89, 0x07, 0xb7, 0xe4, 0x93, 0xbe, 0x49, 0x93, 0x93, 0xc8, 0x9d, 0xdf, 0x10, 0x14, 0x64, 0x09, 0xf8, 0x01, 0x94, 0xae, 0x8d, 0x18, 0xc9, 0x11, 0xaf, 0xf3, 0xd9, 0xf9, 0xa6, 0x2b, 0x99, 0x9f, 0x59, 0xc9, 0xec, 0xcc, 0x97, 0xde, 0xe6, 0xcc, 0xb5, 0x45, 0x33, 0xdf, 0xf9, 0x2b, 0x0f, 0x9a, 0xe8, 0xcf, 0xff, 0xf8, 0xd3, 0x93, 0xed, 0xb5, 0xf6, 0x36, 0x7b, 0x5d, 0x58, 0xd4, 0xeb, 0xea, 0x2f, 0x08, 0x56, 0xd2, 0x2f, 0x0b, 0x7e, 0x0f, 0xee, 0xf6, 0x7b, 0x07, 0x1d, 0xbb, 0x6d, 0x75, 0x5a, 0xf6, 0xb0, 0xd3, 0xef, 0x99, 0x4d, 0xeb, 0x0b, 0xcb, 0x6c, 0xe9, 0x39, 0xbc, 0x0d, 0xf8, 0xca, 0x64, 0x75, 0x06, 0x26, 0xe9, 0x1c, 0x1c, 0xe9, 0x08, 0x6f, 0x81, 0x7e, 0x85, 0xf7, 0x4d, 0xf2, 0xcc, 0x24, 0x7a, 0x3e, 0x8b, 0x36, 0x8f, 0x2c, 0xb3, 
0x33, 0xd0, 0x97, 0xb2, 0x1c, 0x3d, 0xd2, 0x6d, 0x0d, 0x9b, 0x26, 0xd1, 0xb5, 0x2c, 0xde, 0xec, 0x76, 0xfa, 0xc3, 0xa7, 0x26, 0xd1, 0x0b, 0xd5, 0x5f, 0x11, 0x14, 0xd5, 0xb6, 0x63, 0x03, 0x96, 0x03, 0x1a, 0xc7, 0xce, 0x38, 0x5d, 0xd9, 0xf4, 0x11, 0x37, 0x41, 0x1b, 0x85, 0xae, 0xea, 0x71, 0x69, 0xaf, 0xf1, 0x3a, 0xef, 0x4e, 0xf2, 0xd7, 0x0c, 0x5d, 0x4a, 0x64, 0x70, 0xb5, 0x03, 0x70, 0x85, 0xe1, 0xbb, 0xb0, 0xd9, 0x1f, 0x1c, 0x0c, 0x86, 0x7d, 0xbb, 0xd9, 0x6d, 0x99, 0xa2, 0x11, 0xe6, 0x40, 0xcf, 0x61, 0x0c, 0xa5, 0x59, 0xb8, 0xdb, 0xd6, 0xd1, 0x75, 0x57, 0x93, 0x90, 0x2e, 0xd1, 0xf3, 0x5f, 0x6a, 0x2b, 0x48, 0xcf, 0x1f, 0xfe, 0x84, 0x5e, 0x9c, 0x97, 0xd1, 0xcb, 0xf3, 0x32, 0xfa, 0xf3, 0xbc, 0x8c, 0x7e, 0xbe, 0x28, 0xe7, 0x5e, 0x5e, 0x94, 0x73, 0xbf, 0x5f, 0x94, 0x73, 0xb0, 0xeb, 0x85, 0x0b, 0x95, 0x1e, 0xaa, 0xab, 0x5c, 0x4f, 0x80, 0x3d, 0xf4, 0x75, 0xf3, 0x8d, 0x37, 0x52, 0x5d, 0x17, 0xc7, 0x94, 0x5d, 0xde, 0x5d, 0x8f, 0x8b, 0x12, 0xfa, 0xe4, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0x9b, 0x48, 0xd6, 0xe2, 0x0a, 0x00, 0x00, } func (m *TracesData) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *TracesData) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *TracesData) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.ResourceSpans) > 0 { for iNdEx := len(m.ResourceSpans) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.ResourceSpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa } } return len(dAtA) - i, nil } func (m *ResourceSpans) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ResourceSpans) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *ResourceSpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SchemaUrl) > 0 { i -= len(m.SchemaUrl) copy(dAtA[i:], m.SchemaUrl) i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) i-- dAtA[i] = 0x1a } if len(m.InstrumentationLibrarySpans) > 0 { for iNdEx := len(m.InstrumentationLibrarySpans) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.InstrumentationLibrarySpans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *InstrumentationLibrarySpans) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *InstrumentationLibrarySpans) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *InstrumentationLibrarySpans) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if len(m.SchemaUrl) > 0 { i -= len(m.SchemaUrl) copy(dAtA[i:], m.SchemaUrl) i = encodeVarintTrace(dAtA, i, uint64(len(m.SchemaUrl))) i-- dAtA[i] = 0x1a } if len(m.Spans) > 0 { for iNdEx := 
len(m.Spans) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Spans[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 } } { size, err := m.InstrumentationLibrary.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *Span) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Span) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Span) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l { size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) if err != nil
i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x7a if m.DroppedLinksCount != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.DroppedLinksCount)) i-- dAtA[i] = 0x70 } if len(m.Links) > 0 { for iNdEx := len(m.Links) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Links[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x6a } } if m.DroppedEventsCount != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.DroppedEventsCount)) i-- dAtA[i] = 0x60 } if len(m.Events) > 0 { for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x5a } } if m.DroppedAttributesCount != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) i-- dAtA[i] = 0x50 } if len(m.Attributes) > 0 { for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x4a } } if m.EndTimeUnixNano != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.EndTimeUnixNano)) i-- dAtA[i] = 0x41 } if m.StartTimeUnixNano != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.StartTimeUnixNano)) i-- dAtA[i] = 0x39 } if m.Kind != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.Kind)) i-- dAtA[i] = 0x30 } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x2a } { size := m.ParentSpanId.Size() i -= size if _, err := m.ParentSpanId.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 if len(m.TraceState) > 0 { i -= len(m.TraceState) copy(dAtA[i:], m.TraceState) i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) i-- dAtA[i] = 0x1a } { size := m.SpanId.Size() i -= size if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size := m.TraceId.Size() i -= size if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *Span_Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Span_Event) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Span_Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.DroppedAttributesCount != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) i-- dAtA[i] = 0x20 } if len(m.Attributes) > 0 { for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x1a } } if len(m.Name) > 0 { i -= len(m.Name) copy(dAtA[i:], m.Name) i = encodeVarintTrace(dAtA, i, uint64(len(m.Name))) i-- dAtA[i] = 0x12 } if m.TimeUnixNano != 0 { i -= 8 encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TimeUnixNano)) i-- dAtA[i] = 0x9 } return len(dAtA) - i, nil } func (m *Span_Link) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Span_Link) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Span_Link) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.DroppedAttributesCount != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.DroppedAttributesCount)) i-- dAtA[i] = 0x28 } if len(m.Attributes) > 0 { for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { { size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } i -= size i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x22 } } if len(m.TraceState) > 0 { i -= len(m.TraceState) copy(dAtA[i:], m.TraceState) i = encodeVarintTrace(dAtA, i, uint64(len(m.TraceState))) i-- dAtA[i] = 0x1a } { size := m.SpanId.Size() i -= size if _, err := m.SpanId.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0x12 { size := m.TraceId.Size() i -= size if _, err := m.TraceId.MarshalTo(dAtA[i:]); err != nil { return 0, err } i = encodeVarintTrace(dAtA, i, uint64(size)) } i-- dAtA[i] = 0xa return len(dAtA) - i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *Status) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.Code != 0 { i = encodeVarintTrace(dAtA, i, uint64(m.Code)) i-- dAtA[i] = 0x18 } if len(m.Message) > 0 { i -= len(m.Message) copy(dAtA[i:], m.Message) i = encodeVarintTrace(dAtA, i, uint64(len(m.Message))) i-- dAtA[i] = 0x12 } return len(dAtA) - i, nil } func encodeVarintTrace(dAtA []byte, offset int, v uint64) int { offset -= sovTrace(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *TracesData) Size() (n int) { if m == nil { return 0 } var l int _ = l if len(m.ResourceSpans) > 0 { for _, e := range m.ResourceSpans { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } return n } func (m *ResourceSpans) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.Resource.Size() n += 1 + l + sovTrace(uint64(l)) if len(m.InstrumentationLibrarySpans) > 0 { for _, e := range m.InstrumentationLibrarySpans { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } l = len(m.SchemaUrl) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } return n } func (m *InstrumentationLibrarySpans) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.InstrumentationLibrary.Size() n += 1 + l + sovTrace(uint64(l)) if len(m.Spans) > 0 { for _, e := range m.Spans { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } l = len(m.SchemaUrl) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } return n } func (m *Span) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.TraceId.Size() n += 1 + l + sovTrace(uint64(l)) l = m.SpanId.Size() n += 1 + l + sovTrace(uint64(l)) l = len(m.TraceState) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } l = m.ParentSpanId.Size() n += 1 + l + sovTrace(uint64(l)) l = len(m.Name) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } if m.Kind != 0 { n += 1 + sovTrace(uint64(m.Kind)) } if m.StartTimeUnixNano != 0 { n += 9 } if 
m.EndTimeUnixNano != 0 { n += 9 } if len(m.Attributes) > 0 { for _, e := range m.Attributes { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } if m.DroppedAttributesCount != 0 { n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) } if len(m.Events) > 0 { for _, e := range m.Events { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } if m.DroppedEventsCount != 0 { n += 1 + sovTrace(uint64(m.DroppedEventsCount)) } if len(m.Links) > 0 { for _, e := range m.Links { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } if m.DroppedLinksCount != 0 { n += 1 + sovTrace(uint64(m.DroppedLinksCount)) } l = m.Status.Size() n += 1 + l + sovTrace(uint64(l)) return n } func (m *Span_Event) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.TimeUnixNano != 0 { n += 9 } l = len(m.Name) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } if len(m.Attributes) > 0 { for _, e := range m.Attributes { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } if m.DroppedAttributesCount != 0 { n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) } return n } func (m *Span_Link) Size() (n int) { if m == nil { return 0 } var l int _ = l l = m.TraceId.Size() n += 1 + l + sovTrace(uint64(l)) l = m.SpanId.Size() n += 1 + l + sovTrace(uint64(l)) l = len(m.TraceState) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } if len(m.Attributes) > 0 { for _, e := range m.Attributes { l = e.Size() n += 1 + l + sovTrace(uint64(l)) } } if m.DroppedAttributesCount != 0 { n += 1 + sovTrace(uint64(m.DroppedAttributesCount)) } return n } func (m *Status) Size() (n int) { if m == nil { return 0 } var l int _ = l l = len(m.Message) if l > 0 { n += 1 + l + sovTrace(uint64(l)) } if m.Code != 0 { n += 1 + sovTrace(uint64(m.Code)) } return n } func sovTrace(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozTrace(x uint64) (n int) { return sovTrace(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *TracesData) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: TracesData: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: TracesData: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ResourceSpans", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.ResourceSpans = append(m.ResourceSpans, &ResourceSpans{}) if err := m.ResourceSpans[len(m.ResourceSpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *ResourceSpans) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ResourceSpans: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ResourceSpans: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrarySpans", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.InstrumentationLibrarySpans = append(m.InstrumentationLibrarySpans, &InstrumentationLibrarySpans{}) if err := m.InstrumentationLibrarySpans[len(m.InstrumentationLibrarySpans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.SchemaUrl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *InstrumentationLibrarySpans) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: InstrumentationLibrarySpans: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: InstrumentationLibrarySpans: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { 
return fmt.Errorf("proto: wrong wireType = %d for field InstrumentationLibrary", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.InstrumentationLibrary.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Spans", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Spans = append(m.Spans, &Span{}) if err := m.Spans[len(m.Spans)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SchemaUrl", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.SchemaUrl = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Span) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Span: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Span: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.TraceState = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.ParentSpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } m.Kind = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Kind |= Span_SpanKind(b&0x7F) << shift if b < 0x80 { break } } case 7: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field StartTimeUnixNano", wireType) } m.StartTimeUnixNano = 0 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } m.StartTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 case 8: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field EndTimeUnixNano", wireType) } m.EndTimeUnixNano = 0 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } m.EndTimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 case 9: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + 
msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Attributes = append(m.Attributes, v11.KeyValue{}) if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) } m.DroppedAttributesCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DroppedAttributesCount |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 11: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Events = append(m.Events, &Span_Event{}) if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 12: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedEventsCount", wireType) } m.DroppedEventsCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DroppedEventsCount |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 13: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Links", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Links = append(m.Links, &Span_Link{}) if err := m.Links[len(m.Links)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 14: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedLinksCount", wireType) } m.DroppedLinksCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DroppedLinksCount |= uint32(b&0x7F) << shift if b < 0x80 { break } } case 15: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { 
return io.ErrUnexpectedEOF } return nil } func (m *Span_Event) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Event: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 1 { return fmt.Errorf("proto: wrong wireType = %d for field TimeUnixNano", wireType) } m.TimeUnixNano = 0 if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } m.TimeUnixNano = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Attributes = append(m.Attributes, v11.KeyValue{}) if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) } m.DroppedAttributesCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DroppedAttributesCount |= uint32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Span_Link) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Link: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Link: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for 
field TraceId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.TraceId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } if byteLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } if err := m.SpanId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field TraceState", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.TraceState = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ msglen |= int(b&0x7F) << shift if b < 0x80 { break } } if msglen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Attributes = append(m.Attributes, v11.KeyValue{}) if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DroppedAttributesCount", wireType) } m.DroppedAttributesCount = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.DroppedAttributesCount |= uint32(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func (m *Status) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: Status: wiretype 
end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthTrace } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthTrace } if postIndex > l { return io.ErrUnexpectedEOF } m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) } m.Code = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTrace } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.Code |= Status_StatusCode(b&0x7F) << shift if b < 0x80 { break } } default: iNdEx = preIndex skippy, err := skipTrace(dAtA[iNdEx:]) if err != nil { return err } if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthTrace } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipTrace(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowTrace } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowTrace } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowTrace } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthTrace } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupTrace } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthTrace } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthTrace = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowTrace = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupTrace = fmt.Errorf("proto: unexpected end of group") )
{ return 0, err }
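For reference, a minimal standalone sketch of the two varint helpers that the generated marshalling code above relies on: sovTrace computes how many bytes a value needs, and encodeVarintTrace writes the 7-bit groups ending just before a given offset, since the generated marshallers fill their buffers back to front. The package main wrapper and the sample value 300 are illustrative only.

package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovTrace above: the number of bytes needed to varint-encode x.
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// encodeVarint mirrors encodeVarintTrace above: it writes v so that the last
// encoded byte sits at offset-1 and returns the index where the bytes begin.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80) // low 7 bits plus continuation bit
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 10)
	start := encodeVarint(buf, len(buf), 300)
	fmt.Printf("size=%d bytes=%x\n", sov(300), buf[start:]) // size=2 bytes=ac02
}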
fsnotify_open_bsd.go
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd openbsd netbsd dragonfly
import "syscall"

const open_FLAGS = syscall.O_NONBLOCK | syscall.O_RDONLY
package fsnotify
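Read as a fill-in-the-middle example (prefix, suffix, and the removed middle shown above), the three fragments presumably reassemble into the complete file below; the prefix + middle + suffix ordering is an assumption about how this dump is laid out.

// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build freebsd openbsd netbsd dragonfly

package fsnotify

import "syscall"

const open_FLAGS = syscall.O_NONBLOCK | syscall.O_RDONLY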
stats.go
package params // import "github.com/SevereCloud/vksdk/api/params" import ( "github.com/SevereCloud/vksdk/api" ) // StatsGetBuilder builder // // Returns statistics of a community or an application. // // https://vk.com/dev/stats.get type StatsGetBuilder struct { api.Params } // NewStatsGetBuilder func func NewStatsGetBuilder() *StatsGetBuilder { return &StatsGetBuilder{api.Params{}} } // GroupID Community ID. func (b *StatsGetBuilder) GroupID(v int) { b.Params["group_id"] = v } // AppID Application ID. func (b *StatsGetBuilder) AppID(v int) { b.Params["app_id"] = v } // TimestampFrom parameter func (b *StatsGetBuilder) TimestampFrom(v int) { b.Params["timestamp_from"] = v } // TimestampTo parameter func (b *StatsGetBuilder) TimestampTo(v int) { b.Params["timestamp_to"] = v } // Interval parameter func (b *StatsGetBuilder) Interval(v string) { b.Params["interval"] = v } // IntervalsCount parameter func (b *StatsGetBuilder) IntervalsCount(v int) { b.Params["intervals_count"] = v } // Filters parameter func (b *StatsGetBuilder) Filters(v []string) { b.Params["filters"] = v } // StatsGroups parameter func (b *StatsGetBuilder) StatsGroups(v []string) { b.Params["stats_groups"] = v } // Extended parameter func (b *StatsGetBuilder) Extended(v bool) { b.Params["extended"] = v } // StatsGetPostReachBuilder builder // // Returns stats for a wall post. // // https://vk.com/dev/stats.getPostReach type StatsGetPostReachBuilder struct { api.Params } // NewStatsGetPostReachBuilder func func NewStatsGetPostReachBuilder() *StatsGetPostReachBuilder { return &StatsGetPostReachBuilder{api.Params{}} } // OwnerID post owner community id. Specify with "-" sign. func (b *StatsGetPostReachBuilder) OwnerID(v string) { b.Params["owner_id"] = v } // PostID wall post id. Note that stats are available only for '300' last (newest) posts on a community wall. func (b *StatsGetPostReachBuilder) PostID(v int) { b.Params["post_id"] = v } // StatsTrackVisitorBuilder builder // // https://vk.com/dev/stats.trackVisitor type StatsTrackVisitorBuilder struct { api.Params } // NewStatsTrackVisitorBuilder func func NewStatsTrackVisitorBuilder() *StatsTrackVisitorBuilder
// ID parameter
func (b *StatsTrackVisitorBuilder) ID(v string) { b.Params["id"] = v }
{ return &StatsTrackVisitorBuilder{api.Params{}} }
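A short usage sketch for the builders above, assuming the package is importable at the path declared in its import comment; the group id, interval, and count values are made up, and the printed b.Params map is what a vksdk API call would ultimately receive.

package main

import (
	"fmt"

	"github.com/SevereCloud/vksdk/api/params"
)

func main() {
	// Build parameters for stats.get; all values here are illustrative.
	b := params.NewStatsGetBuilder()
	b.GroupID(1)
	b.Interval("day")
	b.IntervalsCount(7)
	b.Extended(true)

	// b.Params is the api.Params map populated by the setters above.
	fmt.Println(b.Params)
}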
mock_query.go
// Code generated by go-mockgen 1.1.5; DO NOT EDIT. package mocks import ( "context" "sync" resolvers "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/codeintel/resolvers" dbstore "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore" lsifstore "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/lsifstore" precise "github.com/sourcegraph/sourcegraph/lib/codeintel/precise" ) // MockQueryResolver is a mock implementation of the QueryResolver interface // (from the package // github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/codeintel/resolvers) // used for unit testing. type MockQueryResolver struct { // DefinitionsFunc is an instance of a mock function object controlling // the behavior of the method Definitions. DefinitionsFunc *QueryResolverDefinitionsFunc // DiagnosticsFunc is an instance of a mock function object controlling // the behavior of the method Diagnostics. DiagnosticsFunc *QueryResolverDiagnosticsFunc // DocumentationFunc is an instance of a mock function object // controlling the behavior of the method Documentation. DocumentationFunc *QueryResolverDocumentationFunc // DocumentationDefinitionsFunc is an instance of a mock function object // controlling the behavior of the method DocumentationDefinitions. DocumentationDefinitionsFunc *QueryResolverDocumentationDefinitionsFunc // DocumentationPageFunc is an instance of a mock function object // controlling the behavior of the method DocumentationPage. DocumentationPageFunc *QueryResolverDocumentationPageFunc // DocumentationPathInfoFunc is an instance of a mock function object // controlling the behavior of the method DocumentationPathInfo. DocumentationPathInfoFunc *QueryResolverDocumentationPathInfoFunc // DocumentationReferencesFunc is an instance of a mock function object // controlling the behavior of the method DocumentationReferences. DocumentationReferencesFunc *QueryResolverDocumentationReferencesFunc // HoverFunc is an instance of a mock function object controlling the // behavior of the method Hover. HoverFunc *QueryResolverHoverFunc // ImplementationsFunc is an instance of a mock function object // controlling the behavior of the method Implementations. ImplementationsFunc *QueryResolverImplementationsFunc // LSIFUploadsFunc is an instance of a mock function object controlling // the behavior of the method LSIFUploads. LSIFUploadsFunc *QueryResolverLSIFUploadsFunc // RangesFunc is an instance of a mock function object controlling the // behavior of the method Ranges. RangesFunc *QueryResolverRangesFunc // ReferencesFunc is an instance of a mock function object controlling // the behavior of the method References. ReferencesFunc *QueryResolverReferencesFunc // StencilFunc is an instance of a mock function object controlling the // behavior of the method Stencil. StencilFunc *QueryResolverStencilFunc } // NewMockQueryResolver creates a new mock of the QueryResolver interface. // All methods return zero values for all results, unless overwritten. 
func NewMockQueryResolver() *MockQueryResolver { return &MockQueryResolver{ DefinitionsFunc: &QueryResolverDefinitionsFunc{ defaultHook: func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) { return nil, nil }, }, DiagnosticsFunc: &QueryResolverDiagnosticsFunc{ defaultHook: func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) { return nil, 0, nil }, }, DocumentationFunc: &QueryResolverDocumentationFunc{ defaultHook: func(context.Context, int, int) ([]*resolvers.Documentation, error) { return nil, nil }, }, DocumentationDefinitionsFunc: &QueryResolverDocumentationDefinitionsFunc{ defaultHook: func(context.Context, string) ([]resolvers.AdjustedLocation, error) { return nil, nil }, }, DocumentationPageFunc: &QueryResolverDocumentationPageFunc{ defaultHook: func(context.Context, string) (*precise.DocumentationPageData, error) { return nil, nil }, }, DocumentationPathInfoFunc: &QueryResolverDocumentationPathInfoFunc{ defaultHook: func(context.Context, string) (*precise.DocumentationPathInfoData, error) { return nil, nil }, }, DocumentationReferencesFunc: &QueryResolverDocumentationReferencesFunc{ defaultHook: func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) { return nil, "", nil }, }, HoverFunc: &QueryResolverHoverFunc{ defaultHook: func(context.Context, int, int) (string, lsifstore.Range, bool, error) { return "", lsifstore.Range{}, false, nil }, }, ImplementationsFunc: &QueryResolverImplementationsFunc{ defaultHook: func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return nil, "", nil }, }, LSIFUploadsFunc: &QueryResolverLSIFUploadsFunc{ defaultHook: func(context.Context) ([]dbstore.Upload, error) { return nil, nil }, }, RangesFunc: &QueryResolverRangesFunc{ defaultHook: func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { return nil, nil }, }, ReferencesFunc: &QueryResolverReferencesFunc{ defaultHook: func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return nil, "", nil }, }, StencilFunc: &QueryResolverStencilFunc{ defaultHook: func(context.Context) ([]lsifstore.Range, error) { return nil, nil }, }, } } // NewStrictMockQueryResolver creates a new mock of the QueryResolver // interface. All methods panic on invocation, unless overwritten. 
func NewStrictMockQueryResolver() *MockQueryResolver { return &MockQueryResolver{ DefinitionsFunc: &QueryResolverDefinitionsFunc{ defaultHook: func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) { panic("unexpected invocation of MockQueryResolver.Definitions") }, }, DiagnosticsFunc: &QueryResolverDiagnosticsFunc{ defaultHook: func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) { panic("unexpected invocation of MockQueryResolver.Diagnostics") }, }, DocumentationFunc: &QueryResolverDocumentationFunc{ defaultHook: func(context.Context, int, int) ([]*resolvers.Documentation, error) { panic("unexpected invocation of MockQueryResolver.Documentation") }, }, DocumentationDefinitionsFunc: &QueryResolverDocumentationDefinitionsFunc{ defaultHook: func(context.Context, string) ([]resolvers.AdjustedLocation, error) { panic("unexpected invocation of MockQueryResolver.DocumentationDefinitions") }, }, DocumentationPageFunc: &QueryResolverDocumentationPageFunc{ defaultHook: func(context.Context, string) (*precise.DocumentationPageData, error) { panic("unexpected invocation of MockQueryResolver.DocumentationPage") }, }, DocumentationPathInfoFunc: &QueryResolverDocumentationPathInfoFunc{ defaultHook: func(context.Context, string) (*precise.DocumentationPathInfoData, error) { panic("unexpected invocation of MockQueryResolver.DocumentationPathInfo") }, }, DocumentationReferencesFunc: &QueryResolverDocumentationReferencesFunc{ defaultHook: func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) { panic("unexpected invocation of MockQueryResolver.DocumentationReferences") }, }, HoverFunc: &QueryResolverHoverFunc{ defaultHook: func(context.Context, int, int) (string, lsifstore.Range, bool, error) { panic("unexpected invocation of MockQueryResolver.Hover") }, }, ImplementationsFunc: &QueryResolverImplementationsFunc{ defaultHook: func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { panic("unexpected invocation of MockQueryResolver.Implementations") }, }, LSIFUploadsFunc: &QueryResolverLSIFUploadsFunc{ defaultHook: func(context.Context) ([]dbstore.Upload, error) { panic("unexpected invocation of MockQueryResolver.LSIFUploads") }, }, RangesFunc: &QueryResolverRangesFunc{ defaultHook: func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { panic("unexpected invocation of MockQueryResolver.Ranges") }, }, ReferencesFunc: &QueryResolverReferencesFunc{ defaultHook: func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { panic("unexpected invocation of MockQueryResolver.References") }, }, StencilFunc: &QueryResolverStencilFunc{ defaultHook: func(context.Context) ([]lsifstore.Range, error) { panic("unexpected invocation of MockQueryResolver.Stencil") }, }, } } // NewMockQueryResolverFrom creates a new mock of the MockQueryResolver // interface. All methods delegate to the given implementation, unless // overwritten. func NewMockQueryResolverFrom(i resolvers.QueryResolver) *MockQueryResolver
// QueryResolverDefinitionsFunc describes the behavior when the Definitions // method of the parent MockQueryResolver instance is invoked. type QueryResolverDefinitionsFunc struct { defaultHook func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) hooks []func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) history []QueryResolverDefinitionsFuncCall mutex sync.Mutex } // Definitions delegates to the next hook function in the queue and stores // the parameter and result values of this invocation. func (m *MockQueryResolver) Definitions(v0 context.Context, v1 int, v2 int) ([]resolvers.AdjustedLocation, error) { r0, r1 := m.DefinitionsFunc.nextHook()(v0, v1, v2) m.DefinitionsFunc.appendCall(QueryResolverDefinitionsFuncCall{v0, v1, v2, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the Definitions method // of the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverDefinitionsFunc) SetDefaultHook(hook func(context.Context, int, int) ([]resolvers.AdjustedLocation, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Definitions method of the parent MockQueryResolver instance invokes the // hook at the front of the queue and discards it. After the queue is empty, // the default hook function is invoked for any future action. func (f *QueryResolverDefinitionsFunc) PushHook(hook func(context.Context, int, int) ([]resolvers.AdjustedLocation, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDefinitionsFunc) SetDefaultReturn(r0 []resolvers.AdjustedLocation, r1 error) { f.SetDefaultHook(func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverDefinitionsFunc) PushReturn(r0 []resolvers.AdjustedLocation, r1 error) { f.PushHook(func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) { return r0, r1 }) } func (f *QueryResolverDefinitionsFunc) nextHook() func(context.Context, int, int) ([]resolvers.AdjustedLocation, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDefinitionsFunc) appendCall(r0 QueryResolverDefinitionsFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverDefinitionsFuncCall objects // describing the invocations of this function. func (f *QueryResolverDefinitionsFunc) History() []QueryResolverDefinitionsFuncCall { f.mutex.Lock() history := make([]QueryResolverDefinitionsFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDefinitionsFuncCall is an object that describes an // invocation of method Definitions on an instance of MockQueryResolver. type QueryResolverDefinitionsFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Result0 is the value of the 1st result returned from this method // invocation. 
Result0 []resolvers.AdjustedLocation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDefinitionsFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDefinitionsFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverDiagnosticsFunc describes the behavior when the Diagnostics // method of the parent MockQueryResolver instance is invoked. type QueryResolverDiagnosticsFunc struct { defaultHook func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) hooks []func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) history []QueryResolverDiagnosticsFuncCall mutex sync.Mutex } // Diagnostics delegates to the next hook function in the queue and stores // the parameter and result values of this invocation. func (m *MockQueryResolver) Diagnostics(v0 context.Context, v1 int) ([]resolvers.AdjustedDiagnostic, int, error) { r0, r1, r2 := m.DiagnosticsFunc.nextHook()(v0, v1) m.DiagnosticsFunc.appendCall(QueryResolverDiagnosticsFuncCall{v0, v1, r0, r1, r2}) return r0, r1, r2 } // SetDefaultHook sets function that is called when the Diagnostics method // of the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverDiagnosticsFunc) SetDefaultHook(hook func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Diagnostics method of the parent MockQueryResolver instance invokes the // hook at the front of the queue and discards it. After the queue is empty, // the default hook function is invoked for any future action. func (f *QueryResolverDiagnosticsFunc) PushHook(hook func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDiagnosticsFunc) SetDefaultReturn(r0 []resolvers.AdjustedDiagnostic, r1 int, r2 error) { f.SetDefaultHook(func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) { return r0, r1, r2 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverDiagnosticsFunc) PushReturn(r0 []resolvers.AdjustedDiagnostic, r1 int, r2 error) { f.PushHook(func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) { return r0, r1, r2 }) } func (f *QueryResolverDiagnosticsFunc) nextHook() func(context.Context, int) ([]resolvers.AdjustedDiagnostic, int, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDiagnosticsFunc) appendCall(r0 QueryResolverDiagnosticsFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverDiagnosticsFuncCall objects // describing the invocations of this function. 
func (f *QueryResolverDiagnosticsFunc) History() []QueryResolverDiagnosticsFuncCall { f.mutex.Lock() history := make([]QueryResolverDiagnosticsFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDiagnosticsFuncCall is an object that describes an // invocation of method Diagnostics on an instance of MockQueryResolver. type QueryResolverDiagnosticsFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedDiagnostic // Result1 is the value of the 2nd result returned from this method // invocation. Result1 int // Result2 is the value of the 3rd result returned from this method // invocation. Result2 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDiagnosticsFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDiagnosticsFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1, c.Result2} } // QueryResolverDocumentationFunc describes the behavior when the // Documentation method of the parent MockQueryResolver instance is invoked. type QueryResolverDocumentationFunc struct { defaultHook func(context.Context, int, int) ([]*resolvers.Documentation, error) hooks []func(context.Context, int, int) ([]*resolvers.Documentation, error) history []QueryResolverDocumentationFuncCall mutex sync.Mutex } // Documentation delegates to the next hook function in the queue and stores // the parameter and result values of this invocation. func (m *MockQueryResolver) Documentation(v0 context.Context, v1 int, v2 int) ([]*resolvers.Documentation, error) { r0, r1 := m.DocumentationFunc.nextHook()(v0, v1, v2) m.DocumentationFunc.appendCall(QueryResolverDocumentationFuncCall{v0, v1, v2, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the Documentation method // of the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverDocumentationFunc) SetDefaultHook(hook func(context.Context, int, int) ([]*resolvers.Documentation, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Documentation method of the parent MockQueryResolver instance invokes the // hook at the front of the queue and discards it. After the queue is empty, // the default hook function is invoked for any future action. func (f *QueryResolverDocumentationFunc) PushHook(hook func(context.Context, int, int) ([]*resolvers.Documentation, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDocumentationFunc) SetDefaultReturn(r0 []*resolvers.Documentation, r1 error) { f.SetDefaultHook(func(context.Context, int, int) ([]*resolvers.Documentation, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. 
func (f *QueryResolverDocumentationFunc) PushReturn(r0 []*resolvers.Documentation, r1 error) { f.PushHook(func(context.Context, int, int) ([]*resolvers.Documentation, error) { return r0, r1 }) } func (f *QueryResolverDocumentationFunc) nextHook() func(context.Context, int, int) ([]*resolvers.Documentation, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDocumentationFunc) appendCall(r0 QueryResolverDocumentationFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverDocumentationFuncCall objects // describing the invocations of this function. func (f *QueryResolverDocumentationFunc) History() []QueryResolverDocumentationFuncCall { f.mutex.Lock() history := make([]QueryResolverDocumentationFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDocumentationFuncCall is an object that describes an // invocation of method Documentation on an instance of MockQueryResolver. type QueryResolverDocumentationFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Result0 is the value of the 1st result returned from this method // invocation. Result0 []*resolvers.Documentation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDocumentationFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDocumentationFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverDocumentationDefinitionsFunc describes the behavior when the // DocumentationDefinitions method of the parent MockQueryResolver instance // is invoked. type QueryResolverDocumentationDefinitionsFunc struct { defaultHook func(context.Context, string) ([]resolvers.AdjustedLocation, error) hooks []func(context.Context, string) ([]resolvers.AdjustedLocation, error) history []QueryResolverDocumentationDefinitionsFuncCall mutex sync.Mutex } // DocumentationDefinitions delegates to the next hook function in the queue // and stores the parameter and result values of this invocation. func (m *MockQueryResolver) DocumentationDefinitions(v0 context.Context, v1 string) ([]resolvers.AdjustedLocation, error) { r0, r1 := m.DocumentationDefinitionsFunc.nextHook()(v0, v1) m.DocumentationDefinitionsFunc.appendCall(QueryResolverDocumentationDefinitionsFuncCall{v0, v1, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the // DocumentationDefinitions method of the parent MockQueryResolver instance // is invoked and the hook queue is empty. func (f *QueryResolverDocumentationDefinitionsFunc) SetDefaultHook(hook func(context.Context, string) ([]resolvers.AdjustedLocation, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // DocumentationDefinitions method of the parent MockQueryResolver instance // invokes the hook at the front of the queue and discards it. 
After the // queue is empty, the default hook function is invoked for any future // action. func (f *QueryResolverDocumentationDefinitionsFunc) PushHook(hook func(context.Context, string) ([]resolvers.AdjustedLocation, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDocumentationDefinitionsFunc) SetDefaultReturn(r0 []resolvers.AdjustedLocation, r1 error) { f.SetDefaultHook(func(context.Context, string) ([]resolvers.AdjustedLocation, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverDocumentationDefinitionsFunc) PushReturn(r0 []resolvers.AdjustedLocation, r1 error) { f.PushHook(func(context.Context, string) ([]resolvers.AdjustedLocation, error) { return r0, r1 }) } func (f *QueryResolverDocumentationDefinitionsFunc) nextHook() func(context.Context, string) ([]resolvers.AdjustedLocation, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDocumentationDefinitionsFunc) appendCall(r0 QueryResolverDocumentationDefinitionsFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of // QueryResolverDocumentationDefinitionsFuncCall objects describing the // invocations of this function. func (f *QueryResolverDocumentationDefinitionsFunc) History() []QueryResolverDocumentationDefinitionsFuncCall { f.mutex.Lock() history := make([]QueryResolverDocumentationDefinitionsFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDocumentationDefinitionsFuncCall is an object that describes // an invocation of method DocumentationDefinitions on an instance of // MockQueryResolver. type QueryResolverDocumentationDefinitionsFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedLocation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDocumentationDefinitionsFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDocumentationDefinitionsFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverDocumentationPageFunc describes the behavior when the // DocumentationPage method of the parent MockQueryResolver instance is // invoked. type QueryResolverDocumentationPageFunc struct { defaultHook func(context.Context, string) (*precise.DocumentationPageData, error) hooks []func(context.Context, string) (*precise.DocumentationPageData, error) history []QueryResolverDocumentationPageFuncCall mutex sync.Mutex } // DocumentationPage delegates to the next hook function in the queue and // stores the parameter and result values of this invocation. 
func (m *MockQueryResolver) DocumentationPage(v0 context.Context, v1 string) (*precise.DocumentationPageData, error) { r0, r1 := m.DocumentationPageFunc.nextHook()(v0, v1) m.DocumentationPageFunc.appendCall(QueryResolverDocumentationPageFuncCall{v0, v1, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the DocumentationPage // method of the parent MockQueryResolver instance is invoked and the hook // queue is empty. func (f *QueryResolverDocumentationPageFunc) SetDefaultHook(hook func(context.Context, string) (*precise.DocumentationPageData, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // DocumentationPage method of the parent MockQueryResolver instance invokes // the hook at the front of the queue and discards it. After the queue is // empty, the default hook function is invoked for any future action. func (f *QueryResolverDocumentationPageFunc) PushHook(hook func(context.Context, string) (*precise.DocumentationPageData, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDocumentationPageFunc) SetDefaultReturn(r0 *precise.DocumentationPageData, r1 error) { f.SetDefaultHook(func(context.Context, string) (*precise.DocumentationPageData, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverDocumentationPageFunc) PushReturn(r0 *precise.DocumentationPageData, r1 error) { f.PushHook(func(context.Context, string) (*precise.DocumentationPageData, error) { return r0, r1 }) } func (f *QueryResolverDocumentationPageFunc) nextHook() func(context.Context, string) (*precise.DocumentationPageData, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDocumentationPageFunc) appendCall(r0 QueryResolverDocumentationPageFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverDocumentationPageFuncCall // objects describing the invocations of this function. func (f *QueryResolverDocumentationPageFunc) History() []QueryResolverDocumentationPageFuncCall { f.mutex.Lock() history := make([]QueryResolverDocumentationPageFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDocumentationPageFuncCall is an object that describes an // invocation of method DocumentationPage on an instance of // MockQueryResolver. type QueryResolverDocumentationPageFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 *precise.DocumentationPageData // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDocumentationPageFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1} } // Results returns an interface slice containing the results of this // invocation. 
func (c QueryResolverDocumentationPageFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverDocumentationPathInfoFunc describes the behavior when the // DocumentationPathInfo method of the parent MockQueryResolver instance is // invoked. type QueryResolverDocumentationPathInfoFunc struct { defaultHook func(context.Context, string) (*precise.DocumentationPathInfoData, error) hooks []func(context.Context, string) (*precise.DocumentationPathInfoData, error) history []QueryResolverDocumentationPathInfoFuncCall mutex sync.Mutex } // DocumentationPathInfo delegates to the next hook function in the queue // and stores the parameter and result values of this invocation. func (m *MockQueryResolver) DocumentationPathInfo(v0 context.Context, v1 string) (*precise.DocumentationPathInfoData, error) { r0, r1 := m.DocumentationPathInfoFunc.nextHook()(v0, v1) m.DocumentationPathInfoFunc.appendCall(QueryResolverDocumentationPathInfoFuncCall{v0, v1, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the // DocumentationPathInfo method of the parent MockQueryResolver instance is // invoked and the hook queue is empty. func (f *QueryResolverDocumentationPathInfoFunc) SetDefaultHook(hook func(context.Context, string) (*precise.DocumentationPathInfoData, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // DocumentationPathInfo method of the parent MockQueryResolver instance // invokes the hook at the front of the queue and discards it. After the // queue is empty, the default hook function is invoked for any future // action. func (f *QueryResolverDocumentationPathInfoFunc) PushHook(hook func(context.Context, string) (*precise.DocumentationPathInfoData, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDocumentationPathInfoFunc) SetDefaultReturn(r0 *precise.DocumentationPathInfoData, r1 error) { f.SetDefaultHook(func(context.Context, string) (*precise.DocumentationPathInfoData, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverDocumentationPathInfoFunc) PushReturn(r0 *precise.DocumentationPathInfoData, r1 error) { f.PushHook(func(context.Context, string) (*precise.DocumentationPathInfoData, error) { return r0, r1 }) } func (f *QueryResolverDocumentationPathInfoFunc) nextHook() func(context.Context, string) (*precise.DocumentationPathInfoData, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDocumentationPathInfoFunc) appendCall(r0 QueryResolverDocumentationPathInfoFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverDocumentationPathInfoFuncCall // objects describing the invocations of this function. func (f *QueryResolverDocumentationPathInfoFunc) History() []QueryResolverDocumentationPathInfoFuncCall { f.mutex.Lock() history := make([]QueryResolverDocumentationPathInfoFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDocumentationPathInfoFuncCall is an object that describes an // invocation of method DocumentationPathInfo on an instance of // MockQueryResolver. 
type QueryResolverDocumentationPathInfoFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 *precise.DocumentationPathInfoData // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDocumentationPathInfoFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDocumentationPathInfoFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverDocumentationReferencesFunc describes the behavior when the // DocumentationReferences method of the parent MockQueryResolver instance // is invoked. type QueryResolverDocumentationReferencesFunc struct { defaultHook func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) hooks []func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) history []QueryResolverDocumentationReferencesFuncCall mutex sync.Mutex } // DocumentationReferences delegates to the next hook function in the queue // and stores the parameter and result values of this invocation. func (m *MockQueryResolver) DocumentationReferences(v0 context.Context, v1 string, v2 int, v3 string) ([]resolvers.AdjustedLocation, string, error) { r0, r1, r2 := m.DocumentationReferencesFunc.nextHook()(v0, v1, v2, v3) m.DocumentationReferencesFunc.appendCall(QueryResolverDocumentationReferencesFuncCall{v0, v1, v2, v3, r0, r1, r2}) return r0, r1, r2 } // SetDefaultHook sets function that is called when the // DocumentationReferences method of the parent MockQueryResolver instance // is invoked and the hook queue is empty. func (f *QueryResolverDocumentationReferencesFunc) SetDefaultHook(hook func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // DocumentationReferences method of the parent MockQueryResolver instance // invokes the hook at the front of the queue and discards it. After the // queue is empty, the default hook function is invoked for any future // action. func (f *QueryResolverDocumentationReferencesFunc) PushHook(hook func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverDocumentationReferencesFunc) SetDefaultReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.SetDefaultHook(func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } // PushReturn calls PushHook with a function that returns the given values. 
func (f *QueryResolverDocumentationReferencesFunc) PushReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.PushHook(func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } func (f *QueryResolverDocumentationReferencesFunc) nextHook() func(context.Context, string, int, string) ([]resolvers.AdjustedLocation, string, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverDocumentationReferencesFunc) appendCall(r0 QueryResolverDocumentationReferencesFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of // QueryResolverDocumentationReferencesFuncCall objects describing the // invocations of this function. func (f *QueryResolverDocumentationReferencesFunc) History() []QueryResolverDocumentationReferencesFuncCall { f.mutex.Lock() history := make([]QueryResolverDocumentationReferencesFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverDocumentationReferencesFuncCall is an object that describes // an invocation of method DocumentationReferences on an instance of // MockQueryResolver. type QueryResolverDocumentationReferencesFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 string // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Arg3 is the value of the 4th argument passed to this method // invocation. Arg3 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedLocation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 string // Result2 is the value of the 3rd result returned from this method // invocation. Result2 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverDocumentationReferencesFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverDocumentationReferencesFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1, c.Result2} } // QueryResolverHoverFunc describes the behavior when the Hover method of // the parent MockQueryResolver instance is invoked. type QueryResolverHoverFunc struct { defaultHook func(context.Context, int, int) (string, lsifstore.Range, bool, error) hooks []func(context.Context, int, int) (string, lsifstore.Range, bool, error) history []QueryResolverHoverFuncCall mutex sync.Mutex } // Hover delegates to the next hook function in the queue and stores the // parameter and result values of this invocation. func (m *MockQueryResolver) Hover(v0 context.Context, v1 int, v2 int) (string, lsifstore.Range, bool, error) { r0, r1, r2, r3 := m.HoverFunc.nextHook()(v0, v1, v2) m.HoverFunc.appendCall(QueryResolverHoverFuncCall{v0, v1, v2, r0, r1, r2, r3}) return r0, r1, r2, r3 } // SetDefaultHook sets function that is called when the Hover method of the // parent MockQueryResolver instance is invoked and the hook queue is empty. 
func (f *QueryResolverHoverFunc) SetDefaultHook(hook func(context.Context, int, int) (string, lsifstore.Range, bool, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Hover method of the parent MockQueryResolver instance invokes the hook at // the front of the queue and discards it. After the queue is empty, the // default hook function is invoked for any future action. func (f *QueryResolverHoverFunc) PushHook(hook func(context.Context, int, int) (string, lsifstore.Range, bool, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverHoverFunc) SetDefaultReturn(r0 string, r1 lsifstore.Range, r2 bool, r3 error) { f.SetDefaultHook(func(context.Context, int, int) (string, lsifstore.Range, bool, error) { return r0, r1, r2, r3 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverHoverFunc) PushReturn(r0 string, r1 lsifstore.Range, r2 bool, r3 error) { f.PushHook(func(context.Context, int, int) (string, lsifstore.Range, bool, error) { return r0, r1, r2, r3 }) } func (f *QueryResolverHoverFunc) nextHook() func(context.Context, int, int) (string, lsifstore.Range, bool, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverHoverFunc) appendCall(r0 QueryResolverHoverFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverHoverFuncCall objects // describing the invocations of this function. func (f *QueryResolverHoverFunc) History() []QueryResolverHoverFuncCall { f.mutex.Lock() history := make([]QueryResolverHoverFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverHoverFuncCall is an object that describes an invocation of // method Hover on an instance of MockQueryResolver. type QueryResolverHoverFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Result0 is the value of the 1st result returned from this method // invocation. Result0 string // Result1 is the value of the 2nd result returned from this method // invocation. Result1 lsifstore.Range // Result2 is the value of the 3rd result returned from this method // invocation. Result2 bool // Result3 is the value of the 4th result returned from this method // invocation. Result3 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverHoverFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverHoverFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1, c.Result2, c.Result3} } // QueryResolverImplementationsFunc describes the behavior when the // Implementations method of the parent MockQueryResolver instance is // invoked. 
type QueryResolverImplementationsFunc struct { defaultHook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) hooks []func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) history []QueryResolverImplementationsFuncCall mutex sync.Mutex } // Implementations delegates to the next hook function in the queue and // stores the parameter and result values of this invocation. func (m *MockQueryResolver) Implementations(v0 context.Context, v1 int, v2 int, v3 int, v4 string) ([]resolvers.AdjustedLocation, string, error) { r0, r1, r2 := m.ImplementationsFunc.nextHook()(v0, v1, v2, v3, v4) m.ImplementationsFunc.appendCall(QueryResolverImplementationsFuncCall{v0, v1, v2, v3, v4, r0, r1, r2}) return r0, r1, r2 } // SetDefaultHook sets function that is called when the Implementations // method of the parent MockQueryResolver instance is invoked and the hook // queue is empty. func (f *QueryResolverImplementationsFunc) SetDefaultHook(hook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Implementations method of the parent MockQueryResolver instance invokes // the hook at the front of the queue and discards it. After the queue is // empty, the default hook function is invoked for any future action. func (f *QueryResolverImplementationsFunc) PushHook(hook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverImplementationsFunc) SetDefaultReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.SetDefaultHook(func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverImplementationsFunc) PushReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.PushHook(func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } func (f *QueryResolverImplementationsFunc) nextHook() func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverImplementationsFunc) appendCall(r0 QueryResolverImplementationsFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverImplementationsFuncCall // objects describing the invocations of this function. func (f *QueryResolverImplementationsFunc) History() []QueryResolverImplementationsFuncCall { f.mutex.Lock() history := make([]QueryResolverImplementationsFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverImplementationsFuncCall is an object that describes an // invocation of method Implementations on an instance of MockQueryResolver. type QueryResolverImplementationsFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. 
Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Arg3 is the value of the 4th argument passed to this method // invocation. Arg3 int // Arg4 is the value of the 5th argument passed to this method // invocation. Arg4 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedLocation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 string // Result2 is the value of the 3rd result returned from this method // invocation. Result2 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverImplementationsFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3, c.Arg4} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverImplementationsFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1, c.Result2} } // QueryResolverLSIFUploadsFunc describes the behavior when the LSIFUploads // method of the parent MockQueryResolver instance is invoked. type QueryResolverLSIFUploadsFunc struct { defaultHook func(context.Context) ([]dbstore.Upload, error) hooks []func(context.Context) ([]dbstore.Upload, error) history []QueryResolverLSIFUploadsFuncCall mutex sync.Mutex } // LSIFUploads delegates to the next hook function in the queue and stores // the parameter and result values of this invocation. func (m *MockQueryResolver) LSIFUploads(v0 context.Context) ([]dbstore.Upload, error) { r0, r1 := m.LSIFUploadsFunc.nextHook()(v0) m.LSIFUploadsFunc.appendCall(QueryResolverLSIFUploadsFuncCall{v0, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the LSIFUploads method // of the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverLSIFUploadsFunc) SetDefaultHook(hook func(context.Context) ([]dbstore.Upload, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // LSIFUploads method of the parent MockQueryResolver instance invokes the // hook at the front of the queue and discards it. After the queue is empty, // the default hook function is invoked for any future action. func (f *QueryResolverLSIFUploadsFunc) PushHook(hook func(context.Context) ([]dbstore.Upload, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverLSIFUploadsFunc) SetDefaultReturn(r0 []dbstore.Upload, r1 error) { f.SetDefaultHook(func(context.Context) ([]dbstore.Upload, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverLSIFUploadsFunc) PushReturn(r0 []dbstore.Upload, r1 error) { f.PushHook(func(context.Context) ([]dbstore.Upload, error) { return r0, r1 }) } func (f *QueryResolverLSIFUploadsFunc) nextHook() func(context.Context) ([]dbstore.Upload, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverLSIFUploadsFunc) appendCall(r0 QueryResolverLSIFUploadsFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverLSIFUploadsFuncCall objects // describing the invocations of this function. 
func (f *QueryResolverLSIFUploadsFunc) History() []QueryResolverLSIFUploadsFuncCall { f.mutex.Lock() history := make([]QueryResolverLSIFUploadsFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverLSIFUploadsFuncCall is an object that describes an // invocation of method LSIFUploads on an instance of MockQueryResolver. type QueryResolverLSIFUploadsFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Result0 is the value of the 1st result returned from this method // invocation. Result0 []dbstore.Upload // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverLSIFUploadsFuncCall) Args() []interface{} { return []interface{}{c.Arg0} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverLSIFUploadsFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverRangesFunc describes the behavior when the Ranges method of // the parent MockQueryResolver instance is invoked. type QueryResolverRangesFunc struct { defaultHook func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) hooks []func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) history []QueryResolverRangesFuncCall mutex sync.Mutex } // Ranges delegates to the next hook function in the queue and stores the // parameter and result values of this invocation. func (m *MockQueryResolver) Ranges(v0 context.Context, v1 int, v2 int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { r0, r1 := m.RangesFunc.nextHook()(v0, v1, v2) m.RangesFunc.appendCall(QueryResolverRangesFuncCall{v0, v1, v2, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the Ranges method of the // parent MockQueryResolver instance is invoked and the hook queue is empty. func (f *QueryResolverRangesFunc) SetDefaultHook(hook func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Ranges method of the parent MockQueryResolver instance invokes the hook // at the front of the queue and discards it. After the queue is empty, the // default hook function is invoked for any future action. func (f *QueryResolverRangesFunc) PushHook(hook func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverRangesFunc) SetDefaultReturn(r0 []resolvers.AdjustedCodeIntelligenceRange, r1 error) { f.SetDefaultHook(func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. 
func (f *QueryResolverRangesFunc) PushReturn(r0 []resolvers.AdjustedCodeIntelligenceRange, r1 error) { f.PushHook(func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { return r0, r1 }) } func (f *QueryResolverRangesFunc) nextHook() func(context.Context, int, int) ([]resolvers.AdjustedCodeIntelligenceRange, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverRangesFunc) appendCall(r0 QueryResolverRangesFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverRangesFuncCall objects // describing the invocations of this function. func (f *QueryResolverRangesFunc) History() []QueryResolverRangesFuncCall { f.mutex.Lock() history := make([]QueryResolverRangesFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverRangesFuncCall is an object that describes an invocation of // method Ranges on an instance of MockQueryResolver. type QueryResolverRangesFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedCodeIntelligenceRange // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverRangesFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverRangesFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} } // QueryResolverReferencesFunc describes the behavior when the References // method of the parent MockQueryResolver instance is invoked. type QueryResolverReferencesFunc struct { defaultHook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) hooks []func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) history []QueryResolverReferencesFuncCall mutex sync.Mutex } // References delegates to the next hook function in the queue and stores // the parameter and result values of this invocation. func (m *MockQueryResolver) References(v0 context.Context, v1 int, v2 int, v3 int, v4 string) ([]resolvers.AdjustedLocation, string, error) { r0, r1, r2 := m.ReferencesFunc.nextHook()(v0, v1, v2, v3, v4) m.ReferencesFunc.appendCall(QueryResolverReferencesFuncCall{v0, v1, v2, v3, v4, r0, r1, r2}) return r0, r1, r2 } // SetDefaultHook sets function that is called when the References method of // the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverReferencesFunc) SetDefaultHook(hook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // References method of the parent MockQueryResolver instance invokes the // hook at the front of the queue and discards it. After the queue is empty, // the default hook function is invoked for any future action. 
func (f *QueryResolverReferencesFunc) PushHook(hook func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverReferencesFunc) SetDefaultReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.SetDefaultHook(func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverReferencesFunc) PushReturn(r0 []resolvers.AdjustedLocation, r1 string, r2 error) { f.PushHook(func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { return r0, r1, r2 }) } func (f *QueryResolverReferencesFunc) nextHook() func(context.Context, int, int, int, string) ([]resolvers.AdjustedLocation, string, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverReferencesFunc) appendCall(r0 QueryResolverReferencesFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverReferencesFuncCall objects // describing the invocations of this function. func (f *QueryResolverReferencesFunc) History() []QueryResolverReferencesFuncCall { f.mutex.Lock() history := make([]QueryResolverReferencesFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverReferencesFuncCall is an object that describes an invocation // of method References on an instance of MockQueryResolver. type QueryResolverReferencesFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Arg1 is the value of the 2nd argument passed to this method // invocation. Arg1 int // Arg2 is the value of the 3rd argument passed to this method // invocation. Arg2 int // Arg3 is the value of the 4th argument passed to this method // invocation. Arg3 int // Arg4 is the value of the 5th argument passed to this method // invocation. Arg4 string // Result0 is the value of the 1st result returned from this method // invocation. Result0 []resolvers.AdjustedLocation // Result1 is the value of the 2nd result returned from this method // invocation. Result1 string // Result2 is the value of the 3rd result returned from this method // invocation. Result2 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverReferencesFuncCall) Args() []interface{} { return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3, c.Arg4} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverReferencesFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1, c.Result2} } // QueryResolverStencilFunc describes the behavior when the Stencil method // of the parent MockQueryResolver instance is invoked. type QueryResolverStencilFunc struct { defaultHook func(context.Context) ([]lsifstore.Range, error) hooks []func(context.Context) ([]lsifstore.Range, error) history []QueryResolverStencilFuncCall mutex sync.Mutex } // Stencil delegates to the next hook function in the queue and stores the // parameter and result values of this invocation. 
func (m *MockQueryResolver) Stencil(v0 context.Context) ([]lsifstore.Range, error) { r0, r1 := m.StencilFunc.nextHook()(v0) m.StencilFunc.appendCall(QueryResolverStencilFuncCall{v0, r0, r1}) return r0, r1 } // SetDefaultHook sets function that is called when the Stencil method of // the parent MockQueryResolver instance is invoked and the hook queue is // empty. func (f *QueryResolverStencilFunc) SetDefaultHook(hook func(context.Context) ([]lsifstore.Range, error)) { f.defaultHook = hook } // PushHook adds a function to the end of hook queue. Each invocation of the // Stencil method of the parent MockQueryResolver instance invokes the hook // at the front of the queue and discards it. After the queue is empty, the // default hook function is invoked for any future action. func (f *QueryResolverStencilFunc) PushHook(hook func(context.Context) ([]lsifstore.Range, error)) { f.mutex.Lock() f.hooks = append(f.hooks, hook) f.mutex.Unlock() } // SetDefaultReturn calls SetDefaultHook with a function that returns the // given values. func (f *QueryResolverStencilFunc) SetDefaultReturn(r0 []lsifstore.Range, r1 error) { f.SetDefaultHook(func(context.Context) ([]lsifstore.Range, error) { return r0, r1 }) } // PushReturn calls PushHook with a function that returns the given values. func (f *QueryResolverStencilFunc) PushReturn(r0 []lsifstore.Range, r1 error) { f.PushHook(func(context.Context) ([]lsifstore.Range, error) { return r0, r1 }) } func (f *QueryResolverStencilFunc) nextHook() func(context.Context) ([]lsifstore.Range, error) { f.mutex.Lock() defer f.mutex.Unlock() if len(f.hooks) == 0 { return f.defaultHook } hook := f.hooks[0] f.hooks = f.hooks[1:] return hook } func (f *QueryResolverStencilFunc) appendCall(r0 QueryResolverStencilFuncCall) { f.mutex.Lock() f.history = append(f.history, r0) f.mutex.Unlock() } // History returns a sequence of QueryResolverStencilFuncCall objects // describing the invocations of this function. func (f *QueryResolverStencilFunc) History() []QueryResolverStencilFuncCall { f.mutex.Lock() history := make([]QueryResolverStencilFuncCall, len(f.history)) copy(history, f.history) f.mutex.Unlock() return history } // QueryResolverStencilFuncCall is an object that describes an invocation of // method Stencil on an instance of MockQueryResolver. type QueryResolverStencilFuncCall struct { // Arg0 is the value of the 1st argument passed to this method // invocation. Arg0 context.Context // Result0 is the value of the 1st result returned from this method // invocation. Result0 []lsifstore.Range // Result1 is the value of the 2nd result returned from this method // invocation. Result1 error } // Args returns an interface slice containing the arguments of this // invocation. func (c QueryResolverStencilFuncCall) Args() []interface{} { return []interface{}{c.Arg0} } // Results returns an interface slice containing the results of this // invocation. func (c QueryResolverStencilFuncCall) Results() []interface{} { return []interface{}{c.Result0, c.Result1} }
{ return &MockQueryResolver{ DefinitionsFunc: &QueryResolverDefinitionsFunc{ defaultHook: i.Definitions, }, DiagnosticsFunc: &QueryResolverDiagnosticsFunc{ defaultHook: i.Diagnostics, }, DocumentationFunc: &QueryResolverDocumentationFunc{ defaultHook: i.Documentation, }, DocumentationDefinitionsFunc: &QueryResolverDocumentationDefinitionsFunc{ defaultHook: i.DocumentationDefinitions, }, DocumentationPageFunc: &QueryResolverDocumentationPageFunc{ defaultHook: i.DocumentationPage, }, DocumentationPathInfoFunc: &QueryResolverDocumentationPathInfoFunc{ defaultHook: i.DocumentationPathInfo, }, DocumentationReferencesFunc: &QueryResolverDocumentationReferencesFunc{ defaultHook: i.DocumentationReferences, }, HoverFunc: &QueryResolverHoverFunc{ defaultHook: i.Hover, }, ImplementationsFunc: &QueryResolverImplementationsFunc{ defaultHook: i.Implementations, }, LSIFUploadsFunc: &QueryResolverLSIFUploadsFunc{ defaultHook: i.LSIFUploads, }, RangesFunc: &QueryResolverRangesFunc{ defaultHook: i.Ranges, }, ReferencesFunc: &QueryResolverReferencesFunc{ defaultHook: i.References, }, StencilFunc: &QueryResolverStencilFunc{ defaultHook: i.Stencil, }, } }
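The generated hook-queue API above (SetDefaultHook/SetDefaultReturn for fallback behaviour, PushHook/PushReturn for one-shot behaviour, History plus Args/Results for assertions) can be exercised with a small helper like the sketch below. It is illustrative only: it assumes it lives alongside the generated code (so context, errors, and fmt are already imported there) and that the mock value m was built by the package's own constructor, which is outside this excerpt.

// illustrateMockUsage is a hypothetical helper, not part of the generated file.
func illustrateMockUsage(ctx context.Context, m *MockQueryResolver) {
    // Fallback behaviour, used whenever the one-shot hook queue is empty.
    m.StencilFunc.SetDefaultReturn(nil, nil)

    // One-shot behaviour, consumed by the next invocation only.
    m.StencilFunc.PushReturn(nil, errors.New("boom"))

    if _, err := m.Stencil(ctx); err == nil { // served by the pushed hook
        fmt.Println("expected the pushed error")
    }
    if _, err := m.Stencil(ctx); err != nil { // falls back to the default hook
        fmt.Println("unexpected error:", err)
    }

    // Every invocation is recorded and can be inspected afterwards.
    for _, call := range m.StencilFunc.History() {
        fmt.Println(call.Args(), call.Results())
    }
}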
duckdns.go
// Package duckdns implements a DNS provider for solving the DNS-01 challenge using DuckDNS. // See http://www.duckdns.org/spec.jsp for more info on updating TXT records. package duckdns import ( "errors" "fmt" "net/http" "time" "github.com/vostronet/lego/challenge/dns01" "github.com/vostronet/lego/platform/config/env" ) // Config is used to configure the creation of the DNSProvider type Config struct { Token string PropagationTimeout time.Duration PollingInterval time.Duration SequenceInterval time.Duration HTTPClient *http.Client } // NewDefaultConfig returns a default configuration for the DNSProvider func NewDefaultConfig() *Config { return &Config{ PropagationTimeout: env.GetOrDefaultSecond("DUCKDNS_PROPAGATION_TIMEOUT", dns01.DefaultPropagationTimeout), PollingInterval: env.GetOrDefaultSecond("DUCKDNS_POLLING_INTERVAL", dns01.DefaultPollingInterval), SequenceInterval: env.GetOrDefaultSecond("DUCKDNS_SEQUENCE_INTERVAL", dns01.DefaultPropagationTimeout), HTTPClient: &http.Client{ Timeout: env.GetOrDefaultSecond("DUCKDNS_HTTP_TIMEOUT", 30*time.Second), }, } } // DNSProvider adds and removes the record for the DNS challenge type DNSProvider struct { config *Config } // NewDNSProvider returns a new DNS provider using // environment variable DUCKDNS_TOKEN for adding and removing the DNS record. func NewDNSProvider() (*DNSProvider, error) { values, err := env.Get("DUCKDNS_TOKEN") if err != nil
config := NewDefaultConfig()
    config.Token = values["DUCKDNS_TOKEN"]

    return NewDNSProviderConfig(config)
}

// NewDNSProviderConfig returns a DNSProvider instance configured for DuckDNS.
func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
    if config == nil {
        return nil, errors.New("duckdns: the configuration of the DNS provider is nil")
    }

    if config.Token == "" {
        return nil, errors.New("duckdns: credentials missing")
    }

    return &DNSProvider{config: config}, nil
}

// Present creates a TXT record to fulfill the dns-01 challenge.
func (d *DNSProvider) Present(domain, token, keyAuth string) error {
    _, txtRecord := dns01.GetRecord(domain, keyAuth)
    return d.updateTxtRecord(domain, d.config.Token, txtRecord, false)
}

// CleanUp clears the DuckDNS TXT record.
func (d *DNSProvider) CleanUp(domain, token, keyAuth string) error {
    return d.updateTxtRecord(domain, d.config.Token, "", true)
}

// Timeout returns the timeout and interval to use when checking for DNS propagation.
// Adjusting here to cope with spikes in propagation times.
func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
    return d.config.PropagationTimeout, d.config.PollingInterval
}

// Sequential indicates that all DNS challenges for this provider will be resolved sequentially.
// It returns the interval between each iteration.
func (d *DNSProvider) Sequential() time.Duration {
    return d.config.SequenceInterval
}
{ return nil, fmt.Errorf("duckdns: %v", err) }
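A minimal sketch of how the pieces above could be wired together by a caller. The import path and the token value are assumptions for illustration; only constructors and methods shown above are used.

package main

import (
    "fmt"
    "log"

    // Assumed import path for the duckdns package above.
    "github.com/vostronet/lego/providers/dns/duckdns"
)

func main() {
    cfg := duckdns.NewDefaultConfig()
    cfg.Token = "example-token" // placeholder credential, not a real token

    provider, err := duckdns.NewDNSProviderConfig(cfg)
    if err != nil {
        log.Fatal(err)
    }

    timeout, interval := provider.Timeout()
    fmt.Println("propagation timeout:", timeout, "polling interval:", interval)
}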
ecb_test.go
package aescrypto

import (
    "encoding/base64"
    "fmt"
    "strconv"
    "testing"
)

func Test_ecb(t *testing.T) {
    /*
        *src: the string to encrypt
        *key: the key used for encryption; the key length can be any of 128, 192 or 256 bits
        *a 16-character key corresponds to 128 bits
    */
    src := "200"
    key := "d201e68d1fe59792c308ca0b79b03d29"
    crypted, err := AesEcbPkcs5Encrypt([]byte(src), []byte(key))
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("base64UrlSafe result:", base64.URLEncoding.EncodeToString(crypted))

    data, err := AesEcbPkcs5Decrypt(crypted, []byte(key))
    if err != nil {
        fmt.Println(err)
    }
    fmt.Println("source is :", string(data))
}

func Test_ecb2(t *testing.T) {
    /*
        *src: the string to encrypt
        *key: the key used for encryption; the key length can be
any of 128, 192 or 256 bits
        *a 16-character key corresponds to 128 bits
    */
    key := []byte("57d065d2f3442d968ecb9fc4f21847ef")
    cipherHexText := []byte("bQCXBJeiHz9maEMOpOio5Q")
    fmt.Println(qtfmDecrypt(cipherHexText, key, key))
}

func qtfmDecrypt(cipher, ekey, ikey []byte, args ...interface{}) (float64, error) {
    // Restore base64 padding: the URL-safe ciphertext arrives without trailing '='.
    // The modulo guard avoids appending a full block of '=' when the length is
    // already a multiple of 4, which would make DecodeString fail.
    equalSignLen := (4 - len(cipher)%4) % 4
    for i := 0; i < equalSignLen; i++ {
        cipher = append(cipher, '=')
    }
    crypted, err := base64.URLEncoding.DecodeString(string(cipher))
    if err != nil {
        return 0, err
    }
    data, err := AesEcbPkcs5Decrypt(crypted, ekey)
    if err != nil {
        return 0, err
    }
    f, err := strconv.ParseFloat(string(data), 64)
    if err != nil {
        return 0, err
    }
    return f, nil
}
是128bit、1
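The padding loop in qtfmDecrypt exists because the ciphertext is unpadded URL-safe base64. Below is a standalone, stdlib-only sketch (independent of the aescrypto package above) of the two equivalent ways to decode such input.

package main

import (
    "encoding/base64"
    "fmt"
    "strings"
)

func main() {
    raw := "bQCXBJeiHz9maEMOpOio5Q" // unpadded URL-safe base64

    // Option 1: restore '=' padding up to a multiple of 4, then use URLEncoding.
    padded := raw + strings.Repeat("=", (4-len(raw)%4)%4)
    a, err := base64.URLEncoding.DecodeString(padded)
    fmt.Println(len(a), err)

    // Option 2: decode directly with RawURLEncoding, which expects no padding.
    b, err := base64.RawURLEncoding.DecodeString(raw)
    fmt.Println(len(b), err)
}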
environment.ts
// This file can be replaced during build by using the `fileReplacements` array.
// `ng build --prod` replaces `environment.ts` with `environment.prod.ts`.
// The list of file replacements can be found in `angular.json`.

export const environment = {
  production: true,
  config: {
    apiKey: "AIzaSyDth0JyFHiNrZ0E4i2dm9WwFwKjJlHTsSM",
    authDomain: "underground-509f2.firebaseapp.com",
    databaseURL: "https://underground-509f2.firebaseio.com",
    projectId: "underground-509f2",
    storageBucket: "underground-509f2.appspot.com",
    messagingSenderId: "482777969395"
  }
};
 * `zone.run`, `zoneDelegate.invokeTask` for easier debugging, you can
 * import the following file, but please comment it out in production mode
 * because it will have a performance impact when an error is thrown
 */
// import 'zone.js/dist/zone-error';  // Included with Angular CLI.
/* * In development mode, to ignore zone related error stack frames such as
encoder.py
import torch.nn as nn class
(nn.Module): pass
Encoder
models.py
from sqlalchemy import Column, DateTime, Integer, String, TEXT, ForeignKey from sqlalchemy.orm import relationship from sqlalchemy.sql import func from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() class BaseModel(Base): __abstract__ = True creation_date = Column(DateTime(timezone=True), server_default=func.now()) update_date = Column(DateTime, nullable=False, server_default=func.now(), onupdate=func.now()) def __repr__(self): fields = "" for c in self.__table__.columns: if fields == "": fields = "{}='{}'".format(c.name, getattr(self, c.name)) else: fields = "{}, {}='{}'".format(fields, c.name, getattr(self, c.name)) return "<{}({})>".format(self.__class__.__name__, fields) @staticmethod def list_as_dict(items): return [i.as_dict() for i in items] def as_dict(self): return {c.name: getattr(self, c.name) for c in self.__table__.columns} class Order(BaseModel): STATUS_PENDING = "Pending" STATUS_CANCELLED = "Cancelled" STATUS_FINISHED = "Finished" STATUS_ACCEPTED = "Accepted" __tablename__ = "manufacturing_order" id = Column(Integer, primary_key=True) number_of_pieces = Column(Integer, nullable=False) description = Column(TEXT, nullable=False, default="No description") country = Column(TEXT, nullable=False, default="araba") status = Column(String(256), nullable=False, default="Created") client_id = Column(Integer, nullable=False) pieces = relationship("Piece", lazy="joined") def
(self): d = super().as_dict() d['pieces'] = [i.as_dict() for i in self.pieces] return d class Piece(BaseModel): STATUS_CREATED = "Created" STATUS_CANCELLED = "Cancelled" STATUS_QUEUED = "Queued" STATUS_MANUFACTURING = "Manufacturing" STATUS_MANUFACTURED = "Manufactured" __tablename__ = "piece" ref = Column(Integer, primary_key=True) manufacturing_date = Column(DateTime(timezone=True), server_default=None) status = Column(String(256), default=STATUS_QUEUED) order_id = Column(Integer, ForeignKey('manufacturing_order.id')) order = relationship('Order', backref='piece')
as_dict
ModuleEnvironment.py
############################################################################### # Copyright (c) 2013 INRIA # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License version 2 as # published by the Free Software Foundation; # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # # Authors: Daniel Camara <[email protected]> # Mathieu Lacage <[email protected]> ############################################################################### ''' ModuleEnvironment.py This file stores the class Module Environment responsible for the interaction between Bake and the execution of third party softwares and the operating system. ''' import os import subprocess import sys import platform from bake.Exceptions import TaskError from bake.Utils import ColorTool class ModuleEnvironment: ''' Main class to interact with the host system to execute the external tools. ''' _stopOnError = False _libpaths = set([]) _binpaths = set([]) _pkgpaths = set([]) _variables = set([]) (HIGHER, LOWER, EQUAL) = range(0,3) def __init__(self, logger, installdir, sourcedir, objdir, debug=False): ''' Internal variables initialization.''' self._logger = logger self._installdir = installdir self._sourcedir = sourcedir self._objdir = objdir self._module_name = None self._module_dir = None self._module_supports_objdir = None # self._libpaths = set([]) # self._binpaths = set([]) # self._pkgpaths = set([]) # self._variables = set([]) self._debug = debug self._sudoEnabled = False def _module_directory(self): ''' Returns the name of the directory of the on use module.''' if not self._module_dir : return self._module_name return self._module_dir @property def installdir(self): ''' Returns the name of the set installation directory.''' return self._installdir @property def debug(self): ''' Returns if this execution was set to show the debug messages or not.''' return self._debug @property def srcdir(self): ''' Returns the directory where Bake stores the source of the present module. ''' try: return os.path.join(self._sourcedir, self._module_directory()) except AttributeError as e: raise TaskError('Missing configuration: sourcedir= %s, ' 'module_directory= %s, Error: %s' % (self._sourcedir,self._module_directory(), e)) @property def srcrepo(self): ''' The root of the source repository, where all the sources for all the modules will be stored. ''' return self._sourcedir @property def objdir(self): ''' Returns the directory where Bake stores the object code of the present module. 
''' if not self._module_supports_objdir: obj = self.srcdir else: try: obj = os.path.join(self.srcdir, self._objdir) except AttributeError as e: raise TaskError('Missing configuration: sourcedir= %s, ' 'objdir= %s, Error: %s' % (self._sourcedir, self._module_directory(), e)) return obj @property def sudoEnabled(self): ''' Returns the setting of the --sudo option''' return self._sudoEnabled @property def stopOnErrorEnabled(self): ''' Returns the setting of the --stop_on_error option''' return ModuleEnvironment._stopOnError def _pkgconfig_var(self): ''' Returns the PKG_CONFIG_PATH configured environment variable.''' return 'PKG_CONFIG_PATH' def _pkgconfig_path(self): ''' Returns the PKG_CONFIG_PATH configured path. ''' return os.path.join(self._lib_path(), 'pkgconfig') def _lib_var(self): ''' Returns the value of the system configured library path.''' lib_var = {'Linux' : 'LD_LIBRARY_PATH', 'FreeBSD' : 'LD_LIBRARY_PATH', 'Darwin' : 'DYLD_LIBRARY_PATH', 'Windows' : 'PATH'} if not platform.system() in lib_var: sys.stderr('Error: Unsupported platform. Send email to ' '[email protected] (%s)' % platform.system()) sys.exit(1) return lib_var[platform.system()] def _lib_path(self): ''' Returns the value of the library path for the in-use module.''' return os.path.join(self._installdir, 'lib') def _bin_var(self): return 'PATH' def _bin_path(self): ''' Returns the value of the binary path for the in-use module.''' return os.path.join(self._installdir, 'bin') def _py_var(self): return 'PYTHONPATH' def _py_path(self): ''' Returns the value of the python path for the in-use module.''' return os.path.join(self._installdir, 'lib', 'python'+platform.python_version_tuple()[0]+ '.'+platform.python_version_tuple()[1], 'site-packages') def _append_path(self, d, name, value, sep): ''' Append the variable to the system in use configuration. ''' if not name in d: d[name] = value else: d[name] = d[name] + sep + value def start_source(self, name, dir): ''' Sets the environment to be used by the given source module.''' assert self._module_supports_objdir is None self._module_name = name self._module_dir = dir self._logger.set_current_module(name) # ensure source directory exists if not os.path.isdir(self._sourcedir): os.makedirs(self._sourcedir) def end_source(self): ''' Cleans the environment regarding the informations of the last used source module. ''' self._module_name = None self._module_dir = None self._logger.clear_current_module() def start_build(self, name, dir, supports_objdir): ''' Sets the environment to be used by the given build module.''' # assert self._module_supports_objdir is None self._module_name = name self._module_dir = dir self._module_supports_objdir = supports_objdir self._logger.set_current_module(name) if not os.path.isdir(self.installdir): os.makedirs(self.installdir) if not os.path.isdir(self.objdir): os.makedirs(self.objdir) def end_build(self): ''' Cleans the environment regarding the informations of the last used build module. 
''' self._module_name = None self._module_dir = None self._module_supports_objdir = None self._logger.clear_current_module() def exist_file(self, file): ''' Finds if the file exists in the path.''' return os.path.exists(file) def path_list(self): ''' Return path that will be searched for executables ''' pythonpath=[] if os.environ.get('PYTHONPATH'): pythonpath=os.environ.get('PYTHONPATH').split(os.pathsep) return os.environ.get('PATH').split(os.pathsep) + [self._bin_path()] + pythonpath def _program_location(self, program): ''' Finds where the executable is located in the user's path.''' # function to verify if the program exists on the given path # and if it is executable def is_exe(path): return os.path.exists(path) and os.access(path, os.X_OK) path, name = os.path.split(program) # if the path for the executable was passed as part of its name if path: if is_exe(program): return program else: # for all the directories in the path search for the executable for path in self.path_list(): exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file toFindIn=None # search for libs with that name on the library path index=program.find(".so") + program.find(".a") if index>0 : toFindIn=['/usr/lib','/usr/lib64','/usr/lib32','/usr/local/lib', '/lib','/opt/local/lib','/opt/local/Library', '/usr/local/opt'] for libpath in self._libpaths: toFindIn.append(libpath) stdLibs = [] try: libPath = os.environ[self._lib_var()] if libPath: stdLibs=libPath.split(os.pathsep) except: pass tofindIn=toFindIn+stdLibs+[self._lib_path()] elif program.endswith(".h"): toFindIn=['/usr/include', '/usr/local/include', '/usr/lib','/opt/local/include', '/usr/local/opt'] if toFindIn : for eachdir in toFindIn: if sys.platform == "darwin": # enable symlink walking for MacOS only (bug 2975) for dirname, dirnames, filenames in os.walk(eachdir, True, None, True): for filename in filenames: if filename==name: return os.path.join(dirname, filename) else: for dirname, dirnames, filenames in os.walk(eachdir): for filename in filenames: if filename==name: return os.path.join(dirname, filename) return None def _check_version(self, found, required, match_type): ''' Checks the version of the required executable.''' smallerSize=min(len(found),len(required)) if match_type == self.HIGHER: for i in range(0,smallerSize): if not found[i]: return False if int(found[i]) < int(required[i]): return False elif int(found[i]) > int(required[i]): return True return True elif match_type == self.LOWER: for i in range(0,smallerSize): if not found[i]: return True if int(found[i]) > int(required[i]): return False elif int(found[i]) < int(required[i]): return True if len(found) >= len(required): return False return True elif match_type == self.EQUAL: if len(found) != len(required): return False for i in range(0,smallerSize): if int(found[i]) != int(required[i]): return False return True else: assert False def add_libpaths(self, libpaths): ''' Adds the list of paths to the in-use library path environment variable. ''' for element in libpaths : self._libpaths.add(self.replace_variables(element)) def add_binpaths(self, libpaths): ''' Adds the list of paths to the in-use binary path environment variable. ''' for element in libpaths : self._binpaths.add(self.replace_variables(element)) def add_pkgpaths(self, libpaths): ''' Adds the list of paths to the in-use package path environment variable. 
''' for element in libpaths : self._pkgpaths.add(self.replace_variables(element)) def add_variables(self, libpaths): ''' Adds/replace the list of variables to the in-use set of environment variables. ''' for element in libpaths :
def create_environment_file(self, fileName): ''' Creates the set environment file to help users to call the Bake built modules. ''' script = "#!/bin/bash \n#### \n# Environment setting script. Automatically generated by Bake\n####\n\n" script = script + "if [ \"${BASH_SOURCE:-}\" == \"${0}\" ]; then \n" + \ " echo \"> Call with . bakeSetEnv.sh or source bakeSetEnv.sh\" \n" + \ " exit 1 \n" + \ "fi \n\n" self._binpaths.add(self._bin_path()) if os.path.isdir(self._lib_path()): self._libpaths.add(self._lib_path()) if os.path.isdir(self._lib_path()+'64'): self._libpaths.add(self._lib_path()+'64') if len(self._libpaths) > 0: script = script + self.add_onPath("LD_LIBRARY_PATH", self._libpaths) + "\n" if len(self._binpaths) > 0: script = script + self.add_onPath("PATH", self._binpaths) + "\n" if len(self._pkgpaths) > 0: script = script + self.add_onPath("PKG_CONFIG_PATH", self._pkgpaths) + "\n" from distutils.sysconfig import get_python_lib localLibPath='' libDir=get_python_lib() if libDir: begin=libDir.lower().index('python') localLibPath=os.path.join(self._lib_path(),libDir[begin:]) script = script + self.add_onPath("PYTHONPATH", [sys.path[0],self._lib_path(),localLibPath]) + "\n" for element in self._variables: script = script + " export " + element + "\n" fout = open(fileName, "w") fout.write(script) fout.close() os.chmod(fileName, 0o755) return script def add_onPath (self, variableName, vectorPath): ''' Format the variable to be added on the system. ''' returnString = " export " + variableName + "=\"${" + variableName + ":+${" + variableName + "}:}" for element in vectorPath: returnString = returnString + element + ":" # Strip extra ':' returnString = returnString[:-1] returnString = returnString + "\"" return returnString def replace_variables(self, string): ''' Replace the variables on the string, if they exist, by their system real values. ''' import re tmp = string tmp = re.sub('\$INSTALLDIR', self.installdir, tmp) tmp = re.sub('\$OBJDIR', self.objdir, tmp) tmp = re.sub('\$SRCDIR', self.srcdir, tmp) return tmp def check_program(self, program, version_arg = None, version_regexp = None, version_required = None, match_type=HIGHER): '''Checks if the program, with the desired version, exists in the system. 
''' if self._program_location(program) is None: return False if version_arg is None and version_regexp is None and version_required is None: return True else: # This assert as it was avoided the checking of the version of the # executable assert not (version_arg is None or version_regexp is # None or version_required is None) assert not (version_arg is None and version_regexp is None and version_required is None) popen = subprocess.Popen([self._program_location(program), version_arg], stdout = subprocess.PIPE, stderr = subprocess.STDOUT) (out, err) = popen.communicate('') import re reg = re.compile(version_regexp) for line in out.splitlines(): m = reg.search(line) if not m is None: found = m.groups() return self._check_version(found, version_required, match_type) def append_to_path(self, env_vars): """Sets the library and binary paths.""" for libpath in self._libpaths: self._append_path(env_vars, self._lib_var(), libpath, os.pathsep) if self.debug: print(" -> " + self._lib_var() + " " + libpath + " ") self._append_path(env_vars, self._lib_var(), self._lib_path(), os.pathsep) for libpath in self._binpaths: self._append_path(env_vars, self._bin_var(), libpath, os.pathsep) if self.debug: print(" -> " + self._bin_var() + " " + libpath + " ") self._append_path(env_vars, self._bin_var(), self._bin_path(), os.pathsep) for libpath in self._pkgpaths: self._append_path(env_vars, self._pkgconfig_var(), libpath, os.pathsep) if self.debug: print(" -> " + self._pkgconfig_var() + " " + libpath + " ") self._append_path(env_vars, self._pkgconfig_var(), self._pkgconfig_path(), os.pathsep) self._append_path(env_vars, self._py_var(), self._py_path(), os.pathsep) self._append_path(env_vars, self._py_var(), os.path.join(self._installdir, 'lib'), os.pathsep) return env_vars def run(self, args, directory = None, env = dict(), interactive = False): '''Executes a system program adding the libraries and over the correct directories. ''' if not interactive: env_string = '' if len(env) != 0: env_string = ' '.join([a + '=' + b for a,b in env.items()]) try: args_string = ' '.join(args) except TypeError as e: raise TaskError('Wrong argument type: %s, expected string,' ' error: %s' % (str(args), e)) self._logger.commands.write(env_string + ' ' + args_string + ' dir=' + str(directory) + '\n') stdin = None stdout = self._logger.stdout stderr = self._logger.stderr else: stdin = sys.stdin stdout = sys.stdout stderr = sys.stderr tmp = dict(list(os.environ.items()) + list(env.items())) # sets the library and binary paths tmp = self.append_to_path(tmp) # Calls the third party executable with the whole context try: popen = subprocess.Popen(args, stdin = stdin, stdout = stdout, stderr = stderr, cwd = directory, env = tmp) except Exception as e: raise TaskError('could not execute: %s %s. \nUnexpected error: %s' % (str(directory), str(args), str(e))) # Waits for the full execution of the third party software retcode = popen.wait() if retcode != 0: raise TaskError('Subprocess failed with error %d: %s' % (retcode, str(args)))
self._variables.add(self.replace_variables(element))
IAMPolicyDocumentTransformer.py
import json from airiam.terraform.entity_terraformers.BaseEntityTransformer import BaseEntityTransformer class IAMPolicyDocumentTransformer(BaseEntityTransformer): def __init__(self, entity_json: dict, policy_name, principal_name=None): policy_document_name = f"{policy_name}_document" if principal_name: policy_document_name = f"{principal_name}_{policy_document_name}" super().__init__('data.aws_iam_policy_document', policy_document_name, entity_json) def _generate_hcl2_code(self, entity_json) -> str: statements = IAMPolicyDocumentTransformer.force_list(entity_json['Statement']) if 'Principal' in statements[0]: statements = self.transform_assume_policy_statements(statements) else: statements = self.transform_execution_policy(statements) code = f"""data "aws_iam_policy_document" "{self._safe_name}" {{ version = "{entity_json.get('Version', '2012-10-17')}" {statements}}}""" return code @staticmethod def transform_execution_policy(statements): statement_block = "" for statement in statements: sid_string = "" if statement.get('Sid', '') != '': sid_string = f"sid = \"{statement['Sid']}\"\n " actions = IAMPolicyDocumentTransformer.force_list(statement.get('Action')) if 'Action' in statement: action_str = f"actions = {json.dumps(actions)}" else: actions = IAMPolicyDocumentTransformer.force_list(statement.get('NotAction')) action_str = f"not_actions = {json.dumps(actions)}" condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement) resources_list_str = json.dumps(IAMPolicyDocumentTransformer.force_list(statement.get('Resource'))).replace('${', '$\\u0024{') statement_block += f""" statement {{ {sid_string}effect = "{statement['Effect']}" {action_str} resources = {resources_list_str} {condition_block} }} """ return statement_block @staticmethod def transform_assume_policy_statements(statements): statement_block = "" for statement in statements: sid_string = "" if statement.get('Sid', '') != '': sid_string = f"sid = \"{statement['Sid']}\"\n " condition_block = IAMPolicyDocumentTransformer.transform_conditions(statement) statement_block += f""" statement {{ {sid_string}effect = "{statement['Effect']}" actions = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Action']))} principals {{ type = "{list(statement['Principal'].keys())[0]}" identifiers = {json.dumps(IAMPolicyDocumentTransformer.force_list(statement['Principal'][list(statement['Principal'].keys())[0]]))} }} {condition_block}}} """ return statement_block @staticmethod def transform_conditions(statement): condition_block = "" if 'Condition' in statement: for test, items in statement['Condition'].items():
test = "{test}" variable = "{variable}" values = {values_str} }} """ return condition_block @staticmethod def force_list(x): if isinstance(x, list): return x return [x] def entities_to_import(self) -> list: return []
for variable, values in items.items(): values_str = json.dumps(IAMPolicyDocumentTransformer.force_list(values)).replace('${', '$\\u0024{') condition_block += f""" condition {{
client.go
// Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v20190311

import (
    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
    tchttp "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/http"
    "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
)

const APIVersion = "2019-03-11"

type Client struct {
    common.Client
}

// Deprecated
func NewClientWithSecretId(secretId, secretKey, region string) (client *Client, err error) {
    cpf := profile.NewClientProfile()
    client = &Client{}
    client.Init(region).WithSecretId(secretId, secretKey).WithProfile(cpf)
    return
}

func NewClient(credential common.CredentialIface, region string, clientProfile *profile.ClientProfile) (client *Client, err error) {
    client = &Client{}
    client.Init(region).
        WithCredential(credential).
        WithProfile(clientProfile)
    return
}

func NewCreateBotRequest() (request *CreateBotRequest) {
    request = &CreateBotRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tbp", APIVersion, "CreateBot")
    return
}

func NewCreateBotResponse() (response *CreateBotResponse) {
    response = &CreateBotResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}

// CreateBot
// Creates a bot.
//
// Possible error codes:
//  INTERNALERROR_CALLMMSFAILED = "InternalError.CallMMSFailed"
//  INTERNALERROR_MMSINTERNALERROR = "InternalError.MMSInternalError"
//  INVALIDPARAMETER = "InvalidParameter"
func (c *Client) CreateBot(request *CreateBotRequest) (response *CreateBotResponse, err error) {
    if request == nil {
        request = NewCreateBotRequest()
    }
    response = NewCreateBotResponse()
    err = c.Send(request, response)
    return
}

func NewResetRequest() (request *ResetRequest) {
    request = &ResetRequest{
        BaseRequest: &tchttp.BaseRequest{},
    }
    request.Init().WithApiInfo("tbp", APIVersion, "Reset")
    return
}

func NewResetResponse() (response *ResetResponse) {
    response = &ResetResponse{
        BaseResponse: &tchttp.BaseResponse{},
    }
    return
}

// Reset
// Resets the session state of the current bot.
//
// Possible error codes:
//  INTERNALERROR_ERRORASR = "InternalError.ErrorAsr"
//  INTERNALERROR_ERRORMMS = "InternalError.ErrorMms"
//  INTERNALERROR_ERRORRPC = "InternalError.ErrorRpc"
//  INTERNALERROR_ERRORTTS = "InternalError.ErrorTts"
//  INTERNALERROR_NOAPPPRIVILEGE = "InternalError.NoAppPrivilege"
//  INVALIDPARAMETER = "InvalidParameter"
func (c *Client) Reset(request *ResetRequest) (response *ResetResponse, err error) {
    if request == nil {
        request = NewResetRequest()
    }
    response = NewResetResponse()
    err = c.Send(request, response)
    return
}

func NewTextProcessRequest() (request *TextProcessRequest) {
    request = &TextProcessRequest{
        BaseRequest: &tchttp.BaseRe
sponse = &TextProcessResponse{ BaseResponse: &tchttp.BaseResponse{}, } return } // TextProcess // 接收调用侧的文本输入,返回应答文本。已废弃,推荐使用最新版TextProcess接口。 // // 可能返回的错误码: // INTERNALERROR = "InternalError" // INTERNALERROR_ERRORMMS = "InternalError.ErrorMms" // INTERNALERROR_ERRORNLU = "InternalError.ErrorNlu" // INTERNALERROR_ERRORRPC = "InternalError.ErrorRpc" // INTERNALERROR_ERRORWEBHOOK = "InternalError.ErrorWebHook" // INTERNALERROR_NOAPPPRIVILEGE = "InternalError.NoAppPrivilege" // INVALIDPARAMETER = "InvalidParameter" func (c *Client) TextProcess(request *TextProcessRequest) (response *TextProcessResponse, err error) { if request == nil { request = NewTextProcessRequest() } response = NewTextProcessResponse() err = c.Send(request, response) return } func NewTextResetRequest() (request *TextResetRequest) { request = &TextResetRequest{ BaseRequest: &tchttp.BaseRequest{}, } request.Init().WithApiInfo("tbp", APIVersion, "TextReset") return } func NewTextResetResponse() (response *TextResetResponse) { response = &TextResetResponse{ BaseResponse: &tchttp.BaseResponse{}, } return } // TextReset // 会话重置接口。已废弃,推荐使用最新版TextReset接口。 // // 可能返回的错误码: // INTERNALERROR = "InternalError" // INTERNALERROR_ERRORMMS = "InternalError.ErrorMms" // INTERNALERROR_ERRORNLU = "InternalError.ErrorNlu" // INTERNALERROR_ERRORRPC = "InternalError.ErrorRpc" // INTERNALERROR_ERRORWEBHOOK = "InternalError.ErrorWebHook" // INTERNALERROR_NOAPPPRIVILEGE = "InternalError.NoAppPrivilege" // INVALIDPARAMETER = "InvalidParameter" func (c *Client) TextReset(request *TextResetRequest) (response *TextResetResponse, err error) { if request == nil { request = NewTextResetRequest() } response = NewTextResetResponse() err = c.Send(request, response) return }
quest{}, } request.Init().WithApiInfo("tbp", APIVersion, "TextProcess") return } func NewTextProcessResponse() (response *TextProcessResponse) { re
sherlock-and-the-valid-string.py
#!/bin/python3

import math
import os
import random
import re
import sys

#
# Complete the 'isValid' function below.
#
# The function is expected to return a STRING.
# The function accepts STRING s as parameter.
#

def isValid(s):
    # Count each character, then count how often each frequency occurs.
    freq = {i: s.count(i) for i in set(s)}
    fv = list(freq.values())
    ffreq = {v: fv.count(v) for v in set(fv)}
    if len(ffreq) > 2:
        # Three or more distinct frequencies can never be fixed by one removal.
        return "NO"
    elif len(ffreq) <= 1:
        # Every character already occurs the same number of times.
        return "YES"
    else:
        mx = max(ffreq)
        mn = min(ffreq)
        # Case 1: exactly one character occurs exactly once; drop it entirely.
        if mn == 1 and ffreq[mn] == 1:
            return "YES"
        # Case 2: exactly one character occurs one time more than all the others;
        # removing a single occurrence of it equalizes every frequency.
        if mx - mn == 1 and ffreq[mx] == 1:
            return "YES"
        return "NO"

if __name__ == '__main__':
    fptr = open('CON', 'w')

    s = input()

    result = isValid(s)

    fptr.write(result + '\n')

    fptr.close()
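# Illustrative spot-checks, not part of the HackerRank harness; with the corrected logic above:
#   isValid("abc")            -> "YES"  (all frequencies already equal)
#   isValid("abcc")           -> "YES"  (removing one 'c' equalizes the frequencies)
#   isValid("aabbcd")         -> "NO"   (a single removal cannot equalize 2, 2, 1, 1)
#   isValid("aabbccddeefghi") -> "NO"   (both frequency groups occur more than once)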
column.py
# -*- coding: utf-8 -*- """ pyrseas.column ~~~~~~~~~~~~~~ This module defines two classes: Column derived from DbSchemaObject and ColumnDict derived from DbObjectDict. """ from pyrseas.dbobject import DbObjectDict, DbSchemaObject, quote_id class Column(DbSchemaObject): "A table column definition" keylist = ['schema', 'table'] def to_map(self): """Convert a column to a YAML-suitable format :return: dictionary """ if hasattr(self, 'dropped'): return None dct = self._base_map() del dct['number'], dct['name'], dct['_table'] if hasattr(self, 'inherited'): dct['inherited'] = (self.inherited != 0) return {self.name: dct} def add(self): """Return a string to specify the column in a CREATE or ALTER TABLE :return: partial SQL statement """ stmt = "%s %s" % (quote_id(self.name), self.type) if hasattr(self, 'not_null'): stmt += ' NOT NULL' if hasattr(self, 'default'): if not self.default.startswith('nextval'): stmt += ' DEFAULT ' + self.default return (stmt, '' if not hasattr(self, 'description') else self.comment()) def comment(self): """Return a SQL COMMENT statement for the column :return: SQL statement """ return "COMMENT ON COLUMN %s.%s IS %s" % ( self._table.qualname(), self.name, self._comment_text()) def drop(self): """Return string to drop the column via ALTER TABLE :return: SQL statement """ if hasattr(self, 'dropped'): return "" if hasattr(self, '_table'): (comptype, objtype) = (self._table.objtype, 'COLUMN') compname = self._table.qualname() else: # TODO: this is only a PG 9.1 feature, so more is required (comptype, objtype) = ('TYPE', 'ATTRIBUTE') compname = self.table return "ALTER %s %s DROP %s %s" % (comptype, compname, objtype, self.name) def rename(self, newname): """Return SQL statement to RENAME the column :param newname: the new name of the object :return: SQL statement """ stmt = "ALTER TABLE %s RENAME COLUMN %s TO %s" % ( self._table.qualname(), self.name, newname) self.name = newname return stmt def set_sequence_default(self): """Return SQL statements to set a nextval() DEFAULT :return: list of SQL statements """ stmts = [] pth = self.set_search_path() if pth: stmts.append(pth) stmts.append("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT %s" % ( quote_id(self.table), quote_id(self.name), self.default)) return stmts def diff_map(self, incol): """Generate SQL to transform an existing column :param insequence: a YAML map defining the new column :return: list of partial SQL statements Compares the column to an input column and generates partial SQL statements to transform it into the one represented by the input. """ stmts = [] base = "ALTER COLUMN %s " % self.name # check NOT NULL if not hasattr(self, 'not_null') and hasattr(incol, 'not_null'): stmts.append(base + "SET NOT NULL") if hasattr(self, 'not_null') and not hasattr(incol, 'not_null'): stmts.append(base + "DROP NOT NULL") # check data types if not hasattr(self, 'type'): raise ValueError("Column '%s' missing datatype" % self.name) if not hasattr(incol, 'type'): raise ValueError("Input column '%s' missing datatype" % incol.name) if self.type != incol.type: # validate type conversion? 
stmts.append(base + "TYPE %s" % incol.type) # check DEFAULTs if not hasattr(self, 'default') and hasattr(incol, 'default'): stmts.append(base + "SET DEFAULT %s" % incol.default) if hasattr(self, 'default') and not hasattr(incol, 'default'): stmts.append(base + "DROP DEFAULT") return (", ".join(stmts), self.diff_description(incol)) class ColumnDict(DbObjectDict): "The collection of columns in tables in a database" cls = Column query = \ """SELECT nspname AS schema, relname AS table, attname AS name, attnum AS number, format_type(atttypid, atttypmod) AS type, attnotnull AS not_null, attinhcount AS inherited, pg_get_expr(adbin, adrelid) AS default, attisdropped AS dropped, col_description(c.oid, attnum) AS description FROM pg_attribute JOIN pg_class c ON (attrelid = c.oid) JOIN pg_namespace ON (relnamespace = pg_namespace.oid) LEFT JOIN pg_attrdef ON (attrelid = pg_attrdef.adrelid AND attnum = pg_attrdef.adnum) WHERE relkind in ('c', 'r', 'f') AND (nspname != 'pg_catalog' AND nspname != 'information_schema') AND attnum > 0 ORDER BY nspname, relname, attnum""" def _from_catalog(self): """Initialize the dictionary of columns by querying the catalogs""" for col in self.fetch(): sch, tbl = col.key() if (sch, tbl) not in self: self[(sch, tbl)] = [] self[(sch, tbl)].append(col) def from_map(self, table, incols): """Initialize the dictionary of columns by converting the input list :param table: table or type owning the columns/attributes :param incols: YAML list defining the columns """ if not incols: raise ValueError("Table '%s' has no columns" % table.name) cols = self[(table.schema, table.name)] = [] for col in incols: for key in list(col.keys()): if isinstance(col[key], dict): arg = col[key] else: arg = {'type': col[key]} cols.append(Column(schema=table.schema, table=table.name, name=key, **arg)) def diff_map(self, incols):
"""Generate SQL to transform existing columns :param incols: a YAML map defining the new columns :return: list of SQL statements Compares the existing column definitions, as fetched from the catalogs, to the input map and generates SQL statements to transform the columns accordingly. This takes care of dropping columns that are not present in the input map. It's separate so that it can be done last, after other table, constraint and index changes. """ stmts = [] if not incols or not self: return stmts for (sch, tbl) in list(incols.keys()): if (sch, tbl) in list(self.keys()): for col in self[(sch, tbl)]: if col.name not in [c.name for c in incols[(sch, tbl)]] \ and not hasattr(col, 'dropped'): stmts.append(col.drop()) return stmts
PPC64Ops.go
// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build ignore package main import "strings" // Notes: // - Less-than-64-bit integer types live in the low portion of registers. // For now, the upper portion is junk; sign/zero-extension might be optimized in the future, but not yet. // - Boolean types are zero or 1; stored in a byte, but loaded with AMOVBZ so the upper bytes of a register are zero. // - *const instructions may use a constant larger than the instruction can encode. // In this case the assembler expands to multiple instructions and uses tmp // register (R31). var regNamesPPC64 = []string{ "R0", // REGZERO, not used, but simplifies counting in regalloc "SP", // REGSP "SB", // REGSB "R3", "R4", "R5", "R6", "R7", "R8", "R9", "R10", "R11", // REGCTXT for closures "R12", "R13", // REGTLS "R14", "R15", "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23", "R24", "R25", "R26", "R27", "R28", "R29", "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen". "R31", // REGTMP "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23", "F24", "F25", "F26", "F27", "F28", "F29", "F30", "F31", // If you add registers, update asyncPreempt in runtime. // "CR0", // "CR1", // "CR2", // "CR3", // "CR4", // "CR5", // "CR6", // "CR7", // "CR", // "XER", // "LR", // "CTR", } func
init
() { // Make map from reg names to reg integers. if len(regNamesPPC64) > 64 { panic("too many registers") } num := map[string]int{} for i, name := range regNamesPPC64 { num[name] = i } buildReg := func(s string) regMask { m := regMask(0) for _, r := range strings.Split(s, " ") { if n, ok := num[r]; ok { m |= regMask(1) << uint(n) continue } panic("register " + r + " not found") } return m } var ( gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29") fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26") sp = buildReg("SP") sb = buildReg("SB") gr = buildReg("g") // cr = buildReg("CR") // ctr = buildReg("CTR") // lr = buildReg("LR") tmp = buildReg("R31") ctxt = buildReg("R11") callptr = buildReg("R12") // tls = buildReg("R13") gp01 = regInfo{inputs: nil, outputs: []regMask{gp}} gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}} gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}} gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}} gp32 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}} gp1cr = regInfo{inputs: []regMask{gp | sp | sb}} gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}} crgp = regInfo{inputs: nil, outputs: []regMask{gp}} crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}} crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}} gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}} gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}} gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}} gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}} gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}} fp01 = regInfo{inputs: nil, outputs: []regMask{fp}} fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}} fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}} gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}} fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}} fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}} fp2cr = regInfo{inputs: []regMask{fp, fp}} fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}} fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}} fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}} fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}} callerSave = regMask(gp | fp | gr) r3 = buildReg("R3") r4 = buildReg("R4") r5 = buildReg("R5") r6 = buildReg("R6") ) ops := []opData{ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1 {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1 {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1 {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1 {name: 
"SUBFCconst", argLength: 1, reg: gp11, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (with carry) {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1 {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1 {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit) {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit) {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit) {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit) {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo) {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1 {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1 {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2 {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2 {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2 {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2 {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!) {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64 {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32 // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA. // The constant shift values are packed into the aux int32. 
{name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"}, // arg0 extract bits identified by shift params" {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, // {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, // {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry) {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"}, {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit) {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit) {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0 {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1 {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1 {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit) {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit) {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit) {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit) {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit) {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit) {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit) {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 
(signed 32-bit) // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1 // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register. {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC. // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the // data instead and use FMOVDload and FMOVDstore instead (this will also dodge endianess issues). // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr) {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1 {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1 {name: "ANDCC", argLength: 2, reg: gp2cr, asm: "ANDCC", commutative: true, typ: "Flags"}, // arg0&arg1 sets CC {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1 {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1 {name: "ORCC", argLength: 2, reg: gp2cr, asm: "ORCC", commutative: true, typ: "Flags"}, // arg0|arg1 sets CC {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1) {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1 {name: "XORCC", argLength: 2, reg: gp2cr, asm: "XORCC", commutative: true, typ: "Flags"}, // arg0^arg1 sets CC {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1 {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer) {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point) {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point) {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision) {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64 {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64 {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64 {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64 {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64 {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64 {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64 {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: 
[]regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always. {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}}, asm: "ANDCC", aux: "Int64", typ: "Flags"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always. {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64 {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64 {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64 {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64 {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64 {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64 // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register. {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend. // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used. // In these cases the index register field is set to 0 and the full address is in the base register. {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes reverse order {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend reverse order {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend reverse order // In these cases an index register is used in addition to a base register // Loads from memory location arg[0] + arg[1]. 
{name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64 {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64 {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64 {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64 {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64 {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"}, {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64 {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64 {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"}, {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"}, {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"}, // Store bytes in the reverse endian order of the arch into arg0. // These are indexed stores with no offset field in the instruction so the auxint fields are not used. {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes reverse order {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes reverse order {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes reverse order // Floating point loads from arg0+aux+auxint {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float // Store bytes in the endian order of the arch into arg0+aux+auxint {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes // Store floating point value into arg0+aux+auxint {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float // Stores using index and base registers // Stores to arg[0] + arg[1] {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word {name: "MOVDstoreidx", argLength: 4, reg: 
gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg // The following ops store 0 into arg0+aux+auxint arg1=mem {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, // {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, // {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, // {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"}, {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"}, {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"}, {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"}, {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"}, // ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1 // ISEL auxInt values 4=GE 5=LE 6=NE arg2 ? arg1 : arg0 // ISELB special case where arg0, arg1 values are 0, 1 for boolean result {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above {name: "ISELB", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above // pseudo-ops {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise. {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise. {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise. {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise. 
{name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise. {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise. {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise. {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN // Scheduler ensures LoweredGetClosurePtr occurs only in entry block, // and sorts it to the very beginning of the block to prevent other // use of the closure pointer. {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true}, // LoweredGetCallerSP returns the SP of the caller of the current function. {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, // LoweredGetCallerPC evaluates to the PC to which its "caller" will return. // I.e., if f calls g "calls" getcallerpc, // the result should be the PC within f that g will return to. // See runtime/stubs.go for a more detailed discussion. {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil. {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true}, // Round ops to block fused-multiply-add extraction. {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true}, {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. 
arg0=codeptr, arg1=mem, auxint=argsize, returns mem // large or unaligned zeroing // arg0 = address of memory to zero (in R3, changed as side effect) // returns mem // // a loop is generated when there is more than one iteration // needed to clear 4 doublewords // // XXLXOR VS32,VS32,VS32 // MOVD $len/32,R31 // MOVD R31,CTR // MOVD $16,R31 // loop: // STXVD2X VS32,(R0)(R3) // STXVD2X VS32,(R31),R3) // ADD R3,32 // BC loop // remaining doubleword clears generated as needed // MOVD R0,(R3) // MOVD R0,8(R3) // MOVD R0,16(R3) // MOVD R0,24(R3) // one or more of these to clear remainder < 8 bytes // MOVW R0,n1(R3) // MOVH R0,n2(R3) // MOVB R0,n3(R3) { name: "LoweredZero", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{buildReg("R20")}, clobbers: buildReg("R20"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredZeroShort", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{gp}}, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredQuadZeroShort", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{gp}, }, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, { name: "LoweredQuadZero", aux: "Int64", argLength: 2, reg: regInfo{ inputs: []regMask{buildReg("R20")}, clobbers: buildReg("R20"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, unsafePoint: true, }, // R31 is temp register // Loop code: // MOVD len/32,R31 set up loop ctr // MOVD R31,CTR // MOVD $16,R31 index register // loop: // LXVD2X (R0)(R4),VS32 // LXVD2X (R31)(R4),VS33 // ADD R4,$32 increment src // STXVD2X VS32,(R0)(R3) // STXVD2X VS33,(R31)(R3) // ADD R3,$32 increment dst // BC 16,0,loop branch ctr // For this purpose, VS32 and VS33 are treated as // scratch registers. Since regalloc does not // track vector registers, even if it could be marked // as clobbered it would have no effect. // TODO: If vector registers are managed by regalloc // mark these as clobbered. // // Bytes not moved by this loop are moved // with a combination of the following instructions, // starting with the largest sizes and generating as // many as needed, using the appropriate offset value. // MOVD n(R4),R14 // MOVD R14,n(R3) // MOVW n1(R4),R14 // MOVW R14,n1(R3) // MOVH n2(R4),R14 // MOVH R14,n2(R3) // MOVB n3(R4),R14 // MOVB R14,n3(R3) { name: "LoweredMove", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: buildReg("R20 R21"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, { name: "LoweredMoveShort", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{gp, gp}, }, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, // The following is similar to the LoweredMove, but uses // LXV instead of LXVD2X, which does not require an index // register and will do 4 in a loop instead of only. 
{ name: "LoweredQuadMove", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: buildReg("R20 R21"), }, clobberFlags: true, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, { name: "LoweredQuadMoveShort", aux: "Int64", argLength: 3, reg: regInfo{ inputs: []regMask{gp, gp}, }, typ: "Mem", faultOnNilArg0: true, faultOnNilArg1: true, unsafePoint: true, }, {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true}, // atomic add32, 64 // LWSYNC // LDAR (Rarg0), Rout // ADD Rarg1, Rout // STDCCC Rout, (Rarg0) // BNE -3(PC) // return new sum {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic exchange32, 64 // LWSYNC // LDAR (Rarg0), Rout // STDCCC Rarg1, (Rarg0) // BNE -2(PC) // ISYNC // return old val {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic compare and swap. // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero. // if *arg0 == arg1 { // *arg0 = arg2 // return (true, memory) // } else { // return (false, memory) // } // SYNC // LDAR (Rarg0), Rtmp // CMP Rarg1, Rtmp // BNE 3(PC) // STDCCC Rarg2, (Rarg0) // BNE -4(PC) // CBNZ Rtmp, -4(PC) // CSET EQ, Rout {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true}, // atomic 8/32 and/or. // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero. // LBAR/LWAT (Rarg0), Rtmp // AND/OR Rarg1, Rtmp // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp // BNE Rtmp, -3(PC) {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true}, // LoweredWB invokes runtime.gcWriteBarrier. 
arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and its arguments R20 and R21, // but may clobber anything else, including R31 (REGTMP). {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"}, // There are three of these functions so that they can have three different register inputs. // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the // default registers to match so we don't need to copy registers around unnecessarily. {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go). // (InvertFlags (CMP a b)) == (CMP b a) // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant, // then we do (LessThan (InvertFlags (CMP b a))) instead. // Rewrites will convert this to (GreaterThan (CMP b a)). // InvertFlags is a pseudo-op which can't appear in assembly output. {name: "InvertFlags", argLength: 1}, // reverse direction of arg0 // Constant flag values. For any comparison, there are 3 possible // outcomes: either the three from the signed total order (<,==,>) // or the three from the unsigned total order, depending on which // comparison operation was used (CMP or CMPU -- PPC is different from // the other architectures, which have a single comparison producing // both signed and unsigned comparison results.) // These ops are for temporary use by rewrite rules. They // cannot appear in the generated assembly. {name: "FlagEQ"}, // equal {name: "FlagLT"}, // signed < or unsigned < {name: "FlagGT"}, // signed > or unsigned > } blocks := []blockData{ {name: "EQ", controls: 1}, {name: "NE", controls: 1}, {name: "LT", controls: 1}, {name: "LE", controls: 1}, {name: "GT", controls: 1}, {name: "GE", controls: 1}, {name: "FLT", controls: 1}, {name: "FLE", controls: 1}, {name: "FGT", controls: 1}, {name: "FGE", controls: 1}, } archs = append(archs, arch{ name: "PPC64", pkg: "cmd/internal/obj/ppc64", genfile: "../../ppc64/ssa.go", ops: ops, blocks: blocks, regnames: regNamesPPC64, gpregmask: gp, fpregmask: fp, framepointerreg: int8(num["SP"]), linkreg: -1, // not used }) }
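# Illustrative sketch in Python, not part of PPC64Ops.go: the buildReg helper above turns a
# space-separated register list into a bit mask by OR-ing 1 << index for each named register.
# The register list below is a hypothetical truncation of regNamesPPC64.
reg_index = {name: i for i, name in enumerate(["R0", "SP", "SB", "R3", "R4", "R5"])}

def build_reg(names):
    mask = 0
    for r in names.split(" "):
        mask |= 1 << reg_index[r]  # one bit per register, bit position = index in the name list
    return mask

print(bin(build_reg("R3 R4")))  # 0b11000 -> bits 3 and 4 set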
response.rs
//! Server Responses //! //! These are responses sent by a `hyper::Server` to clients, after //! receiving a request. use std::io::IoResult; use time::now_utc; use header; use header::common; use http::{CR, LF, LINE_ENDING, HttpWriter}; use http::HttpWriter::{ThroughWriter, ChunkedWriter, SizedWriter}; use status; use net::{Fresh, Streaming}; use version; /// The outgoing half for a Tcp connection, created by a `Server` and given to a `Handler`. pub struct Response<'a, W = Fresh> { /// The HTTP version of this response. pub version: version::HttpVersion, // Stream the Response is writing to, not accessible through UnwrittenResponse body: HttpWriter<&'a mut (Writer + 'a)>, // The status code for the request. status: status::StatusCode, // The outgoing headers on this response. headers: header::Headers } impl<'a, W> Response<'a, W> {
    /// The status of this response.
    #[inline]
    pub fn status(&self) -> status::StatusCode { self.status }
/// The headers of this response. pub fn headers(&self) -> &header::Headers { &self.headers } /// Construct a Response from its constituent parts. pub fn construct(version: version::HttpVersion, body: HttpWriter<&'a mut (Writer + 'a)>, status: status::StatusCode, headers: header::Headers) -> Response<'a, Fresh> { Response { status: status, version: version, body: body, headers: headers } } /// Deconstruct this Response into its constituent parts. pub fn deconstruct(self) -> (version::HttpVersion, HttpWriter<&'a mut (Writer + 'a)>, status::StatusCode, header::Headers) { (self.version, self.body, self.status, self.headers) } } impl<'a> Response<'a, Fresh> { /// Creates a new Response that can be used to write to a network stream. pub fn new(stream: &'a mut (Writer + 'a)) -> Response<'a, Fresh> { Response { status: status::StatusCode::Ok, version: version::HttpVersion::Http11, headers: header::Headers::new(), body: ThroughWriter(stream) } } /// Consume this Response<Fresh>, writing the Headers and Status and creating a Response<Streaming> pub fn start(mut self) -> IoResult<Response<'a, Streaming>> { debug!("writing head: {} {}", self.version, self.status); try!(write!(&mut self.body, "{} {}{}{}", self.version, self.status, CR as char, LF as char)); if !self.headers.has::<common::Date>() { self.headers.set(common::Date(now_utc())); } let mut chunked = true; let mut len = 0; match self.headers.get::<common::ContentLength>() { Some(cl) => { chunked = false; len = **cl; }, None => () }; // cant do in match above, thanks borrowck if chunked { let encodings = match self.headers.get_mut::<common::TransferEncoding>() { Some(&common::TransferEncoding(ref mut encodings)) => { //TODO: check if chunked is already in encodings. use HashSet? encodings.push(common::transfer_encoding::Encoding::Chunked); false }, None => true }; if encodings { self.headers.set::<common::TransferEncoding>( common::TransferEncoding(vec![common::transfer_encoding::Encoding::Chunked])) } } debug!("headers [\n{}]", self.headers); try!(write!(&mut self.body, "{}", self.headers)); try!(self.body.write(LINE_ENDING)); let stream = if chunked { ChunkedWriter(self.body.unwrap()) } else { SizedWriter(self.body.unwrap(), len) }; // "copy" to change the phantom type Ok(Response { version: self.version, body: stream, status: self.status, headers: self.headers }) } /// Get a mutable reference to the status. #[inline] pub fn status_mut(&mut self) -> &mut status::StatusCode { &mut self.status } /// Get a mutable reference to the Headers. pub fn headers_mut(&mut self) -> &mut header::Headers { &mut self.headers } } impl<'a> Response<'a, Streaming> { /// Flushes all writing of a response to the client. pub fn end(self) -> IoResult<()> { debug!("ending"); try!(self.body.end()); Ok(()) } } impl<'a> Writer for Response<'a, Streaming> { fn write(&mut self, msg: &[u8]) -> IoResult<()> { debug!("write {} bytes", msg.len()); self.body.write(msg) } fn flush(&mut self) -> IoResult<()> { self.body.flush() } }
widgets.ts
// (C) 2021 GoodData Corporation import { IFilter, ObjRef } from "@gooddata/sdk-model"; import { IBrokenAlertFilterBasicInfo } from "../types/alertTypes"; import { IDashboardQuery } from "./base"; /** * Given a reference to a widget, this query will obtain the filters that should be used when executing it. * These will respect the ignored filters on widget level as well as the filters specified in the insight itself. * Filters returned by this query should be used with {@link @gooddata/sdk-model#insightSetFilters} to obtain * insight that is ready for execution or used to execute a KPI. * * @alpha */ export interface QueryWidgetFilters extends IDashboardQuery<IFilter[]> { readonly type: "GDC.DASH/QUERY.WIDGET.FILTERS"; readonly payload: { readonly widgetRef: ObjRef; readonly widgetFilterOverrides: IFilter[] | undefined; }; } /** * Creates action thought which you can query dashboard component for filters that should be used by a given widget. * * @param widgetRef - reference to insight widget * @param widgetFilterOverrides - optionally specify filters to be applied on top of the dashboard and insight filters * @param correlationId - optionally specify correlation id to use for this command. this will be included in all * events that will be emitted during the command processing * @alpha */ export function queryWidgetFilters( widgetRef: ObjRef, widgetFilterOverrides?: IFilter[], correlationId?: string, ): QueryWidgetFilters { return { type: "GDC.DASH/QUERY.WIDGET.FILTERS", correlationId, payload: { widgetRef, widgetFilterOverrides, }, }; } /** * This query base on given kpi widgetRef calculate BrokenAlertFilterBasicInfo {@link IBrokenAlertFilterBasicInfo} * In case any broken alert filters query return empty array. * @alpha */ export interface QueryWidgetBrokenAlerts extends IDashboardQuery<IBrokenAlertFilterBasicInfo[]> { readonly type: "GDC.DASH/QUERY.WIDGET.BROKEN_ALERTS";
    readonly payload: {
        readonly widgetRef: ObjRef;
    };
}

/**
 * Creates an action through which you can query the dashboard component for broken alert filters.
 *
 * @param widgetRef - reference to the insight KPI widget
 * @param correlationId - optionally specify correlation id to use for this command.
 * @returns
 *
 * @alpha
 */
export function queryWidgetBrokenAlerts(widgetRef: ObjRef, correlationId?: string): QueryWidgetBrokenAlerts {
    return {
        type: "GDC.DASH/QUERY.WIDGET.BROKEN_ALERTS",
        correlationId,
        payload: {
            widgetRef,
        },
    };
}
urls.py
from django.conf.urls import include, url from rest_framework import routers from . import views
router = routers.DefaultRouter()
router.register('courses', views.CourseViewSet)

urlpatterns = [
    url(r'^subjects/$', views.SubjectListView.as_view(), name='subject_list'),
    url(r'^subjects/(?P<pk>\d+)/$', views.SubjectDetailView.as_view(), name='subject_detail'),
    url(r'^', include(router.urls)),
]
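# Illustrative note, not part of the original urls.py: include(router.urls) above pulls in the
# DefaultRouter's API root plus list/detail routes derived from the registered prefix, roughly
# /courses/ for list actions and /courses/<pk>/ for detail actions of CourseViewSet.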
factionOrder.min.js
'use strict'; var completedFactions; $(function () { if (!localStorage['completedFactions']) { localStorage['completedFactions'] = "[]"; } completedFactions = JSON.parse(localStorage['completedFactions']); $('.order-list').on('click', 'button', function () { completedFactions.push($(this).data('faction')); localStorage['completedFactions'] = JSON.stringify(completedFactions); refreshLists(); }); if (localStorage['minimum-importance']) { $('#minimum-importance').val(localStorage['minimum-importance']); } $('#minimum-importance').on('change', function () { localStorage['minimum-importance'] = $(this).val(); refreshLists(); }); refreshLists(); }); function createFactionListItem(id) { let $itemContainer = $('<div></div>'); $('<button data-faction="' + id + '">Hide Faction</button>').appendTo($itemContainer); $('<a href="faction-details.html?faction=' + id + '">' + factionList[id].name + '</a>').appendTo($itemContainer); return $itemContainer; } function refreshLists() { $('.order-list').empty(); var minimumImportance = parseInt($('#minimum-importance').val()); var incompleteFactions = factionList.filter(function (faction) { //If faction cannot be raised then remove it if (!faction.howToRaise) { return false; } //Filter out faction if it's already completed (or skipped) if (completedFactions.includes(factionList.indexOf(faction))) { return false; } //Filter out faction if it falls below our minimum threshold if (faction.importance < minimumImportance) { return false; } return true; }); var safeFactions = []; var unsafeFactions = []; var opposedFactions = []; //For every incomplete faction check it against every other incomplete faction to make sure none will hurt it for (var i = 0; i < incompleteFactions.length; i++) { var factionId = factionList.indexOf(incompleteFactions[i]); var factionSafe = true; for (var j = 0; j < incompleteFactions.length; j++) { if (incompleteFactions[j].factionsLowered && incompleteFactions[j].factionsLowered.includes(factionId)) { factionSafe = false; //Check if this is bidirectional let opposedFactionId = factionList.indexOf(incompleteFactions[j]); if (incompleteFactions[i].factionsLowered && incompleteFactions[i].factionsLowered.includes(opposedFactionId)) { //To prevent duplicates only add it if j is higher than i if (j > i) { opposedFactions.push([factionId, opposedFactionId]); } } break; } }
if (factionSafe) { safeFactions.push(factionId); } else { unsafeFactions.push(factionId); } } for (var i = 0; i < safeFactions.length; i++) { $('#safe-factions').append(createFactionListItem(safeFactions[i])); } for (var i = 0; i < unsafeFactions.length; i++) { $('#unsafe-factions').append(createFactionListItem(unsafeFactions[i])); } for (var i = 0; i < opposedFactions.length; i++) { let $factionRow = $('<div></div>'); $factionRow.append(createFactionListItem(opposedFactions[i][0])); $factionRow.append(createFactionListItem(opposedFactions[i][1])); $factionRow.appendTo('#opposed-factions'); } }
coinchooser.py
#!/usr/bin/env python # # Electrum - lightweight ParkByte client # Copyright (C) 2015 kyuupichan@gmail # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from collections import defaultdict, namedtuple from math import floor, log10 import struct from parkbyte import sha256, COIN, TYPE_ADDRESS from transaction import Transaction from util import NotEnoughFunds, PrintError, profiler # A simple deterministic PRNG. Used to deterministically shuffle a # set of coins - the same set of coins should produce the same output. # Although choosing UTXOs "randomly" we want it to be deterministic, # so if sending twice from the same UTXO set we choose the same UTXOs # to spend. This prevents attacks on users by malicious or stale # servers. class PRNG: def __init__(self, seed): self.sha = sha256(seed) self.pool = bytearray() def get_bytes(self, n): while len(self.pool) < n: self.pool.extend(self.sha) self.sha = sha256(self.sha) result, self.pool = self.pool[:n], self.pool[n:] return result def random(self): # Returns random double in [0, 1) four = self.get_bytes(4) return struct.unpack("I", four)[0] / 4294967296.0 def randint(self, start, end): # Returns random integer in [start, end) return start + int(self.random() * (end - start)) def choice(self, seq): return seq[int(self.random() * len(seq))] def shuffle(self, x): for i in reversed(xrange(1, len(x))): # pick an element in x[:i+1] with which to exchange x[i] j = int(self.random() * (i+1)) x[i], x[j] = x[j], x[i] Bucket = namedtuple('Bucket', ['desc', 'size', 'value', 'coins']) def strip_unneeded(bkts, sufficient_funds): '''Remove buckets that are unnecessary in achieving the spend amount''' bkts = sorted(bkts, key = lambda bkt: bkt.value) for i in range(len(bkts)): if not sufficient_funds(bkts[i + 1:]): return bkts[i:] # Shouldn't get here return bkts class CoinChooserBase(PrintError): def keys(self, coins): raise NotImplementedError def bucketize_coins(self, coins): keys = self.keys(coins) buckets = defaultdict(list) for key, coin in zip(keys, coins): buckets[key].append(coin) def make_Bucket(desc, coins): size = sum(Transaction.estimated_input_size(coin) for coin in coins) value = sum(coin['value'] for coin in coins) return Bucket(desc, size, value, coins) return map(make_Bucket, buckets.keys(), buckets.values()) def penalty_func(self, tx): def penalty(candidate): return 0 return penalty def change_amounts(self, tx, count, fee_estimator, dust_threshold): # Break change up if bigger than max_change output_amounts = [o[2] for o in tx.outputs()] # Don't split 
change of less than 0.02 PKB max_change = max(max(output_amounts) * 1.25, 0.02 * COIN) # Use N change outputs for n in range(1, count + 1): # How much is left if we add this many change outputs? change_amount = max(0, tx.get_fee() - fee_estimator(n)) if change_amount // n <= max_change: break # Get a handle on the precision of the output amounts; round our # change to look similar def trailing_zeroes(val): s = str(val) return len(s) - len(s.rstrip('0')) zeroes = map(trailing_zeroes, output_amounts) min_zeroes = min(zeroes) max_zeroes = max(zeroes) zeroes = range(max(0, min_zeroes - 1), (max_zeroes + 1) + 1) # Calculate change; randomize it a bit if using more than 1 output remaining = change_amount amounts = [] while n > 1: average = remaining // n amount = self.p.randint(int(average * 0.7), int(average * 1.3)) precision = min(self.p.choice(zeroes), int(floor(log10(amount)))) amount = int(round(amount, -precision)) amounts.append(amount) remaining -= amount n -= 1 # Last change output. Round down to maximum precision but lose # no more than 100 satoshis to fees (2dp) N = pow(10, min(2, zeroes[0])) amount = (remaining // N) * N amounts.append(amount) assert sum(amounts) <= change_amount return amounts def change_outputs(self, tx, change_addrs, fee_estimator, dust_threshold): amounts = self.change_amounts(tx, len(change_addrs), fee_estimator, dust_threshold) assert min(amounts) >= 0 assert len(change_addrs) >= len(amounts) # If change is above dust threshold after accounting for the # size of the change output, add it to the transaction. dust = sum(amount for amount in amounts if amount < dust_threshold) amounts = [amount for amount in amounts if amount >= dust_threshold] change = [(TYPE_ADDRESS, addr, amount) for addr, amount in zip(change_addrs, amounts)] self.print_error('change:', change) if dust: self.print_error('not keeping dust', dust) return change def make_tx(self, coins, outputs, change_addrs, fee_estimator, dust_threshold): '''Select unspent coins to spend to pay outputs. 
        If the change is greater than dust_threshold (after adding
        the change output to the transaction) it is kept, otherwise none
        is sent and it is added to the transaction fee.'''

        # Deterministic randomness from coins
        utxos = [c['prevout_hash'] + str(c['prevout_n']) for c in coins]
        self.p = PRNG(''.join(sorted(utxos)))

        # Copy the outputs so when adding change we don't modify "outputs"
        tx = Transaction.from_io([], outputs[:])
        # Size of the transaction with no inputs and no change
        base_size = tx.estimated_size()
        spent_amount = tx.output_value()

        def sufficient_funds(buckets):
            '''Given a list of buckets, return True if it has enough
            value to pay for the transaction'''
            total_input = sum(bucket.value for bucket in buckets)
            total_size = sum(bucket.size for bucket in buckets) + base_size
            return total_input >= spent_amount + fee_estimator(total_size)

        # Collect the coins into buckets, choose a subset of the buckets
        buckets = self.bucketize_coins(coins)
        buckets = self.choose_buckets(buckets, sufficient_funds,
                                      self.penalty_func(tx))

        tx.add_inputs([coin for b in buckets for coin in b.coins])
        tx_size = base_size + sum(bucket.size for bucket in buckets)

        # This takes a count of change outputs and returns a tx fee;
        # each pay-to-parkbyte-address output serializes as 34 bytes
        fee = lambda count: fee_estimator(tx_size + count * 34)
        change = self.change_outputs(tx, change_addrs, fee, dust_threshold)
        tx.add_outputs(change)

        self.print_error("using %d inputs" % len(tx.inputs()))
        self.print_error("using buckets:", [bucket.desc for bucket in buckets])

        return tx

class CoinChooserOldestFirst(CoinChooserBase):
    '''Maximize transaction priority. Select the oldest unspent
    transaction outputs in your wallet, that are sufficient to cover
    the spent amount. Then, remove any unneeded inputs, starting with
    the smallest in value.
    '''

    def keys(self, coins):
        return [coin['prevout_hash'] + ':' + str(coin['prevout_n'])
                for coin in coins]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
        '''Spend the oldest buckets first.'''
        # Unconfirmed coins are young, not old
        adj_height = lambda height: 99999999 if height == 0 else height
        buckets.sort(key = lambda b: max(adj_height(coin['height'])
                                         for coin in b.coins))
        selected = []
        for bucket in buckets:
            selected.append(bucket)
            if sufficient_funds(selected):
                return strip_unneeded(selected, sufficient_funds)
        else:
            raise NotEnoughFunds()

class CoinChooserRandom(CoinChooserBase):

    def bucket_candidates(self, buckets, sufficient_funds):
        '''Returns a list of bucket sets.'''
        candidates = set()

        # Add all singletons
        for n, bucket in enumerate(buckets):
            if sufficient_funds([bucket]):
                candidates.add((n, ))

        # And now some random ones
        attempts = min(100, (len(buckets) - 1) * 10 + 1)
        permutation = range(len(buckets))
        for i in range(attempts):
            # Get a random permutation of the buckets, and
            # incrementally combine buckets until sufficient
            self.p.shuffle(permutation)
            bkts = []
            for count, index in enumerate(permutation):
                bkts.append(buckets[index])
                if sufficient_funds(bkts):
                    candidates.add(tuple(sorted(permutation[:count + 1])))
                    break
            else:
                raise NotEnoughFunds()

        candidates = [[buckets[n] for n in c] for c in candidates]
        return [strip_unneeded(c, sufficient_funds) for c in candidates]

    def choose_buckets(self, buckets, sufficient_funds, penalty_func):
class CoinChooserPrivacy(CoinChooserRandom): '''Attempts to better preserve user privacy. First, if any coin is spent from a user address, all coins are. Compared to spending from other addresses to make up an amount, this reduces information leakage about sender holdings. It also helps to reduce blockchain UTXO bloat, and reduce future privacy loss that would come from reusing that address' remaining UTXOs. Second, it penalizes change that is quite different to the sent amount. Third, it penalizes change that is too big.''' def keys(self, coins): return [coin['address'] for coin in coins] def penalty_func(self, tx): min_change = min(o[2] for o in tx.outputs()) * 0.75 max_change = max(o[2] for o in tx.outputs()) * 1.33 spent_amount = sum(o[2] for o in tx.outputs()) def penalty(buckets): badness = len(buckets) - 1 total_input = sum(bucket.value for bucket in buckets) change = float(total_input - spent_amount) # Penalize change not roughly in output range if change < min_change: badness += (min_change - change) / (min_change + 10000) elif change > max_change: badness += (change - max_change) / (max_change + 10000) # Penalize large change; 5 PKB excess ~= using 1 more input badness += change / (COIN * 5) return badness return penalty COIN_CHOOSERS = {'Priority': CoinChooserOldestFirst, 'Privacy': CoinChooserPrivacy} def get_name(config): kind = config.get('coin_chooser') if not kind in COIN_CHOOSERS: kind = 'Priority' return kind def get_coin_chooser(config): klass = COIN_CHOOSERS[get_name(config)] return klass()
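A minimal usage sketch (not part of the original file), assuming only the helpers defined above: it shows how get_name falls back to the 'Priority' chooser when the config carries an unknown or missing coin_chooser value. FakeConfig is a hypothetical stand-in for the wallet's real config object; only its get(key) method is assumed here.

# Hedged sketch -- FakeConfig is illustrative only, not the real config class.
class FakeConfig(object):
    def __init__(self, values):
        self.values = values
    def get(self, key, default=None):
        return self.values.get(key, default)

assert get_name(FakeConfig({'coin_chooser': 'Privacy'})) == 'Privacy'
assert get_name(FakeConfig({'coin_chooser': 'Bogus'})) == 'Priority'   # unknown kind -> fallback
assert isinstance(get_coin_chooser(FakeConfig({})), CoinChooserOldestFirst)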
candidates = self.bucket_candidates(buckets, sufficient_funds) penalties = [penalty_func(cand) for cand in candidates] winner = candidates[penalties.index(min(penalties))] self.print_error("Bucket sets:", len(buckets)) self.print_error("Winning penalty:", min(penalties)) return winner
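A short illustrative sketch (not from the original source) of the determinism property documented above the PRNG class: seeding from the sorted UTXO identifiers means the same coin set always shuffles the same way, so a repeated payment from identical coins selects identical inputs regardless of what a server reports. It relies only on the PRNG class defined in this file and the sha256 helper it already imports; the coin identifiers below are made up.

# Hedged demo -- not called anywhere by the wallet code itself.
def _demo_prng_determinism():
    coins = ['aa11:0', 'bb22:1', 'cc33:0', 'dd44:5']
    seed = ''.join(sorted(coins))
    a, b = list(coins), list(coins)
    PRNG(seed).shuffle(a)
    PRNG(seed).shuffle(b)
    assert a == b                       # same seed -> same ordering
    assert sorted(a) == sorted(coins)   # still a permutation of the inputs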
pulumiTypes.go
// *** WARNING: this file was generated by the Pulumi SDK Generator. *** // *** Do not edit by hand unless you're certain you know what you are doing! *** package v20190801 import ( "context" "reflect" "github.com/pulumi/pulumi/sdk/v3/go/pulumi" ) // The shipping address of the customer. type Address struct { // The address line1. AddressLine1 string `pulumi:"addressLine1"` // The address line2. AddressLine2 *string `pulumi:"addressLine2"` // The address line3. AddressLine3 *string `pulumi:"addressLine3"` // The city name. City string `pulumi:"city"` // The country name. Country string `pulumi:"country"` // The postal code. PostalCode string `pulumi:"postalCode"` // The state name. State string `pulumi:"state"` } // AddressInput is an input type that accepts AddressArgs and AddressOutput values. // You can construct a concrete instance of `AddressInput` via: // // AddressArgs{...} type AddressInput interface { pulumi.Input ToAddressOutput() AddressOutput ToAddressOutputWithContext(context.Context) AddressOutput } // The shipping address of the customer. type AddressArgs struct { // The address line1. AddressLine1 pulumi.StringInput `pulumi:"addressLine1"` // The address line2. AddressLine2 pulumi.StringPtrInput `pulumi:"addressLine2"` // The address line3. AddressLine3 pulumi.StringPtrInput `pulumi:"addressLine3"` // The city name. City pulumi.StringInput `pulumi:"city"` // The country name. Country pulumi.StringInput `pulumi:"country"` // The postal code. PostalCode pulumi.StringInput `pulumi:"postalCode"` // The state name. State pulumi.StringInput `pulumi:"state"` } func (AddressArgs) ElementType() reflect.Type { return reflect.TypeOf((*Address)(nil)).Elem() } func (i AddressArgs) ToAddressOutput() AddressOutput { return i.ToAddressOutputWithContext(context.Background()) } func (i AddressArgs) ToAddressOutputWithContext(ctx context.Context) AddressOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressOutput) } func (i AddressArgs) ToAddressPtrOutput() AddressPtrOutput { return i.ToAddressPtrOutputWithContext(context.Background()) } func (i AddressArgs) ToAddressPtrOutputWithContext(ctx context.Context) AddressPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressOutput).ToAddressPtrOutputWithContext(ctx) } // AddressPtrInput is an input type that accepts AddressArgs, AddressPtr and AddressPtrOutput values. // You can construct a concrete instance of `AddressPtrInput` via: // // AddressArgs{...} // // or: // // nil type AddressPtrInput interface { pulumi.Input ToAddressPtrOutput() AddressPtrOutput ToAddressPtrOutputWithContext(context.Context) AddressPtrOutput } type addressPtrType AddressArgs func AddressPtr(v *AddressArgs) AddressPtrInput { return (*addressPtrType)(v) } func (*addressPtrType) ElementType() reflect.Type { return reflect.TypeOf((**Address)(nil)).Elem() } func (i *addressPtrType) ToAddressPtrOutput() AddressPtrOutput { return i.ToAddressPtrOutputWithContext(context.Background()) } func (i *addressPtrType) ToAddressPtrOutputWithContext(ctx context.Context) AddressPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressPtrOutput) } // The shipping address of the customer. 
type AddressOutput struct{ *pulumi.OutputState } func (AddressOutput) ElementType() reflect.Type { return reflect.TypeOf((*Address)(nil)).Elem() } func (o AddressOutput) ToAddressOutput() AddressOutput { return o } func (o AddressOutput) ToAddressOutputWithContext(ctx context.Context) AddressOutput { return o } func (o AddressOutput) ToAddressPtrOutput() AddressPtrOutput { return o.ToAddressPtrOutputWithContext(context.Background()) } func (o AddressOutput) ToAddressPtrOutputWithContext(ctx context.Context) AddressPtrOutput { return o.ApplyT(func(v Address) *Address { return &v }).(AddressPtrOutput) } // The address line1. func (o AddressOutput) AddressLine1() pulumi.StringOutput { return o.ApplyT(func(v Address) string { return v.AddressLine1 }).(pulumi.StringOutput) } // The address line2. func (o AddressOutput) AddressLine2() pulumi.StringPtrOutput { return o.ApplyT(func(v Address) *string { return v.AddressLine2 }).(pulumi.StringPtrOutput) } // The address line3. func (o AddressOutput) AddressLine3() pulumi.StringPtrOutput { return o.ApplyT(func(v Address) *string { return v.AddressLine3 }).(pulumi.StringPtrOutput) } // The city name. func (o AddressOutput) City() pulumi.StringOutput { return o.ApplyT(func(v Address) string { return v.City }).(pulumi.StringOutput) } // The country name. func (o AddressOutput) Country() pulumi.StringOutput { return o.ApplyT(func(v Address) string { return v.Country }).(pulumi.StringOutput) } // The postal code. func (o AddressOutput) PostalCode() pulumi.StringOutput { return o.ApplyT(func(v Address) string { return v.PostalCode }).(pulumi.StringOutput) } // The state name. func (o AddressOutput) State() pulumi.StringOutput { return o.ApplyT(func(v Address) string { return v.State }).(pulumi.StringOutput) } type AddressPtrOutput struct{ *pulumi.OutputState } func (AddressPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**Address)(nil)).Elem() } func (o AddressPtrOutput) ToAddressPtrOutput() AddressPtrOutput { return o } func (o AddressPtrOutput) ToAddressPtrOutputWithContext(ctx context.Context) AddressPtrOutput { return o } func (o AddressPtrOutput) Elem() AddressOutput { return o.ApplyT(func(v *Address) Address { return *v }).(AddressOutput) } // The address line1. func (o AddressPtrOutput) AddressLine1() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return &v.AddressLine1 }).(pulumi.StringPtrOutput) } // The address line2. func (o AddressPtrOutput) AddressLine2() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return v.AddressLine2 }).(pulumi.StringPtrOutput) } // The address line3. func (o AddressPtrOutput) AddressLine3() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return v.AddressLine3 }).(pulumi.StringPtrOutput) } // The city name. func (o AddressPtrOutput) City() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return &v.City }).(pulumi.StringPtrOutput) } // The country name. func (o AddressPtrOutput) Country() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return &v.Country }).(pulumi.StringPtrOutput) } // The postal code. func (o AddressPtrOutput) PostalCode() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil
return &v.PostalCode }).(pulumi.StringPtrOutput) } // The state name. func (o AddressPtrOutput) State() pulumi.StringPtrOutput { return o.ApplyT(func(v *Address) *string { if v == nil { return nil } return &v.State }).(pulumi.StringPtrOutput) } // The shipping address of the customer. type AddressResponse struct { // The address line1. AddressLine1 string `pulumi:"addressLine1"` // The address line2. AddressLine2 *string `pulumi:"addressLine2"` // The address line3. AddressLine3 *string `pulumi:"addressLine3"` // The city name. City string `pulumi:"city"` // The country name. Country string `pulumi:"country"` // The postal code. PostalCode string `pulumi:"postalCode"` // The state name. State string `pulumi:"state"` } // AddressResponseInput is an input type that accepts AddressResponseArgs and AddressResponseOutput values. // You can construct a concrete instance of `AddressResponseInput` via: // // AddressResponseArgs{...} type AddressResponseInput interface { pulumi.Input ToAddressResponseOutput() AddressResponseOutput ToAddressResponseOutputWithContext(context.Context) AddressResponseOutput } // The shipping address of the customer. type AddressResponseArgs struct { // The address line1. AddressLine1 pulumi.StringInput `pulumi:"addressLine1"` // The address line2. AddressLine2 pulumi.StringPtrInput `pulumi:"addressLine2"` // The address line3. AddressLine3 pulumi.StringPtrInput `pulumi:"addressLine3"` // The city name. City pulumi.StringInput `pulumi:"city"` // The country name. Country pulumi.StringInput `pulumi:"country"` // The postal code. PostalCode pulumi.StringInput `pulumi:"postalCode"` // The state name. State pulumi.StringInput `pulumi:"state"` } func (AddressResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*AddressResponse)(nil)).Elem() } func (i AddressResponseArgs) ToAddressResponseOutput() AddressResponseOutput { return i.ToAddressResponseOutputWithContext(context.Background()) } func (i AddressResponseArgs) ToAddressResponseOutputWithContext(ctx context.Context) AddressResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressResponseOutput) } func (i AddressResponseArgs) ToAddressResponsePtrOutput() AddressResponsePtrOutput { return i.ToAddressResponsePtrOutputWithContext(context.Background()) } func (i AddressResponseArgs) ToAddressResponsePtrOutputWithContext(ctx context.Context) AddressResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressResponseOutput).ToAddressResponsePtrOutputWithContext(ctx) } // AddressResponsePtrInput is an input type that accepts AddressResponseArgs, AddressResponsePtr and AddressResponsePtrOutput values. 
// You can construct a concrete instance of `AddressResponsePtrInput` via: // // AddressResponseArgs{...} // // or: // // nil type AddressResponsePtrInput interface { pulumi.Input ToAddressResponsePtrOutput() AddressResponsePtrOutput ToAddressResponsePtrOutputWithContext(context.Context) AddressResponsePtrOutput } type addressResponsePtrType AddressResponseArgs func AddressResponsePtr(v *AddressResponseArgs) AddressResponsePtrInput { return (*addressResponsePtrType)(v) } func (*addressResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**AddressResponse)(nil)).Elem() } func (i *addressResponsePtrType) ToAddressResponsePtrOutput() AddressResponsePtrOutput { return i.ToAddressResponsePtrOutputWithContext(context.Background()) } func (i *addressResponsePtrType) ToAddressResponsePtrOutputWithContext(ctx context.Context) AddressResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AddressResponsePtrOutput) } // The shipping address of the customer. type AddressResponseOutput struct{ *pulumi.OutputState } func (AddressResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*AddressResponse)(nil)).Elem() } func (o AddressResponseOutput) ToAddressResponseOutput() AddressResponseOutput { return o } func (o AddressResponseOutput) ToAddressResponseOutputWithContext(ctx context.Context) AddressResponseOutput { return o } func (o AddressResponseOutput) ToAddressResponsePtrOutput() AddressResponsePtrOutput { return o.ToAddressResponsePtrOutputWithContext(context.Background()) } func (o AddressResponseOutput) ToAddressResponsePtrOutputWithContext(ctx context.Context) AddressResponsePtrOutput { return o.ApplyT(func(v AddressResponse) *AddressResponse { return &v }).(AddressResponsePtrOutput) } // The address line1. func (o AddressResponseOutput) AddressLine1() pulumi.StringOutput { return o.ApplyT(func(v AddressResponse) string { return v.AddressLine1 }).(pulumi.StringOutput) } // The address line2. func (o AddressResponseOutput) AddressLine2() pulumi.StringPtrOutput { return o.ApplyT(func(v AddressResponse) *string { return v.AddressLine2 }).(pulumi.StringPtrOutput) } // The address line3. func (o AddressResponseOutput) AddressLine3() pulumi.StringPtrOutput { return o.ApplyT(func(v AddressResponse) *string { return v.AddressLine3 }).(pulumi.StringPtrOutput) } // The city name. func (o AddressResponseOutput) City() pulumi.StringOutput { return o.ApplyT(func(v AddressResponse) string { return v.City }).(pulumi.StringOutput) } // The country name. func (o AddressResponseOutput) Country() pulumi.StringOutput { return o.ApplyT(func(v AddressResponse) string { return v.Country }).(pulumi.StringOutput) } // The postal code. func (o AddressResponseOutput) PostalCode() pulumi.StringOutput { return o.ApplyT(func(v AddressResponse) string { return v.PostalCode }).(pulumi.StringOutput) } // The state name. 
func (o AddressResponseOutput) State() pulumi.StringOutput { return o.ApplyT(func(v AddressResponse) string { return v.State }).(pulumi.StringOutput) } type AddressResponsePtrOutput struct{ *pulumi.OutputState } func (AddressResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**AddressResponse)(nil)).Elem() } func (o AddressResponsePtrOutput) ToAddressResponsePtrOutput() AddressResponsePtrOutput { return o } func (o AddressResponsePtrOutput) ToAddressResponsePtrOutputWithContext(ctx context.Context) AddressResponsePtrOutput { return o } func (o AddressResponsePtrOutput) Elem() AddressResponseOutput { return o.ApplyT(func(v *AddressResponse) AddressResponse { return *v }).(AddressResponseOutput) } // The address line1. func (o AddressResponsePtrOutput) AddressLine1() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return &v.AddressLine1 }).(pulumi.StringPtrOutput) } // The address line2. func (o AddressResponsePtrOutput) AddressLine2() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return v.AddressLine2 }).(pulumi.StringPtrOutput) } // The address line3. func (o AddressResponsePtrOutput) AddressLine3() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return v.AddressLine3 }).(pulumi.StringPtrOutput) } // The city name. func (o AddressResponsePtrOutput) City() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return &v.City }).(pulumi.StringPtrOutput) } // The country name. func (o AddressResponsePtrOutput) Country() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return &v.Country }).(pulumi.StringPtrOutput) } // The postal code. func (o AddressResponsePtrOutput) PostalCode() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return &v.PostalCode }).(pulumi.StringPtrOutput) } // The state name. func (o AddressResponsePtrOutput) State() pulumi.StringPtrOutput { return o.ApplyT(func(v *AddressResponse) *string { if v == nil { return nil } return &v.State }).(pulumi.StringPtrOutput) } // Represent the secrets intended for encryption with asymmetric key pair. type AsymmetricEncryptedSecret struct { // The algorithm used to encrypt "Value". EncryptionAlgorithm string `pulumi:"encryptionAlgorithm"` // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. EncryptionCertThumbprint *string `pulumi:"encryptionCertThumbprint"` // The value of the secret. Value string `pulumi:"value"` } // AsymmetricEncryptedSecretInput is an input type that accepts AsymmetricEncryptedSecretArgs and AsymmetricEncryptedSecretOutput values. // You can construct a concrete instance of `AsymmetricEncryptedSecretInput` via: // // AsymmetricEncryptedSecretArgs{...} type AsymmetricEncryptedSecretInput interface { pulumi.Input ToAsymmetricEncryptedSecretOutput() AsymmetricEncryptedSecretOutput ToAsymmetricEncryptedSecretOutputWithContext(context.Context) AsymmetricEncryptedSecretOutput } // Represent the secrets intended for encryption with asymmetric key pair. type AsymmetricEncryptedSecretArgs struct { // The algorithm used to encrypt "Value". EncryptionAlgorithm pulumi.StringInput `pulumi:"encryptionAlgorithm"` // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. 
EncryptionCertThumbprint pulumi.StringPtrInput `pulumi:"encryptionCertThumbprint"` // The value of the secret. Value pulumi.StringInput `pulumi:"value"` } func (AsymmetricEncryptedSecretArgs) ElementType() reflect.Type { return reflect.TypeOf((*AsymmetricEncryptedSecret)(nil)).Elem() } func (i AsymmetricEncryptedSecretArgs) ToAsymmetricEncryptedSecretOutput() AsymmetricEncryptedSecretOutput { return i.ToAsymmetricEncryptedSecretOutputWithContext(context.Background()) } func (i AsymmetricEncryptedSecretArgs) ToAsymmetricEncryptedSecretOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretOutput) } func (i AsymmetricEncryptedSecretArgs) ToAsymmetricEncryptedSecretPtrOutput() AsymmetricEncryptedSecretPtrOutput { return i.ToAsymmetricEncryptedSecretPtrOutputWithContext(context.Background()) } func (i AsymmetricEncryptedSecretArgs) ToAsymmetricEncryptedSecretPtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretOutput).ToAsymmetricEncryptedSecretPtrOutputWithContext(ctx) } // AsymmetricEncryptedSecretPtrInput is an input type that accepts AsymmetricEncryptedSecretArgs, AsymmetricEncryptedSecretPtr and AsymmetricEncryptedSecretPtrOutput values. // You can construct a concrete instance of `AsymmetricEncryptedSecretPtrInput` via: // // AsymmetricEncryptedSecretArgs{...} // // or: // // nil type AsymmetricEncryptedSecretPtrInput interface { pulumi.Input ToAsymmetricEncryptedSecretPtrOutput() AsymmetricEncryptedSecretPtrOutput ToAsymmetricEncryptedSecretPtrOutputWithContext(context.Context) AsymmetricEncryptedSecretPtrOutput } type asymmetricEncryptedSecretPtrType AsymmetricEncryptedSecretArgs func AsymmetricEncryptedSecretPtr(v *AsymmetricEncryptedSecretArgs) AsymmetricEncryptedSecretPtrInput { return (*asymmetricEncryptedSecretPtrType)(v) } func (*asymmetricEncryptedSecretPtrType) ElementType() reflect.Type { return reflect.TypeOf((**AsymmetricEncryptedSecret)(nil)).Elem() } func (i *asymmetricEncryptedSecretPtrType) ToAsymmetricEncryptedSecretPtrOutput() AsymmetricEncryptedSecretPtrOutput { return i.ToAsymmetricEncryptedSecretPtrOutputWithContext(context.Background()) } func (i *asymmetricEncryptedSecretPtrType) ToAsymmetricEncryptedSecretPtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretPtrOutput) } // Represent the secrets intended for encryption with asymmetric key pair. 
type AsymmetricEncryptedSecretOutput struct{ *pulumi.OutputState } func (AsymmetricEncryptedSecretOutput) ElementType() reflect.Type { return reflect.TypeOf((*AsymmetricEncryptedSecret)(nil)).Elem() } func (o AsymmetricEncryptedSecretOutput) ToAsymmetricEncryptedSecretOutput() AsymmetricEncryptedSecretOutput { return o } func (o AsymmetricEncryptedSecretOutput) ToAsymmetricEncryptedSecretOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretOutput { return o } func (o AsymmetricEncryptedSecretOutput) ToAsymmetricEncryptedSecretPtrOutput() AsymmetricEncryptedSecretPtrOutput { return o.ToAsymmetricEncryptedSecretPtrOutputWithContext(context.Background()) } func (o AsymmetricEncryptedSecretOutput) ToAsymmetricEncryptedSecretPtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretPtrOutput { return o.ApplyT(func(v AsymmetricEncryptedSecret) *AsymmetricEncryptedSecret { return &v }).(AsymmetricEncryptedSecretPtrOutput) } // The algorithm used to encrypt "Value". func (o AsymmetricEncryptedSecretOutput) EncryptionAlgorithm() pulumi.StringOutput { return o.ApplyT(func(v AsymmetricEncryptedSecret) string { return v.EncryptionAlgorithm }).(pulumi.StringOutput) } // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. func (o AsymmetricEncryptedSecretOutput) EncryptionCertThumbprint() pulumi.StringPtrOutput { return o.ApplyT(func(v AsymmetricEncryptedSecret) *string { return v.EncryptionCertThumbprint }).(pulumi.StringPtrOutput) } // The value of the secret. func (o AsymmetricEncryptedSecretOutput) Value() pulumi.StringOutput { return o.ApplyT(func(v AsymmetricEncryptedSecret) string { return v.Value }).(pulumi.StringOutput) } type AsymmetricEncryptedSecretPtrOutput struct{ *pulumi.OutputState } func (AsymmetricEncryptedSecretPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**AsymmetricEncryptedSecret)(nil)).Elem() } func (o AsymmetricEncryptedSecretPtrOutput) ToAsymmetricEncryptedSecretPtrOutput() AsymmetricEncryptedSecretPtrOutput { return o } func (o AsymmetricEncryptedSecretPtrOutput) ToAsymmetricEncryptedSecretPtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretPtrOutput { return o } func (o AsymmetricEncryptedSecretPtrOutput) Elem() AsymmetricEncryptedSecretOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecret) AsymmetricEncryptedSecret { return *v }).(AsymmetricEncryptedSecretOutput) } // The algorithm used to encrypt "Value". func (o AsymmetricEncryptedSecretPtrOutput) EncryptionAlgorithm() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecret) *string { if v == nil { return nil } return &v.EncryptionAlgorithm }).(pulumi.StringPtrOutput) } // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. func (o AsymmetricEncryptedSecretPtrOutput) EncryptionCertThumbprint() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecret) *string { if v == nil { return nil } return v.EncryptionCertThumbprint }).(pulumi.StringPtrOutput) } // The value of the secret. func (o AsymmetricEncryptedSecretPtrOutput) Value() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecret) *string { if v == nil { return nil } return &v.Value }).(pulumi.StringPtrOutput) } // Represent the secrets intended for encryption with asymmetric key pair. type AsymmetricEncryptedSecretResponse struct { // The algorithm used to encrypt "Value". 
EncryptionAlgorithm string `pulumi:"encryptionAlgorithm"` // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. EncryptionCertThumbprint *string `pulumi:"encryptionCertThumbprint"` // The value of the secret. Value string `pulumi:"value"` } // AsymmetricEncryptedSecretResponseInput is an input type that accepts AsymmetricEncryptedSecretResponseArgs and AsymmetricEncryptedSecretResponseOutput values. // You can construct a concrete instance of `AsymmetricEncryptedSecretResponseInput` via: // // AsymmetricEncryptedSecretResponseArgs{...} type AsymmetricEncryptedSecretResponseInput interface { pulumi.Input ToAsymmetricEncryptedSecretResponseOutput() AsymmetricEncryptedSecretResponseOutput ToAsymmetricEncryptedSecretResponseOutputWithContext(context.Context) AsymmetricEncryptedSecretResponseOutput } // Represent the secrets intended for encryption with asymmetric key pair. type AsymmetricEncryptedSecretResponseArgs struct { // The algorithm used to encrypt "Value". EncryptionAlgorithm pulumi.StringInput `pulumi:"encryptionAlgorithm"` // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. EncryptionCertThumbprint pulumi.StringPtrInput `pulumi:"encryptionCertThumbprint"` // The value of the secret. Value pulumi.StringInput `pulumi:"value"` } func (AsymmetricEncryptedSecretResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*AsymmetricEncryptedSecretResponse)(nil)).Elem() } func (i AsymmetricEncryptedSecretResponseArgs) ToAsymmetricEncryptedSecretResponseOutput() AsymmetricEncryptedSecretResponseOutput { return i.ToAsymmetricEncryptedSecretResponseOutputWithContext(context.Background()) } func (i AsymmetricEncryptedSecretResponseArgs) ToAsymmetricEncryptedSecretResponseOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretResponseOutput) } func (i AsymmetricEncryptedSecretResponseArgs) ToAsymmetricEncryptedSecretResponsePtrOutput() AsymmetricEncryptedSecretResponsePtrOutput { return i.ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(context.Background()) } func (i AsymmetricEncryptedSecretResponseArgs) ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretResponseOutput).ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(ctx) } // AsymmetricEncryptedSecretResponsePtrInput is an input type that accepts AsymmetricEncryptedSecretResponseArgs, AsymmetricEncryptedSecretResponsePtr and AsymmetricEncryptedSecretResponsePtrOutput values. 
// You can construct a concrete instance of `AsymmetricEncryptedSecretResponsePtrInput` via: // // AsymmetricEncryptedSecretResponseArgs{...} // // or: // // nil type AsymmetricEncryptedSecretResponsePtrInput interface { pulumi.Input ToAsymmetricEncryptedSecretResponsePtrOutput() AsymmetricEncryptedSecretResponsePtrOutput ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(context.Context) AsymmetricEncryptedSecretResponsePtrOutput } type asymmetricEncryptedSecretResponsePtrType AsymmetricEncryptedSecretResponseArgs func AsymmetricEncryptedSecretResponsePtr(v *AsymmetricEncryptedSecretResponseArgs) AsymmetricEncryptedSecretResponsePtrInput { return (*asymmetricEncryptedSecretResponsePtrType)(v) } func (*asymmetricEncryptedSecretResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**AsymmetricEncryptedSecretResponse)(nil)).Elem() } func (i *asymmetricEncryptedSecretResponsePtrType) ToAsymmetricEncryptedSecretResponsePtrOutput() AsymmetricEncryptedSecretResponsePtrOutput { return i.ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(context.Background()) } func (i *asymmetricEncryptedSecretResponsePtrType) ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AsymmetricEncryptedSecretResponsePtrOutput) } // Represent the secrets intended for encryption with asymmetric key pair. type AsymmetricEncryptedSecretResponseOutput struct{ *pulumi.OutputState } func (AsymmetricEncryptedSecretResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*AsymmetricEncryptedSecretResponse)(nil)).Elem() } func (o AsymmetricEncryptedSecretResponseOutput) ToAsymmetricEncryptedSecretResponseOutput() AsymmetricEncryptedSecretResponseOutput { return o } func (o AsymmetricEncryptedSecretResponseOutput) ToAsymmetricEncryptedSecretResponseOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponseOutput { return o } func (o AsymmetricEncryptedSecretResponseOutput) ToAsymmetricEncryptedSecretResponsePtrOutput() AsymmetricEncryptedSecretResponsePtrOutput { return o.ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(context.Background()) } func (o AsymmetricEncryptedSecretResponseOutput) ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponsePtrOutput { return o.ApplyT(func(v AsymmetricEncryptedSecretResponse) *AsymmetricEncryptedSecretResponse { return &v }).(AsymmetricEncryptedSecretResponsePtrOutput) } // The algorithm used to encrypt "Value". func (o AsymmetricEncryptedSecretResponseOutput) EncryptionAlgorithm() pulumi.StringOutput { return o.ApplyT(func(v AsymmetricEncryptedSecretResponse) string { return v.EncryptionAlgorithm }).(pulumi.StringOutput) } // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. func (o AsymmetricEncryptedSecretResponseOutput) EncryptionCertThumbprint() pulumi.StringPtrOutput { return o.ApplyT(func(v AsymmetricEncryptedSecretResponse) *string { return v.EncryptionCertThumbprint }).(pulumi.StringPtrOutput) } // The value of the secret. 
func (o AsymmetricEncryptedSecretResponseOutput) Value() pulumi.StringOutput { return o.ApplyT(func(v AsymmetricEncryptedSecretResponse) string { return v.Value }).(pulumi.StringOutput) } type AsymmetricEncryptedSecretResponsePtrOutput struct{ *pulumi.OutputState } func (AsymmetricEncryptedSecretResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**AsymmetricEncryptedSecretResponse)(nil)).Elem() } func (o AsymmetricEncryptedSecretResponsePtrOutput) ToAsymmetricEncryptedSecretResponsePtrOutput() AsymmetricEncryptedSecretResponsePtrOutput { return o } func (o AsymmetricEncryptedSecretResponsePtrOutput) ToAsymmetricEncryptedSecretResponsePtrOutputWithContext(ctx context.Context) AsymmetricEncryptedSecretResponsePtrOutput { return o } func (o AsymmetricEncryptedSecretResponsePtrOutput) Elem() AsymmetricEncryptedSecretResponseOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecretResponse) AsymmetricEncryptedSecretResponse { return *v }).(AsymmetricEncryptedSecretResponseOutput) } // The algorithm used to encrypt "Value". func (o AsymmetricEncryptedSecretResponsePtrOutput) EncryptionAlgorithm() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecretResponse) *string { if v == nil { return nil } return &v.EncryptionAlgorithm }).(pulumi.StringPtrOutput) } // Thumbprint certificate used to encrypt \"Value\". If the value is unencrypted, it will be null. func (o AsymmetricEncryptedSecretResponsePtrOutput) EncryptionCertThumbprint() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecretResponse) *string { if v == nil { return nil } return v.EncryptionCertThumbprint }).(pulumi.StringPtrOutput) } // The value of the secret. func (o AsymmetricEncryptedSecretResponsePtrOutput) Value() pulumi.StringPtrOutput { return o.ApplyT(func(v *AsymmetricEncryptedSecretResponse) *string { if v == nil { return nil } return &v.Value }).(pulumi.StringPtrOutput) } // Azure container mapping of the endpoint. type AzureContainerInfo struct { // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). ContainerName string `pulumi:"containerName"` // Storage format used for the file represented by the share. DataFormat string `pulumi:"dataFormat"` // ID of the storage account credential used to access storage. StorageAccountCredentialId string `pulumi:"storageAccountCredentialId"` } // AzureContainerInfoInput is an input type that accepts AzureContainerInfoArgs and AzureContainerInfoOutput values. // You can construct a concrete instance of `AzureContainerInfoInput` via: // // AzureContainerInfoArgs{...} type AzureContainerInfoInput interface { pulumi.Input ToAzureContainerInfoOutput() AzureContainerInfoOutput ToAzureContainerInfoOutputWithContext(context.Context) AzureContainerInfoOutput } // Azure container mapping of the endpoint. type AzureContainerInfoArgs struct { // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). ContainerName pulumi.StringInput `pulumi:"containerName"` // Storage format used for the file represented by the share. DataFormat pulumi.StringInput `pulumi:"dataFormat"` // ID of the storage account credential used to access storage. 
StorageAccountCredentialId pulumi.StringInput `pulumi:"storageAccountCredentialId"` } func (AzureContainerInfoArgs) ElementType() reflect.Type { return reflect.TypeOf((*AzureContainerInfo)(nil)).Elem() } func (i AzureContainerInfoArgs) ToAzureContainerInfoOutput() AzureContainerInfoOutput { return i.ToAzureContainerInfoOutputWithContext(context.Background()) } func (i AzureContainerInfoArgs) ToAzureContainerInfoOutputWithContext(ctx context.Context) AzureContainerInfoOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoOutput) } func (i AzureContainerInfoArgs) ToAzureContainerInfoPtrOutput() AzureContainerInfoPtrOutput { return i.ToAzureContainerInfoPtrOutputWithContext(context.Background()) } func (i AzureContainerInfoArgs) ToAzureContainerInfoPtrOutputWithContext(ctx context.Context) AzureContainerInfoPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoOutput).ToAzureContainerInfoPtrOutputWithContext(ctx) } // AzureContainerInfoPtrInput is an input type that accepts AzureContainerInfoArgs, AzureContainerInfoPtr and AzureContainerInfoPtrOutput values. // You can construct a concrete instance of `AzureContainerInfoPtrInput` via: // // AzureContainerInfoArgs{...} // // or: // // nil type AzureContainerInfoPtrInput interface { pulumi.Input ToAzureContainerInfoPtrOutput() AzureContainerInfoPtrOutput ToAzureContainerInfoPtrOutputWithContext(context.Context) AzureContainerInfoPtrOutput } type azureContainerInfoPtrType AzureContainerInfoArgs func AzureContainerInfoPtr(v *AzureContainerInfoArgs) AzureContainerInfoPtrInput { return (*azureContainerInfoPtrType)(v) } func (*azureContainerInfoPtrType) ElementType() reflect.Type { return reflect.TypeOf((**AzureContainerInfo)(nil)).Elem() } func (i *azureContainerInfoPtrType) ToAzureContainerInfoPtrOutput() AzureContainerInfoPtrOutput { return i.ToAzureContainerInfoPtrOutputWithContext(context.Background()) } func (i *azureContainerInfoPtrType) ToAzureContainerInfoPtrOutputWithContext(ctx context.Context) AzureContainerInfoPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoPtrOutput) } // Azure container mapping of the endpoint. type AzureContainerInfoOutput struct{ *pulumi.OutputState } func (AzureContainerInfoOutput) ElementType() reflect.Type { return reflect.TypeOf((*AzureContainerInfo)(nil)).Elem() } func (o AzureContainerInfoOutput) ToAzureContainerInfoOutput() AzureContainerInfoOutput { return o } func (o AzureContainerInfoOutput) ToAzureContainerInfoOutputWithContext(ctx context.Context) AzureContainerInfoOutput { return o } func (o AzureContainerInfoOutput) ToAzureContainerInfoPtrOutput() AzureContainerInfoPtrOutput { return o.ToAzureContainerInfoPtrOutputWithContext(context.Background()) } func (o AzureContainerInfoOutput) ToAzureContainerInfoPtrOutputWithContext(ctx context.Context) AzureContainerInfoPtrOutput { return o.ApplyT(func(v AzureContainerInfo) *AzureContainerInfo { return &v }).(AzureContainerInfoPtrOutput) } // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). func (o AzureContainerInfoOutput) ContainerName() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfo) string { return v.ContainerName }).(pulumi.StringOutput) } // Storage format used for the file represented by the share. 
func (o AzureContainerInfoOutput) DataFormat() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfo) string { return v.DataFormat }).(pulumi.StringOutput) } // ID of the storage account credential used to access storage. func (o AzureContainerInfoOutput) StorageAccountCredentialId() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfo) string { return v.StorageAccountCredentialId }).(pulumi.StringOutput) } type AzureContainerInfoPtrOutput struct{ *pulumi.OutputState } func (AzureContainerInfoPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**AzureContainerInfo)(nil)).Elem() } func (o AzureContainerInfoPtrOutput) ToAzureContainerInfoPtrOutput() AzureContainerInfoPtrOutput { return o } func (o AzureContainerInfoPtrOutput) ToAzureContainerInfoPtrOutputWithContext(ctx context.Context) AzureContainerInfoPtrOutput { return o } func (o AzureContainerInfoPtrOutput) Elem() AzureContainerInfoOutput { return o.ApplyT(func(v *AzureContainerInfo) AzureContainerInfo { return *v }).(AzureContainerInfoOutput) } // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). func (o AzureContainerInfoPtrOutput) ContainerName() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfo) *string { if v == nil { return nil } return &v.ContainerName }).(pulumi.StringPtrOutput) } // Storage format used for the file represented by the share. func (o AzureContainerInfoPtrOutput) DataFormat() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfo) *string { if v == nil { return nil } return &v.DataFormat }).(pulumi.StringPtrOutput) } // ID of the storage account credential used to access storage. func (o AzureContainerInfoPtrOutput) StorageAccountCredentialId() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfo) *string { if v == nil { return nil } return &v.StorageAccountCredentialId }).(pulumi.StringPtrOutput) } // Azure container mapping of the endpoint. type AzureContainerInfoResponse struct { // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). ContainerName string `pulumi:"containerName"` // Storage format used for the file represented by the share. DataFormat string `pulumi:"dataFormat"` // ID of the storage account credential used to access storage. StorageAccountCredentialId string `pulumi:"storageAccountCredentialId"` } // AzureContainerInfoResponseInput is an input type that accepts AzureContainerInfoResponseArgs and AzureContainerInfoResponseOutput values. // You can construct a concrete instance of `AzureContainerInfoResponseInput` via: // // AzureContainerInfoResponseArgs{...} type AzureContainerInfoResponseInput interface { pulumi.Input ToAzureContainerInfoResponseOutput() AzureContainerInfoResponseOutput ToAzureContainerInfoResponseOutputWithContext(context.Context) AzureContainerInfoResponseOutput } // Azure container mapping of the endpoint. type AzureContainerInfoResponseArgs struct { // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). ContainerName pulumi.StringInput `pulumi:"containerName"` // Storage format used for the file represented by the share. DataFormat pulumi.StringInput `pulumi:"dataFormat"` // ID of the storage account credential used to access storage. 
StorageAccountCredentialId pulumi.StringInput `pulumi:"storageAccountCredentialId"` } func (AzureContainerInfoResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*AzureContainerInfoResponse)(nil)).Elem() } func (i AzureContainerInfoResponseArgs) ToAzureContainerInfoResponseOutput() AzureContainerInfoResponseOutput { return i.ToAzureContainerInfoResponseOutputWithContext(context.Background()) } func (i AzureContainerInfoResponseArgs) ToAzureContainerInfoResponseOutputWithContext(ctx context.Context) AzureContainerInfoResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoResponseOutput) } func (i AzureContainerInfoResponseArgs) ToAzureContainerInfoResponsePtrOutput() AzureContainerInfoResponsePtrOutput { return i.ToAzureContainerInfoResponsePtrOutputWithContext(context.Background()) } func (i AzureContainerInfoResponseArgs) ToAzureContainerInfoResponsePtrOutputWithContext(ctx context.Context) AzureContainerInfoResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoResponseOutput).ToAzureContainerInfoResponsePtrOutputWithContext(ctx) } // AzureContainerInfoResponsePtrInput is an input type that accepts AzureContainerInfoResponseArgs, AzureContainerInfoResponsePtr and AzureContainerInfoResponsePtrOutput values. // You can construct a concrete instance of `AzureContainerInfoResponsePtrInput` via: // // AzureContainerInfoResponseArgs{...} // // or: // // nil type AzureContainerInfoResponsePtrInput interface { pulumi.Input ToAzureContainerInfoResponsePtrOutput() AzureContainerInfoResponsePtrOutput ToAzureContainerInfoResponsePtrOutputWithContext(context.Context) AzureContainerInfoResponsePtrOutput } type azureContainerInfoResponsePtrType AzureContainerInfoResponseArgs func AzureContainerInfoResponsePtr(v *AzureContainerInfoResponseArgs) AzureContainerInfoResponsePtrInput { return (*azureContainerInfoResponsePtrType)(v) } func (*azureContainerInfoResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**AzureContainerInfoResponse)(nil)).Elem() } func (i *azureContainerInfoResponsePtrType) ToAzureContainerInfoResponsePtrOutput() AzureContainerInfoResponsePtrOutput { return i.ToAzureContainerInfoResponsePtrOutputWithContext(context.Background()) } func (i *azureContainerInfoResponsePtrType) ToAzureContainerInfoResponsePtrOutputWithContext(ctx context.Context) AzureContainerInfoResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(AzureContainerInfoResponsePtrOutput) } // Azure container mapping of the endpoint. 
type AzureContainerInfoResponseOutput struct{ *pulumi.OutputState } func (AzureContainerInfoResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*AzureContainerInfoResponse)(nil)).Elem() } func (o AzureContainerInfoResponseOutput) ToAzureContainerInfoResponseOutput() AzureContainerInfoResponseOutput { return o } func (o AzureContainerInfoResponseOutput) ToAzureContainerInfoResponseOutputWithContext(ctx context.Context) AzureContainerInfoResponseOutput { return o } func (o AzureContainerInfoResponseOutput) ToAzureContainerInfoResponsePtrOutput() AzureContainerInfoResponsePtrOutput { return o.ToAzureContainerInfoResponsePtrOutputWithContext(context.Background()) } func (o AzureContainerInfoResponseOutput) ToAzureContainerInfoResponsePtrOutputWithContext(ctx context.Context) AzureContainerInfoResponsePtrOutput { return o.ApplyT(func(v AzureContainerInfoResponse) *AzureContainerInfoResponse { return &v }).(AzureContainerInfoResponsePtrOutput) } // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). func (o AzureContainerInfoResponseOutput) ContainerName() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfoResponse) string { return v.ContainerName }).(pulumi.StringOutput) } // Storage format used for the file represented by the share. func (o AzureContainerInfoResponseOutput) DataFormat() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfoResponse) string { return v.DataFormat }).(pulumi.StringOutput) } // ID of the storage account credential used to access storage. func (o AzureContainerInfoResponseOutput) StorageAccountCredentialId() pulumi.StringOutput { return o.ApplyT(func(v AzureContainerInfoResponse) string { return v.StorageAccountCredentialId }).(pulumi.StringOutput) } type AzureContainerInfoResponsePtrOutput struct{ *pulumi.OutputState } func (AzureContainerInfoResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**AzureContainerInfoResponse)(nil)).Elem() } func (o AzureContainerInfoResponsePtrOutput) ToAzureContainerInfoResponsePtrOutput() AzureContainerInfoResponsePtrOutput { return o } func (o AzureContainerInfoResponsePtrOutput) ToAzureContainerInfoResponsePtrOutputWithContext(ctx context.Context) AzureContainerInfoResponsePtrOutput { return o } func (o AzureContainerInfoResponsePtrOutput) Elem() AzureContainerInfoResponseOutput { return o.ApplyT(func(v *AzureContainerInfoResponse) AzureContainerInfoResponse { return *v }).(AzureContainerInfoResponseOutput) } // Container name (Based on the data format specified, this represents the name of Azure Files/Page blob/Block blob). func (o AzureContainerInfoResponsePtrOutput) ContainerName() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfoResponse) *string { if v == nil { return nil } return &v.ContainerName }).(pulumi.StringPtrOutput) } // Storage format used for the file represented by the share. func (o AzureContainerInfoResponsePtrOutput) DataFormat() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfoResponse) *string { if v == nil { return nil } return &v.DataFormat }).(pulumi.StringPtrOutput) } // ID of the storage account credential used to access storage. 
func (o AzureContainerInfoResponsePtrOutput) StorageAccountCredentialId() pulumi.StringPtrOutput { return o.ApplyT(func(v *AzureContainerInfoResponse) *string { if v == nil { return nil } return &v.StorageAccountCredentialId }).(pulumi.StringPtrOutput) } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRight struct { // Type of access to be allowed for the client. AccessPermission string `pulumi:"accessPermission"` // IP of the client. Client string `pulumi:"client"` } // ClientAccessRightInput is an input type that accepts ClientAccessRightArgs and ClientAccessRightOutput values. // You can construct a concrete instance of `ClientAccessRightInput` via: // // ClientAccessRightArgs{...} type ClientAccessRightInput interface { pulumi.Input ToClientAccessRightOutput() ClientAccessRightOutput ToClientAccessRightOutputWithContext(context.Context) ClientAccessRightOutput } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRightArgs struct { // Type of access to be allowed for the client. AccessPermission pulumi.StringInput `pulumi:"accessPermission"` // IP of the client. Client pulumi.StringInput `pulumi:"client"` } func (ClientAccessRightArgs) ElementType() reflect.Type { return reflect.TypeOf((*ClientAccessRight)(nil)).Elem() } func (i ClientAccessRightArgs) ToClientAccessRightOutput() ClientAccessRightOutput { return i.ToClientAccessRightOutputWithContext(context.Background()) } func (i ClientAccessRightArgs) ToClientAccessRightOutputWithContext(ctx context.Context) ClientAccessRightOutput { return pulumi.ToOutputWithContext(ctx, i).(ClientAccessRightOutput) } // ClientAccessRightArrayInput is an input type that accepts ClientAccessRightArray and ClientAccessRightArrayOutput values. // You can construct a concrete instance of `ClientAccessRightArrayInput` via: // // ClientAccessRightArray{ ClientAccessRightArgs{...} } type ClientAccessRightArrayInput interface { pulumi.Input ToClientAccessRightArrayOutput() ClientAccessRightArrayOutput ToClientAccessRightArrayOutputWithContext(context.Context) ClientAccessRightArrayOutput } type ClientAccessRightArray []ClientAccessRightInput func (ClientAccessRightArray) ElementType() reflect.Type { return reflect.TypeOf((*[]ClientAccessRight)(nil)).Elem() } func (i ClientAccessRightArray) ToClientAccessRightArrayOutput() ClientAccessRightArrayOutput { return i.ToClientAccessRightArrayOutputWithContext(context.Background()) } func (i ClientAccessRightArray) ToClientAccessRightArrayOutputWithContext(ctx context.Context) ClientAccessRightArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(ClientAccessRightArrayOutput) } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRightOutput struct{ *pulumi.OutputState } func (ClientAccessRightOutput) ElementType() reflect.Type { return reflect.TypeOf((*ClientAccessRight)(nil)).Elem() } func (o ClientAccessRightOutput) ToClientAccessRightOutput() ClientAccessRightOutput { return o } func (o ClientAccessRightOutput) ToClientAccessRightOutputWithContext(ctx context.Context) ClientAccessRightOutput { return o } // Type of access to be allowed for the client. func (o ClientAccessRightOutput) AccessPermission() pulumi.StringOutput { return o.ApplyT(func(v ClientAccessRight) string { return v.AccessPermission }).(pulumi.StringOutput) } // IP of the client. 
func (o ClientAccessRightOutput) Client() pulumi.StringOutput { return o.ApplyT(func(v ClientAccessRight) string { return v.Client }).(pulumi.StringOutput) } type ClientAccessRightArrayOutput struct{ *pulumi.OutputState } func (ClientAccessRightArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]ClientAccessRight)(nil)).Elem() } func (o ClientAccessRightArrayOutput) ToClientAccessRightArrayOutput() ClientAccessRightArrayOutput { return o } func (o ClientAccessRightArrayOutput) ToClientAccessRightArrayOutputWithContext(ctx context.Context) ClientAccessRightArrayOutput { return o } func (o ClientAccessRightArrayOutput) Index(i pulumi.IntInput) ClientAccessRightOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) ClientAccessRight { return vs[0].([]ClientAccessRight)[vs[1].(int)] }).(ClientAccessRightOutput) } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRightResponse struct { // Type of access to be allowed for the client. AccessPermission string `pulumi:"accessPermission"` // IP of the client. Client string `pulumi:"client"` } // ClientAccessRightResponseInput is an input type that accepts ClientAccessRightResponseArgs and ClientAccessRightResponseOutput values. // You can construct a concrete instance of `ClientAccessRightResponseInput` via: // // ClientAccessRightResponseArgs{...} type ClientAccessRightResponseInput interface { pulumi.Input ToClientAccessRightResponseOutput() ClientAccessRightResponseOutput ToClientAccessRightResponseOutputWithContext(context.Context) ClientAccessRightResponseOutput } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRightResponseArgs struct { // Type of access to be allowed for the client. AccessPermission pulumi.StringInput `pulumi:"accessPermission"` // IP of the client. Client pulumi.StringInput `pulumi:"client"` } func (ClientAccessRightResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*ClientAccessRightResponse)(nil)).Elem() } func (i ClientAccessRightResponseArgs) ToClientAccessRightResponseOutput() ClientAccessRightResponseOutput { return i.ToClientAccessRightResponseOutputWithContext(context.Background()) } func (i ClientAccessRightResponseArgs) ToClientAccessRightResponseOutputWithContext(ctx context.Context) ClientAccessRightResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(ClientAccessRightResponseOutput) } // ClientAccessRightResponseArrayInput is an input type that accepts ClientAccessRightResponseArray and ClientAccessRightResponseArrayOutput values. 
// You can construct a concrete instance of `ClientAccessRightResponseArrayInput` via: // // ClientAccessRightResponseArray{ ClientAccessRightResponseArgs{...} } type ClientAccessRightResponseArrayInput interface { pulumi.Input ToClientAccessRightResponseArrayOutput() ClientAccessRightResponseArrayOutput ToClientAccessRightResponseArrayOutputWithContext(context.Context) ClientAccessRightResponseArrayOutput } type ClientAccessRightResponseArray []ClientAccessRightResponseInput func (ClientAccessRightResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]ClientAccessRightResponse)(nil)).Elem() } func (i ClientAccessRightResponseArray) ToClientAccessRightResponseArrayOutput() ClientAccessRightResponseArrayOutput { return i.ToClientAccessRightResponseArrayOutputWithContext(context.Background()) } func (i ClientAccessRightResponseArray) ToClientAccessRightResponseArrayOutputWithContext(ctx context.Context) ClientAccessRightResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(ClientAccessRightResponseArrayOutput) } // The mapping between a particular client IP and the type of access client has on the NFS share. type ClientAccessRightResponseOutput struct{ *pulumi.OutputState } func (ClientAccessRightResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*ClientAccessRightResponse)(nil)).Elem() } func (o ClientAccessRightResponseOutput) ToClientAccessRightResponseOutput() ClientAccessRightResponseOutput { return o } func (o ClientAccessRightResponseOutput) ToClientAccessRightResponseOutputWithContext(ctx context.Context) ClientAccessRightResponseOutput { return o } // Type of access to be allowed for the client. func (o ClientAccessRightResponseOutput) AccessPermission() pulumi.StringOutput { return o.ApplyT(func(v ClientAccessRightResponse) string { return v.AccessPermission }).(pulumi.StringOutput) } // IP of the client. func (o ClientAccessRightResponseOutput) Client() pulumi.StringOutput { return o.ApplyT(func(v ClientAccessRightResponse) string { return v.Client }).(pulumi.StringOutput) } type ClientAccessRightResponseArrayOutput struct{ *pulumi.OutputState } func (ClientAccessRightResponseArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]ClientAccessRightResponse)(nil)).Elem() } func (o ClientAccessRightResponseArrayOutput) ToClientAccessRightResponseArrayOutput() ClientAccessRightResponseArrayOutput { return o } func (o ClientAccessRightResponseArrayOutput) ToClientAccessRightResponseArrayOutputWithContext(ctx context.Context) ClientAccessRightResponseArrayOutput { return o } func (o ClientAccessRightResponseArrayOutput) Index(i pulumi.IntInput) ClientAccessRightResponseOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) ClientAccessRightResponse { return vs[0].([]ClientAccessRightResponse)[vs[1].(int)] }).(ClientAccessRightResponseOutput) } // Contains all the contact details of the customer. type ContactDetails struct { // The name of the company. CompanyName string `pulumi:"companyName"` // The contact person name. ContactPerson string `pulumi:"contactPerson"` // The email list. EmailList []string `pulumi:"emailList"` // The phone number. Phone string `pulumi:"phone"` } // ContactDetailsInput is an input type that accepts ContactDetailsArgs and ContactDetailsOutput values. 
// You can construct a concrete instance of `ContactDetailsInput` via: // // ContactDetailsArgs{...} type ContactDetailsInput interface { pulumi.Input ToContactDetailsOutput() ContactDetailsOutput ToContactDetailsOutputWithContext(context.Context) ContactDetailsOutput } // Contains all the contact details of the customer. type ContactDetailsArgs struct { // The name of the company. CompanyName pulumi.StringInput `pulumi:"companyName"` // The contact person name. ContactPerson pulumi.StringInput `pulumi:"contactPerson"` // The email list. EmailList pulumi.StringArrayInput `pulumi:"emailList"` // The phone number. Phone pulumi.StringInput `pulumi:"phone"` } func (ContactDetailsArgs) ElementType() reflect.Type { return reflect.TypeOf((*ContactDetails)(nil)).Elem() } func (i ContactDetailsArgs) ToContactDetailsOutput() ContactDetailsOutput { return i.ToContactDetailsOutputWithContext(context.Background()) } func (i ContactDetailsArgs) ToContactDetailsOutputWithContext(ctx context.Context) ContactDetailsOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsOutput) } func (i ContactDetailsArgs) ToContactDetailsPtrOutput() ContactDetailsPtrOutput { return i.ToContactDetailsPtrOutputWithContext(context.Background()) } func (i ContactDetailsArgs) ToContactDetailsPtrOutputWithContext(ctx context.Context) ContactDetailsPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsOutput).ToContactDetailsPtrOutputWithContext(ctx) } // ContactDetailsPtrInput is an input type that accepts ContactDetailsArgs, ContactDetailsPtr and ContactDetailsPtrOutput values. // You can construct a concrete instance of `ContactDetailsPtrInput` via: // // ContactDetailsArgs{...} // // or: // // nil type ContactDetailsPtrInput interface { pulumi.Input ToContactDetailsPtrOutput() ContactDetailsPtrOutput ToContactDetailsPtrOutputWithContext(context.Context) ContactDetailsPtrOutput } type contactDetailsPtrType ContactDetailsArgs func ContactDetailsPtr(v *ContactDetailsArgs) ContactDetailsPtrInput { return (*contactDetailsPtrType)(v) } func (*contactDetailsPtrType) ElementType() reflect.Type { return reflect.TypeOf((**ContactDetails)(nil)).Elem() } func (i *contactDetailsPtrType) ToContactDetailsPtrOutput() ContactDetailsPtrOutput { return i.ToContactDetailsPtrOutputWithContext(context.Background()) } func (i *contactDetailsPtrType) ToContactDetailsPtrOutputWithContext(ctx context.Context) ContactDetailsPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsPtrOutput) } // Contains all the contact details of the customer. type ContactDetailsOutput struct{ *pulumi.OutputState } func (ContactDetailsOutput) ElementType() reflect.Type { return reflect.TypeOf((*ContactDetails)(nil)).Elem() } func (o ContactDetailsOutput) ToContactDetailsOutput() ContactDetailsOutput { return o } func (o ContactDetailsOutput) ToContactDetailsOutputWithContext(ctx context.Context) ContactDetailsOutput { return o } func (o ContactDetailsOutput) ToContactDetailsPtrOutput() ContactDetailsPtrOutput { return o.ToContactDetailsPtrOutputWithContext(context.Background()) } func (o ContactDetailsOutput) ToContactDetailsPtrOutputWithContext(ctx context.Context) ContactDetailsPtrOutput { return o.ApplyT(func(v ContactDetails) *ContactDetails { return &v }).(ContactDetailsPtrOutput) } // The name of the company. func (o ContactDetailsOutput) CompanyName() pulumi.StringOutput { return o.ApplyT(func(v ContactDetails) string { return v.CompanyName }).(pulumi.StringOutput) } // The contact person name. 
func (o ContactDetailsOutput) ContactPerson() pulumi.StringOutput { return o.ApplyT(func(v ContactDetails) string { return v.ContactPerson }).(pulumi.StringOutput) } // The email list. func (o ContactDetailsOutput) EmailList() pulumi.StringArrayOutput { return o.ApplyT(func(v ContactDetails) []string { return v.EmailList }).(pulumi.StringArrayOutput) } // The phone number. func (o ContactDetailsOutput) Phone() pulumi.StringOutput { return o.ApplyT(func(v ContactDetails) string { return v.Phone }).(pulumi.StringOutput) } type ContactDetailsPtrOutput struct{ *pulumi.OutputState } func (ContactDetailsPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**ContactDetails)(nil)).Elem() } func (o ContactDetailsPtrOutput) ToContactDetailsPtrOutput() ContactDetailsPtrOutput { return o } func (o ContactDetailsPtrOutput) ToContactDetailsPtrOutputWithContext(ctx context.Context) ContactDetailsPtrOutput { return o } func (o ContactDetailsPtrOutput) Elem() ContactDetailsOutput { return o.ApplyT(func(v *ContactDetails) ContactDetails { return *v }).(ContactDetailsOutput) } // The name of the company. func (o ContactDetailsPtrOutput) CompanyName() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetails) *string { if v == nil { return nil } return &v.CompanyName }).(pulumi.StringPtrOutput) } // The contact person name. func (o ContactDetailsPtrOutput) ContactPerson() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetails) *string { if v == nil { return nil } return &v.ContactPerson }).(pulumi.StringPtrOutput) } // The email list. func (o ContactDetailsPtrOutput) EmailList() pulumi.StringArrayOutput { return o.ApplyT(func(v *ContactDetails) []string { if v == nil { return nil } return v.EmailList }).(pulumi.StringArrayOutput) } // The phone number. func (o ContactDetailsPtrOutput) Phone() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetails) *string { if v == nil { return nil } return &v.Phone }).(pulumi.StringPtrOutput) } // Contains all the contact details of the customer. type ContactDetailsResponse struct { // The name of the company. CompanyName string `pulumi:"companyName"` // The contact person name. ContactPerson string `pulumi:"contactPerson"` // The email list. EmailList []string `pulumi:"emailList"` // The phone number. Phone string `pulumi:"phone"` } // ContactDetailsResponseInput is an input type that accepts ContactDetailsResponseArgs and ContactDetailsResponseOutput values. // You can construct a concrete instance of `ContactDetailsResponseInput` via: // // ContactDetailsResponseArgs{...} type ContactDetailsResponseInput interface { pulumi.Input ToContactDetailsResponseOutput() ContactDetailsResponseOutput ToContactDetailsResponseOutputWithContext(context.Context) ContactDetailsResponseOutput } // Contains all the contact details of the customer. type ContactDetailsResponseArgs struct { // The name of the company. CompanyName pulumi.StringInput `pulumi:"companyName"` // The contact person name. ContactPerson pulumi.StringInput `pulumi:"contactPerson"` // The email list. EmailList pulumi.StringArrayInput `pulumi:"emailList"` // The phone number. 
Phone pulumi.StringInput `pulumi:"phone"` } func (ContactDetailsResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*ContactDetailsResponse)(nil)).Elem() } func (i ContactDetailsResponseArgs) ToContactDetailsResponseOutput() ContactDetailsResponseOutput { return i.ToContactDetailsResponseOutputWithContext(context.Background()) } func (i ContactDetailsResponseArgs) ToContactDetailsResponseOutputWithContext(ctx context.Context) ContactDetailsResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsResponseOutput) } func (i ContactDetailsResponseArgs) ToContactDetailsResponsePtrOutput() ContactDetailsResponsePtrOutput { return i.ToContactDetailsResponsePtrOutputWithContext(context.Background()) } func (i ContactDetailsResponseArgs) ToContactDetailsResponsePtrOutputWithContext(ctx context.Context) ContactDetailsResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsResponseOutput).ToContactDetailsResponsePtrOutputWithContext(ctx) } // ContactDetailsResponsePtrInput is an input type that accepts ContactDetailsResponseArgs, ContactDetailsResponsePtr and ContactDetailsResponsePtrOutput values. // You can construct a concrete instance of `ContactDetailsResponsePtrInput` via: // // ContactDetailsResponseArgs{...} // // or: // // nil type ContactDetailsResponsePtrInput interface { pulumi.Input ToContactDetailsResponsePtrOutput() ContactDetailsResponsePtrOutput ToContactDetailsResponsePtrOutputWithContext(context.Context) ContactDetailsResponsePtrOutput } type contactDetailsResponsePtrType ContactDetailsResponseArgs func ContactDetailsResponsePtr(v *ContactDetailsResponseArgs) ContactDetailsResponsePtrInput { return (*contactDetailsResponsePtrType)(v) } func (*contactDetailsResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**ContactDetailsResponse)(nil)).Elem() } func (i *contactDetailsResponsePtrType) ToContactDetailsResponsePtrOutput() ContactDetailsResponsePtrOutput { return i.ToContactDetailsResponsePtrOutputWithContext(context.Background()) } func (i *contactDetailsResponsePtrType) ToContactDetailsResponsePtrOutputWithContext(ctx context.Context) ContactDetailsResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(ContactDetailsResponsePtrOutput) } // Contains all the contact details of the customer. type ContactDetailsResponseOutput struct{ *pulumi.OutputState } func (ContactDetailsResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*ContactDetailsResponse)(nil)).Elem() } func (o ContactDetailsResponseOutput) ToContactDetailsResponseOutput() ContactDetailsResponseOutput { return o } func (o ContactDetailsResponseOutput) ToContactDetailsResponseOutputWithContext(ctx context.Context) ContactDetailsResponseOutput { return o } func (o ContactDetailsResponseOutput) ToContactDetailsResponsePtrOutput() ContactDetailsResponsePtrOutput { return o.ToContactDetailsResponsePtrOutputWithContext(context.Background()) } func (o ContactDetailsResponseOutput) ToContactDetailsResponsePtrOutputWithContext(ctx context.Context) ContactDetailsResponsePtrOutput { return o.ApplyT(func(v ContactDetailsResponse) *ContactDetailsResponse { return &v }).(ContactDetailsResponsePtrOutput) } // The name of the company. func (o ContactDetailsResponseOutput) CompanyName() pulumi.StringOutput { return o.ApplyT(func(v ContactDetailsResponse) string { return v.CompanyName }).(pulumi.StringOutput) } // The contact person name. 
func (o ContactDetailsResponseOutput) ContactPerson() pulumi.StringOutput { return o.ApplyT(func(v ContactDetailsResponse) string { return v.ContactPerson }).(pulumi.StringOutput) } // The email list. func (o ContactDetailsResponseOutput) EmailList() pulumi.StringArrayOutput { return o.ApplyT(func(v ContactDetailsResponse) []string { return v.EmailList }).(pulumi.StringArrayOutput) } // The phone number. func (o ContactDetailsResponseOutput) Phone() pulumi.StringOutput { return o.ApplyT(func(v ContactDetailsResponse) string { return v.Phone }).(pulumi.StringOutput) } type ContactDetailsResponsePtrOutput struct{ *pulumi.OutputState } func (ContactDetailsResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**ContactDetailsResponse)(nil)).Elem() } func (o ContactDetailsResponsePtrOutput) ToContactDetailsResponsePtrOutput() ContactDetailsResponsePtrOutput { return o } func (o ContactDetailsResponsePtrOutput) ToContactDetailsResponsePtrOutputWithContext(ctx context.Context) ContactDetailsResponsePtrOutput { return o } func (o ContactDetailsResponsePtrOutput) Elem() ContactDetailsResponseOutput { return o.ApplyT(func(v *ContactDetailsResponse) ContactDetailsResponse { return *v }).(ContactDetailsResponseOutput) } // The name of the company. func (o ContactDetailsResponsePtrOutput) CompanyName() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetailsResponse) *string { if v == nil { return nil } return &v.CompanyName }).(pulumi.StringPtrOutput) } // The contact person name. func (o ContactDetailsResponsePtrOutput) ContactPerson() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetailsResponse) *string { if v == nil { return nil } return &v.ContactPerson }).(pulumi.StringPtrOutput) } // The email list. func (o ContactDetailsResponsePtrOutput) EmailList() pulumi.StringArrayOutput { return o.ApplyT(func(v *ContactDetailsResponse) []string { if v == nil { return nil } return v.EmailList }).(pulumi.StringArrayOutput) } // The phone number. func (o ContactDetailsResponsePtrOutput) Phone() pulumi.StringPtrOutput { return o.ApplyT(func(v *ContactDetailsResponse) *string { if v == nil { return nil } return &v.Phone }).(pulumi.StringPtrOutput) } // The share mount point. type MountPointMapResponse struct { // Mount point for the share. MountPoint string `pulumi:"mountPoint"` // ID of the role to which share is mounted. RoleId string `pulumi:"roleId"` // Role type. RoleType string `pulumi:"roleType"` // ID of the share mounted to the role VM. ShareId string `pulumi:"shareId"` } // MountPointMapResponseInput is an input type that accepts MountPointMapResponseArgs and MountPointMapResponseOutput values. // You can construct a concrete instance of `MountPointMapResponseInput` via: // // MountPointMapResponseArgs{...} type MountPointMapResponseInput interface { pulumi.Input ToMountPointMapResponseOutput() MountPointMapResponseOutput ToMountPointMapResponseOutputWithContext(context.Context) MountPointMapResponseOutput } // The share mount point. type MountPointMapResponseArgs struct { // Mount point for the share. MountPoint pulumi.StringInput `pulumi:"mountPoint"` // ID of the role to which share is mounted. RoleId pulumi.StringInput `pulumi:"roleId"` // Role type. RoleType pulumi.StringInput `pulumi:"roleType"` // ID of the share mounted to the role VM. 
ShareId pulumi.StringInput `pulumi:"shareId"` } func (MountPointMapResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*MountPointMapResponse)(nil)).Elem() } func (i MountPointMapResponseArgs) ToMountPointMapResponseOutput() MountPointMapResponseOutput { return i.ToMountPointMapResponseOutputWithContext(context.Background()) } func (i MountPointMapResponseArgs) ToMountPointMapResponseOutputWithContext(ctx context.Context) MountPointMapResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(MountPointMapResponseOutput) } // MountPointMapResponseArrayInput is an input type that accepts MountPointMapResponseArray and MountPointMapResponseArrayOutput values. // You can construct a concrete instance of `MountPointMapResponseArrayInput` via: // // MountPointMapResponseArray{ MountPointMapResponseArgs{...} } type MountPointMapResponseArrayInput interface { pulumi.Input ToMountPointMapResponseArrayOutput() MountPointMapResponseArrayOutput ToMountPointMapResponseArrayOutputWithContext(context.Context) MountPointMapResponseArrayOutput } type MountPointMapResponseArray []MountPointMapResponseInput func (MountPointMapResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]MountPointMapResponse)(nil)).Elem() } func (i MountPointMapResponseArray) ToMountPointMapResponseArrayOutput() MountPointMapResponseArrayOutput { return i.ToMountPointMapResponseArrayOutputWithContext(context.Background()) } func (i MountPointMapResponseArray) ToMountPointMapResponseArrayOutputWithContext(ctx context.Context) MountPointMapResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(MountPointMapResponseArrayOutput) } // The share mount point. type MountPointMapResponseOutput struct{ *pulumi.OutputState } func (MountPointMapResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*MountPointMapResponse)(nil)).Elem() } func (o MountPointMapResponseOutput) ToMountPointMapResponseOutput() MountPointMapResponseOutput { return o } func (o MountPointMapResponseOutput) ToMountPointMapResponseOutputWithContext(ctx context.Context) MountPointMapResponseOutput { return o } // Mount point for the share. func (o MountPointMapResponseOutput) MountPoint() pulumi.StringOutput { return o.ApplyT(func(v MountPointMapResponse) string { return v.MountPoint }).(pulumi.StringOutput) } // ID of the role to which share is mounted. func (o MountPointMapResponseOutput) RoleId() pulumi.StringOutput { return o.ApplyT(func(v MountPointMapResponse) string { return v.RoleId }).(pulumi.StringOutput) } // Role type. func (o MountPointMapResponseOutput) RoleType() pulumi.StringOutput { return o.ApplyT(func(v MountPointMapResponse) string { return v.RoleType }).(pulumi.StringOutput) } // ID of the share mounted to the role VM. 
func (o MountPointMapResponseOutput) ShareId() pulumi.StringOutput { return o.ApplyT(func(v MountPointMapResponse) string { return v.ShareId }).(pulumi.StringOutput) } type MountPointMapResponseArrayOutput struct{ *pulumi.OutputState } func (MountPointMapResponseArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]MountPointMapResponse)(nil)).Elem() } func (o MountPointMapResponseArrayOutput) ToMountPointMapResponseArrayOutput() MountPointMapResponseArrayOutput { return o } func (o MountPointMapResponseArrayOutput) ToMountPointMapResponseArrayOutputWithContext(ctx context.Context) MountPointMapResponseArrayOutput { return o } func (o MountPointMapResponseArrayOutput) Index(i pulumi.IntInput) MountPointMapResponseOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) MountPointMapResponse { return vs[0].([]MountPointMapResponse)[vs[1].(int)] }).(MountPointMapResponseOutput) } // Represents a single status change. type OrderStatus struct { // Comments related to this status change. Comments *string `pulumi:"comments"` // Status of the order as per the allowed status types. Status string `pulumi:"status"` } // OrderStatusInput is an input type that accepts OrderStatusArgs and OrderStatusOutput values. // You can construct a concrete instance of `OrderStatusInput` via: // // OrderStatusArgs{...} type OrderStatusInput interface { pulumi.Input ToOrderStatusOutput() OrderStatusOutput ToOrderStatusOutputWithContext(context.Context) OrderStatusOutput } // Represents a single status change. type OrderStatusArgs struct { // Comments related to this status change. Comments pulumi.StringPtrInput `pulumi:"comments"` // Status of the order as per the allowed status types. Status pulumi.StringInput `pulumi:"status"` } func (OrderStatusArgs) ElementType() reflect.Type { return reflect.TypeOf((*OrderStatus)(nil)).Elem() } func (i OrderStatusArgs) ToOrderStatusOutput() OrderStatusOutput { return i.ToOrderStatusOutputWithContext(context.Background()) } func (i OrderStatusArgs) ToOrderStatusOutputWithContext(ctx context.Context) OrderStatusOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusOutput) } func (i OrderStatusArgs) ToOrderStatusPtrOutput() OrderStatusPtrOutput { return i.ToOrderStatusPtrOutputWithContext(context.Background()) } func (i OrderStatusArgs) ToOrderStatusPtrOutputWithContext(ctx context.Context) OrderStatusPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusOutput).ToOrderStatusPtrOutputWithContext(ctx) } // OrderStatusPtrInput is an input type that accepts OrderStatusArgs, OrderStatusPtr and OrderStatusPtrOutput values. // You can construct a concrete instance of `OrderStatusPtrInput` via: // // OrderStatusArgs{...} // // or: // // nil type OrderStatusPtrInput interface { pulumi.Input ToOrderStatusPtrOutput() OrderStatusPtrOutput ToOrderStatusPtrOutputWithContext(context.Context) OrderStatusPtrOutput } type orderStatusPtrType OrderStatusArgs func OrderStatusPtr(v *OrderStatusArgs) OrderStatusPtrInput { return (*orderStatusPtrType)(v) } func (*orderStatusPtrType) ElementType() reflect.Type { return reflect.TypeOf((**OrderStatus)(nil)).Elem() } func (i *orderStatusPtrType) ToOrderStatusPtrOutput() OrderStatusPtrOutput { return i.ToOrderStatusPtrOutputWithContext(context.Background()) } func (i *orderStatusPtrType) ToOrderStatusPtrOutputWithContext(ctx context.Context) OrderStatusPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusPtrOutput) } // Represents a single status change. 
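//
// As an illustrative sketch, such a value might be built with OrderStatusArgs
// (the status string and comment are placeholder assumptions):
//
//	OrderStatusArgs{
//		Status:   pulumi.String("Untracked"),
//		Comments: pulumi.String("order created via automation"),
//	}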
type OrderStatusOutput struct{ *pulumi.OutputState } func (OrderStatusOutput) ElementType() reflect.Type { return reflect.TypeOf((*OrderStatus)(nil)).Elem() } func (o OrderStatusOutput) ToOrderStatusOutput() OrderStatusOutput { return o } func (o OrderStatusOutput) ToOrderStatusOutputWithContext(ctx context.Context) OrderStatusOutput { return o } func (o OrderStatusOutput) ToOrderStatusPtrOutput() OrderStatusPtrOutput { return o.ToOrderStatusPtrOutputWithContext(context.Background()) } func (o OrderStatusOutput) ToOrderStatusPtrOutputWithContext(ctx context.Context) OrderStatusPtrOutput { return o.ApplyT(func(v OrderStatus) *OrderStatus { return &v }).(OrderStatusPtrOutput) } // Comments related to this status change. func (o OrderStatusOutput) Comments() pulumi.StringPtrOutput { return o.ApplyT(func(v OrderStatus) *string { return v.Comments }).(pulumi.StringPtrOutput) } // Status of the order as per the allowed status types. func (o OrderStatusOutput) Status() pulumi.StringOutput { return o.ApplyT(func(v OrderStatus) string { return v.Status }).(pulumi.StringOutput) } type OrderStatusPtrOutput struct{ *pulumi.OutputState } func (OrderStatusPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**OrderStatus)(nil)).Elem() } func (o OrderStatusPtrOutput) ToOrderStatusPtrOutput() OrderStatusPtrOutput { return o } func (o OrderStatusPtrOutput) ToOrderStatusPtrOutputWithContext(ctx context.Context) OrderStatusPtrOutput { return o } func (o OrderStatusPtrOutput) Elem() OrderStatusOutput { return o.ApplyT(func(v *OrderStatus) OrderStatus { return *v }).(OrderStatusOutput) } // Comments related to this status change. func (o OrderStatusPtrOutput) Comments() pulumi.StringPtrOutput { return o.ApplyT(func(v *OrderStatus) *string { if v == nil { return nil } return v.Comments }).(pulumi.StringPtrOutput) } // Status of the order as per the allowed status types. func (o OrderStatusPtrOutput) Status() pulumi.StringPtrOutput { return o.ApplyT(func(v *OrderStatus) *string { if v == nil { return nil } return &v.Status }).(pulumi.StringPtrOutput) } // Represents a single status change. type OrderStatusResponse struct { // Dictionary to hold generic information which is not stored // by the already existing properties AdditionalOrderDetails map[string]string `pulumi:"additionalOrderDetails"` // Comments related to this status change. Comments *string `pulumi:"comments"` // Status of the order as per the allowed status types. Status string `pulumi:"status"` // Time of status update. UpdateDateTime string `pulumi:"updateDateTime"` } // OrderStatusResponseInput is an input type that accepts OrderStatusResponseArgs and OrderStatusResponseOutput values. // You can construct a concrete instance of `OrderStatusResponseInput` via: // // OrderStatusResponseArgs{...} type OrderStatusResponseInput interface { pulumi.Input ToOrderStatusResponseOutput() OrderStatusResponseOutput ToOrderStatusResponseOutputWithContext(context.Context) OrderStatusResponseOutput } // Represents a single status change. type OrderStatusResponseArgs struct { // Dictionary to hold generic information which is not stored // by the already existing properties AdditionalOrderDetails pulumi.StringMapInput `pulumi:"additionalOrderDetails"` // Comments related to this status change. Comments pulumi.StringPtrInput `pulumi:"comments"` // Status of the order as per the allowed status types. Status pulumi.StringInput `pulumi:"status"` // Time of status update. 
UpdateDateTime pulumi.StringInput `pulumi:"updateDateTime"` } func (OrderStatusResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*OrderStatusResponse)(nil)).Elem() } func (i OrderStatusResponseArgs) ToOrderStatusResponseOutput() OrderStatusResponseOutput { return i.ToOrderStatusResponseOutputWithContext(context.Background()) } func (i OrderStatusResponseArgs) ToOrderStatusResponseOutputWithContext(ctx context.Context) OrderStatusResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusResponseOutput) } func (i OrderStatusResponseArgs) ToOrderStatusResponsePtrOutput() OrderStatusResponsePtrOutput { return i.ToOrderStatusResponsePtrOutputWithContext(context.Background()) } func (i OrderStatusResponseArgs) ToOrderStatusResponsePtrOutputWithContext(ctx context.Context) OrderStatusResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusResponseOutput).ToOrderStatusResponsePtrOutputWithContext(ctx) } // OrderStatusResponsePtrInput is an input type that accepts OrderStatusResponseArgs, OrderStatusResponsePtr and OrderStatusResponsePtrOutput values. // You can construct a concrete instance of `OrderStatusResponsePtrInput` via: // // OrderStatusResponseArgs{...} // // or: // // nil type OrderStatusResponsePtrInput interface { pulumi.Input ToOrderStatusResponsePtrOutput() OrderStatusResponsePtrOutput ToOrderStatusResponsePtrOutputWithContext(context.Context) OrderStatusResponsePtrOutput } type orderStatusResponsePtrType OrderStatusResponseArgs func OrderStatusResponsePtr(v *OrderStatusResponseArgs) OrderStatusResponsePtrInput { return (*orderStatusResponsePtrType)(v) } func (*orderStatusResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**OrderStatusResponse)(nil)).Elem() } func (i *orderStatusResponsePtrType) ToOrderStatusResponsePtrOutput() OrderStatusResponsePtrOutput { return i.ToOrderStatusResponsePtrOutputWithContext(context.Background()) } func (i *orderStatusResponsePtrType) ToOrderStatusResponsePtrOutputWithContext(ctx context.Context) OrderStatusResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusResponsePtrOutput) } // OrderStatusResponseArrayInput is an input type that accepts OrderStatusResponseArray and OrderStatusResponseArrayOutput values. // You can construct a concrete instance of `OrderStatusResponseArrayInput` via: // // OrderStatusResponseArray{ OrderStatusResponseArgs{...} } type OrderStatusResponseArrayInput interface { pulumi.Input ToOrderStatusResponseArrayOutput() OrderStatusResponseArrayOutput ToOrderStatusResponseArrayOutputWithContext(context.Context) OrderStatusResponseArrayOutput } type OrderStatusResponseArray []OrderStatusResponseInput func (OrderStatusResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]OrderStatusResponse)(nil)).Elem() } func (i OrderStatusResponseArray) ToOrderStatusResponseArrayOutput() OrderStatusResponseArrayOutput { return i.ToOrderStatusResponseArrayOutputWithContext(context.Background()) } func (i OrderStatusResponseArray) ToOrderStatusResponseArrayOutputWithContext(ctx context.Context) OrderStatusResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(OrderStatusResponseArrayOutput) } // Represents a single status change. 
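//
// The fields of an OrderStatusResponseOutput are read as Pulumi outputs, for
// example (the variable name is illustrative):
//
//	status := orderStatusResponseOutput.Status()          // pulumi.StringOutput
//	updated := orderStatusResponseOutput.UpdateDateTime() // pulumi.StringOutput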
type OrderStatusResponseOutput struct{ *pulumi.OutputState } func (OrderStatusResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*OrderStatusResponse)(nil)).Elem() } func (o OrderStatusResponseOutput) ToOrderStatusResponseOutput() OrderStatusResponseOutput { return o } func (o OrderStatusResponseOutput) ToOrderStatusResponseOutputWithContext(ctx context.Context) OrderStatusResponseOutput { return o } func (o OrderStatusResponseOutput) ToOrderStatusResponsePtrOutput() OrderStatusResponsePtrOutput { return o.ToOrderStatusResponsePtrOutputWithContext(context.Background()) } func (o OrderStatusResponseOutput) ToOrderStatusResponsePtrOutputWithContext(ctx context.Context) OrderStatusResponsePtrOutput { return o.ApplyT(func(v OrderStatusResponse) *OrderStatusResponse { return &v }).(OrderStatusResponsePtrOutput) } // Dictionary to hold generic information which is not stored // by the already existing properties func (o OrderStatusResponseOutput) AdditionalOrderDetails() pulumi.StringMapOutput { return o.ApplyT(func(v OrderStatusResponse) map[string]string { return v.AdditionalOrderDetails }).(pulumi.StringMapOutput) } // Comments related to this status change. func (o OrderStatusResponseOutput) Comments() pulumi.StringPtrOutput { return o.ApplyT(func(v OrderStatusResponse) *string { return v.Comments }).(pulumi.StringPtrOutput) } // Status of the order as per the allowed status types. func (o OrderStatusResponseOutput) Status() pulumi.StringOutput { return o.ApplyT(func(v OrderStatusResponse) string { return v.Status }).(pulumi.StringOutput) } // Time of status update. func (o OrderStatusResponseOutput) UpdateDateTime() pulumi.StringOutput { return o.ApplyT(func(v OrderStatusResponse) string { return v.UpdateDateTime }).(pulumi.StringOutput) } type OrderStatusResponsePtrOutput struct{ *pulumi.OutputState } func (OrderStatusResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**OrderStatusResponse)(nil)).Elem() } func (o OrderStatusResponsePtrOutput) ToOrderStatusResponsePtrOutput() OrderStatusResponsePtrOutput { return o } func (o OrderStatusResponsePtrOutput) ToOrderStatusResponsePtrOutputWithContext(ctx context.Context) OrderStatusResponsePtrOutput { return o } func (o OrderStatusResponsePtrOutput) Elem() OrderStatusResponseOutput { return o.ApplyT(func(v *OrderStatusResponse) OrderStatusResponse { return *v }).(OrderStatusResponseOutput) } // Dictionary to hold generic information which is not stored // by the already existing properties func (o OrderStatusResponsePtrOutput) AdditionalOrderDetails() pulumi.StringMapOutput { return o.ApplyT(func(v *OrderStatusResponse) map[string]string { if v == nil { return nil } return v.AdditionalOrderDetails }).(pulumi.StringMapOutput) } // Comments related to this status change. func (o OrderStatusResponsePtrOutput) Comments() pulumi.StringPtrOutput { return o.ApplyT(func(v *OrderStatusResponse) *string { if v == nil { return nil } return v.Comments }).(pulumi.StringPtrOutput) } // Status of the order as per the allowed status types. func (o OrderStatusResponsePtrOutput) Status() pulumi.StringPtrOutput { return o.ApplyT(func(v *OrderStatusResponse) *string { if v == nil { return nil } return &v.Status }).(pulumi.StringPtrOutput) } // Time of status update. 
func (o OrderStatusResponsePtrOutput) UpdateDateTime() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *OrderStatusResponse) *string {
		if v == nil {
			return nil
		}
		return &v.UpdateDateTime
	}).(pulumi.StringPtrOutput)
}

type OrderStatusResponseArrayOutput struct{ *pulumi.OutputState }

func (OrderStatusResponseArrayOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*[]OrderStatusResponse)(nil)).Elem()
}

func (o OrderStatusResponseArrayOutput) ToOrderStatusResponseArrayOutput() OrderStatusResponseArrayOutput {
	return o
}

func (o OrderStatusResponseArrayOutput) ToOrderStatusResponseArrayOutputWithContext(ctx context.Context) OrderStatusResponseArrayOutput {
	return o
}

func (o OrderStatusResponseArrayOutput) Index(i pulumi.IntInput) OrderStatusResponseOutput {
	return pulumi.All(o, i).ApplyT(func(vs []interface{}) OrderStatusResponse {
		return vs[0].([]OrderStatusResponse)[vs[1].(int)]
	}).(OrderStatusResponseOutput)
}

// Fields for tracking refresh job on the share or container.
type RefreshDetails struct {
	// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	ErrorManifestFile *string `pulumi:"errorManifestFile"`
	// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
	InProgressRefreshJobId *string `pulumi:"inProgressRefreshJobId"`
	// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastCompletedRefreshJobTimeInUTC *string `pulumi:"lastCompletedRefreshJobTimeInUTC"`
	// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastJob *string `pulumi:"lastJob"`
}

// RefreshDetailsInput is an input type that accepts RefreshDetailsArgs and RefreshDetailsOutput values.
// You can construct a concrete instance of `RefreshDetailsInput` via:
//
//	RefreshDetailsArgs{...}
type RefreshDetailsInput interface {
	pulumi.Input

	ToRefreshDetailsOutput() RefreshDetailsOutput
	ToRefreshDetailsOutputWithContext(context.Context) RefreshDetailsOutput
}

// Fields for tracking refresh job on the share or container.
type RefreshDetailsArgs struct {
	// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	ErrorManifestFile pulumi.StringPtrInput `pulumi:"errorManifestFile"`
	// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
	InProgressRefreshJobId pulumi.StringPtrInput `pulumi:"inProgressRefreshJobId"`
	// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastCompletedRefreshJobTimeInUTC pulumi.StringPtrInput `pulumi:"lastCompletedRefreshJobTimeInUTC"`
	// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
LastJob pulumi.StringPtrInput `pulumi:"lastJob"` } func (RefreshDetailsArgs) ElementType() reflect.Type { return reflect.TypeOf((*RefreshDetails)(nil)).Elem() } func (i RefreshDetailsArgs) ToRefreshDetailsOutput() RefreshDetailsOutput { return i.ToRefreshDetailsOutputWithContext(context.Background()) } func (i RefreshDetailsArgs) ToRefreshDetailsOutputWithContext(ctx context.Context) RefreshDetailsOutput { return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsOutput) } func (i RefreshDetailsArgs) ToRefreshDetailsPtrOutput() RefreshDetailsPtrOutput { return i.ToRefreshDetailsPtrOutputWithContext(context.Background()) } func (i RefreshDetailsArgs) ToRefreshDetailsPtrOutputWithContext(ctx context.Context) RefreshDetailsPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsOutput).ToRefreshDetailsPtrOutputWithContext(ctx) } // RefreshDetailsPtrInput is an input type that accepts RefreshDetailsArgs, RefreshDetailsPtr and RefreshDetailsPtrOutput values. // You can construct a concrete instance of `RefreshDetailsPtrInput` via: // // RefreshDetailsArgs{...} // // or: // // nil type RefreshDetailsPtrInput interface { pulumi.Input ToRefreshDetailsPtrOutput() RefreshDetailsPtrOutput ToRefreshDetailsPtrOutputWithContext(context.Context) RefreshDetailsPtrOutput } type refreshDetailsPtrType RefreshDetailsArgs func RefreshDetailsPtr(v *RefreshDetailsArgs) RefreshDetailsPtrInput { return (*refreshDetailsPtrType)(v) } func (*refreshDetailsPtrType) ElementType() reflect.Type { return reflect.TypeOf((**RefreshDetails)(nil)).Elem() } func (i *refreshDetailsPtrType) ToRefreshDetailsPtrOutput() RefreshDetailsPtrOutput { return i.ToRefreshDetailsPtrOutputWithContext(context.Background()) } func (i *refreshDetailsPtrType) ToRefreshDetailsPtrOutputWithContext(ctx context.Context) RefreshDetailsPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsPtrOutput) } // Fields for tracking refresh job on the share or container. type RefreshDetailsOutput struct{ *pulumi.OutputState } func (RefreshDetailsOutput) ElementType() reflect.Type { return reflect.TypeOf((*RefreshDetails)(nil)).Elem() } func (o RefreshDetailsOutput) ToRefreshDetailsOutput() RefreshDetailsOutput { return o } func (o RefreshDetailsOutput) ToRefreshDetailsOutputWithContext(ctx context.Context) RefreshDetailsOutput { return o } func (o RefreshDetailsOutput) ToRefreshDetailsPtrOutput() RefreshDetailsPtrOutput { return o.ToRefreshDetailsPtrOutputWithContext(context.Background()) } func (o RefreshDetailsOutput) ToRefreshDetailsPtrOutputWithContext(ctx context.Context) RefreshDetailsPtrOutput { return o.ApplyT(func(v RefreshDetails) *RefreshDetails { return &v }).(RefreshDetailsPtrOutput) } // Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job. func (o RefreshDetailsOutput) ErrorManifestFile() pulumi.StringPtrOutput { return o.ApplyT(func(v RefreshDetails) *string { return v.ErrorManifestFile }).(pulumi.StringPtrOutput) } // If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress. 
func (o RefreshDetailsOutput) InProgressRefreshJobId() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetails) *string { return v.InProgressRefreshJobId }).(pulumi.StringPtrOutput)
}

// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsOutput) LastCompletedRefreshJobTimeInUTC() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetails) *string { return v.LastCompletedRefreshJobTimeInUTC }).(pulumi.StringPtrOutput)
}

// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsOutput) LastJob() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetails) *string { return v.LastJob }).(pulumi.StringPtrOutput)
}

type RefreshDetailsPtrOutput struct{ *pulumi.OutputState }

func (RefreshDetailsPtrOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**RefreshDetails)(nil)).Elem()
}

func (o RefreshDetailsPtrOutput) ToRefreshDetailsPtrOutput() RefreshDetailsPtrOutput {
	return o
}

func (o RefreshDetailsPtrOutput) ToRefreshDetailsPtrOutputWithContext(ctx context.Context) RefreshDetailsPtrOutput {
	return o
}

func (o RefreshDetailsPtrOutput) Elem() RefreshDetailsOutput {
	return o.ApplyT(func(v *RefreshDetails) RefreshDetails { return *v }).(RefreshDetailsOutput)
}

// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsPtrOutput) ErrorManifestFile() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetails) *string {
		if v == nil {
			return nil
		}
		return v.ErrorManifestFile
	}).(pulumi.StringPtrOutput)
}

// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
func (o RefreshDetailsPtrOutput) InProgressRefreshJobId() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetails) *string {
		if v == nil {
			return nil
		}
		return v.InProgressRefreshJobId
	}).(pulumi.StringPtrOutput)
}

// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsPtrOutput) LastCompletedRefreshJobTimeInUTC() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetails) *string {
		if v == nil {
			return nil
		}
		return v.LastCompletedRefreshJobTimeInUTC
	}).(pulumi.StringPtrOutput)
}

// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsPtrOutput) LastJob() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetails) *string {
		if v == nil {
			return nil
		}
		return v.LastJob
	}).(pulumi.StringPtrOutput)
}

// Fields for tracking refresh job on the share or container.
type RefreshDetailsResponse struct {
	// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	ErrorManifestFile *string `pulumi:"errorManifestFile"`
	// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
	InProgressRefreshJobId *string `pulumi:"inProgressRefreshJobId"`
	// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastCompletedRefreshJobTimeInUTC *string `pulumi:"lastCompletedRefreshJobTimeInUTC"`
	// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastJob *string `pulumi:"lastJob"`
}

// RefreshDetailsResponseInput is an input type that accepts RefreshDetailsResponseArgs and RefreshDetailsResponseOutput values.
// You can construct a concrete instance of `RefreshDetailsResponseInput` via:
//
//	RefreshDetailsResponseArgs{...}
type RefreshDetailsResponseInput interface {
	pulumi.Input

	ToRefreshDetailsResponseOutput() RefreshDetailsResponseOutput
	ToRefreshDetailsResponseOutputWithContext(context.Context) RefreshDetailsResponseOutput
}

// Fields for tracking refresh job on the share or container.
type RefreshDetailsResponseArgs struct {
	// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	ErrorManifestFile pulumi.StringPtrInput `pulumi:"errorManifestFile"`
	// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
	InProgressRefreshJobId pulumi.StringPtrInput `pulumi:"inProgressRefreshJobId"`
	// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastCompletedRefreshJobTimeInUTC pulumi.StringPtrInput `pulumi:"lastCompletedRefreshJobTimeInUTC"`
	// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
	LastJob pulumi.StringPtrInput `pulumi:"lastJob"`
}

func (RefreshDetailsResponseArgs) ElementType() reflect.Type {
	return reflect.TypeOf((*RefreshDetailsResponse)(nil)).Elem()
}

func (i RefreshDetailsResponseArgs) ToRefreshDetailsResponseOutput() RefreshDetailsResponseOutput {
	return i.ToRefreshDetailsResponseOutputWithContext(context.Background())
}

func (i RefreshDetailsResponseArgs) ToRefreshDetailsResponseOutputWithContext(ctx context.Context) RefreshDetailsResponseOutput {
	return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsResponseOutput)
}

func (i RefreshDetailsResponseArgs) ToRefreshDetailsResponsePtrOutput() RefreshDetailsResponsePtrOutput {
	return i.ToRefreshDetailsResponsePtrOutputWithContext(context.Background())
}

func (i RefreshDetailsResponseArgs) ToRefreshDetailsResponsePtrOutputWithContext(ctx context.Context) RefreshDetailsResponsePtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsResponseOutput).ToRefreshDetailsResponsePtrOutputWithContext(ctx)
}

// RefreshDetailsResponsePtrInput is an input type that accepts RefreshDetailsResponseArgs, RefreshDetailsResponsePtr and RefreshDetailsResponsePtrOutput values.
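// For example, a hypothetical value might be supplied as follows (the
// timestamp and job ID are placeholder assumptions), or the input can simply
// be left nil:
//
//	RefreshDetailsResponseArgs{
//		LastCompletedRefreshJobTimeInUTC: pulumi.String("2021-03-01T10:00:00Z"),
//		LastJob:                          pulumi.String("<ARM ID of the last refresh job>"),
//	}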
// You can construct a concrete instance of `RefreshDetailsResponsePtrInput` via:
//
//	RefreshDetailsResponseArgs{...}
//
//	or:
//
//	nil
type RefreshDetailsResponsePtrInput interface {
	pulumi.Input

	ToRefreshDetailsResponsePtrOutput() RefreshDetailsResponsePtrOutput
	ToRefreshDetailsResponsePtrOutputWithContext(context.Context) RefreshDetailsResponsePtrOutput
}

type refreshDetailsResponsePtrType RefreshDetailsResponseArgs

func RefreshDetailsResponsePtr(v *RefreshDetailsResponseArgs) RefreshDetailsResponsePtrInput {
	return (*refreshDetailsResponsePtrType)(v)
}

func (*refreshDetailsResponsePtrType) ElementType() reflect.Type {
	return reflect.TypeOf((**RefreshDetailsResponse)(nil)).Elem()
}

func (i *refreshDetailsResponsePtrType) ToRefreshDetailsResponsePtrOutput() RefreshDetailsResponsePtrOutput {
	return i.ToRefreshDetailsResponsePtrOutputWithContext(context.Background())
}

func (i *refreshDetailsResponsePtrType) ToRefreshDetailsResponsePtrOutputWithContext(ctx context.Context) RefreshDetailsResponsePtrOutput {
	return pulumi.ToOutputWithContext(ctx, i).(RefreshDetailsResponsePtrOutput)
}

// Fields for tracking refresh job on the share or container.
type RefreshDetailsResponseOutput struct{ *pulumi.OutputState }

func (RefreshDetailsResponseOutput) ElementType() reflect.Type {
	return reflect.TypeOf((*RefreshDetailsResponse)(nil)).Elem()
}

func (o RefreshDetailsResponseOutput) ToRefreshDetailsResponseOutput() RefreshDetailsResponseOutput {
	return o
}

func (o RefreshDetailsResponseOutput) ToRefreshDetailsResponseOutputWithContext(ctx context.Context) RefreshDetailsResponseOutput {
	return o
}

func (o RefreshDetailsResponseOutput) ToRefreshDetailsResponsePtrOutput() RefreshDetailsResponsePtrOutput {
	return o.ToRefreshDetailsResponsePtrOutputWithContext(context.Background())
}

func (o RefreshDetailsResponseOutput) ToRefreshDetailsResponsePtrOutputWithContext(ctx context.Context) RefreshDetailsResponsePtrOutput {
	return o.ApplyT(func(v RefreshDetailsResponse) *RefreshDetailsResponse {
		return &v
	}).(RefreshDetailsResponsePtrOutput)
}

// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponseOutput) ErrorManifestFile() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetailsResponse) *string { return v.ErrorManifestFile }).(pulumi.StringPtrOutput)
}

// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
func (o RefreshDetailsResponseOutput) InProgressRefreshJobId() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetailsResponse) *string { return v.InProgressRefreshJobId }).(pulumi.StringPtrOutput)
}

// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponseOutput) LastCompletedRefreshJobTimeInUTC() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetailsResponse) *string { return v.LastCompletedRefreshJobTimeInUTC }).(pulumi.StringPtrOutput)
}

// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponseOutput) LastJob() pulumi.StringPtrOutput {
	return o.ApplyT(func(v RefreshDetailsResponse) *string { return v.LastJob }).(pulumi.StringPtrOutput)
}

type RefreshDetailsResponsePtrOutput struct{ *pulumi.OutputState }

func (RefreshDetailsResponsePtrOutput) ElementType() reflect.Type {
	return reflect.TypeOf((**RefreshDetailsResponse)(nil)).Elem()
}

func (o RefreshDetailsResponsePtrOutput) ToRefreshDetailsResponsePtrOutput() RefreshDetailsResponsePtrOutput {
	return o
}

func (o RefreshDetailsResponsePtrOutput) ToRefreshDetailsResponsePtrOutputWithContext(ctx context.Context) RefreshDetailsResponsePtrOutput {
	return o
}

func (o RefreshDetailsResponsePtrOutput) Elem() RefreshDetailsResponseOutput {
	return o.ApplyT(func(v *RefreshDetailsResponse) RefreshDetailsResponse { return *v }).(RefreshDetailsResponseOutput)
}

// Indicates the relative path of the error xml for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponsePtrOutput) ErrorManifestFile() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetailsResponse) *string {
		if v == nil {
			return nil
		}
		return v.ErrorManifestFile
	}).(pulumi.StringPtrOutput)
}

// If a refresh job is currently in progress on this share or container, this field indicates the ARM resource ID of that job. The field is empty if no job is in progress.
func (o RefreshDetailsResponsePtrOutput) InProgressRefreshJobId() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetailsResponse) *string {
		if v == nil {
			return nil
		}
		return v.InProgressRefreshJobId
	}).(pulumi.StringPtrOutput)
}

// Indicates the completed time for the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponsePtrOutput) LastCompletedRefreshJobTimeInUTC() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetailsResponse) *string {
		if v == nil {
			return nil
		}
		return v.LastCompletedRefreshJobTimeInUTC
	}).(pulumi.StringPtrOutput)
}

// Indicates the id of the last refresh job on this particular share or container, if any. This could be a failed job or a successful job.
func (o RefreshDetailsResponsePtrOutput) LastJob() pulumi.StringPtrOutput {
	return o.ApplyT(func(v *RefreshDetailsResponse) *string {
		if v == nil {
			return nil
		}
		return v.LastJob
	}).(pulumi.StringPtrOutput)
}

// Specifies the mapping between this particular user and the type of access he has on shares on this device.
type ShareAccessRight struct {
	// Type of access to be allowed on the share for this user.
	AccessType string `pulumi:"accessType"`
	// The share ID.
	ShareId string `pulumi:"shareId"`
}

// ShareAccessRightInput is an input type that accepts ShareAccessRightArgs and ShareAccessRightOutput values.
// You can construct a concrete instance of `ShareAccessRightInput` via:
//
//	ShareAccessRightArgs{...}
type ShareAccessRightInput interface {
	pulumi.Input

	ToShareAccessRightOutput() ShareAccessRightOutput
	ToShareAccessRightOutputWithContext(context.Context) ShareAccessRightOutput
}

// Specifies the mapping between this particular user and the type of access he has on shares on this device.
type ShareAccessRightArgs struct {
	// Type of access to be allowed on the share for this user.
	AccessType pulumi.StringInput `pulumi:"accessType"`
	// The share ID.
ShareId pulumi.StringInput `pulumi:"shareId"` } func (ShareAccessRightArgs) ElementType() reflect.Type { return reflect.TypeOf((*ShareAccessRight)(nil)).Elem() } func (i ShareAccessRightArgs) ToShareAccessRightOutput() ShareAccessRightOutput { return i.ToShareAccessRightOutputWithContext(context.Background()) } func (i ShareAccessRightArgs) ToShareAccessRightOutputWithContext(ctx context.Context) ShareAccessRightOutput { return pulumi.ToOutputWithContext(ctx, i).(ShareAccessRightOutput) } // ShareAccessRightArrayInput is an input type that accepts ShareAccessRightArray and ShareAccessRightArrayOutput values. // You can construct a concrete instance of `ShareAccessRightArrayInput` via: // // ShareAccessRightArray{ ShareAccessRightArgs{...} } type ShareAccessRightArrayInput interface { pulumi.Input ToShareAccessRightArrayOutput() ShareAccessRightArrayOutput ToShareAccessRightArrayOutputWithContext(context.Context) ShareAccessRightArrayOutput } type ShareAccessRightArray []ShareAccessRightInput func (ShareAccessRightArray) ElementType() reflect.Type { return reflect.TypeOf((*[]ShareAccessRight)(nil)).Elem() } func (i ShareAccessRightArray) ToShareAccessRightArrayOutput() ShareAccessRightArrayOutput { return i.ToShareAccessRightArrayOutputWithContext(context.Background()) } func (i ShareAccessRightArray) ToShareAccessRightArrayOutputWithContext(ctx context.Context) ShareAccessRightArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(ShareAccessRightArrayOutput) } // Specifies the mapping between this particular user and the type of access he has on shares on this device. type ShareAccessRightOutput struct{ *pulumi.OutputState } func (ShareAccessRightOutput) ElementType() reflect.Type { return reflect.TypeOf((*ShareAccessRight)(nil)).Elem() } func (o ShareAccessRightOutput) ToShareAccessRightOutput() ShareAccessRightOutput { return o } func (o ShareAccessRightOutput) ToShareAccessRightOutputWithContext(ctx context.Context) ShareAccessRightOutput { return o } // Type of access to be allowed on the share for this user. func (o ShareAccessRightOutput) AccessType() pulumi.StringOutput { return o.ApplyT(func(v ShareAccessRight) string { return v.AccessType }).(pulumi.StringOutput) } // The share ID. func (o ShareAccessRightOutput) ShareId() pulumi.StringOutput { return o.ApplyT(func(v ShareAccessRight) string { return v.ShareId }).(pulumi.StringOutput) } type ShareAccessRightArrayOutput struct{ *pulumi.OutputState } func (ShareAccessRightArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]ShareAccessRight)(nil)).Elem() } func (o ShareAccessRightArrayOutput) ToShareAccessRightArrayOutput() ShareAccessRightArrayOutput { return o } func (o ShareAccessRightArrayOutput) ToShareAccessRightArrayOutputWithContext(ctx context.Context) ShareAccessRightArrayOutput { return o } func (o ShareAccessRightArrayOutput) Index(i pulumi.IntInput) ShareAccessRightOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) ShareAccessRight { return vs[0].([]ShareAccessRight)[vs[1].(int)] }).(ShareAccessRightOutput) } // Specifies the mapping between this particular user and the type of access he has on shares on this device. type ShareAccessRightResponse struct { // Type of access to be allowed on the share for this user. AccessType string `pulumi:"accessType"` // The share ID. ShareId string `pulumi:"shareId"` } // ShareAccessRightResponseInput is an input type that accepts ShareAccessRightResponseArgs and ShareAccessRightResponseOutput values. 
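// For example, a hypothetical entry might look like the following (the access
// type and share ID are placeholder assumptions):
//
//	ShareAccessRightResponseArgs{
//		AccessType: pulumi.String("Read"),
//		ShareId:    pulumi.String("<ARM ID of the share>"),
//	}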
// You can construct a concrete instance of `ShareAccessRightResponseInput` via: // // ShareAccessRightResponseArgs{...} type ShareAccessRightResponseInput interface { pulumi.Input ToShareAccessRightResponseOutput() ShareAccessRightResponseOutput ToShareAccessRightResponseOutputWithContext(context.Context) ShareAccessRightResponseOutput } // Specifies the mapping between this particular user and the type of access he has on shares on this device. type ShareAccessRightResponseArgs struct { // Type of access to be allowed on the share for this user. AccessType pulumi.StringInput `pulumi:"accessType"` // The share ID. ShareId pulumi.StringInput `pulumi:"shareId"` } func (ShareAccessRightResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*ShareAccessRightResponse)(nil)).Elem() } func (i ShareAccessRightResponseArgs) ToShareAccessRightResponseOutput() ShareAccessRightResponseOutput { return i.ToShareAccessRightResponseOutputWithContext(context.Background()) } func (i ShareAccessRightResponseArgs) ToShareAccessRightResponseOutputWithContext(ctx context.Context) ShareAccessRightResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(ShareAccessRightResponseOutput) } // ShareAccessRightResponseArrayInput is an input type that accepts ShareAccessRightResponseArray and ShareAccessRightResponseArrayOutput values. // You can construct a concrete instance of `ShareAccessRightResponseArrayInput` via: // // ShareAccessRightResponseArray{ ShareAccessRightResponseArgs{...} } type ShareAccessRightResponseArrayInput interface { pulumi.Input ToShareAccessRightResponseArrayOutput() ShareAccessRightResponseArrayOutput ToShareAccessRightResponseArrayOutputWithContext(context.Context) ShareAccessRightResponseArrayOutput } type ShareAccessRightResponseArray []ShareAccessRightResponseInput func (ShareAccessRightResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]ShareAccessRightResponse)(nil)).Elem() } func (i ShareAccessRightResponseArray) ToShareAccessRightResponseArrayOutput() ShareAccessRightResponseArrayOutput { return i.ToShareAccessRightResponseArrayOutputWithContext(context.Background()) } func (i ShareAccessRightResponseArray) ToShareAccessRightResponseArrayOutputWithContext(ctx context.Context) ShareAccessRightResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(ShareAccessRightResponseArrayOutput) } // Specifies the mapping between this particular user and the type of access he has on shares on this device. type ShareAccessRightResponseOutput struct{ *pulumi.OutputState } func (ShareAccessRightResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*ShareAccessRightResponse)(nil)).Elem() } func (o ShareAccessRightResponseOutput) ToShareAccessRightResponseOutput() ShareAccessRightResponseOutput { return o } func (o ShareAccessRightResponseOutput) ToShareAccessRightResponseOutputWithContext(ctx context.Context) ShareAccessRightResponseOutput { return o } // Type of access to be allowed on the share for this user. func (o ShareAccessRightResponseOutput) AccessType() pulumi.StringOutput { return o.ApplyT(func(v ShareAccessRightResponse) string { return v.AccessType }).(pulumi.StringOutput) } // The share ID. 
func (o ShareAccessRightResponseOutput) ShareId() pulumi.StringOutput { return o.ApplyT(func(v ShareAccessRightResponse) string { return v.ShareId }).(pulumi.StringOutput) } type ShareAccessRightResponseArrayOutput struct{ *pulumi.OutputState } func (ShareAccessRightResponseArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]ShareAccessRightResponse)(nil)).Elem() } func (o ShareAccessRightResponseArrayOutput) ToShareAccessRightResponseArrayOutput() ShareAccessRightResponseArrayOutput { return o } func (o ShareAccessRightResponseArrayOutput) ToShareAccessRightResponseArrayOutputWithContext(ctx context.Context) ShareAccessRightResponseArrayOutput { return o } func (o ShareAccessRightResponseArrayOutput) Index(i pulumi.IntInput) ShareAccessRightResponseOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) ShareAccessRightResponse { return vs[0].([]ShareAccessRightResponse)[vs[1].(int)] }).(ShareAccessRightResponseOutput) } // The SKU type. type Sku struct { // SKU name. Name *string `pulumi:"name"` // The SKU tier. This is based on the SKU name. Tier *string `pulumi:"tier"` } // SkuInput is an input type that accepts SkuArgs and SkuOutput values. // You can construct a concrete instance of `SkuInput` via: // // SkuArgs{...} type SkuInput interface { pulumi.Input ToSkuOutput() SkuOutput ToSkuOutputWithContext(context.Context) SkuOutput } // The SKU type. type SkuArgs struct { // SKU name. Name pulumi.StringPtrInput `pulumi:"name"` // The SKU tier. This is based on the SKU name. Tier pulumi.StringPtrInput `pulumi:"tier"` } func (SkuArgs) ElementType() reflect.Type { return reflect.TypeOf((*Sku)(nil)).Elem() } func (i SkuArgs) ToSkuOutput() SkuOutput { return i.ToSkuOutputWithContext(context.Background()) } func (i SkuArgs) ToSkuOutputWithContext(ctx context.Context) SkuOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuOutput) } func (i SkuArgs) ToSkuPtrOutput() SkuPtrOutput { return i.ToSkuPtrOutputWithContext(context.Background()) } func (i SkuArgs) ToSkuPtrOutputWithContext(ctx context.Context) SkuPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuOutput).ToSkuPtrOutputWithContext(ctx) } // SkuPtrInput is an input type that accepts SkuArgs, SkuPtr and SkuPtrOutput values. // You can construct a concrete instance of `SkuPtrInput` via: // // SkuArgs{...} // // or: // // nil type SkuPtrInput interface { pulumi.Input ToSkuPtrOutput() SkuPtrOutput ToSkuPtrOutputWithContext(context.Context) SkuPtrOutput } type skuPtrType SkuArgs func SkuPtr(v *SkuArgs) SkuPtrInput { return (*skuPtrType)(v) } func (*skuPtrType) ElementType() reflect.Type { return reflect.TypeOf((**Sku)(nil)).Elem() } func (i *skuPtrType) ToSkuPtrOutput() SkuPtrOutput { return i.ToSkuPtrOutputWithContext(context.Background()) } func (i *skuPtrType) ToSkuPtrOutputWithContext(ctx context.Context) SkuPtrOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuPtrOutput) } // The SKU type. type SkuOutput struct{ *pulumi.OutputState } func (SkuOutput) ElementType() reflect.Type { return reflect.TypeOf((*Sku)(nil)).Elem() } func (o SkuOutput) ToSkuOutput() SkuOutput { return o } func (o SkuOutput) ToSkuOutputWithContext(ctx context.Context) SkuOutput { return o } func (o SkuOutput) ToSkuPtrOutput() SkuPtrOutput { return o.ToSkuPtrOutputWithContext(context.Background()) } func (o SkuOutput) ToSkuPtrOutputWithContext(ctx context.Context) SkuPtrOutput { return o.ApplyT(func(v Sku) *Sku { return &v }).(SkuPtrOutput) } // SKU name. 
func (o SkuOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v Sku) *string { return v.Name }).(pulumi.StringPtrOutput) } // The SKU tier. This is based on the SKU name. func (o SkuOutput) Tier() pulumi.StringPtrOutput { return o.ApplyT(func(v Sku) *string { return v.Tier }).(pulumi.StringPtrOutput) } type SkuPtrOutput struct{ *pulumi.OutputState } func (SkuPtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**Sku)(nil)).Elem() } func (o SkuPtrOutput) ToSkuPtrOutput() SkuPtrOutput { return o } func (o SkuPtrOutput) ToSkuPtrOutputWithContext(ctx context.Context) SkuPtrOutput { return o } func (o SkuPtrOutput) Elem() SkuOutput { return o.ApplyT(func(v *Sku) Sku { return *v }).(SkuOutput) } // SKU name. func (o SkuPtrOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v *Sku) *string { if v == nil { return nil } return v.Name }).(pulumi.StringPtrOutput) } // The SKU tier. This is based on the SKU name. func (o SkuPtrOutput) Tier() pulumi.StringPtrOutput { return o.ApplyT(func(v *Sku) *string { if v == nil { return nil } return v.Tier }).(pulumi.StringPtrOutput) } // The SKU type. type SkuResponse struct { // SKU name. Name *string `pulumi:"name"` // The SKU tier. This is based on the SKU name. Tier *string `pulumi:"tier"` } // SkuResponseInput is an input type that accepts SkuResponseArgs and SkuResponseOutput values. // You can construct a concrete instance of `SkuResponseInput` via: // // SkuResponseArgs{...} type SkuResponseInput interface { pulumi.Input ToSkuResponseOutput() SkuResponseOutput ToSkuResponseOutputWithContext(context.Context) SkuResponseOutput } // The SKU type. type SkuResponseArgs struct { // SKU name. Name pulumi.StringPtrInput `pulumi:"name"` // The SKU tier. This is based on the SKU name. Tier pulumi.StringPtrInput `pulumi:"tier"` } func (SkuResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*SkuResponse)(nil)).Elem() } func (i SkuResponseArgs) ToSkuResponseOutput() SkuResponseOutput { return i.ToSkuResponseOutputWithContext(context.Background()) } func (i SkuResponseArgs) ToSkuResponseOutputWithContext(ctx context.Context) SkuResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuResponseOutput) } func (i SkuResponseArgs) ToSkuResponsePtrOutput() SkuResponsePtrOutput { return i.ToSkuResponsePtrOutputWithContext(context.Background()) } func (i SkuResponseArgs) ToSkuResponsePtrOutputWithContext(ctx context.Context) SkuResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuResponseOutput).ToSkuResponsePtrOutputWithContext(ctx) } // SkuResponsePtrInput is an input type that accepts SkuResponseArgs, SkuResponsePtr and SkuResponsePtrOutput values. // You can construct a concrete instance of `SkuResponsePtrInput` via: // // SkuResponseArgs{...} // // or: // // nil type SkuResponsePtrInput interface { pulumi.Input ToSkuResponsePtrOutput() SkuResponsePtrOutput ToSkuResponsePtrOutputWithContext(context.Context) SkuResponsePtrOutput } type skuResponsePtrType SkuResponseArgs func SkuResponsePtr(v *SkuResponseArgs) SkuResponsePtrInput { return (*skuResponsePtrType)(v) } func (*skuResponsePtrType) ElementType() reflect.Type { return reflect.TypeOf((**SkuResponse)(nil)).Elem() } func (i *skuResponsePtrType) ToSkuResponsePtrOutput() SkuResponsePtrOutput { return i.ToSkuResponsePtrOutputWithContext(context.Background()) } func (i *skuResponsePtrType) ToSkuResponsePtrOutputWithContext(ctx context.Context) SkuResponsePtrOutput { return pulumi.ToOutputWithContext(ctx, i).(SkuResponsePtrOutput) } // The SKU type. 
type SkuResponseOutput struct{ *pulumi.OutputState } func (SkuResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*SkuResponse)(nil)).Elem() } func (o SkuResponseOutput) ToSkuResponseOutput() SkuResponseOutput { return o } func (o SkuResponseOutput) ToSkuResponseOutputWithContext(ctx context.Context) SkuResponseOutput { return o } func (o SkuResponseOutput) ToSkuResponsePtrOutput() SkuResponsePtrOutput { return o.ToSkuResponsePtrOutputWithContext(context.Background()) } func (o SkuResponseOutput) ToSkuResponsePtrOutputWithContext(ctx context.Context) SkuResponsePtrOutput { return o.ApplyT(func(v SkuResponse) *SkuResponse { return &v }).(SkuResponsePtrOutput) } // SKU name. func (o SkuResponseOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v SkuResponse) *string { return v.Name }).(pulumi.StringPtrOutput) } // The SKU tier. This is based on the SKU name. func (o SkuResponseOutput) Tier() pulumi.StringPtrOutput { return o.ApplyT(func(v SkuResponse) *string { return v.Tier }).(pulumi.StringPtrOutput) } type SkuResponsePtrOutput struct{ *pulumi.OutputState } func (SkuResponsePtrOutput) ElementType() reflect.Type { return reflect.TypeOf((**SkuResponse)(nil)).Elem() } func (o SkuResponsePtrOutput) ToSkuResponsePtrOutput() SkuResponsePtrOutput { return o } func (o SkuResponsePtrOutput) ToSkuResponsePtrOutputWithContext(ctx context.Context) SkuResponsePtrOutput { return o } func (o SkuResponsePtrOutput) Elem() SkuResponseOutput { return o.ApplyT(func(v *SkuResponse) SkuResponse { return *v }).(SkuResponseOutput) } // SKU name. func (o SkuResponsePtrOutput) Name() pulumi.StringPtrOutput { return o.ApplyT(func(v *SkuResponse) *string { if v == nil { return nil } return v.Name }).(pulumi.StringPtrOutput) } // The SKU tier. This is based on the SKU name. func (o SkuResponsePtrOutput) Tier() pulumi.StringPtrOutput { return o.ApplyT(func(v *SkuResponse) *string { if v == nil { return nil } return v.Tier }).(pulumi.StringPtrOutput) } // Tracking courier information. type TrackingInfoResponse struct { // Name of the carrier used in the delivery. CarrierName *string `pulumi:"carrierName"` // Serial number of the device being tracked. SerialNumber *string `pulumi:"serialNumber"` // Tracking ID of the shipment. TrackingId *string `pulumi:"trackingId"` // Tracking URL of the shipment. TrackingUrl *string `pulumi:"trackingUrl"` } // TrackingInfoResponseInput is an input type that accepts TrackingInfoResponseArgs and TrackingInfoResponseOutput values. // You can construct a concrete instance of `TrackingInfoResponseInput` via: // // TrackingInfoResponseArgs{...} type TrackingInfoResponseInput interface { pulumi.Input ToTrackingInfoResponseOutput() TrackingInfoResponseOutput ToTrackingInfoResponseOutputWithContext(context.Context) TrackingInfoResponseOutput } // Tracking courier information. type TrackingInfoResponseArgs struct { // Name of the carrier used in the delivery. CarrierName pulumi.StringPtrInput `pulumi:"carrierName"` // Serial number of the device being tracked. SerialNumber pulumi.StringPtrInput `pulumi:"serialNumber"` // Tracking ID of the shipment. TrackingId pulumi.StringPtrInput `pulumi:"trackingId"` // Tracking URL of the shipment. 
TrackingUrl pulumi.StringPtrInput `pulumi:"trackingUrl"` } func (TrackingInfoResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*TrackingInfoResponse)(nil)).Elem() } func (i TrackingInfoResponseArgs) ToTrackingInfoResponseOutput() TrackingInfoResponseOutput { return i.ToTrackingInfoResponseOutputWithContext(context.Background()) } func (i TrackingInfoResponseArgs) ToTrackingInfoResponseOutputWithContext(ctx context.Context) TrackingInfoResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(TrackingInfoResponseOutput) } // TrackingInfoResponseArrayInput is an input type that accepts TrackingInfoResponseArray and TrackingInfoResponseArrayOutput values. // You can construct a concrete instance of `TrackingInfoResponseArrayInput` via: // // TrackingInfoResponseArray{ TrackingInfoResponseArgs{...} } type TrackingInfoResponseArrayInput interface { pulumi.Input ToTrackingInfoResponseArrayOutput() TrackingInfoResponseArrayOutput ToTrackingInfoResponseArrayOutputWithContext(context.Context) TrackingInfoResponseArrayOutput } type TrackingInfoResponseArray []TrackingInfoResponseInput func (TrackingInfoResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]TrackingInfoResponse)(nil)).Elem() } func (i TrackingInfoResponseArray) ToTrackingInfoResponseArrayOutput() TrackingInfoResponseArrayOutput { return i.ToTrackingInfoResponseArrayOutputWithContext(context.Background()) } func (i TrackingInfoResponseArray) ToTrackingInfoResponseArrayOutputWithContext(ctx context.Context) TrackingInfoResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(TrackingInfoResponseArrayOutput) } // Tracking courier information. type TrackingInfoResponseOutput struct{ *pulumi.OutputState } func (TrackingInfoResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*TrackingInfoResponse)(nil)).Elem() } func (o TrackingInfoResponseOutput) ToTrackingInfoResponseOutput() TrackingInfoResponseOutput { return o } func (o TrackingInfoResponseOutput) ToTrackingInfoResponseOutputWithContext(ctx context.Context) TrackingInfoResponseOutput { return o } // Name of the carrier used in the delivery. func (o TrackingInfoResponseOutput) CarrierName() pulumi.StringPtrOutput { return o.ApplyT(func(v TrackingInfoResponse) *string { return v.CarrierName }).(pulumi.StringPtrOutput) } // Serial number of the device being tracked. func (o TrackingInfoResponseOutput) SerialNumber() pulumi.StringPtrOutput { return o.ApplyT(func(v TrackingInfoResponse) *string { return v.SerialNumber }).(pulumi.StringPtrOutput) } // Tracking ID of the shipment. func (o TrackingInfoResponseOutput) TrackingId() pulumi.StringPtrOutput { return o.ApplyT(func(v TrackingInfoResponse) *string { return v.TrackingId }).(pulumi.StringPtrOutput) } // Tracking URL of the shipment. 
func (o TrackingInfoResponseOutput) TrackingUrl() pulumi.StringPtrOutput { return o.ApplyT(func(v TrackingInfoResponse) *string { return v.TrackingUrl }).(pulumi.StringPtrOutput) } type TrackingInfoResponseArrayOutput struct{ *pulumi.OutputState } func (TrackingInfoResponseArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]TrackingInfoResponse)(nil)).Elem() } func (o TrackingInfoResponseArrayOutput) ToTrackingInfoResponseArrayOutput() TrackingInfoResponseArrayOutput { return o } func (o TrackingInfoResponseArrayOutput) ToTrackingInfoResponseArrayOutputWithContext(ctx context.Context) TrackingInfoResponseArrayOutput { return o } func (o TrackingInfoResponseArrayOutput) Index(i pulumi.IntInput) TrackingInfoResponseOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) TrackingInfoResponse { return vs[0].([]TrackingInfoResponse)[vs[1].(int)] }).(TrackingInfoResponseOutput) } // The mapping between a particular user and the access type on the SMB share. type UserAccessRight struct { // Type of access to be allowed for the user. AccessType string `pulumi:"accessType"` // User ID (already existing in the device). UserId string `pulumi:"userId"` } // UserAccessRightInput is an input type that accepts UserAccessRightArgs and UserAccessRightOutput values. // You can construct a concrete instance of `UserAccessRightInput` via: // // UserAccessRightArgs{...} type UserAccessRightInput interface { pulumi.Input ToUserAccessRightOutput() UserAccessRightOutput ToUserAccessRightOutputWithContext(context.Context) UserAccessRightOutput } // The mapping between a particular user and the access type on the SMB share. type UserAccessRightArgs struct { // Type of access to be allowed for the user. AccessType pulumi.StringInput `pulumi:"accessType"` // User ID (already existing in the device). UserId pulumi.StringInput `pulumi:"userId"` } func (UserAccessRightArgs) ElementType() reflect.Type { return reflect.TypeOf((*UserAccessRight)(nil)).Elem() } func (i UserAccessRightArgs) ToUserAccessRightOutput() UserAccessRightOutput { return i.ToUserAccessRightOutputWithContext(context.Background()) } func (i UserAccessRightArgs) ToUserAccessRightOutputWithContext(ctx context.Context) UserAccessRightOutput { return pulumi.ToOutputWithContext(ctx, i).(UserAccessRightOutput) } // UserAccessRightArrayInput is an input type that accepts UserAccessRightArray and UserAccessRightArrayOutput values. // You can construct a concrete instance of `UserAccessRightArrayInput` via: // // UserAccessRightArray{ UserAccessRightArgs{...} } type UserAccessRightArrayInput interface { pulumi.Input ToUserAccessRightArrayOutput() UserAccessRightArrayOutput ToUserAccessRightArrayOutputWithContext(context.Context) UserAccessRightArrayOutput } type UserAccessRightArray []UserAccessRightInput func (UserAccessRightArray) ElementType() reflect.Type { return reflect.TypeOf((*[]UserAccessRight)(nil)).Elem() } func (i UserAccessRightArray) ToUserAccessRightArrayOutput() UserAccessRightArrayOutput { return i.ToUserAccessRightArrayOutputWithContext(context.Background()) } func (i UserAccessRightArray) ToUserAccessRightArrayOutputWithContext(ctx context.Context) UserAccessRightArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(UserAccessRightArrayOutput) } // The mapping between a particular user and the access type on the SMB share. 
type UserAccessRightOutput struct{ *pulumi.OutputState } func (UserAccessRightOutput) ElementType() reflect.Type { return reflect.TypeOf((*UserAccessRight)(nil)).Elem() } func (o UserAccessRightOutput) ToUserAccessRightOutput() UserAccessRightOutput { return o } func (o UserAccessRightOutput) ToUserAccessRightOutputWithContext(ctx context.Context) UserAccessRightOutput { return o } // Type of access to be allowed for the user. func (o UserAccessRightOutput) AccessType() pulumi.StringOutput { return o.ApplyT(func(v UserAccessRight) string { return v.AccessType }).(pulumi.StringOutput) } // User ID (already existing in the device). func (o UserAccessRightOutput) UserId() pulumi.StringOutput { return o.ApplyT(func(v UserAccessRight) string { return v.UserId }).(pulumi.StringOutput) } type UserAccessRightArrayOutput struct{ *pulumi.OutputState } func (UserAccessRightArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]UserAccessRight)(nil)).Elem() } func (o UserAccessRightArrayOutput) ToUserAccessRightArrayOutput() UserAccessRightArrayOutput { return o } func (o UserAccessRightArrayOutput) ToUserAccessRightArrayOutputWithContext(ctx context.Context) UserAccessRightArrayOutput { return o } func (o UserAccessRightArrayOutput) Index(i pulumi.IntInput) UserAccessRightOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) UserAccessRight { return vs[0].([]UserAccessRight)[vs[1].(int)] }).(UserAccessRightOutput) } // The mapping between a particular user and the access type on the SMB share. type UserAccessRightResponse struct { // Type of access to be allowed for the user. AccessType string `pulumi:"accessType"` // User ID (already existing in the device). UserId string `pulumi:"userId"` } // UserAccessRightResponseInput is an input type that accepts UserAccessRightResponseArgs and UserAccessRightResponseOutput values. // You can construct a concrete instance of `UserAccessRightResponseInput` via: // // UserAccessRightResponseArgs{...} type UserAccessRightResponseInput interface { pulumi.Input ToUserAccessRightResponseOutput() UserAccessRightResponseOutput ToUserAccessRightResponseOutputWithContext(context.Context) UserAccessRightResponseOutput } // The mapping between a particular user and the access type on the SMB share. type UserAccessRightResponseArgs struct { // Type of access to be allowed for the user. AccessType pulumi.StringInput `pulumi:"accessType"` // User ID (already existing in the device). UserId pulumi.StringInput `pulumi:"userId"` } func (UserAccessRightResponseArgs) ElementType() reflect.Type { return reflect.TypeOf((*UserAccessRightResponse)(nil)).Elem() } func (i UserAccessRightResponseArgs) ToUserAccessRightResponseOutput() UserAccessRightResponseOutput { return i.ToUserAccessRightResponseOutputWithContext(context.Background()) } func (i UserAccessRightResponseArgs) ToUserAccessRightResponseOutputWithContext(ctx context.Context) UserAccessRightResponseOutput { return pulumi.ToOutputWithContext(ctx, i).(UserAccessRightResponseOutput) } // UserAccessRightResponseArrayInput is an input type that accepts UserAccessRightResponseArray and UserAccessRightResponseArrayOutput values. 
// You can construct a concrete instance of `UserAccessRightResponseArrayInput` via: // // UserAccessRightResponseArray{ UserAccessRightResponseArgs{...} } type UserAccessRightResponseArrayInput interface { pulumi.Input ToUserAccessRightResponseArrayOutput() UserAccessRightResponseArrayOutput ToUserAccessRightResponseArrayOutputWithContext(context.Context) UserAccessRightResponseArrayOutput } type UserAccessRightResponseArray []UserAccessRightResponseInput func (UserAccessRightResponseArray) ElementType() reflect.Type { return reflect.TypeOf((*[]UserAccessRightResponse)(nil)).Elem() } func (i UserAccessRightResponseArray) ToUserAccessRightResponseArrayOutput() UserAccessRightResponseArrayOutput { return i.ToUserAccessRightResponseArrayOutputWithContext(context.Background()) } func (i UserAccessRightResponseArray) ToUserAccessRightResponseArrayOutputWithContext(ctx context.Context) UserAccessRightResponseArrayOutput { return pulumi.ToOutputWithContext(ctx, i).(UserAccessRightResponseArrayOutput) } // The mapping between a particular user and the access type on the SMB share. type UserAccessRightResponseOutput struct{ *pulumi.OutputState } func (UserAccessRightResponseOutput) ElementType() reflect.Type { return reflect.TypeOf((*UserAccessRightResponse)(nil)).Elem() } func (o UserAccessRightResponseOutput) ToUserAccessRightResponseOutput() UserAccessRightResponseOutput { return o } func (o UserAccessRightResponseOutput) ToUserAccessRightResponseOutputWithContext(ctx context.Context) UserAccessRightResponseOutput { return o } // Type of access to be allowed for the user. func (o UserAccessRightResponseOutput) AccessType() pulumi.StringOutput { return o.ApplyT(func(v UserAccessRightResponse) string { return v.AccessType }).(pulumi.StringOutput) } // User ID (already existing in the device). 
func (o UserAccessRightResponseOutput) UserId() pulumi.StringOutput { return o.ApplyT(func(v UserAccessRightResponse) string { return v.UserId }).(pulumi.StringOutput) } type UserAccessRightResponseArrayOutput struct{ *pulumi.OutputState } func (UserAccessRightResponseArrayOutput) ElementType() reflect.Type { return reflect.TypeOf((*[]UserAccessRightResponse)(nil)).Elem() } func (o UserAccessRightResponseArrayOutput) ToUserAccessRightResponseArrayOutput() UserAccessRightResponseArrayOutput { return o } func (o UserAccessRightResponseArrayOutput) ToUserAccessRightResponseArrayOutputWithContext(ctx context.Context) UserAccessRightResponseArrayOutput { return o } func (o UserAccessRightResponseArrayOutput) Index(i pulumi.IntInput) UserAccessRightResponseOutput { return pulumi.All(o, i).ApplyT(func(vs []interface{}) UserAccessRightResponse { return vs[0].([]UserAccessRightResponse)[vs[1].(int)] }).(UserAccessRightResponseOutput) } func init() { pulumi.RegisterOutputType(AddressOutput{}) pulumi.RegisterOutputType(AddressPtrOutput{}) pulumi.RegisterOutputType(AddressResponseOutput{}) pulumi.RegisterOutputType(AddressResponsePtrOutput{}) pulumi.RegisterOutputType(AsymmetricEncryptedSecretOutput{}) pulumi.RegisterOutputType(AsymmetricEncryptedSecretPtrOutput{}) pulumi.RegisterOutputType(AsymmetricEncryptedSecretResponseOutput{}) pulumi.RegisterOutputType(AsymmetricEncryptedSecretResponsePtrOutput{}) pulumi.RegisterOutputType(AzureContainerInfoOutput{}) pulumi.RegisterOutputType(AzureContainerInfoPtrOutput{}) pulumi.RegisterOutputType(AzureContainerInfoResponseOutput{}) pulumi.RegisterOutputType(AzureContainerInfoResponsePtrOutput{}) pulumi.RegisterOutputType(ClientAccessRightOutput{}) pulumi.RegisterOutputType(ClientAccessRightArrayOutput{}) pulumi.RegisterOutputType(ClientAccessRightResponseOutput{}) pulumi.RegisterOutputType(ClientAccessRightResponseArrayOutput{}) pulumi.RegisterOutputType(ContactDetailsOutput{}) pulumi.RegisterOutputType(ContactDetailsPtrOutput{}) pulumi.RegisterOutputType(ContactDetailsResponseOutput{}) pulumi.RegisterOutputType(ContactDetailsResponsePtrOutput{}) pulumi.RegisterOutputType(MountPointMapResponseOutput{}) pulumi.RegisterOutputType(MountPointMapResponseArrayOutput{}) pulumi.RegisterOutputType(OrderStatusOutput{}) pulumi.RegisterOutputType(OrderStatusPtrOutput{}) pulumi.RegisterOutputType(OrderStatusResponseOutput{}) pulumi.RegisterOutputType(OrderStatusResponsePtrOutput{}) pulumi.RegisterOutputType(OrderStatusResponseArrayOutput{}) pulumi.RegisterOutputType(RefreshDetailsOutput{}) pulumi.RegisterOutputType(RefreshDetailsPtrOutput{}) pulumi.RegisterOutputType(RefreshDetailsResponseOutput{}) pulumi.RegisterOutputType(RefreshDetailsResponsePtrOutput{}) pulumi.RegisterOutputType(ShareAccessRightOutput{}) pulumi.RegisterOutputType(ShareAccessRightArrayOutput{}) pulumi.RegisterOutputType(ShareAccessRightResponseOutput{}) pulumi.RegisterOutputType(ShareAccessRightResponseArrayOutput{}) pulumi.RegisterOutputType(SkuOutput{}) pulumi.RegisterOutputType(SkuPtrOutput{}) pulumi.RegisterOutputType(SkuResponseOutput{}) pulumi.RegisterOutputType(SkuResponsePtrOutput{}) pulumi.RegisterOutputType(TrackingInfoResponseOutput{}) pulumi.RegisterOutputType(TrackingInfoResponseArrayOutput{}) pulumi.RegisterOutputType(UserAccessRightOutput{}) pulumi.RegisterOutputType(UserAccessRightArrayOutput{}) pulumi.RegisterOutputType(UserAccessRightResponseOutput{}) pulumi.RegisterOutputType(UserAccessRightResponseArrayOutput{}) }
{ return nil }
macro-2.rs
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. // xfail-pretty - token trees can't pretty print fn main()
{ macro_rules! mylambda_tt( ($x:ident, $body:expr) => ({ fn f($x: int) -> int { return $body; }; f }) ) assert(mylambda_tt!(y, y * 2)(8) == 16) }
20191105152450-create-student.js
module.exports = { up: (queryInterface, Sequelize) => { return queryInterface.createTable('students', { id: { type: Sequelize.INTEGER, allowNull: false, autoIncrement: true, primaryKey: true, }, name: { type: Sequelize.STRING, allowNull: false, }, email: { type: Sequelize.STRING, allowNull: false, }, age: { type: Sequelize.INTEGER, allowNull: false, }, weight: { type: Sequelize.FLOAT, allowNull: false, }, height: { type: Sequelize.FLOAT, allowNull: false, },
allowNull: false, }, updated_at: { type: Sequelize.DATE, allowNull: false, }, }) }, down: queryInterface => { return queryInterface.dropTable('students') }, }
created_at: { type: Sequelize.DATE,
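The migration above only creates the `students` table; querying it still needs a matching model. Below is a minimal sketch of that model using Sequelize's `Model.init` API (v5+); the connection URI is a placeholder and `underscored: true` is an assumption used to map the model's `createdAt`/`updatedAt` fields onto the snake_case `created_at`/`updated_at` columns defined by the migration.

import { Sequelize, DataTypes, Model } from 'sequelize'

// Placeholder connection; substitute the project's real database settings.
const sequelize = new Sequelize('sqlite::memory:')

// Student model mirroring the columns created by the migration above.
class Student extends Model {}

Student.init(
  {
    name: { type: DataTypes.STRING, allowNull: false },
    email: { type: DataTypes.STRING, allowNull: false },
    age: { type: DataTypes.INTEGER, allowNull: false },
    weight: { type: DataTypes.FLOAT, allowNull: false },
    height: { type: DataTypes.FLOAT, allowNull: false },
  },
  {
    sequelize,
    tableName: 'students',
    underscored: true, // createdAt/updatedAt <-> created_at/updated_at
  }
)

// Example usage: insert one row and log it.
async function demo() {
  await sequelize.sync()
  const student = await Student.create({
    name: 'Jane Doe',
    email: 'jane@example.com',
    age: 21,
    weight: 60.5,
    height: 1.7,
  })
  console.log(student.toJSON())
}

demo().catch(console.error)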
routes.js
import React from 'react' import { Route, IndexRoute } from 'react-router' import App from './components/App' import HomePage from './components/home/HomePage'
export default ( <Route path='/' component={App}> <IndexRoute component={HomePage} /> </Route> )
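These routes use the pre-4.0 react-router API (`Route`/`IndexRoute` as plain elements), so they are mounted by handing the tree to a `Router`. A minimal sketch of an entry point, assuming react-router 3.x, react-dom, and a host page with an `#app` element; the file layout is illustrative.

import React from 'react'
import { render } from 'react-dom'
import { Router, browserHistory } from 'react-router'
import routes from './routes'

// Mount the route tree defined in routes.js; browserHistory keeps URLs clean (no hash).
render(
  <Router history={browserHistory} routes={routes} />,
  document.getElementById('app')
)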
trace.py
import requests from termcolor import cprint class
: def __init__(self, url): self.url = url def checktrace(self): headers = { "User-Agent": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50" } payload = "/data/mysql_error_trace.inc" if '://' not in self.url: self.url = 'http://' + self.url + '/' url = self.url vulnurl = url + payload try: r = requests.get(url=vulnurl, headers=headers) if r.status_code == 200 and "<?php" in r.text: cprint("mysql trace is vulnerable: " + vulnurl, "red") return True except requests.RequestException: return False
trace

lib.rs
mod utils; use wasm_bindgen::prelude::*; // When the `wee_alloc` feature is enabled, use `wee_alloc` as the global // allocator. #[cfg(feature = "wee_alloc")] #[global_allocator]
fn alert(s: &str); } #[wasm_bindgen] pub fn greet() { alert("Hello, wasm-hello!"); }
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT; #[wasm_bindgen] extern {
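Since `greet` is exported through `#[wasm_bindgen]`, the wasm-pack build emits a JS/TS package whose binding can be imported from the web side. A minimal sketch of a consumer, assuming a bundler that understands WebAssembly imports; the module path is a guess based on the "Hello, wasm-hello!" string above and may differ from the actual package name.

// Load the wasm-pack output asynchronously and call the exported function.
// The module path is an assumption, not taken from the build config.
import('./pkg/wasm_hello')
  .then((wasm) => {
    wasm.greet() // triggers the alert defined on the Rust side
  })
  .catch((err) => console.error('failed to load wasm module:', err))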
run_preprocessing.py
import pickle import argparse import pandas as pd import numpy as np import math from tqdm import tqdm from sklearn import decomposition CENTER_X = int(960 / 3 / 2) CENTER_Y = int(540 / 3 / 2) # CENTER_X = 0 # CENTER_Y = 0 def load_data(path, data_size=None): with open(path, 'rb') as f: data = pickle.load(f) if data_size != -1: dataset = data[:data_size] else: dataset = data[:] return dataset def save_data(path, data): with open(path, 'wb') as f: pickle.dump(data, f) ''' filling empty coordination, relocate landmark position, and filtering landmarks which have abnormal pulpil coordination ''' def run_fill_filter(eye_dataset): for ed in tqdm(eye_dataset): # preprocessing landmarks # print('[INFO] Current video: {}'.format(ed['vid'])) for clip_info in ed['clip_info']: landmarks = clip_info['landmarks'] filled_landmarks = [] for landmark in landmarks: ci_df = pd.DataFrame(np.array(landmark)) ci_df = ci_df.replace(0, np.nan) ci_df = ci_df.fillna(method='ffill') # fill NaN values in dataset ci_df = ci_df.rolling(3).mean() # moving average filtering temp_lm = [] for landmark in ci_df.values.tolist(): filled = [int(lm) for lm in landmark if not(np.isnan(lm))] if len(filled) == 50: # centering diff_x = CENTER_X - filled[48] diff_y = CENTER_Y - filled[49] for f_i in range(0, len(filled), 2): filled[f_i] += diff_x filled[f_i+1] += diff_y # check right pupil is outside of eye region condition1 = filled[0] > filled[4] and filled[0] < filled[10] condition2 = filled[1] > filled[7] and filled[1] > filled[9] condition3 = filled[1] < filled[13] and filled[1] < filled[14] if condition1 and condition2 and condition3: temp_lm.append(filled) filled_landmarks.append(temp_lm) clip_info['landmarks'] = filled_landmarks return eye_dataset ''' Normalize eye expression motion scale over whole dataset. To avoid pulpil dislocation, we use same vector on right and left pulpil. 
''' def run_normalization(eye_dataset): eb_standard_len = 100 def get_dist(x1, y1, x2, y2): return np.sqrt((x1-x2) ** 2 + (y1- y2) ** 2) def get_theta(var_x, var_y, fix_x, fix_y): return math.atan2(var_y - fix_y, var_x - fix_x) def get_new_coor(theta, dist, point): return dist * np.array([math.cos(theta), math.sin(theta)]) + np.array([point[0], point[1]]) def run_len_norm(var_x, var_y, fix_x, fix_y, expected_len): angle = get_theta(var_x, var_y, fix_x, fix_y) new_coor = get_new_coor(angle, expected_len, [fix_x, fix_y]) return new_coor for ed in tqdm(eye_dataset): # preprocessing landmarks # print('[INFO] Current video: {}'.format(ed['vid'])) for clip_info in ed['clip_info']: tmp_landmarks = [] for landmark in clip_info['landmarks']: tmp_landmark = [] for lm in landmark: # calculate different ratio with standard length right_len_ratio = eb_standard_len / get_dist(lm[46], lm[47], lm[48], lm[49]) left_len_ratio = eb_standard_len / get_dist(lm[28], lm[29], lm[48], lm[49]) len_ratio = (right_len_ratio + left_len_ratio) / 2 fix_x, fix_y = lm[48], lm[49] new_coor_list = [] for lm_i in range(0, len(lm[:48]), 2): new_coor = run_len_norm(lm[lm_i], lm[lm_i+1], fix_x, fix_y, get_dist(lm[lm_i], lm[lm_i+1], fix_x, fix_y) * len_ratio) new_coor_list += [int(new_coor[0]), int(new_coor[1])] # pupil preprocessing right_theta = get_theta(lm[0], lm[1], lm[6], lm[7]) right_dist = get_dist(lm[0], lm[1], lm[6], lm[7]) left_new_pulpil = get_new_coor(right_theta, right_dist, [lm[18], lm[19]]) lm[2] = int(left_new_pulpil[0]) lm[3] = int(left_new_pulpil[1]) new_coor_list += [fix_x, fix_y] tmp_landmark.append(new_coor_list) tmp_landmarks.append(tmp_landmark) clip_info['landmarks'] = tmp_landmarks return eye_dataset ''' Run PCA. We set 7 components to run pca. ''' def run_estimator(eye_dataset, opt): landmark_list = [] for ed in eye_dataset: for clip_info in ed['clip_info']: for clip_landmarks in clip_info['landmarks']: for landmarks in clip_landmarks: landmark_list.append(landmarks) landmark_array = np.array(landmark_list) n_samples, n_features = landmark_array.shape print('[INFO] n_samples:{}, n_features:{}'.format(n_samples, n_features)) print('[INFO] Estimated running time: {:0.2f} hrs with {} fps'.format(n_samples/opt.fps/60/60, opt.fps)) data = landmark_array[:, :-2] estimator = decomposition.PCA(opt.n_components, svd_solver='randomized', whiten=True) estimator.fit(data) var_ratio = estimator.explained_variance_ratio_ print('[INFO] {} number of components explain {:0.2f} of original dataset.'.format(opt.n_components, np.sum(var_ratio))) print('[INFO] Without first and seconde axis, rest of hyperplain consists of {:0.2f} of original dataset.'.format(np.sum(var_ratio[3:]))) return estimator ''' Based on learned PCA eigen vectors (7 hyperplanes that can explain original dataset), We transform 50 dimention to 7 dimention to represent eye expression. Due to first and second egien vectors represent rotating motion in our pca space, we make these values to zero. 
''' def run_transform(eye_dataset, estimator, opt): for ed in tqdm(eye_dataset): for clip_info in ed['clip_info']: landmarks = clip_info['landmarks'] transformed_landmarks = [] for landmark in landmarks: tmp_trans = [] for lm in landmark: transformed_array = estimator.transform(np.array([lm[:-2]])) transformed_list = transformed_array.tolist()[0] if opt.is_rotation_killed: # we killed pca hyperplanes which have a rotation # transformed_list[0] = int(transformed_list[0]/3) # transformed_list[1] = int(transformed_list[1]/3) transformed_list[0] = 0 transformed_list[1] = 0 tmp_trans.append(transformed_list) transformed_landmarks.append(tmp_trans) clip_info['landmarks'] = transformed_landmarks return eye_dataset def main():
if __name__ == '__main__': main()
parser = argparse.ArgumentParser() parser.add_argument('-dataset_path', default='./dataset') parser.add_argument('-data_size', type=int, default=-1) # -1 means whole dataset parser.add_argument('-fps', type=int, default=10) parser.add_argument('-n_components', type=int, default=7) parser.add_argument('-is_rotation_killed', type=bool, default=True) opt = parser.parse_args() eye_dataset = load_data('{}/eye_motion_dataset.pickle'.format(opt.dataset_path), opt.data_size) print('[INFO] Dataset length: {}'.format(len(eye_dataset))) print('[INFO] Filling, filtering and centering is now processing.') eye_dataset = run_fill_filter(eye_dataset) print('[INFO] Normalization is now processing.') eye_dataset = run_normalization(eye_dataset) print('[INFO] Estimator is now running.') estimator = run_estimator(eye_dataset, opt) print('[INFO] Landmarks are now transforming.') eye_dataset = run_transform(eye_dataset, estimator, opt) # save processed dataset processed_dataset = {'eye_dataset': eye_dataset, 'estimator': estimator, } save_path = '{}/processed_eye_motion_dataset_pca_{}.pickle'.format(opt.dataset_path, estimator.n_components) print('[INFO] Save preprocessed dataset at {}'.format(save_path)) save_data(save_path, processed_dataset)
Message.tsx
import { oneOf } from '@flyerhq/react-native-link-preview' import * as React from 'react' import { Pressable, Text, View } from 'react-native' import { MessageType } from '../../types' import { excludeDerivedMessageProps, ThemeContext, UserContext, } from '../../utils' import { Avatar } from '../Avatar' import { FileMessage } from '../FileMessage' import { ImageMessage } from '../ImageMessage' import { StatusIcon } from '../StatusIcon' import { TextMessage, TextMessageTopLevelProps } from '../TextMessage' import styles from './styles' export interface MessageTopLevelProps extends TextMessageTopLevelProps { /** Called when user makes a long press on any message */ onMessageLongPress?: (message: MessageType.Any) => void /** Called when user taps on any message */ onMessagePress?: (message: MessageType.Any) => void /** Customize the default bubble using this function. `child` is a content * you should render inside your bubble, `message` is a current message * (contains `author` inside) and `nextMessageInGroup` allows you to see * if the message is a part of a group (messages are grouped when written * in quick succession by the same author) */ renderBubble?: (payload: { child: React.ReactNode message: MessageType.Any nextMessageInGroup: boolean }) => React.ReactNode /** Render a custom message inside predefined bubble */ renderCustomMessage?: ( message: MessageType.Custom, messageWidth: number ) => React.ReactNode /** Render a file message inside predefined bubble */ renderFileMessage?: ( message: MessageType.File, messageWidth: number ) => React.ReactNode /** Render an image message inside predefined bubble */ renderImageMessage?: ( message: MessageType.Image, messageWidth: number ) => React.ReactNode /** Render a text message inside predefined bubble */ renderTextMessage?: ( message: MessageType.Text, messageWidth: number, showName: boolean ) => React.ReactNode /** Show user avatars for received messages. Useful for a group chat. */ showUserAvatars?: boolean onAvatarPress? :() => void } export interface MessageProps extends MessageTopLevelProps { enableAnimation?: boolean message: MessageType.DerivedAny messageWidth: number roundBorder: boolean showAvatar: boolean showName: boolean showStatus: boolean } /** Base component for all message types in the chat. Renders bubbles around * messages and status. Sets maximum width for a message for * a nice look on larger screens. 
*/ export const Message = React.memo( ({ enableAnimation, message, messageWidth, onMessagePress, onMessageLongPress, onPreviewDataFetched, renderBubble, renderCustomMessage, renderFileMessage, renderImageMessage, renderTextMessage, onAvatarPress, roundBorder, showAvatar, showName, showStatus, showUserAvatars, usePreviewData, }: MessageProps) => { const theme = React.useContext(ThemeContext) const user = React.useContext(UserContext) const currentUserIsAuthor = message.type !== 'dateHeader' && user?.id === message.author.id const { container, contentContainer, dateHeader, pressable } = styles({ currentUserIsAuthor, message, messageWidth, roundBorder, theme, }) if (message.type === 'dateHeader') { return ( <View style={dateHeader}> <Text style={theme.fonts.dateDividerTextStyle}>{message.text}</Text> </View> ) } const renderBubbleContainer = () => { const child = renderMessage() return oneOf( renderBubble, <View style={contentContainer} testID='ContentContainer'> {child} </View> )({ child, message: excludeDerivedMessageProps(message), nextMessageInGroup: roundBorder, }) } const renderMessage = () => { switch (message.type) { case 'custom': return ( renderCustomMessage?.( // It's okay to cast here since we checked message type above // type-coverage:ignore-next-line excludeDerivedMessageProps(message) as MessageType.Custom, messageWidth ) ?? null ) case 'file': return oneOf(renderFileMessage, <FileMessage message={message} />)( // type-coverage:ignore-next-line excludeDerivedMessageProps(message) as MessageType.File, messageWidth ) case 'image': return oneOf( renderImageMessage, <ImageMessage {...{ message, messageWidth, }} /> )( // type-coverage:ignore-next-line excludeDerivedMessageProps(message) as MessageType.Image, messageWidth ) case 'text': return oneOf( renderTextMessage, <TextMessage {...{ enableAnimation, message, messageWidth, onPreviewDataFetched, showName, usePreviewData, }} /> )( // type-coverage:ignore-next-line excludeDerivedMessageProps(message) as MessageType.Text, messageWidth, showName ) default: return null } } return ( <View style={container}> <Avatar {...{ author: message.author, currentUserIsAuthor, showAvatar, showUserAvatars, theme, onAvatarPress, }} /> <Pressable onLongPress={() => onMessageLongPress?.(excludeDerivedMessageProps(message)) } onPress={() => onMessagePress?.(excludeDerivedMessageProps(message))} style={pressable} > {renderBubbleContainer()} </Pressable> <StatusIcon {...{
currentUserIsAuthor, showStatus, status: message.status, theme, }} /> </View> ) } )
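The `renderBubble` prop documented above receives the already-rendered message content plus grouping info, so a custom bubble only has to wrap `child`. Below is a minimal sketch of a callback matching that signature; the styling values are illustrative and the `MessageType` import path simply mirrors the one used in this file.

import * as React from 'react'
import { View } from 'react-native'
import { MessageType } from '../../types'

// Custom bubble matching the renderBubble payload documented above.
const renderBubble = ({
  child,
  nextMessageInGroup,
}: {
  child: React.ReactNode
  message: MessageType.Any
  nextMessageInGroup: boolean
}) => (
  <View
    style={{
      backgroundColor: '#ececec',
      borderRadius: 16,
      // Keep a tighter corner while more messages from the same author follow.
      borderBottomRightRadius: nextMessageInGroup ? 4 : 16,
      overflow: 'hidden',
    }}
  >
    {child}
  </View>
)

export default renderBubble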
index.js
/** * @license Apache-2.0 * * Copyright (c) 2018 The Stdlib Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ 'use strict'; var isEven = require( '@stdlib/assert-is-even' ).isPrimitive; var randu = require( '@stdlib/random-base-randu' ); var floor = require( '@stdlib/math-base-special-floor' ); var doUntilEach = require( './../lib' ); function
( value ) { return ( value !== value ); } function log( value, index, collection ) { console.log( '%s: %d', index, value ); if ( isEven( index ) ) { collection.shift(); } else { collection.push( index+1 ); } } var arr; var j; var i; arr = new Array( 100 ); j = floor( randu()*arr.length ); for ( i = 0; i < arr.length; i++ ) { if ( i === j ) { arr[ i ] = NaN; } else { arr[ i ] = i; } } doUntilEach( arr, log, predicate );
predicate
tab-item.selectors.ts
import { selectTabItems } from '..'; import { tabItemDomainStateConfig } from './tab-item.state'; const defSelectors = { ...tabItemDomainStateConfig.getSelectors(selectTabItems) }; export const tabItemSelectors = {
...defSelectors, };
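A short sketch of how these selectors would typically be consumed from an NgRx store. It assumes the domain-state config's `getSelectors` follows the usual @ngrx/entity naming (`selectAll`, `selectEntities`, ...); if the library generates different names, substitute them accordingly.

import { Store, select } from '@ngrx/store';

import { tabItemSelectors } from './tab-item.selectors';

// Assumption: getSelectors exposes an @ngrx/entity-style selectAll selector.
export const selectAllTabItems = (store: Store) =>
  store.pipe(select(tabItemSelectors.selectAll));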
ValidationError.js
const { stringHints, numberHints } = require("./util/hints"); /** @typedef {import("json-schema").JSONSchema6} JSONSchema6 */ /** @typedef {import("json-schema").JSONSchema7} JSONSchema7 */ /** @typedef {import("./validate").Schema} Schema */ /** @typedef {import("./validate").ValidationErrorConfiguration} ValidationErrorConfiguration */ /** @typedef {import("./validate").PostFormatter} PostFormatter */ /** @typedef {import("./validate").SchemaUtilErrorObject} SchemaUtilErrorObject */ /** @enum {number} */ const SPECIFICITY = { type: 1, not: 1, oneOf: 1, anyOf: 1, if: 1, enum: 1, const: 1, instanceof: 1, required: 2, pattern: 2, patternRequired: 2, format: 2, formatMinimum: 2, formatMaximum: 2, minimum: 2, exclusiveMinimum: 2, maximum: 2, exclusiveMaximum: 2, multipleOf: 2, uniqueItems: 2, contains: 2, minLength: 2, maxLength: 2, minItems: 2, maxItems: 2, minProperties: 2, maxProperties: 2, dependencies: 2, propertyNames: 2, additionalItems: 2, additionalProperties: 2, absolutePath: 2, }; /** * * @param {Array<SchemaUtilErrorObject>} array * @param {(item: SchemaUtilErrorObject) => number} fn * @returns {Array<SchemaUtilErrorObject>} */ function filterMax(array, fn) { const evaluatedMax = array.reduce((max, item) => Math.max(max, fn(item)), 0); return array.filter((item) => fn(item) === evaluatedMax); } /** * * @param {Array<SchemaUtilErrorObject>} children * @returns {Array<SchemaUtilErrorObject>} */ function filterChildren(children) { let newChildren = children; newChildren = filterMax( newChildren, /** * * @param {SchemaUtilErrorObject} error * @returns {number} */ (error) => (error.dataPath ? error.dataPath.length : 0) ); newChildren = filterMax( newChildren, /** * @param {SchemaUtilErrorObject} error * @returns {number} */ (error) => SPECIFICITY[/** @type {keyof typeof SPECIFICITY} */ (error.keyword)] || 2 );
} /** * Find all children errors * @param {Array<SchemaUtilErrorObject>} children * @param {Array<string>} schemaPaths * @return {number} returns index of first child */ function findAllChildren(children, schemaPaths) { let i = children.length - 1; const predicate = /** * @param {string} schemaPath * @returns {boolean} */ (schemaPath) => children[i].schemaPath.indexOf(schemaPath) !== 0; while (i > -1 && !schemaPaths.every(predicate)) { if (children[i].keyword === "anyOf" || children[i].keyword === "oneOf") { const refs = extractRefs(children[i]); const childrenStart = findAllChildren( children.slice(0, i), refs.concat(children[i].schemaPath) ); i = childrenStart - 1; } else { i -= 1; } } return i + 1; } /** * Extracts all refs from schema * @param {SchemaUtilErrorObject} error * @return {Array<string>} */ function extractRefs(error) { const { schema } = error; if (!Array.isArray(schema)) { return []; } return schema.map(({ $ref }) => $ref).filter((s) => s); } /** * Groups children by their first level parent (assuming that error is root) * @param {Array<SchemaUtilErrorObject>} children * @return {Array<SchemaUtilErrorObject>} */ function groupChildrenByFirstChild(children) { const result = []; let i = children.length - 1; while (i > 0) { const child = children[i]; if (child.keyword === "anyOf" || child.keyword === "oneOf") { const refs = extractRefs(child); const childrenStart = findAllChildren( children.slice(0, i), refs.concat(child.schemaPath) ); if (childrenStart !== i) { result.push( Object.assign({}, child, { children: children.slice(childrenStart, i), }) ); i = childrenStart; } else { result.push(child); } } else { result.push(child); } i -= 1; } if (i === 0) { result.push(children[i]); } return result.reverse(); } /** * @param {string} str * @param {string} prefix * @returns {string} */ function indent(str, prefix) { return str.replace(/\n(?!$)/g, `\n${prefix}`); } /** * @param {Schema} schema * @returns {schema is (Schema & {not: Schema})} */ function hasNotInSchema(schema) { return !!schema.not; } /** * @param {Schema} schema * @return {Schema} */ function findFirstTypedSchema(schema) { if (hasNotInSchema(schema)) { return findFirstTypedSchema(schema.not); } return schema; } /** * @param {Schema} schema * @return {boolean} */ function canApplyNot(schema) { const typedSchema = findFirstTypedSchema(schema); return ( likeNumber(typedSchema) || likeInteger(typedSchema) || likeString(typedSchema) || likeNull(typedSchema) || likeBoolean(typedSchema) ); } /** * @param {any} maybeObj * @returns {boolean} */ function isObject(maybeObj) { return typeof maybeObj === "object" && maybeObj !== null; } /** * @param {Schema} schema * @returns {boolean} */ function likeNumber(schema) { return ( schema.type === "number" || typeof schema.minimum !== "undefined" || typeof schema.exclusiveMinimum !== "undefined" || typeof schema.maximum !== "undefined" || typeof schema.exclusiveMaximum !== "undefined" || typeof schema.multipleOf !== "undefined" ); } /** * @param {Schema} schema * @returns {boolean} */ function likeInteger(schema) { return ( schema.type === "integer" || typeof schema.minimum !== "undefined" || typeof schema.exclusiveMinimum !== "undefined" || typeof schema.maximum !== "undefined" || typeof schema.exclusiveMaximum !== "undefined" || typeof schema.multipleOf !== "undefined" ); } /** * @param {Schema} schema * @returns {boolean} */ function likeString(schema) { return ( schema.type === "string" || typeof schema.minLength !== "undefined" || typeof schema.maxLength !== "undefined" || 
typeof schema.pattern !== "undefined" || typeof schema.format !== "undefined" || typeof schema.formatMinimum !== "undefined" || typeof schema.formatMaximum !== "undefined" ); } /** * @param {Schema} schema * @returns {boolean} */ function likeBoolean(schema) { return schema.type === "boolean"; } /** * @param {Schema} schema * @returns {boolean} */ function likeArray(schema) { return ( schema.type === "array" || typeof schema.minItems === "number" || typeof schema.maxItems === "number" || typeof schema.uniqueItems !== "undefined" || typeof schema.items !== "undefined" || typeof schema.additionalItems !== "undefined" || typeof schema.contains !== "undefined" ); } /** * @param {Schema & {patternRequired?: Array<string>}} schema * @returns {boolean} */ function likeObject(schema) { return ( schema.type === "object" || typeof schema.minProperties !== "undefined" || typeof schema.maxProperties !== "undefined" || typeof schema.required !== "undefined" || typeof schema.properties !== "undefined" || typeof schema.patternProperties !== "undefined" || typeof schema.additionalProperties !== "undefined" || typeof schema.dependencies !== "undefined" || typeof schema.propertyNames !== "undefined" || typeof schema.patternRequired !== "undefined" ); } /** * @param {Schema} schema * @returns {boolean} */ function likeNull(schema) { return schema.type === "null"; } /** * @param {string} type * @returns {string} */ function getArticle(type) { if (/^[aeiou]/i.test(type)) { return "an"; } return "a"; } /** * @param {Schema=} schema * @returns {string} */ function getSchemaNonTypes(schema) { if (!schema) { return ""; } if (!schema.type) { if (likeNumber(schema) || likeInteger(schema)) { return " | should be any non-number"; } if (likeString(schema)) { return " | should be any non-string"; } if (likeArray(schema)) { return " | should be any non-array"; } if (likeObject(schema)) { return " | should be any non-object"; } } return ""; } /** * @param {Array<string>} hints * @returns {string} */ function formatHints(hints) { return hints.length > 0 ? `(${hints.join(", ")})` : ""; } /** * @param {Schema} schema * @param {boolean} logic * @returns {string[]} */ function getHints(schema, logic) { if (likeNumber(schema) || likeInteger(schema)) { return numberHints(schema, logic); } else if (likeString(schema)) { return stringHints(schema, logic); } return []; } class ValidationError extends Error { /** * @param {Array<SchemaUtilErrorObject>} errors * @param {Schema} schema * @param {ValidationErrorConfiguration} configuration */ constructor(errors, schema, configuration = {}) { super(); /** @type {string} */ this.name = "ValidationError"; /** @type {Array<SchemaUtilErrorObject>} */ this.errors = errors; /** @type {Schema} */ this.schema = schema; let headerNameFromSchema; let baseDataPathFromSchema; if (schema.title && (!configuration.name || !configuration.baseDataPath)) { const splittedTitleFromSchema = schema.title.match(/^(.+) (.+)$/); if (splittedTitleFromSchema) { if (!configuration.name) { [, headerNameFromSchema] = splittedTitleFromSchema; } if (!configuration.baseDataPath) { [, , baseDataPathFromSchema] = splittedTitleFromSchema; } } } /** @type {string} */ this.headerName = configuration.name || headerNameFromSchema || "Object"; /** @type {string} */ this.baseDataPath = configuration.baseDataPath || baseDataPathFromSchema || "configuration"; /** @type {PostFormatter | null} */ this.postFormatter = configuration.postFormatter || null; const header = `Invalid ${this.baseDataPath} object. 
${ this.headerName } has been initialized using ${getArticle(this.baseDataPath)} ${ this.baseDataPath } object that does not match the API schema.\n`; /** @type {string} */ this.message = `${header}${this.formatValidationErrors(errors)}`; Error.captureStackTrace(this, this.constructor); } /** * @param {string} path * @returns {Schema} */ getSchemaPart(path) { const newPath = path.split("/"); let schemaPart = this.schema; for (let i = 1; i < newPath.length; i++) { const inner = schemaPart[/** @type {keyof Schema} */ (newPath[i])]; if (!inner) { break; } schemaPart = inner; } return schemaPart; } /** * @param {Schema} schema * @param {boolean} logic * @param {Array<Object>} prevSchemas * @returns {string} */ formatSchema(schema, logic = true, prevSchemas = []) { let newLogic = logic; const formatInnerSchema = /** * * @param {Object} innerSchema * @param {boolean=} addSelf * @returns {string} */ (innerSchema, addSelf) => { if (!addSelf) { return this.formatSchema(innerSchema, newLogic, prevSchemas); } if (prevSchemas.includes(innerSchema)) { return "(recursive)"; } return this.formatSchema( innerSchema, newLogic, prevSchemas.concat(schema) ); }; if (hasNotInSchema(schema) && !likeObject(schema)) { if (canApplyNot(schema.not)) { newLogic = !logic; return formatInnerSchema(schema.not); } const needApplyLogicHere = !schema.not.not; const prefix = logic ? "" : "non "; newLogic = !logic; return needApplyLogicHere ? prefix + formatInnerSchema(schema.not) : formatInnerSchema(schema.not); } if ( /** @type {Schema & {instanceof: string | Array<string>}} */ (schema) .instanceof ) { const { instanceof: value } = /** @type {Schema & {instanceof: string | Array<string>}} */ (schema); const values = !Array.isArray(value) ? [value] : value; return values .map( /** * @param {string} item * @returns {string} */ (item) => (item === "Function" ? "function" : item) ) .join(" | "); } if (schema.enum) { return /** @type {Array<any>} */ (schema.enum) .map((item) => JSON.stringify(item)) .join(" | "); } if (typeof schema.const !== "undefined") { return JSON.stringify(schema.const); } if (schema.oneOf) { return /** @type {Array<Schema>} */ (schema.oneOf) .map((item) => formatInnerSchema(item, true)) .join(" | "); } if (schema.anyOf) { return /** @type {Array<Schema>} */ (schema.anyOf) .map((item) => formatInnerSchema(item, true)) .join(" | "); } if (schema.allOf) { return /** @type {Array<Schema>} */ (schema.allOf) .map((item) => formatInnerSchema(item, true)) .join(" & "); } if (/** @type {JSONSchema7} */ (schema).if) { const { if: ifValue, then: thenValue, else: elseValue, } = /** @type {JSONSchema7} */ (schema); return `${ifValue ? `if ${formatInnerSchema(ifValue)}` : ""}${ thenValue ? ` then ${formatInnerSchema(thenValue)}` : "" }${elseValue ? ` else ${formatInnerSchema(elseValue)}` : ""}`; } if (schema.$ref) { return formatInnerSchema(this.getSchemaPart(schema.$ref), true); } if (likeNumber(schema) || likeInteger(schema)) { const [type, ...hints] = getHints(schema, logic); const str = `${type}${hints.length > 0 ? ` ${formatHints(hints)}` : ""}`; return logic ? str : hints.length > 0 ? `non-${type} | ${str}` : `non-${type}`; } if (likeString(schema)) { const [type, ...hints] = getHints(schema, logic); const str = `${type}${hints.length > 0 ? ` ${formatHints(hints)}` : ""}`; return logic ? str : str === "string" ? "non-string" : `non-string | ${str}`; } if (likeBoolean(schema)) { return `${logic ? 
"" : "non-"}boolean`; } if (likeArray(schema)) { // not logic already applied in formatValidationError newLogic = true; const hints = []; if (typeof schema.minItems === "number") { hints.push( `should not have fewer than ${schema.minItems} item${ schema.minItems > 1 ? "s" : "" }` ); } if (typeof schema.maxItems === "number") { hints.push( `should not have more than ${schema.maxItems} item${ schema.maxItems > 1 ? "s" : "" }` ); } if (schema.uniqueItems) { hints.push("should not have duplicate items"); } const hasAdditionalItems = typeof schema.additionalItems === "undefined" || Boolean(schema.additionalItems); let items = ""; if (schema.items) { if (Array.isArray(schema.items) && schema.items.length > 0) { items = `${ /** @type {Array<Schema>} */ (schema.items) .map((item) => formatInnerSchema(item)) .join(", ") }`; if (hasAdditionalItems) { if ( schema.additionalItems && isObject(schema.additionalItems) && Object.keys(schema.additionalItems).length > 0 ) { hints.push( `additional items should be ${formatInnerSchema( schema.additionalItems )}` ); } } } else if (schema.items && Object.keys(schema.items).length > 0) { // "additionalItems" is ignored items = `${formatInnerSchema(schema.items)}`; } else { // Fallback for empty `items` value items = "any"; } } else { // "additionalItems" is ignored items = "any"; } if (schema.contains && Object.keys(schema.contains).length > 0) { hints.push( `should contains at least one ${this.formatSchema( schema.contains )} item` ); } return `[${items}${hasAdditionalItems ? ", ..." : ""}]${ hints.length > 0 ? ` (${hints.join(", ")})` : "" }`; } if (likeObject(schema)) { // not logic already applied in formatValidationError newLogic = true; const hints = []; if (typeof schema.minProperties === "number") { hints.push( `should not have fewer than ${schema.minProperties} ${ schema.minProperties > 1 ? "properties" : "property" }` ); } if (typeof schema.maxProperties === "number") { hints.push( `should not have more than ${schema.maxProperties} ${ schema.minProperties && schema.minProperties > 1 ? "properties" : "property" }` ); } if ( schema.patternProperties && Object.keys(schema.patternProperties).length > 0 ) { const patternProperties = Object.keys(schema.patternProperties); hints.push( `additional property names should match pattern${ patternProperties.length > 1 ? "s" : "" } ${patternProperties .map((pattern) => JSON.stringify(pattern)) .join(" | ")}` ); } const properties = schema.properties ? Object.keys(schema.properties) : []; const required = schema.required ? schema.required : []; const allProperties = [ ...new Set( /** @type {Array<string>} */ ([]).concat(required).concat(properties) ), ]; const objectStructure = allProperties .map((property) => { const isRequired = required.includes(property); // Some properties need quotes, maybe we should add check // Maybe we should output type of property (`foo: string`), but it is looks very unreadable return `${property}${isRequired ? "" : "?"}`; }) .concat( typeof schema.additionalProperties === "undefined" || Boolean(schema.additionalProperties) ? schema.additionalProperties && isObject(schema.additionalProperties) ? 
[`<key>: ${formatInnerSchema(schema.additionalProperties)}`] : ["…"] : [] ) .join(", "); const { dependencies, propertyNames, patternRequired } = /** @type {Schema & {patternRequired?: Array<string>;}} */ (schema); if (dependencies) { Object.keys(dependencies).forEach((dependencyName) => { const dependency = dependencies[dependencyName]; if (Array.isArray(dependency)) { hints.push( `should have ${ dependency.length > 1 ? "properties" : "property" } ${dependency .map((dep) => `'${dep}'`) .join(", ")} when property '${dependencyName}' is present` ); } else { hints.push( `should be valid according to the schema ${formatInnerSchema( dependency )} when property '${dependencyName}' is present` ); } }); } if (propertyNames && Object.keys(propertyNames).length > 0) { hints.push( `each property name should match format ${JSON.stringify( schema.propertyNames.format )}` ); } if (patternRequired && patternRequired.length > 0) { hints.push( `should have property matching pattern ${patternRequired.map( /** * @param {string} item * @returns {string} */ (item) => JSON.stringify(item) )}` ); } return `object {${objectStructure ? ` ${objectStructure} ` : ""}}${ hints.length > 0 ? ` (${hints.join(", ")})` : "" }`; } if (likeNull(schema)) { return `${logic ? "" : "non-"}null`; } if (Array.isArray(schema.type)) { // not logic already applied in formatValidationError return `${schema.type.join(" | ")}`; } // Fallback for unknown keywords // not logic already applied in formatValidationError /* istanbul ignore next */ return JSON.stringify(schema, null, 2); } /** * @param {Schema=} schemaPart * @param {(boolean | Array<string>)=} additionalPath * @param {boolean=} needDot * @param {boolean=} logic * @returns {string} */ getSchemaPartText(schemaPart, additionalPath, needDot = false, logic = true) { if (!schemaPart) { return ""; } if (Array.isArray(additionalPath)) { for (let i = 0; i < additionalPath.length; i++) { /** @type {Schema | undefined} */ const inner = schemaPart[/** @type {keyof Schema} */ (additionalPath[i])]; if (inner) { // eslint-disable-next-line no-param-reassign schemaPart = inner; } else { break; } } } while (schemaPart.$ref) { // eslint-disable-next-line no-param-reassign schemaPart = this.getSchemaPart(schemaPart.$ref); } let schemaText = `${this.formatSchema(schemaPart, logic)}${ needDot ? "." 
: "" }`; if (schemaPart.description) { schemaText += `\n-> ${schemaPart.description}`; } if (schemaPart.link) { schemaText += `\n-> Read more at ${schemaPart.link}`; } return schemaText; } /** * @param {Schema=} schemaPart * @returns {string} */ getSchemaPartDescription(schemaPart) { if (!schemaPart) { return ""; } while (schemaPart.$ref) { // eslint-disable-next-line no-param-reassign schemaPart = this.getSchemaPart(schemaPart.$ref); } let schemaText = ""; if (schemaPart.description) { schemaText += `\n-> ${schemaPart.description}`; } if (schemaPart.link) { schemaText += `\n-> Read more at ${schemaPart.link}`; } return schemaText; } /** * @param {SchemaUtilErrorObject} error * @returns {string} */ formatValidationError(error) { const { keyword, dataPath: errorDataPath } = error; const dataPath = `${this.baseDataPath}${errorDataPath}`; switch (keyword) { case "type": { const { parentSchema, params } = error; // eslint-disable-next-line default-case switch (/** @type {import("ajv").TypeParams} */ (params).type) { case "number": return `${dataPath} should be a ${this.getSchemaPartText( parentSchema, false, true )}`; case "integer": return `${dataPath} should be an ${this.getSchemaPartText( parentSchema, false, true )}`; case "string": return `${dataPath} should be a ${this.getSchemaPartText( parentSchema, false, true )}`; case "boolean": return `${dataPath} should be a ${this.getSchemaPartText( parentSchema, false, true )}`; case "array": return `${dataPath} should be an array:\n${this.getSchemaPartText( parentSchema )}`; case "object": return `${dataPath} should be an object:\n${this.getSchemaPartText( parentSchema )}`; case "null": return `${dataPath} should be a ${this.getSchemaPartText( parentSchema, false, true )}`; default: return `${dataPath} should be:\n${this.getSchemaPartText( parentSchema )}`; } } case "instanceof": { const { parentSchema } = error; return `${dataPath} should be an instance of ${this.getSchemaPartText( parentSchema, false, true )}`; } case "pattern": { const { params, parentSchema } = error; const { pattern } = /** @type {import("ajv").PatternParams} */ (params); return `${dataPath} should match pattern ${JSON.stringify( pattern )}${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "format": { const { params, parentSchema } = error; const { format } = /** @type {import("ajv").FormatParams} */ (params); return `${dataPath} should match format ${JSON.stringify( format )}${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "formatMinimum": case "formatMaximum": { const { params, parentSchema } = error; const { comparison, limit } = /** @type {import("ajv").ComparisonParams} */ (params); return `${dataPath} should be ${comparison} ${JSON.stringify( limit )}${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "minimum": case "maximum": case "exclusiveMinimum": case "exclusiveMaximum": { const { parentSchema, params } = error; const { comparison, limit } = /** @type {import("ajv").ComparisonParams} */ (params); const [, ...hints] = getHints( /** @type {Schema} */ (parentSchema), true ); if (hints.length === 0) { hints.push(`should be ${comparison} ${limit}`); } return `${dataPath} ${hints.join(" ")}${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "multipleOf": { const { params, parentSchema } = error; const { multipleOf } = /** @type {import("ajv").MultipleOfParams} */ ( params ); return `${dataPath} should 
be multiple of ${multipleOf}${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "patternRequired": { const { params, parentSchema } = error; const { missingPattern } = /** @type {import("ajv").PatternRequiredParams} */ (params); return `${dataPath} should have property matching pattern ${JSON.stringify( missingPattern )}${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "minLength": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); if (limit === 1) { return `${dataPath} should be a non-empty string${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } const length = limit - 1; return `${dataPath} should be longer than ${length} character${ length > 1 ? "s" : "" }${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "minItems": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); if (limit === 1) { return `${dataPath} should be a non-empty array${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } return `${dataPath} should not have fewer than ${limit} items${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "minProperties": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); if (limit === 1) { return `${dataPath} should be a non-empty object${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } return `${dataPath} should not have fewer than ${limit} properties${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "maxLength": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); const max = limit + 1; return `${dataPath} should be shorter than ${max} character${ max > 1 ? "s" : "" }${getSchemaNonTypes(parentSchema)}.${this.getSchemaPartDescription( parentSchema )}`; } case "maxItems": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); return `${dataPath} should not have more than ${limit} items${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "maxProperties": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); return `${dataPath} should not have more than ${limit} properties${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "uniqueItems": { const { params, parentSchema } = error; const { i } = /** @type {import("ajv").UniqueItemsParams} */ (params); return `${dataPath} should not contain the item '${ error.data[i] }' twice${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "additionalItems": { const { params, parentSchema } = error; const { limit } = /** @type {import("ajv").LimitParams} */ (params); return `${dataPath} should not have more than ${limit} items${getSchemaNonTypes( parentSchema )}. 
These items are valid:\n${this.getSchemaPartText(parentSchema)}`; } case "contains": { const { parentSchema } = error; return `${dataPath} should contains at least one ${this.getSchemaPartText( parentSchema, ["contains"] )} item${getSchemaNonTypes(parentSchema)}.`; } case "required": { const { parentSchema, params } = error; const missingProperty = /** @type {import("ajv").DependenciesParams} */ ( params ).missingProperty.replace(/^\./, ""); const hasProperty = parentSchema && Boolean( /** @type {Schema} */ (parentSchema).properties && /** @type {Schema} */ (parentSchema).properties[missingProperty] ); return `${dataPath} misses the property '${missingProperty}'${getSchemaNonTypes( parentSchema )}.${ hasProperty ? ` Should be:\n${this.getSchemaPartText(parentSchema, [ "properties", missingProperty, ])}` : this.getSchemaPartDescription(parentSchema) }`; } case "additionalProperties": { const { params, parentSchema } = error; const { additionalProperty } = /** @type {import("ajv").AdditionalPropertiesParams} */ (params); return `${dataPath} has an unknown property '${additionalProperty}'${getSchemaNonTypes( parentSchema )}. These properties are valid:\n${this.getSchemaPartText( parentSchema )}`; } case "dependencies": { const { params, parentSchema } = error; const { property, deps } = /** @type {import("ajv").DependenciesParams} */ (params); const dependencies = deps .split(",") .map( /** * @param {string} dep * @returns {string} */ (dep) => `'${dep.trim()}'` ) .join(", "); return `${dataPath} should have properties ${dependencies} when property '${property}' is present${getSchemaNonTypes( parentSchema )}.${this.getSchemaPartDescription(parentSchema)}`; } case "propertyNames": { const { params, parentSchema, schema } = error; const { propertyName } = /** @type {import("ajv").PropertyNamesParams} */ (params); return `${dataPath} property name '${propertyName}' is invalid${getSchemaNonTypes( parentSchema )}. Property names should be match format ${JSON.stringify( schema.format )}.${this.getSchemaPartDescription(parentSchema)}`; } case "enum": { const { parentSchema } = error; if ( parentSchema && /** @type {Schema} */ (parentSchema).enum && /** @type {Schema} */ (parentSchema).enum.length === 1 ) { return `${dataPath} should be ${this.getSchemaPartText( parentSchema, false, true )}`; } return `${dataPath} should be one of these:\n${this.getSchemaPartText( parentSchema )}`; } case "const": { const { parentSchema } = error; return `${dataPath} should be equal to constant ${this.getSchemaPartText( parentSchema, false, true )}`; } case "not": { const postfix = likeObject(/** @type {Schema} */ (error.parentSchema)) ? `\n${this.getSchemaPartText(error.parentSchema)}` : ""; const schemaOutput = this.getSchemaPartText( error.schema, false, false, false ); if (canApplyNot(error.schema)) { return `${dataPath} should be any ${schemaOutput}${postfix}.`; } const { schema, parentSchema } = error; return `${dataPath} should not be ${this.getSchemaPartText( schema, false, true )}${ parentSchema && likeObject(parentSchema) ? 
`\n${this.getSchemaPartText(parentSchema)}` : "" }`; } case "oneOf": case "anyOf": { const { parentSchema, children } = error; if (children && children.length > 0) { if (error.schema.length === 1) { const lastChild = children[children.length - 1]; const remainingChildren = children.slice(0, children.length - 1); return this.formatValidationError( Object.assign({}, lastChild, { children: remainingChildren, parentSchema: Object.assign( {}, parentSchema, lastChild.parentSchema ), }) ); } let filteredChildren = filterChildren(children); if (filteredChildren.length === 1) { return this.formatValidationError(filteredChildren[0]); } filteredChildren = groupChildrenByFirstChild(filteredChildren); return `${dataPath} should be one of these:\n${this.getSchemaPartText( parentSchema )}\nDetails:\n${filteredChildren .map( /** * @param {SchemaUtilErrorObject} nestedError * @returns {string} */ (nestedError) => ` * ${indent(this.formatValidationError(nestedError), " ")}` ) .join("\n")}`; } return `${dataPath} should be one of these:\n${this.getSchemaPartText( parentSchema )}`; } case "if": { const { params, parentSchema } = error; const { failingKeyword } = /** @type {import("ajv").IfParams} */ ( params ); return `${dataPath} should match "${failingKeyword}" schema:\n${this.getSchemaPartText( parentSchema, [failingKeyword] )}`; } case "absolutePath": { const { message, parentSchema } = error; return `${dataPath}: ${message}${this.getSchemaPartDescription( parentSchema )}`; } /* istanbul ignore next */ default: { const { message, parentSchema } = error; const ErrorInJSON = JSON.stringify(error, null, 2); // For `custom`, `false schema`, `$ref` keywords // Fallback for unknown keywords return `${dataPath} ${message} (${ErrorInJSON}).\n${this.getSchemaPartText( parentSchema, false )}`; } } } /** * @param {Array<SchemaUtilErrorObject>} errors * @returns {string} */ formatValidationErrors(errors) { return errors .map((error) => { let formattedError = this.formatValidationError(error); if (this.postFormatter) { formattedError = this.postFormatter(formattedError, error); } return ` - ${indent(formattedError, " ")}`; }) .join("\n"); } } export default ValidationError;
return newChildren;
cloudresourcemanager-gen.go
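Before the generated source below: a minimal usage sketch, not part of the generated file, showing how its paginated ListOrganizations call and the etag-based read-modify-write cycle described on Policy.Etag are typically consumed. Assumptions are marked in comments: credentials come from golang.org/x/oauth2/google's DefaultClient (outside this file), the List call's Do method is assumed to return *ListOrganizationsResponse in line with the other Do methods here, Organizations.SetIamPolicy is assumed to exist later in the generated file mirroring GetIamPolicy, and the organization name, role, and member are illustrative values taken from the file's own doc comments.

package main

import (
	"context"
	"fmt"
	"log"

	"golang.org/x/oauth2/google"
	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1beta1"
)

func main() {
	ctx := context.Background()

	// Assumption: credentials come from Application Default Credentials via
	// golang.org/x/oauth2/google, which is not part of this generated file.
	client, err := google.DefaultClient(ctx, cloudresourcemanager.CloudPlatformScope)
	if err != nil {
		log.Fatalf("creating HTTP client: %v", err)
	}

	svc, err := cloudresourcemanager.New(client)
	if err != nil {
		log.Fatalf("creating service: %v", err)
	}

	// Page through all visible organizations. Per ListOrganizationsResponse,
	// an empty NextPageToken marks the last page.
	pageToken := ""
	for {
		call := svc.Organizations.List().PageSize(50).Context(ctx)
		if pageToken != "" {
			call = call.PageToken(pageToken)
		}
		// Assumption: this Do returns *ListOrganizationsResponse, matching
		// the pattern of the other Do methods in this file.
		resp, err := call.Do()
		if err != nil {
			log.Fatalf("listing organizations: %v", err)
		}
		for _, org := range resp.Organizations {
			fmt.Printf("%s\t%s\n", org.Name, org.DisplayName)
		}
		if resp.NextPageToken == "" {
			break
		}
		pageToken = resp.NextPageToken
	}

	if err := grantViewer(ctx, svc, "organizations/1234"); err != nil {
		log.Fatal(err)
	}
}

// grantViewer sketches the read-modify-write cycle described on Policy.Etag:
// read the current policy, append a binding, and write the policy back with
// its etag intact so the server can reject a concurrently modified policy.
// Assumption: Organizations.SetIamPolicy exists later in this generated file,
// mirroring the GetIamPolicy call defined in it; the role and member below
// are illustrative values from the Policy documentation.
func grantViewer(ctx context.Context, svc *cloudresourcemanager.Service, orgName string) error {
	policy, err := svc.Organizations.GetIamPolicy(orgName,
		&cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("reading policy: %v", err)
	}

	policy.Bindings = append(policy.Bindings, &cloudresourcemanager.Binding{
		Role:    "roles/viewer",
		Members: []string{"user:[email protected]"},
	})

	// The policy still carries the Etag from the read above, so the write
	// fails rather than overwriting a policy changed in the meantime.
	_, err = svc.Organizations.SetIamPolicy(orgName,
		&cloudresourcemanager.SetIamPolicyRequest{Policy: policy}).Context(ctx).Do()
	if err != nil {
		return fmt.Errorf("writing policy: %v", err)
	}
	return nil
}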
// Copyright 2019 Google Inc. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // Code generated file. DO NOT EDIT. // Package cloudresourcemanager provides access to the Cloud Resource Manager API. // // See https://cloud.google.com/resource-manager // // Usage example: // // import "google.golang.org/api/cloudresourcemanager/v1beta1" // ... // cloudresourcemanagerService, err := cloudresourcemanager.New(oauthHttpClient) package cloudresourcemanager // import "google.golang.org/api/cloudresourcemanager/v1beta1" import ( "bytes" "context" "encoding/json" "errors" "fmt" "io" "net/http" "net/url" "strconv" "strings" gensupport "google.golang.org/api/gensupport" googleapi "google.golang.org/api/googleapi" ) // Always reference these packages, just in case the auto-generated code // below doesn't. var _ = bytes.NewBuffer var _ = strconv.Itoa var _ = fmt.Sprintf var _ = json.NewDecoder var _ = io.Copy var _ = url.Parse var _ = gensupport.MarshalJSON var _ = googleapi.Version var _ = errors.New var _ = strings.Replace var _ = context.Canceled const apiId = "cloudresourcemanager:v1beta1" const apiName = "cloudresourcemanager" const apiVersion = "v1beta1" const basePath = "https://cloudresourcemanager.googleapis.com/" // OAuth2 scopes used by this API. const ( // View and manage your data across Google Cloud Platform services CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform" // View your data across Google Cloud Platform services CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only" ) func New(client *http.Client) (*Service, error)
type Service struct { client *http.Client BasePath string // API endpoint base URL UserAgent string // optional additional User-Agent fragment Organizations *OrganizationsService Projects *ProjectsService } func (s *Service) userAgent() string { if s.UserAgent == "" { return googleapi.UserAgent } return googleapi.UserAgent + " " + s.UserAgent } func NewOrganizationsService(s *Service) *OrganizationsService { rs := &OrganizationsService{s: s} return rs } type OrganizationsService struct { s *Service } func NewProjectsService(s *Service) *ProjectsService { rs := &ProjectsService{s: s} return rs } type ProjectsService struct { s *Service } // Ancestor: Identifying information for a single ancestor of a project. type Ancestor struct { // ResourceId: Resource id of the ancestor. ResourceId *ResourceId `json:"resourceId,omitempty"` // ForceSendFields is a list of field names (e.g. "ResourceId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ResourceId") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Ancestor) MarshalJSON() ([]byte, error) { type NoMethod Ancestor raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AuditConfig: Specifies the audit configuration for a service. // The configuration determines which permission types are logged, and // what // identities, if any, are exempted from logging. // An AuditConfig must have one or more AuditLogConfigs. // // If there are AuditConfigs for both `allServices` and a specific // service, // the union of the two AuditConfigs is used for that service: the // log_types // specified in each AuditConfig are enabled, and the exempted_members // in each // AuditLogConfig are exempted. // // Example Policy with multiple AuditConfigs: // // { // "audit_configs": [ // { // "service": "allServices" // "audit_log_configs": [ // { // "log_type": "DATA_READ", // "exempted_members": [ // "user:[email protected]" // ] // }, // { // "log_type": "DATA_WRITE", // }, // { // "log_type": "ADMIN_READ", // } // ] // }, // { // "service": "fooservice.googleapis.com" // "audit_log_configs": [ // { // "log_type": "DATA_READ", // }, // { // "log_type": "DATA_WRITE", // "exempted_members": [ // "user:[email protected]" // ] // } // ] // } // ] // } // // For fooservice, this policy enables DATA_READ, DATA_WRITE and // ADMIN_READ // logging. It also exempts [email protected] from DATA_READ logging, // and // [email protected] from DATA_WRITE logging. type AuditConfig struct { // AuditLogConfigs: The configuration for logging of each type of // permission. AuditLogConfigs []*AuditLogConfig `json:"auditLogConfigs,omitempty"` // Service: Specifies a service that will be enabled for audit // logging. // For example, `storage.googleapis.com`, // `cloudsql.googleapis.com`. 
// `allServices` is a special value that covers all services. Service string `json:"service,omitempty"` // ForceSendFields is a list of field names (e.g. "AuditLogConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AuditLogConfigs") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *AuditConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // AuditLogConfig: Provides the configuration for logging a type of // permissions. // Example: // // { // "audit_log_configs": [ // { // "log_type": "DATA_READ", // "exempted_members": [ // "user:[email protected]" // ] // }, // { // "log_type": "DATA_WRITE", // } // ] // } // // This enables 'DATA_READ' and 'DATA_WRITE' logging, while // exempting // [email protected] from DATA_READ logging. type AuditLogConfig struct { // ExemptedMembers: Specifies the identities that do not cause logging // for this type of // permission. // Follows the same format of Binding.members. ExemptedMembers []string `json:"exemptedMembers,omitempty"` // LogType: The log type that this config enables. // // Possible values: // "LOG_TYPE_UNSPECIFIED" - Default case. Should never be this. // "ADMIN_READ" - Admin reads. Example: CloudIAM getIamPolicy // "DATA_WRITE" - Data writes. Example: CloudSQL Users create // "DATA_READ" - Data reads. Example: CloudSQL Users list LogType string `json:"logType,omitempty"` // ForceSendFields is a list of field names (e.g. "ExemptedMembers") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ExemptedMembers") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { type NoMethod AuditLogConfig raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Binding: Associates `members` with a `role`. type Binding struct { // Condition: Unimplemented. The condition that is associated with this // binding. // NOTE: an unsatisfied condition will not allow user access via // current // binding. 
Different bindings, including their conditions, are // examined // independently. Condition *Expr `json:"condition,omitempty"` // Members: Specifies the identities requesting access for a Cloud // Platform resource. // `members` can have the following values: // // * `allUsers`: A special identifier that represents anyone who is // on the internet; with or without a Google account. // // * `allAuthenticatedUsers`: A special identifier that represents // anyone // who is authenticated with a Google account or a service // account. // // * `user:{emailid}`: An email address that represents a specific // Google // account. For example, `[email protected]` . // // // * `serviceAccount:{emailid}`: An email address that represents a // service // account. For example, // `[email protected]`. // // * `group:{emailid}`: An email address that represents a Google // group. // For example, `[email protected]`. // // // * `domain:{domain}`: A Google Apps domain name that represents all // the // users of that domain. For example, `google.com` or // `example.com`. // // Members []string `json:"members,omitempty"` // Role: Role that is assigned to `members`. // For example, `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` // ForceSendFields is a list of field names (e.g. "Condition") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Condition") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Binding) MarshalJSON() ([]byte, error) { type NoMethod Binding raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Empty: A generic empty message that you can re-use to avoid defining // duplicated // empty messages in your APIs. A typical example is to use it as the // request // or the response type of an API method. For instance: // // service Foo { // rpc Bar(google.protobuf.Empty) returns // (google.protobuf.Empty); // } // // The JSON representation for `Empty` is empty JSON object `{}`. type Empty struct { // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` } // Expr: Represents an expression text. Example: // // title: "User account presence" // description: "Determines whether the request has a user account" // expression: "size(request.user) > 0" type Expr struct { // Description: An optional description of the expression. This is a // longer text which // describes the expression, e.g. when hovered over it in a UI. Description string `json:"description,omitempty"` // Expression: Textual representation of an expression in // Common Expression Language syntax. // // The application context of the containing message determines // which // well-known feature set of CEL is supported. 
Expression string `json:"expression,omitempty"` // Location: An optional string indicating the location of the // expression for error // reporting, e.g. a file name and a position in the file. Location string `json:"location,omitempty"` // Title: An optional title for the expression, i.e. a short string // describing // its purpose. This can be used e.g. in UIs which allow to enter // the // expression. Title string `json:"title,omitempty"` // ForceSendFields is a list of field names (e.g. "Description") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Description") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Expr) MarshalJSON() ([]byte, error) { type NoMethod Expr raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FolderOperation: Metadata describing a long running folder operation type FolderOperation struct { // DestinationParent: The resource name of the folder or organization we // are either creating // the folder under or moving the folder to. DestinationParent string `json:"destinationParent,omitempty"` // DisplayName: The display name of the folder. DisplayName string `json:"displayName,omitempty"` // OperationType: The type of this operation. // // Possible values: // "OPERATION_TYPE_UNSPECIFIED" - Operation type not specified. // "CREATE" - A create folder operation. // "MOVE" - A move folder operation. OperationType string `json:"operationType,omitempty"` // SourceParent: The resource name of the folder's parent. // Only applicable when the operation_type is MOVE. SourceParent string `json:"sourceParent,omitempty"` // ForceSendFields is a list of field names (e.g. "DestinationParent") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DestinationParent") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *FolderOperation) MarshalJSON() ([]byte, error) { type NoMethod FolderOperation raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // FolderOperationError: A classification of the Folder Operation error. 
type FolderOperationError struct { // ErrorMessageId: The type of operation error experienced. // // Possible values: // "ERROR_TYPE_UNSPECIFIED" - The error type was unrecognized or // unspecified. // "ACTIVE_FOLDER_HEIGHT_VIOLATION" - The attempted action would // violate the max folder depth constraint. // "MAX_CHILD_FOLDERS_VIOLATION" - The attempted action would violate // the max child folders constraint. // "FOLDER_NAME_UNIQUENESS_VIOLATION" - The attempted action would // violate the locally-unique folder // display_name constraint. // "RESOURCE_DELETED_VIOLATION" - The resource being moved has been // deleted. // "PARENT_DELETED_VIOLATION" - The resource a folder was being added // to has been deleted. // "CYCLE_INTRODUCED_VIOLATION" - The attempted action would introduce // cycle in resource path. // "FOLDER_BEING_MOVED_VIOLATION" - The attempted action would move a // folder that is already being moved. // "FOLDER_TO_DELETE_NON_EMPTY_VIOLATION" - The folder the caller is // trying to delete contains active resources. // "DELETED_FOLDER_HEIGHT_VIOLATION" - The attempted action would // violate the max deleted folder depth // constraint. ErrorMessageId string `json:"errorMessageId,omitempty"` // ForceSendFields is a list of field names (e.g. "ErrorMessageId") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "ErrorMessageId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *FolderOperationError) MarshalJSON() ([]byte, error) { type NoMethod FolderOperationError raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GetAncestryRequest: The request sent to the // GetAncestry // method. type GetAncestryRequest struct { } // GetAncestryResponse: Response from the GetAncestry method. type GetAncestryResponse struct { // Ancestor: Ancestors are ordered from bottom to top of the resource // hierarchy. The // first ancestor is the project itself, followed by the project's // parent, // etc. Ancestor []*Ancestor `json:"ancestor,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Ancestor") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Ancestor") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. 
However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *GetAncestryResponse) MarshalJSON() ([]byte, error) { type NoMethod GetAncestryResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // GetIamPolicyRequest: Request message for `GetIamPolicy` method. type GetIamPolicyRequest struct { } // ListOrganizationsResponse: The response returned from the // `ListOrganizations` method. type ListOrganizationsResponse struct { // NextPageToken: A pagination token to be used to retrieve the next // page of results. If the // result is too large to fit within the page size specified in the // request, // this field will be set with a token that can be used to fetch the // next page // of results. If this field is empty, it indicates that this // response // contains the last page of results. NextPageToken string `json:"nextPageToken,omitempty"` // Organizations: The list of Organizations that matched the list query, // possibly paginated. Organizations []*Organization `json:"organizations,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListOrganizationsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListOrganizationsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ListProjectsResponse: A page of the response received from // the // ListProjects // method. // // A paginated response where more pages are available // has // `next_page_token` set. This token can be used in a subsequent request // to // retrieve the next request page. type ListProjectsResponse struct { // NextPageToken: Pagination token. // // If the result set is too large to fit in a single response, this // token // is returned. It encodes the position of the current result // cursor. // Feeding this value into a new list request with the `page_token` // parameter // gives the next page of the results. // // When `next_page_token` is not filled in, there is no next page // and // the list returned is the last page in the result set. // // Pagination tokens have a limited lifetime. NextPageToken string `json:"nextPageToken,omitempty"` // Projects: The list of Projects that matched the list filter. This // list can // be paginated. 
Projects []*Project `json:"projects,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "NextPageToken") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "NextPageToken") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { type NoMethod ListProjectsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Organization: The root node in the resource hierarchy to which a // particular entity's // (e.g., company) resources belong. type Organization struct { // CreationTime: Timestamp when the Organization was created. Assigned // by the server. // @OutputOnly CreationTime string `json:"creationTime,omitempty"` // DisplayName: A human-readable string that refers to the Organization // in the // GCP Console UI. This string is set by the server and cannot // be // changed. The string will be set to the primary domain (for // example, // "google.com") of the G Suite customer that owns the // organization. // @OutputOnly DisplayName string `json:"displayName,omitempty"` // LifecycleState: The organization's current lifecycle state. Assigned // by the server. // @OutputOnly // // Possible values: // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only // useful for distinguishing unset values. // "ACTIVE" - The normal and active state. // "DELETE_REQUESTED" - The organization has been marked for deletion // by the user. LifecycleState string `json:"lifecycleState,omitempty"` // Name: Output Only. The resource name of the organization. This is // the // organization's relative path in the API. Its format // is // "organizations/[organization_id]". For example, "organizations/1234". Name string `json:"name,omitempty"` // OrganizationId: An immutable id for the Organization that is assigned // on creation. This // should be omitted when creating a new Organization. // This field is read-only. OrganizationId string `json:"organizationId,omitempty"` // Owner: The owner of this Organization. The owner should be specified // on // creation. Once set, it cannot be changed. // This field is required. Owner *OrganizationOwner `json:"owner,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "CreationTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. 
ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreationTime") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Organization) MarshalJSON() ([]byte, error) { type NoMethod Organization raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // OrganizationOwner: The entity that owns an Organization. The lifetime // of the Organization and // all of its descendants are bound to the `OrganizationOwner`. If // the // `OrganizationOwner` is deleted, the Organization and all its // descendants will // be deleted. type OrganizationOwner struct { // DirectoryCustomerId: The G Suite customer id used in the Directory // API. DirectoryCustomerId string `json:"directoryCustomerId,omitempty"` // ForceSendFields is a list of field names (e.g. "DirectoryCustomerId") // to unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "DirectoryCustomerId") to // include in API requests with the JSON null value. By default, fields // with empty values are omitted from API requests. However, any field // with an empty value appearing in NullFields will be sent to the // server as null. It is an error if a field in this list has a // non-empty value. This may be used to include null fields in Patch // requests. NullFields []string `json:"-"` } func (s *OrganizationOwner) MarshalJSON() ([]byte, error) { type NoMethod OrganizationOwner raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Policy: Defines an Identity and Access Management (IAM) policy. It is // used to // specify access control policies for Cloud Platform resources. // // // A `Policy` consists of a list of `bindings`. A `binding` binds a list // of // `members` to a `role`, where the members can be user accounts, Google // groups, // Google domains, and service accounts. A `role` is a named list of // permissions // defined by IAM. // // **JSON Example** // // { // "bindings": [ // { // "role": "roles/owner", // "members": [ // "user:[email protected]", // "group:[email protected]", // "domain:google.com", // // "serviceAccount:[email protected]" // ] // }, // { // "role": "roles/viewer", // "members": ["user:[email protected]"] // } // ] // } // // **YAML Example** // // bindings: // - members: // - user:[email protected] // - group:[email protected] // - domain:google.com // - serviceAccount:[email protected] // role: roles/owner // - members: // - user:[email protected] // role: roles/viewer // // // For a description of IAM and its features, see the // [IAM developer's guide](https://cloud.google.com/iam/docs). type Policy struct { // AuditConfigs: Specifies cloud audit logging configuration for this // policy. AuditConfigs []*AuditConfig `json:"auditConfigs,omitempty"` // Bindings: Associates a list of `members` to a `role`. 
// `bindings` with no members will result in an error. Bindings []*Binding `json:"bindings,omitempty"` // Etag: `etag` is used for optimistic concurrency control as a way to // help // prevent simultaneous updates of a policy from overwriting each // other. // It is strongly suggested that systems make use of the `etag` in // the // read-modify-write cycle to perform policy updates in order to avoid // race // conditions: An `etag` is returned in the response to `getIamPolicy`, // and // systems are expected to put that etag in the request to // `setIamPolicy` to // ensure that their change will be applied to the same version of the // policy. // // If no `etag` is provided in the call to `setIamPolicy`, then the // existing // policy is overwritten blindly. Etag string `json:"etag,omitempty"` // Version: Deprecated. Version int64 `json:"version,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "AuditConfigs") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "AuditConfigs") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Policy) MarshalJSON() ([]byte, error) { type NoMethod Policy raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // Project: A Project is a high-level Google Cloud Platform entity. It // is a // container for ACLs, APIs, App Engine Apps, VMs, and other // Google Cloud Platform resources. type Project struct { // CreateTime: Creation time. // // Read-only. CreateTime string `json:"createTime,omitempty"` // Labels: The labels associated with this Project. // // Label keys must be between 1 and 63 characters long and must // conform // to the following regular expression: // \[a-z\](\[-a-z0-9\]*\[a-z0-9\])?. // // Label values must be between 0 and 63 characters long and must // conform // to the regular expression (\[a-z\](\[-a-z0-9\]*\[a-z0-9\])?)?. A // label // value can be empty. // // No more than 256 labels can be associated with a given // resource. // // Clients should store labels in a representation such as JSON that // does not // depend on specific characters being disallowed. // // Example: <code>"environment" : "dev"</code> // Read-write. Labels map[string]string `json:"labels,omitempty"` // LifecycleState: The Project lifecycle state. // // Read-only. // // Possible values: // "LIFECYCLE_STATE_UNSPECIFIED" - Unspecified state. This is only // used/useful for distinguishing // unset values. // "ACTIVE" - The normal and active state. // "DELETE_REQUESTED" - The project has been marked for deletion by // the user // (by invoking DeleteProject) // or by the system (Google Cloud Platform). // This can generally be reversed by invoking UndeleteProject. 
// "DELETE_IN_PROGRESS" - This lifecycle state is no longer used and // is not returned by the API. LifecycleState string `json:"lifecycleState,omitempty"` // Name: The user-assigned display name of the Project. // It must be 4 to 30 characters. // Allowed characters are: lowercase and uppercase letters, // numbers, // hyphen, single-quote, double-quote, space, and exclamation // point. // // Example: <code>My Project</code> // Read-write. Name string `json:"name,omitempty"` // Parent: An optional reference to a parent Resource. // // Supported parent types include "organization" and "folder". Once set, // the // parent cannot be cleared. The `parent` can be set on creation or // using the // `UpdateProject` method; the end user must have // the // `resourcemanager.projects.create` permission on the // parent. // // Read-write. Parent *ResourceId `json:"parent,omitempty"` // ProjectId: The unique, user-assigned ID of the Project. // It must be 6 to 30 lowercase letters, digits, or hyphens. // It must start with a letter. // Trailing hyphens are prohibited. // // Example: <code>tokyo-rain-123</code> // Read-only after creation. ProjectId string `json:"projectId,omitempty"` // ProjectNumber: The number uniquely identifying the project. // // Example: <code>415104041262</code> // Read-only. ProjectNumber int64 `json:"projectNumber,omitempty,string"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreateTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *Project) MarshalJSON() ([]byte, error) { type NoMethod Project raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ProjectCreationStatus: A status object which is used as the // `metadata` field for the Operation // returned by CreateProject. It provides insight for when significant // phases of // Project creation have completed. type ProjectCreationStatus struct { // CreateTime: Creation time of the project creation workflow. CreateTime string `json:"createTime,omitempty"` // Gettable: True if the project can be retrieved using GetProject. No // other operations // on the project are guaranteed to work until the project creation // is // complete. Gettable bool `json:"gettable,omitempty"` // Ready: True if the project creation process is complete. Ready bool `json:"ready,omitempty"` // ForceSendFields is a list of field names (e.g. "CreateTime") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "CreateTime") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ProjectCreationStatus) MarshalJSON() ([]byte, error) { type NoMethod ProjectCreationStatus raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // ResourceId: A container to reference an id for any resource type. A // `resource` in Google // Cloud Platform is a generic term for something you (a developer) may // want to // interact with through one of our API's. Some examples are an App // Engine app, // a Compute Engine instance, a Cloud SQL database, and so on. type ResourceId struct { // Id: Required field for the type-specific id. This should correspond // to the id // used in the type-specific API's. Id string `json:"id,omitempty"` // Type: Required field representing the resource type this id is // for. // At present, the valid types are "project", "folder", and // "organization". Type string `json:"type,omitempty"` // ForceSendFields is a list of field names (e.g. "Id") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Id") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *ResourceId) MarshalJSON() ([]byte, error) { type NoMethod ResourceId raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // SetIamPolicyRequest: Request message for `SetIamPolicy` method. type SetIamPolicyRequest struct { // Policy: REQUIRED: The complete policy to be applied to the // `resource`. The size of // the policy is limited to a few 10s of KB. An empty policy is a // valid policy but certain Cloud Platform services (such as // Projects) // might reject them. Policy *Policy `json:"policy,omitempty"` // UpdateMask: OPTIONAL: A FieldMask specifying which fields of the // policy to modify. Only // the fields in the mask will be modified. If no mask is provided, // the // following default mask is used: // paths: "bindings, etag" // This field is only used by Cloud IAM. UpdateMask string `json:"updateMask,omitempty"` // ForceSendFields is a list of field names (e.g. "Policy") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. 
However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Policy") to include in API // requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *SetIamPolicyRequest) MarshalJSON() ([]byte, error) { type NoMethod SetIamPolicyRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TestIamPermissionsRequest: Request message for `TestIamPermissions` // method. type TestIamPermissionsRequest struct { // Permissions: The set of permissions to check for the `resource`. // Permissions with // wildcards (such as '*' or 'storage.*') are not allowed. For // more // information see // [IAM // Overview](https://cloud.google.com/iam/docs/overview#permissions). Permissions []string `json:"permissions,omitempty"` // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Permissions") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TestIamPermissionsRequest) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsRequest raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // TestIamPermissionsResponse: Response message for `TestIamPermissions` // method. type TestIamPermissionsResponse struct { // Permissions: A subset of `TestPermissionsRequest.permissions` that // the caller is // allowed. Permissions []string `json:"permissions,omitempty"` // ServerResponse contains the HTTP response code and headers from the // server. googleapi.ServerResponse `json:"-"` // ForceSendFields is a list of field names (e.g. "Permissions") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the // server regardless of whether the field is empty or not. This may be // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` // NullFields is a list of field names (e.g. "Permissions") to include // in API requests with the JSON null value. By default, fields with // empty values are omitted from API requests. However, any field with // an empty value appearing in NullFields will be sent to the server as // null. 
It is an error if a field in this list has a non-empty value. // This may be used to include null fields in Patch requests. NullFields []string `json:"-"` } func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { type NoMethod TestIamPermissionsResponse raw := NoMethod(*s) return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } // UndeleteProjectRequest: The request sent to the // UndeleteProject // method. type UndeleteProjectRequest struct { } // method id "cloudresourcemanager.organizations.get": type OrganizationsGetCall struct { s *Service name string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Fetches an Organization resource identified by the specified // resource name. func (r *OrganizationsService) Get(name string) *OrganizationsGetCall { c := &OrganizationsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name return c } // OrganizationId sets the optional parameter "organizationId": The id // of the Organization resource to fetch. // This field is deprecated and will be removed in v1. Use name instead. func (c *OrganizationsGetCall) OrganizationId(organizationId string) *OrganizationsGetCall { c.urlParams_.Set("organizationId", organizationId) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsGetCall) Fields(s ...googleapi.Field) *OrganizationsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *OrganizationsGetCall) IfNoneMatch(entityTag string) *OrganizationsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsGetCall) Context(ctx context.Context) *OrganizationsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OrganizationsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.get" call. // Exactly one of *Organization or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Organization.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. 
Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. func (c *OrganizationsGetCall) Do(opts ...googleapi.CallOption) (*Organization, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Organization{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Fetches an Organization resource identified by the specified resource name.", // "flatPath": "v1beta1/organizations/{organizationsId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.organizations.get", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "The resource name of the Organization to fetch, e.g. \"organizations/1234\".", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // }, // "organizationId": { // "description": "The id of the Organization resource to fetch.\nThis field is deprecated and will be removed in v1. Use name instead.", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "response": { // "$ref": "Organization" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.organizations.getIamPolicy": type OrganizationsGetIamPolicyCall struct { s *Service resource string getiampolicyrequest *GetIamPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // GetIamPolicy: Gets the access control policy for an Organization // resource. May be empty // if no such policy or resource exists. The `resource` field should be // the // organization's resource name, e.g. "organizations/123". func (r *OrganizationsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *OrganizationsGetIamPolicyCall { c := &OrganizationsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.getiampolicyrequest = getiampolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsGetIamPolicyCall) Fields(s ...googleapi.Field) *OrganizationsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsGetIamPolicyCall) Context(ctx context.Context) *OrganizationsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
func (c *OrganizationsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *OrganizationsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets the access control policy for an Organization resource. May be empty\nif no such policy or resource exists. The `resource` field should be the\norganization's resource name, e.g. \"organizations/123\".", // "flatPath": "v1beta1/organizations/{organizationsId}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.getIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:getIamPolicy", // "request": { // "$ref": "GetIamPolicyRequest" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.organizations.list": type OrganizationsListCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists Organization resources that are visible to the user and // satisfy // the specified filter. This method returns Organizations in an // unspecified // order. 
New Organizations do not necessarily appear at the end of the // list. func (r *OrganizationsService) List() *OrganizationsListCall { c := &OrganizationsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Filter sets the optional parameter "filter": An optional query string // used to filter the Organizations to return in // the response. Filter rules are case-insensitive. // // // Organizations may be filtered by `owner.directoryCustomerId` or // by // `domain`, where the domain is a G Suite domain, for // example: // // |Filter|Description| // |------|-----------| // |owner.directorycu // stomerid:123456789|Organizations with `owner.directory_customer_id` // equal to `123456789`.| // |domain:google.com|Organizations corresponding to the domain // `google.com`.| // // This field is optional. func (c *OrganizationsListCall) Filter(filter string) *OrganizationsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of Organizations to return in the response. // This field is optional. func (c *OrganizationsListCall) PageSize(pageSize int64) *OrganizationsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to `ListOrganizations` // that indicates from where listing should continue. // This field is optional. func (c *OrganizationsListCall) PageToken(pageToken string) *OrganizationsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsListCall) Fields(s ...googleapi.Field) *OrganizationsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *OrganizationsListCall) IfNoneMatch(entityTag string) *OrganizationsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsListCall) Context(ctx context.Context) *OrganizationsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OrganizationsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/organizations") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.list" call. 
// Exactly one of *ListOrganizationsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *ListOrganizationsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *OrganizationsListCall) Do(opts ...googleapi.CallOption) (*ListOrganizationsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListOrganizationsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists Organization resources that are visible to the user and satisfy\nthe specified filter. This method returns Organizations in an unspecified\norder. New Organizations do not necessarily appear at the end of the list.", // "flatPath": "v1beta1/organizations", // "httpMethod": "GET", // "id": "cloudresourcemanager.organizations.list", // "parameterOrder": [], // "parameters": { // "filter": { // "description": "An optional query string used to filter the Organizations to return in\nthe response. Filter rules are case-insensitive.\n\n\nOrganizations may be filtered by `owner.directoryCustomerId` or by\n`domain`, where the domain is a G Suite domain, for example:\n\n|Filter|Description|\n|------|-----------|\n|owner.directorycustomerid:123456789|Organizations with `owner.directory_customer_id` equal to `123456789`.|\n|domain:google.com|Organizations corresponding to the domain `google.com`.|\n\nThis field is optional.", // "location": "query", // "type": "string" // }, // "pageSize": { // "description": "The maximum number of Organizations to return in the response.\nThis field is optional.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "A pagination token returned from a previous call to `ListOrganizations`\nthat indicates from where listing should continue.\nThis field is optional.", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/organizations", // "response": { // "$ref": "ListOrganizationsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. // The provided context supersedes any context provided to the Context method. 
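//
// A minimal usage sketch, assuming a *Service value named svc has already
// been constructed and that ListOrganizationsResponse exposes the returned
// organizations as an Organizations slice (names outside this file are
// illustrative):
//
//	err := svc.Organizations.List().Pages(ctx, func(page *ListOrganizationsResponse) error {
//		for _, org := range page.Organizations {
//			fmt.Println(org.Name)
//		}
//		return nil // a non-nil error here stops the iteration
//	})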
func (c *OrganizationsListCall) Pages(ctx context.Context, f func(*ListOrganizationsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "cloudresourcemanager.organizations.setIamPolicy": type OrganizationsSetIamPolicyCall struct { s *Service resource string setiampolicyrequest *SetIamPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SetIamPolicy: Sets the access control policy on an Organization // resource. Replaces any // existing policy. The `resource` field should be the organization's // resource // name, e.g. "organizations/123". func (r *OrganizationsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *OrganizationsSetIamPolicyCall { c := &OrganizationsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.setiampolicyrequest = setiampolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsSetIamPolicyCall) Fields(s ...googleapi.Field) *OrganizationsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsSetIamPolicyCall) Context(ctx context.Context) *OrganizationsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OrganizationsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *OrganizationsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
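	// Editorial sketch (not generated code): because SetIamPolicy replaces the
	// entire policy, callers normally read-modify-write before reaching Do,
	// for example:
	//
	//	policy, err := svc.Organizations.GetIamPolicy("organizations/123", &GetIamPolicyRequest{}).Do()
	//	// ... handle err, then append a binding; field names assumed from the
	//	// standard IAM types in this package ...
	//	policy.Bindings = append(policy.Bindings, &Binding{
	//		Role:    "roles/resourcemanager.organizationViewer",
	//		Members: []string{"user:alice@example.com"},
	//	})
	//	policy, err = svc.Organizations.SetIamPolicy("organizations/123", &SetIamPolicyRequest{Policy: policy}).Do()
	//
	// svc, the role and the member string above are illustrative assumptions.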
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Sets the access control policy on an Organization resource. Replaces any\nexisting policy. The `resource` field should be the organization's resource\nname, e.g. \"organizations/123\".", // "flatPath": "v1beta1/organizations/{organizationsId}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.setIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:setIamPolicy", // "request": { // "$ref": "SetIamPolicyRequest" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.organizations.testIamPermissions": type OrganizationsTestIamPermissionsCall struct { s *Service resource string testiampermissionsrequest *TestIamPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // TestIamPermissions: Returns permissions that a caller has on the // specified Organization. // The `resource` field should be the organization's resource name, // e.g. "organizations/123". func (r *OrganizationsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *OrganizationsTestIamPermissionsCall { c := &OrganizationsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.testiampermissionsrequest = testiampermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsTestIamPermissionsCall) Fields(s ...googleapi.Field) *OrganizationsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsTestIamPermissionsCall) Context(ctx context.Context) *OrganizationsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. 
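//
// For example, a caller could attach an extra request header before Do
// (sketch; svc, req and the header name are illustrative):
//
//	call := svc.Organizations.TestIamPermissions("organizations/123", req)
//	call.Header().Set("X-Example-Debug", "1")
//	resp, err := call.Do()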
func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+resource}:testIamPermissions") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.testIamPermissions" call. // Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *TestIamPermissionsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *OrganizationsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns permissions that a caller has on the specified Organization.\nThe `resource` field should be the organization's resource name,\ne.g. 
\"organizations/123\".", // "flatPath": "v1beta1/organizations/{organizationsId}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.organizations.testIamPermissions", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+resource}:testIamPermissions", // "request": { // "$ref": "TestIamPermissionsRequest" // }, // "response": { // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.organizations.update": type OrganizationsUpdateCall struct { s *Service name string organization *Organization urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates an Organization resource identified by the specified // resource name. func (r *OrganizationsService) Update(name string, organization *Organization) *OrganizationsUpdateCall { c := &OrganizationsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.name = name c.organization = organization return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *OrganizationsUpdateCall) Fields(s ...googleapi.Field) *OrganizationsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *OrganizationsUpdateCall) Context(ctx context.Context) *OrganizationsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *OrganizationsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *OrganizationsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.organization) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/{+name}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "name": c.name, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.organizations.update" call. // Exactly one of *Organization or error will be non-nil. Any non-2xx // status code is an error. Response headers are in either // *Organization.ServerResponse.Header or (if a response was returned at // all) in error.(*googleapi.Error).Header. Use googleapi.IsNotModified // to check whether the returned error was because // http.StatusNotModified was returned. 
func (c *OrganizationsUpdateCall) Do(opts ...googleapi.CallOption) (*Organization, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Organization{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates an Organization resource identified by the specified resource name.", // "flatPath": "v1beta1/organizations/{organizationsId}", // "httpMethod": "PUT", // "id": "cloudresourcemanager.organizations.update", // "parameterOrder": [ // "name" // ], // "parameters": { // "name": { // "description": "Output Only. The resource name of the organization. This is the\norganization's relative path in the API. Its format is\n\"organizations/[organization_id]\". For example, \"organizations/1234\".", // "location": "path", // "pattern": "^organizations/[^/]+$", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/{+name}", // "request": { // "$ref": "Organization" // }, // "response": { // "$ref": "Organization" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.projects.create": type ProjectsCreateCall struct { s *Service project *Project urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Create: Creates a Project resource. // // Initially, the Project resource is owned by its creator // exclusively. // The creator can later grant permission to others to read or update // the // Project. // // Several APIs are activated automatically for the Project, // including // Google Cloud Storage. The parent is identified by a // specified // ResourceId, which must include both an ID and a type, such // as // project, folder, or organization. // // This method does not associate the new project with a billing // account. // You can set or update the billing account associated with a project // using // the // [`projects.updateBillingInfo`] // (/billing/reference/rest/v1/projects/up // dateBillingInfo) method. func (r *ProjectsService) Create(project *Project) *ProjectsCreateCall { c := &ProjectsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.project = project return c } // UseLegacyStack sets the optional parameter "useLegacyStack": A safety // hatch to opt out of the new reliable project creation process. func (c *ProjectsCreateCall) UseLegacyStack(useLegacyStack bool) *ProjectsCreateCall { c.urlParams_.Set("useLegacyStack", fmt.Sprint(useLegacyStack)) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsCreateCall) Fields(s ...googleapi.Field) *ProjectsCreateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
func (c *ProjectsCreateCall) Context(ctx context.Context) *ProjectsCreateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsCreateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.create" call. // Exactly one of *Project or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Project.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsCreateCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Creates a Project resource.\n\nInitially, the Project resource is owned by its creator exclusively.\nThe creator can later grant permission to others to read or update the\nProject.\n\nSeveral APIs are activated automatically for the Project, including\nGoogle Cloud Storage. 
The parent is identified by a specified\nResourceId, which must include both an ID and a type, such as\nproject, folder, or organization.\n\nThis method does not associate the new project with a billing account.\nYou can set or update the billing account associated with a project using\nthe [`projects.updateBillingInfo`]\n(/billing/reference/rest/v1/projects/updateBillingInfo) method.", // "flatPath": "v1beta1/projects", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.create", // "parameterOrder": [], // "parameters": { // "useLegacyStack": { // "description": "A safety hatch to opt out of the new reliable project creation process.", // "location": "query", // "type": "boolean" // } // }, // "path": "v1beta1/projects", // "request": { // "$ref": "Project" // }, // "response": { // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.projects.delete": type ProjectsDeleteCall struct { s *Service projectId string urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Delete: Marks the Project identified by the specified // `project_id` (for example, `my-project-123`) for deletion. // This method will only affect the Project if it has a lifecycle state // of // ACTIVE. // // This method changes the Project's lifecycle state from // ACTIVE // to DELETE_REQUESTED. // The deletion starts at an unspecified time, at which point the // project is // no longer accessible. // // Until the deletion completes, you can check the lifecycle // state // checked by retrieving the Project with GetProject, // and the Project remains visible to ListProjects. // However, you cannot update the project. // // After the deletion completes, the Project is not retrievable by // the GetProject and // ListProjects methods. // // The caller must have modify permissions for this Project. func (r *ProjectsService) Delete(projectId string) *ProjectsDeleteCall { c := &ProjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsDeleteCall) Fields(s ...googleapi.Field) *ProjectsDeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsDeleteCall) Context(ctx context.Context) *ProjectsDeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsDeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("DELETE", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.delete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsDeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Marks the Project identified by the specified\n`project_id` (for example, `my-project-123`) for deletion.\nThis method will only affect the Project if it has a lifecycle state of\nACTIVE.\n\nThis method changes the Project's lifecycle state from\nACTIVE\nto DELETE_REQUESTED.\nThe deletion starts at an unspecified time, at which point the project is\nno longer accessible.\n\nUntil the deletion completes, you can check the lifecycle state\nchecked by retrieving the Project with GetProject,\nand the Project remains visible to ListProjects.\nHowever, you cannot update the project.\n\nAfter the deletion completes, the Project is not retrievable by\nthe GetProject and\nListProjects methods.\n\nThe caller must have modify permissions for this Project.", // "flatPath": "v1beta1/projects/{projectId}", // "httpMethod": "DELETE", // "id": "cloudresourcemanager.projects.delete", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The Project ID (for example, `foo-bar-123`).\n\nRequired.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{projectId}", // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.projects.get": type ProjectsGetCall struct { s *Service projectId string urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // Get: Retrieves the Project identified by the specified // `project_id` (for example, `my-project-123`). // // The caller must have read permissions for this Project. func (r *ProjectsService) Get(projectId string) *ProjectsGetCall { c := &ProjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
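//
// For example, to ask the server for only a subset of the Project fields
// (sketch; svc is assumed, and the field names merely illustrate the
// partial-response syntax):
//
//	proj, err := svc.Projects.Get("my-project-123").Fields("projectId", "name").Do()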
func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsGetCall) IfNoneMatch(entityTag string) *ProjectsGetCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsGetCall) Context(ctx context.Context) *ProjectsGetCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsGetCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.get" call. // Exactly one of *Project or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Project.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsGetCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) 
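	// Editorial note: when IfNoneMatch was set and the server answers with
	// HTTP 304, the code below returns a *googleapi.Error carrying that
	// status code; callers can detect this with googleapi.IsNotModified,
	// e.g. (sketch; "call" is illustrative):
	//
	//	if _, err := call.Do(); googleapi.IsNotModified(err) {
	//		// the cached copy is still current
	//	}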
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Retrieves the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", // "flatPath": "v1beta1/projects/{projectId}", // "httpMethod": "GET", // "id": "cloudresourcemanager.projects.get", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{projectId}", // "response": { // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.projects.getAncestry": type ProjectsGetAncestryCall struct { s *Service projectId string getancestryrequest *GetAncestryRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // GetAncestry: Gets a list of ancestors in the resource hierarchy for // the Project // identified by the specified `project_id` (for example, // `my-project-123`). // // The caller must have read permissions for this Project. func (r *ProjectsService) GetAncestry(projectId string, getancestryrequest *GetAncestryRequest) *ProjectsGetAncestryCall { c := &ProjectsGetAncestryCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId c.getancestryrequest = getancestryrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsGetAncestryCall) Fields(s ...googleapi.Field) *ProjectsGetAncestryCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsGetAncestryCall) Context(ctx context.Context) *ProjectsGetAncestryCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsGetAncestryCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getancestryrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}:getAncestry") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.getAncestry" call. // Exactly one of *GetAncestryResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *GetAncestryResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsGetAncestryCall) Do(opts ...googleapi.CallOption) (*GetAncestryResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &GetAncestryResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Gets a list of ancestors in the resource hierarchy for the Project\nidentified by the specified `project_id` (for example, `my-project-123`).\n\nThe caller must have read permissions for this Project.", // "flatPath": "v1beta1/projects/{projectId}:getAncestry", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getAncestry", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The Project ID (for example, `my-project-123`).\n\nRequired.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{projectId}:getAncestry", // "request": { // "$ref": "GetAncestryRequest" // }, // "response": { // "$ref": "GetAncestryResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.projects.getIamPolicy": type ProjectsGetIamPolicyCall struct { s *Service resource string getiampolicyrequest *GetIamPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // GetIamPolicy: Returns the IAM access control policy for the specified // Project. // Permission is denied if the policy or the resource does not // exist. // // For additional information about resource structure and // identification, // see [Resource Names](/apis/design/resource_names). func (r *ProjectsService) GetIamPolicy(resource string, getiampolicyrequest *GetIamPolicyRequest) *ProjectsGetIamPolicyCall { c := &ProjectsGetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.getiampolicyrequest = getiampolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsGetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsGetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. 
Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsGetIamPolicyCall) Context(ctx context.Context) *ProjectsGetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsGetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.getiampolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{resource}:getIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.getIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsGetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns the IAM access control policy for the specified Project.\nPermission is denied if the policy or the resource does not exist.\n\nFor additional information about resource structure and identification,\nsee [Resource Names](/apis/design/resource_names).", // "flatPath": "v1beta1/projects/{resource}:getIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.getIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{resource}:getIamPolicy", // "request": { // "$ref": "GetIamPolicyRequest" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.projects.list": type ProjectsListCall struct { s *Service urlParams_ gensupport.URLParams ifNoneMatch_ string ctx_ context.Context header_ http.Header } // List: Lists Projects that are visible to the user and satisfy // the // specified filter. This method returns Projects in an unspecified // order. // This method is eventually consistent with project mutations; this // means // that a newly created project may not appear in the results or // recent // updates to an existing project may not be reflected in the results. // To // retrieve the latest state of a project, use the GetProjectmethod. func (r *ProjectsService) List() *ProjectsListCall { c := &ProjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)} return c } // Filter sets the optional parameter "filter": An expression for // filtering the results of the request. Filter rules are // case insensitive. The fields eligible for filtering are: // // + `name` // + `id` // + <code>labels.<em>key</em></code> where *key* is the name of a // label // // Some examples of using labels as // filters: // // |Filter|Description| // |------|-----------| // |name:how*|The project's name starts with "how".| // |name:Howl|The project's name is `Howl` or // `howl`.| // |name:HOWL|Equivalent to above.| // |NAME:howl|Equivalent to above.| // |labels.color:*|The project has the label // `color`.| // |labels.color:red|The project's label `color` has the value // `red`.| // |labels.color:red&nbsp;labels.size:big|The project's label `color` // has the value `red` and its label `size` has the value `big`. // // If you specify a filter that has both `parent.type` and `parent.id`, // then // the `resourcemanager.projects.list` permission is checked on the // parent. // If the user has this permission, all projects under the parent will // be // returned after remaining filters have been applied. 
If the user lacks // this // permission, then all projects for which the user has // the // `resourcemanager.projects.get` permission will be returned after // remaining // filters have been applied. If no filter is specified, the call will // return // projects for which the user has `resourcemanager.projects.get` // permissions. func (c *ProjectsListCall) Filter(filter string) *ProjectsListCall { c.urlParams_.Set("filter", filter) return c } // PageSize sets the optional parameter "pageSize": The maximum number // of Projects to return in the response. // The server can return fewer Projects than requested. // If unspecified, server picks an appropriate default. func (c *ProjectsListCall) PageSize(pageSize int64) *ProjectsListCall { c.urlParams_.Set("pageSize", fmt.Sprint(pageSize)) return c } // PageToken sets the optional parameter "pageToken": A pagination token // returned from a previous call to ListProjects // that indicates from where listing should continue. func (c *ProjectsListCall) PageToken(pageToken string) *ProjectsListCall { c.urlParams_.Set("pageToken", pageToken) return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsListCall) Fields(s ...googleapi.Field) *ProjectsListCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // IfNoneMatch sets the optional parameter which makes the operation // fail if the object's ETag matches the given value. This is useful for // getting updates only after the object has changed since the last // request. Use googleapi.IsNotModified to check whether the response // error from Do is the result of In-None-Match. func (c *ProjectsListCall) IfNoneMatch(entityTag string) *ProjectsListCall { c.ifNoneMatch_ = entityTag return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsListCall) Context(ctx context.Context) *ProjectsListCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsListCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) if c.ifNoneMatch_ != "" { reqHeaders.Set("If-None-Match", c.ifNoneMatch_) } var body io.Reader = nil c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("GET", urls, body) if err != nil { return nil, err } req.Header = reqHeaders return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.list" call. // Exactly one of *ListProjectsResponse or error will be non-nil. Any // non-2xx status code is an error. Response headers are in either // *ListProjectsResponse.ServerResponse.Header or (if a response was // returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsListCall) Do(opts ...googleapi.CallOption) (*ListProjectsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) 
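	// Editorial sketch: callers usually combine Filter with Pages instead of
	// invoking Do directly, for example:
	//
	//	err := svc.Projects.List().Filter("labels.color:red").Pages(ctx, func(page *ListProjectsResponse) error {
	//		for _, p := range page.Projects {
	//			fmt.Println(p.ProjectId)
	//		}
	//		return nil
	//	})
	//
	// svc is assumed to be a *Service, and the Projects slice on
	// ListProjectsResponse is assumed from the standard generated shape; the
	// filter string mirrors the examples in the Filter documentation above.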
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &ListProjectsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Lists Projects that are visible to the user and satisfy the\nspecified filter. This method returns Projects in an unspecified order.\nThis method is eventually consistent with project mutations; this means\nthat a newly created project may not appear in the results or recent\nupdates to an existing project may not be reflected in the results. To\nretrieve the latest state of a project, use the GetProjectmethod.", // "flatPath": "v1beta1/projects", // "httpMethod": "GET", // "id": "cloudresourcemanager.projects.list", // "parameterOrder": [], // "parameters": { // "filter": { // "description": "An expression for filtering the results of the request. Filter rules are\ncase insensitive. The fields eligible for filtering are:\n\n+ `name`\n+ `id`\n+ \u003ccode\u003elabels.\u003cem\u003ekey\u003c/em\u003e\u003c/code\u003e where *key* is the name of a label\n\nSome examples of using labels as filters:\n\n|Filter|Description|\n|------|-----------|\n|name:how*|The project's name starts with \"how\".|\n|name:Howl|The project's name is `Howl` or `howl`.|\n|name:HOWL|Equivalent to above.|\n|NAME:howl|Equivalent to above.|\n|labels.color:*|The project has the label `color`.|\n|labels.color:red|The project's label `color` has the value `red`.|\n|labels.color:red\u0026nbsp;labels.size:big|The project's label `color` has the value `red` and its label `size` has the value `big`.\n\nIf you specify a filter that has both `parent.type` and `parent.id`, then\nthe `resourcemanager.projects.list` permission is checked on the parent.\nIf the user has this permission, all projects under the parent will be\nreturned after remaining filters have been applied. If the user lacks this\npermission, then all projects for which the user has the\n`resourcemanager.projects.get` permission will be returned after remaining\nfilters have been applied. If no filter is specified, the call will return\nprojects for which the user has `resourcemanager.projects.get` permissions.\n\nOptional.", // "location": "query", // "type": "string" // }, // "pageSize": { // "description": "The maximum number of Projects to return in the response.\nThe server can return fewer Projects than requested.\nIf unspecified, server picks an appropriate default.\n\nOptional.", // "format": "int32", // "location": "query", // "type": "integer" // }, // "pageToken": { // "description": "A pagination token returned from a previous call to ListProjects\nthat indicates from where listing should continue.\n\nOptional.", // "location": "query", // "type": "string" // } // }, // "path": "v1beta1/projects", // "response": { // "$ref": "ListProjectsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // Pages invokes f for each page of results. // A non-nil error returned from f will halt the iteration. 
// The provided context supersedes any context provided to the Context method. func (c *ProjectsListCall) Pages(ctx context.Context, f func(*ListProjectsResponse) error) error { c.ctx_ = ctx defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point for { x, err := c.Do() if err != nil { return err } if err := f(x); err != nil { return err } if x.NextPageToken == "" { return nil } c.PageToken(x.NextPageToken) } } // method id "cloudresourcemanager.projects.setIamPolicy": type ProjectsSetIamPolicyCall struct { s *Service resource string setiampolicyrequest *SetIamPolicyRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // SetIamPolicy: Sets the IAM access control policy for the specified // Project. Overwrites // any existing policy. // // The following constraints apply when using `setIamPolicy()`: // // + Project does not support `allUsers` and `allAuthenticatedUsers` // as // `members` in a `Binding` of a `Policy`. // // + The owner role can be granted only to `user` and // `serviceAccount`. // // + Service accounts can be made owners of a project directly // without any restrictions. However, to be added as an owner, a user // must be // invited via Cloud Platform console and must accept the invitation. // // + A user cannot be granted the owner role using `setIamPolicy()`. The // user // must be granted the owner role using the Cloud Platform Console and // must // explicitly accept the invitation. // // + Invitations to grant the owner role cannot be sent // using // `setIamPolicy()`; they must be sent only using the Cloud Platform // Console. // // + Membership changes that leave the project without any owners that // have // accepted the Terms of Service (ToS) will be rejected. // // + If the project is not part of an organization, there must be at // least // one owner who has accepted the Terms of Service (ToS) agreement in // the // policy. Calling `setIamPolicy()` to remove the last ToS-accepted // owner // from the policy will fail. This restriction also applies to // legacy // projects that no longer have owners who have accepted the ToS. Edits // to // IAM policies will be rejected until the lack of a ToS-accepting owner // is // rectified. // // + This method will replace the existing policy, and cannot be used // to // append additional IAM settings. // // Note: Removing service accounts from policies or changing their // roles // can render services completely inoperable. It is important to // understand // how the service account is being used before removing or updating // its // roles. func (r *ProjectsService) SetIamPolicy(resource string, setiampolicyrequest *SetIamPolicyRequest) *ProjectsSetIamPolicyCall { c := &ProjectsSetIamPolicyCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.setiampolicyrequest = setiampolicyrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsSetIamPolicyCall) Fields(s ...googleapi.Field) *ProjectsSetIamPolicyCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. 
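//
// A minimal sketch of bounding the call with a deadline (assumes the
// standard library context and time packages; "call" is illustrative):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
//	defer cancel()
//	policy, err := call.Context(ctx).Do()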
func (c *ProjectsSetIamPolicyCall) Context(ctx context.Context) *ProjectsSetIamPolicyCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsSetIamPolicyCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.setiampolicyrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{resource}:setIamPolicy") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.setIamPolicy" call. // Exactly one of *Policy or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Policy.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsSetIamPolicyCall) Do(opts ...googleapi.CallOption) (*Policy, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Policy{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Sets the IAM access control policy for the specified Project. Overwrites\nany existing policy.\n\nThe following constraints apply when using `setIamPolicy()`:\n\n+ Project does not support `allUsers` and `allAuthenticatedUsers` as\n`members` in a `Binding` of a `Policy`.\n\n+ The owner role can be granted only to `user` and `serviceAccount`.\n\n+ Service accounts can be made owners of a project directly\nwithout any restrictions. However, to be added as an owner, a user must be\ninvited via Cloud Platform console and must accept the invitation.\n\n+ A user cannot be granted the owner role using `setIamPolicy()`. The user\nmust be granted the owner role using the Cloud Platform Console and must\nexplicitly accept the invitation.\n\n+ Invitations to grant the owner role cannot be sent using\n`setIamPolicy()`; they must be sent only using the Cloud Platform Console.\n\n+ Membership changes that leave the project without any owners that have\naccepted the Terms of Service (ToS) will be rejected.\n\n+ If the project is not part of an organization, there must be at least\none owner who has accepted the Terms of Service (ToS) agreement in the\npolicy. 
Calling `setIamPolicy()` to remove the last ToS-accepted owner\nfrom the policy will fail. This restriction also applies to legacy\nprojects that no longer have owners who have accepted the ToS. Edits to\nIAM policies will be rejected until the lack of a ToS-accepting owner is\nrectified.\n\n+ This method will replace the existing policy, and cannot be used to\nappend additional IAM settings.\n\nNote: Removing service accounts from policies or changing their roles\ncan render services completely inoperable. It is important to understand\nhow the service account is being used before removing or updating its\nroles.", // "flatPath": "v1beta1/projects/{resource}:setIamPolicy", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.setIamPolicy", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy is being specified.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{resource}:setIamPolicy", // "request": { // "$ref": "SetIamPolicyRequest" // }, // "response": { // "$ref": "Policy" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.projects.testIamPermissions": type ProjectsTestIamPermissionsCall struct { s *Service resource string testiampermissionsrequest *TestIamPermissionsRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // TestIamPermissions: Returns permissions that a caller has on the // specified Project. func (r *ProjectsService) TestIamPermissions(resource string, testiampermissionsrequest *TestIamPermissionsRequest) *ProjectsTestIamPermissionsCall { c := &ProjectsTestIamPermissionsCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.resource = resource c.testiampermissionsrequest = testiampermissionsrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsTestIamPermissionsCall) Fields(s ...googleapi.Field) *ProjectsTestIamPermissionsCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsTestIamPermissionsCall) Context(ctx context.Context) *ProjectsTestIamPermissionsCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsTestIamPermissionsCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.testiampermissionsrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{resource}:testIamPermissions") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "resource": c.resource, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.testIamPermissions" call. // Exactly one of *TestIamPermissionsResponse or error will be non-nil. // Any non-2xx status code is an error. Response headers are in either // *TestIamPermissionsResponse.ServerResponse.Header or (if a response // was returned at all) in error.(*googleapi.Error).Header. Use // googleapi.IsNotModified to check whether the returned error was // because http.StatusNotModified was returned. func (c *ProjectsTestIamPermissionsCall) Do(opts ...googleapi.CallOption) (*TestIamPermissionsResponse, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &TestIamPermissionsResponse{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Returns permissions that a caller has on the specified Project.", // "flatPath": "v1beta1/projects/{resource}:testIamPermissions", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.testIamPermissions", // "parameterOrder": [ // "resource" // ], // "parameters": { // "resource": { // "description": "REQUIRED: The resource for which the policy detail is being requested.\nSee the operation documentation for the appropriate value for this field.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{resource}:testIamPermissions", // "request": { // "$ref": "TestIamPermissionsRequest" // }, // "response": { // "$ref": "TestIamPermissionsResponse" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform", // "https://www.googleapis.com/auth/cloud-platform.read-only" // ] // } } // method id "cloudresourcemanager.projects.undelete": type ProjectsUndeleteCall struct { s *Service projectId string undeleteprojectrequest *UndeleteProjectRequest urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Undelete: Restores the Project identified by the // specified // `project_id` (for example, `my-project-123`). // You can only use this method for a Project that has a lifecycle state // of // DELETE_REQUESTED. // After deletion starts, the Project cannot be restored. // // The caller must have modify permissions for this Project. func (r *ProjectsService) Undelete(projectId string, undeleteprojectrequest *UndeleteProjectRequest) *ProjectsUndeleteCall { c := &ProjectsUndeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId c.undeleteprojectrequest = undeleteprojectrequest return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. 
func (c *ProjectsUndeleteCall) Fields(s ...googleapi.Field) *ProjectsUndeleteCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsUndeleteCall) Context(ctx context.Context) *ProjectsUndeleteCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsUndeleteCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.undeleteprojectrequest) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}:undelete") urls += "?" + c.urlParams_.Encode() req, err := http.NewRequest("POST", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.undelete" call. // Exactly one of *Empty or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Empty.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsUndeleteCall) Do(opts ...googleapi.CallOption) (*Empty, error) { gensupport.SetOptions(c.urlParams_, opts...) 
res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Empty{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Restores the Project identified by the specified\n`project_id` (for example, `my-project-123`).\nYou can only use this method for a Project that has a lifecycle state of\nDELETE_REQUESTED.\nAfter deletion starts, the Project cannot be restored.\n\nThe caller must have modify permissions for this Project.", // "flatPath": "v1beta1/projects/{projectId}:undelete", // "httpMethod": "POST", // "id": "cloudresourcemanager.projects.undelete", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The project ID (for example, `foo-bar-123`).\n\nRequired.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{projectId}:undelete", // "request": { // "$ref": "UndeleteProjectRequest" // }, // "response": { // "$ref": "Empty" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } } // method id "cloudresourcemanager.projects.update": type ProjectsUpdateCall struct { s *Service projectId string project *Project urlParams_ gensupport.URLParams ctx_ context.Context header_ http.Header } // Update: Updates the attributes of the Project identified by the // specified // `project_id` (for example, `my-project-123`). // // The caller must have modify permissions for this Project. func (r *ProjectsService) Update(projectId string, project *Project) *ProjectsUpdateCall { c := &ProjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)} c.projectId = projectId c.project = project return c } // Fields allows partial responses to be retrieved. See // https://developers.google.com/gdata/docs/2.0/basics#PartialResponse // for more information. func (c *ProjectsUpdateCall) Fields(s ...googleapi.Field) *ProjectsUpdateCall { c.urlParams_.Set("fields", googleapi.CombineFields(s)) return c } // Context sets the context to be used in this call's Do method. Any // pending HTTP request will be aborted if the provided context is // canceled. func (c *ProjectsUpdateCall) Context(ctx context.Context) *ProjectsUpdateCall { c.ctx_ = ctx return c } // Header returns an http.Header that can be modified by the caller to // add HTTP headers to the request. func (c *ProjectsUpdateCall) Header() http.Header { if c.header_ == nil { c.header_ = make(http.Header) } return c.header_ } func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) for k, v := range c.header_ { reqHeaders[k] = v } reqHeaders.Set("User-Agent", c.s.userAgent()) var body io.Reader = nil body, err := googleapi.WithoutDataWrapper.JSONReader(c.project) if err != nil { return nil, err } reqHeaders.Set("Content-Type", "application/json") c.urlParams_.Set("alt", alt) c.urlParams_.Set("prettyPrint", "false") urls := googleapi.ResolveRelative(c.s.BasePath, "v1beta1/projects/{projectId}") urls += "?" 
+ c.urlParams_.Encode() req, err := http.NewRequest("PUT", urls, body) if err != nil { return nil, err } req.Header = reqHeaders googleapi.Expand(req.URL, map[string]string{ "projectId": c.projectId, }) return gensupport.SendRequest(c.ctx_, c.s.client, req) } // Do executes the "cloudresourcemanager.projects.update" call. // Exactly one of *Project or error will be non-nil. Any non-2xx status // code is an error. Response headers are in either // *Project.ServerResponse.Header or (if a response was returned at all) // in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to // check whether the returned error was because http.StatusNotModified // was returned. func (c *ProjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Project, error) { gensupport.SetOptions(c.urlParams_, opts...) res, err := c.doRequest("json") if res != nil && res.StatusCode == http.StatusNotModified { if res.Body != nil { res.Body.Close() } return nil, &googleapi.Error{ Code: res.StatusCode, Header: res.Header, } } if err != nil { return nil, err } defer googleapi.CloseBody(res) if err := googleapi.CheckResponse(res); err != nil { return nil, err } ret := &Project{ ServerResponse: googleapi.ServerResponse{ Header: res.Header, HTTPStatusCode: res.StatusCode, }, } target := &ret if err := gensupport.DecodeResponse(target, res); err != nil { return nil, err } return ret, nil // { // "description": "Updates the attributes of the Project identified by the specified\n`project_id` (for example, `my-project-123`).\n\nThe caller must have modify permissions for this Project.", // "flatPath": "v1beta1/projects/{projectId}", // "httpMethod": "PUT", // "id": "cloudresourcemanager.projects.update", // "parameterOrder": [ // "projectId" // ], // "parameters": { // "projectId": { // "description": "The project ID (for example, `my-project-123`).\n\nRequired.", // "location": "path", // "required": true, // "type": "string" // } // }, // "path": "v1beta1/projects/{projectId}", // "request": { // "$ref": "Project" // }, // "response": { // "$ref": "Project" // }, // "scopes": [ // "https://www.googleapis.com/auth/cloud-platform" // ] // } }
{ if client == nil { return nil, errors.New("client is nil") } s := &Service{client: client, BasePath: basePath} s.Organizations = NewOrganizationsService(s) s.Projects = NewProjectsService(s) return s, nil }
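// Editor's note (added example, not part of the generated client): a minimal,
// hedged sketch of how the ProjectsListCall.Pages helper defined earlier in
// this file might be used to walk every page of list results. The import
// alias, the assumption that the constructor shown above is named New, the use
// of http.DefaultClient, and the assumption that the generated Project type
// exposes a ProjectId field are illustrative only.
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"

	crm "google.golang.org/api/cloudresourcemanager/v1beta1"
)

func main() {
	ctx := context.Background()

	// http.DefaultClient keeps the sketch self-contained; a real caller would
	// pass an OAuth2-authenticated *http.Client here.
	svc, err := crm.New(http.DefaultClient)
	if err != nil {
		log.Fatal(err)
	}

	// Pages calls Do repeatedly, follows NextPageToken, and restores the
	// original page token when it finishes, as implemented above.
	call := svc.Projects.List().PageSize(100)
	if err := call.Pages(ctx, func(page *crm.ListProjectsResponse) error {
		for _, p := range page.Projects {
			fmt.Println(p.ProjectId)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}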
help.go
// Copyright 2012 Jesse van den Kieboom. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package flags import ( "bufio" "bytes" "fmt" "io" "reflect" "strings" "unicode/utf8" ) type alignmentInfo struct { maxLongLen int hasShort bool hasValueName bool terminalColumns int indent bool } func (p *Parser) getAlignmentInfo() alignmentInfo { ret := alignmentInfo{ maxLongLen: 0, hasShort: false, hasValueName: false, terminalColumns: getTerminalColumns(), } if ret.terminalColumns <= 0 { ret.terminalColumns = 80 } p.eachActiveGroup(func(c *Command, grp *Group) { for _, info := range grp.options { if !info.canCli() { continue } if info.ShortName != 0 { ret.hasShort = true } lv := utf8.RuneCountInString(info.ValueName) if lv != 0 { ret.hasValueName = true } l := utf8.RuneCountInString(info.LongName) + lv if c != p.Command { // for indenting l = l + 4 } if l > ret.maxLongLen { ret.maxLongLen = l } } }) return ret } func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) { line := &bytes.Buffer{} distanceBetweenOptionAndDescription := 2 paddingBeforeOption := 2 prefix := paddingBeforeOption if info.indent { prefix += 4 } line.WriteString(strings.Repeat(" ", prefix)) if option.ShortName != 0 { line.WriteRune(defaultShortOptDelimiter) line.WriteRune(option.ShortName) } else if info.hasShort { line.WriteString(" ") } descstart := info.maxLongLen + paddingBeforeOption + distanceBetweenOptionAndDescription if info.hasShort { descstart += 2 } if info.maxLongLen > 0 { descstart += 4 } if info.hasValueName { descstart += 3 } if len(option.LongName) > 0 { if option.ShortName != 0 { line.WriteString(", ") } else if info.hasShort { line.WriteString(" ") } line.WriteString(defaultLongOptDelimiter) line.WriteString(option.LongName) } if option.canArgument() { line.WriteRune(defaultNameArgDelimiter) if len(option.ValueName) > 0 { line.WriteString(option.ValueName) } } written := line.Len() line.WriteTo(writer) if option.Description != "" { dw := descstart - written writer.WriteString(strings.Repeat(" ", dw)) def := "" defs := option.Default if len(option.DefaultMask) != 0 { if option.DefaultMask != "-" { def = option.DefaultMask } } else if len(defs) == 0 && option.canArgument() { var showdef bool switch option.field.Type.Kind() { case reflect.Func, reflect.Ptr: showdef = !option.value.IsNil() case reflect.Slice, reflect.String, reflect.Array: showdef = option.value.Len() > 0 case reflect.Map: showdef = !option.value.IsNil() && option.value.Len() > 0 default: zeroval := reflect.Zero(option.field.Type) showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface()) } if showdef { def, _ = convertToString(option.value, option.tag) } } else if len(defs) != 0 { def = strings.Join(defs, ", ") } var desc string if def != "" { desc = fmt.Sprintf("%s (%v)", option.Description, def) } else { desc = option.Description } writer.WriteString(wrapText(desc, info.terminalColumns-descstart, strings.Repeat(" ", descstart))) } writer.WriteString("\n") } func
(s []*Command) int { if len(s) == 0 { return 0 } ret := len(s[0].Name) for _, v := range s[1:] { l := len(v.Name) if l > ret { ret = l } } return ret } // WriteHelp writes a help message containing all the possible options and // their descriptions to the provided writer. Note that the HelpFlag parser // option provides a convenient way to add a -h/--help option group to the // command line parser which will automatically show the help messages using // this method. func (p *Parser) WriteHelp(writer io.Writer) { if writer == nil { return } wr := bufio.NewWriter(writer) aligninfo := p.getAlignmentInfo() cmd := p.Command for cmd.Active != nil { cmd = cmd.Active } if p.Name != "" { wr.WriteString("Usage:\n") wr.WriteString(" ") allcmd := p.Command for allcmd != nil { var usage string if allcmd == p.Command { if len(p.Usage) != 0 { usage = p.Usage } else if p.Options&HelpFlag != 0 { usage = "[OPTIONS]" } } else if us, ok := allcmd.data.(Usage); ok { usage = us.Usage() } else if allcmd.hasCliOptions() { usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name) } if len(usage) != 0 { fmt.Fprintf(wr, " %s %s", allcmd.Name, usage) } else { fmt.Fprintf(wr, " %s", allcmd.Name) } if allcmd.Active == nil && len(allcmd.commands) > 0 { var co, cc string if allcmd.SubcommandsOptional { co, cc = "[", "]" } else { co, cc = "<", ">" } if len(allcmd.commands) > 3 { fmt.Fprintf(wr, " %scommand%s", co, cc) } else { subcommands := allcmd.sortedCommands() names := make([]string, len(subcommands)) for i, subc := range subcommands { names[i] = subc.Name } fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc) } } allcmd = allcmd.Active } fmt.Fprintln(wr) if len(cmd.LongDescription) != 0 { fmt.Fprintln(wr) t := wrapText(cmd.LongDescription, aligninfo.terminalColumns, "") fmt.Fprintln(wr, t) } } prevcmd := p.Command p.eachActiveGroup(func(c *Command, grp *Group) { first := true // Skip built-in help group for all commands except the top-level // parser if grp.isBuiltinHelp && c != p.Command { return } for _, info := range grp.options { if info.canCli() { if prevcmd != c { fmt.Fprintf(wr, "\n[%s command options]\n", c.Name) prevcmd = c aligninfo.indent = true } if first && prevcmd.Group != grp { fmt.Fprintln(wr) if aligninfo.indent { wr.WriteString(" ") } fmt.Fprintf(wr, "%s:\n", grp.ShortDescription) first = false } p.writeHelpOption(wr, info, aligninfo) } } }) scommands := cmd.sortedCommands() if len(scommands) > 0 { maxnamelen := maxCommandLength(scommands) fmt.Fprintln(wr) fmt.Fprintln(wr, "Available commands:") for _, c := range scommands { fmt.Fprintf(wr, " %s", c.Name) if len(c.ShortDescription) > 0 { pad := strings.Repeat(" ", maxnamelen-len(c.Name)) fmt.Fprintf(wr, "%s %s", pad, c.ShortDescription) } fmt.Fprintln(wr) } } wr.Flush() }
maxCommandLength
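// Editor's note (added example, not part of help.go): a small, hedged sketch
// of how the WriteHelp/writeHelpOption machinery above is typically reached
// from user code. The option struct, program behaviour and import path are
// assumptions made for illustration.
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type options struct {
	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
	Name    string `short:"n" long:"name" description:"Name to greet" default:"world"`
}

func main() {
	var opts options
	parser := flags.NewParser(&opts, flags.HelpFlag|flags.PassDoubleDash)

	if _, err := parser.Parse(); err != nil {
		// When -h/--help is supplied, Parse returns an ErrHelp error and the
		// help text itself is rendered by WriteHelp, i.e. the code above.
		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
			parser.WriteHelp(os.Stdout)
			os.Exit(0)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	fmt.Println("hello,", opts.Name)
}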
keeper.go
package keeper import ( "fmt" "strconv" "strings" "github.com/tendermint/tendermint/libs/log" db "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" clienttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/02-client/types" connectiontypes "github.com/cosmos/cosmos-sdk/x/ibc/core/03-connection/types" "github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/types" porttypes "github.com/cosmos/cosmos-sdk/x/ibc/core/05-port/types" host "github.com/cosmos/cosmos-sdk/x/ibc/core/24-host" "github.com/cosmos/cosmos-sdk/x/ibc/core/exported" ) // Keeper defines the IBC channel keeper type Keeper struct { // implements gRPC QueryServer interface types.QueryServer storeKey sdk.StoreKey cdc codec.BinaryMarshaler clientKeeper types.ClientKeeper connectionKeeper types.ConnectionKeeper portKeeper types.PortKeeper scopedKeeper capabilitykeeper.ScopedKeeper } // NewKeeper creates a new IBC channel Keeper instance func NewKeeper( cdc codec.BinaryMarshaler, key sdk.StoreKey, clientKeeper types.ClientKeeper, connectionKeeper types.ConnectionKeeper, portKeeper types.PortKeeper, scopedKeeper capabilitykeeper.ScopedKeeper, ) Keeper
// Logger returns a module-specific logger.
func (k Keeper) Logger(ctx sdk.Context) log.Logger {
	return ctx.Logger().With("module", fmt.Sprintf("x/%s/%s", host.ModuleName, types.SubModuleName))
}

// GetChannel returns a channel with a particular identifier bound to a specific port
func (k Keeper) GetChannel(ctx sdk.Context, portID, channelID string) (types.Channel, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyChannel(portID, channelID))
	if bz == nil {
		return types.Channel{}, false
	}
	var channel types.Channel
	k.cdc.MustUnmarshalBinaryBare(bz, &channel)
	return channel, true
}

// SetChannel sets a channel to the store
func (k Keeper) SetChannel(ctx sdk.Context, portID, channelID string, channel types.Channel) {
	store := ctx.KVStore(k.storeKey)
	bz := k.cdc.MustMarshalBinaryBare(&channel)
	store.Set(host.KeyChannel(portID, channelID), bz)
}

// GetNextSequenceSend gets a channel's next send sequence from the store
func (k Keeper) GetNextSequenceSend(ctx sdk.Context, portID, channelID string) (uint64, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyNextSequenceSend(portID, channelID))
	if bz == nil {
		return 0, false
	}
	return sdk.BigEndianToUint64(bz), true
}

// SetNextSequenceSend sets a channel's next send sequence to the store
func (k Keeper) SetNextSequenceSend(ctx sdk.Context, portID, channelID string, sequence uint64) {
	store := ctx.KVStore(k.storeKey)
	bz := sdk.Uint64ToBigEndian(sequence)
	store.Set(host.KeyNextSequenceSend(portID, channelID), bz)
}

// GetNextSequenceRecv gets a channel's next receive sequence from the store
func (k Keeper) GetNextSequenceRecv(ctx sdk.Context, portID, channelID string) (uint64, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyNextSequenceRecv(portID, channelID))
	if bz == nil {
		return 0, false
	}
	return sdk.BigEndianToUint64(bz), true
}

// SetNextSequenceRecv sets a channel's next receive sequence to the store
func (k Keeper) SetNextSequenceRecv(ctx sdk.Context, portID, channelID string, sequence uint64) {
	store := ctx.KVStore(k.storeKey)
	bz := sdk.Uint64ToBigEndian(sequence)
	store.Set(host.KeyNextSequenceRecv(portID, channelID), bz)
}

// GetNextSequenceAck gets a channel's next ack sequence from the store
func (k Keeper) GetNextSequenceAck(ctx sdk.Context, portID, channelID string) (uint64, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyNextSequenceAck(portID, channelID))
	if bz == nil {
		return 0, false
	}
	return sdk.BigEndianToUint64(bz), true
}

// SetNextSequenceAck sets a channel's next ack sequence to the store
func (k Keeper) SetNextSequenceAck(ctx sdk.Context, portID, channelID string, sequence uint64) {
	store := ctx.KVStore(k.storeKey)
	bz := sdk.Uint64ToBigEndian(sequence)
	store.Set(host.KeyNextSequenceAck(portID, channelID), bz)
}

// GetPacketReceipt gets a packet receipt from the store
func (k Keeper) GetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) (string, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyPacketReceipt(portID, channelID, sequence))
	if bz == nil {
		return "", false
	}
	return string(bz), true
}

// SetPacketReceipt sets an empty packet receipt to the store
func (k Keeper) SetPacketReceipt(ctx sdk.Context, portID, channelID string, sequence uint64) {
	store := ctx.KVStore(k.storeKey)
	store.Set(host.KeyPacketReceipt(portID, channelID, sequence), []byte(""))
}

// GetPacketCommitment gets the packet commitment hash from the store
func (k Keeper) GetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) []byte {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyPacketCommitment(portID, channelID, sequence))
	return bz
}

// HasPacketCommitment returns true if the packet commitment exists
func (k Keeper) HasPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) bool {
	store := ctx.KVStore(k.storeKey)
	return store.Has(host.KeyPacketCommitment(portID, channelID, sequence))
}

// SetPacketCommitment sets the packet commitment hash to the store
func (k Keeper) SetPacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64, commitmentHash []byte) {
	store := ctx.KVStore(k.storeKey)
	store.Set(host.KeyPacketCommitment(portID, channelID, sequence), commitmentHash)
}

func (k Keeper) deletePacketCommitment(ctx sdk.Context, portID, channelID string, sequence uint64) {
	store := ctx.KVStore(k.storeKey)
	store.Delete(host.KeyPacketCommitment(portID, channelID, sequence))
}

// SetPacketAcknowledgement sets the packet ack hash to the store
func (k Keeper) SetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64, ackHash []byte) {
	store := ctx.KVStore(k.storeKey)
	store.Set(host.KeyPacketAcknowledgement(portID, channelID, sequence), ackHash)
}

// GetPacketAcknowledgement gets the packet ack hash from the store
func (k Keeper) GetPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) ([]byte, bool) {
	store := ctx.KVStore(k.storeKey)
	bz := store.Get(host.KeyPacketAcknowledgement(portID, channelID, sequence))
	if bz == nil {
		return nil, false
	}
	return bz, true
}

// HasPacketAcknowledgement checks if the packet ack hash is already in the store
func (k Keeper) HasPacketAcknowledgement(ctx sdk.Context, portID, channelID string, sequence uint64) bool {
	store := ctx.KVStore(k.storeKey)
	return store.Has(host.KeyPacketAcknowledgement(portID, channelID, sequence))
}

// IteratePacketSequence provides an iterator over all send, receive or ack sequences.
// For each sequence, cb will be called. If the cb returns true, the iterator
// will close and stop.
func (k Keeper) IteratePacketSequence(ctx sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64) bool) {
	defer iterator.Close()
	for ; iterator.Valid(); iterator.Next() {
		portID, channelID, err := host.ParseChannelPath(string(iterator.Key()))
		if err != nil {
			// return if the key is not a channel key
			return
		}
		sequence := sdk.BigEndianToUint64(iterator.Value())
		if cb(portID, channelID, sequence) {
			break
		}
	}
}

// GetAllPacketSendSeqs returns all stored next send sequences.
func (k Keeper) GetAllPacketSendSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqSendPrefix))
	k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextSendSeq uint64) bool {
		ps := types.NewPacketSequence(portID, channelID, nextSendSeq)
		seqs = append(seqs, ps)
		return false
	})
	return seqs
}

// GetAllPacketRecvSeqs returns all stored next recv sequences.
func (k Keeper) GetAllPacketRecvSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqRecvPrefix))
	k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextRecvSeq uint64) bool {
		ps := types.NewPacketSequence(portID, channelID, nextRecvSeq)
		seqs = append(seqs, ps)
		return false
	})
	return seqs
}

// GetAllPacketAckSeqs returns all stored next acknowledgement sequences.
func (k Keeper) GetAllPacketAckSeqs(ctx sdk.Context) (seqs []types.PacketSequence) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyNextSeqAckPrefix))
	k.IteratePacketSequence(ctx, iterator, func(portID, channelID string, nextAckSeq uint64) bool {
		ps := types.NewPacketSequence(portID, channelID, nextAckSeq)
		seqs = append(seqs, ps)
		return false
	})
	return seqs
}

// IteratePacketCommitment provides an iterator over all PacketCommitment objects. For each
// packet commitment, cb will be called. If the cb returns true, the iterator will close
// and stop.
func (k Keeper) IteratePacketCommitment(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketCommitmentPrefix))
	k.iterateHashes(ctx, iterator, cb)
}

// GetAllPacketCommitments returns all stored PacketCommitment objects.
func (k Keeper) GetAllPacketCommitments(ctx sdk.Context) (commitments []types.PacketAckCommitment) {
	k.IteratePacketCommitment(ctx, func(portID, channelID string, sequence uint64, hash []byte) bool {
		pc := types.NewPacketAckCommitment(portID, channelID, sequence, hash)
		commitments = append(commitments, pc)
		return false
	})
	return commitments
}

// IteratePacketCommitmentAtChannel provides an iterator over all PacketCommitment objects
// at a specified channel. For each packet commitment, cb will be called. If the cb returns
// true, the iterator will close and stop.
func (k Keeper) IteratePacketCommitmentAtChannel(ctx sdk.Context, portID, channelID string, cb func(_, _ string, sequence uint64, hash []byte) bool) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.PacketCommitmentPrefixPath(portID, channelID)))
	k.iterateHashes(ctx, iterator, cb)
}

// GetAllPacketCommitmentsAtChannel returns all stored PacketCommitment objects for a specified
// port ID and channel ID.
func (k Keeper) GetAllPacketCommitmentsAtChannel(ctx sdk.Context, portID, channelID string) (commitments []types.PacketAckCommitment) {
	k.IteratePacketCommitmentAtChannel(ctx, portID, channelID, func(_, _ string, sequence uint64, hash []byte) bool {
		pc := types.NewPacketAckCommitment(portID, channelID, sequence, hash)
		commitments = append(commitments, pc)
		return false
	})
	return commitments
}

// IteratePacketAcknowledgement provides an iterator over all PacketAcknowledgement objects. For each
// acknowledgement, cb will be called. If the cb returns true, the iterator will close
// and stop.
func (k Keeper) IteratePacketAcknowledgement(ctx sdk.Context, cb func(portID, channelID string, sequence uint64, hash []byte) bool) {
	store := ctx.KVStore(k.storeKey)
	iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyPacketAckPrefix))
	k.iterateHashes(ctx, iterator, cb)
}

// GetAllPacketAcks returns all stored PacketAcknowledgement objects.
func (k Keeper) GetAllPacketAcks(ctx sdk.Context) (acks []types.PacketAckCommitment) {
	k.IteratePacketAcknowledgement(ctx, func(portID, channelID string, sequence uint64, ack []byte) bool {
		packetAck := types.NewPacketAckCommitment(portID, channelID, sequence, ack)
		acks = append(acks, packetAck)
		return false
	})
	return acks
}

// IterateChannels provides an iterator over all Channel objects. For each
// Channel, cb will be called. If the cb returns true, the iterator will close
// and stop.
func (k Keeper) IterateChannels(ctx sdk.Context, cb func(types.IdentifiedChannel) bool) { store := ctx.KVStore(k.storeKey) iterator := sdk.KVStorePrefixIterator(store, []byte(host.KeyChannelPrefix)) defer iterator.Close() for ; iterator.Valid(); iterator.Next() { var channel types.Channel k.cdc.MustUnmarshalBinaryBare(iterator.Value(), &channel) portID, channelID := host.MustParseChannelPath(string(iterator.Key())) identifiedChannel := types.NewIdentifiedChannel(portID, channelID, channel) if cb(identifiedChannel) { break } } } // GetAllChannels returns all stored Channel objects. func (k Keeper) GetAllChannels(ctx sdk.Context) (channels []types.IdentifiedChannel) { k.IterateChannels(ctx, func(channel types.IdentifiedChannel) bool { channels = append(channels, channel) return false }) return channels } // GetChannelClientState returns the associated client state with its ID, from a port and channel identifier. func (k Keeper) GetChannelClientState(ctx sdk.Context, portID, channelID string) (string, exported.ClientState, error) { channel, found := k.GetChannel(ctx, portID, channelID) if !found { return "", nil, sdkerrors.Wrapf(types.ErrChannelNotFound, "port-id: %s, channel-id: %s", portID, channelID) } connection, found := k.connectionKeeper.GetConnection(ctx, channel.ConnectionHops[0]) if !found { return "", nil, sdkerrors.Wrapf(connectiontypes.ErrConnectionNotFound, "connection-id: %s", channel.ConnectionHops[0]) } clientState, found := k.clientKeeper.GetClientState(ctx, connection.ClientId) if !found { return "", nil, sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client-id: %s", connection.ClientId) } return connection.ClientId, clientState, nil } // LookupModuleByChannel will return the IBCModule along with the capability associated with a given channel defined by its portID and channelID func (k Keeper) LookupModuleByChannel(ctx sdk.Context, portID, channelID string) (string, *capabilitytypes.Capability, error) { modules, cap, err := k.scopedKeeper.LookupModules(ctx, host.ChannelCapabilityPath(portID, channelID)) if err != nil { return "", nil, err } return porttypes.GetModuleOwner(modules), cap, nil } // common functionality for IteratePacketCommitment and IteratePacketAcknowledgement func (k Keeper) iterateHashes(_ sdk.Context, iterator db.Iterator, cb func(portID, channelID string, sequence uint64, hash []byte) bool) { defer iterator.Close() for ; iterator.Valid(); iterator.Next() { keySplit := strings.Split(string(iterator.Key()), "/") portID := keySplit[2] channelID := keySplit[4] sequence, err := strconv.ParseUint(keySplit[len(keySplit)-1], 10, 64) if err != nil { panic(err) } if cb(portID, channelID, sequence, iterator.Value()) { break } } }
{ return Keeper{ storeKey: key, cdc: cdc, clientKeeper: clientKeeper, connectionKeeper: connectionKeeper, portKeeper: portKeeper, scopedKeeper: scopedKeeper, } }
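// Editor's note (added example, not part of this keeper): a hedged,
// illustrative helper showing how the getter/setter pairs above are usually
// combined. The package name, the import path of the keeper package and the
// fallback-to-1 behaviour are assumptions for the sketch, not part of the IBC
// module itself.
package channelutil

import (
	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/cosmos/cosmos-sdk/x/ibc/core/04-channel/keeper"
)

// NextSendSequence reads the next send sequence for (portID, channelID),
// falling back to 1 if none has been stored yet, persists the incremented
// value via SetNextSequenceSend and returns the sequence to use.
func NextSendSequence(ctx sdk.Context, k keeper.Keeper, portID, channelID string) uint64 {
	seq, found := k.GetNextSequenceSend(ctx, portID, channelID)
	if !found {
		seq = 1
	}
	k.SetNextSequenceSend(ctx, portID, channelID, seq+1)
	return seq
}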
index.js
var React = require('react'), Reflux = require('reflux'), Router = require('react-router'), { NotFoundRoute, State, Link, Route, RouteHandler, DefaultRoute } = Router, osmAuth = require('osm-auth'), haversine = require('haversine'), xhr = require('xhr'), qs = require('querystring'); window.React = React; // Constants for API endpoints const API06 = 'https://api.openstreetmap.org/api/0.6/', OVERPASS = 'https://overpass-api.de/api/interpreter'; // Constants for our OAuth connection to OpenStreetMap. const OAUTH_CONSUMER_KEY = 'ba5eNXgk15yUZu0HKOiiaj6TGMwGPXZTCguB1284', OAUTH_SECRET = 'Ln2ownAA5vcP8ag7QV5BV8wJiLXEmlgbC01QFTcc'; // # Configuration // This is used to show certain nodes in the list: otherwise the ones // we're looking for would be crowded out by telephone poles etc. const KEYPAIR = { k: 'amenity', v: 'cafe' }, TAG = 'cost:coffee', // The version string is added to changesets to let OSM know which // editor software is responsible for which changes. VERSION = 'poism', MBX = 'pk.eyJ1IjoidG1jdyIsImEiOiIzczJRVGdRIn0.DKkDbTPnNUgHqTDBg7_zRQ', MAP = 'tmcw.kbh273ee', PIN = 'pin-l-cafe', LOC = 'pin-s'; L.mapbox.accessToken = MBX; // # Parsing & Producing XML var a = (nl) => Array.prototype.slice.call(nl), attr = (n, k) => n.getAttribute(k), serializer = new XMLSerializer(); // Given an XML DOM in OSM format and an object of the form // // { k, v } // // Find all nodes with that key combination and return them // in the form // // { xml: Node, tags: {}, id: 'osm-id' } var parser = (xml, kv) => a(xml.getElementsByTagName('node')).map(node => a(node.getElementsByTagName('tag')).reduce((memo, tag) => { memo.tags[attr(tag, 'k')] = attr(tag, 'v'); return memo; }, { xml: node, tags: {}, id: attr(node, 'id'), location: { latitude: parseFloat(attr(node, 'lat')), longitude: parseFloat(attr(node, 'lon')) } })) .filter(node => node.tags[kv.k] === kv.v); var serialize = (xml) => serializer.serializeToString(xml) .replace('xmlns="http://www.w3.org/1999/xhtml"', ''); // Since we're building XML the hacky way by formatting strings, // we'll need to escape strings so that places like "Charlie's Shop" // don't make invalid XML. var escape = _ => _.replace(/&/g, '&amp;') .replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;'); // Generate the XML payload necessary to open a new changeset in OSM var changesetCreate = (comment) => `<osm><changeset> <tag k="created_by" v="${VERSION}" /> <tag k="comment" v="${escape(comment)}" /> </changeset></osm>`; // After the OSM changeset is opened, we need to send the changes: // this generates the necessary XML to add or update a specific // tag on a single node. 
var changesetChange = (node, tag, id) => { a(node.getElementsByTagName('tag')) .filter(tagElem => tagElem.getAttribute('k') === tag.k) .forEach(tagElem => node.removeChild(tagElem)); node.setAttribute('changeset', id); var newTag = node.appendChild(document.createElement('tag')); newTag.setAttribute('k', tag.k); newTag.setAttribute('v', tag.v); return `<osmChange version="0.3" generator="${VERSION}"> <modify>${serialize(node)}</modify> </osmChange>`; }; var sortDistance = (location) => (a, b) => haversine(location, a.location) - haversine(location, b.location); var queryOverpass = (center, kv, callback) => { const RADIUS = 0.1; var bbox = [ center.latitude - RADIUS, center.longitude - RADIUS, center.latitude + RADIUS, center.longitude + RADIUS ].join(','); var query = `[out:xml][timeout:25]; (node["${kv.k}"="${kv.v}"](${bbox});); out body; >; out skel qt;`; xhr({ uri: OVERPASS, method: 'POST', body: query }, callback); }; // # Stores var locationStore = Reflux.createStore({ location: { latitude: 0, longitude: 0 }, getInitialState() { return this.location; }, init() { this.watcher = navigator.geolocation.watchPosition(res => { if (haversine(this.location, res.coords) > 10) { this.trigger(res.coords); } this.location = res.coords; }); } }); // Here's where we store fully-formed OSM Nodes that correspond to matches. // These are listed with Overpass and then loaded in full with OSM API. // This two-step process imitates the ability to filter the OSM API - without // it, we'd have some very slow calls to the `/map/` endpoint, instead of // fast calls to the `/nodes` endpoint. var nodeLoad = Reflux.createAction(); var nodeSave = Reflux.createAction(); var nodeStore = Reflux.createStore({ nodes: {}, getInitialState() { return this.nodes; }, init() { this.listenTo(nodeLoad, this.load); this.listenTo(locationStore, this.load); this.listenTo(nodeSave, this.save); }, load(center) { queryOverpass(center, KEYPAIR, (err, resp, map) => { if (err) return console.error(err); this.loadNodes(parser(resp.responseXML, KEYPAIR) .sort(sortDistance(center)) .slice(0, 50) .map(n => n.id)); }); }, loadNodes(ids) { ids = ids.filter(id => !this.nodes[id]); if (!ids.length) return this.trigger(this.nodes); xhr({ uri: `${API06}nodes/?nodes=${ids.join(',')}`, method: 'GET' }, (err, resp, body) => { if (err) return console.error(err); parser(resp.responseXML, KEYPAIR).forEach(node => { if (!this.nodes[node.id]) this.nodes[node.id] = node; }); this.trigger(this.nodes); }); }, save(res, price, currency) { const XMLHEADER = { header: { 'Content-Type': 'text/xml' } }; var xml = res.xml; var tag = { k: TAG, v: currency + price }; var comment = `Updating coffee price to ${currency} ${price} for ${res.tags.name}`; auth.xhr({ method: 'PUT', prefix: false, options: XMLHEADER, content: changesetCreate(comment), path: `${API06}changeset/create` }, (err, id) => { if (err) return console.error(err); auth.xhr({ method: 'POST', prefix: false, options: XMLHEADER, content: changesetChange(xml, tag, id), path: `${API06}changeset/${id}/upload`, }, (err, res) => { auth.xhr({ method: 'PUT', prefix: false, path: `${API06}changeset/${id}/close` }, (err, id) => { if (err) console.error(err); router.transitionTo('/success'); }); }); }); } }); // osm-auth does the hard work of managing user authentication with // OpenStreetMap via the OAuth protocol. 
var auth = osmAuth({ oauth_consumer_key: OAUTH_CONSUMER_KEY, oauth_secret: OAUTH_SECRET, auto: false, landing: 'index.html', singlepage: true }); // Here we store the user's logged-in / logged-out status so we can show // the authentication view instead of a list as an initial pageview. var userLogin = Reflux.createAction(); var userStore = Reflux.createStore({ user: null, init() { this.user = auth.authenticated(); this.listenTo(userLogin, this.login); }, getInitialState() { return this.user; }, login() { auth.authenticate((err, details) => { this.user = auth.authenticated(); this.trigger(this.user); }); } }); // # Components // A simple shout-out and log-in button that shoots a user into the OSM // oauth flow. var LogIn = React.createClass({ render() { /* jshint ignore:start */ return (<div className='pad2'> <div className='pad1 space-bottom1'> Adding to the map requires an OpenStreetMap account. </div> <button onClick={userLogin} className='button col12 fill-green icon account'>Log in to OpenStreetMap</button> </div> ); /* jshint ignore:end */ } }); // A simple wrapper for a call to the [Mapbox Static Map API](https://www.mapbox.com/developers/api/static/) // that we use for editing pages: this gives a basic idea of where the coffee // shop is as well as a marker for your location. Helpful when there's // a Starbucks on every corner of an intersection. var StaticMap = React.createClass({ render() { return ( /* jshint ignore:start */ <img src={`https://api.tiles.mapbox.com/v4/${MAP}/` + `${PIN}(${this.props.location.longitude},${this.props.location.latitude}),` + (this.props.self ? `${LOC}(${this.props.self.longitude},${this.props.self.latitude})` : '') + `/${this.props.location.longitude},${this.props.location.latitude}` + `,14/[email protected]?access_token=${MBX}`} /> /* jshint ignore:end */ ); } }); var Page = React.createClass({ render() { return ( /* jshint ignore:start */ <div className='margin3 col6'> <div className='col12'> <RouteHandler/> </div> </div> /* jshint ignore:end */ ); } }); var values = obj => Object.keys(obj).map(key => obj[key]); // A list of potential nodes for viewing and editing. var List = React.createClass({ // We use Reflux's `.connect` method to listen for changes in stores // and automatically call setState to use their data here. mixins: [ Reflux.connect(nodeStore, 'nodes'), Reflux.connect(locationStore, 'location'), Reflux.connect(userStore, 'user')], /* jshint ignore:start */ render() { return ( <div> <div className='clearfix col12'> <div className='pad2 clearfix'> <div className='col4'> <img width={300/2} height={230/2} className='inline' src='assets/logo_inverted.png' /> </div> <div className='col8 pad2y pad1x'> <h3>poism</h3> <p className='italic'>a simple point of interest editor for OpenStreetMap</p> </div> </div> </div> {this.state.user ? <div className='pad2'> {!values(this.state.nodes).length && <div className='pad4 center'> Loading... </div>} {values(this.state.nodes) .sort(sortDistance(this.state.location)) .map(res => <Result key={res.id} res={res} />)} </div> : <LogIn />}
</div>); } /* jshint ignore:end */ }); // A single list item var Result = React.createClass({ render() { /* jshint ignore:start */ return <Link to='editor' params={{ osmId: this.props.res.id }} className='pad1 col12 clearfix fill-coffee space-bottom1'> <div className='price-tag round'> {this.props.res.tags[TAG] ? this.props.res.tags[TAG] : <span className='icon pencil'></span>} </div> <strong>{this.props.res.tags.name}</strong> </Link>; /* jshint ignore:end */ } }); var parseCurrency = str => { var number = str.match(/[\d\.]+/), currency = str.match(/[^\d\.]+/); return { currency: currency || '$', price: parseFloat((number && number[0]) || 0) }; }; // This view is shown briefly after a user completes an edit. The user // can either click/tap to go back to the list, or it'll do that automatically // in 1 second. var Success = React.createClass({ componentDidMount() { setTimeout(() => { if (this.isMounted()) { this.transitionTo('list'); } }, 1000); }, /* jshint ignore:start */ render() { return <Link to='list' className='col12 center pad4'> <h2><span className='big icon check'></span> Saved!</h2> </Link>; } /* jshint ignore:end */ }); // The help page. Doesn't have any JavaScript functionality of its own - // this is static content. var Help = React.createClass({ /* jshint ignore:start */ render() { return <div> <Link to='list' className='home icon button fill-darken2 col12'>home</Link> <div className='pad1y'> <div className='round fill-lighten0 pad2 dark'> <p><strong>COFFEEDEX</strong> is a community project that aims to track the price of house coffee everywhere.</p> <p>The data is stored in <a href='http://osm.org/'>OpenStreetMap</a>, a free and open source map of the world, as tags on existing coffeehops. There are 150,000+.</p> <p>Maps in this application are &copy; <a href='http://mapbox.com/'>Mapbox</a>.</p> <p>COFFEEDEX data stored in OpenStreetMap is <a href='http://www.openstreetmap.org/copyright'>available under the ODbL license.</a></p> <p>This is also an open source project. You can view the source code, clone it, fork it, and make new things with it as inspiration or raw parts.</p> <a className='button stroke icon github col12 space-bottom1' href='http://github.com/tmcw/coffeedex'>COFFEEDEX on GitHub</a> <p><span className='icon mobile'></span> COFFEEDEX also works great on phones! Try it on your phone and add it to your iPhone home screen - it'll look even prettier.</p> <h2>FAQ</h2> <ul> <li><strong>Which coffee?</strong> This site tracks the price of <em>house coffee</em> for here. In many cases, that means a 12oz drip, but if all coffees are pour-overs or your country uses different standard size, the overriding rule is cheapest-here.</li> </ul> </div> </div> </div>; } /* jshint ignore:end */ }); // The editor. This allows users to view and edit tags on single result items. var Editor = React.createClass({ mixins: [ Reflux.listenTo(nodeStore, 'onNodeLoad', 'onNodeLoad'), Reflux.connect(locationStore, 'location'), State], onNodeLoad(nodes) { var node = nodes[this.getParams().osmId]; if (node) { if (node.tags[TAG]) { var currency = parseCurrency(node.tags[TAG]); this.setState({ currency: currency.currency, price: currency.price, node: node }); } else { this.setState({ node: node }); } } }, getInitialState() { return { currency: '$', price: 0 }; }, // Before this view is displayed, we make sure that the node it'll // show will be loaded soon. 
statics: { willTransitionTo(transition, params) { nodeStore.loadNodes([params.osmId]); }, }, save(e) { e.preventDefault(); var node = this.state.node; nodeSave(node, this.state.price, this.state.currency); }, render() { var node = this.state.node; /* jshint ignore:start */ if (!node) return <div className='pad4 center'> Loading... </div>; return <div className='col12'> <Link to='list' className='home icon button fill-darken0 unround col12'>home</Link> <StaticMap location={node.location} self={this.state.location} /> <div className='pad1 col12 clearfix'> <div className='col12'> <div className='center'> how much for a cup of joe at </div> <h1 className='center'> {node.tags.name} </h1> </div> <div className='limit-mobile'> <div className='col12 clearfix space-bottom1'> <select valueLink={this.linkState('currency')} className='coffee-select'> {currency.map(c => <option key={c[0]} value={c[0]}>{c[1]}</option>)} </select> <input valueLink={this.linkState('price')} className='coffee-input' type='number' /> </div> <a href='#' onClick={this.save} className='fill-darken1 button col12 icon plus pad1 unround'>Save</a> </div> </div> </div>; /* jshint ignore:end */ } }); // Our router. This manages what URLs mean and where Links can go. var routes = ( /* jshint ignore:start */ <Route handler={Page} path='/'> <DefaultRoute name='list' handler={List} /> <Route name='success' path='/success' handler={Success} /> <Route name='help' path='/help' handler={Help} /> <Route name='editor' path='/edit/:osmId' handler={Editor} /> </Route> /* jshint ignore:end */ ); var router = Router.create({ routes }); // This is a little dirty: the router will rewrite paths it doesn't know, // including the path we desperately need to complete the OAuth dance. // So before booting it up, we notice if we need to bootstrap an oauth_token, // and if so, we do that before starting the application. if (location.search && !auth.authenticated()) { var oauth_token = qs.parse(location.search.replace('?', '')).oauth_token; auth.bootstrapToken(oauth_token, (err, res) => { userStore.user = true; userStore.trigger(userStore.user); router.run(Handler => { /* jshint ignore:start */ React.render(<Handler/>, document.body); /* jshint ignore:end */ }); }); } else { router.run(Handler => { /* jshint ignore:start */ React.render(<Handler/>, document.body); /* jshint ignore:end */ }); }
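// Editor's note (added example, separate from index.js): a hedged Go sketch of
// the same Overpass request that queryOverpass builds above: a 0.1-degree
// bounding box around a centre point and an amenity=cafe node query POSTed to
// the public Overpass endpoint. Function names, coordinates and error handling
// are illustrative only.
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

const overpass = "https://overpass-api.de/api/interpreter"

// queryCafes mirrors the bbox and Overpass QL construction used in index.js.
func queryCafes(lat, lon float64) (string, error) {
	const radius = 0.1
	bbox := fmt.Sprintf("%f,%f,%f,%f", lat-radius, lon-radius, lat+radius, lon+radius)
	query := fmt.Sprintf(`[out:xml][timeout:25];
(node["amenity"="cafe"](%s););
out body;
>;
out skel qt;`, bbox)

	resp, err := http.Post(overpass, "text/plain", strings.NewReader(query))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	return string(body), err
}

func main() {
	xml, err := queryCafes(38.9, -77.03)
	if err != nil {
		fmt.Println("overpass request failed:", err)
		return
	}
	fmt.Println(len(xml), "bytes of OSM XML returned")
}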
bitcoin_cy.ts
<?xml version="1.0" ?><!DOCTYPE TS><TS language="cy" version="2.1"> <context> <name>AboutDialog</name> <message> <location filename="../forms/aboutdialog.ui" line="+14"/> <source>About TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+39"/> <source>&lt;b&gt;TalonPower&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Copyright © 2009-2014 The Bitcoin developers Copyright © 2012-2014 The NovaCoin developers Copyright © 2014 The TalonPower developers</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <location filename="../forms/addressbookpage.ui" line="+14"/> <source>Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <source>Double-click to edit address or label</source> <translation>Clicio dwywaith i olygu cyfeiriad neu label</translation> </message> <message> <location line="+27"/> <source>Create a new address</source> <translation>Creu cyfeiriad newydd</translation> </message> <message> <location line="+14"/> <source>Copy the currently selected address to the system clipboard</source> <translation>Copio&apos;r cyfeiriad sydd wedi&apos;i ddewis i&apos;r clipfwrdd system</translation> </message> <message> <location line="-11"/> <source>&amp;New Address</source> <translation type="unfinished"/> </message> <message> <location line="-46"/> <source>These are your TalonPower addresses for receiving payments. 
You may want to give a different one to each sender so you can keep track of who is paying you.</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>&amp;Copy Address</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Show &amp;QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sign a message to prove you own a TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Delete the currently selected address from the list</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Verify a message to ensure it was signed with a specified TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>&amp;Delete</source> <translation>&amp;Dileu</translation> </message> <message> <location filename="../addressbookpage.cpp" line="+65"/> <source>Copy &amp;Label</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Edit</source> <translation type="unfinished"/> </message> <message> <location line="+250"/> <source>Export Address Book Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <location filename="../addresstablemodel.cpp" line="+144"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Cyfeiriad</translation> </message> <message> <location line="+36"/> <source>(no label)</source> <translation>(heb label)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <location filename="../forms/askpassphrasedialog.ui" line="+26"/> <source>Passphrase Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Enter passphrase</source> <translation>Teipiwch gyfrinymadrodd</translation> </message> <message> <location line="+14"/> <source>New passphrase</source> <translation>Cyfrinymadrodd newydd</translation> </message> <message> <location line="+14"/> <source>Repeat new passphrase</source> <translation>Ailadroddwch gyfrinymadrodd newydd</translation> </message> <message> <location line="+33"/> <source>Serves to disable the trivial sendmoney when OS account compromised. Provides no real security.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>For staking only</source> <translation type="unfinished"/> </message> <message> <location filename="../askpassphrasedialog.cpp" line="+35"/> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Dewiswch gyfrinymadrodd newydd ar gyfer y waled. 
&lt;br/&gt; Defnyddiwch gyfrinymadrodd o &lt;b&gt;10 neu fwy o lythyrennau hapgyrch&lt;/b&gt;, neu &lt;b&gt;wyth neu fwy o eiriau&lt;/b&gt;.</translation> </message> <message> <location line="+1"/> <source>Encrypt wallet</source> <translation>Amgryptio&apos;r waled</translation> </message> <message> <location line="+7"/> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Mae angen i&apos;r weithred hon ddefnyddio&apos;ch cyfrinymadrodd er mwyn datgloi&apos;r waled.</translation> </message> <message> <location line="+5"/> <source>Unlock wallet</source> <translation>Datgloi&apos;r waled</translation> </message> <message> <location line="+3"/> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Mae angen i&apos;r weithred hon ddefnyddio&apos;ch cyfrinymadrodd er mwyn dadgryptio&apos;r waled.</translation> </message> <message> <location line="+5"/> <source>Decrypt wallet</source> <translation>Dadgryptio&apos;r waled</translation> </message> <message> <location line="+3"/> <source>Change passphrase</source> <translation>Newid cyfrinymadrodd</translation> </message> <message> <location line="+1"/> <source>Enter the old and new passphrase to the wallet.</source> <translation>Teipiwch yr hen gyfrinymadrodd a chyfrinymadrodd newydd i mewn i&apos;r waled.</translation> </message> <message> <location line="+46"/> <source>Confirm wallet encryption</source> <translation>Cadarnhau amgryptiad y waled</translation> </message> <message> <location line="+1"/> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR COINS&lt;/b&gt;!</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Are you sure you wish to encrypt your wallet?</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"/> </message> <message> <location line="+103"/> <location line="+24"/> <source>Warning: The Caps Lock key is on!</source> <translation type="unfinished"/> </message> <message> <location line="-133"/> <location line="+60"/> <source>Wallet encrypted</source> <translation>Waled wedi&apos;i amgryptio</translation> </message> <message> <location line="-58"/> <source>TalonPower will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your coins from being stolen by malware infecting your computer.</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+7"/> <location line="+44"/> <location line="+6"/> <source>Wallet encryption failed</source> <translation>Amgryptiad waled wedi methu</translation> </message> <message> <location line="-56"/> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Methodd amgryptiad y waled oherwydd gwall mewnol. 
Ni amgryptwyd eich waled.</translation> </message> <message> <location line="+7"/> <location line="+50"/> <source>The supplied passphrases do not match.</source> <translation>Dydy&apos;r cyfrinymadroddion a ddarparwyd ddim yn cyd-fynd â&apos;i gilydd.</translation> </message> <message> <location line="-38"/> <source>Wallet unlock failed</source> <translation>Methodd ddatgloi&apos;r waled</translation> </message> <message> <location line="+1"/> <location line="+12"/> <location line="+19"/> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Wallet decryption failed</source> <translation>Methodd dadgryptiad y waled</translation> </message> <message> <location line="+14"/> <source>Wallet passphrase was successfully changed.</source> <translation type="unfinished"/> </message> </context> <context> <name>BitcoinGUI</name> <message> <location filename="../bitcoingui.cpp" line="+280"/> <source>Sign &amp;message...</source> <translation type="unfinished"/> </message> <message> <location line="+242"/> <source>Synchronizing with network...</source> <translation>Cysoni â&apos;r rhwydwaith...</translation> </message> <message> <location line="-308"/> <source>&amp;Overview</source> <translation>&amp;Trosolwg</translation> </message> <message> <location line="+1"/> <source>Show general overview of wallet</source> <translation>Dangos trosolwg cyffredinol y waled</translation> </message> <message> <location line="+17"/> <source>&amp;Transactions</source> <translation>&amp;Trafodion</translation> </message> <message> <location line="+1"/> <source>Browse transaction history</source> <translation>Pori hanes trafodion</translation> </message> <message> <location line="+5"/> <source>&amp;Address Book</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit the list of stored addresses and labels</source> <translation type="unfinished"/> </message> <message> <location line="-13"/> <source>&amp;Receive coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show the list of addresses for receiving payments</source> <translation type="unfinished"/> </message> <message> <location line="-7"/> <source>&amp;Send coins</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>E&amp;xit</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Quit application</source> <translation>Gadael rhaglen</translation> </message> <message> <location line="+4"/> <source>Show information about TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>About &amp;Qt</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show information about Qt</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Options...</source> <translation>&amp;Opsiynau...</translation> </message> <message> <location line="+4"/> <source>&amp;Encrypt Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Backup Wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>&amp;Change Passphrase...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+250"/> <source>~%n block(s) remaining</source> <translation 
type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Downloaded %1 of %2 blocks of transaction history (%3% done).</source> <translation type="unfinished"/> </message> <message> <location line="-247"/> <source>&amp;Export...</source> <translation type="unfinished"/> </message> <message> <location line="-62"/> <source>Send coins to a TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Modify configuration options for TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <location line="-14"/> <source>Encrypt or decrypt wallet</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup wallet to another location</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Change the passphrase used for wallet encryption</source> <translation>Newid y cyfrinymadrodd a ddefnyddiwyd ar gyfer amgryptio&apos;r waled</translation> </message> <message> <location line="+10"/> <source>&amp;Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Open debugging and diagnostic console</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>&amp;Verify message...</source> <translation type="unfinished"/> </message> <message> <location line="-200"/> <source>TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+178"/> <source>&amp;About TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Show / Hide</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Unlock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>&amp;Lock Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Lock wallet</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>&amp;File</source> <translation>&amp;Ffeil</translation> </message> <message> <location line="+8"/> <source>&amp;Settings</source> <translation>&amp;Gosodiadau</translation> </message> <message> <location line="+8"/> <source>&amp;Help</source> <translation>&amp;Cymorth</translation> </message> <message> <location line="+9"/> <source>Tabs toolbar</source> <translation>Bar offer tabiau</translation> </message> <message> <location line="+8"/> <source>Actions toolbar</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+9"/> <source>[testnet]</source> <translation>[testnet]</translation> </message> <message> <location line="+0"/> <location line="+60"/> <source>TalonPower client</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+70"/> <source>%n active connection(s) to TalonPower network</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+40"/> <source>Downloaded %1 blocks 
of transaction history.</source> <translation type="unfinished"/> </message> <message> <location line="+413"/> <source>Staking.&lt;br&gt;Your weight is %1&lt;br&gt;Network weight is %2&lt;br&gt;Expected time to earn reward is %3</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Not staking because wallet is locked</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because wallet is syncing</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Not staking because you don&apos;t have mature coins</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-403"/> <source>%n second(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="-284"/> <source>&amp;Unlock Wallet...</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+288"/> <source>%n minute(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s) ago</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Up to date</source> <translation>Cyfamserol</translation> </message> <message> <location line="+7"/> <source>Catching up...</source> <translation>Dal i fyny</translation> </message> <message> <location line="+10"/> <source>Last received block was generated %1.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+27"/> <source>Sent transaction</source> <translation>Trafodiad a anfonwyd</translation> </message> <message> <location line="+1"/> <source>Incoming transaction</source> <translation>Trafodiad sy&apos;n cyrraedd</translation> </message> <message> <location line="+1"/> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <location line="+15"/> <source>URI handling</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <location line="+15"/> <source>URI can not be parsed! 
This can be caused by an invalid TalonPower address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Mae&apos;r waled &lt;b&gt;wedi&apos;i amgryptio&lt;/b&gt; ac &lt;b&gt;heb ei gloi&lt;/b&gt; ar hyn o bryd</translation> </message> <message> <location line="+10"/> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Mae&apos;r waled &lt;b&gt;wedi&apos;i amgryptio&lt;/b&gt; ac &lt;b&gt;ar glo&lt;/b&gt; ar hyn o bryd</translation> </message> <message> <location line="+25"/> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>There was an error trying to save the wallet data to the new location.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+76"/> <source>%n second(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n minute(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n hour(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message numerus="yes"> <location line="+4"/> <source>%n day(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+18"/> <source>Not staking</source> <translation type="unfinished"/> </message> <message> <location filename="../bitcoin.cpp" line="+109"/> <source>A fatal error occurred. 
TalonPower can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <location filename="../clientmodel.cpp" line="+90"/> <source>Network Alert</source> <translation type="unfinished"/> </message> </context> <context> <name>CoinControlDialog</name> <message> <location filename="../forms/coincontroldialog.ui" line="+14"/> <source>Coin Control</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+48"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="+551"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location filename="../forms/coincontroldialog.ui" line="+51"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change:</source> <translation type="unfinished"/> </message> <message> <location line="+69"/> <source>(un)select all</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Tree mode</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>List mode</source> <translation type="unfinished"/> </message> <message> <location line="+45"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Label</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Address</source> <translation>Cyfeiriad</translation> </message> <message> <location line="+5"/> <source>Date</source> <translation>Dyddiad</translation> </message> <message> <location line="+5"/> <source>Confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirmed</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Priority</source> <translation type="unfinished"/> </message> <message> <location filename="../coincontroldialog.cpp" line="-515"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+26"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> 
<location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+317"/> <source>highest</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium-high</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>low-medium</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>low</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>lowest</source> <translation type="unfinished"/> </message> <message> <location line="+155"/> <source>DUST</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>yes</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>This label turns red, if the transaction size is bigger than 10000 bytes. This means a fee of at least %1 per kb is required. Can vary +/- 1 Byte per input.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Transactions with higher priority get more likely into a block. This label turns red, if the priority is smaller than &quot;medium&quot;. This means a fee of at least %1 per kb is required.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if any recipient receives an amount smaller than %1. This means a fee of at least %2 is required. Amounts below 0.546 times the minimum relay fee are shown as DUST.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This label turns red, if the change is smaller than %1. This means a fee of at least %2 is required.</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+66"/> <source>(no label)</source> <translation>(heb label)</translation> </message> <message> <location line="-9"/> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>(change)</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <location filename="../forms/editaddressdialog.ui" line="+14"/> <source>Edit Address</source> <translation>Golygu&apos;r cyfeiriad</translation> </message> <message> <location line="+11"/> <source>&amp;Label</source> <translation>&amp;Label</translation> </message> <message> <location line="+10"/> <source>The label associated with this address book entry</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Address</source> <translation>&amp;Cyfeiriad</translation> </message> <message> <location line="+10"/> <source>The address associated with this address book entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <location filename="../editaddressdialog.cpp" line="+20"/> <source>New receiving address</source> <translation>Cyfeiriad derbyn newydd</translation> </message> <message> <location line="+4"/> <source>New sending address</source> <translation>Cyfeiriad anfon newydd</translation> </message> <message> <location line="+3"/> <source>Edit receiving address</source> <translation>Golygu&apos;r cyfeiriad derbyn</translation> </message> <message> <location line="+4"/> <source>Edit sending address</source> <translation>Golygu&apos;r cyfeiriad anfon</translation> </message> <message> <location line="+76"/> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Mae&apos;r cyfeiriad &quot;%1&quot; sydd newydd gael ei geisio gennych yn y llyfr cyfeiriad yn barod.</translation> </message> <message> <location line="-5"/> <source>The entered address &quot;%1&quot; is not a valid TalonPower address.</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Could not unlock wallet.</source> <translation>Methodd ddatgloi&apos;r waled.</translation> </message> <message> <location line="+5"/> <source>New key generation failed.</source> <translation>Methodd gynhyrchu allwedd newydd.</translation> </message> </context> <context> <name>GUIUtil::HelpMessageBox</name> <message> <location filename="../guiutil.cpp" line="+420"/> <location line="+12"/> <source>TalonPower-Qt</source> <translation type="unfinished"/> </message> <message> <location line="-12"/> <source>version</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>UI options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Start minimized</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <location filename="../forms/optionsdialog.ui" line="+14"/> <source>Options</source> <translation>Opsiynau</translation> </message> <message> <location line="+16"/> <source>&amp;Main</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB. 
Fee 0.01 recommended.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Pay transaction &amp;fee</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Reserved amount does not participate in staking and is therefore spendable at any time.</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Reserve</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Automatically start TalonPower after logging in to the system.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Start TalonPower on system login</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Detach block and address databases at shutdown. This means they can be moved to another data directory, but it slows down shutdown. The wallet is always detached.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Detach databases at shutdown</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Network</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Automatically open the TalonPower client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Map port using &amp;UPnP</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Connect to the TalonPower network through a SOCKS proxy (e.g. when connecting through Tor).</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Connect through SOCKS proxy:</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Proxy &amp;IP:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>IP address of the proxy (e.g. 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Port:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Port of the proxy (e.g. 9050)</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>SOCKS &amp;Version:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>SOCKS version of the proxy (e.g. 5)</source> <translation type="unfinished"/> </message> <message> <location line="+36"/> <source>&amp;Window</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Show only a tray icon after minimizing the window.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Minimize instead of exit the application when the window is closed. 
When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>M&amp;inimize on close</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>&amp;Display</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>User Interface &amp;language:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>The user interface language can be set here. This setting will take effect after restarting TalonPower.</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>&amp;Unit to show amounts in:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>Whether to show TalonPower addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Display addresses in transaction list</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Display coin &amp;control features (experts only!)</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;OK</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>&amp;Cancel</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>&amp;Apply</source> <translation type="unfinished"/> </message> <message> <location filename="../optionsdialog.cpp" line="+55"/> <source>default</source> <translation type="unfinished"/> </message> <message> <location line="+149"/> <location line="+9"/> <source>Warning</source> <translation type="unfinished"/> </message> <message> <location line="-9"/> <location line="+9"/> <source>This setting will take effect after restarting TalonPower.</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The supplied proxy address is invalid.</source> <translation type="unfinished"/> </message> </context> <context> <name>OverviewPage</name> <message> <location filename="../forms/overviewpage.ui" line="+14"/> <source>Form</source> <translation>Ffurflen</translation> </message> <message> <location line="+33"/> <location line="+231"/> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the TalonPower network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <location line="-160"/> <source>Stake:</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Unconfirmed:</source> <translation type="unfinished"/> </message> <message> <location line="-107"/> <source>Wallet</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Spendable:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current spendable balance</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>Immature:</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>Mined balance that has not yet matured</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Total:</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Your current total balance</source> <translation type="unfinished"/> </message> <message> <location line="+46"/> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Trafodion diweddar&lt;/b&gt;</translation> </message> <message> <location line="-108"/> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Total of coins that was staked, and do not yet count toward the current balance</source> <translation type="unfinished"/> </message> <message> <location filename="../overviewpage.cpp" line="+113"/> <location line="+1"/> <source>out of sync</source> <translation type="unfinished"/> </message> </context> <context> <name>QRCodeDialog</name> <message> <location filename="../forms/qrcodedialog.ui" line="+14"/> <source>QR Code Dialog</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>Request Payment</source> <translation type="unfinished"/> </message> <message> <location line="+56"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>Label:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Message:</source> <translation type="unfinished"/> </message> <message> <location line="+71"/> <source>&amp;Save As...</source> <translation type="unfinished"/> </message> <message> <location filename="../qrcodedialog.cpp" line="+62"/> <source>Error encoding URI into QR Code.</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>The entered amount is invalid, please check.</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Save QR Code</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>PNG Images (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <location filename="../forms/rpcconsole.ui" line="+46"/> <source>Client name</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+23"/> <location line="+26"/> 
<location line="+23"/> <location line="+23"/> <location line="+36"/> <location line="+53"/> <location line="+23"/> <location line="+23"/> <location filename="../rpcconsole.cpp" line="+348"/> <source>N/A</source> <translation type="unfinished"/> </message> <message> <location line="-217"/> <source>Client version</source> <translation type="unfinished"/> </message> <message> <location line="-45"/> <source>&amp;Information</source> <translation type="unfinished"/> </message> <message> <location line="+68"/> <source>Using OpenSSL version</source> <translation type="unfinished"/> </message> <message> <location line="+49"/> <source>Startup time</source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>Network</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Number of connections</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>On testnet</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Block chain</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Current number of blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Estimated total blocks</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Last block time</source> <translation type="unfinished"/> </message> <message> <location line="+52"/> <source>&amp;Open</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Command-line options</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Show the TalonPower-Qt help message to get a list with possible TalonPower command-line options.</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>&amp;Show</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>&amp;Console</source> <translation type="unfinished"/> </message> <message> <location line="-260"/> <source>Build date</source> <translation type="unfinished"/> </message> <message> <location line="-104"/> <source>TalonPower - Debug window</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>TalonPower Core</source> <translation type="unfinished"/> </message> <message> <location line="+279"/> <source>Debug log file</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Open the TalonPower debug log file from the current data directory. 
This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <location line="+102"/> <source>Clear console</source> <translation type="unfinished"/> </message> <message> <location filename="../rpcconsole.cpp" line="-33"/> <source>Welcome to the TalonPower RPC console.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <location filename="../forms/sendcoinsdialog.ui" line="+14"/> <location filename="../sendcoinsdialog.cpp" line="+182"/> <location line="+5"/> <location line="+5"/> <location line="+5"/> <location line="+6"/> <location line="+5"/> <location line="+5"/> <source>Send Coins</source> <translation>Anfon arian</translation> </message> <message> <location line="+76"/> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> <location line="+77"/> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+35"/> <source>0</source> <translation type="unfinished"/> </message> <message> <location line="-19"/> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Amount:</source> <translation type="unfinished"/> </message> <message> <location line="+22"/> <location line="+86"/> <location line="+86"/> <location line="+32"/> <source>0.00 BC</source> <translation type="unfinished"/> </message> <message> <location line="-191"/> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>medium</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <location line="+19"/> <source>no</source> <translation type="unfinished"/> </message> <message> <location line="+32"/> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>Change</source> <translation type="unfinished"/> </message> <message> <location line="+50"/> <source>custom change address</source> <translation type="unfinished"/> </message> <message> <location line="+106"/> <source>Send to multiple recipients at once</source> <translation>Anfon at pobl lluosog ar yr un pryd</translation> </message> <message> <location line="+3"/> <source>Add &amp;Recipient</source> <translation type="unfinished"/> </message> <message> <location line="+20"/> <source>Remove all transaction fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Balance:</source> 
<translation>Gweddill:</translation> </message> <message> <location line="+16"/> <source>123.456 BC</source> <translation type="unfinished"/> </message> <message> <location line="+31"/> <source>Confirm the send action</source> <translation>Cadarnhau&apos;r gweithrediad anfon</translation> </message> <message> <location line="+3"/> <source>S&amp;end</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsdialog.cpp" line="-173"/> <source>Enter a TalonPower address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <location line="+86"/> <source>&lt;b&gt;%1&lt;/b&gt; to %2 (%3)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Confirm send coins</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Are you sure you want to send %1?</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source> and </source> <translation type="unfinished"/> </message> <message> <location line="+29"/> <source>The recipient address is not valid, please recheck.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount to pay must be larger than 0.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The amount exceeds your balance.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: Transaction creation failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="+251"/> <source>WARNING: Invalid TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <source>(no label)</source> <translation>(heb label)</translation> </message> <message> <location line="+4"/> <source>WARNING: unknown change address</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <location filename="../forms/sendcoinsentry.ui" line="+14"/> <source>Form</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>A&amp;mount:</source> <translation>&amp;Maint</translation> </message> <message> <location line="+13"/> <source>Pay &amp;To:</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <location filename="../sendcoinsentry.cpp" line="+25"/> <source>Enter a label for this address to add it to your address book</source> <translation type="unfinished"/> </message> <message> <location line="+9"/> <source>&amp;Label:</source> <translation>&amp;Label:</translation> </message> <message> <location line="+18"/> <source>The address to send the payment to (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Choose address from address book</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="+7"/> <source>Paste address from clipboard</source> <translation>Gludo cyfeiriad o&apos;r glipfwrdd</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+7"/> <source>Remove this recipient</source> <translation type="unfinished"/> </message> <message> <location filename="../sendcoinsentry.cpp" line="+1"/> <source>Enter a TalonPower address (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <location filename="../forms/signverifymessagedialog.ui" line="+14"/> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"/> </message> <message> <location line="+13"/> <location line="+124"/> <source>&amp;Sign Message</source> <translation type="unfinished"/> </message> <message> <location line="-118"/> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>The address to sign the message with (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+10"/> <location line="+203"/> <source>Choose an address from the address book</source> <translation type="unfinished"/> </message> <message> <location line="-193"/> <location line="+203"/> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <location line="-193"/> <source>Paste address from clipboard</source> <translation>Gludo cyfeiriad o&apos;r glipfwrdd</translation> </message> <message> <location line="+10"/> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <location line="+12"/> <source>Enter the message you want to sign here</source> <translation type="unfinished"/> </message> <message> <location line="+24"/> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>Sign the message to prove you own this TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all sign message fields</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <location line="+146"/> <source>Clear &amp;All</source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <location line="+70"/> <source>&amp;Verify Message</source> <translation type="unfinished"/> </message> <message> <location line="-64"/> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"/> </message> <message> <location line="+21"/> <source>The address the message was signed with (e.g. B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="+40"/> <source>Verify the message to ensure it was signed with the specified TalonPower address</source> <translation type="unfinished"/> </message> <message> <location line="+17"/> <source>Reset all verify message fields</source> <translation type="unfinished"/> </message> <message> <location filename="../signverifymessagedialog.cpp" line="+27"/> <location line="+3"/> <source>Enter a TalonPower address (e.g. 
B8gZqgY4r2RoEdqYk3QsAqFckyf9pRHN6i)</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Enter TalonPower signature</source> <translation type="unfinished"/> </message> <message> <location line="+82"/> <location line="+81"/> <source>The entered address is invalid.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+8"/> <location line="+73"/> <location line="+8"/> <source>Please check the address and try again.</source> <translation type="unfinished"/> </message> <message> <location line="-81"/> <location line="+81"/> <source>The entered address does not refer to a key.</source> <translation type="unfinished"/> </message> <message> <location line="-73"/> <source>Wallet unlock was cancelled.</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Private key for the entered address is not available.</source> <translation type="unfinished"/> </message> <message> <location line="+12"/> <source>Message signing failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message signed.</source> <translation type="unfinished"/> </message> <message> <location line="+59"/> <source>The signature could not be decoded.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <location line="+13"/> <source>Please check the signature and try again.</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>The signature did not match the message digest.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Message verification failed.</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Message verified.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDesc</name> <message> <location filename="../transactiondesc.cpp" line="+19"/> <source>Open until %1</source> <translation>Agor tan %1</translation> </message> <message numerus="yes"> <location line="-2"/> <source>Open for %n block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+8"/> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/offline</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1/unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>%1 confirmations</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>Status</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="+7"/> <source>, broadcast through %n node(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+4"/> <source>Date</source> <translation>Dyddiad</translation> </message> <message> <location line="+7"/> <source>Source</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Generated</source> <translation type="unfinished"/> 
</message> <message> <location line="+5"/> <location line="+17"/> <source>From</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <location line="+22"/> <location line="+58"/> <source>To</source> <translation type="unfinished"/> </message> <message> <location line="-77"/> <location line="+2"/> <source>own address</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>label</source> <translation type="unfinished"/> </message> <message> <location line="+37"/> <location line="+12"/> <location line="+45"/> <location line="+17"/> <location line="+30"/> <source>Credit</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-102"/> <source>matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+2"/> <source>not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <location line="+8"/> <location line="+15"/> <location line="+30"/> <source>Debit</source> <translation type="unfinished"/> </message> <message> <location line="-39"/> <source>Transaction fee</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Net amount</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Message</source> <translation>Neges</translation> </message> <message> <location line="+2"/> <source>Comment</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated coins must mature 3 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Debug information</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Transaction</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Inputs</source> <translation type="unfinished"/> </message> <message> <location line="+23"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>true</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>false</source> <translation type="unfinished"/> </message> <message> <location line="-211"/> <source>, has not been successfully broadcast yet</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>unknown</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionDescDialog</name> <message> <location filename="../forms/transactiondescdialog.ui" line="+14"/> <source>Transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>This pane shows a detailed description of the transaction</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionTableModel</name> <message> <location filename="../transactiontablemodel.cpp" line="+226"/> <source>Date</source> <translation>Dyddiad</translation> </message> <message> <location line="+0"/> <source>Type</source> <translation>Math</translation> </message> <message> <location line="+0"/> <source>Address</source> <translation>Cyfeiriad</translation> </message> <message> <location line="+0"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+60"/> <source>Open until %1</source> <translation>Agor tan %1</translation> </message> <message> <location line="+12"/> <source>Confirmed (%1 confirmations)</source> <translation type="unfinished"/> </message> <message numerus="yes"> <location line="-15"/> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <location line="+6"/> <source>Offline</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Generated but not accepted</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Received from</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Sent to</source> <translation 
type="unfinished"/> </message> <message> <location line="+2"/> <source>Payment to yourself</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+38"/> <source>(n/a)</source> <translation type="unfinished"/> </message> <message> <location line="+190"/> <source>Transaction status. Hover over this field to show number of confirmations.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Date and time that the transaction was received.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Type of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Destination address of transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Amount removed from or added to balance.</source> <translation type="unfinished"/> </message> </context> <context> <name>TransactionView</name> <message> <location filename="../transactionview.cpp" line="+55"/> <location line="+16"/> <source>All</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Today</source> <translation>Heddiw</translation> </message> <message> <location line="+1"/> <source>This week</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Last month</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>This year</source> <translation>Eleni</translation> </message> <message> <location line="+1"/> <source>Range...</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Received with</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Sent to</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>To yourself</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Mined</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Other</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Enter address or label to search</source> <translation type="unfinished"/> </message> <message> <location line="+7"/> <source>Min amount</source> <translation type="unfinished"/> </message> <message> <location line="+34"/> <source>Copy address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Edit label</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Show transaction details</source> <translation type="unfinished"/> </message> <message> <location line="+144"/> <source>Export Transaction Data</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Comma separated file (*.csv)</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Confirmed</source> 
<translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Date</source> <translation>Dyddiad</translation> </message> <message> <location line="+1"/> <source>Type</source> <translation>Math</translation> </message> <message> <location line="+1"/> <source>Label</source> <translation>Label</translation> </message> <message> <location line="+1"/> <source>Address</source> <translation>Cyfeiriad</translation> </message> <message> <location line="+1"/> <source>Amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>ID</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error exporting</source> <translation type="unfinished"/> </message> <message> <location line="+0"/> <source>Could not write to file %1.</source> <translation type="unfinished"/> </message> <message> <location line="+100"/> <source>Range:</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>to</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletModel</name> <message> <location filename="../walletmodel.cpp" line="+206"/> <source>Sending...</source> <translation type="unfinished"/> </message> </context> <context> <name>bitcoin-core</name> <message> <location filename="../bitcoinstrings.cpp" line="+33"/> <source>TalonPower version</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Usage:</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send command to -server or talonpowerd</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>List commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Get help for a command</source> <translation type="unfinished"/>
<message> <location line="+2"/> <source>Options:</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify configuration file (default: talonpower.conf)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify pid file (default: talonpowerd.pid)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Specify data directory</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Set database cache size in megabytes (default: 25)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set database disk log size in megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+6"/> <source>Listen for connections on &lt;port&gt; (default: 28547 or testnet: 38547)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Specify your own public address</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Bind to given address. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Stake your coins to support network and gain reward (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation type="unfinished"/> </message> <message> <location line="-44"/> <source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Detach block and address databases. Increases shutdown time (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Error: The transaction was rejected. 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds </source> <translation type="unfinished"/> </message> <message> <location line="-87"/> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 28548 or testnet: 38548)</source> <translation type="unfinished"/> </message> <message> <location line="-11"/> <source>Accept command line and JSON-RPC commands</source> <translation type="unfinished"/> </message> <message> <location line="+101"/> <source>Error: Transaction creation failed </source> <translation type="unfinished"/> </message> <message> <location line="-5"/> <source>Error: Wallet locked, unable to create transaction </source> <translation type="unfinished"/> </message> <message> <location line="-8"/> <source>Importing blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Importing bootstrap blockchain data file.</source> <translation type="unfinished"/> </message> <message> <location line="-88"/> <source>Run in the background as a daemon and accept commands</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Use the test network</source> <translation type="unfinished"/> </message> <message> <location line="-24"/> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"/> </message> <message> <location line="-38"/> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"/> </message> <message> <location line="+117"/> <source>Error initializing database environment %s! To recover, BACKUP THAT DIRECTORY, then remove everything from it except for wallet.dat.</source> <translation type="unfinished"/> </message> <message> <location line="-20"/> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+61"/> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong TalonPower will not work properly.</source> <translation type="unfinished"/> </message> <message> <location line="-31"/> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"/> </message> <message> <location line="-18"/> <source>Warning: wallet.dat corrupt, data salvaged! 
Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"/> </message> <message> <location line="-30"/> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Block creation options:</source> <translation type="unfinished"/> </message> <message> <location line="-62"/> <source>Connect only to the specified node(s)</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"/> </message> <message> <location line="+94"/> <source>Failed to listen on any port. Use -listen=0 if you want this.</source> <translation type="unfinished"/> </message> <message> <location line="-90"/> <source>Find peers using DNS lookup (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync checkpoints policy (default: strict)</source> <translation type="unfinished"/> </message> <message> <location line="+83"/> <source>Invalid -tor address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Invalid amount for -reservebalance=&lt;amount&gt;</source> <translation type="unfinished"/> </message> <message> <location line="-82"/> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Output extra debugging information. 
Implies all other -debug* options</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Output extra network debugging information</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Prepend debug output with timestamp</source> <translation type="unfinished"/> </message> <message> <location line="+35"/> <source>SSL options: (see the Bitcoin Wiki for SSL setup instructions)</source> <translation type="unfinished"/> </message> <message> <location line="-74"/> <source>Select the version of socks proxy to use (4-5, default: 5)</source> <translation type="unfinished"/> </message> <message> <location line="+41"/> <source>Send trace/debug info to console instead of debug.log file</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send trace/debug info to debugger</source> <translation type="unfinished"/> </message> <message> <location line="+28"/> <source>Set maximum block size in bytes (default: 250000)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-29"/> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"/> </message> <message> <location line="-42"/> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation type="unfinished"/> </message> <message> <location line="+109"/> <source>Unable to sign checkpoint, wrong checkpointkey? </source> <translation type="unfinished"/> </message> <message> <location line="-80"/> <source>Use UPnP to map the listening port (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation type="unfinished"/> </message> <message> <location line="-25"/> <source>Use proxy to reach tor hidden services (default: same as -proxy)</source> <translation type="unfinished"/> </message> <message> <location line="+42"/> <source>Username for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+47"/> <source>Verifying database integrity...</source> <translation type="unfinished"/> </message> <message> <location line="+57"/> <source>WARNING: syncronized checkpoint violation detected, but skipped!</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Warning: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <location line="-2"/> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"/> </message> <message> <location line="-48"/> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"/> </message> <message> <location line="-54"/> <source>Password for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="-84"/> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=talonpowerrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. 
It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;TalonPower Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <location line="+51"/> <source>Find peers using internet relay chat (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Sync time with other nodes. Disable if time on your system is precise e.g. syncing with NTP (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+15"/> <source>When creating transactions, ignore inputs with value less than this (default: 0.01)</source> <translation type="unfinished"/> </message> <message> <location line="+16"/> <source>Allow JSON-RPC connections from specified IP address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Require a confirmations for change (default: 0)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Enforce transaction scripts to use canonical PUSH operators (default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Upgrade wallet to latest format</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Rescan the block chain for missing wallet transactions</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>How many blocks to check at startup (default: 2500, 0 = all)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>How thorough the block verification is (0-6, default: 1)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Imports blocks from external blk000?.dat file</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Use OpenSSL (https) for JSON-RPC connections</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Server certificate file (default: server.cert)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Server private key (default: server.pem)</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <location line="+53"/> <source>Error: Wallet unlocked for staking only, unable to create transaction.</source> <translation type="unfinished"/> </message> <message> <location line="+18"/> <source>WARNING: Invalid 
checkpoint found! Displayed transactions may not be correct! You may need to upgrade, or notify developers.</source> <translation type="unfinished"/> </message> <message> <location line="-158"/> <source>This help message</source> <translation type="unfinished"/> </message> <message> <location line="+95"/> <source>Wallet %s resides outside data directory %s.</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot obtain a lock on data directory %s. TalonPower is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-98"/> <source>TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+140"/> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation type="unfinished"/> </message> <message> <location line="-130"/> <source>Connect through socks proxy</source> <translation type="unfinished"/> </message> <message> <location line="+3"/> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation type="unfinished"/> </message> <message> <location line="+122"/> <source>Loading addresses...</source> <translation type="unfinished"/> </message> <message> <location line="-15"/> <source>Error loading blkindex.dat</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Error loading wallet.dat: Wallet corrupted</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Error loading wallet.dat: Wallet requires newer version of TalonPower</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Wallet needed to be rewritten: restart TalonPower to complete</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Error loading wallet.dat</source> <translation type="unfinished"/> </message> <message> <location line="-16"/> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-1"/> <source>Unknown -socks proxy version requested: %i</source> <translation type="unfinished"/> </message> <message> <location line="+4"/> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+2"/> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="-24"/> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+44"/> <source>Error: could not start node</source> <translation type="unfinished"/> </message> <message> <location line="+11"/> <source>Sending...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Invalid amount</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Insufficient funds</source> <translation type="unfinished"/> </message> <message> <location line="-34"/> <source>Loading block index...</source> <translation type="unfinished"/> </message> <message> <location line="-103"/> <source>Add a node to connect to and attempt to keep the connection open</source> <translation type="unfinished"/> </message> <message> <location line="+122"/> 
<source>Unable to bind to %s on this computer. TalonPower is probably already running.</source> <translation type="unfinished"/> </message> <message> <location line="-97"/> <source>Fee per KB to add to transactions you send</source> <translation type="unfinished"/> </message> <message> <location line="+55"/> <source>Invalid amount for -mininput=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <location line="+25"/> <source>Loading wallet...</source> <translation type="unfinished"/> </message> <message> <location line="+8"/> <source>Cannot downgrade wallet</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot initialize keypool</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Cannot write default address</source> <translation type="unfinished"/> </message> <message> <location line="+1"/> <source>Rescanning...</source> <translation type="unfinished"/> </message> <message> <location line="+5"/> <source>Done loading</source> <translation type="unfinished"/> </message> <message> <location line="-167"/> <source>To use the %s option</source> <translation type="unfinished"/> </message> <message> <location line="+14"/> <source>Error</source> <translation>Gwall</translation> </message> <message> <location line="+6"/> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"/> </message> </context> </TS>
</message>
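The record above is a Qt Linguist .ts translation catalogue (Welsh strings for the TalonPower wallet UI). A quick way to gauge how much of such a catalogue is still untranslated is to count the <translation type="unfinished"/> entries; the sketch below does that with the standard library only, assuming nothing beyond the context/message/source/translation layout visible above (the file path is a placeholder).

import xml.etree.ElementTree as ET

# Placeholder path; any Qt Linguist .ts catalogue with the layout shown above works.
tree = ET.parse("talonpower_cy.ts")

total = 0
unfinished = 0
for message in tree.getroot().iter("message"):
    total += 1
    translation = message.find("translation")
    if translation is not None and translation.get("type") == "unfinished":
        unfinished += 1

print(f"{unfinished} of {total} source strings still lack a translation")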
Checkbox.tsx
import { h, FunctionalComponent, JSX } from 'preact'; interface ICheckboxProps { label: string; checked: boolean; onChange: JSX.GenericEventHandler<HTMLInputElement>; } const Checkbox: FunctionalComponent<ICheckboxProps> = (props: ICheckboxProps) => ( <label className="option-label"> <input type="checkbox" checked={props.checked} onChange={props.onChange} /> {props.label} </label> );
export default Checkbox;
padding.py
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function import abc import six from cryptography_patched import utils from cryptography_patched.exceptions import AlreadyFinalized from cryptography_patched.hazmat.bindings._padding import lib @six.add_metaclass(abc.ABCMeta) class PaddingContext(object): @abc.abstractmethod def update(self, data): """ Pads the provided bytes and returns any available data as bytes. """ @abc.abstractmethod def finalize(self): """ Finalize the padding, returns bytes. """ def _byte_padding_check(block_size): if not (0 <= block_size <= 2040): raise ValueError("block_size must be in range(0, 2041).") if block_size % 8 != 0: raise ValueError("block_size must be a multiple of 8.") def _byte_padding_update(buffer_, data, block_size): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") utils._check_bytes("data", data) buffer_ += data finished_blocks = len(buffer_) // (block_size // 8) result = buffer_[:finished_blocks * (block_size // 8)] buffer_ = buffer_[finished_blocks * (block_size // 8):] return buffer_, result def _byte_padding_pad(buffer_, block_size, paddingfn): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") pad_size = block_size // 8 - len(buffer_) return buffer_ + paddingfn(pad_size) def _byte_unpadding_update(buffer_, data, block_size): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") utils._check_bytes("data", data) buffer_ += data finished_blocks = max(len(buffer_) // (block_size // 8) - 1, 0) result = buffer_[:finished_blocks * (block_size // 8)] buffer_ = buffer_[finished_blocks * (block_size // 8):] return buffer_, result def _byte_unpadding_check(buffer_, block_size, checkfn): if buffer_ is None: raise AlreadyFinalized("Context was already finalized.") if len(buffer_) != block_size // 8: raise ValueError("Invalid padding bytes.") valid = checkfn(buffer_, block_size // 8) if not valid: raise ValueError("Invalid padding bytes.") pad_size = six.indexbytes(buffer_, -1) return buffer_[:-pad_size] class PKCS7(object): def __init__(self, block_size): _byte_padding_check(block_size) self.block_size = block_size def padder(self): return _PKCS7PaddingContext(self.block_size) def unpadder(self): return _PKCS7UnpaddingContext(self.block_size) @utils.register_interface(PaddingContext) class _PKCS7PaddingContext(object): def __init__(self, block_size): self.block_size = block_size # TODO: more copies than necessary, we should use zero-buffer (#193) self._buffer = b"" def update(self, data): self._buffer, result = _byte_padding_update( self._buffer, data, self.block_size) return result def _padding(self, size): return six.int2byte(size) * size def finalize(self): result = _byte_padding_pad( self._buffer, self.block_size, self._padding) self._buffer = None return result @utils.register_interface(PaddingContext) class _PKCS7UnpaddingContext(object): def __init__(self, block_size): self.block_size = block_size # TODO: more copies than necessary, we should use zero-buffer (#193) self._buffer = b"" def update(self, data): self._buffer, result = _byte_unpadding_update( self._buffer, data, self.block_size) return result def finalize(self): result = _byte_unpadding_check( self._buffer, self.block_size, lib.Cryptography_check_pkcs7_padding) self._buffer = None return result class ANSIX923(object): def 
__init__(self, block_size): _byte_padding_check(block_size) self.block_size = block_size def padder(self): return _ANSIX923PaddingContext(self.block_size) def unpadder(self): return _ANSIX923UnpaddingContext(self.block_size) @utils.register_interface(PaddingContext) class _ANSIX923PaddingContext(object): def __init__(self, block_size): self.block_size = block_size # TODO: more copies than necessary, we should use zero-buffer (#193) self._buffer = b"" def update(self, data): self._buffer, result = _byte_padding_update( self._buffer, data, self.block_size) return result def _padding(self, size): return six.int2byte(0) * (size - 1) + six.int2byte(size) def finalize(self): result = _byte_padding_pad( self._buffer, self.block_size, self._padding) self._buffer = None return result @utils.register_interface(PaddingContext) class _ANSIX923UnpaddingContext(object): def __init__(self, block_size):
def update(self, data): self._buffer, result = _byte_unpadding_update( self._buffer, data, self.block_size) return result def finalize(self): result = _byte_unpadding_check( self._buffer, self.block_size, lib.Cryptography_check_ansix923_padding) self._buffer = None return result
self.block_size = block_size # TODO: more copies than necessary, we should use zero-buffer (#193) self._buffer = b""
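The padding.py record above mirrors pyca/cryptography's symmetric-padding module. A minimal usage sketch follows; the import path is an assumption based on the package name used in the record (upstream pyca/cryptography exposes the same classes under cryptography.hazmat.primitives.padding), while the PKCS7/ANSIX923 classes and their padder()/unpadder() contexts are exactly the ones defined above.

# Assumed import path for this patched fork; adjust if the module lives elsewhere.
from cryptography_patched.hazmat.primitives.padding import PKCS7, ANSIX923

# Pad, then unpad, a short message to a 128-bit (16-byte) block boundary.
padder = PKCS7(128).padder()
padded = padder.update(b"secret data") + padder.finalize()

unpadder = PKCS7(128).unpadder()
assert unpadder.update(padded) + unpadder.finalize() == b"secret data"

# ANSIX923 has the same interface but pads with zero bytes followed by a single
# length byte: 3 data bytes -> 12 zero bytes + b"\x0d" here.
x923 = ANSIX923(128).padder()
block = x923.update(b"\x01\x02\x03") + x923.finalize()
assert len(block) == 16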
galaxySettings.js
import { types } from "../constants/action-types"; var initialState = { zoom: 1, showProgress: true, showLabels: true, showConnections: true }; export default function (state = initialState, action) { switch (action.type) { case types.SET_ZOOM: { return Object.assign({}, state, { zoom: action.payload.zoom }); } case 'VIEW_TOGGLE_PROGRESS': { return Object.assign({}, state, { showProgress: !state.showProgress }); } case 'VIEW_TOGGLE_LABELS': { return Object.assign({}, state, { showLabels: !state.showLabels }); } case 'VIEW_TOGGLE_CONNECTIONS': { return Object.assign({}, state, { showConnections: !state.showConnections }); } default:
}
return state; }
virtualkey.rs
/// Supported virtual keys. /// /// Virtual keys represent the intended meaning of the key, and have no relation to where /// the key physically is on the keyboard. Use virtual key where the meaning of the key /// is most important (textual input). When meaning matters less, but physical location /// is more important (WASD-like control schemes) use [`crate::Scancode`]. #[repr(u16)] #[derive(Debug, Copy, Clone, Eq, PartialEq)] pub enum
{ Unknown, Escape, One, Two, Three, Four, Five, Six, Seven, Eight, Nine, Zero, Minus, Equal, Backspace, Tab, Q, W, E, R, T, Y, U, I, O, P, LeftBrace, RightBrace, Enter, LeftControl, A, D, S, F, G, H, J, K, L, Semicolon, Apostrope, Grave, LeftShift, Backslash, Z, X, C, V, B, N, M, Comma, Dot, Slash, RightShift, KeyPadAsterick, LeftAlt, Space, CapsLock, F1, F2, F3, F4, F5, F6, F7, F8, F9, F10, NumLock, ScrollLock, KeyPad7, KeyPad8, KeyPad9, KeyPadMinus, KeyPad4, KeyPad5, KeyPad6, KeyPadPlus, KeyPad1, KeyPad2, KeyPad3, KeyPad0, KeyPadDot, NonUsBackslash, F11, F12, KeyPadEnter, RightAlt, Home, Up, PageUp, Left, Right, End, Down, PageDown, Insert, Delete, KeyPadEqual, KeyPadPlusMinus, }
VirtualKey
ServidorMusa.py
from flask import * from peewee import * import sys from playhouse.shortcuts import model_to_dict, dict_to_model from base64 import b64encode app = Flask(__name__) musa_db = MySQLDatabase( "musa", host="localhost", port=3306, user="euterpe", passwd="An6248322") class MySQLModel(Model): """Database model""" class Meta: database = musa_db # Mensajes # 5 - Todo PERFECTO # 51 - Melomano login # 52 - Artista login # 7 - Todo mal # 1 - Contraseña incorrecta # 2 - El usuario no existe # 3 - Lista vacia # 4 - Usuario registrado # 6 - El usuario ya existe # 10 - Álbum registrado # 11 - Error al registrar álbum # 12 - El artista de agregó # 13 - Error al agregar al artista # 14 - Se agregó la canción # 15 - No se pudo agregar la canción # 16 - Se actualizó el artista # 17 - Error al actualizar el artista # 18 - Se actualizó el melómano # 19 - Error al actualizar el melómano # 20 - Se agregó la canción a la playlist # 21 - No se agregó la canción a la playlist # 300 - Contraseñas no coinciden class Melomano(MySQLModel): idMelomano = PrimaryKeyField() nombreMelomano = CharField() nombre = CharField() apellidos = CharField() password = CharField() fotoPerfil = TextField() correoElectronico = CharField() class Genero(MySQLModel): idGenero = PrimaryKeyField() genero = CharField() class Artista(MySQLModel): idArtista = PrimaryKeyField() nombre = CharField() biografia = CharField() correoElectronico = CharField() password = CharField() idGenero = ForeignKeyField(Genero, db_column = "idGenero") class Album(MySQLModel): idAlbum = PrimaryKeyField() nombre = CharField() portada = TextField() fechaLanzamiento = DateField() companiaDiscografica = CharField() idArtista = ForeignKeyField(Artista, db_column = "idArtista") class Cancion(MySQLModel): idCancion = PrimaryKeyField() nombre = CharField() idAlbum = ForeignKeyField(Album, db_column = "idAlbum") idGenero = ForeignKeyField(Genero, db_column = "idGenero") cancion = TextField() duracion = IntegerField() class Playlist(MySQLModel): idPlaylist = PrimaryKeyField() nombre = CharField() portada = TextField() idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano") class CancionesPlaylist(MySQLModel): idPlaylist = ForeignKeyField(Playlist, db_column = "idPlaylist") idCancion = ForeignKeyField(Cancion, db_column = "idCancion") class Calificacion(MySQLModel): idCancion = ForeignKeyField(Cancion, db_column = "idCancion") nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano") calificacion = IntegerField() class CancionPropia(MySQLModel): idCancionPropia = PrimaryKeyField() nombre = CharField() cancion = TextField() nombreUsuario = ForeignKeyField(Melomano, db_column = "idMelomano") class FotoArtista(MySQLModel): idFoto = PrimaryKeyField() foto = TextField() idArtista = ForeignKeyField(Artista, db_column = "idArtista") class Historial(MySQLModel): idHistorial = PrimaryKeyField() idCancion = ForeignKeyField(Cancion, db_column = "idCancion") idMelomano = ForeignKeyField(Melomano, db_column = "idMelomano") @app.route("/") def main(): return jsonify("Musa server. 
versión 1.0") @app.route("/login", methods=["POST"]) def iniciar_sesion(): mensaje = 2 for melomano in Melomano.select(): if (melomano.nombreMelomano == request.form['username']) & (melomano.password == request.form['password']): mensaje = 51 for artista in Artista.select(): if (artista.correoElectronico == request.form['username']) & (artista.password == request.form['password']): mensaje = 52 return jsonify(mensaje) """Melómano WS""" @app.route("/melomano/agregar", methods=["POST"]) def registrar_melomano(): with musa_db.atomic(): try: melomano = Melomano.create( nombreMelomano = request.form['nombreMelomano'], nombre = request.form['nombre'], apellidos = request.form['apellidos'], password = request.form['password'], fotoPerfil = request.form['fotoPerfil'], correoElectronico = request.form['correoElectronico']) mensaje = 4 except IntegrityError: mensaje = 6 return jsonify(mensaje) @app.route("/melomano/recuperar", methods=["POST"]) def recuperarMelomano(): melomano = Melomano.get(Melomano.nombreMelomano == request.form['nombreMelomano']) return jsonify(model_to_dict(melomano)) @app.route("/melomano/actualizar", methods=["POST"]) def actualizar_melomano(): try: melomano = Melomano.select().where(Melomano.idMelomano == request.form["idMelomano"]).get() melomano.nombre = request.form["nombre"] melomano.apellidos = request.form["apellidos"] melomano.password = request.form["password"] melomano.fotoPerfil = request.form["fotoPerfil"] melomano.correoElectronico = request.form["correoElectronico"] melomano.save() mensaje = 18 except IntegrityError: mensaje = 19 return jsonify(mensaje) """Artista WS""" @app.route("/artista/agregar", methods=["POST"]) def agregar_artista(): with musa_db.atomic(): try: artista = Artista.create( nombre = request.form['nombre'], biografia = request.form['biografia'], idGenero = request.form['genero'], correoElectronico = request.form['correoElectronico'], password = request.form['password'] ) mensaje = 12 except IntegrityError: mensaje = 13 return jsonify(mensaje) @app.route("/artista/actualizar", methods=["POST"]) def actualizar_artista(): try: artista = Artista.select().where(Artista.idArtista == request.form["idArtista"]).get() artista.biografia = request.form["biografia"] artista.save() mensaje = 16 except IntegrityError: mensaje = 17 return jsonify(mensaje) @app.route("/artista/recuperarArtista", methods=["POST"]) def recuperar_artista(): artista = Artista.select().where(Artista.correoElectronico == request.form["nombre"]).get() resultado = {"idArtista": artista.idArtista, "nombre": artista.nombre, "biografia": artista.biografia,
return jsonify(resultado) @app.route("/artista/subirFoto", methods=["POST"]) def subir_foto_artista(): with musa_db.atomic(): try: foto = FotoArtista.create( foto = request.form['foto'], idArtista = request.form['idArtista'] ) mensaje = 16 except IntegrityError: mensaje = 17 return jsonify(mensaje) @app.route("/artista/borrarFotos", methods=["DELETE"]) def eliminar_fotos_artista(): try: FotoArtista.delete().where(FotoArtista.idArtista == request.form['idArtista']).execute() mensaje = 16 except IntegrityError: mensaje = 17 return jsonify(mensaje) @app.route("/artista/recuperarFotos", methods=["POST"]) def recuperar_fotos_artista(): query = FotoArtista.select(FotoArtista.foto).where(FotoArtista.idArtista == request.form['idArtista']) lista_foto = [] for foto in query: lista_foto.append(model_to_dict(foto)) return (jsonify(lista_foto)) """Canción WS""" @app.route("/cancion/agregar", methods=["POST"]) def agregar_cancion(): with musa_db.atomic(): try: cancion = Cancion.create( nombre = request.form['nombre'], idAlbum = int (request.form['idAlbum']), idGenero = int (request.form['idGenero']), cancion = request.form['cancion'], duracion = int (request.form['duracion']), ) mensaje = 14 except IntegrityError: mensaje = 15 return jsonify(mensaje) @app.route("/cancion/cancionesArtista", methods=["POST"]) def recuperar_canciones_artista(): query = Cancion.select().join(Album).join(Artista).where(Artista.nombre == request.form["nombreArtista"]) songs = [] for cancion in query: song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre, "album":cancion.idAlbum.nombre, "duracion": cancion.duracion} songs.append(song) return jsonify(songs) @app.route("/cancion/buscar", methods=["POST"]) def buscar_canciones(): query = Cancion.select().join(Album).join(Artista).where(Artista.nombre.contains(request.form["nombre"]) | (Cancion.nombre.contains(request.form["nombre"]) | (Album.nombre.contains(request.form["nombre"])))) songs = [] for cancion in query: song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre, "album":cancion.idAlbum.nombre, "duracion": cancion.duracion} songs.append(song) return jsonify(songs) @app.route("/cancion/recuperarTodas", methods=["GET"]) def recuperar_todas_canciones(): query = Cancion.select().join(Album).join(Artista) songs = [] for cancion in query: song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre, "album":cancion.idAlbum.nombre, "duracion": cancion.duracion} songs.append(song) return jsonify(songs) @app.route("/cancion/actualizarRuta", methods=["POST"]) def actualizar_ruta(): try: ultima = Cancion.select().order_by(Cancion.idCancion.desc()).get() cancion = Cancion.select().where(Cancion.idCancion == ultima.idCancion).get() cancion.cancion = request.form["ruta"] cancion.save() mensaje = 400 except IntegrityError: mensaje = 401 return jsonify(mensaje) @app.route("/cancion/nombreUltimaCancion", methods=["GET"]) def nombre_ultima_cancion(): query = Cancion.select().order_by(Cancion.idCancion.desc()).get() return jsonify(query.nombre) @app.route("/cancion/cancionAPartirDeID", methods=["POST"]) def cancion_ruta_id(): query = Cancion.select().where(Cancion.idCancion == request.form['idCancion']).get(); return jsonify(query.cancion) """Álbum WS""" @app.route("/album/agregar", methods=["POST"]) def agregar_album(): with musa_db.atomic(): try: album = Album.create( nombre = request.form['nombre'], portada = 
request.form['portada'], fechaLanzamiento = request.form['fechaLanzamiento'], companiaDiscografica = request.form['companiaDiscografica'], idArtista = int (request.form['idArtista']) ) mensaje = 10 except IntegrityError: mensaje = 11 return jsonify(mensaje) @app.route("/album/recuperarUltimo", methods=["GET"]) def recuperar_ultimo_album(): query = Album.select().join(Artista).order_by(Album.idAlbum.desc()).get() album = {"idAlbum": query.idAlbum, "nombre": query.nombre, "portada": None, "fechaLanzamiento": None, "companiaDiscografica": None, "idArtista": query.idArtista.idArtista} return jsonify(album) @app.route("/album/deArtista", methods=["POST"]) def recuperar_de_artista(): query = Album.select().where(Album.idArtista == request.form["idArtista"]) albumes = [] for album in query: alb = {"idAlbum": album.idAlbum, "nombre": album.nombre, "portada": album.portada, "fechaLanzamiento": album.fechaLanzamiento, "companiaDiscografica": album.companiaDiscografica, "idArtista": album.idArtista.idArtista} albumes.append(alb) return jsonify(albumes) @app.route("/album/recuperarFoto", methods=["POST"]) def recuperar_foto_album(): query = Album.select().where(Album.nombre == request.form["nombre"]).get() return jsonify(query.portada) """Playlist WS""" @app.route("/playlist/recuperarMelomano", methods=["POST"]) def recuperar_playlist(): listas = Playlist.select().where(Playlist.idMelomano == request.form["idMelomano"]) playlists = [] for lista in listas: oneLista = {"idPlaylist": lista.idPlaylist, "nombre": lista.nombre, "portada": lista.portada} playlists.append(oneLista) return jsonify(playlists) @app.route("/playlist/agregaAPlaylist", methods=["POST"]) def agregar_a_playlist(): with musa_db.atomic(): try: playlist = CancionesPlaylist.create( idPlaylist = request.form['idPlaylist'], idCancion = request.form['idCancion'] ) mensaje = 20 except IntegrityError: mensaje = 21 return jsonify(mensaje) @app.route("/playlist/recuperarCanciones", methods=["POST"]) def recuperar_de_playlist(): canciones = Cancion.select().join(CancionesPlaylist).join(Playlist).where((Playlist.idMelomano == request.form["idMelomano"]) & (Playlist.idPlaylist == request.form["idPlaylist"])) songs = [] for cancion in canciones: song = {"idCancion": cancion.idCancion, "nombre": cancion.nombre, "artista": cancion.idAlbum.idArtista.nombre, "album":cancion.idAlbum.nombre, "duracion": cancion.duracion} songs.append(song) return jsonify(songs) @app.route("/playlist/agregarPlaylist", methods=["POST"]) def agregar_playlist(): with musa_db.atomic(): try: playlist = Playlist.create( nombre = request.form['nombre'], portada = request.form['portada'], idMelomano = request.form['idMelomano'] ) mensaje = 900 except IntegrityError: mensaje = 901 return jsonify(mensaje) """Historial WS""" @app.route("/historial/agregarHistorial", methods=["POST"]) def agregar_historial(): with musa_db.atomic(): try: historial = Historial.create( idCancion = int(request.form['idCancion']), idMelomano = int(request.form['idMelomano']) ) mensaje = 500 except IntegrityError: mensaje = 501 return jsonify(mensaje) @app.route("/historial/consultarMelomano", methods=["POST"]) def consultar_historial(): query = Historial.select().join(Cancion).join(Album).join(Artista).select().where( Historial.idMelomano == request.form["idMelomano"]) songs = [] for cancion in query: song = {"idCancion": cancion.idCancion.idCancion, "nombre": cancion.idCancion.nombre, "artista": cancion.idCancion.idAlbum.idArtista.nombre, "album":cancion.idCancion.idAlbum.nombre, "duracion": 
cancion.idCancion.duracion} songs.append(song) return jsonify(songs) @app.route("/historial/getUltimoHistorial", methods=["GET"]) def ultimo_historial(): query = Historial.select().join(Cancion).order_by(Historial.idHistorial.desc()).get() return jsonify(query.idCancion.cancion) """Género WS""" @app.route("/genero/recuperarGeneros", methods=["GET"]) def recuperar_generos(): generos = [] query_generos = Genero.select() for genero in query_generos: generos.append(model_to_dict(genero)) return jsonify(generos) if __name__ == "__main__": app.run(host = '206.189.124.168', port = '5555', debug = True)
"correoElectronico": artista.correoElectronico, "password": artista.password, "idGenero": artista.idGenero.idGenero}
dom_entity_reference.rs
// This file was generated by gir (https://github.com/gtk-rs/gir) // from gir-files (https://github.com/gtk-rs/gir-files.git) // DO NOT EDIT use crate::DOMEventTarget; use crate::DOMNode; use crate::DOMObject; use std::fmt; glib::wrapper! { pub struct DOMEntityReference(Object<ffi::WebKitDOMEntityReference, ffi::WebKitDOMEntityReferenceClass>) @extends DOMNode, DOMObject, @implements DOMEventTarget; match fn { type_ => || ffi::webkit_dom_entity_reference_get_type(), } } impl DOMEntityReference {} pub const NONE_DOM_ENTITY_REFERENCE: Option<&DOMEntityReference> = None;
}
impl fmt::Display for DOMEntityReference { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.write_str("DOMEntityReference") }
forms.py
from wtforms import Form, TextField, SelectField from wtforms.validators import DataRequired class QueryForm(Form):
search_query = TextField('', validators=[DataRequired()], render_kw={"placeholder": "Your query here"}) search_category = SelectField('Search for', choices=[('pa', 'Paper / Author'), ('p', 'Paper'), ('a', 'Author')])
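QueryForm above is a plain WTForms Form, so it has to be bound to request data explicitly before validation. A minimal sketch of that flow; the MultiDict import assumes Werkzeug is installed (WTForms itself only needs any multidict-style mapping), and the `forms` import assumes the module above is importable under that name.

from werkzeug.datastructures import MultiDict  # assumption: Werkzeug is available
from forms import QueryForm                    # assumption: the record above is importable as forms

data = MultiDict({"search_query": "graph neural networks", "search_category": "pa"})
form = QueryForm(data)

if form.validate():
    print(form.search_query.data, form.search_category.data)
else:
    # DataRequired() flags an empty search_query; SelectField rejects unknown categories.
    print(form.errors)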
needless_borrowed_ref.rs
//! Checks for useless borrowed references. //! //! This lint is **warn** by default use crate::utils::{in_macro, snippet, span_lint_and_then}; use if_chain::if_chain; use rustc::hir::{BindingAnnotation, MutImmutable, Pat, PatKind}; use rustc::lint::{LateContext, LateLintPass, LintArray, LintPass}; use rustc::{declare_tool_lint, lint_array}; use rustc_errors::Applicability; /// **What it does:** Checks for useless borrowed references. /// /// **Why is this bad?** It is mostly useless and make the code look more /// complex than it /// actually is. /// /// **Known problems:** It seems that the `&ref` pattern is sometimes useful. /// For instance in the following snippet: /// ```rust /// enum Animal { /// Cat(u64), /// Dog(u64), /// } /// /// fn foo(a: &Animal, b: &Animal) { /// match (a, b) { /// (&Animal::Cat(v), k) | (k, &Animal::Cat(v)) => (), // lifetime /// mismatch error /// (&Animal::Dog(ref c), &Animal::Dog(_)) => () /// } /// } /// ``` /// There is a lifetime mismatch error for `k` (indeed a and b have distinct /// lifetime). /// This can be fixed by using the `&ref` pattern. /// However, the code can also be fixed by much cleaner ways /// /// **Example:** /// ```rust /// let mut v = Vec::<String>::new(); /// let _ = v.iter_mut().filter(|&ref a| a.is_empty()); /// ``` /// This closure takes a reference on something that has been matched as a /// reference and /// de-referenced. /// As such, it could just be |a| a.is_empty() declare_clippy_lint! { pub NEEDLESS_BORROWED_REFERENCE, complexity, "taking a needless borrowed reference" } #[derive(Copy, Clone)] pub struct NeedlessBorrowedRef; impl LintPass for NeedlessBorrowedRef { fn get_lints(&self) -> LintArray
} impl<'a, 'tcx> LateLintPass<'a, 'tcx> for NeedlessBorrowedRef { fn check_pat(&mut self, cx: &LateContext<'a, 'tcx>, pat: &'tcx Pat) { if in_macro(pat.span) { // OK, simple enough, lints doesn't check in macro. return; } if_chain! { // Only lint immutable refs, because `&mut ref T` may be useful. if let PatKind::Ref(ref sub_pat, MutImmutable) = pat.node; // Check sub_pat got a `ref` keyword (excluding `ref mut`). if let PatKind::Binding(BindingAnnotation::Ref, _, spanned_name, ..) = sub_pat.node; then { span_lint_and_then(cx, NEEDLESS_BORROWED_REFERENCE, pat.span, "this pattern takes a reference on something that is being de-referenced", |db| { let hint = snippet(cx, spanned_name.span, "..").into_owned(); db.span_suggestion_with_applicability( pat.span, "try removing the `&ref` part and just keep", hint, Applicability::MachineApplicable, // snippet ); }); } } } }
{ lint_array!(NEEDLESS_BORROWED_REFERENCE) }
profileReducer.js
import { GET_PROFILE, PROFILE_LOADING, PROFILE_NOT_FOUND, CLEAR_CURRENT_PROFILE, GET_PROFILES } from "../actions/types"; const initialState = { profile: null, profiles: null, loading: false };
case PROFILE_LOADING: return { ...state, loading: true }; case GET_PROFILE: return { ...state, profile: action.payload, loading: false }; case CLEAR_CURRENT_PROFILE: return { ...state, profile: null }; case GET_PROFILES: return { ...state, profiles: action.payload, loading: false }; default: return state; } }
export default function(state = initialState, action) { switch (action.type) {
main_test.go
package main // This file is mandatory as otherwise the buildpipelinebeat.test binary is not generated correctly. import ( "flag" "testing" "github.com/regiocom/buildpipelinebeat/cmd" ) var systemTest *bool func init() { testing.Init() systemTest = flag.Bool("systemTest", false, "Set to true when running system tests") cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("systemTest")) cmd.RootCmd.PersistentFlags().AddGoFlag(flag.CommandLine.Lookup("test.coverprofile")) } // Test started when the test binary is started. Only calls main. func
(t *testing.T) { if *systemTest { main() } }
TestSystem
strict-boolean-expressions.test.ts
import rule, { Options, MessageId, } from '../../src/rules/strict-boolean-expressions'; import { batchedSingleLineTests, getFixturesRootDir, RuleTester, noFormat, } from '../RuleTester'; const ruleTester = new RuleTester({ parser: '@typescript-eslint/parser', parserOptions: { tsconfigRootDir: getFixturesRootDir(), project: './tsconfig.json', }, }); ruleTester.run('strict-boolean-expressions', rule, { valid: [ // boolean in boolean context ...batchedSingleLineTests<Options>({ code: noFormat` true ? "a" : "b"; if (false) {} while (true) {} for (; false;) {} !true; false || 123; true && "foo"; !(false || true); true && false ? true : false; false && true || false; false && true || []; (false && 1) || (true && 2); declare const x: boolean; if (x) {} (x: boolean) => !x; <T extends boolean>(x: T) => x ? 1 : 0; declare const x: never; if (x) {} `, }), // string in boolean context ...batchedSingleLineTests<Options>({ code: noFormat` if ("") {} while ("x") {} for (; "";) {} "" && "1" || x; declare const x: string; if (x) {} (x: string) => !x; <T extends string>(x: T) => x ? 1 : 0; `, }), // number in boolean context ...batchedSingleLineTests<Options>({ code: noFormat` if (0) {} while (1n) {} for (; Infinity;) {} 0 / 0 && 1 + 2 || x; declare const x: number; if (x) {} (x: bigint) => !x; <T extends number>(x: T) => x ? 1 : 0; `, }), // nullable object in boolean context ...batchedSingleLineTests<Options>({ code: noFormat` declare const x: null | object; if (x) {} (x?: { a: any }) => !x; <T extends {} | null | undefined>(x: T) => x ? 1 : 0; `, }), // nullable boolean in boolean context ...batchedSingleLineTests<Options>({ options: [{ allowNullableBoolean: true }], code: noFormat` declare const x: boolean | null; if (x) {} (x?: boolean) => !x; <T extends boolean | null | undefined>(x: T) => x ? 1 : 0; `, }), // nullable string in boolean context ...batchedSingleLineTests<Options>({ options: [{ allowNullableString: true }], code: noFormat` declare const x: string | null; if (x) {} (x?: string) => !x; <T extends string | null | undefined>(x: T) => x ? 1 : 0; `, }), // nullable number in boolean context ...batchedSingleLineTests<Options>({ options: [{ allowNullableNumber: true }], code: noFormat` declare const x: number | null; if (x) {} (x?: number) => !x; <T extends number | null | undefined>(x: T) => x ? 1 : 0; `, }), // any in boolean context ...batchedSingleLineTests<Options>({ options: [{ allowAny: true }], code: noFormat` declare const x: any; if (x) {} (x) => !x; <T extends any>(x: T) => x ? 1 : 0; `, }), ], invalid: [ // non-boolean in RHS of test expression ...batchedSingleLineTests<MessageId, Options>({ options: [ { allowString: false, allowNumber: false, allowNullableObject: false }, ], code: noFormat` if (true && 1) {} while (false || "a") {} (x: object) => true || false || x ? 
true : false; `, errors: [ { messageId: 'conditionErrorNumber', line: 2, column: 13 }, { messageId: 'conditionErrorString', line: 3, column: 25 }, { messageId: 'conditionErrorObject', line: 4, column: 41 }, ], }), // check if all and only the outermost operands are checked { options: [ { allowString: false, allowNumber: false, allowNullableObject: false }, ], code: ` if (('' && {}) || (0 && void 0)) { } `, errors: [ { messageId: 'conditionErrorString', line: 2, column: 14 }, { messageId: 'conditionErrorObject', line: 2, column: 20 }, { messageId: 'conditionErrorNumber', line: 2, column: 28 }, { messageId: 'conditionErrorNullish', line: 2, column: 33 }, ], }, // nullish in boolean context ...batchedSingleLineTests<MessageId, Options>({ code: noFormat` null || {}; undefined && []; declare const x: null; if (x) {} (x: undefined) => !x; <T extends null | undefined>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorNullish', line: 2, column: 1 }, { messageId: 'conditionErrorNullish', line: 3, column: 9 }, { messageId: 'conditionErrorNullish', line: 4, column: 36 }, { messageId: 'conditionErrorNullish', line: 5, column: 28 }, { messageId: 'conditionErrorNullish', line: 6, column: 47 }, ], }), // object in boolean context ...batchedSingleLineTests<MessageId, Options>({ code: noFormat` [] || 1; ({}) && "a";
declare const x: symbol; if (x) {} (x: () => void) => !x; <T extends object>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorObject', line: 2, column: 1 }, { messageId: 'conditionErrorObject', line: 3, column: 10 }, { messageId: 'conditionErrorObject', line: 4, column: 38 }, { messageId: 'conditionErrorObject', line: 5, column: 29 }, { messageId: 'conditionErrorObject', line: 6, column: 37 }, ], }), // string in boolean context ...batchedSingleLineTests<MessageId, Options>({ options: [{ allowString: false }], code: noFormat` while ("") {} for (; "foo";) {} declare const x: string; if (x) {} (x: string) => !x; <T extends string>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorString', line: 2, column: 8 }, { messageId: 'conditionErrorString', line: 3, column: 16 }, { messageId: 'conditionErrorString', line: 4, column: 38 }, { messageId: 'conditionErrorString', line: 5, column: 25 }, { messageId: 'conditionErrorString', line: 6, column: 37 }, ], }), // number in boolean context ...batchedSingleLineTests<MessageId, Options>({ options: [{ allowNumber: false }], code: noFormat` while (0n) {} for (; 123;) {} declare const x: number; if (x) {} (x: bigint) => !x; <T extends number>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorNumber', line: 2, column: 8 }, { messageId: 'conditionErrorNumber', line: 3, column: 16 }, { messageId: 'conditionErrorNumber', line: 4, column: 38 }, { messageId: 'conditionErrorNumber', line: 5, column: 25 }, { messageId: 'conditionErrorNumber', line: 6, column: 37 }, ], }), // mixed `string | number` value in boolean context ...batchedSingleLineTests<MessageId, Options>({ options: [{ allowString: true, allowNumber: true }], code: noFormat` declare const x: string | number; if (x) {} (x: bigint | string) => !x; <T extends number | bigint | string>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorOther', line: 2, column: 39 }, { messageId: 'conditionErrorOther', line: 3, column: 34 }, { messageId: 'conditionErrorOther', line: 4, column: 55 }, ], }), // nullable boolean in boolean context ...batchedSingleLineTests<MessageId, Options>({ options: [{ allowNullableBoolean: false }], code: noFormat` declare const x: boolean | null; if (x) {} (x?: boolean) => !x; <T extends boolean | null | undefined>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorNullableBoolean', line: 2, column: 38 }, { messageId: 'conditionErrorNullableBoolean', line: 3, column: 27 }, { messageId: 'conditionErrorNullableBoolean', line: 4, column: 57 }, ], }), // nullable object in boolean context ...batchedSingleLineTests<MessageId, Options>({ options: [{ allowNullableObject: false }], code: noFormat` declare const x: object | null; if (x) {} (x?: { a: number }) => !x; <T extends {} | null | undefined>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorNullableObject', line: 2, column: 37 }, { messageId: 'conditionErrorNullableObject', line: 3, column: 33 }, { messageId: 'conditionErrorNullableObject', line: 4, column: 52 }, ], }), // nullable string in boolean context ...batchedSingleLineTests<MessageId, Options>({ code: noFormat` declare const x: string | null; if (x) {} (x?: string) => !x; <T extends string | null | undefined>(x: T) => x ? 
1 : 0; `, errors: [ { messageId: 'conditionErrorNullableString', line: 2, column: 37 }, { messageId: 'conditionErrorNullableString', line: 3, column: 26 }, { messageId: 'conditionErrorNullableString', line: 4, column: 56 }, ], }), // nullable number in boolean context ...batchedSingleLineTests<MessageId, Options>({ code: noFormat` declare const x: number | null; if (x) {} (x?: number) => !x; <T extends number | null | undefined>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorNullableNumber', line: 2, column: 37 }, { messageId: 'conditionErrorNullableNumber', line: 3, column: 26 }, { messageId: 'conditionErrorNullableNumber', line: 4, column: 56 }, ], }), // any in boolean context // TODO: when `T` is not `extends any` then the error is `conditionErrorObject` (says it's always truthy, which is false) ...batchedSingleLineTests<MessageId, Options>({ code: noFormat` if (x) {} x => !x; <T extends any>(x: T) => x ? 1 : 0; `, errors: [ { messageId: 'conditionErrorAny', line: 2, column: 5 }, { messageId: 'conditionErrorAny', line: 3, column: 15 }, { messageId: 'conditionErrorAny', line: 4, column: 34 }, ], }), ], });
c_form_control_BorderWidth.js
(function (global, factory) { if (typeof define === "function" && define.amd) { define(["exports"], factory); } else if (typeof exports !== "undefined") { factory(exports); } else { var mod = { exports: {} }; factory(mod.exports); global.undefined = mod.exports; } })(this, function (exports) { "use strict"; Object.defineProperty(exports, "__esModule", { value: true }); exports.default = { "name": "--pf-c-form-control--BorderWidth", "value": "1px", "var": "var(--pf-c-form-control--BorderWidth)"
}; });
auto-resize.module.ts
import { NgModule } from '@angular/core'; import { AutoResizeDirective } from './auto-resize'; @NgModule({ declarations: [ AutoResizeDirective ], imports: [ ], exports: [ AutoResizeDirective ] }) export class
{ }
AutoResizeDirectiveModule
expressroutegateways.go
package network // Copyright (c) Microsoft and contributors. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // // See the License for the specific language governing permissions and // limitations under the License. // // Code generated by Microsoft (R) AutoRest Code Generator. // Changes may cause incorrect behavior and will be lost if the code is regenerated. import ( "context" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/validation" "github.com/Azure/go-autorest/tracing" "net/http" ) // ExpressRouteGatewaysClient is the network Client type ExpressRouteGatewaysClient struct { BaseClient } // NewExpressRouteGatewaysClient creates an instance of the ExpressRouteGatewaysClient client. func NewExpressRouteGatewaysClient(subscriptionID string) ExpressRouteGatewaysClient { return NewExpressRouteGatewaysClientWithBaseURI(DefaultBaseURI, subscriptionID) } // NewExpressRouteGatewaysClientWithBaseURI creates an instance of the ExpressRouteGatewaysClient client using a custom // endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI (sovereign clouds, Azure // stack). func NewExpressRouteGatewaysClientWithBaseURI(baseURI string, subscriptionID string) ExpressRouteGatewaysClient { return ExpressRouteGatewaysClient{NewWithBaseURI(baseURI, subscriptionID)} } // CreateOrUpdate creates or updates a ExpressRoute gateway in a specified resource group. // Parameters: // resourceGroupName - the name of the resource group. // expressRouteGatewayName - the name of the ExpressRoute gateway. // putExpressRouteGatewayParameters - parameters required in an ExpressRoute gateway PUT operation. 
func (client ExpressRouteGatewaysClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, putExpressRouteGatewayParameters ExpressRouteGateway) (result ExpressRouteGatewaysCreateOrUpdateFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.CreateOrUpdate") defer func() { sc := -1 if result.Response() != nil { sc = result.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } if err := validation.Validate([]validation.Validation{ {TargetValue: putExpressRouteGatewayParameters, Constraints: []validation.Constraint{{Target: "putExpressRouteGatewayParameters.ExpressRouteGatewayProperties", Name: validation.Null, Rule: false, Chain: []validation.Constraint{{Target: "putExpressRouteGatewayParameters.ExpressRouteGatewayProperties.VirtualHub", Name: validation.Null, Rule: true, Chain: nil}}}}}}); err != nil { return result, validation.NewError("network.ExpressRouteGatewaysClient", "CreateOrUpdate", err.Error()) } req, err := client.CreateOrUpdatePreparer(ctx, resourceGroupName, expressRouteGatewayName, putExpressRouteGatewayParameters) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "CreateOrUpdate", nil, "Failure preparing request") return } result, err = client.CreateOrUpdateSender(req) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "CreateOrUpdate", result.Response(), "Failure sending request") return } return } // CreateOrUpdatePreparer prepares the CreateOrUpdate request. func (client ExpressRouteGatewaysClient) CreateOrUpdatePreparer(ctx context.Context, resourceGroupName string, expressRouteGatewayName string, putExpressRouteGatewayParameters ExpressRouteGateway) (*http.Request, error) { pathParameters := map[string]interface{}{ "expressRouteGatewayName": autorest.Encode("path", expressRouteGatewayName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-11-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } putExpressRouteGatewayParameters.Etag = nil preparer := autorest.CreatePreparer( autorest.AsContentType("application/json; charset=utf-8"), autorest.AsPut(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}", pathParameters), autorest.WithJSON(putExpressRouteGatewayParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // CreateOrUpdateSender sends the CreateOrUpdate request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteGatewaysClient) CreateOrUpdateSender(req *http.Request) (future ExpressRouteGatewaysCreateOrUpdateFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } future.Future, err = azure.NewFutureFromResponse(resp) return } // CreateOrUpdateResponder handles the response to the CreateOrUpdate request. The method always // closes the http.Response Body. 
func (client ExpressRouteGatewaysClient) CreateOrUpdateResponder(resp *http.Response) (result ExpressRouteGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // Delete deletes the specified ExpressRoute gateway in a resource group. An ExpressRoute gateway resource can only be // deleted when there are no connection subresources. // Parameters: // resourceGroupName - the name of the resource group. // expressRouteGatewayName - the name of the ExpressRoute gateway. func (client ExpressRouteGatewaysClient) Delete(ctx context.Context, resourceGroupName string, expressRouteGatewayName string) (result ExpressRouteGatewaysDeleteFuture, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.Delete") defer func() { sc := -1 if result.Response() != nil { sc = result.Response().StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.DeletePreparer(ctx, resourceGroupName, expressRouteGatewayName) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Delete", nil, "Failure preparing request") return } result, err = client.DeleteSender(req) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Delete", result.Response(), "Failure sending request") return } return } // DeletePreparer prepares the Delete request. func (client ExpressRouteGatewaysClient) DeletePreparer(ctx context.Context, resourceGroupName string, expressRouteGatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "expressRouteGatewayName": autorest.Encode("path", expressRouteGatewayName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), }
"api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsDelete(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // DeleteSender sends the Delete request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteGatewaysClient) DeleteSender(req *http.Request) (future ExpressRouteGatewaysDeleteFuture, err error) { var resp *http.Response resp, err = client.Send(req, azure.DoRetryWithRegistration(client.Client)) if err != nil { return } future.Future, err = azure.NewFutureFromResponse(resp) return } // DeleteResponder handles the response to the Delete request. The method always // closes the http.Response Body. func (client ExpressRouteGatewaysClient) DeleteResponder(resp *http.Response) (result autorest.Response, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent), autorest.ByClosing()) result.Response = resp return } // Get fetches the details of a ExpressRoute gateway in a resource group. // Parameters: // resourceGroupName - the name of the resource group. // expressRouteGatewayName - the name of the ExpressRoute gateway. func (client ExpressRouteGatewaysClient) Get(ctx context.Context, resourceGroupName string, expressRouteGatewayName string) (result ExpressRouteGateway, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.Get") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.GetPreparer(ctx, resourceGroupName, expressRouteGatewayName) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Get", nil, "Failure preparing request") return } resp, err := client.GetSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Get", resp, "Failure sending request") return } result, err = client.GetResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "Get", resp, "Failure responding to request") } return } // GetPreparer prepares the Get request. func (client ExpressRouteGatewaysClient) GetPreparer(ctx context.Context, resourceGroupName string, expressRouteGatewayName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "expressRouteGatewayName": autorest.Encode("path", expressRouteGatewayName), "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-11-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways/{expressRouteGatewayName}", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // GetSender sends the Get request. 
The method will close the // http.Response Body if it receives an error. func (client ExpressRouteGatewaysClient) GetSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // GetResponder handles the response to the Get request. The method always // closes the http.Response Body. func (client ExpressRouteGatewaysClient) GetResponder(resp *http.Response) (result ExpressRouteGateway, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListByResourceGroup lists ExpressRoute gateways in a given resource group. // Parameters: // resourceGroupName - the name of the resource group. func (client ExpressRouteGatewaysClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ExpressRouteGatewayList, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.ListByResourceGroup") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListByResourceGroup", nil, "Failure preparing request") return } resp, err := client.ListByResourceGroupSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListByResourceGroup", resp, "Failure sending request") return } result, err = client.ListByResourceGroupResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListByResourceGroup", resp, "Failure responding to request") } return } // ListByResourceGroupPreparer prepares the ListByResourceGroup request. func (client ExpressRouteGatewaysClient) ListByResourceGroupPreparer(ctx context.Context, resourceGroupName string) (*http.Request, error) { pathParameters := map[string]interface{}{ "resourceGroupName": autorest.Encode("path", resourceGroupName), "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-11-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/expressRouteGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListByResourceGroupSender sends the ListByResourceGroup request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteGatewaysClient) ListByResourceGroupSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListByResourceGroupResponder handles the response to the ListByResourceGroup request. The method always // closes the http.Response Body. 
func (client ExpressRouteGatewaysClient) ListByResourceGroupResponder(resp *http.Response) (result ExpressRouteGatewayList, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return } // ListBySubscription lists ExpressRoute gateways under a given subscription. func (client ExpressRouteGatewaysClient) ListBySubscription(ctx context.Context) (result ExpressRouteGatewayList, err error) { if tracing.IsEnabled() { ctx = tracing.StartSpan(ctx, fqdn+"/ExpressRouteGatewaysClient.ListBySubscription") defer func() { sc := -1 if result.Response.Response != nil { sc = result.Response.Response.StatusCode } tracing.EndSpan(ctx, sc, err) }() } req, err := client.ListBySubscriptionPreparer(ctx) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListBySubscription", nil, "Failure preparing request") return } resp, err := client.ListBySubscriptionSender(req) if err != nil { result.Response = autorest.Response{Response: resp} err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListBySubscription", resp, "Failure sending request") return } result, err = client.ListBySubscriptionResponder(resp) if err != nil { err = autorest.NewErrorWithError(err, "network.ExpressRouteGatewaysClient", "ListBySubscription", resp, "Failure responding to request") } return } // ListBySubscriptionPreparer prepares the ListBySubscription request. func (client ExpressRouteGatewaysClient) ListBySubscriptionPreparer(ctx context.Context) (*http.Request, error) { pathParameters := map[string]interface{}{ "subscriptionId": autorest.Encode("path", client.SubscriptionID), } const APIVersion = "2019-11-01" queryParameters := map[string]interface{}{ "api-version": APIVersion, } preparer := autorest.CreatePreparer( autorest.AsGet(), autorest.WithBaseURL(client.BaseURI), autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Network/expressRouteGateways", pathParameters), autorest.WithQueryParameters(queryParameters)) return preparer.Prepare((&http.Request{}).WithContext(ctx)) } // ListBySubscriptionSender sends the ListBySubscription request. The method will close the // http.Response Body if it receives an error. func (client ExpressRouteGatewaysClient) ListBySubscriptionSender(req *http.Request) (*http.Response, error) { return client.Send(req, azure.DoRetryWithRegistration(client.Client)) } // ListBySubscriptionResponder handles the response to the ListBySubscription request. The method always // closes the http.Response Body. func (client ExpressRouteGatewaysClient) ListBySubscriptionResponder(resp *http.Response) (result ExpressRouteGatewayList, err error) { err = autorest.Respond( resp, client.ByInspecting(), azure.WithErrorUnlessStatusCode(http.StatusOK), autorest.ByUnmarshallingJSON(&result), autorest.ByClosing()) result.Response = autorest.Response{Response: resp} return }
const APIVersion = "2019-11-01" queryParameters := map[string]interface{}{
main.go
package bramble import ( "context" "flag" "net/http" "os" "os/signal" "sync" "time" log "github.com/sirupsen/logrus" ) // Main runs the gateway. This function is exported so that it can be reused // when building Bramble with custom plugins. func
() { var configFiles arrayFlags flag.Var(&configFiles, "conf", "Config file (can appear multiple times)") flag.Parse() log.SetFormatter(&log.JSONFormatter{TimestampFormat: time.RFC3339Nano}) cfg, err := GetConfig(configFiles) if err != nil { log.WithError(err).Fatal("failed to get config") } go cfg.Watch() err = cfg.Init() if err != nil { log.WithError(err).Fatal("failed to configure") } log.WithField("config", cfg).Debug("configuration") gtw := NewGateway(cfg.executableSchema, cfg.plugins) RegisterMetrics() go gtw.UpdateSchemas(cfg.PollIntervalDuration) signalChan := make(chan os.Signal) signal.Notify(signalChan, os.Interrupt) ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { <-signalChan log.Info("received shutdown signal") cancel() }() var wg sync.WaitGroup wg.Add(3) go runHandler(ctx, &wg, "metrics", cfg.MetricAddress(), NewMetricsHandler()) go runHandler(ctx, &wg, "private", cfg.PrivateAddress(), gtw.PrivateRouter()) go runHandler(ctx, &wg, "public", cfg.GatewayAddress(), gtw.Router()) wg.Wait() } func runHandler(ctx context.Context, wg *sync.WaitGroup, name, addr string, handler http.Handler) { srv := &http.Server{ Addr: addr, Handler: handler, } go func() { log.WithField("addr", addr).Infof("serving %s handler", name) if err := srv.ListenAndServe(); err != http.ErrServerClosed { log.WithError(err).Fatal("server terminated unexpectedly") } }() <-ctx.Done() timeoutCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() log.Infof("shutting down %s handler", name) err := srv.Shutdown(timeoutCtx) if err != nil { log.WithError(err).Error("error shutting down server") } log.Infof("shut down %s handler", name) wg.Done() }
Main
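The doc comment on Main notes that it is exported so the gateway can be rebuilt with custom plugins. A minimal sketch of that reuse from a separate binary follows; the import path is an assumption (use the actual module path of this package), and plugin registration is left out because its API is not shown in this file.

package main

// Hypothetical custom entry point that reuses the exported gateway runner.
import "github.com/movio/bramble"

func main() {
	// Main parses the -conf flags, loads and watches the config, and serves
	// the metrics, private, and public handlers until an interrupt arrives.
	bramble.Main()
}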
fetch.py
r"""Initiate an acquisition and fetch a waveform for each specified channel. The gRPC API is built from the C API. NI-SCOPE documentation is installed with the driver at: C:\Program Files (x86)\IVI Foundation\IVI\Drivers\niScope\Documentation\English\Digitizers.chm A version of this .chm is available online at: https://zone.ni.com/reference/en-XX/help/370592AB-01/ Getting Started: To run this example, install "NI-SCOPE Driver" on the server machine: https://www.ni.com/en-us/support/downloads/drivers/download.ni-scope.html For instructions on how to use protoc to generate gRPC client interfaces, see our "Creating a gRPC Client" wiki page: https://github.com/ni/grpc-device/wiki/Creating-a-gRPC-Client Refer to the NI-SCOPE gRPC Wiki to determine the valid channel and resource names for your NI-SCOPE module: https://github.com/ni/grpc-device/wiki/NI-SCOPE-C-Function-Reference Running from command line: Server machine's IP address, port number, and resource name can be passed as separate command line arguments. > python fetch.py <server_address> <port_number> <resource_name> If they are not passed in as command line arguments, then by default the server address will be "localhost:31763", with "SimulatedScope" as the resource name. """ import sys import grpc import niscope_pb2 as niscope_types import niscope_pb2_grpc as grpc_niscope SERVER_ADDRESS = "localhost" SERVER_PORT = "31763" # Resource name and options for a simulated 5164 client. Change them according to the NI-SCOPE # model. RESOURCE = "SimulatedScope" OPTIONS = "Simulate=1, DriverSetup=Model:5164; BoardType:PXIe"
# Read in cmd args if len(sys.argv) >= 2: SERVER_ADDRESS = sys.argv[1] if len(sys.argv) >= 3: SERVER_PORT = sys.argv[2] if len(sys.argv) >= 4: RESOURCE = sys.argv[3] OPTIONS = "" # Create the communication channel for the remote host and create a connection to the NI-SCOPE # service. channel = grpc.insecure_channel(f"{SERVER_ADDRESS}:{SERVER_PORT}") client = grpc_niscope.NiScopeStub(channel) def check_for_error(vi, status): """Raise an exception if the status indicates an error.""" if status != 0: error_message_response = client.ErrorMessage( niscope_types.ErrorMessageRequest(vi=vi, error_code=status) ) raise Exception(error_message_response.error_message) def check_for_initialization_error(response): """Raise an exception if an error was returned from Initialize.""" if response.status < 0: raise RuntimeError(f"Error: {response.error_message or response.status}") if response.status > 0: sys.stderr.write(f"Warning: {response.error_message or response.status}\n") try: # Open session to NI-SCOPE module with options. init_with_options_response = client.InitWithOptions( niscope_types.InitWithOptionsRequest( resource_name=RESOURCE, id_query=False, option_string=OPTIONS ) ) vi = init_with_options_response.vi check_for_initialization_error(init_with_options_response) # Configure vertical. voltage = 10.0 check_for_error( vi, ( client.ConfigureVertical( niscope_types.ConfigureVerticalRequest( vi=vi, channel_list=CHANNELS, range=voltage, offset=0.0, coupling=niscope_types.VerticalCoupling.VERTICAL_COUPLING_NISCOPE_VAL_AC, probe_attenuation=1.0, enabled=True, ) ) ).status, ) # Configure horizontal timing. samples = 1000 check_for_error( vi, ( client.ConfigureHorizontalTiming( niscope_types.ConfigureHorizontalTimingRequest( vi=vi, min_sample_rate=50000000, min_num_pts=samples, ref_position=50.0, num_records=1, enforce_realtime=True, ) ) ).status, ) # Initiate acquisition. check_for_error( vi, (client.InitiateAcquisition(niscope_types.InitiateAcquisitionRequest(vi=vi))).status ) # Fetch waveforms. fetch_response = client.Fetch( niscope_types.FetchRequest(vi=vi, channel_list=CHANNELS, timeout=10000, num_samples=samples) ) check_for_error(vi, fetch_response.status) waveforms = fetch_response.waveform # Print waveform results. for i in range(len(waveforms)): print(f"Waveform {i} information:") print(f"{waveforms[i]}\n") # If NI-SCOPE API throws an exception, print the error message. except grpc.RpcError as rpc_error: error_message = rpc_error.details() if rpc_error.code() == grpc.StatusCode.UNAVAILABLE: error_message = f"Failed to connect to server on {SERVER_ADDRESS}:{SERVER_PORT}" elif rpc_error.code() == grpc.StatusCode.UNIMPLEMENTED: error_message = ( "The operation is not implemented or is not supported/enabled in this service" ) print(f"{error_message}") finally: if "vi" in vars() and vi.id != 0: # close the session. check_for_error(vi, (client.Close(niscope_types.CloseRequest(vi=vi))).status)
CHANNELS = "0"
credential.rs
mod attribute; mod attribute_schema; mod attribute_type; #[cfg(feature = "std")] mod blinding; mod error; #[cfg(feature = "std")] mod holder; #[cfg(feature = "std")] mod issuer; mod presentation; mod presentation_manifest; mod request; mod schema; mod util; #[cfg(feature = "std")] mod verifier; pub use attribute::*; pub use attribute_schema::*; pub use attribute_type::*; #[cfg(feature = "std")] pub use blinding::*; pub use error::*; #[cfg(feature = "std")] pub use holder::*; #[cfg(feature = "std")] pub use issuer::*; pub use presentation::*; pub use presentation_manifest::*; pub use request::*; pub use schema::*; use util::*; #[cfg(feature = "std")] pub use verifier::*; use serde::{Deserialize, Serialize}; /// A credential offer is how an issuer informs a potential holder that /// a credential is available to them #[derive(Debug, Serialize, Deserialize)] pub struct CredentialOffer { /// The credential offer id is a cryptographic nonce, this must never repeat pub id: [u8; 32], /// The schema for the credential that the issuer is offering to sign pub schema: CredentialSchema, } /// A credential that can be presented #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Credential { /// The signed attributes in the credential pub attributes: Vec<CredentialAttribute>, /// The cryptographic signature pub signature: bbs::prelude::Signature, } /// A blind credential that will be unblinded by the holder #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlindCredential { /// The signed attributes in the credential pub attributes: Vec<CredentialAttribute>, /// The cryptographic signature pub signature: bbs::prelude::BlindSignature, } #[cfg(test)] mod tests { use super::*; use ockam_core::lib::*; fn
() -> CredentialSchema { let attribute = CredentialAttributeSchema { label: String::from("test_attr"), description: String::from("test attribute"), attribute_type: CredentialAttributeType::Utf8String, }; let attributes = [attribute].to_vec(); CredentialSchema { id: String::from("test_id"), label: String::from("test_label"), description: String::from("test_desc"), attributes, } } #[test] fn test_schema_creation() { let _schema = create_test_schema(); } #[test] fn test_schema_serialization() { let mut schema = create_test_schema(); if let Ok(serialized) = serde_json::to_string(&schema) { assert!(serialized.contains("test_id")); assert!(serialized.contains("test_label")); assert!(serialized.contains("test_desc")); assert!(serialized.contains("test_attr")); assert!(serialized.contains("test attribute")); if let Ok(mut rehydrated) = serde_json::from_str::<CredentialSchema>(&serialized) { assert_eq!(rehydrated.id, schema.id); assert_eq!(rehydrated.label, schema.label); assert_eq!(rehydrated.description, schema.description); assert_eq!(rehydrated.attributes.len(), schema.attributes.len()); if let Some(schema_attr) = schema.attributes.pop() { if let Some(rehydrated_attr) = rehydrated.attributes.pop() { assert_eq!(schema_attr.attribute_type, rehydrated_attr.attribute_type); assert_eq!(schema_attr.label, rehydrated_attr.label); assert_eq!(schema_attr.description, rehydrated_attr.description); } else { panic!("Missing rehydrated attribute") } } else { panic!("Missing Schema attribute") } } } else { panic!("Couldn't serialize Schema") } } #[cfg(feature = "std")] fn get_test_issuance_schema() -> CredentialSchema { CredentialSchema { id: String::from("test_id"), label: String::from("test_label"), description: String::from("test_desc"), attributes: [ CredentialAttributeSchema { label: String::from(SECRET_ID), description: String::from(""), attribute_type: CredentialAttributeType::Blob, }, CredentialAttributeSchema { label: String::from("device-name"), description: String::from(""), attribute_type: CredentialAttributeType::Utf8String, }, CredentialAttributeSchema { label: String::from("manufacturer"), description: String::from(""), attribute_type: CredentialAttributeType::Utf8String, }, CredentialAttributeSchema { label: String::from("issued"), description: String::from("Unix timestamp of datetime issued"), attribute_type: CredentialAttributeType::Number, }, ] .to_vec(), } } #[cfg(feature = "std")] #[test] fn test_proof_of_possession() { let issuer = Issuer::new(); let proof = issuer.create_proof_of_possession(); let pk = issuer.get_public_key(); assert!(Verifier::verify_proof_of_possession(pk, proof)); } #[cfg(feature = "std")] #[test] fn test_credential_issuance() { let schema = get_test_issuance_schema(); let issuer = Issuer::new(); let holder = Holder::new(); let pk = issuer.get_public_key(); let offer = issuer.create_offer(&schema); let res = holder.accept_credential_offer(&offer, pk); assert!(res.is_ok()); let (request, blinding) = res.unwrap(); let mut attributes = BTreeMap::new(); attributes.insert( schema.attributes[1].label.clone(), CredentialAttribute::String(String::from("local-test")), ); attributes.insert( schema.attributes[2].label.clone(), CredentialAttribute::String(String::from("ockam.io")), ); attributes.insert( schema.attributes[3].label.clone(), CredentialAttribute::Numeric( std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs() as i64, ), ); let res = issuer.blind_sign_credential(&request, &schema, &attributes, offer.id); assert!(res.is_ok()); let bc 
= res.unwrap(); let cred = holder.unblind_credential(bc, blinding); assert!(holder.is_valid_credential(&cred, pk)); } #[cfg(feature = "std")] #[test] fn test_credential_presentation() { let schema = get_test_issuance_schema(); let issuer = Issuer::new(); let holder = Holder::new(); let cred = issuer .sign_credential( &schema, &[ CredentialAttribute::Blob(holder.id.to_bytes_compressed_form()), CredentialAttribute::String(String::from("local-test")), CredentialAttribute::String(String::from("ockam.io")), CredentialAttribute::Numeric( std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap() .as_secs() as i64, ), ], ) .unwrap(); let mut manifest = PresentationManifest { credential_schema: schema, public_key: issuer.get_public_key(), revealed: [0, 1].to_vec(), }; let pr_id = Verifier::create_proof_request_id(); let res = holder.present_credentials(&[cred.clone()], &[manifest.clone()], pr_id); assert!(res.is_err()); manifest.revealed = [1].to_vec(); let res = holder.present_credentials(&[cred.clone()], &[manifest.clone()], pr_id); assert!(res.is_ok()); let prez = res.unwrap(); let res = Verifier::verify_credential_presentations(prez.as_slice(), &[manifest], pr_id); assert!(res.is_ok()); } }
create_test_schema
context.rs
//! Module for API context management. //! //! This module defines traits and structs that can be used to manage //! contextual data related to a request, as it is passed through a series of //! hyper services. //! //! See the `context_tests` module below for examples of how to use. use super::XSpanIdString; use auth::{AuthData, Authorization}; use futures::future::Future; use hyper; use std::marker::Sized; /// Defines methods for accessing, modifying, adding and removing the data stored /// in a context. Used to specify the requirements that a hyper service makes on /// a generic context type that it receives with a request, e.g. /// /// ```rust /// # extern crate hyper; /// # extern crate swagger; /// # extern crate futures; /// # /// # use swagger::context::*; /// # use futures::future::{Future, ok}; /// # use std::marker::PhantomData; /// # /// # struct MyItem; /// # fn do_something_with_my_item(item: &MyItem) {} /// # /// struct MyService<C> { /// marker: PhantomData<C>, /// } /// /// impl<C> hyper::server::Service for MyService<C> /// where C: Has<MyItem>, /// { /// type Request = (hyper::Request, C); /// type Response = hyper::Response; /// type Error = hyper::Error; /// type Future = Box<Future<Item=Self::Response, Error=Self::Error>>; /// fn call(&self, (req, context) : Self::Request) -> Self::Future { /// do_something_with_my_item(Has::<MyItem>::get(&context)); /// Box::new(ok(hyper::Response::new())) /// } /// } /// /// # fn main() {} /// ``` pub trait Has<T> { /// Get an immutable reference to the value. fn get(&self) -> &T; /// Get a mutable reference to the value. fn get_mut(&mut self) -> &mut T; /// Set the value. fn set(&mut self, value: T); } /// Defines a method for permanently extracting a value, changing the resulting /// type. Used to specify that a hyper service consumes some data from the context, /// making it unavailable to later layers, e.g. /// /// ```rust /// # extern crate hyper; /// # extern crate swagger; /// # extern crate futures; /// # /// # use swagger::context::*; /// # use futures::future::{Future, ok}; /// # use std::marker::PhantomData; /// # /// struct MyItem1; /// struct MyItem2; /// struct MyItem3; /// /// struct MiddlewareService<T, C> { /// inner: T, /// marker: PhantomData<C>, /// } /// /// impl<T, C, D, E> hyper::server::Service for MiddlewareService<T, C> /// where /// C: Pop<MyItem1, Result=D>, /// D: Pop<MyItem2, Result=E>, /// E: Pop<MyItem3>, /// T: hyper::server::Service<Request = (hyper::Request, E::Result)> /// { /// type Request = (hyper::Request, C); /// type Response = T::Response; /// type Error = T::Error; /// type Future = T::Future; /// fn call(&self, (req, context) : Self::Request) -> Self::Future { /// /// // type annotations optional, included for illustrative purposes /// let (_, context): (MyItem1, D) = context.pop(); /// let (_, context): (MyItem2, E) = context.pop(); /// let (_, context): (MyItem3, E::Result) = context.pop(); /// /// self.inner.call((req, context)) /// } /// } /// /// # fn main() {} pub trait Pop<T> { /// The type that remains after the value has been popped. type Result; /// Extracts a value. fn pop(self) -> (T, Self::Result); } /// Defines a method for inserting a value, changing the resulting /// type. Used to specify that a hyper service adds some data from the context, /// making it available to later layers, e.g. 
/// /// ```rust /// # extern crate hyper; /// # extern crate swagger; /// # extern crate futures; /// # /// # use swagger::context::*; /// # use futures::future::{Future, ok}; /// # use std::marker::PhantomData; /// # /// struct MyItem1; /// struct MyItem2; /// struct MyItem3; /// /// struct MiddlewareService<T, C> { /// inner: T, /// marker: PhantomData<C>, /// } /// /// impl<T, C, D, E> hyper::server::Service for MiddlewareService<T, C> /// where /// C: Push<MyItem1, Result=D>, /// D: Push<MyItem2, Result=E>, /// E: Push<MyItem3>, /// T: hyper::server::Service<Request = (hyper::Request, E::Result)> /// { /// type Request = (hyper::Request, C); /// type Response = T::Response; /// type Error = T::Error; /// type Future = T::Future; /// fn call(&self, (req, context) : Self::Request) -> Self::Future { /// let context = context /// .push(MyItem1{}) /// .push(MyItem2{}) /// .push(MyItem3{}); /// self.inner.call((req, context)) /// } /// } /// /// # fn main() {} pub trait Push<T> { /// The type that results from adding an item. type Result; /// Inserts a value. fn push(self, T) -> Self::Result; } /// Defines a struct that can be used to build up contexts recursively by /// adding one item to the context at a time, and a unit struct representing an /// empty context. The first argument is the name of the newly defined context struct /// that is used to add an item to the context, the second argument is the name of /// the empty context struct, and subsequent arguments are the types /// that can be stored in contexts built using these struct. /// /// A cons list built using the generated context type will implement Has<T> and Pop<T> /// for each type T that appears in the list, provided that the list only /// contains the types that were passed to the macro invocation after the context /// type name. /// /// All list types constructed using the generated types will implement `Push<T>` /// for all `T`, but it should ony be used when `T` is one of the types passed /// to the macro invocation, otherwise it might not be possible to retrieve the /// inserted value. /// /// E.g. /// /// ```rust /// # #[macro_use] extern crate swagger; /// # use swagger::{Has, Pop, Push}; /// /// struct MyType1; /// struct MyType2; /// struct MyType3; /// struct MyType4; /// /// new_context_type!(MyContext, MyEmpContext, MyType1, MyType2, MyType3); /// /// fn use_has_my_type_1<T: Has<MyType1>> (_: &T) {} /// fn use_has_my_type_2<T: Has<MyType2>> (_: &T) {} /// fn use_has_my_type_3<T: Has<MyType3>> (_: &T) {} /// fn use_has_my_type_4<T: Has<MyType4>> (_: &T) {} /// /// // will implement `Has<MyType1>` and `Has<MyType2>` because these appear /// // in the type, and were passed to `new_context_type!`. Will not implement /// // `Has<MyType3>` even though it was passed to `new_context_type!`, because /// // it is not included in the type. /// type ExampleContext = MyContext<MyType1, MyContext<MyType2, MyEmpContext>>; /// /// // Will not implement `Has<MyType4>` even though it appears in the type, /// // because `MyType4` was not passed to `new_context_type!`. 
/// type BadContext = MyContext<MyType1, MyContext<MyType4, MyEmpContext>>; /// /// fn main() { /// let context : ExampleContext = /// MyEmpContext::default() /// .push(MyType2{}) /// .push(MyType1{}); /// /// use_has_my_type_1(&context); /// use_has_my_type_2(&context); /// // use_has_my_type3(&context); // will fail /// /// let bad_context: BadContext = /// MyEmpContext::default() /// .push(MyType4{}) /// .push(MyType1{}); /// // use_has_my_type_4(&bad_context); // will fail /// /// } /// ``` /// /// See the `context_tests` module for more usage examples. #[macro_export] macro_rules! new_context_type { ($context_name:ident, $empty_context_name:ident, $($types:ty),+ ) => { /// Wrapper type for building up contexts recursively, adding one item /// to the context at a time. #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct $context_name<T, C> { head: T, tail: C, } /// Unit struct representing an empty context with no data in it. #[derive(Debug, Clone, Default, PartialEq, Eq)] pub struct $empty_context_name; // implement `Push<T>` on the empty context type for any `T`, so that // items can be added to the context impl<U> $crate::Push<U> for $empty_context_name { type Result = $context_name<U, Self>; fn push(self, item: U) -> Self::Result { $context_name{head: item, tail: Self::default()} } } // implement `Has<T>` for a list where `T` is the type of the head impl<T, C> $crate::Has<T> for $context_name<T, C> { fn set(&mut self, item: T) { self.head = item; } fn get(&self) -> &T { &self.head } fn get_mut(&mut self) -> &mut T { &mut self.head } } // implement `Pop<T>` for a list where `T` is the type of the head impl<T, C> $crate::Pop<T> for $context_name<T, C> { type Result = C; fn pop(self) -> (T, Self::Result) { (self.head, self.tail) } } // implement `Push<U>` for non-empty lists, for all types `U` impl<C, T, U> $crate::Push<U> for $context_name<T, C> { type Result = $context_name<U, Self>; fn push(self, item: U) -> Self::Result { $context_name{head: item, tail: self} } } // Add implementations of `Has<T>` and `Pop<T>` when `T` is any type stored in // the list, not just the head. new_context_type!(impl extend_has $context_name, $empty_context_name, $($types),+); }; // "HELPER" MACRO CASE - NOT FOR EXTERNAL USE // takes a type `Type1` ($head) and a non-empty list of types `Types` ($tail). First calls // another helper macro to define the following impls, for each `Type2` in `Types`: // ``` // impl<C: Has<Type1> Has<Type1> for $context_name<Type2, C> {...} // impl<C: Has<Type2> Has<Type2> for $context_name<Type1, C> {...} // impl<C: Pop<Type1> Pop<Type1> for $context_name<Type2, C> {...} // impl<C: Pop<Type2> Pop<Type2> for $context_name<Type1, C> {...} // ``` // then calls itself again with the rest of the list. The end result is to define the above // impls for all distinct pairs of types in the original list. (impl extend_has $context_name:ident, $empty_context_name:ident, $head:ty, $($tail:ty),+ ) => { // new_context_type!( impl extend_has_helper $context_name, $empty_context_name, $head, $($tail),+ ); new_context_type!(impl extend_has $context_name, $empty_context_name, $($tail),+); }; // "HELPER" MACRO CASE - NOT FOR EXTERNAL USE // base case of the preceding helper macro - was passed an empty list of types, so // we don't need to do anything. (impl extend_has $context_name:ident, $empty_context_name:ident, $head:ty) => {}; // "HELPER" MACRO CASE - NOT FOR EXTERNAL USE // takes a type `Type1` ($type) and a non-empty list of types `Types` ($types). 
For // each `Type2` in `Types`, defines the following impls: // ``` // impl<C: Has<Type1> Has<Type1> for $context_name<Type2, C> {...} // impl<C: Has<Type2> Has<Type2> for $context_name<Type1, C> {...} // impl<C: Pop<Type1> Pop<Type1> for $context_name<Type2, C> {...} // impl<C: Pop<Type2> Pop<Type2> for $context_name<Type1, C> {...} // ``` // (impl extend_has_helper $context_name:ident, $empty_context_name:ident, $type:ty, $($types:ty),+ ) => { $( impl<C: $crate::Has<$type>> $crate::Has<$type> for $context_name<$types, C> { fn set(&mut self, item: $type) { self.tail.set(item); } fn get(&self) -> &$type { self.tail.get() } fn get_mut(&mut self) -> &mut $type { self.tail.get_mut() } } impl<C: $crate::Has<$types>> $crate::Has<$types> for $context_name<$type, C> { fn set(&mut self, item: $types) { self.tail.set(item); } fn get(&self) -> &$types { self.tail.get() } fn get_mut(&mut self) -> &mut $types { self.tail.get_mut() } } impl<C> $crate::Pop<$type> for $context_name<$types, C> where C: Pop<$type> { type Result = $context_name<$types, C::Result>; fn pop(self) -> ($type, Self::Result) { let (value, tail) = self.tail.pop(); (value, $context_name{ head: self.head, tail}) } } impl<C> $crate::Pop<$types> for $context_name<$type, C> where C: Pop<$types> { type Result = $context_name<$type, C::Result>; fn pop(self) -> ($types, Self::Result) { let (value, tail) = self.tail.pop(); (value, $context_name{ head: self.head, tail}) } } )+ }; } /// Create a default context type to export. new_context_type!( ContextBuilder, EmptyContext, XSpanIdString, Option<AuthData>, Option<Authorization> ); /// Macro for easily defining context types. The first argument should be a /// context type created with `new_context_type!` and subsequent arguments are the /// types to be stored in the context, with the outermost first. /// /// ```rust /// # #[macro_use] extern crate swagger; /// # use swagger::{Has, Pop, Push}; /// /// # struct Type1; /// # struct Type2; /// # struct Type3; /// /// # new_context_type!(MyContext, MyEmptyContext, Type1, Type2, Type3); /// /// // the following two types are identical /// type ExampleContext1 = make_context_ty!(MyContext, MyEmptyContext, Type1, Type2, Type3); /// type ExampleContext2 = MyContext<Type1, MyContext<Type2, MyContext<Type3, MyEmptyContext>>>; /// /// // e.g. this wouldn't compile if they were different types /// fn do_nothing(input: ExampleContext1) -> ExampleContext2 { /// input /// } /// /// # fn main() {} /// ``` #[macro_export] macro_rules! make_context_ty { ($context_name:ident, $empty_context_name:ident, $type:ty $(, $types:ty)* $(,)* ) => { $context_name<$type, make_context_ty!($context_name, $empty_context_name, $($types),*)> }; ($context_name:ident, $empty_context_name:ident $(,)* ) => { $empty_context_name }; } /// Macro for easily defining context values. The first argument should be a /// context type created with `new_context_type!` and subsequent arguments are the /// values to be stored in the context, with the outermost first. 
/// /// ```rust /// # #[macro_use] extern crate swagger; /// # use swagger::{Has, Pop, Push}; /// /// # #[derive(PartialEq, Eq, Debug)] /// # struct Type1; /// # #[derive(PartialEq, Eq, Debug)] /// # struct Type2; /// # #[derive(PartialEq, Eq, Debug)] /// # struct Type3; /// /// # new_context_type!(MyContext, MyEmptyContext, Type1, Type2, Type3); /// /// fn main() { /// // the following are equivalent /// let context1 = make_context!(MyContext, MyEmptyContext, Type1 {}, Type2 {}, Type3 {}); /// let context2 = MyEmptyContext::default() /// .push(Type3{}) /// .push(Type2{}) /// .push(Type1{}); /// /// assert_eq!(context1, context2); /// } /// ``` #[macro_export] macro_rules! make_context { ($context_name:ident, $empty_context_name:ident, $value:expr $(, $values:expr)* $(,)*) => { make_context!($context_name, $empty_context_name, $($values),*).push($value) }; ($context_name:ident, $empty_context_name:ident $(,)* ) => { $empty_context_name::default() }; } /// Context wrapper, to bind an API with a context. #[derive(Debug)] pub struct ContextWrapper<'a, T: 'a, C> { api: &'a T, context: C, } impl<'a, T, C> ContextWrapper<'a, T, C> { /// Create a new ContextWrapper, binding the API and context. pub fn new(api: &'a T, context: C) -> ContextWrapper<'a, T, C> { ContextWrapper { api, context } } /// Borrows the API. pub fn api(&self) -> &T { self.api } /// Borrows the context. pub fn context(&self) -> &C { &self.context } } /// Trait to extend an API to make it easy to bind it to a context. pub trait ContextWrapperExt<'a, C> where Self: Sized, { /// Binds this API to a context. fn with_context(self: &'a Self, context: C) -> ContextWrapper<'a, Self, C> { ContextWrapper::<Self, C>::new(self, context) } } /// Trait designed to ensure consistency in context used by swagger middlewares /// /// ```rust /// # extern crate hyper; /// # extern crate swagger; /// # use swagger::context::*; /// # use std::marker::PhantomData; /// # use swagger::auth::{AuthData, Authorization}; /// # use swagger::XSpanIdString; /// /// struct ExampleMiddleware<T, C> { /// inner: T, /// marker: PhantomData<C>, /// } /// /// impl<T, C> hyper::server::Service for ExampleMiddleware<T, C> /// where /// T: SwaggerService<C>, /// C: Has<Option<AuthData>> + /// Has<Option<Authorization>> + /// Has<XSpanIdString> + /// Clone + /// 'static, /// { /// type Request = (hyper::Request, C); /// type Response = T::Response; /// type Error = T::Error; /// type Future = T::Future; /// fn call(&self, (req, context) : Self::Request) -> Self::Future { /// self.inner.call((req, context)) /// } /// } /// ``` pub trait SwaggerService<C>: Clone + hyper::server::Service< Request = (hyper::server::Request, C), Response = hyper::server::Response, Error = hyper::Error, Future = Box<Future<Item = hyper::server::Response, Error = hyper::Error>>, > where C: Has<Option<AuthData>> + Has<Option<Authorization>> + Has<XSpanIdString> + Clone + 'static, { } impl<T, C> SwaggerService<C> for T where T: Clone + hyper::server::Service< Request = (hyper::server::Request, C), Response = hyper::server::Response, Error = hyper::Error, Future = Box<Future<Item = hyper::server::Response, Error = hyper::Error>>, >, C: Has<Option<AuthData>> + Has<Option<Authorization>> + Has<XSpanIdString> + Clone + 'static, { } #[cfg(test)] mod context_tests { use super::*; use futures::future::{ok, Future}; use hyper::server::{NewService, Service}; use hyper::{Error, Method, Request, Response, Uri}; use std::io; use std::marker::PhantomData; use std::str::FromStr; struct ContextItem1; struct 
ContextItem2; struct ContextItem3; fn use_item_1_owned(_: ContextItem1) {} fn use_item_2(_: &ContextItem2) {} fn use_item_3_owned(_: ContextItem3) {} // Example of a "terminating" hyper service using contexts - i.e. doesn't // pass a request and its context on to a wrapped service. struct InnerService<C> where C: Has<ContextItem2> + Pop<ContextItem3>, { marker: PhantomData<C>, } // Use trait bounds to indicate what your service will use from the context. // use `Pop` if you want to take ownership of a value stored in the context, // or `Has` if a reference is enough. impl<C> Service for InnerService<C> where C: Has<ContextItem2> + Pop<ContextItem3>, { type Request = (Request, C); type Response = Response; type Error = Error; type Future = Box<Future<Item = Response, Error = Error>>; fn call(&self, (_, context): Self::Request) -> Self::Future { use_item_2(Has::<ContextItem2>::get(&context)); let (item3, _): (ContextItem3, _) = context.pop(); use_item_3_owned(item3); Box::new(ok(Response::new())) } } struct InnerNewService<C> where C: Has<ContextItem2> + Pop<ContextItem3>, { marker: PhantomData<C>, } impl<C> InnerNewService<C> where C: Has<ContextItem2> + Pop<ContextItem3>, { fn new() -> Self { InnerNewService { marker: PhantomData, } } } impl<C> NewService for InnerNewService<C> where C: Has<ContextItem2> + Pop<ContextItem3>, { type Request = (Request, C); type Response = Response; type Error = Error; type Instance = InnerService<C>; fn new_service(&self) -> Result<Self::Instance, io::Error> { Ok(InnerService { marker: PhantomData, }) } } // Example of a middleware service using contexts, i.e. a hyper service that // processes a request (and its context) and passes it on to another wrapped // service. struct MiddleService<T, C> where C: Pop<ContextItem1>, C::Result: Push<ContextItem2>, <C::Result as Push<ContextItem2>>::Result: Push<ContextItem3>, T: Service< Request = ( Request, <<C::Result as Push<ContextItem2>>::Result as Push<ContextItem3>>::Result, ), >, { inner: T, marker1: PhantomData<C>, } // Use trait bounds to indicate what modifications your service will make // to the context, chaining them as below. 
impl<T, C, D, E> Service for MiddleService<T, C> where C: Pop<ContextItem1, Result = D>, D: Push<ContextItem2, Result = E>, E: Push<ContextItem3>, T: Service<Request = (Request, E::Result)>, { type Request = (Request, C); type Response = T::Response; type Error = T::Error; type Future = T::Future; fn call(&self, (req, context): Self::Request) -> Self::Future { let (item, context) = context.pop(); use_item_1_owned(item); let context = context.push(ContextItem2 {}).push(ContextItem3 {}); self.inner.call((req, context)) } } struct MiddleNewService<T, C> where C: Pop<ContextItem1>, C::Result: Push<ContextItem2>, <C::Result as Push<ContextItem2>>::Result: Push<ContextItem3>, T: NewService< Request = ( Request, <<C::Result as Push<ContextItem2>>::Result as Push<ContextItem3>>::Result, ), >, { inner: T, marker1: PhantomData<C>, } impl<T, C, D, E> NewService for MiddleNewService<T, C> where C: Pop<ContextItem1, Result = D>, D: Push<ContextItem2, Result = E>, E: Push<ContextItem3>, T: NewService<Request = (Request, E::Result)>, { type Request = (Request, C); type Response = T::Response; type Error = T::Error; type Instance = MiddleService<T::Instance, C>; fn new_service(&self) -> Result<Self::Instance, io::Error> { self.inner.new_service().map(|s| MiddleService { inner: s, marker1: PhantomData, }) } } impl<T, C, D, E> MiddleNewService<T, C> where C: Pop<ContextItem1, Result = D>, D: Push<ContextItem2, Result = E>, E: Push<ContextItem3>, T: NewService<Request = (Request, E::Result)>, { fn
(inner: T) -> Self { MiddleNewService { inner, marker1: PhantomData, } } } // Example of a top layer service that creates a context to be used by // lower layers. struct OuterService<T, C> where C: Default + Push<ContextItem1>, T: Service<Request = (Request, C::Result)>, { inner: T, marker: PhantomData<C>, } // Use a `Default` trait bound so that the context can be created. Use // `Push` trait bounds for each type that you will add to the newly // created context. impl<T, C> Service for OuterService<T, C> where C: Default + Push<ContextItem1>, T: Service<Request = (Request, C::Result)>, { type Request = Request; type Response = T::Response; type Error = T::Error; type Future = T::Future; fn call(&self, req: Self::Request) -> Self::Future { let context = C::default().push(ContextItem1 {}); self.inner.call((req, context)) } } struct OuterNewService<T, C> where C: Default + Push<ContextItem1>, T: NewService<Request = (Request, C::Result)>, { inner: T, marker: PhantomData<C>, } impl<T, C> NewService for OuterNewService<T, C> where C: Default + Push<ContextItem1>, T: NewService<Request = (Request, C::Result)>, { type Request = Request; type Response = T::Response; type Error = T::Error; type Instance = OuterService<T::Instance, C>; fn new_service(&self) -> Result<Self::Instance, io::Error> { self.inner.new_service().map(|s| OuterService { inner: s, marker: PhantomData, }) } } impl<T, C> OuterNewService<T, C> where C: Default + Push<ContextItem1>, T: NewService<Request = (Request, C::Result)>, { fn new(inner: T) -> Self { OuterNewService { inner, marker: PhantomData, } } } // Example of use by a service in its main.rs file. At this point you know // all the hyper service layers you will be using, and what requirements // their contexts types have. Use the `new_context_type!` macro to create // a context type and empty context type that are capable of containing all the // types that your hyper services require. new_context_type!( MyContext, MyEmptyContext, ContextItem1, ContextItem2, ContextItem3 ); #[test] fn send_request() { // annotate the outermost service to indicate that the context type it // uses is the empty context type created by the above macro invocation. // the compiler should infer all the other context types. let new_service = OuterNewService::<_, MyEmptyContext>::new(MiddleNewService::new( InnerNewService::new(), )); let req = Request::new(Method::Post, Uri::from_str("127.0.0.1:80").unwrap()); new_service .new_service() .expect("Failed to start new service") .call(req) .wait() .expect("Service::call returned an error"); } }
new
mobilityconfig.py
""" mobility configuration """ from tkinter import ttk from typing import TYPE_CHECKING import grpc from core.gui.dialogs.dialog import Dialog from core.gui.errors import show_grpc_error from core.gui.themes import PADX, PADY from core.gui.widgets import ConfigFrame if TYPE_CHECKING: from core.gui.app import Application from core.gui.graph.node import CanvasNode class MobilityConfigDialog(Dialog): def __init__( self, master: "Application", app: "Application", canvas_node: "CanvasNode" ): super().__init__( master, app, f"{canvas_node.core_node.name} Mobility Configuration", modal=True, ) self.canvas_node = canvas_node self.node = canvas_node.core_node self.config_frame = None self.has_error = False try: self.config = self.app.core.get_mobility_config(self.node.id) self.draw() except grpc.RpcError as e: self.has_error = True show_grpc_error(e, self.app, self.app) self.destroy() def
(self): self.top.columnconfigure(0, weight=1) self.top.rowconfigure(0, weight=1) self.config_frame = ConfigFrame(self.top, self.app, self.config) self.config_frame.draw_config() self.config_frame.grid(sticky="nsew", pady=PADY) self.draw_apply_buttons() def draw_apply_buttons(self): frame = ttk.Frame(self.top) frame.grid(sticky="ew") for i in range(2): frame.columnconfigure(i, weight=1) button = ttk.Button(frame, text="Apply", command=self.click_apply) button.grid(row=0, column=0, padx=PADX, sticky="ew") button = ttk.Button(frame, text="Cancel", command=self.destroy) button.grid(row=0, column=1, sticky="ew") def click_apply(self): self.config_frame.parse_config() self.app.core.mobility_configs[self.node.id] = self.config self.destroy()
draw
self-check-update.component_20190725180206.ts
import { Component, OnInit, Output, EventEmitter } from '@angular/core'; import { ActivatedRoute, Router } from '@angular/router'; import { HttpResponse, HttpErrorResponse } from '@angular/common/http'; import { Observable } from 'rxjs'; import { QmsBomTechnologyComponent } from '../../../popup/bomTechnologySelection/qms-bom-technology.component' import { IQmsQualityControlDetails } from 'app/shared/model/qms-quality-control-details.model'; import { IQmsProductionInspection } from 'app/shared/model/qms-production-inspection.model'; import { IQmsPartsAssemblyRelation } from 'app/shared/model/qms-parts-assembly-relation.model'; import { QmsProductionInspectionValue } from 'app/shared/model/qms-production-inspection-value.model'; import { QmsProductionInspectionSelfService } from './qms-production-inspection.self.service'; import { NgbModal } from '@ng-bootstrap/ng-bootstrap'; import { Message } from 'primeng/components/common/api'; import { isInteger } from '@ng-bootstrap/ng-bootstrap/util/util'; import { isNull } from '@angular/compiler/src/output/output_ast'; @Component({ selector: 'jhi-self-check-update', templateUrl: './self-check-update.component.html', styleUrls: [ './productProcessSelf.scss', '.file.scss' ] }) export class QmsProductionInspectionUpdateSelfComponent implements OnInit { // 生产检验表数据 qmsProductionInspection: any; // 工序质量控制点详细表 qmsQualityControlDetails: any[]; // 工序装配关系表 qmsPartsAssemblyRelation: IQmsPartsAssemblyRelation[]; // 生产检验结果表 qmsProductionInspectionValue: QmsProductionInspectionValue; // 工序质量控制点详细 错误集合 qualityControlDetailsErrorSet = new Set(); isSaving: boolean; // 错误信息 msgs: Message[] = []; // label信息 labelTitle: any; // 管理人 makeUser: any // 不合格品管理表 质量检查项目级别 controlLevel: string; // 检验编号 checkNumber: number; // 合格区分名 isOkName: string; // 文件上传以上 num: any; data: any; content: any; goMaterialSelect: boolean; video = new ArrayBuffer(10240); startMonth: string; startDay: string; startHH: string; startMM: string; startSS: string; showModal: boolean; docHtml: any; digiFiles: any; uploading: boolean; @Output() closeModal = new EventEmitter<boolean>(); predicate: any; previousPage: any; reverse: any; queryParams: any; acceptFile: any; onDelete: any; confirmModal: any; checkReturn: any; infoId: any; isEdit: boolean; filePath: any; tmpFiles: any; constructor( private router: Router, private qmsProductionInspectionService: QmsProductionInspectionSelfService, private activatedRoute: ActivatedRoute, private modalsService: NgbModal ) { this.controlLevel = 'C'; this.tmpFiles = []; this.num = 0; this.content = ''; this.goMaterialSelect = false; this.showModal = false; this.docHtml = ''; this.digiFiles = []; this.uploading = false; this.acceptFile = '.jpg/.doc/.pdf/.mp4'; // image/png, image/jpeg, image/ this.onDelete = []; this.confirmModal = false; this.checkReturn = {}; this.filePath = ''; } ngOnInit() {
this.labelTitle = { 'tab1': '质量检验项目', 'tab2': '装配物料', 'tab3': '上传附件', }; this.activatedRoute.data.subscribe(({ qmsProductionInspection }) => { console.log("111", qmsProductionInspection) this.qmsProductionInspection = qmsProductionInspection; if (qmsProductionInspection.isOk == '0') { this.isOkName = "未检验" } else if (qmsProductionInspection.isOk == '1') { this.isOkName = "合格" } else if (qmsProductionInspection.isOk == '2') { this.isOkName = "不合格" } // 获取一览数据 this.qmsProductionInspectionService .findQmsQualityControlDetailsByTechId({ pid: qmsProductionInspection.id }) .subscribe(data => { this.qmsQualityControlDetails = data.body; // 检查人 if (this.qmsQualityControlDetails.length > 0) { this.makeUser = this.qmsQualityControlDetails[0].makeUser; } this.qualityInspectionCheck() }) this.qmsProductionInspectionService .findAssemblyRelationByTechId({ bomTechnologyId: qmsProductionInspection.bomTechnologyId }) .subscribe(data => { this.qmsPartsAssemblyRelation = data.body; }) }); } previousState() { this.router.navigate(['/productProcessSelfCheck']); } // 质量检验check qualityInspectionCheck() { this.qualityControlDetailsErrorSet = new Set() // 质量检验项目 // 实测值是否填写 for (var i = 0; i < this.qmsQualityControlDetails.length; i++) { if (this.qmsQualityControlDetails[i].testValue === null || this.qmsQualityControlDetails[i].testValue.toString() == '') { this.msgs.push({ severity: 'error', summary: '提示', detail: '实测值不能为空!' }); return } } // 实测值是否合格 for (var i = 0; i < this.qmsQualityControlDetails.length; i++) { // 数字区分 if (this.qmsQualityControlDetails[i].inspectionResultDiff == '0') { if (this.qmsQualityControlDetails[i].upperDeviation !== null) { if (this.qmsQualityControlDetails[i].testValue > this.qmsQualityControlDetails[i].standard + this.qmsQualityControlDetails[i].upperDeviation) { // this.msgs.push({ severity: 'error', summary: '提示', detail: '存在数据不符合技术要求!' }); this.qualityControlDetailsErrorSet.add(i) } } if (this.qmsQualityControlDetails[i].lowerDeviation !== null) { if (this.qmsQualityControlDetails[i].testValue < this.qmsQualityControlDetails[i].standard - this.qmsQualityControlDetails[i].lowerDeviation) { // this.msgs.push({ severity: 'error', summary: '提示', detail: '存在数据不符合技术要求!' }); this.qualityControlDetailsErrorSet.add(i) } } } else { if (this.qmsQualityControlDetails[i].testValue.toString() == 'N') { this.qualityControlDetailsErrorSet.add(i) } } } } // 获取一览数据 findQmsQualityControlDetailsByTechId () { this.qmsProductionInspectionService .findQmsQualityControlDetailsByTechId({ pid: this.qmsProductionInspection.id }) .subscribe(data => { this.qmsQualityControlDetails = data.body; // 检查人 if (this.qmsQualityControlDetails.length > 0) { this.makeUser = this.qmsQualityControlDetails[0].makeUser; } this.qualityInspectionCheck() }) } save() { // 检验编号check if (this.checkNumber == undefined || this.checkNumber == null) { this.msgs.push({ severity: 'error', summary: '提示', detail: '请输入检验编号!' }); return } if (isNaN(this.checkNumber)) { this.msgs.push({ severity: 'error', summary: '提示', detail: '检验编号不正确!' }); return } this.qualityInspectionCheck() // 存在不合格数据 if (this.qualityControlDetailsErrorSet.size > 0) { this.msgs.push({ severity: 'error', summary: '提示', detail: '存在数据不符合技术要求!' 
}); const _this = this; this.qualityControlDetailsErrorSet.forEach(function (element : number, sameElement, set) { if (_this.controlLevel === 'A') { return } if (_this.qmsQualityControlDetails[element].abcType === 'B') { _this.controlLevel = 'B' } if (_this.qmsQualityControlDetails[element].abcType === 'A') { _this.controlLevel = 'A' return } }) const params = { bomTechnologyId: this.qmsProductionInspection.bomTechnologyId, processId: this.qmsProductionInspection.processId, materielId: this.qmsProductionInspection.materielId, controlLevel: this.controlLevel, serialNumber: this.qmsProductionInspection.serialNumber, furnace: this.qmsProductionInspection.furnace } // 新增一条到不合格品管理表 this.qmsProductionInspectionService.createQmsUnqualifiedProduct(params).subscribe(data => { // 根据返回的ip 批量插入到不合格品明细管理表 this.qualityControlDetailsErrorSet.forEach(function (element : number, sameElement, set) { const params = { unqualifiedProductId: data.body.unqualifiedProductId, inspectionItem: _this.qmsQualityControlDetails[element].inspectionItem, inspectionInstrument: _this.qmsQualityControlDetails[element].inspectionInstrument, checkResult: _this.qmsQualityControlDetails[element].testValue, upperDeviation: _this.qmsQualityControlDetails[element].upperDeviation, lowerDeviation: _this.qmsQualityControlDetails[element].lowerDeviation, abcType: _this.qmsQualityControlDetails[element].abcType, } _this.qmsProductionInspectionService.createQmsUnqualifiedProductDetails(params).subscribe(data => { }) }) }) } if (this.qmsQualityControlDetails.length !== 0) { // 判断结果表是否有数据确认是新增还是更新 if (this.qmsQualityControlDetails[0].resultId === null) { // 向结果表插入数据 this.qmsProductionInspectionService.createQmsProductionInspectionResult({params: this.qmsQualityControlDetails}).subscribe(data => { // 获取一览数据 this.findQmsQualityControlDetailsByTechId() }) // 向生产检验结果表插入数据 this.qmsProductionInspectionValue = new QmsProductionInspectionValue(); this.qmsProductionInspectionValue.isOk = this.qualityControlDetailsErrorSet.size !== 0? "0": "1" this.qmsProductionInspectionValue.inspectionDiff = "C" this.qmsProductionInspectionValue.inspectionId = this.qmsProductionInspection.id this.qmsProductionInspectionValue.checkNumber = this.checkNumber this.qmsProductionInspectionValue.flagStatus = "0" this.qmsProductionInspectionService.createQmsProductionInspectionValue(this.qmsProductionInspectionValue).subscribe(data => {}) }else { const params = { checkNumber: this.checkNumber, inspectionId: this.qmsProductionInspection.id, inspectionDiff: "C", isOk: this.qualityControlDetailsErrorSet.size !== 0? "0": "1" } this.qmsProductionInspectionService.updateQmsProductionInspectionValues(params).subscribe(data => {}) this.qmsProductionInspectionService.updateQmsProductionInspectionResult({params: this.qmsQualityControlDetails}).subscribe(data => { // 获取一览数据 this.findQmsQualityControlDetailsByTechId() }) } } if (this.qualityControlDetailsErrorSet.size == 0) { this.previousState() } } test(i) { var result = '' this.qualityControlDetailsErrorSet.forEach(function (element, sameElement, set) { if (i == element) { result = '#e20a0ad6' } }) return result } private subscribeToSaveResponse(result: Observable<HttpResponse<IQmsProductionInspection>>) { result.subscribe( (res: HttpResponse<IQmsProductionInspection>) => this.onSaveSuccess(), (res: HttpErrorResponse) => this.onSaveError() ); } private onSaveSuccess() { this.isSaving = false; this.previousState(); } private onSaveError() { this.isSaving = false; } }
this.isSaving = false;
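The qualityInspectionCheck logic above reduces to one rule per row: numeric items (inspectionResultDiff == '0') must stay within standard plus/minus the declared deviations, and pass/fail items fail on the literal value 'N'. A small Python sketch of that rule; the function name and dictionary keys mirror the component's fields but are assumptions, not part of the Angular code.

def is_out_of_tolerance(row):
    """Return True when a quality-control row should be flagged as failing.

    Mirrors qualityInspectionCheck: numeric rows are compared against
    standard +/- deviation, pass/fail rows fail on the literal value 'N'.
    """
    if row["inspectionResultDiff"] == "0":  # numeric measurement
        value = float(row["testValue"])
        standard = float(row["standard"])
        upper = row.get("upperDeviation")
        lower = row.get("lowerDeviation")
        if upper is not None and value > standard + upper:
            return True
        if lower is not None and value < standard - lower:
            return True
        return False
    return str(row["testValue"]) == "N"     # pass/fail measurement


# Example: collect the indices of failing rows, like qualityControlDetailsErrorSet.
rows = [
    {"inspectionResultDiff": "0", "testValue": 10.6, "standard": 10.0,
     "upperDeviation": 0.5, "lowerDeviation": 0.5},
    {"inspectionResultDiff": "1", "testValue": "Y"},
]
failing = {i for i, r in enumerate(rows) if is_out_of_tolerance(r)}  # {0}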
locale.py
# uncompyle6 version 2.9.10 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10) # [GCC 6.2.0 20161005] # Embedded file name: locale.py """ Locale support. The module provides low-level access to the C lib's locale APIs and adds high level number formatting APIs as well as a locale aliasing engine to complement these. The aliasing engine includes support for many commonly used locale names and maps them to values suitable for passing to the C lib's setlocale() function. It also includes default encodings for all supported locale names. """ import sys import encodings import encodings.aliases import re import operator import functools __all__ = [ 'getlocale', 'getdefaultlocale', 'getpreferredencoding', 'Error', 'setlocale', 'resetlocale', 'localeconv', 'strcoll', 'strxfrm', 'str', 'atof', 'atoi', 'format', 'format_string', 'currency', 'normalize', 'LC_CTYPE', 'LC_COLLATE', 'LC_TIME', 'LC_MONETARY', 'LC_NUMERIC', 'LC_ALL', 'CHAR_MAX'] try: from _locale import * except ImportError: CHAR_MAX = 127 LC_ALL = 6 LC_COLLATE = 3 LC_CTYPE = 0 LC_MESSAGES = 5 LC_MONETARY = 4 LC_NUMERIC = 1 LC_TIME = 2 Error = ValueError def localeconv(): """ localeconv() -> dict. Returns numeric and monetary locale-specific parameters. """ return {'grouping': [127],'currency_symbol': '', 'n_sign_posn': 127, 'p_cs_precedes': 127, 'n_cs_precedes': 127, 'mon_grouping': [],'n_sep_by_space': 127, 'decimal_point': '.', 'negative_sign': '', 'positive_sign': '', 'p_sep_by_space': 127, 'int_curr_symbol': '', 'p_sign_posn': 127, 'thousands_sep': '', 'mon_thousands_sep': '', 'frac_digits': 127, 'mon_decimal_point': '', 'int_frac_digits': 127 } def setlocale(category, value=None): """ setlocale(integer,string=None) -> string. Activates/queries locale processing. """ if value not in (None, '', 'C'): raise Error, '_locale emulation only supports "C" locale' return 'C' def strcoll(a, b): """ strcoll(string,string) -> int. Compares two strings according to the locale. """ return cmp(a, b) def strxfrm(s): """ strxfrm(string) -> string. Returns a string that behaves for cmp locale-aware. 
""" return s _localeconv = localeconv _override_localeconv = {} @functools.wraps(_localeconv) def localeconv(): d = _localeconv() if _override_localeconv: d.update(_override_localeconv) return d def _grouping_intervals(grouping): last_interval = None for interval in grouping: if interval == CHAR_MAX: return if interval == 0: if last_interval is None: raise ValueError('invalid grouping') while True: yield last_interval yield interval last_interval = interval return def _group(s, monetary=False): conv = localeconv() thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep'] grouping = conv[monetary and 'mon_grouping' or 'grouping'] if not grouping: return (s, 0) result = '' seps = 0 if s[-1] == ' ': stripped = s.rstrip() right_spaces = s[len(stripped):] s = stripped else: right_spaces = '' left_spaces = '' groups = [] for interval in _grouping_intervals(grouping): if not s or s[-1] not in '0123456789': left_spaces = s s = '' break groups.append(s[-interval:]) s = s[:-interval] if s: groups.append(s) groups.reverse() return ( left_spaces + thousands_sep.join(groups) + right_spaces, len(thousands_sep) * (len(groups) - 1)) def _strip_padding(s, amount): lpos = 0 while amount and s[lpos] == ' ': lpos += 1 amount -= 1 rpos = len(s) - 1 while amount and s[rpos] == ' ': rpos -= 1 amount -= 1 return s[lpos:rpos + 1] _percent_re = re.compile('%(?:\\((?P<key>.*?)\\))?(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]') def format(percent, value, grouping=False, monetary=False, *additional): """Returns the locale-aware substitution of a %? specifier (percent). additional is for format strings which contain one or more '*' modifiers.""" match = _percent_re.match(percent) if not match or len(match.group()) != len(percent): raise ValueError('format() must be given exactly one %%char format specifier, %s not valid' % repr(percent)) return _format(percent, value, grouping, monetary, *additional) def _format(percent, value, grouping=False, monetary=False, *additional): if additional: formatted = percent % ((value,) + additional) else: formatted = percent % value if percent[-1] in 'eEfFgG': seps = 0 parts = formatted.split('.') if grouping: parts[0], seps = _group(parts[0], monetary=monetary) decimal_point = localeconv()[monetary and 'mon_decimal_point' or 'decimal_point'] formatted = decimal_point.join(parts) if seps: formatted = _strip_padding(formatted, seps) elif percent[-1] in 'diu': seps = 0 if grouping: formatted, seps = _group(formatted, monetary=monetary) if seps: formatted = _strip_padding(formatted, seps) return formatted def format_string(f, val, grouping=False): """Formats a string in the same way that the % formatting would use, but takes the current locale into account. 
Grouping is applied if the third parameter is true.""" percents = list(_percent_re.finditer(f)) new_f = _percent_re.sub('%s', f) if operator.isMappingType(val): new_val = [] for perc in percents: if perc.group()[-1] == '%': new_val.append('%') else: new_val.append(format(perc.group(), val, grouping)) else: if not isinstance(val, tuple): val = ( val,) new_val = [] i = 0 for perc in percents: if perc.group()[-1] == '%': new_val.append('%') else: starcount = perc.group('modifiers').count('*') new_val.append(_format(perc.group(), val[i], grouping, False, *val[i + 1:i + 1 + starcount])) i += 1 + starcount val = tuple(new_val) return new_f % val def currency(val, symbol=True, grouping=False, international=False): """Formats val according to the currency settings in the current locale.""" conv = localeconv() digits = conv[international and 'int_frac_digits' or 'frac_digits'] if digits == 127: raise ValueError("Currency formatting is not possible using the 'C' locale.") s = format('%%.%if' % digits, abs(val), grouping, monetary=True) s = '<' + s + '>' if symbol: smb = conv[international and 'int_curr_symbol' or 'currency_symbol'] precedes = conv[val < 0 and 'n_cs_precedes' or 'p_cs_precedes'] separated = conv[val < 0 and 'n_sep_by_space' or 'p_sep_by_space'] if precedes: s = smb + (separated and ' ' or '') + s else: s = s + (separated and ' ' or '') + smb sign_pos = conv[val < 0 and 'n_sign_posn' or 'p_sign_posn'] sign = conv[val < 0 and 'negative_sign' or 'positive_sign'] if sign_pos == 0: s = '(' + s + ')' elif sign_pos == 1: s = sign + s elif sign_pos == 2: s = s + sign elif sign_pos == 3: s = s.replace('<', sign) elif sign_pos == 4: s = s.replace('>', sign) else: s = sign + s return s.replace('<', '').replace('>', '') def str(val): """Convert float to integer, taking the locale into account.""" return format('%.12g', val) def atof(string, func=float): """Parses a string as a float according to the locale settings.""" ts = localeconv()['thousands_sep'] if ts: string = string.replace(ts, '') dd = localeconv()['decimal_point'] if dd: string = string.replace(dd, '.') return func(string) def atoi(str): """Converts a string to an integer according to the locale settings.""" return atof(str, int) def _test(): setlocale(LC_ALL, '') s1 = format('%d', 123456789, 1) print s1, 'is', atoi(s1) s1 = str(3.14) print s1, 'is', atof(s1) _setlocale = setlocale def normalize(localename): """ Returns a normalized locale code for the given locale name. The returned locale code is formatted for use with setlocale(). If normalization fails, the original name is returned unchanged. If the given encoding is not known, the function defaults to the default encoding for the locale code just like setlocale() does. """ fullname = localename.lower() if ':' in fullname: fullname = fullname.replace(':', '.') if '.' in fullname: langname, encoding = fullname.split('.')[:2] fullname = langname + '.' + encoding else: langname = fullname encoding = '' norm_encoding = encoding.replace('-', '') norm_encoding = norm_encoding.replace('_', '') lookup_name = langname + '.' + encoding code = locale_alias.get(lookup_name, None) if code is not None: return code else: code = locale_alias.get(langname, None) if code is not None: if '.' 
in code: langname, defenc = code.split('.') else: langname = code defenc = '' if encoding: norm_encoding = encodings.normalize_encoding(encoding) norm_encoding = encodings.aliases.aliases.get(norm_encoding, norm_encoding) encoding = locale_encoding_alias.get(norm_encoding, norm_encoding) else: encoding = defenc if encoding: return langname + '.' + encoding else: return langname else: return localename return def _parse_localename(localename): """ Parses the locale code for localename and returns the result as tuple (language code, encoding). The localename is normalized and passed through the locale alias engine. A ValueError is raised in case the locale name cannot be parsed. The language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined or are unknown to this implementation. """ code = normalize(localename) if '@' in code: code, modifier = code.split('@') if modifier == 'euro' and '.' not in code: return ( code, 'iso-8859-15') if '.' in code: return tuple(code.split('.')[:2]) else:
def _build_localename(localetuple): """ Builds a locale code from the given tuple (language code, encoding). No aliasing or normalizing takes place. """ language, encoding = localetuple if language is None: language = 'C' if encoding is None: return language else: return language + '.' + encoding return def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')): """ Tries to determine the default locale settings and returns them as tuple (language code, encoding). According to POSIX, a program which has not called setlocale(LC_ALL, "") runs using the portable 'C' locale. Calling setlocale(LC_ALL, "") lets it use the default locale as defined by the LANG variable. Since we don't want to interfere with the current locale setting we thus emulate the behavior in the way described above. To maintain compatibility with other platforms, not only the LANG variable is tested, but a list of variables given as envvars parameter. The first found to be defined will be used. envvars defaults to the search path used in GNU gettext; it must always contain the variable name 'LANG'. Except for the code 'C', the language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined. """ try: import _locale code, encoding = _locale._getdefaultlocale() except (ImportError, AttributeError): pass else: if sys.platform == 'win32' and code and code[:2] == '0x': code = windows_locale.get(int(code, 0)) return ( code, encoding) import os lookup = os.environ.get for variable in envvars: localename = lookup(variable, None) if localename: if variable == 'LANGUAGE': localename = localename.split(':')[0] break else: localename = 'C' return _parse_localename(localename) def getlocale(category=LC_CTYPE): """ Returns the current setting for the given locale category as tuple (language code, encoding). category may be one of the LC_* value except LC_ALL. It defaults to LC_CTYPE. Except for the code 'C', the language code corresponds to RFC 1766. code and encoding can be None in case the values cannot be determined. """ localename = _setlocale(category) if category == LC_ALL and ';' in localename: raise TypeError, 'category LC_ALL is not supported' return _parse_localename(localename) def setlocale(category, locale=None): """ Set the locale for the given category. The locale can be a string, a locale tuple (language code, encoding), or None. Locale tuples are converted to strings the locale aliasing engine. Locale strings are passed directly to the C lib. category may be given as one of the LC_* values. """ if locale and type(locale) is not type(''): locale = normalize(_build_localename(locale)) return _setlocale(category, locale) def resetlocale(category=LC_ALL): """ Sets the locale for category to the default setting. The default setting is determined by calling getdefaultlocale(). category defaults to LC_ALL. 
""" _setlocale(category, _build_localename(getdefaultlocale())) if sys.platform.startswith('win'): def getpreferredencoding(do_setlocale=True): """Return the charset that the user is likely using.""" import _locale return _locale._getdefaultlocale()[1] else: try: CODESET except NameError: def getpreferredencoding(do_setlocale=True): """Return the charset that the user is likely using, by looking at environment variables.""" return getdefaultlocale()[1] else: def getpreferredencoding(do_setlocale=True): """Return the charset that the user is likely using, according to the system configuration.""" if do_setlocale: oldloc = setlocale(LC_CTYPE) try: setlocale(LC_CTYPE, '') except Error: pass result = nl_langinfo(CODESET) setlocale(LC_CTYPE, oldloc) return result else: return nl_langinfo(CODESET) locale_encoding_alias = {'437': 'C', 'c': 'C', 'en': 'ISO8859-1', 'jis': 'JIS7', 'jis7': 'JIS7', 'ajec': 'eucJP', 'ascii': 'ISO8859-1', 'latin_1': 'ISO8859-1', 'iso8859_1': 'ISO8859-1', 'iso8859_10': 'ISO8859-10', 'iso8859_11': 'ISO8859-11', 'iso8859_13': 'ISO8859-13', 'iso8859_14': 'ISO8859-14', 'iso8859_15': 'ISO8859-15', 'iso8859_16': 'ISO8859-16', 'iso8859_2': 'ISO8859-2', 'iso8859_3': 'ISO8859-3', 'iso8859_4': 'ISO8859-4', 'iso8859_5': 'ISO8859-5', 'iso8859_6': 'ISO8859-6', 'iso8859_7': 'ISO8859-7', 'iso8859_8': 'ISO8859-8', 'iso8859_9': 'ISO8859-9', 'iso2022_jp': 'JIS7', 'shift_jis': 'SJIS', 'tactis': 'TACTIS', 'euc_jp': 'eucJP', 'euc_kr': 'eucKR', 'utf_8': 'UTF-8', 'koi8_r': 'KOI8-R', 'koi8_u': 'KOI8-U' } locale_alias = {'a3': 'a3_AZ.KOI8-C', 'a3_az': 'a3_AZ.KOI8-C', 'a3_az.koi8c': 'a3_AZ.KOI8-C', 'af': 'af_ZA.ISO8859-1', 'af_za': 'af_ZA.ISO8859-1', 'af_za.iso88591': 'af_ZA.ISO8859-1', 'am': 'am_ET.UTF-8', 'am_et': 'am_ET.UTF-8', 'american': 'en_US.ISO8859-1', 'american.iso88591': 'en_US.ISO8859-1', 'ar': 'ar_AA.ISO8859-6', 'ar_aa': 'ar_AA.ISO8859-6', 'ar_aa.iso88596': 'ar_AA.ISO8859-6', 'ar_ae': 'ar_AE.ISO8859-6', 'ar_ae.iso88596': 'ar_AE.ISO8859-6', 'ar_bh': 'ar_BH.ISO8859-6', 'ar_bh.iso88596': 'ar_BH.ISO8859-6', 'ar_dz': 'ar_DZ.ISO8859-6', 'ar_dz.iso88596': 'ar_DZ.ISO8859-6', 'ar_eg': 'ar_EG.ISO8859-6', 'ar_eg.iso88596': 'ar_EG.ISO8859-6', 'ar_iq': 'ar_IQ.ISO8859-6', 'ar_iq.iso88596': 'ar_IQ.ISO8859-6', 'ar_jo': 'ar_JO.ISO8859-6', 'ar_jo.iso88596': 'ar_JO.ISO8859-6', 'ar_kw': 'ar_KW.ISO8859-6', 'ar_kw.iso88596': 'ar_KW.ISO8859-6', 'ar_lb': 'ar_LB.ISO8859-6', 'ar_lb.iso88596': 'ar_LB.ISO8859-6', 'ar_ly': 'ar_LY.ISO8859-6', 'ar_ly.iso88596': 'ar_LY.ISO8859-6', 'ar_ma': 'ar_MA.ISO8859-6', 'ar_ma.iso88596': 'ar_MA.ISO8859-6', 'ar_om': 'ar_OM.ISO8859-6', 'ar_om.iso88596': 'ar_OM.ISO8859-6', 'ar_qa': 'ar_QA.ISO8859-6', 'ar_qa.iso88596': 'ar_QA.ISO8859-6', 'ar_sa': 'ar_SA.ISO8859-6', 'ar_sa.iso88596': 'ar_SA.ISO8859-6', 'ar_sd': 'ar_SD.ISO8859-6', 'ar_sd.iso88596': 'ar_SD.ISO8859-6', 'ar_sy': 'ar_SY.ISO8859-6', 'ar_sy.iso88596': 'ar_SY.ISO8859-6', 'ar_tn': 'ar_TN.ISO8859-6', 'ar_tn.iso88596': 'ar_TN.ISO8859-6', 'ar_ye': 'ar_YE.ISO8859-6', 'ar_ye.iso88596': 'ar_YE.ISO8859-6', 'arabic': 'ar_AA.ISO8859-6', 'arabic.iso88596': 'ar_AA.ISO8859-6', 'as': 'as_IN.UTF-8', 'az': 'az_AZ.ISO8859-9E', 'az_az': 'az_AZ.ISO8859-9E', 'az_az.iso88599e': 'az_AZ.ISO8859-9E', 'be': 'be_BY.CP1251', 'be@latin': 'be_BY.UTF-8@latin', 'be_by': 'be_BY.CP1251', 'be_by.cp1251': 'be_BY.CP1251', 'be_by.microsoftcp1251': 'be_BY.CP1251', 'be_by.utf8@latin': 'be_BY.UTF-8@latin', 'be_by@latin': 'be_BY.UTF-8@latin', 'bg': 'bg_BG.CP1251', 'bg_bg': 'bg_BG.CP1251', 'bg_bg.cp1251': 'bg_BG.CP1251', 'bg_bg.iso88595': 'bg_BG.ISO8859-5', 
'bg_bg.koi8r': 'bg_BG.KOI8-R', 'bg_bg.microsoftcp1251': 'bg_BG.CP1251', 'bn_in': 'bn_IN.UTF-8', 'bokmal': 'nb_NO.ISO8859-1', 'bokml': 'nb_NO.ISO8859-1', 'br': 'br_FR.ISO8859-1', 'br_fr': 'br_FR.ISO8859-1', 'br_fr.iso88591': 'br_FR.ISO8859-1', 'br_fr.iso885914': 'br_FR.ISO8859-14', 'br_fr.iso885915': 'br_FR.ISO8859-15', 'br_fr.iso885915@euro': 'br_FR.ISO8859-15', 'br_fr.utf8@euro': 'br_FR.UTF-8', 'br_fr@euro': 'br_FR.ISO8859-15', 'bs': 'bs_BA.ISO8859-2', 'bs_ba': 'bs_BA.ISO8859-2', 'bs_ba.iso88592': 'bs_BA.ISO8859-2', 'bulgarian': 'bg_BG.CP1251', 'c': 'C', 'c-french': 'fr_CA.ISO8859-1', 'c-french.iso88591': 'fr_CA.ISO8859-1', 'c.en': 'C', 'c.iso88591': 'en_US.ISO8859-1', 'c_c': 'C', 'c_c.c': 'C', 'ca': 'ca_ES.ISO8859-1', 'ca_ad': 'ca_AD.ISO8859-1', 'ca_ad.iso88591': 'ca_AD.ISO8859-1', 'ca_ad.iso885915': 'ca_AD.ISO8859-15', 'ca_ad.iso885915@euro': 'ca_AD.ISO8859-15', 'ca_ad.utf8@euro': 'ca_AD.UTF-8', 'ca_ad@euro': 'ca_AD.ISO8859-15', 'ca_es': 'ca_ES.ISO8859-1', 'ca_es.iso88591': 'ca_ES.ISO8859-1', 'ca_es.iso885915': 'ca_ES.ISO8859-15', 'ca_es.iso885915@euro': 'ca_ES.ISO8859-15', 'ca_es.utf8@euro': 'ca_ES.UTF-8', 'ca_es@euro': 'ca_ES.ISO8859-15', 'ca_fr': 'ca_FR.ISO8859-1', 'ca_fr.iso88591': 'ca_FR.ISO8859-1', 'ca_fr.iso885915': 'ca_FR.ISO8859-15', 'ca_fr.iso885915@euro': 'ca_FR.ISO8859-15', 'ca_fr.utf8@euro': 'ca_FR.UTF-8', 'ca_fr@euro': 'ca_FR.ISO8859-15', 'ca_it': 'ca_IT.ISO8859-1', 'ca_it.iso88591': 'ca_IT.ISO8859-1', 'ca_it.iso885915': 'ca_IT.ISO8859-15', 'ca_it.iso885915@euro': 'ca_IT.ISO8859-15', 'ca_it.utf8@euro': 'ca_IT.UTF-8', 'ca_it@euro': 'ca_IT.ISO8859-15', 'catalan': 'ca_ES.ISO8859-1', 'cextend': 'en_US.ISO8859-1', 'cextend.en': 'en_US.ISO8859-1', 'chinese-s': 'zh_CN.eucCN', 'chinese-t': 'zh_TW.eucTW', 'croatian': 'hr_HR.ISO8859-2', 'cs': 'cs_CZ.ISO8859-2', 'cs_cs': 'cs_CZ.ISO8859-2', 'cs_cs.iso88592': 'cs_CS.ISO8859-2', 'cs_cz': 'cs_CZ.ISO8859-2', 'cs_cz.iso88592': 'cs_CZ.ISO8859-2', 'cy': 'cy_GB.ISO8859-1', 'cy_gb': 'cy_GB.ISO8859-1', 'cy_gb.iso88591': 'cy_GB.ISO8859-1', 'cy_gb.iso885914': 'cy_GB.ISO8859-14', 'cy_gb.iso885915': 'cy_GB.ISO8859-15', 'cy_gb@euro': 'cy_GB.ISO8859-15', 'cz': 'cs_CZ.ISO8859-2', 'cz_cz': 'cs_CZ.ISO8859-2', 'czech': 'cs_CZ.ISO8859-2', 'da': 'da_DK.ISO8859-1', 'da.iso885915': 'da_DK.ISO8859-15', 'da_dk': 'da_DK.ISO8859-1', 'da_dk.88591': 'da_DK.ISO8859-1', 'da_dk.885915': 'da_DK.ISO8859-15', 'da_dk.iso88591': 'da_DK.ISO8859-1', 'da_dk.iso885915': 'da_DK.ISO8859-15', 'da_dk@euro': 'da_DK.ISO8859-15', 'danish': 'da_DK.ISO8859-1', 'danish.iso88591': 'da_DK.ISO8859-1', 'dansk': 'da_DK.ISO8859-1', 'de': 'de_DE.ISO8859-1', 'de.iso885915': 'de_DE.ISO8859-15', 'de_at': 'de_AT.ISO8859-1', 'de_at.iso88591': 'de_AT.ISO8859-1', 'de_at.iso885915': 'de_AT.ISO8859-15', 'de_at.iso885915@euro': 'de_AT.ISO8859-15', 'de_at.utf8@euro': 'de_AT.UTF-8', 'de_at@euro': 'de_AT.ISO8859-15', 'de_be': 'de_BE.ISO8859-1', 'de_be.iso88591': 'de_BE.ISO8859-1', 'de_be.iso885915': 'de_BE.ISO8859-15', 'de_be.iso885915@euro': 'de_BE.ISO8859-15', 'de_be.utf8@euro': 'de_BE.UTF-8', 'de_be@euro': 'de_BE.ISO8859-15', 'de_ch': 'de_CH.ISO8859-1', 'de_ch.iso88591': 'de_CH.ISO8859-1', 'de_ch.iso885915': 'de_CH.ISO8859-15', 'de_ch@euro': 'de_CH.ISO8859-15', 'de_de': 'de_DE.ISO8859-1', 'de_de.88591': 'de_DE.ISO8859-1', 'de_de.885915': 'de_DE.ISO8859-15', 'de_de.885915@euro': 'de_DE.ISO8859-15', 'de_de.iso88591': 'de_DE.ISO8859-1', 'de_de.iso885915': 'de_DE.ISO8859-15', 'de_de.iso885915@euro': 'de_DE.ISO8859-15', 'de_de.utf8@euro': 'de_DE.UTF-8', 'de_de@euro': 'de_DE.ISO8859-15', 'de_lu': 
'de_LU.ISO8859-1', 'de_lu.iso88591': 'de_LU.ISO8859-1', 'de_lu.iso885915': 'de_LU.ISO8859-15', 'de_lu.iso885915@euro': 'de_LU.ISO8859-15', 'de_lu.utf8@euro': 'de_LU.UTF-8', 'de_lu@euro': 'de_LU.ISO8859-15', 'deutsch': 'de_DE.ISO8859-1', 'dutch': 'nl_NL.ISO8859-1', 'dutch.iso88591': 'nl_BE.ISO8859-1', 'ee': 'ee_EE.ISO8859-4', 'ee_ee': 'ee_EE.ISO8859-4', 'ee_ee.iso88594': 'ee_EE.ISO8859-4', 'eesti': 'et_EE.ISO8859-1', 'el': 'el_GR.ISO8859-7', 'el_gr': 'el_GR.ISO8859-7', 'el_gr.iso88597': 'el_GR.ISO8859-7', 'el_gr@euro': 'el_GR.ISO8859-15', 'en': 'en_US.ISO8859-1', 'en.iso88591': 'en_US.ISO8859-1', 'en_au': 'en_AU.ISO8859-1', 'en_au.iso88591': 'en_AU.ISO8859-1', 'en_be': 'en_BE.ISO8859-1', 'en_be@euro': 'en_BE.ISO8859-15', 'en_bw': 'en_BW.ISO8859-1', 'en_bw.iso88591': 'en_BW.ISO8859-1', 'en_ca': 'en_CA.ISO8859-1', 'en_ca.iso88591': 'en_CA.ISO8859-1', 'en_gb': 'en_GB.ISO8859-1', 'en_gb.88591': 'en_GB.ISO8859-1', 'en_gb.iso88591': 'en_GB.ISO8859-1', 'en_gb.iso885915': 'en_GB.ISO8859-15', 'en_gb@euro': 'en_GB.ISO8859-15', 'en_hk': 'en_HK.ISO8859-1', 'en_hk.iso88591': 'en_HK.ISO8859-1', 'en_ie': 'en_IE.ISO8859-1', 'en_ie.iso88591': 'en_IE.ISO8859-1', 'en_ie.iso885915': 'en_IE.ISO8859-15', 'en_ie.iso885915@euro': 'en_IE.ISO8859-15', 'en_ie.utf8@euro': 'en_IE.UTF-8', 'en_ie@euro': 'en_IE.ISO8859-15', 'en_in': 'en_IN.ISO8859-1', 'en_nz': 'en_NZ.ISO8859-1', 'en_nz.iso88591': 'en_NZ.ISO8859-1', 'en_ph': 'en_PH.ISO8859-1', 'en_ph.iso88591': 'en_PH.ISO8859-1', 'en_sg': 'en_SG.ISO8859-1', 'en_sg.iso88591': 'en_SG.ISO8859-1', 'en_uk': 'en_GB.ISO8859-1', 'en_us': 'en_US.ISO8859-1', 'en_us.88591': 'en_US.ISO8859-1', 'en_us.885915': 'en_US.ISO8859-15', 'en_us.iso88591': 'en_US.ISO8859-1', 'en_us.iso885915': 'en_US.ISO8859-15', 'en_us.iso885915@euro': 'en_US.ISO8859-15', 'en_us@euro': 'en_US.ISO8859-15', 'en_us@euro@euro': 'en_US.ISO8859-15', 'en_za': 'en_ZA.ISO8859-1', 'en_za.88591': 'en_ZA.ISO8859-1', 'en_za.iso88591': 'en_ZA.ISO8859-1', 'en_za.iso885915': 'en_ZA.ISO8859-15', 'en_za@euro': 'en_ZA.ISO8859-15', 'en_zw': 'en_ZW.ISO8859-1', 'en_zw.iso88591': 'en_ZW.ISO8859-1', 'eng_gb': 'en_GB.ISO8859-1', 'eng_gb.8859': 'en_GB.ISO8859-1', 'english': 'en_EN.ISO8859-1', 'english.iso88591': 'en_EN.ISO8859-1', 'english_uk': 'en_GB.ISO8859-1', 'english_uk.8859': 'en_GB.ISO8859-1', 'english_united-states': 'en_US.ISO8859-1', 'english_united-states.437': 'C', 'english_us': 'en_US.ISO8859-1', 'english_us.8859': 'en_US.ISO8859-1', 'english_us.ascii': 'en_US.ISO8859-1', 'eo': 'eo_XX.ISO8859-3', 'eo_eo': 'eo_EO.ISO8859-3', 'eo_eo.iso88593': 'eo_EO.ISO8859-3', 'eo_xx': 'eo_XX.ISO8859-3', 'eo_xx.iso88593': 'eo_XX.ISO8859-3', 'es': 'es_ES.ISO8859-1', 'es_ar': 'es_AR.ISO8859-1', 'es_ar.iso88591': 'es_AR.ISO8859-1', 'es_bo': 'es_BO.ISO8859-1', 'es_bo.iso88591': 'es_BO.ISO8859-1', 'es_cl': 'es_CL.ISO8859-1', 'es_cl.iso88591': 'es_CL.ISO8859-1', 'es_co': 'es_CO.ISO8859-1', 'es_co.iso88591': 'es_CO.ISO8859-1', 'es_cr': 'es_CR.ISO8859-1', 'es_cr.iso88591': 'es_CR.ISO8859-1', 'es_do': 'es_DO.ISO8859-1', 'es_do.iso88591': 'es_DO.ISO8859-1', 'es_ec': 'es_EC.ISO8859-1', 'es_ec.iso88591': 'es_EC.ISO8859-1', 'es_es': 'es_ES.ISO8859-1', 'es_es.88591': 'es_ES.ISO8859-1', 'es_es.iso88591': 'es_ES.ISO8859-1', 'es_es.iso885915': 'es_ES.ISO8859-15', 'es_es.iso885915@euro': 'es_ES.ISO8859-15', 'es_es.utf8@euro': 'es_ES.UTF-8', 'es_es@euro': 'es_ES.ISO8859-15', 'es_gt': 'es_GT.ISO8859-1', 'es_gt.iso88591': 'es_GT.ISO8859-1', 'es_hn': 'es_HN.ISO8859-1', 'es_hn.iso88591': 'es_HN.ISO8859-1', 'es_mx': 'es_MX.ISO8859-1', 'es_mx.iso88591': 
'es_MX.ISO8859-1', 'es_ni': 'es_NI.ISO8859-1', 'es_ni.iso88591': 'es_NI.ISO8859-1', 'es_pa': 'es_PA.ISO8859-1', 'es_pa.iso88591': 'es_PA.ISO8859-1', 'es_pa.iso885915': 'es_PA.ISO8859-15', 'es_pa@euro': 'es_PA.ISO8859-15', 'es_pe': 'es_PE.ISO8859-1', 'es_pe.iso88591': 'es_PE.ISO8859-1', 'es_pe.iso885915': 'es_PE.ISO8859-15', 'es_pe@euro': 'es_PE.ISO8859-15', 'es_pr': 'es_PR.ISO8859-1', 'es_pr.iso88591': 'es_PR.ISO8859-1', 'es_py': 'es_PY.ISO8859-1', 'es_py.iso88591': 'es_PY.ISO8859-1', 'es_py.iso885915': 'es_PY.ISO8859-15', 'es_py@euro': 'es_PY.ISO8859-15', 'es_sv': 'es_SV.ISO8859-1', 'es_sv.iso88591': 'es_SV.ISO8859-1', 'es_sv.iso885915': 'es_SV.ISO8859-15', 'es_sv@euro': 'es_SV.ISO8859-15', 'es_us': 'es_US.ISO8859-1', 'es_us.iso88591': 'es_US.ISO8859-1', 'es_uy': 'es_UY.ISO8859-1', 'es_uy.iso88591': 'es_UY.ISO8859-1', 'es_uy.iso885915': 'es_UY.ISO8859-15', 'es_uy@euro': 'es_UY.ISO8859-15', 'es_ve': 'es_VE.ISO8859-1', 'es_ve.iso88591': 'es_VE.ISO8859-1', 'es_ve.iso885915': 'es_VE.ISO8859-15', 'es_ve@euro': 'es_VE.ISO8859-15', 'estonian': 'et_EE.ISO8859-1', 'et': 'et_EE.ISO8859-15', 'et_ee': 'et_EE.ISO8859-15', 'et_ee.iso88591': 'et_EE.ISO8859-1', 'et_ee.iso885913': 'et_EE.ISO8859-13', 'et_ee.iso885915': 'et_EE.ISO8859-15', 'et_ee.iso88594': 'et_EE.ISO8859-4', 'et_ee@euro': 'et_EE.ISO8859-15', 'eu': 'eu_ES.ISO8859-1', 'eu_es': 'eu_ES.ISO8859-1', 'eu_es.iso88591': 'eu_ES.ISO8859-1', 'eu_es.iso885915': 'eu_ES.ISO8859-15', 'eu_es.iso885915@euro': 'eu_ES.ISO8859-15', 'eu_es.utf8@euro': 'eu_ES.UTF-8', 'eu_es@euro': 'eu_ES.ISO8859-15', 'fa': 'fa_IR.UTF-8', 'fa_ir': 'fa_IR.UTF-8', 'fa_ir.isiri3342': 'fa_IR.ISIRI-3342', 'fi': 'fi_FI.ISO8859-15', 'fi.iso885915': 'fi_FI.ISO8859-15', 'fi_fi': 'fi_FI.ISO8859-15', 'fi_fi.88591': 'fi_FI.ISO8859-1', 'fi_fi.iso88591': 'fi_FI.ISO8859-1', 'fi_fi.iso885915': 'fi_FI.ISO8859-15', 'fi_fi.iso885915@euro': 'fi_FI.ISO8859-15', 'fi_fi.utf8@euro': 'fi_FI.UTF-8', 'fi_fi@euro': 'fi_FI.ISO8859-15', 'finnish': 'fi_FI.ISO8859-1', 'finnish.iso88591': 'fi_FI.ISO8859-1', 'fo': 'fo_FO.ISO8859-1', 'fo_fo': 'fo_FO.ISO8859-1', 'fo_fo.iso88591': 'fo_FO.ISO8859-1', 'fo_fo.iso885915': 'fo_FO.ISO8859-15', 'fo_fo@euro': 'fo_FO.ISO8859-15', 'fr': 'fr_FR.ISO8859-1', 'fr.iso885915': 'fr_FR.ISO8859-15', 'fr_be': 'fr_BE.ISO8859-1', 'fr_be.88591': 'fr_BE.ISO8859-1', 'fr_be.iso88591': 'fr_BE.ISO8859-1', 'fr_be.iso885915': 'fr_BE.ISO8859-15', 'fr_be.iso885915@euro': 'fr_BE.ISO8859-15', 'fr_be.utf8@euro': 'fr_BE.UTF-8', 'fr_be@euro': 'fr_BE.ISO8859-15', 'fr_ca': 'fr_CA.ISO8859-1', 'fr_ca.88591': 'fr_CA.ISO8859-1', 'fr_ca.iso88591': 'fr_CA.ISO8859-1', 'fr_ca.iso885915': 'fr_CA.ISO8859-15', 'fr_ca@euro': 'fr_CA.ISO8859-15', 'fr_ch': 'fr_CH.ISO8859-1', 'fr_ch.88591': 'fr_CH.ISO8859-1', 'fr_ch.iso88591': 'fr_CH.ISO8859-1', 'fr_ch.iso885915': 'fr_CH.ISO8859-15', 'fr_ch@euro': 'fr_CH.ISO8859-15', 'fr_fr': 'fr_FR.ISO8859-1', 'fr_fr.88591': 'fr_FR.ISO8859-1', 'fr_fr.iso88591': 'fr_FR.ISO8859-1', 'fr_fr.iso885915': 'fr_FR.ISO8859-15', 'fr_fr.iso885915@euro': 'fr_FR.ISO8859-15', 'fr_fr.utf8@euro': 'fr_FR.UTF-8', 'fr_fr@euro': 'fr_FR.ISO8859-15', 'fr_lu': 'fr_LU.ISO8859-1', 'fr_lu.88591': 'fr_LU.ISO8859-1', 'fr_lu.iso88591': 'fr_LU.ISO8859-1', 'fr_lu.iso885915': 'fr_LU.ISO8859-15', 'fr_lu.iso885915@euro': 'fr_LU.ISO8859-15', 'fr_lu.utf8@euro': 'fr_LU.UTF-8', 'fr_lu@euro': 'fr_LU.ISO8859-15', 'franais': 'fr_FR.ISO8859-1', 'fre_fr': 'fr_FR.ISO8859-1', 'fre_fr.8859': 'fr_FR.ISO8859-1', 'french': 'fr_FR.ISO8859-1', 'french.iso88591': 'fr_CH.ISO8859-1', 'french_france': 'fr_FR.ISO8859-1', 
'french_france.8859': 'fr_FR.ISO8859-1', 'ga': 'ga_IE.ISO8859-1', 'ga_ie': 'ga_IE.ISO8859-1', 'ga_ie.iso88591': 'ga_IE.ISO8859-1', 'ga_ie.iso885914': 'ga_IE.ISO8859-14', 'ga_ie.iso885915': 'ga_IE.ISO8859-15', 'ga_ie.iso885915@euro': 'ga_IE.ISO8859-15', 'ga_ie.utf8@euro': 'ga_IE.UTF-8', 'ga_ie@euro': 'ga_IE.ISO8859-15', 'galego': 'gl_ES.ISO8859-1', 'galician': 'gl_ES.ISO8859-1', 'gd': 'gd_GB.ISO8859-1', 'gd_gb': 'gd_GB.ISO8859-1', 'gd_gb.iso88591': 'gd_GB.ISO8859-1', 'gd_gb.iso885914': 'gd_GB.ISO8859-14', 'gd_gb.iso885915': 'gd_GB.ISO8859-15', 'gd_gb@euro': 'gd_GB.ISO8859-15', 'ger_de': 'de_DE.ISO8859-1', 'ger_de.8859': 'de_DE.ISO8859-1', 'german': 'de_DE.ISO8859-1', 'german.iso88591': 'de_CH.ISO8859-1', 'german_germany': 'de_DE.ISO8859-1', 'german_germany.8859': 'de_DE.ISO8859-1', 'gl': 'gl_ES.ISO8859-1', 'gl_es': 'gl_ES.ISO8859-1', 'gl_es.iso88591': 'gl_ES.ISO8859-1', 'gl_es.iso885915': 'gl_ES.ISO8859-15', 'gl_es.iso885915@euro': 'gl_ES.ISO8859-15', 'gl_es.utf8@euro': 'gl_ES.UTF-8', 'gl_es@euro': 'gl_ES.ISO8859-15', 'greek': 'el_GR.ISO8859-7', 'greek.iso88597': 'el_GR.ISO8859-7', 'gu_in': 'gu_IN.UTF-8', 'gv': 'gv_GB.ISO8859-1', 'gv_gb': 'gv_GB.ISO8859-1', 'gv_gb.iso88591': 'gv_GB.ISO8859-1', 'gv_gb.iso885914': 'gv_GB.ISO8859-14', 'gv_gb.iso885915': 'gv_GB.ISO8859-15', 'gv_gb@euro': 'gv_GB.ISO8859-15', 'he': 'he_IL.ISO8859-8', 'he_il': 'he_IL.ISO8859-8', 'he_il.cp1255': 'he_IL.CP1255', 'he_il.iso88598': 'he_IL.ISO8859-8', 'he_il.microsoftcp1255': 'he_IL.CP1255', 'hebrew': 'iw_IL.ISO8859-8', 'hebrew.iso88598': 'iw_IL.ISO8859-8', 'hi': 'hi_IN.ISCII-DEV', 'hi_in': 'hi_IN.ISCII-DEV', 'hi_in.isciidev': 'hi_IN.ISCII-DEV', 'hne': 'hne_IN.UTF-8', 'hr': 'hr_HR.ISO8859-2', 'hr_hr': 'hr_HR.ISO8859-2', 'hr_hr.iso88592': 'hr_HR.ISO8859-2', 'hrvatski': 'hr_HR.ISO8859-2', 'hu': 'hu_HU.ISO8859-2', 'hu_hu': 'hu_HU.ISO8859-2', 'hu_hu.iso88592': 'hu_HU.ISO8859-2', 'hungarian': 'hu_HU.ISO8859-2', 'icelandic': 'is_IS.ISO8859-1', 'icelandic.iso88591': 'is_IS.ISO8859-1', 'id': 'id_ID.ISO8859-1', 'id_id': 'id_ID.ISO8859-1', 'in': 'id_ID.ISO8859-1', 'in_id': 'id_ID.ISO8859-1', 'is': 'is_IS.ISO8859-1', 'is_is': 'is_IS.ISO8859-1', 'is_is.iso88591': 'is_IS.ISO8859-1', 'is_is.iso885915': 'is_IS.ISO8859-15', 'is_is@euro': 'is_IS.ISO8859-15', 'iso-8859-1': 'en_US.ISO8859-1', 'iso-8859-15': 'en_US.ISO8859-15', 'iso8859-1': 'en_US.ISO8859-1', 'iso8859-15': 'en_US.ISO8859-15', 'iso_8859_1': 'en_US.ISO8859-1', 'iso_8859_15': 'en_US.ISO8859-15', 'it': 'it_IT.ISO8859-1', 'it.iso885915': 'it_IT.ISO8859-15', 'it_ch': 'it_CH.ISO8859-1', 'it_ch.iso88591': 'it_CH.ISO8859-1', 'it_ch.iso885915': 'it_CH.ISO8859-15', 'it_ch@euro': 'it_CH.ISO8859-15', 'it_it': 'it_IT.ISO8859-1', 'it_it.88591': 'it_IT.ISO8859-1', 'it_it.iso88591': 'it_IT.ISO8859-1', 'it_it.iso885915': 'it_IT.ISO8859-15', 'it_it.iso885915@euro': 'it_IT.ISO8859-15', 'it_it.utf8@euro': 'it_IT.UTF-8', 'it_it@euro': 'it_IT.ISO8859-15', 'italian': 'it_IT.ISO8859-1', 'italian.iso88591': 'it_IT.ISO8859-1', 'iu': 'iu_CA.NUNACOM-8', 'iu_ca': 'iu_CA.NUNACOM-8', 'iu_ca.nunacom8': 'iu_CA.NUNACOM-8', 'iw': 'he_IL.ISO8859-8', 'iw_il': 'he_IL.ISO8859-8', 'iw_il.iso88598': 'he_IL.ISO8859-8', 'ja': 'ja_JP.eucJP', 'ja.jis': 'ja_JP.JIS7', 'ja.sjis': 'ja_JP.SJIS', 'ja_jp': 'ja_JP.eucJP', 'ja_jp.ajec': 'ja_JP.eucJP', 'ja_jp.euc': 'ja_JP.eucJP', 'ja_jp.eucjp': 'ja_JP.eucJP', 'ja_jp.iso-2022-jp': 'ja_JP.JIS7', 'ja_jp.iso2022jp': 'ja_JP.JIS7', 'ja_jp.jis': 'ja_JP.JIS7', 'ja_jp.jis7': 'ja_JP.JIS7', 'ja_jp.mscode': 'ja_JP.SJIS', 'ja_jp.pck': 'ja_JP.SJIS', 'ja_jp.sjis': 'ja_JP.SJIS', 'ja_jp.ujis': 
'ja_JP.eucJP', 'japan': 'ja_JP.eucJP', 'japanese': 'ja_JP.eucJP', 'japanese-euc': 'ja_JP.eucJP', 'japanese.euc': 'ja_JP.eucJP', 'japanese.sjis': 'ja_JP.SJIS', 'jp_jp': 'ja_JP.eucJP', 'ka': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY', 'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS', 'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY', 'kl': 'kl_GL.ISO8859-1', 'kl_gl': 'kl_GL.ISO8859-1', 'kl_gl.iso88591': 'kl_GL.ISO8859-1', 'kl_gl.iso885915': 'kl_GL.ISO8859-15', 'kl_gl@euro': 'kl_GL.ISO8859-15', 'km_kh': 'km_KH.UTF-8', 'kn': 'kn_IN.UTF-8', 'kn_in': 'kn_IN.UTF-8', 'ko': 'ko_KR.eucKR', 'ko_kr': 'ko_KR.eucKR', 'ko_kr.euc': 'ko_KR.eucKR', 'ko_kr.euckr': 'ko_KR.eucKR', 'korean': 'ko_KR.eucKR', 'korean.euc': 'ko_KR.eucKR', 'ks': 'ks_IN.UTF-8', 'ks_in@devanagari': '[email protected]', 'kw': 'kw_GB.ISO8859-1', 'kw_gb': 'kw_GB.ISO8859-1', 'kw_gb.iso88591': 'kw_GB.ISO8859-1', 'kw_gb.iso885914': 'kw_GB.ISO8859-14', 'kw_gb.iso885915': 'kw_GB.ISO8859-15', 'kw_gb@euro': 'kw_GB.ISO8859-15', 'ky': 'ky_KG.UTF-8', 'ky_kg': 'ky_KG.UTF-8', 'lithuanian': 'lt_LT.ISO8859-13', 'lo': 'lo_LA.MULELAO-1', 'lo_la': 'lo_LA.MULELAO-1', 'lo_la.cp1133': 'lo_LA.IBM-CP1133', 'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133', 'lo_la.mulelao1': 'lo_LA.MULELAO-1', 'lt': 'lt_LT.ISO8859-13', 'lt_lt': 'lt_LT.ISO8859-13', 'lt_lt.iso885913': 'lt_LT.ISO8859-13', 'lt_lt.iso88594': 'lt_LT.ISO8859-4', 'lv': 'lv_LV.ISO8859-13', 'lv_lv': 'lv_LV.ISO8859-13', 'lv_lv.iso885913': 'lv_LV.ISO8859-13', 'lv_lv.iso88594': 'lv_LV.ISO8859-4', 'mai': 'mai_IN.UTF-8', 'mi': 'mi_NZ.ISO8859-1', 'mi_nz': 'mi_NZ.ISO8859-1', 'mi_nz.iso88591': 'mi_NZ.ISO8859-1', 'mk': 'mk_MK.ISO8859-5', 'mk_mk': 'mk_MK.ISO8859-5', 'mk_mk.cp1251': 'mk_MK.CP1251', 'mk_mk.iso88595': 'mk_MK.ISO8859-5', 'mk_mk.microsoftcp1251': 'mk_MK.CP1251', 'ml': 'ml_IN.UTF-8', 'mr': 'mr_IN.UTF-8', 'mr_in': 'mr_IN.UTF-8', 'ms': 'ms_MY.ISO8859-1', 'ms_my': 'ms_MY.ISO8859-1', 'ms_my.iso88591': 'ms_MY.ISO8859-1', 'mt': 'mt_MT.ISO8859-3', 'mt_mt': 'mt_MT.ISO8859-3', 'mt_mt.iso88593': 'mt_MT.ISO8859-3', 'nb': 'nb_NO.ISO8859-1', 'nb_no': 'nb_NO.ISO8859-1', 'nb_no.88591': 'nb_NO.ISO8859-1', 'nb_no.iso88591': 'nb_NO.ISO8859-1', 'nb_no.iso885915': 'nb_NO.ISO8859-15', 'nb_no@euro': 'nb_NO.ISO8859-15', 'nl': 'nl_NL.ISO8859-1', 'nl.iso885915': 'nl_NL.ISO8859-15', 'nl_be': 'nl_BE.ISO8859-1', 'nl_be.88591': 'nl_BE.ISO8859-1', 'nl_be.iso88591': 'nl_BE.ISO8859-1', 'nl_be.iso885915': 'nl_BE.ISO8859-15', 'nl_be.iso885915@euro': 'nl_BE.ISO8859-15', 'nl_be.utf8@euro': 'nl_BE.UTF-8', 'nl_be@euro': 'nl_BE.ISO8859-15', 'nl_nl': 'nl_NL.ISO8859-1', 'nl_nl.88591': 'nl_NL.ISO8859-1', 'nl_nl.iso88591': 'nl_NL.ISO8859-1', 'nl_nl.iso885915': 'nl_NL.ISO8859-15', 'nl_nl.iso885915@euro': 'nl_NL.ISO8859-15', 'nl_nl.utf8@euro': 'nl_NL.UTF-8', 'nl_nl@euro': 'nl_NL.ISO8859-15', 'nn': 'nn_NO.ISO8859-1', 'nn_no': 'nn_NO.ISO8859-1', 'nn_no.88591': 'nn_NO.ISO8859-1', 'nn_no.iso88591': 'nn_NO.ISO8859-1', 'nn_no.iso885915': 'nn_NO.ISO8859-15', 'nn_no@euro': 'nn_NO.ISO8859-15', 'no': 'no_NO.ISO8859-1', 'no@nynorsk': 'ny_NO.ISO8859-1', 'no_no': 'no_NO.ISO8859-1', 'no_no.88591': 'no_NO.ISO8859-1', 'no_no.iso88591': 'no_NO.ISO8859-1', 'no_no.iso885915': 'no_NO.ISO8859-15', 'no_no.iso88591@bokmal': 'no_NO.ISO8859-1', 'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1', 'no_no@euro': 'no_NO.ISO8859-15', 'norwegian': 'no_NO.ISO8859-1', 'norwegian.iso88591': 'no_NO.ISO8859-1', 'nr': 'nr_ZA.ISO8859-1', 'nr_za': 'nr_ZA.ISO8859-1', 'nr_za.iso88591': 'nr_ZA.ISO8859-1', 'nso': 'nso_ZA.ISO8859-15', 'nso_za': 
'nso_ZA.ISO8859-15', 'nso_za.iso885915': 'nso_ZA.ISO8859-15', 'ny': 'ny_NO.ISO8859-1', 'ny_no': 'ny_NO.ISO8859-1', 'ny_no.88591': 'ny_NO.ISO8859-1', 'ny_no.iso88591': 'ny_NO.ISO8859-1', 'ny_no.iso885915': 'ny_NO.ISO8859-15', 'ny_no@euro': 'ny_NO.ISO8859-15', 'nynorsk': 'nn_NO.ISO8859-1', 'oc': 'oc_FR.ISO8859-1', 'oc_fr': 'oc_FR.ISO8859-1', 'oc_fr.iso88591': 'oc_FR.ISO8859-1', 'oc_fr.iso885915': 'oc_FR.ISO8859-15', 'oc_fr@euro': 'oc_FR.ISO8859-15', 'or': 'or_IN.UTF-8', 'pa': 'pa_IN.UTF-8', 'pa_in': 'pa_IN.UTF-8', 'pd': 'pd_US.ISO8859-1', 'pd_de': 'pd_DE.ISO8859-1', 'pd_de.iso88591': 'pd_DE.ISO8859-1', 'pd_de.iso885915': 'pd_DE.ISO8859-15', 'pd_de@euro': 'pd_DE.ISO8859-15', 'pd_us': 'pd_US.ISO8859-1', 'pd_us.iso88591': 'pd_US.ISO8859-1', 'pd_us.iso885915': 'pd_US.ISO8859-15', 'pd_us@euro': 'pd_US.ISO8859-15', 'ph': 'ph_PH.ISO8859-1', 'ph_ph': 'ph_PH.ISO8859-1', 'ph_ph.iso88591': 'ph_PH.ISO8859-1', 'pl': 'pl_PL.ISO8859-2', 'pl_pl': 'pl_PL.ISO8859-2', 'pl_pl.iso88592': 'pl_PL.ISO8859-2', 'polish': 'pl_PL.ISO8859-2', 'portuguese': 'pt_PT.ISO8859-1', 'portuguese.iso88591': 'pt_PT.ISO8859-1', 'portuguese_brazil': 'pt_BR.ISO8859-1', 'portuguese_brazil.8859': 'pt_BR.ISO8859-1', 'posix': 'C', 'posix-utf2': 'C', 'pp': 'pp_AN.ISO8859-1', 'pp_an': 'pp_AN.ISO8859-1', 'pp_an.iso88591': 'pp_AN.ISO8859-1', 'pt': 'pt_PT.ISO8859-1', 'pt.iso885915': 'pt_PT.ISO8859-15', 'pt_br': 'pt_BR.ISO8859-1', 'pt_br.88591': 'pt_BR.ISO8859-1', 'pt_br.iso88591': 'pt_BR.ISO8859-1', 'pt_br.iso885915': 'pt_BR.ISO8859-15', 'pt_br@euro': 'pt_BR.ISO8859-15', 'pt_pt': 'pt_PT.ISO8859-1', 'pt_pt.88591': 'pt_PT.ISO8859-1', 'pt_pt.iso88591': 'pt_PT.ISO8859-1', 'pt_pt.iso885915': 'pt_PT.ISO8859-15', 'pt_pt.iso885915@euro': 'pt_PT.ISO8859-15', 'pt_pt.utf8@euro': 'pt_PT.UTF-8', 'pt_pt@euro': 'pt_PT.ISO8859-15', 'ro': 'ro_RO.ISO8859-2', 'ro_ro': 'ro_RO.ISO8859-2', 'ro_ro.iso88592': 'ro_RO.ISO8859-2', 'romanian': 'ro_RO.ISO8859-2', 'ru': 'ru_RU.UTF-8', 'ru.koi8r': 'ru_RU.KOI8-R', 'ru_ru': 'ru_RU.UTF-8', 'ru_ru.cp1251': 'ru_RU.CP1251', 'ru_ru.iso88595': 'ru_RU.ISO8859-5', 'ru_ru.koi8r': 'ru_RU.KOI8-R', 'ru_ru.microsoftcp1251': 'ru_RU.CP1251', 'ru_ua': 'ru_UA.KOI8-U', 'ru_ua.cp1251': 'ru_UA.CP1251', 'ru_ua.koi8u': 'ru_UA.KOI8-U', 'ru_ua.microsoftcp1251': 'ru_UA.CP1251', 'rumanian': 'ro_RO.ISO8859-2', 'russian': 'ru_RU.ISO8859-5', 'rw': 'rw_RW.ISO8859-1', 'rw_rw': 'rw_RW.ISO8859-1', 'rw_rw.iso88591': 'rw_RW.ISO8859-1', 'sd': '[email protected]', 'se_no': 'se_NO.UTF-8', 'serbocroatian': 'sr_RS.UTF-8@latin', 'sh': 'sr_RS.UTF-8@latin', 'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2', 'sh_hr': 'sh_HR.ISO8859-2', 'sh_hr.iso88592': 'hr_HR.ISO8859-2', 'sh_sp': 'sr_CS.ISO8859-2', 'sh_yu': 'sr_RS.UTF-8@latin', 'si': 'si_LK.UTF-8', 'si_lk': 'si_LK.UTF-8', 'sinhala': 'si_LK.UTF-8', 'sk': 'sk_SK.ISO8859-2', 'sk_sk': 'sk_SK.ISO8859-2', 'sk_sk.iso88592': 'sk_SK.ISO8859-2', 'sl': 'sl_SI.ISO8859-2', 'sl_cs': 'sl_CS.ISO8859-2', 'sl_si': 'sl_SI.ISO8859-2', 'sl_si.iso88592': 'sl_SI.ISO8859-2', 'slovak': 'sk_SK.ISO8859-2', 'slovene': 'sl_SI.ISO8859-2', 'slovenian': 'sl_SI.ISO8859-2', 'sp': 'sr_CS.ISO8859-5', 'sp_yu': 'sr_CS.ISO8859-5', 'spanish': 'es_ES.ISO8859-1', 'spanish.iso88591': 'es_ES.ISO8859-1', 'spanish_spain': 'es_ES.ISO8859-1', 'spanish_spain.8859': 'es_ES.ISO8859-1', 'sq': 'sq_AL.ISO8859-2', 'sq_al': 'sq_AL.ISO8859-2', 'sq_al.iso88592': 'sq_AL.ISO8859-2', 'sr': 'sr_RS.UTF-8', 'sr@cyrillic': 'sr_RS.UTF-8', 'sr@latin': 'sr_RS.UTF-8@latin', 'sr@latn': 'sr_RS.UTF-8@latin', 'sr_cs': 'sr_RS.UTF-8', 'sr_cs.iso88592': 'sr_CS.ISO8859-2', 'sr_cs.iso88592@latn': 
'sr_CS.ISO8859-2', 'sr_cs.iso88595': 'sr_CS.ISO8859-5', 'sr_cs.utf8@latn': 'sr_RS.UTF-8@latin', 'sr_cs@latn': 'sr_RS.UTF-8@latin', 'sr_me': 'sr_ME.UTF-8', 'sr_rs': 'sr_RS.UTF-8', 'sr_rs.utf8@latn': 'sr_RS.UTF-8@latin', 'sr_rs@latin': 'sr_RS.UTF-8@latin', 'sr_rs@latn': 'sr_RS.UTF-8@latin', 'sr_sp': 'sr_CS.ISO8859-2', 'sr_yu': 'sr_RS.UTF-8@latin', 'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.iso88592': 'sr_CS.ISO8859-2', 'sr_yu.iso88595': 'sr_CS.ISO8859-5', 'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5', 'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251', 'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8', 'sr_yu@cyrillic': 'sr_RS.UTF-8', 'ss': 'ss_ZA.ISO8859-1', 'ss_za': 'ss_ZA.ISO8859-1', 'ss_za.iso88591': 'ss_ZA.ISO8859-1', 'st': 'st_ZA.ISO8859-1', 'st_za': 'st_ZA.ISO8859-1', 'st_za.iso88591': 'st_ZA.ISO8859-1', 'sv': 'sv_SE.ISO8859-1', 'sv.iso885915': 'sv_SE.ISO8859-15', 'sv_fi': 'sv_FI.ISO8859-1', 'sv_fi.iso88591': 'sv_FI.ISO8859-1', 'sv_fi.iso885915': 'sv_FI.ISO8859-15', 'sv_fi.iso885915@euro': 'sv_FI.ISO8859-15', 'sv_fi.utf8@euro': 'sv_FI.UTF-8', 'sv_fi@euro': 'sv_FI.ISO8859-15', 'sv_se': 'sv_SE.ISO8859-1', 'sv_se.88591': 'sv_SE.ISO8859-1', 'sv_se.iso88591': 'sv_SE.ISO8859-1', 'sv_se.iso885915': 'sv_SE.ISO8859-15', 'sv_se@euro': 'sv_SE.ISO8859-15', 'swedish': 'sv_SE.ISO8859-1', 'swedish.iso88591': 'sv_SE.ISO8859-1', 'ta': 'ta_IN.TSCII-0', 'ta_in': 'ta_IN.TSCII-0', 'ta_in.tscii': 'ta_IN.TSCII-0', 'ta_in.tscii0': 'ta_IN.TSCII-0', 'te': 'te_IN.UTF-8', 'tg': 'tg_TJ.KOI8-C', 'tg_tj': 'tg_TJ.KOI8-C', 'tg_tj.koi8c': 'tg_TJ.KOI8-C', 'th': 'th_TH.ISO8859-11', 'th_th': 'th_TH.ISO8859-11', 'th_th.iso885911': 'th_TH.ISO8859-11', 'th_th.tactis': 'th_TH.TIS620', 'th_th.tis620': 'th_TH.TIS620', 'thai': 'th_TH.ISO8859-11', 'tl': 'tl_PH.ISO8859-1', 'tl_ph': 'tl_PH.ISO8859-1', 'tl_ph.iso88591': 'tl_PH.ISO8859-1', 'tn': 'tn_ZA.ISO8859-15', 'tn_za': 'tn_ZA.ISO8859-15', 'tn_za.iso885915': 'tn_ZA.ISO8859-15', 'tr': 'tr_TR.ISO8859-9', 'tr_tr': 'tr_TR.ISO8859-9', 'tr_tr.iso88599': 'tr_TR.ISO8859-9', 'ts': 'ts_ZA.ISO8859-1', 'ts_za': 'ts_ZA.ISO8859-1', 'ts_za.iso88591': 'ts_ZA.ISO8859-1', 'tt': 'tt_RU.TATAR-CYR', 'tt_ru': 'tt_RU.TATAR-CYR', 'tt_ru.koi8c': 'tt_RU.KOI8-C', 'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR', 'turkish': 'tr_TR.ISO8859-9', 'turkish.iso88599': 'tr_TR.ISO8859-9', 'uk': 'uk_UA.KOI8-U', 'uk_ua': 'uk_UA.KOI8-U', 'uk_ua.cp1251': 'uk_UA.CP1251', 'uk_ua.iso88595': 'uk_UA.ISO8859-5', 'uk_ua.koi8u': 'uk_UA.KOI8-U', 'uk_ua.microsoftcp1251': 'uk_UA.CP1251', 'univ': 'en_US.utf', 'universal': 'en_US.utf', 'universal.utf8@ucs4': 'en_US.UTF-8', 'ur': 'ur_PK.CP1256', 'ur_pk': 'ur_PK.CP1256', 'ur_pk.cp1256': 'ur_PK.CP1256', 'ur_pk.microsoftcp1256': 'ur_PK.CP1256', 'uz': 'uz_UZ.UTF-8', 'uz_uz': 'uz_UZ.UTF-8', 'uz_uz.iso88591': 'uz_UZ.ISO8859-1', 'uz_uz.utf8@cyrillic': 'uz_UZ.UTF-8', 'uz_uz@cyrillic': 'uz_UZ.UTF-8', 've': 've_ZA.UTF-8', 've_za': 've_ZA.UTF-8', 'vi': 'vi_VN.TCVN', 'vi_vn': 'vi_VN.TCVN', 'vi_vn.tcvn': 'vi_VN.TCVN', 'vi_vn.tcvn5712': 'vi_VN.TCVN', 'vi_vn.viscii': 'vi_VN.VISCII', 'vi_vn.viscii111': 'vi_VN.VISCII', 'wa': 'wa_BE.ISO8859-1', 'wa_be': 'wa_BE.ISO8859-1', 'wa_be.iso88591': 'wa_BE.ISO8859-1', 'wa_be.iso885915': 'wa_BE.ISO8859-15', 'wa_be.iso885915@euro': 'wa_BE.ISO8859-15', 'wa_be@euro': 'wa_BE.ISO8859-15', 'xh': 'xh_ZA.ISO8859-1', 'xh_za': 'xh_ZA.ISO8859-1', 'xh_za.iso88591': 'xh_ZA.ISO8859-1', 'yi': 'yi_US.CP1255', 'yi_us': 'yi_US.CP1255', 'yi_us.cp1255': 'yi_US.CP1255', 'yi_us.microsoftcp1255': 'yi_US.CP1255', 'zh': 'zh_CN.eucCN', 'zh_cn': 'zh_CN.gb2312', 'zh_cn.big5': 'zh_TW.big5', 'zh_cn.euc': 
'zh_CN.eucCN', 'zh_cn.gb18030': 'zh_CN.gb18030', 'zh_cn.gb2312': 'zh_CN.gb2312', 'zh_cn.gbk': 'zh_CN.gbk', 'zh_hk': 'zh_HK.big5hkscs', 'zh_hk.big5': 'zh_HK.big5', 'zh_hk.big5hk': 'zh_HK.big5hkscs', 'zh_hk.big5hkscs': 'zh_HK.big5hkscs', 'zh_tw': 'zh_TW.big5', 'zh_tw.big5': 'zh_TW.big5', 'zh_tw.euc': 'zh_TW.eucTW', 'zh_tw.euctw': 'zh_TW.eucTW', 'zu': 'zu_ZA.ISO8859-1', 'zu_za': 'zu_ZA.ISO8859-1', 'zu_za.iso88591': 'zu_ZA.ISO8859-1' } windows_locale = {1078: 'af_ZA', 1052: 'sq_AL', 1156: 'gsw_FR', 1118: 'am_ET', 1025: 'ar_SA', 2049: 'ar_IQ', 3073: 'ar_EG', 4097: 'ar_LY', 5121: 'ar_DZ', 6145: 'ar_MA', 7169: 'ar_TN', 8193: 'ar_OM', 9217: 'ar_YE', 10241: 'ar_SY', 11265: 'ar_JO', 12289: 'ar_LB', 13313: 'ar_KW', 14337: 'ar_AE', 15361: 'ar_BH', 16385: 'ar_QA', 1067: 'hy_AM', 1101: 'as_IN', 1068: 'az_AZ', 2092: 'az_AZ', 1133: 'ba_RU', 1069: 'eu_ES', 1059: 'be_BY', 1093: 'bn_IN', 8218: 'bs_BA', 5146: 'bs_BA', 1150: 'br_FR', 1026: 'bg_BG', 1027: 'ca_ES', 4: 'zh_CHS', 1028: 'zh_TW', 2052: 'zh_CN', 3076: 'zh_HK', 4100: 'zh_SG', 5124: 'zh_MO', 31748: 'zh_CHT', 1155: 'co_FR', 1050: 'hr_HR', 4122: 'hr_BA', 1029: 'cs_CZ', 1030: 'da_DK', 1164: 'gbz_AF', 1125: 'div_MV', 1043: 'nl_NL', 2067: 'nl_BE', 1033: 'en_US', 2057: 'en_GB', 3081: 'en_AU', 4105: 'en_CA', 5129: 'en_NZ', 6153: 'en_IE', 7177: 'en_ZA', 8201: 'en_JA', 9225: 'en_CB', 10249: 'en_BZ', 11273: 'en_TT', 12297: 'en_ZW', 13321: 'en_PH', 16393: 'en_IN', 17417: 'en_MY', 18441: 'en_IN', 1061: 'et_EE', 1080: 'fo_FO', 1124: 'fil_PH', 1035: 'fi_FI', 1036: 'fr_FR', 2060: 'fr_BE', 3084: 'fr_CA', 4108: 'fr_CH', 5132: 'fr_LU', 6156: 'fr_MC', 1122: 'fy_NL', 1110: 'gl_ES', 1079: 'ka_GE', 1031: 'de_DE', 2055: 'de_CH', 3079: 'de_AT', 4103: 'de_LU', 5127: 'de_LI', 1032: 'el_GR', 1135: 'kl_GL', 1095: 'gu_IN', 1128: 'ha_NG', 1037: 'he_IL', 1081: 'hi_IN', 1038: 'hu_HU', 1039: 'is_IS', 1057: 'id_ID', 1117: 'iu_CA', 2141: 'iu_CA', 2108: 'ga_IE', 1040: 'it_IT', 2064: 'it_CH', 1041: 'ja_JP', 1099: 'kn_IN', 1087: 'kk_KZ', 1107: 'kh_KH', 1158: 'qut_GT', 1159: 'rw_RW', 1111: 'kok_IN', 1042: 'ko_KR', 1088: 'ky_KG', 1108: 'lo_LA', 1062: 'lv_LV', 1063: 'lt_LT', 2094: 'dsb_DE', 1134: 'lb_LU', 1071: 'mk_MK', 1086: 'ms_MY', 2110: 'ms_BN', 1100: 'ml_IN', 1082: 'mt_MT', 1153: 'mi_NZ', 1146: 'arn_CL', 1102: 'mr_IN', 1148: 'moh_CA', 1104: 'mn_MN', 2128: 'mn_CN', 1121: 'ne_NP', 1044: 'nb_NO', 2068: 'nn_NO', 1154: 'oc_FR', 1096: 'or_IN', 1123: 'ps_AF', 1065: 'fa_IR', 1045: 'pl_PL', 1046: 'pt_BR', 2070: 'pt_PT', 1094: 'pa_IN', 1131: 'quz_BO', 2155: 'quz_EC', 3179: 'quz_PE', 1048: 'ro_RO', 1047: 'rm_CH', 1049: 'ru_RU', 9275: 'smn_FI', 4155: 'smj_NO', 5179: 'smj_SE', 1083: 'se_NO', 2107: 'se_SE', 3131: 'se_FI', 8251: 'sms_FI', 6203: 'sma_NO', 7227: 'sma_SE', 1103: 'sa_IN', 3098: 'sr_SP', 7194: 'sr_BA', 2074: 'sr_SP', 6170: 'sr_BA', 1115: 'si_LK', 1132: 'ns_ZA', 1074: 'tn_ZA', 1051: 'sk_SK', 1060: 'sl_SI', 1034: 'es_ES', 2058: 'es_MX', 3082: 'es_ES', 4106: 'es_GT', 5130: 'es_CR', 6154: 'es_PA', 7178: 'es_DO', 8202: 'es_VE', 9226: 'es_CO', 10250: 'es_PE', 11274: 'es_AR', 12298: 'es_EC', 13322: 'es_CL', 14346: 'es_UR', 15370: 'es_PY', 16394: 'es_BO', 17418: 'es_SV', 18442: 'es_HN', 19466: 'es_NI', 20490: 'es_PR', 21514: 'es_US', 1089: 'sw_KE', 1053: 'sv_SE', 2077: 'sv_FI', 1114: 'syr_SY', 1064: 'tg_TJ', 2143: 'tmz_DZ', 1097: 'ta_IN', 1092: 'tt_RU', 1098: 'te_IN', 1054: 'th_TH', 2129: 'bo_BT', 1105: 'bo_CN', 1055: 'tr_TR', 1090: 'tk_TM', 1152: 'ug_CN', 1058: 'uk_UA', 1070: 'wen_DE', 1056: 'ur_PK', 2080: 'ur_IN', 1091: 'uz_UZ', 2115: 'uz_UZ', 1066: 'vi_VN', 1106: 'cy_GB', 1160: 'wo_SN', 1076: 
'xh_ZA', 1157: 'sah_RU', 1144: 'ii_CN', 1130: 'yo_NG', 1077: 'zu_ZA' } def _print_locale(): """ Test function. """ categories = {} def _init_categories(categories=categories): for k, v in globals().items(): if k[:3] == 'LC_': categories[k] = v _init_categories() del categories['LC_ALL'] print 'Locale defaults as determined by getdefaultlocale():' print '-' * 72 lang, enc = getdefaultlocale() print 'Language: ', lang or '(undefined)' print 'Encoding: ', enc or '(undefined)' print print 'Locale settings on startup:' print '-' * 72 for name, category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print print print 'Locale settings after calling resetlocale():' print '-' * 72 resetlocale() for name, category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print try: setlocale(LC_ALL, '') except: print 'NOTE:' print 'setlocale(LC_ALL, "") does not support the default locale' print 'given in the OS environment variables.' else: print print 'Locale settings after calling setlocale(LC_ALL, ""):' print '-' * 72 for name, category in categories.items(): print name, '...' lang, enc = getlocale(category) print ' Language: ', lang or '(undefined)' print ' Encoding: ', enc or '(undefined)' print try: LC_MESSAGES except NameError: pass else: __all__.append('LC_MESSAGES') if __name__ == '__main__': print 'Locale aliasing:' print _print_locale() print print 'Number formatting:' print _test()
if code == 'C': return (None, None) raise ValueError, 'unknown locale: %s' % localename return None
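To see how the pieces of this module fit together in practice, a short usage sketch follows. Outputs depend on the alias table and the host's environment variables, so the commented values are illustrative assumptions rather than guaranteed results.

import locale

# normalize() maps informal names to canonical lang_COUNTRY.ENCODING codes
# via the locale_alias table, e.g. 'de' -> 'de_DE.ISO8859-1'.
print(locale.normalize("de"))

# getdefaultlocale() walks LC_ALL, LC_CTYPE, LANG, LANGUAGE and parses the
# first variable that is set; both tuple elements may be None.
lang, enc = locale.getdefaultlocale()

# format_string()/atoi() round-trip integers through the locale's
# thousands separator once a real locale has been activated.
locale.setlocale(locale.LC_ALL, "")
grouped = locale.format_string("%d", 1234567, grouping=True)
print(grouped, "->", locale.atoi(grouped))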
transport.go
package http import ( "encoding/json" "fmt" "net/http" "net/url" "path" "github.com/gorilla/mux" "github.com/pkg/errors" "github.com/ContainerSolutions/flux" "github.com/ContainerSolutions/flux/jobs" ) func NewRouter() *mux.Router { r := mux.NewRouter() // Any versions not represented in the routes below are // deprecated. They are done separately so we can see them as // different methods in metrics and logging. for _, version := range []string{"v1", "v2"} { r.NewRoute().Name("Deprecated:" + version).PathPrefix("/" + version + "/").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { WriteError(w, r, http.StatusGone, ErrorDeprecated) })
r.NewRoute().Name("ListImages").Methods("GET").Path("/v3/images").Queries("service", "{service}") r.NewRoute().Name("PostRelease").Methods("POST").Path("/v4/release").Queries("service", "{service}", "image", "{image}", "kind", "{kind}") r.NewRoute().Name("GetRelease").Methods("GET").Path("/v4/release").Queries("id", "{id}") r.NewRoute().Name("Automate").Methods("POST").Path("/v3/automate").Queries("service", "{service}") r.NewRoute().Name("Deautomate").Methods("POST").Path("/v3/deautomate").Queries("service", "{service}") r.NewRoute().Name("Lock").Methods("POST").Path("/v3/lock").Queries("service", "{service}") r.NewRoute().Name("Unlock").Methods("POST").Path("/v3/unlock").Queries("service", "{service}") r.NewRoute().Name("History").Methods("GET").Path("/v3/history").Queries("service", "{service}") r.NewRoute().Name("Status").Methods("GET").Path("/v3/status") r.NewRoute().Name("GetConfig").Methods("GET").Path("/v4/config") r.NewRoute().Name("SetConfig").Methods("POST").Path("/v4/config") r.NewRoute().Name("GenerateDeployKeys").Methods("POST").Path("/v5/config/deploy-keys") r.NewRoute().Name("PostIntegrationsGithub").Methods("POST").Path("/v5/integrations/github").Queries("owner", "{owner}", "repository", "{repository}") r.NewRoute().Name("RegisterDaemonV4").Methods("GET").Path("/v4/daemon") r.NewRoute().Name("RegisterDaemonV5").Methods("GET").Path("/v5/daemon") r.NewRoute().Name("IsConnected").Methods("HEAD", "GET").Path("/v4/ping") r.NewRoute().Name("Export").Methods("HEAD", "GET").Path("/v5/export") // We assume every request that doesn't match a route is a client // calling an old or hitherto unsupported API. r.NewRoute().Name("NotFound").HandlerFunc(func(w http.ResponseWriter, r *http.Request) { WriteError(w, r, http.StatusNotFound, MakeAPINotFound(r.URL.Path)) }) return r } type PostReleaseResponse struct { Status string `json:"status"` ReleaseID jobs.JobID `json:"release_id"` } func MakeURL(endpoint string, router *mux.Router, routeName string, urlParams ...string) (*url.URL, error) { if len(urlParams)%2 != 0 { panic("urlParams must be even!") } endpointURL, err := url.Parse(endpoint) if err != nil { return nil, errors.Wrapf(err, "parsing endpoint %s", endpoint) } routeURL, err := router.Get(routeName).URL() if err != nil { return nil, errors.Wrapf(err, "retrieving route path %s", routeName) } v := url.Values{} for i := 0; i < len(urlParams); i += 2 { v.Add(urlParams[i], urlParams[i+1]) } endpointURL.Path = path.Join(endpointURL.Path, routeURL.Path) endpointURL.RawQuery = v.Encode() return endpointURL, nil } func WriteError(w http.ResponseWriter, r *http.Request, code int, err error) { // An Accept header with "application/json" is sent by clients // understanding how to decode JSON errors. Older clients don't // send an Accept header, so we just give them the error text. 
if len(r.Header.Get("Accept")) > 0 { switch negotiateContentType(r, []string{"application/json", "text/plain"}) { case "application/json": body, encodeErr := json.Marshal(err) if encodeErr != nil { w.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain; charset=utf-8") w.WriteHeader(http.StatusInternalServerError) fmt.Fprintf(w, "Error encoding error response: %s\n\nOriginal error: %s", encodeErr.Error(), err.Error()) return } w.Header().Set(http.CanonicalHeaderKey("Content-Type"), "application/json; charset=utf-8") w.WriteHeader(code) w.Write(body) return case "text/plain": w.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain; charset=utf-8") w.WriteHeader(code) switch err := err.(type) { case *flux.BaseError: fmt.Fprint(w, err.Help) default: fmt.Fprint(w, err.Error()) } return } } w.Header().Set(http.CanonicalHeaderKey("Content-Type"), "text/plain; charset=utf-8") w.WriteHeader(code) fmt.Fprint(w, err.Error()) }
} r.NewRoute().Name("ListServices").Methods("GET").Path("/v3/services").Queries("namespace", "{namespace}") // optional namespace!
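WriteError above negotiates the error body format from the client's Accept header and falls back to plain text for legacy clients that send none. A rough sketch of that fallback order in Python; this is a hypothetical helper, not part of the flux HTTP package, and it deliberately ignores q-value weighting that real Accept-header parsing would honour.

def pick_error_content_type(accept_header: str) -> str:
    """Choose how to encode an error body, mirroring WriteError's fallbacks."""
    if not accept_header:
        # Older clients send no Accept header and just get the error text.
        return "text/plain"
    offered = [part.split(";")[0].strip().lower()
               for part in accept_header.split(",")]
    for candidate in ("application/json", "text/plain"):
        if candidate in offered or "*/*" in offered:
            return candidate
    return "text/plain"


assert pick_error_content_type("") == "text/plain"
assert pick_error_content_type("application/json") == "application/json"
assert pick_error_content_type("text/plain") == "text/plain"
assert pick_error_content_type("*/*") == "application/json"  # server preference wins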
lens.rs
// Copyright 2019 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use super::attr::{FieldKind, Fields}; use quote::quote; use syn::{spanned::Spanned, Data}; pub(crate) fn derive_lens_impl( input: syn::DeriveInput, ) -> Result<proc_macro2::TokenStream, syn::Error> { match &input.data { Data::Struct(_) => derive_struct(&input), Data::Enum(e) => Err(syn::Error::new( e.enum_token.span(), "Lens implementations cannot be derived from enums", )), Data::Union(u) => Err(syn::Error::new( u.union_token.span(), "Lens implementations cannot be derived from unions", )), } } fn derive_struct(input: &syn::DeriveInput) -> Result<proc_macro2::TokenStream, syn::Error> { let ty = &input.ident; let fields = if let syn::Data::Struct(syn::DataStruct { fields, .. }) = &input.data { Fields::parse_ast(fields)? } else { return Err(syn::Error::new( input.span(), "Lens implementations can only be derived from structs with named fields", )); }; if fields.kind != FieldKind::Named { return Err(syn::Error::new( input.span(), "Lens implementations can only be derived from structs with named fields", )); } let twizzled_name = if is_camel_case(&ty.to_string()) { let temp_name = format!("{}_derived_lenses", to_snake_case(&ty.to_string())); proc_macro2::Ident::new(&temp_name, proc_macro2::Span::call_site()) } else { return Err(syn::Error::new( ty.span(), "Lens implementations can only be derived from CamelCase types", )); }; // Define lens types for each field let defs = fields.iter().map(|f| { let field_name = &f.ident.unwrap_named(); quote! { /// Lens for the field on #ty #[allow(non_camel_case_types)] #[derive(Debug, Copy, Clone)] pub struct #field_name; } }); let impls = fields.iter().map(|f| { let field_name = &f.ident.unwrap_named(); let field_ty = &f.ty; quote! { impl druid::Lens<#ty, #field_ty> for #twizzled_name::#field_name { fn with<V, F: FnOnce(&#field_ty) -> V>(&self, data: &#ty, f: F) -> V { f(&data.#field_name) } fn with_mut<V, F: FnOnce(&mut #field_ty) -> V>(&self, data: &mut #ty, f: F) -> V { f(&mut data.#field_name) } } } }); let associated_items = fields.iter().map(|f| { let field_name = &f.ident.unwrap_named(); let lens_field_name = f.lens_name_override.as_ref().unwrap_or(&field_name); quote! { /// Lens for the corresponding field pub const #lens_field_name: #twizzled_name::#field_name = #twizzled_name::#field_name; } }); let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); let expanded = quote! { pub mod #twizzled_name { #(#defs)* } #(#impls)* #[allow(non_upper_case_globals)] impl #impl_generics #ty #ty_generics #where_clause { #(#associated_items)* } }; Ok(expanded) } //I stole these from rustc! 
fn char_has_case(c: char) -> bool { c.is_lowercase() || c.is_uppercase() } fn is_camel_case(name: &str) -> bool { let name = name.trim_matches('_'); if name.is_empty() { return true; } // start with a non-lowercase letter rather than non-uppercase // ones (some scripts don't have a concept of upper/lowercase) !name.chars().next().unwrap().is_lowercase() && !name.contains("__") && !name.chars().collect::<Vec<_>>().windows(2).any(|pair| { // contains a capitalisable character followed by, or preceded by, an underscore char_has_case(pair[0]) && pair[1] == '_' || char_has_case(pair[1]) && pair[0] == '_' }) } fn to_snake_case(mut str: &str) -> String
{ let mut words = vec![]; // Preserve leading underscores str = str.trim_start_matches(|c: char| { if c == '_' { words.push(String::new()); true } else { false } }); for s in str.split('_') { let mut last_upper = false; let mut buf = String::new(); if s.is_empty() { continue; } for ch in s.chars() { if !buf.is_empty() && buf != "'" && ch.is_uppercase() && !last_upper { words.push(buf); buf = String::new(); } last_upper = ch.is_uppercase(); buf.extend(ch.to_lowercase()); } words.push(buf); } words.join("_") }
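The derive macro above only accepts CamelCase struct names and builds the lens module name with to_snake_case. That conversion is the easiest part to get wrong; a small Python sketch of the same CamelCase-to-snake_case mapping (a simplified regex version that skips the Rust helper's leading-underscore and apostrophe handling):

```python
import re

def to_snake_case(name: str) -> str:
    """Convert a CamelCase type name into a snake_case module name."""
    s = re.sub(r"(.)([A-Z][a-z]+)", r"\1_\2", name)            # split WordWord boundaries
    return re.sub(r"([a-z0-9])([A-Z])", r"\1_\2", s).lower()   # split wordWORD boundaries

assert to_snake_case("AppState") == "app_state"
assert to_snake_case("MyHTTPState") == "my_http_state"
```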
index.js
'use strict'; exports.bar = require('./bar');
exports.line = require('./line'); exports.pie = require('./pie'); exports.candlestick = require('./candlestick');
cortexpilot.py
""" Driver for robot Robik from cortexpilot.com """ import ctypes import struct import math from datetime import timedelta from osgar.node import Node from osgar.bus import BusShutdownException from osgar.lib import quaternion # CPR = 9958 (ticks per revolution) # wheel diameter D = 395 mm # 1 Rev = 1241 mm ENC_SCALE = 1.241/9958 WHEEL_DISTANCE = 0.88 # meters TODO confirm RAMP_STEP = 0.1 # fractional number for speed in -1.0 .. 1.0 def sint32_diff(a, b): return ctypes.c_int32(a - b).value class Cortexpilot(Node): def __init__(self, config, bus): super().__init__(config, bus) bus.register('raw', 'encoders', 'emergency_stop', 'pose2d', 'voltage', 'rotation', 'orientation', 'scan') self._buf = b'' # commands self.desired_speed = 0.0 # m/s self.desired_angular_speed = 0.0 self.cmd_flags = 0x40 #0x41 # 0 = remote steering, PWM OFF, laser ON, TODO self.speeds = self.plain_speeds() # status self.emergency_stop = None # uknown state self.pose = (0.0, 0.0, 0.0) # x, y in meters, heading in radians (not corrected to 2PI) self.flags = None self.last_encoders = None self.yaw = None self.lidar_valid = False self.lidar_timestamp = 0 self.uptime = None def send_pose(self): x, y, heading = self.pose self.publish('pose2d', [round(x*1000), round(y*1000), round(math.degrees(heading)*100)]) def query_version(self): ret = bytes([0, 0, 3, 0x1, 0x01]) checksum = sum(ret) & 0xFF return ret + bytes([(256-checksum) & 0xFF]) def oscilate(self): while True: end = self.time + timedelta(seconds=1) while self.time < end: yield self.desired_speed, -self.desired_angular_speed end = self.time + timedelta(seconds=1) while self.time < end: yield -self.desired_speed, -self.desired_angular_speed def plain_speeds(self): while True: yield self.desired_speed, -self.desired_angular_speed def create_packet(self): if self.yaw is None: self.yaw = 0.0 # hack! 
speed_frac, speed_dir = next(self.speeds) speed_frac *= 2 speed_dir *= 1.2 if speed_frac < 0: speed_dir = -speed_dir # Robik V5.1.1 handles backup backwards if not self.lidar_valid: speed_frac = 0.0 speed_dir = 0.0 #print(self.time, "{:.4f}, {:.4f} \t {:.4f} {:.4f}".format(speed_frac, speed_dir, self.desired_speed, self.desired_angular_speed)) flags = self.cmd_flags flags |= (1<<8) # agresive turning if self.emergency_stop is not None: if self.emergency_stop: flags |= (1<<11) # display red LEDs else: flags |= (1<<10) # turn on green (9th bit) packet = struct.pack('<ffI', speed_frac, speed_dir, flags) assert len(packet) < 256, len(packet) # just to use LSB only ret = bytes([0, 0, len(packet) + 2 + 1, 0x1, 0x0D]) + packet # addr=0x1, cmd=0xD, length is given by payload, addr, cmd and checksum checksum = sum(ret) & 0xFF return ret + bytes([(256-checksum) & 0xFF]) def get_packet(self): """extract packet from internal buffer (if available otherwise return None""" data = self._buf if len(data) < 3: return None high, mid, low = data[:3] # 24bit packet length (big endian int) assert high == 0, high # all messages < 65535 bytes size = 256 * mid + low + 3 # counting also 3 bytes of packet length header if len(data) < size: return None ret, self._buf = data[:size], data[size:] checksum = sum(ret) & 0xFF assert checksum == 0, checksum # checksum error return ret def parse_packet(self, data): """ Parse cortexpilot sensors status message """ # expects already validated single sample with 3 bytes length prefix # and checksum at the end high, mid, low = data[:3] assert high == 0, high # fixed packet size 2*256+89 bytes assert mid == 2, mid assert low == 89, low addr, cmd = data[3:5] assert addr == 1, addr assert cmd == 0xD, cmd offset = 5 # payload offset # 4 byte Flags (unsigned long) 0 # bit 0 -> 1 = BigRedSwitch # bit 1 -> 1 = MissionButton # bit 2 -> copy of EnableRun flag (motors enabled) # bit 3 -> 1 = Payload loaded, 0 = Payload unloaded - payload indicator # bit 4 -> 1 = LIDAR ScanValid # bit 5 -> 1 = Manual override # 4 byte SystemVoltage (float) 4 - battery level for control electronics [V] # 4 byte PowerVoltage (float) 8 - battery level for motors [V] self.flags, system_voltage, power_voltage = struct.unpack_from('<Iff', data, offset) self.lidar_valid = (self.flags & 0x10) == 0x10 self.emergency_stop = (self.flags & 0x01) == 0x01 self.voltage = [system_voltage, power_voltage] self.bus.publish('voltage', [int(v*100) for v in self.voltage]) # skipped parsing of: # 4 byte SpeedM1 (float) 12 - normalized motor M1 (R) speed <-1.0 1.0> # 4 byte SpeedM2 (float) 16 - normalized motor M2 (L) speed <-1.0 1.0> motors = struct.unpack_from('<ff', data, offset + 12) # skipped parsing of: # 4 byte ActualDir (float) 20 - normalized direction for PID controller
# 4 byte EncM1 (signed long) 24 - incremental encoders count for motor M1 (R) since last reset # 4 byte EncM2 (signed long) 28 - incremental encoders count for motor M2 (L) since last reset encoders = struct.unpack_from('<II', data, offset + 24) # skipped parsing of: # 1 byte GPS_Valid 32 - 1 = valid data from GPS module # 1 byte GPS_Fix 33 - 0 = no fix, 1 = 2D fix, 2 = 3D fix # 4 byte GPS_UTCDate (ulong) 34 - GPS date in YYMMDD format # 4 byte GPS_UTCTime (ulong) 38 - GPS time in HHMMSS format # 4 byte GPS_Lat (ulong) 42 - format Lat * 1E7 # 4 byte GPS_Lon (ulong) 46 - format Lon * 1E7 # 4 byte GPS_Brg (float) 50 - GPS Bearing <0 .. 359> deg # 4 byte AHRS_q0 (float) 54 - Orientation Quaternion # 4 byte AHRS_q1 (float) 58 - # 4 byte AHRS_q2 (float) 62 - # 4 byte AHRS_q3 (float) 66 - qw, qx, qy, qz = struct.unpack_from('<ffff', data, offset+54) orientation = qx, qy, qz, qw # identity quat points to north, we need it to point to east orientation = quaternion.multiply(orientation, [0, 0, 0.7071068, 0.7071068]) # correct roll axis by 1.7 degrees self.orientation = quaternion.multiply(orientation, [0.0148348, 0, 0, 0.99989]) self.bus.publish('orientation', list(self.orientation)) q1, q2, q3, q0 = self.orientation # quaternion #print(self.time, "{:.4f} {:.4f} {:.4f} {:.4f}".format(q0, q1, q2, q3)) ax = math.atan2(2*(q0*q1+q2*q3), 1-2*(q1*q1+q2*q2)) ay = math.asin(2*(q0*q2-q3*q1)) az = math.atan2(2*(q0*q3+q1*q2), 1-2*(q2*q2+q3*q3)) # rotation Euler angles are yaw, pitch and roll #print(self.time, "{:.4f} {:.4f} {:.4f}".format(math.degrees(az), math.degrees(ay), math.degrees(ax))) self.bus.publish('rotation', [round(math.degrees(angle)*100) for angle in [az, ay, ax]]) # 4 byte Yaw (float) 70 - Heading (Yaw) - machine orientation to magnetic north <0 .. 359> deg self.yaw = struct.unpack_from('<f', data, offset + 70)[0] #print(math.degrees(x), math.degrees(y), math.degrees(z), self.yaw) # 4 byte AccelX (float) 74 # 4 byte AccelY (float) 78 # 4 byte AccelZ (float) 82 # 4 byte GyroX (float) 86 # 4 byte GyroY (float) 90 # 4 byte GyroZ (float) 94 # 4 byte MagX (float) 98 # 4 byte MagY (float) 102 # 4 byte MagZ (float) 106 # 4 byte SystemTick (ulong) 110 - Uptime in milisecond uptime = struct.unpack_from('<I', data, offset + 110)[0] if self.uptime is not None: uptime_diff = uptime - self.uptime self.uptime = uptime if self.last_encoders is not None: step = [sint32_diff(x, prev) for x, prev in zip(encoders, self.last_encoders)] self.publish('encoders', step) dist = ENC_SCALE * sum(step)/len(step) angle = ENC_SCALE * (step[0] - step[1])/WHEEL_DISTANCE x, y, heading = self.pose # advance robot by given distance and angle if abs(angle) < 0.0000001: # EPS # Straight movement - a special case x += dist * math.cos(heading) y += dist * math.sin(heading) #Not needed: heading += angle else: # Arc r = dist / angle x += -r * math.sin(heading) + r * math.sin(heading + angle) y += +r * math.cos(heading) - r * math.cos(heading + angle) heading += angle # not normalized self.pose = (x, y, heading) self.send_pose() self.last_encoders = encoders # 4 byte LidarTimestamp (ulong) 114 - Value of SystemTick when lidar scan was received lidar_timestamp = struct.unpack_from('<I', data, offset + 114)[0] lidar_diff = lidar_timestamp - self.lidar_timestamp self.lidar_timestamp = lidar_timestamp if lidar_diff > 150 and self.lidar_valid: print(self.time, "lidar invalid:", lidar_diff) self.lidar_valid = False if lidar_diff != 0 and self.lidar_valid: # laser # 480 byte Lidar_Scan (ushort) 118 - 239 two-bytes distances from Lidar <0 .. 
65535> in [cm] # Scan is whole 360 deg with resolution 1.5 deg scan = struct.unpack_from('<' + 'H'*239, data, offset + 118) # TODO should be 240 # restrict scan only to 270 degrees - cut 1/8th on both sides # scan = scan[30:-30] # zero_sides = 20 scan = [10 * d for d in reversed(scan)] # scale to millimeters # scan[:zero_sides] = [0]*zero_sides # scan[-zero_sides:] = [0]*zero_sides self.publish('scan', scan) def run(self): try: self.publish('raw', self.query_version()) while True: dt, channel, data = self.listen() self.time = dt if channel == 'raw': self._buf += data packet = self.get_packet() if packet is not None: if len(packet) < 256: # TODO cmd value print(packet) else: prev = self.flags self.parse_packet(packet) if prev != self.flags: print(self.time, 'Flags:', hex(self.flags)) self.publish('raw', self.create_packet()) if channel == 'desired_speed': self.desired_speed, self.desired_angular_speed = data[0]/1000.0, math.radians(data[1]/100.0) if abs(self.desired_speed) < 0.2 and abs(self.desired_angular_speed) > 0.2: if self.speeds.__name__ != "oscilate": self.speeds = self.oscilate() else: if self.speeds.__name__ == "oscilate": self.speeds = self.plain_speeds() self.cmd_flags |= 0x02 # PWM ON # if data == [0, 0]: # print("TURN OFF") # self.cmd_flags = 0x00 # turn everything OFF (hack for now) except BusShutdownException: pass # vim: expandtab sw=4 ts=4
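In parse_packet above, the encoder tick deltas drive a dead-reckoning update: the mean of the two wheel steps gives the distance travelled, their difference over the wheel base gives the turn angle, and the pose advances either along a straight line or a circular arc. A standalone sketch of that update, reusing the driver's constants:

```python
import math

ENC_SCALE = 1.241 / 9958   # meters per encoder tick (from the driver above)
WHEEL_DISTANCE = 0.88      # wheel base in meters (from the driver above)

def advance_pose(pose, step_right, step_left):
    """Advance (x, y, heading) by one pair of encoder tick deltas (right, left)."""
    x, y, heading = pose
    dist = ENC_SCALE * (step_right + step_left) / 2.0
    angle = ENC_SCALE * (step_right - step_left) / WHEEL_DISTANCE
    if abs(angle) < 1e-7:
        # Straight segment: no arc radius is defined.
        x += dist * math.cos(heading)
        y += dist * math.sin(heading)
    else:
        # Circular arc of radius r = dist / angle.
        r = dist / angle
        x += -r * math.sin(heading) + r * math.sin(heading + angle)
        y += r * math.cos(heading) - r * math.cos(heading + angle)
        heading += angle   # heading is deliberately not normalized, as in the driver
    return x, y, heading

# 1000 ticks on the right wheel, 900 on the left: a short, gentle left arc from the origin.
print(advance_pose((0.0, 0.0, 0.0), 1000, 900))
```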
dcgan.py
# Copyright (c) 2017 Sony Corporation. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from six.moves import range import numpy as np import nnabla as nn import nnabla.logger as logger import nnabla.functions as F import nnabla.parametric_functions as PF import nnabla.solvers as S import nnabla.utils.save as save from args import get_args from mnist_data import data_iterator_mnist from _checkpoint_nnp_util import save_checkpoint, load_checkpoint, save_nnp import os def generator(z, maxh=256, test=False, output_hidden=False): """ Building generator network which takes (B, Z, 1, 1) inputs and generates (B, 1, 28, 28) outputs. """ # Define shortcut functions def bn(x): # Batch normalization return PF.batch_normalization(x, batch_stat=not test) def upsample2(x, c): # Twice upsampling with deconvolution. return PF.deconvolution(x, c, kernel=(4, 4), pad=(1, 1), stride=(2, 2), with_bias=False) assert maxh / 4 > 0 with nn.parameter_scope("gen"): # (Z, 1, 1) --> (256, 4, 4) with nn.parameter_scope("deconv1"): d1 = F.elu(bn(PF.deconvolution(z, maxh, (4, 4), with_bias=False))) # (256, 4, 4) --> (128, 8, 8) with nn.parameter_scope("deconv2"): d2 = F.elu(bn(upsample2(d1, maxh / 2))) # (128, 8, 8) --> (64, 16, 16) with nn.parameter_scope("deconv3"): d3 = F.elu(bn(upsample2(d2, maxh / 4))) # (64, 16, 16) --> (32, 28, 28) with nn.parameter_scope("deconv4"): # Convolution with kernel=4, pad=3 and stride=2 transforms a 28 x 28 map # to a 16 x 16 map. Deconvolution with those parameters behaves like an # inverse operation, i.e. maps 16 x 16 to 28 x 28. d4 = F.elu(bn(PF.deconvolution( d3, maxh / 8, (4, 4), pad=(3, 3), stride=(2, 2), with_bias=False))) # (32, 28, 28) --> (1, 28, 28) with nn.parameter_scope("conv5"): x = F.tanh(PF.convolution(d4, 1, (3, 3), pad=(1, 1))) if output_hidden: return x, [d1, d2, d3, d4] return x def discriminator(x, maxh=256, test=False, output_hidden=False): """ Building discriminator network which maps a (B, 1, 28, 28) input to a (B, 1). """ # Define shortcut functions def bn(xx): # Batch normalization return PF.batch_normalization(xx, batch_stat=not test) def downsample2(xx, c): return PF.convolution(xx, c, (3, 3), pad=(1, 1), stride=(2, 2), with_bias=False) assert maxh / 8 > 0 with nn.parameter_scope("dis"): # (1, 28, 28) --> (32, 16, 16) with nn.parameter_scope("conv1"): c1 = F.elu(bn(PF.convolution(x, maxh / 8, (3, 3), pad=(3, 3), stride=(2, 2), with_bias=False))) # (32, 16, 16) --> (64, 8, 8) with nn.parameter_scope("conv2"): c2 = F.elu(bn(downsample2(c1, maxh / 4))) # (64, 8, 8) --> (128, 4, 4) with nn.parameter_scope("conv3"): c3 = F.elu(bn(downsample2(c2, maxh / 2))) # (128, 4, 4) --> (256, 4, 4) with nn.parameter_scope("conv4"): c4 = bn(PF.convolution(c3, maxh, (3, 3), pad=(1, 1), with_bias=False)) # (256, 4, 4) --> (1,) with nn.parameter_scope("fc1"): f = PF.affine(c4, 1) if output_hidden: return f, [c1, c2, c3, c4] return f def
(args): """ Main script. """ # Get context. from nnabla.ext_utils import get_extension_context logger.info("Running in %s" % args.context) ctx = get_extension_context( args.context, device_id=args.device_id, type_config=args.type_config) nn.set_default_context(ctx) # Create CNN network for both training and testing. # TRAIN # Fake path z = nn.Variable([args.batch_size, 100, 1, 1]) fake = generator(z) fake.persistent = True # Not to clear at backward pred_fake = discriminator(fake) loss_gen = F.mean(F.sigmoid_cross_entropy( pred_fake, F.constant(1, pred_fake.shape))) fake_dis = fake.get_unlinked_variable(need_grad=True) fake_dis.need_grad = True # TODO: Workaround until v1.0.2 pred_fake_dis = discriminator(fake_dis) loss_dis = F.mean(F.sigmoid_cross_entropy( pred_fake_dis, F.constant(0, pred_fake_dis.shape))) # Real path x = nn.Variable([args.batch_size, 1, 28, 28]) pred_real = discriminator(x) loss_dis += F.mean(F.sigmoid_cross_entropy(pred_real, F.constant(1, pred_real.shape))) # Create Solver. solver_gen = S.Adam(args.learning_rate, beta1=0.5) solver_dis = S.Adam(args.learning_rate, beta1=0.5) with nn.parameter_scope("gen"): solver_gen.set_parameters(nn.get_parameters()) with nn.parameter_scope("dis"): solver_dis.set_parameters(nn.get_parameters()) start_point = 0 if args.checkpoint is not None: # load weights and solver state info from specified checkpoint files. start_point = load_checkpoint( args.checkpoint, {"gen": solver_gen, "dis": solver_dis}) # Create monitor. import nnabla.monitor as M monitor = M.Monitor(args.monitor_path) monitor_loss_gen = M.MonitorSeries("Generator loss", monitor, interval=10) monitor_loss_dis = M.MonitorSeries( "Discriminator loss", monitor, interval=10) monitor_time = M.MonitorTimeElapsed("Time", monitor, interval=100) monitor_fake = M.MonitorImageTile( "Fake images", monitor, normalize_method=lambda x: (x + 1) / 2.) data = data_iterator_mnist(args.batch_size, True) # Save_nnp contents = save_nnp({'x': z}, {'y': fake}, args.batch_size) save.save(os.path.join(args.model_save_path, 'Generator_result_epoch0.nnp'), contents) contents = save_nnp({'x': x}, {'y': pred_real}, args.batch_size) save.save(os.path.join(args.model_save_path, 'Discriminator_result_epoch0.nnp'), contents) # Training loop. for i in range(start_point, args.max_iter): if i % args.model_save_interval == 0: save_checkpoint(args.model_save_path, i, { "gen": solver_gen, "dis": solver_dis}) # Training forward image, _ = data.next() x.d = image / 255. - 0.5 # [0, 255] to [-1, 1] z.d = np.random.randn(*z.shape) # Generator update. solver_gen.zero_grad() loss_gen.forward(clear_no_need_grad=True) loss_gen.backward(clear_buffer=True) solver_gen.weight_decay(args.weight_decay) solver_gen.update() monitor_fake.add(i, fake) monitor_loss_gen.add(i, loss_gen.d.copy()) # Discriminator update. 
solver_dis.zero_grad() loss_dis.forward(clear_no_need_grad=True) loss_dis.backward(clear_buffer=True) solver_dis.weight_decay(args.weight_decay) solver_dis.update() monitor_loss_dis.add(i, loss_dis.d.copy()) monitor_time.add(i) with nn.parameter_scope("gen"): nn.save_parameters(os.path.join( args.model_save_path, "generator_param_%06d.h5" % i)) with nn.parameter_scope("dis"): nn.save_parameters(os.path.join( args.model_save_path, "discriminator_param_%06d.h5" % i)) # Save_nnp contents = save_nnp({'x': z}, {'y': fake}, args.batch_size) save.save(os.path.join(args.model_save_path, 'Generator_result.nnp'), contents) contents = save_nnp({'x': x}, {'y': pred_real}, args.batch_size) save.save(os.path.join(args.model_save_path, 'Discriminator_result.nnp'), contents) if __name__ == '__main__': monitor_path = 'tmp.monitor.dcgan' args = get_args(monitor_path=monitor_path, model_save_path=monitor_path, max_iter=20000, learning_rate=0.0002, batch_size=64, weight_decay=0.0001) train(args)
train
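The training graph above wires both networks to sigmoid cross-entropy losses: the generator is trained to make D(G(z)) look real (labels of 1), while the discriminator sees the same fakes with labels of 0 plus real images with labels of 1. A small NumPy sketch of that label convention, using the numerically stable logits form (the logits shown are made up for illustration):

```python
import numpy as np

def sigmoid_cross_entropy_with_logits(logits, labels):
    # Stable form of -[z*log(sigmoid(x)) + (1-z)*log(1-sigmoid(x))]
    return np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))

pred_fake = np.array([-1.3, 0.2, 0.7])   # hypothetical D(G(z)) logits
pred_real = np.array([2.1, 0.5, 1.8])    # hypothetical D(x) logits

loss_gen = sigmoid_cross_entropy_with_logits(pred_fake, np.ones_like(pred_fake)).mean()
loss_dis = (sigmoid_cross_entropy_with_logits(pred_fake, np.zeros_like(pred_fake)).mean()
            + sigmoid_cross_entropy_with_logits(pred_real, np.ones_like(pred_real)).mean())
print(loss_gen, loss_dis)
```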
settings.rs
mod template; use config::{Config, ConfigError, File, FileFormat, FileSourceString}; use serde::{Deserialize, Serialize}; use std::time::Duration; use template::TemplateConfig; /// Settings profile. /// /// All profiles are based on `Default`, that is, all settings from `Default` /// profile will be available, but may be overridden, will be inherited /// by other profiles. #[derive(Debug, Serialize)] #[serde(rename_all = "kebab-case")] pub enum Profile { Default, Test(String), } /// Application settings. #[derive(Debug, Clone, Deserialize)] pub struct Settings { /// Database settings. db: Db, /// RPC services settings. rpc: Rpc, /// External storage settings. storage: Storage, } /// Database settings. #[derive(Debug, Clone, Deserialize)] pub struct Db { /// Path to database. url: String, /// Number of maximum simultaneous connections. max_connections: u32, /// DB connection timeout. connection_timeout: u64, } /// Rpc services settings. #[derive(Debug, Clone, Deserialize)] pub struct Rpc { /// Port for serving RPC services. port: u32, } /// Represents external S3-compatible storage settings. #[derive(Debug, Clone, Deserialize)] pub struct Storage { host: String, bucket: String, region: String, key: String, secret: String, } // MARK: impl Profile impl Profile { fn files(&self) -> Result<Vec<File<FileSourceString>>, ConfigError> { let mut files = vec!["config/default.toml".to_string()]; match self { Profile::Default => {} Profile::Test(name) =>
} let rendered = files .into_iter() .map(|p| TemplateConfig::new(p).render()) .collect::<Result<Vec<String>, _>>() .map_err(|e| ConfigError::Foreign(e))? .iter() .map(|r| File::from_str(&r, FileFormat::Toml)) .collect(); Ok(rendered) } } // MARK: impl Settings impl Settings { pub fn new(profile: Profile) -> Result<Self, ConfigError> { let mut s = Config::new(); for file in profile.files()? { s.merge(file)?; } s.try_into() } pub fn db(&self) -> &Db { &self.db } pub fn rpc(&self) -> &Rpc { &self.rpc } pub fn storage(&self) -> &Storage { &self.storage } } // MARK: impl Db impl Db { pub fn url(&self) -> &str { &self.url } pub fn max_connections(&self) -> u32 { self.max_connections } pub fn connection_timeout(&self) -> Duration { Duration::new(self.connection_timeout, 0) } } // MARK: impl Rpc impl Rpc { pub fn port(&self) -> u32 { self.port } } // MARK: impl Storage impl Storage { pub fn host(&self) -> &str { &self.host } pub fn bucket(&self) -> &str { &self.bucket } pub fn region(&self) -> &str { &self.region } pub fn key(&self) -> &str { &self.key } pub fn secret(&self) -> &str { &self.secret } }
{ let path = format!("config/tests/{}.toml", name); files.push(path); }
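The files() method above layers one TOML file per profile: every profile starts from config/default.toml and a Test profile adds config/tests/<name>.toml on top, with later files overriding earlier ones. A minimal Python sketch of that layering (the paths follow the Rust code; the merge here is a shallow dict update, a simplification of the config crate's deep merge):

```python
import tomllib  # Python 3.11+ standard library

def load_settings(test_profile: str | None = None) -> dict:
    """Load config/default.toml, then overlay the test profile if one is given."""
    paths = ["config/default.toml"]
    if test_profile is not None:
        paths.append(f"config/tests/{test_profile}.toml")
    settings: dict = {}
    for path in paths:
        with open(path, "rb") as fh:            # tomllib requires binary mode
            settings.update(tomllib.load(fh))   # later files override earlier keys
    return settings
```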
modulo_one.rs
// Copyright 2014-2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![warn(clippy::modulo_one)] #![allow(clippy::no_effect, clippy::unnecessary_operation)] fn main()
{ 10 % 1; 10 % 2; }
pub_pose.py
#!/usr/bin/env python import rospy from std_msgs.msg import String from geometry_msgs.msg import PoseStamped, Pose, Point, Quaternion import numpy as np import math def publish(): pub = rospy.Publisher('pose_truth', PoseStamped, queue_size=10) rospy.init_node('talker', anonymous=True) rate = rospy.Rate(10) # 10hz #pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01] # Sciossors_01_28 pt = [0.21,-0.011,0.4,0.3,-0.6,-0.01] #Shears_02_01 pt = [0.189,-0.015,0.4,-0.4,-0.6,-0.01] pt = [0.188,-0.015,0.4,-0.45,-0.6,-0.01] # Scissors_08_01 pt = [0.2,-0.012,0.4,0,-1,0] ests = [['scissors_01_00000027', [0.024235617160797116,-0.011359463453292846,0.019534289836883545]], ['scissors_01_00000060', [0.0011834951639175398,-0.013148486614227295,-0.005846852660179138]], ['scissors_01_00000003', [0.024251672744750975,-0.011589790105819703,0.0003066921234130859]], ['shears_01_00000009', [-0.009251792550086976,-0.017923964738845825,0.010005302429199218]], ['shears_01_00000033', [-0.027354883074760434,-0.012586298942565919,0.031511585712432864]], ['shears_01_00000090', [-0.03358910477161407,-0.013879684925079346,-0.014482853412628173]]] pt = ests[0][1] + [0,0,1] #pt[2] += 0.05 pos = pose_from_vec(pt) pose = PoseStamped() pose.pose = pos pose.header.frame_id = "base_link" while not rospy.is_shutdown(): pub.publish(pose) rate.sleep() def pose_from_vec(waypoint): pose = Pose() pose.position.x = waypoint[0] pose.position.y = waypoint[1] pose.position.z = waypoint[2] u = [1,0,0] norm = np.linalg.norm(np.array(waypoint[3:]))
if (np.array_equal(u, v)): pose.orientation.w = 1 pose.orientation.x = 0 pose.orientation.y = 0 pose.orientation.z = 0 elif (np.array_equal(u, np.negative(v))): pose.orientation.w = 0 pose.orientation.x = 0 pose.orientation.y = 0 pose.orientation.z = 1 else: half = [u[0]+v[0], u[1]+v[1], u[2]+v[2]] pose.orientation.w = np.dot(u, half) temp = np.cross(u, half) pose.orientation.x = temp[0] pose.orientation.y = temp[1] pose.orientation.z = temp[2] norm = math.sqrt(pose.orientation.x*pose.orientation.x + pose.orientation.y*pose.orientation.y + pose.orientation.z*pose.orientation.z + pose.orientation.w*pose.orientation.w) if norm == 0: norm = 1 pose.orientation.x /= norm pose.orientation.y /= norm pose.orientation.z /= norm pose.orientation.w /= norm return pose if __name__ == '__main__': try: publish() except rospy.ROSInterruptException: pass
v = np.array(waypoint[3:])/norm
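pose_from_vec above builds the shortest-arc rotation taking the x axis onto the requested direction: the quaternion's vector part is u × (u + v), its scalar part is u · (u + v), normalized afterwards, with a special case for the antiparallel direction. A compact NumPy sketch of that construction, plus a check that it really rotates u onto v:

```python
import numpy as np

def quat_from_to(u, v):
    """Shortest-arc quaternion (x, y, z, w) rotating unit vector u onto unit vector v."""
    u = np.asarray(u, dtype=float)
    v = np.asarray(v, dtype=float)
    if np.allclose(u, -v):
        # Antiparallel case: 180-degree turn about z, as the node does for u = x axis.
        return np.array([0.0, 0.0, 1.0, 0.0])
    half = u + v
    q = np.concatenate([np.cross(u, half), [np.dot(u, half)]])
    return q / np.linalg.norm(q)

def rotate(q, p):
    """Rotate vector p by quaternion q = (x, y, z, w)."""
    xyz, w = q[:3], q[3]
    t = 2.0 * np.cross(xyz, p)
    return p + w * t + np.cross(xyz, t)

u = np.array([1.0, 0.0, 0.0])
v = np.array([0.0, 1.0, 0.0])
q = quat_from_to(u, v)
assert np.allclose(rotate(q, u), v)
```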
locked.go
/* * Copyright 2016 The Kythe Authors. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Package locked implements a delegating wrapper that protects each method of // the resulting kcd.Reader or kcd.ReadWriter with a mutex, so that the result // is safe for concurrent use by multiple goroutines. package locked // import "kythe.io/kythe/go/platform/kcd/locked" import ( "context" "errors" "io" "sync" "kythe.io/kythe/go/platform/kcd" ) type locker struct { μ sync.Mutex rd kcd.Reader wr kcd.Writer del kcd.Deleter } // Reader returns a kcd.Reader that delegates to r with method calls protected // by a mutex. func Reader(r kcd.Reader) kcd.Reader { return &locker{rd: r} } // ReadWriter returns a kcd.ReadWriter that delegates to rw with method calls // protected by a mutex. func ReadWriter(rw kcd.ReadWriter) kcd.ReadWriter { return &locker{rd: rw, wr: rw} } // ReadWriteDeleter returns a kcd.ReadWriteDeleter that delegates to rwd with // method calls protected by a mutex. func ReadWriteDeleter(rwd kcd.ReadWriteDeleter) kcd.ReadWriteDeleter { return &locker{rd: rwd, wr: rwd, del: rwd} } // ErrNotSupported is returned by the write methods if no Writer is available. var ErrNotSupported = errors.New("write operation not supported") // Revisions implements a method of kcd.Reader. func (db *locker) Revisions(ctx context.Context, want *kcd.RevisionsFilter, f func(kcd.Revision) error) error { db.μ.Lock() defer db.μ.Unlock() return db.rd.Revisions(ctx, want, f) } // Find implements a method of kcd.Reader. func (db *locker) Find(ctx context.Context, filter *kcd.FindFilter, f func(string) error) error { db.μ.Lock() defer db.μ.Unlock() return db.rd.Find(ctx, filter, f)
// Units implements a method of kcd.Reader. func (db *locker) Units(ctx context.Context, unitDigests []string, f func(digest, key string, data []byte) error) error { db.μ.Lock() defer db.μ.Unlock() return db.rd.Units(ctx, unitDigests, f) } // Files implements a method of kcd.Reader. func (db *locker) Files(ctx context.Context, fileDigests []string, f func(string, []byte) error) error { db.μ.Lock() defer db.μ.Unlock() return db.rd.Files(ctx, fileDigests, f) } // FilesExist implements a method of kcd.Reader. func (db *locker) FilesExist(ctx context.Context, fileDigests []string, f func(string) error) error { db.μ.Lock() defer db.μ.Unlock() return db.rd.FilesExist(ctx, fileDigests, f) } // WriteRevision implements a method of kcd.Writer. func (db *locker) WriteRevision(ctx context.Context, rev kcd.Revision, replace bool) error { if db.wr == nil { return ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.wr.WriteRevision(ctx, rev, replace) } // WriteUnit implements a method of kcd.Writer. func (db *locker) WriteUnit(ctx context.Context, rev kcd.Revision, formatKey string, unit kcd.Unit) (string, error) { if db.wr == nil { return "", ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.wr.WriteUnit(ctx, rev, formatKey, unit) } // WriteFile implements a method of kcd.Writer. func (db *locker) WriteFile(ctx context.Context, r io.Reader) (string, error) { if db.wr == nil { return "", ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.wr.WriteFile(ctx, r) } // DeleteUnit implements a method of kcd.Deleter. func (db *locker) DeleteUnit(ctx context.Context, unitDigest string) error { if db.del == nil { return ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.del.DeleteUnit(ctx, unitDigest) } // DeleteFile implements a method of kcd.Deleter. func (db *locker) DeleteFile(ctx context.Context, fileDigest string) error { if db.del == nil { return ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.del.DeleteFile(ctx, fileDigest) } // DeleteRevision implements a method of kcd.Deleter. func (db *locker) DeleteRevision(ctx context.Context, revision, corpus string) error { if db.del == nil { return ErrNotSupported } db.μ.Lock() defer db.μ.Unlock() return db.del.DeleteRevision(ctx, revision, corpus) }
}
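locked.go above makes a Reader safe for concurrent use by taking one mutex around every delegated call. The pattern itself is language-agnostic; a short Python sketch of the same delegating wrapper (illustrative, not the kcd API):

```python
import threading

class Locked:
    """Delegate attribute access to a wrapped object, holding one mutex per method call."""

    def __init__(self, delegate):
        self._delegate = delegate
        self._mutex = threading.Lock()

    def __getattr__(self, name):
        attr = getattr(self._delegate, name)
        if not callable(attr):
            return attr

        def locked_call(*args, **kwargs):
            with self._mutex:
                return attr(*args, **kwargs)

        return locked_call
```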
CollectionPage_20220105182343.js
import React, { useState, useEffect } from 'react'; import { useTranslation } from 'react-i18next'; import SearchForm from '../../components/searchForm/SearchForm'; import Art from '../../components/art/Art'; import { mockData } from '../../mockData/datasource'; import './CollectionPage.scss'; // async funtion to get data from server const getArtItems = async (callback) => { const { artsCollection } = mockData(); const currentLanguage = localStorage.getItem('i18nextLng'); const arts = await artsCollection.getArts(currentLanguage); // some callback to set the state when the fetch is done callback(arts); }; const searchMecanism = (item, text) => { // lower case all data texts const pictureNameMatch = item.pictureName.toLowerCase(); const painterMatch = item.painter.toLowerCase(); const yearMatch = item.year.toString(); const matherialMatch = item.matherial.toLowerCase(); // lower case the fliter text const textToSearch = text.toLowerCase(); // get one of the matches - pictureName | painter return pictureNameMatch.includes(textToSearch) || painterMatch.includes(textToSearch) || yearMatch.includes(textToSearch) || matherialMatch.includes(textToSearch); }; function
() { const [t, i18n] = useTranslation(); // state of lists const [originalArts, setOriginalArts] = useState([]); // state of loading const [isLoading, setIsLoading] = useState(true); // state of lists const [filtredArts, setFiltredArts] = useState([]); // state of current filter const [filterText, setFilterText] = useState(''); useEffect(() => { //reset all curent filters setFilterText(""); // show loader when language is changed setIsLoading(true); // get new data in the current language getArtItems((arts) => { setOriginalArts(arts); setIsLoading(false); }); }, [i18n.language]); // on language change -> invoke my inner logic (callback) // on text change - filter artItemns useEffect( () => { const newFiltredArts = originalArts.filter((art) => searchMecanism(art, filterText)); setFiltredArts(newFiltredArts); }, [filterText, originalArts] ); // do effect when - filterText changed or originalArts changed // our goal is to filter the list when filterText is changed // SearchForm on change -> set a new filterText state // filters originalArts -> sets a new filtredArts state // on update - rerender the html (maps the Art component) return ( <main className="container"> <div> <h1 className="collection-title">{t('pages.collection.title')}</h1> </div> <div className="form-outline mb-4 pt-4"> <SearchForm text={filterText} onChange={(text) => setFilterText(text)} /> </div> {/* <div class="row equal"> */} <div className="row row-cols-1 row-cols-md-3 g-4 d-flex align-items-stretch"> {/* show loader if isLoading is true */} {isLoading && <div className="loader">{t("loader.loader")}</div>} {/* map the filtred art items results to an Art component */} {!isLoading && filtredArts.length > 0 && filtredArts.map(({ id, pictureName, image, lifeAndCountry, painter, body, dimention, matherial, year }) => { return ( <div key={id}> <section > <Art id={id} pictureName={pictureName} image={image} painter={painter} lifeAndCountry={lifeAndCountry} body={body} dimention={dimention} matherial={matherial} year={year} /> </section> </div> ); })} {/* if no results is shown match to text */} {!isLoading && !filtredArts.length && <div> <p>{t("title.1")}</p> </div>} </div> {/* </div> */} </main> ); } export default CollectionPage;
CollectionPage
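searchMecanism above lower-cases a handful of fields and keeps an item when any of them contains the lower-cased query. The same predicate in a few lines of Python, with a made-up record for illustration (the field spelling matherial is kept as it appears in the data source):

```python
def matches(item: dict, text: str) -> bool:
    """Case-insensitive substring match over the fields the component filters on."""
    needle = text.lower()
    haystacks = (
        item["pictureName"].lower(),
        item["painter"].lower(),
        str(item["year"]),
        item["matherial"].lower(),
    )
    return any(needle in h for h in haystacks)

arts = [{"pictureName": "Mona Lisa", "painter": "Da Vinci", "year": 1503, "matherial": "oil"}]
assert [a for a in arts if matches(a, "vinci")]
```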
shipments.module.ts
import { NgModule } from '@angular/core'; import { CommonModule } from '@angular/common'; import { ReactiveFormsModule, FormsModule } from '@angular/forms'; import { FlexLayoutModule } from '@angular/flex-layout'; import { MatButtonModule } from '@angular/material/button'; import { MatIconModule } from '@angular/material/icon'; import { MatTabsModule } from '@angular/material/tabs'; import { MatToolbarModule } from '@angular/material/toolbar'; import { MatListModule } from '@angular/material/list'; import { MatStepperModule } from '@angular/material/stepper'; import { MatInputModule } from '@angular/material/input'; import { MatFormFieldModule } from '@angular/material/form-field'; import { MatExpansionModule } from '@angular/material/expansion'; import { MatDatepickerModule } from '@angular/material/datepicker'; import { MatNativeDateModule } from '@angular/material'; import { MatTableModule } from '@angular/material/table'; import { MatPaginatorModule, MatSortModule, MatCheckboxModule, MatTooltipModule, MatChipsModule, MatButtonToggleModule } from '@angular/material'; import * as hljs from 'highlight.js'; import { HighlightJsModule, HIGHLIGHT_JS } from 'angular-highlight-js'; import * as hljsTypescript from 'highlight.js/lib/languages/typescript'; import { ShipmentLinesService } from './shipment-lines/shipment-lines.service'; import { ShipmentsService } from './shipments/shipments.service'; import { ShipmentLinesComponent } from './shipment-lines/shipment-lines.component'; import { ShipmentsComponent } from './shipments/shipments.component'; import { ShipmentsRouterModule } from './shipments.router'; export function highlightJsFactory(): any { hljs.registerLanguage('typescript', hljsTypescript); return hljs; }
FlexLayoutModule, MatButtonModule, MatIconModule, MatTabsModule, MatToolbarModule, MatListModule, MatStepperModule, ReactiveFormsModule, FormsModule, MatFormFieldModule, MatInputModule, MatExpansionModule, MatDatepickerModule, MatNativeDateModule, MatTableModule, MatPaginatorModule, MatSortModule, MatCheckboxModule, MatTooltipModule, MatChipsModule, MatButtonToggleModule, HighlightJsModule.forRoot({ provide: HIGHLIGHT_JS, useFactory: highlightJsFactory }), ShipmentsRouterModule ], declarations: [ ShipmentsComponent, ShipmentLinesComponent, ], exports: [ ], providers: [ ShipmentsService, ShipmentLinesService ] }) export class ShipmentsModule { }
@NgModule({ imports: [ CommonModule,
test_sum.py
from solutions.SUM import sum_solution class TestSum():
"""Class to test sum_solution""" def test_sum(self): """Happy path test""" assert sum_solution.compute(1, 2) == 3 def test_check_bounds(self): """Raise value error if integer passed in is out of limit""" try: sol = sum_solution.compute(-1, 2) except ValueError as e: assert str(e) == "Passed in value out of bounds"
parse_test.go
// Copyright 2013 Julien Schmidt. All rights reserved. // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file. package main import ( "net/http" "testing" ) // Parse // https://parse.com/docs/rest#summary var parseAPI = []route{ // Objects {"POST", "/1/classes/:className"}, {"GET", "/1/classes/:className/:objectId"}, {"PUT", "/1/classes/:className/:objectId"}, {"GET", "/1/classes/:className"}, {"DELETE", "/1/classes/:className/:objectId"}, // Users {"POST", "/1/users"}, {"GET", "/1/login"}, {"GET", "/1/users/:objectId"}, {"PUT", "/1/users/:objectId"}, {"GET", "/1/users"}, {"DELETE", "/1/users/:objectId"}, {"POST", "/1/requestPasswordReset"}, // Roles {"POST", "/1/roles"}, {"GET", "/1/roles/:objectId"}, {"PUT", "/1/roles/:objectId"}, {"GET", "/1/roles"}, {"DELETE", "/1/roles/:objectId"}, // Files {"POST", "/1/files/:fileName"}, // Analytics {"POST", "/1/events/:eventName"}, // Push Notifications {"POST", "/1/push"}, // Installations {"POST", "/1/installations"}, {"GET", "/1/installations/:objectId"}, {"PUT", "/1/installations/:objectId"}, {"GET", "/1/installations"}, {"DELETE", "/1/installations/:objectId"}, // Cloud Functions {"POST", "/1/functions"}, } var ( parseBeego http.Handler parseGin http.Handler parseGoji http.Handler parseGorillaMux http.Handler parseHttpRouter http.Handler parseMartini http.Handler parseMacaron http.Handler parseRevel http.Handler ) func init() { println("#ParseAPI Routes:", len(parseAPI)) calcMem("Beego", func() { parseBeego = loadBeego(parseAPI) }) calcMem("Goji", func() { parseGoji = loadGoji(parseAPI) }) calcMem("GorillaMux", func() { parseGorillaMux = loadGorillaMux(parseAPI) }) calcMem("Martini", func() { parseMartini = loadMartini(parseAPI) }) calcMem("Macaron", func() { parseMacaron = loadMacaron(parseAPI) }) println() } // Static func BenchmarkBeego_ParseStatic(b *testing.B) { req, _ := http.NewRequest("GET", "/1/users", nil) benchRequest(b, parseBeego, req) } func BenchmarkGoji_ParseStatic(b *testing.B) { req, _ := http.NewRequest("GET", "/1/users", nil) benchRequest(b, parseGoji, req) } func BenchmarkGorillaMux_ParseStatic(b *testing.B) { req, _ := http.NewRequest("GET", "/1/users", nil) benchRequest(b, parseGorillaMux, req) } func BenchmarkMartini_ParseStatic(b *testing.B) { req, _ := http.NewRequest("GET", "/1/users", nil) benchRequest(b, parseMartini, req) } func BenchmarkMacaron_ParseStatic(b *testing.B) { req, _ := http.NewRequest("GET", "/1/users", nil) benchRequest(b, parseMacaron, req) } // One Param func BenchmarkBeego_ParseParam(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go", nil) benchRequest(b, parseBeego, req) } func BenchmarkGoji_ParseParam(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go", nil) benchRequest(b, parseGoji, req) } func BenchmarkGorillaMux_ParseParam(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go", nil) benchRequest(b, parseGorillaMux, req) } func BenchmarkMartini_ParseParam(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go", nil) benchRequest(b, parseMartini, req) } func BenchmarkMacaron_ParseParam(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go", nil) benchRequest(b, parseMacaron, req) } // Two Params func BenchmarkBeego_Parse2Params(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go/123456789", nil) benchRequest(b, parseBeego, req) } func BenchmarkGoji_Parse2Params(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go/123456789", nil) benchRequest(b, parseGoji, req) } func 
BenchmarkGorillaMux_Parse2Params(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go/123456789", nil) benchRequest(b, parseGorillaMux, req) } func
(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go/123456789", nil) benchRequest(b, parseMartini, req) } func BenchmarkMacaron_Parse2Params(b *testing.B) { req, _ := http.NewRequest("GET", "/1/classes/go/123456789", nil) benchRequest(b, parseMacaron, req) } // All Routes func BenchmarkBeego_ParseAll(b *testing.B) { benchRoutes(b, parseBeego, parseAPI) } func BenchmarkGoji_ParseAll(b *testing.B) { benchRoutes(b, parseGoji, parseAPI) } func BenchmarkGorillaMux_ParseAll(b *testing.B) { benchRoutes(b, parseGorillaMux, parseAPI) } func BenchmarkMartini_ParseAll(b *testing.B) { benchRoutes(b, parseMartini, parseAPI) } func BenchmarkMacaron_ParseAll(b *testing.B) { benchRoutes(b, parseMacaron, parseAPI) }
BenchmarkMartini_Parse2Params
sqlite_db.rs
// Copyright 2019. The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ collections::HashMap, convert::TryFrom, str::from_utf8, sync::{Arc, MutexGuard, RwLock}, }; use aes_gcm::{self, aead::Error as AeadError, Aes256Gcm}; use chrono::{NaiveDateTime, Utc}; use diesel::{prelude::*, result::Error as DieselError, SqliteConnection}; use log::*; use tari_crypto::tari_utilities::{ hex::{from_hex, Hex}, ByteArray, }; use tari_common_types::types::PublicKey; use tari_comms::types::CommsPublicKey; use tari_core::transactions::tari_amount::MicroTari; use crate::{ output_manager_service::TxId, schema::{completed_transactions, inbound_transactions, outbound_transactions}, storage::sqlite_utilities::WalletDbConnection, transaction_service::{ error::TransactionStorageError, storage::{ database::{DbKey, DbKeyValuePair, DbValue, TransactionBackend, WriteOperation}, models::{ CompletedTransaction, InboundTransaction, OutboundTransaction, TransactionDirection, TransactionStatus, WalletTransaction, }, }, }, util::encryption::{decrypt_bytes_integral_nonce, encrypt_bytes_integral_nonce, Encryptable}, }; const LOG_TARGET: &str = "wallet::transaction_service::database::sqlite_db"; /// A Sqlite backend for the Transaction Service. The Backend is accessed via a connection pool to the Sqlite file. #[derive(Clone)] pub struct TransactionServiceSqliteDatabase { database_connection: WalletDbConnection, cipher: Arc<RwLock<Option<Aes256Gcm>>>, } impl TransactionServiceSqliteDatabase { pub fn new(database_connection: WalletDbConnection, cipher: Option<Aes256Gcm>) -> Self { Self { database_connection, cipher: Arc::new(RwLock::new(cipher)), } } fn
(&self, kvp: DbKeyValuePair, conn: MutexGuard<SqliteConnection>) -> Result<(), TransactionStorageError> { match kvp { DbKeyValuePair::PendingOutboundTransaction(k, v) => { if OutboundTransactionSql::find_by_cancelled(k, false, &(*conn)).is_ok() { return Err(TransactionStorageError::DuplicateOutput); } let mut o = OutboundTransactionSql::try_from(*v)?; self.encrypt_if_necessary(&mut o)?; o.commit(&(*conn))?; }, DbKeyValuePair::PendingInboundTransaction(k, v) => { if InboundTransactionSql::find_by_cancelled(k, false, &(*conn)).is_ok() { return Err(TransactionStorageError::DuplicateOutput); } let mut i = InboundTransactionSql::try_from(*v)?; self.encrypt_if_necessary(&mut i)?; i.commit(&(*conn))?; }, DbKeyValuePair::CompletedTransaction(k, v) => { if CompletedTransactionSql::find_by_cancelled(k, false, &(*conn)).is_ok() { return Err(TransactionStorageError::DuplicateOutput); } let mut c = CompletedTransactionSql::try_from(*v)?; self.encrypt_if_necessary(&mut c)?; c.commit(&(*conn))?; }, } Ok(()) } fn remove( &self, key: DbKey, conn: MutexGuard<SqliteConnection>, ) -> Result<Option<DbValue>, TransactionStorageError> { match key { DbKey::PendingOutboundTransaction(k) => match OutboundTransactionSql::find_by_cancelled(k, false, &(*conn)) { Ok(mut v) => { v.delete(&(*conn))?; self.decrypt_if_necessary(&mut v)?; Ok(Some(DbValue::PendingOutboundTransaction(Box::new( OutboundTransaction::try_from(v)?, )))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), ), Err(e) => Err(e), }, DbKey::PendingInboundTransaction(k) => match InboundTransactionSql::find_by_cancelled(k, false, &(*conn)) { Ok(mut v) => { v.delete(&(*conn))?; self.decrypt_if_necessary(&mut v)?; Ok(Some(DbValue::PendingInboundTransaction(Box::new( InboundTransaction::try_from(v)?, )))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( TransactionStorageError::ValueNotFound(DbKey::PendingOutboundTransaction(k)), ), Err(e) => Err(e), }, DbKey::CompletedTransaction(k) => match CompletedTransactionSql::find_by_cancelled(k, false, &(*conn)) { Ok(mut v) => { v.delete(&(*conn))?; self.decrypt_if_necessary(&mut v)?; Ok(Some(DbValue::CompletedTransaction(Box::new( CompletedTransaction::try_from(v)?, )))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction(k))) }, Err(e) => Err(e), }, DbKey::PendingOutboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::PendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CompletedTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledPendingOutboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledPendingInboundTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledCompletedTransactions => Err(TransactionStorageError::OperationNotSupported), DbKey::CancelledPendingOutboundTransaction(k) => { match OutboundTransactionSql::find_by_cancelled(k, true, &(*conn)) { Ok(mut v) => { v.delete(&(*conn))?; self.decrypt_if_necessary(&mut v)?; Ok(Some(DbValue::PendingOutboundTransaction(Box::new( OutboundTransaction::try_from(v)?, )))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), } }, 
DbKey::CancelledPendingInboundTransaction(k) => { match InboundTransactionSql::find_by_cancelled(k, true, &(*conn)) { Ok(mut v) => { v.delete(&(*conn))?; self.decrypt_if_necessary(&mut v)?; Ok(Some(DbValue::PendingInboundTransaction(Box::new( InboundTransaction::try_from(v)?, )))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => Err( TransactionStorageError::ValueNotFound(DbKey::CancelledPendingOutboundTransaction(k)), ), Err(e) => Err(e), } }, DbKey::AnyTransaction(_) => Err(TransactionStorageError::OperationNotSupported), } } fn decrypt_if_necessary<T: Encryptable<Aes256Gcm>>(&self, o: &mut T) -> Result<(), TransactionStorageError> { let cipher = acquire_read_lock!(self.cipher); if let Some(cipher) = cipher.as_ref() { o.decrypt(cipher) .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; } Ok(()) } fn encrypt_if_necessary<T: Encryptable<Aes256Gcm>>(&self, o: &mut T) -> Result<(), TransactionStorageError> { let cipher = acquire_read_lock!(self.cipher); if let Some(cipher) = cipher.as_ref() { o.encrypt(cipher) .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; } Ok(()) } } impl TransactionBackend for TransactionServiceSqliteDatabase { fn fetch(&self, key: &DbKey) -> Result<Option<DbValue>, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); let result = match key { DbKey::PendingOutboundTransaction(t) => { match OutboundTransactionSql::find_by_cancelled(*t, false, &(*conn)) { Ok(mut o) => { self.decrypt_if_necessary(&mut o)?; Some(DbValue::PendingOutboundTransaction(Box::new( OutboundTransaction::try_from(o)?, ))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => None, Err(e) => return Err(e), } }, DbKey::PendingInboundTransaction(t) => { match InboundTransactionSql::find_by_cancelled(*t, false, &(*conn)) { Ok(mut i) => { self.decrypt_if_necessary(&mut i)?; Some(DbValue::PendingInboundTransaction(Box::new( InboundTransaction::try_from(i)?, ))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => None, Err(e) => return Err(e), } }, DbKey::CompletedTransaction(t) => match CompletedTransactionSql::find(*t, &(*conn)) { Ok(mut c) => { self.decrypt_if_necessary(&mut c)?; Some(DbValue::CompletedTransaction(Box::new(CompletedTransaction::try_from( c, )?))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => None, Err(e) => return Err(e), }, DbKey::AnyTransaction(t) => { match OutboundTransactionSql::find(*t, &(*conn)) { Ok(mut o) => { self.decrypt_if_necessary(&mut o)?; return Ok(Some(DbValue::WalletTransaction(Box::new( WalletTransaction::PendingOutbound(OutboundTransaction::try_from(o)?), )))); }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => (), Err(e) => return Err(e), }; match InboundTransactionSql::find(*t, &(*conn)) { Ok(mut i) => { self.decrypt_if_necessary(&mut i)?; return Ok(Some(DbValue::WalletTransaction(Box::new( WalletTransaction::PendingInbound(InboundTransaction::try_from(i)?), )))); }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => (), Err(e) => return Err(e), }; match CompletedTransactionSql::find(*t, &(*conn)) { Ok(mut c) => { self.decrypt_if_necessary(&mut c)?; return Ok(Some(DbValue::WalletTransaction(Box::new( WalletTransaction::Completed(CompletedTransaction::try_from(c)?), )))); }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => (), Err(e) => return Err(e), }; None }, DbKey::PendingOutboundTransactions => { let mut result = HashMap::new(); for o 
in OutboundTransactionSql::index_by_cancelled(&(*conn), false)?.iter_mut() { self.decrypt_if_necessary(o)?; result.insert(o.tx_id as u64, OutboundTransaction::try_from((*o).clone())?); } Some(DbValue::PendingOutboundTransactions(result)) }, DbKey::PendingInboundTransactions => { let mut result = HashMap::new(); for i in InboundTransactionSql::index_by_cancelled(&(*conn), false)?.iter_mut() { self.decrypt_if_necessary(i)?; result.insert(i.tx_id as u64, InboundTransaction::try_from((*i).clone())?); } Some(DbValue::PendingInboundTransactions(result)) }, DbKey::CompletedTransactions => { let mut result = HashMap::new(); for c in CompletedTransactionSql::index_by_cancelled(&(*conn), false)?.iter_mut() { self.decrypt_if_necessary(c)?; result.insert(c.tx_id as u64, CompletedTransaction::try_from((*c).clone())?); } Some(DbValue::CompletedTransactions(result)) }, DbKey::CancelledPendingOutboundTransactions => { let mut result = HashMap::new(); for o in OutboundTransactionSql::index_by_cancelled(&(*conn), true)?.iter_mut() { self.decrypt_if_necessary(o)?; result.insert(o.tx_id as u64, OutboundTransaction::try_from((*o).clone())?); } Some(DbValue::PendingOutboundTransactions(result)) }, DbKey::CancelledPendingInboundTransactions => { let mut result = HashMap::new(); for i in InboundTransactionSql::index_by_cancelled(&(*conn), true)?.iter_mut() { self.decrypt_if_necessary(i)?; result.insert(i.tx_id as u64, InboundTransaction::try_from((*i).clone())?); } Some(DbValue::PendingInboundTransactions(result)) }, DbKey::CancelledCompletedTransactions => { let mut result = HashMap::new(); for c in CompletedTransactionSql::index_by_cancelled(&(*conn), true)?.iter_mut() { self.decrypt_if_necessary(c)?; result.insert(c.tx_id as u64, CompletedTransaction::try_from((*c).clone())?); } Some(DbValue::CompletedTransactions(result)) }, DbKey::CancelledPendingOutboundTransaction(t) => { match OutboundTransactionSql::find_by_cancelled(*t, true, &(*conn)) { Ok(mut o) => { self.decrypt_if_necessary(&mut o)?; Some(DbValue::PendingOutboundTransaction(Box::new( OutboundTransaction::try_from(o)?, ))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => None, Err(e) => return Err(e), } }, DbKey::CancelledPendingInboundTransaction(t) => { match InboundTransactionSql::find_by_cancelled(*t, true, &(*conn)) { Ok(mut i) => { self.decrypt_if_necessary(&mut i)?; Some(DbValue::PendingInboundTransaction(Box::new( InboundTransaction::try_from(i)?, ))) }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => None, Err(e) => return Err(e), } }, }; Ok(result) } fn contains(&self, key: &DbKey) -> Result<bool, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); let result = match key { DbKey::PendingOutboundTransaction(k) => { OutboundTransactionSql::find_by_cancelled(*k, false, &(*conn)).is_ok() }, DbKey::PendingInboundTransaction(k) => { InboundTransactionSql::find_by_cancelled(*k, false, &(*conn)).is_ok() }, DbKey::CompletedTransaction(k) => CompletedTransactionSql::find(*k, &(*conn)).is_ok(), DbKey::PendingOutboundTransactions => false, DbKey::PendingInboundTransactions => false, DbKey::CompletedTransactions => false, DbKey::CancelledPendingOutboundTransactions => false, DbKey::CancelledPendingInboundTransactions => false, DbKey::CancelledCompletedTransactions => false, DbKey::CancelledPendingOutboundTransaction(k) => { OutboundTransactionSql::find_by_cancelled(*k, true, &(*conn)).is_ok() }, DbKey::CancelledPendingInboundTransaction(k) => { 
InboundTransactionSql::find_by_cancelled(*k, true, &(*conn)).is_ok() }, DbKey::AnyTransaction(k) => { CompletedTransactionSql::find(*k, &(*conn)).is_ok() || InboundTransactionSql::find(*k, &(*conn)).is_ok() || OutboundTransactionSql::find(*k, &(*conn)).is_ok() }, }; Ok(result) } fn write(&self, op: WriteOperation) -> Result<Option<DbValue>, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match op { WriteOperation::Insert(kvp) => self.insert(kvp, conn).map(|_| None), WriteOperation::Remove(key) => self.remove(key, conn), } } fn transaction_exists(&self, tx_id: u64) -> Result<bool, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); Ok( OutboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)).is_ok() || InboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)).is_ok() || CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)).is_ok(), ) } fn get_pending_transaction_counterparty_pub_key_by_tx_id( &self, tx_id: u64, ) -> Result<CommsPublicKey, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); if let Ok(mut outbound_tx_sql) = OutboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { self.decrypt_if_necessary(&mut outbound_tx_sql)?; let outbound_tx = OutboundTransaction::try_from(outbound_tx_sql)?; return Ok(outbound_tx.destination_public_key); } if let Ok(mut inbound_tx_sql) = InboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { self.decrypt_if_necessary(&mut inbound_tx_sql)?; let inbound_tx = InboundTransaction::try_from(inbound_tx_sql)?; return Ok(inbound_tx.source_public_key); } Err(TransactionStorageError::ValuesNotFound) } fn complete_outbound_transaction( &self, tx_id: u64, completed_transaction: CompletedTransaction, ) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); if CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)).is_ok() { return Err(TransactionStorageError::TransactionAlreadyExists); } match OutboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; self.encrypt_if_necessary(&mut completed_tx_sql)?; v.delete(&(*conn))?; completed_tx_sql.commit(&(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound( DbKey::PendingOutboundTransaction(tx_id), )) }, Err(e) => return Err(e), }; Ok(()) } fn complete_inbound_transaction( &self, tx_id: u64, completed_transaction: CompletedTransaction, ) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); if CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)).is_ok() { return Err(TransactionStorageError::TransactionAlreadyExists); } match InboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_transaction)?; self.encrypt_if_necessary(&mut completed_tx_sql)?; v.delete(&(*conn))?; completed_tx_sql.commit(&(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound( DbKey::PendingInboundTransaction(tx_id), )) }, Err(e) => return Err(e), }; Ok(()) } fn broadcast_completed_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, 
&(*conn)) { Ok(v) => { if TransactionStatus::try_from(v.status)? == TransactionStatus::Completed { v.update( UpdateCompletedTransactionSql::from(UpdateCompletedTransaction { status: Some(TransactionStatus::Broadcast), timestamp: None, cancelled: None, direction: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }), &(*conn), )?; } }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))) }, Err(e) => return Err(e), }; Ok(()) } fn mine_completed_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.update( UpdateCompletedTransactionSql::from(UpdateCompletedTransaction { status: Some(TransactionStatus::MinedUnconfirmed), timestamp: None, cancelled: None, direction: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }), &(*conn), )?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))) }, Err(e) => return Err(e), }; Ok(()) } fn cancel_completed_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.cancel(&(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } fn set_pending_transaction_cancellation_status( &self, tx_id: u64, cancelled: bool, ) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match InboundTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { v.set_cancelled(cancelled, &(*conn))?; }, Err(_) => { match OutboundTransactionSql::find(tx_id, &(*conn)) { Ok(v) => { v.set_cancelled(cancelled, &(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, Err(e) => return Err(e), }; }, }; Ok(()) } fn mark_direct_send_success(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match InboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.update( UpdateInboundTransactionSql { cancelled: None, direct_send_success: Some(1i32), receiver_protocol: None, send_count: None, last_send_timestamp: None, }, &(*conn), )?; }, Err(_) => { match OutboundTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.update( UpdateOutboundTransactionSql { cancelled: None, direct_send_success: Some(1i32), sender_protocol: None, send_count: None, last_send_timestamp: None, }, &(*conn), )?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValuesNotFound); }, Err(e) => return Err(e), }; }, }; Ok(()) } fn apply_encryption(&self, cipher: Aes256Gcm) -> Result<(), TransactionStorageError> { let mut current_cipher = acquire_write_lock!(self.cipher); if (*current_cipher).is_some() { return Err(TransactionStorageError::AlreadyEncrypted); } let conn = self.database_connection.acquire_lock(); let mut inbound_txs = InboundTransactionSql::index(&conn)?; // If the db 
is already encrypted then the very first output we try to encrypt will fail. for tx in inbound_txs.iter_mut() { // Test if this transaction is encrypted or not to avoid a double encryption. let _ = InboundTransaction::try_from(tx.clone()).map_err(|_| { error!( target: LOG_TARGET, "Could not convert Inbound Transaction from database version, it might already be encrypted" ); TransactionStorageError::AlreadyEncrypted })?; tx.encrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; tx.update_encryption(&conn)?; } let mut outbound_txs = OutboundTransactionSql::index(&conn)?; // If the db is already encrypted then the very first output we try to encrypt will fail. for tx in outbound_txs.iter_mut() { // Test if this transaction is encrypted or not to avoid a double encryption. let _ = OutboundTransaction::try_from(tx.clone()).map_err(|_| { error!( target: LOG_TARGET, "Could not convert Outbound Transaction from database version, it might already be encrypted" ); TransactionStorageError::AlreadyEncrypted })?; tx.encrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; tx.update_encryption(&conn)?; } let mut completed_txs = CompletedTransactionSql::index(&conn)?; // If the db is already encrypted then the very first output we try to encrypt will fail. for tx in completed_txs.iter_mut() { // Test if this transaction is encrypted or not to avoid a double encryption. let _ = CompletedTransaction::try_from(tx.clone()).map_err(|_| { error!( target: LOG_TARGET, "Could not convert Completed Transaction from database version, it might already be encrypted" ); TransactionStorageError::AlreadyEncrypted })?; tx.encrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Encryption Error".to_string()))?; tx.update_encryption(&conn)?; } (*current_cipher) = Some(cipher); Ok(()) } fn remove_encryption(&self) -> Result<(), TransactionStorageError> { let mut current_cipher = acquire_write_lock!(self.cipher); let cipher = if let Some(cipher) = (*current_cipher).clone().take() { cipher } else { return Ok(()); }; let conn = self.database_connection.acquire_lock(); let mut inbound_txs = InboundTransactionSql::index(&conn)?; for tx in inbound_txs.iter_mut() { tx.decrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; tx.update_encryption(&conn)?; } let mut outbound_txs = OutboundTransactionSql::index(&conn)?; for tx in outbound_txs.iter_mut() { tx.decrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; tx.update_encryption(&conn)?; } let mut completed_txs = CompletedTransactionSql::index(&conn)?; for tx in completed_txs.iter_mut() { tx.decrypt(&cipher) .map_err(|_| TransactionStorageError::AeadError("Decryption Error".to_string()))?; tx.update_encryption(&conn)?; } // Now that all the decryption has been completed we can safely remove the cipher fully let _ = (*current_cipher).take(); Ok(()) } fn cancel_coinbase_transaction_at_block_height(&self, block_height: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); let coinbase_txs = CompletedTransactionSql::index_coinbase_at_block_height(block_height as i64, &conn)?; for c in coinbase_txs.iter() { c.cancel(&conn)?; } Ok(()) } fn find_coinbase_transaction_at_block_height( &self, block_height: u64, amount: MicroTari, ) -> Result<Option<CompletedTransaction>, TransactionStorageError> { let conn = self.database_connection.acquire_lock(); let mut
coinbase_txs = CompletedTransactionSql::index_coinbase_at_block_height(block_height as i64, &conn)?; for c in coinbase_txs.iter_mut() { self.decrypt_if_necessary(c)?; let completed_tx = CompletedTransaction::try_from(c.clone()).map_err(|_| { TransactionStorageError::ConversionError("Error converting to CompletedTransaction".to_string()) })?; if completed_tx.amount == amount { return Ok(Some(completed_tx)); } } Ok(None) } fn increment_send_count(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); if let Ok(tx) = CompletedTransactionSql::find(tx_id, &conn) { let update = UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: None, direction: None, transaction_protocol: None, send_count: Some(tx.send_count + 1), last_send_timestamp: Some(Some(Utc::now().naive_utc())), valid: None, confirmations: None, mined_height: None, }; tx.update(update, &conn)?; } else if let Ok(tx) = OutboundTransactionSql::find(tx_id, &conn) { let update = UpdateOutboundTransactionSql { cancelled: None, direct_send_success: None, sender_protocol: None, send_count: Some(tx.send_count + 1), last_send_timestamp: Some(Some(Utc::now().naive_utc())), }; tx.update(update, &conn)?; } else if let Ok(tx) = InboundTransactionSql::find_by_cancelled(tx_id, false, &conn) { let update = UpdateInboundTransactionSql { cancelled: None, direct_send_success: None, receiver_protocol: None, send_count: Some(tx.send_count + 1), last_send_timestamp: Some(Some(Utc::now().naive_utc())), }; tx.update(update, &conn)?; } else { return Err(TransactionStorageError::ValuesNotFound); } Ok(()) } fn confirm_broadcast_or_coinbase_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { if v.status == TransactionStatus::MinedUnconfirmed as i32 || v.status == TransactionStatus::MinedConfirmed as i32 || v.status == TransactionStatus::Broadcast as i32 || v.status == TransactionStatus::Coinbase as i32 { v.confirm(&(*conn))?; } else { return Err(TransactionStorageError::TransactionNotMined(tx_id)); } }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } fn unconfirm_mined_transaction(&self, tx_id: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { if v.status == TransactionStatus::MinedUnconfirmed as i32 || v.status == TransactionStatus::MinedConfirmed as i32 { v.unconfirm(&(*conn))?; } else { return Err(TransactionStorageError::TransactionNotMined(tx_id)); } }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } fn set_completed_transaction_validity(&self, tx_id: u64, valid: bool) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.set_validity(valid, &(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } fn 
update_confirmations(&self, tx_id: u64, confirmations: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.update_confirmations(confirmations, &(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } fn update_mined_height(&self, tx_id: u64, mined_height: u64) -> Result<(), TransactionStorageError> { let conn = self.database_connection.acquire_lock(); match CompletedTransactionSql::find_by_cancelled(tx_id, false, &(*conn)) { Ok(v) => { v.update_mined_height(mined_height, &(*conn))?; }, Err(TransactionStorageError::DieselError(DieselError::NotFound)) => { return Err(TransactionStorageError::ValueNotFound(DbKey::CompletedTransaction( tx_id, ))); }, Err(e) => return Err(e), }; Ok(()) } } #[derive(Clone, Debug, Queryable, Insertable, PartialEq)] #[table_name = "inbound_transactions"] struct InboundTransactionSql { tx_id: i64, source_public_key: Vec<u8>, amount: i64, receiver_protocol: String, message: String, timestamp: NaiveDateTime, cancelled: i32, direct_send_success: i32, send_count: i32, last_send_timestamp: Option<NaiveDateTime>, } impl InboundTransactionSql { pub fn commit(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { diesel::insert_into(inbound_transactions::table) .values(self.clone()) .execute(conn)?; Ok(()) } pub fn index(conn: &SqliteConnection) -> Result<Vec<InboundTransactionSql>, TransactionStorageError> { Ok(inbound_transactions::table.load::<InboundTransactionSql>(conn)?) } pub fn index_by_cancelled( conn: &SqliteConnection, cancelled: bool, ) -> Result<Vec<InboundTransactionSql>, TransactionStorageError> { Ok(inbound_transactions::table .filter(inbound_transactions::cancelled.eq(cancelled as i32)) .load::<InboundTransactionSql>(conn)?) } pub fn find(tx_id: TxId, conn: &SqliteConnection) -> Result<InboundTransactionSql, TransactionStorageError> { Ok(inbound_transactions::table .filter(inbound_transactions::tx_id.eq(tx_id as i64)) .first::<InboundTransactionSql>(conn)?) } pub fn find_by_cancelled( tx_id: TxId, cancelled: bool, conn: &SqliteConnection, ) -> Result<InboundTransactionSql, TransactionStorageError> { Ok(inbound_transactions::table .filter(inbound_transactions::tx_id.eq(tx_id as i64)) .filter(inbound_transactions::cancelled.eq(cancelled as i32)) .first::<InboundTransactionSql>(conn)?) 
} pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(&self.tx_id))) .execute(conn)?; if num_deleted == 0 { return Err(TransactionStorageError::ValuesNotFound); } Ok(()) } pub fn update( &self, update: UpdateInboundTransactionSql, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { let num_updated = diesel::update(inbound_transactions::table.filter(inbound_transactions::tx_id.eq(&self.tx_id))) .set(update) .execute(conn)?; if num_updated == 0 { return Err(TransactionStorageError::UnexpectedResult( "Database update error".to_string(), )); } Ok(()) } pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateInboundTransactionSql { cancelled: Some(cancelled as i32), direct_send_success: None, receiver_protocol: None, send_count: None, last_send_timestamp: None, }, conn, ) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateInboundTransactionSql { cancelled: None, direct_send_success: None, receiver_protocol: Some(self.receiver_protocol.clone()), send_count: None, last_send_timestamp: None, }, conn, ) } } impl Encryptable<Aes256Gcm> for InboundTransactionSql { fn encrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let encrypted_protocol = encrypt_bytes_integral_nonce(cipher, self.receiver_protocol.as_bytes().to_vec())?; self.receiver_protocol = encrypted_protocol.to_hex(); Ok(()) } fn decrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let decrypted_protocol = decrypt_bytes_integral_nonce( cipher, from_hex(self.receiver_protocol.as_str()).map_err(|_| aes_gcm::Error)?, )?; self.receiver_protocol = from_utf8(decrypted_protocol.as_slice()) .map_err(|_| aes_gcm::Error)? 
.to_string(); Ok(()) } } impl TryFrom<InboundTransaction> for InboundTransactionSql { type Error = TransactionStorageError; fn try_from(i: InboundTransaction) -> Result<Self, Self::Error> { Ok(Self { tx_id: i.tx_id as i64, source_public_key: i.source_public_key.to_vec(), amount: u64::from(i.amount) as i64, receiver_protocol: serde_json::to_string(&i.receiver_protocol)?, message: i.message, timestamp: i.timestamp, cancelled: i.cancelled as i32, direct_send_success: i.direct_send_success as i32, send_count: i.send_count as i32, last_send_timestamp: i.last_send_timestamp, }) } } impl TryFrom<InboundTransactionSql> for InboundTransaction { type Error = TransactionStorageError; fn try_from(i: InboundTransactionSql) -> Result<Self, Self::Error> { Ok(Self { tx_id: i.tx_id as u64, source_public_key: PublicKey::from_vec(&i.source_public_key) .map_err(|_| TransactionStorageError::ConversionError("Invalid Source Publickey".to_string()))?, amount: MicroTari::from(i.amount as u64), receiver_protocol: serde_json::from_str(&i.receiver_protocol)?, status: TransactionStatus::Pending, message: i.message, timestamp: i.timestamp, cancelled: i.cancelled != 0, direct_send_success: i.direct_send_success != 0, send_count: i.send_count as u32, last_send_timestamp: i.last_send_timestamp, }) } } #[derive(AsChangeset)] #[table_name = "inbound_transactions"] pub struct UpdateInboundTransactionSql { cancelled: Option<i32>, direct_send_success: Option<i32>, receiver_protocol: Option<String>, send_count: Option<i32>, last_send_timestamp: Option<Option<NaiveDateTime>>, } /// A structure to represent a Sql compatible version of the OutboundTransaction struct #[derive(Clone, Debug, Queryable, Insertable, PartialEq)] #[table_name = "outbound_transactions"] struct OutboundTransactionSql { tx_id: i64, destination_public_key: Vec<u8>, amount: i64, fee: i64, sender_protocol: String, message: String, timestamp: NaiveDateTime, cancelled: i32, direct_send_success: i32, send_count: i32, last_send_timestamp: Option<NaiveDateTime>, } impl OutboundTransactionSql { pub fn commit(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { diesel::insert_into(outbound_transactions::table) .values(self.clone()) .execute(conn)?; Ok(()) } pub fn index(conn: &SqliteConnection) -> Result<Vec<OutboundTransactionSql>, TransactionStorageError> { Ok(outbound_transactions::table.load::<OutboundTransactionSql>(conn)?) } pub fn index_by_cancelled( conn: &SqliteConnection, cancelled: bool, ) -> Result<Vec<OutboundTransactionSql>, TransactionStorageError> { Ok(outbound_transactions::table .filter(outbound_transactions::cancelled.eq(cancelled as i32)) .load::<OutboundTransactionSql>(conn)?) } pub fn find(tx_id: TxId, conn: &SqliteConnection) -> Result<OutboundTransactionSql, TransactionStorageError> { Ok(outbound_transactions::table .filter(outbound_transactions::tx_id.eq(tx_id as i64)) .first::<OutboundTransactionSql>(conn)?) } pub fn find_by_cancelled( tx_id: TxId, cancelled: bool, conn: &SqliteConnection, ) -> Result<OutboundTransactionSql, TransactionStorageError> { Ok(outbound_transactions::table .filter(outbound_transactions::tx_id.eq(tx_id as i64)) .filter(outbound_transactions::cancelled.eq(cancelled as i32)) .first::<OutboundTransactionSql>(conn)?) 
} pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) .execute(conn)?; if num_deleted == 0 { return Err(TransactionStorageError::ValuesNotFound); } Ok(()) } pub fn update( &self, update: UpdateOutboundTransactionSql, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { let num_updated = diesel::update(outbound_transactions::table.filter(outbound_transactions::tx_id.eq(&self.tx_id))) .set(update) .execute(conn)?; if num_updated == 0 { return Err(TransactionStorageError::UnexpectedResult( "Database update error".to_string(), )); } Ok(()) } pub fn set_cancelled(&self, cancelled: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateOutboundTransactionSql { cancelled: Some(cancelled as i32), direct_send_success: None, sender_protocol: None, send_count: None, last_send_timestamp: None, }, conn, ) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateOutboundTransactionSql { cancelled: None, direct_send_success: None, sender_protocol: Some(self.sender_protocol.clone()), send_count: None, last_send_timestamp: None, }, conn, ) } } impl Encryptable<Aes256Gcm> for OutboundTransactionSql { fn encrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let encrypted_protocol = encrypt_bytes_integral_nonce(cipher, self.sender_protocol.as_bytes().to_vec())?; self.sender_protocol = encrypted_protocol.to_hex(); Ok(()) } fn decrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let decrypted_protocol = decrypt_bytes_integral_nonce( cipher, from_hex(self.sender_protocol.as_str()).map_err(|_| aes_gcm::Error)?, )?; self.sender_protocol = from_utf8(decrypted_protocol.as_slice()) .map_err(|_| aes_gcm::Error)? 
.to_string(); Ok(()) } } impl TryFrom<OutboundTransaction> for OutboundTransactionSql { type Error = TransactionStorageError; fn try_from(o: OutboundTransaction) -> Result<Self, Self::Error> { Ok(Self { tx_id: o.tx_id as i64, destination_public_key: o.destination_public_key.to_vec(), amount: u64::from(o.amount) as i64, fee: u64::from(o.fee) as i64, sender_protocol: serde_json::to_string(&o.sender_protocol)?, message: o.message, timestamp: o.timestamp, cancelled: o.cancelled as i32, direct_send_success: o.direct_send_success as i32, send_count: o.send_count as i32, last_send_timestamp: o.last_send_timestamp, }) } } impl TryFrom<OutboundTransactionSql> for OutboundTransaction { type Error = TransactionStorageError; fn try_from(o: OutboundTransactionSql) -> Result<Self, Self::Error> { Ok(Self { tx_id: o.tx_id as u64, destination_public_key: PublicKey::from_vec(&o.destination_public_key) .map_err(|_| TransactionStorageError::ConversionError("Invalid destination PublicKey".to_string()))?, amount: MicroTari::from(o.amount as u64), fee: MicroTari::from(o.fee as u64), sender_protocol: serde_json::from_str(&o.sender_protocol)?, status: TransactionStatus::Pending, message: o.message, timestamp: o.timestamp, cancelled: o.cancelled != 0, direct_send_success: o.direct_send_success != 0, send_count: o.send_count as u32, last_send_timestamp: o.last_send_timestamp, }) } } #[derive(AsChangeset)] #[table_name = "outbound_transactions"] pub struct UpdateOutboundTransactionSql { cancelled: Option<i32>, direct_send_success: Option<i32>, sender_protocol: Option<String>, send_count: Option<i32>, last_send_timestamp: Option<Option<NaiveDateTime>>, } /// A structure to represent a Sql compatible version of the CompletedTransaction struct #[derive(Clone, Debug, Queryable, Insertable, PartialEq)] #[table_name = "completed_transactions"] struct CompletedTransactionSql { tx_id: i64, source_public_key: Vec<u8>, destination_public_key: Vec<u8>, amount: i64, fee: i64, transaction_protocol: String, status: i32, message: String, timestamp: NaiveDateTime, cancelled: i32, direction: Option<i32>, coinbase_block_height: Option<i64>, send_count: i32, last_send_timestamp: Option<NaiveDateTime>, valid: i32, confirmations: Option<i64>, mined_height: Option<i64>, } impl CompletedTransactionSql { pub fn commit(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { diesel::insert_into(completed_transactions::table) .values(self.clone()) .execute(conn)?; Ok(()) } pub fn index(conn: &SqliteConnection) -> Result<Vec<CompletedTransactionSql>, TransactionStorageError> { Ok(completed_transactions::table.load::<CompletedTransactionSql>(conn)?) } pub fn index_by_cancelled( conn: &SqliteConnection, cancelled: bool, ) -> Result<Vec<CompletedTransactionSql>, TransactionStorageError> { Ok(completed_transactions::table .filter(completed_transactions::cancelled.eq(cancelled as i32)) .load::<CompletedTransactionSql>(conn)?) } pub fn index_coinbase_at_block_height( block_height: i64, conn: &SqliteConnection, ) -> Result<Vec<CompletedTransactionSql>, TransactionStorageError> { Ok(completed_transactions::table .filter(completed_transactions::status.eq(TransactionStatus::Coinbase as i32)) .filter(completed_transactions::coinbase_block_height.eq(block_height)) .load::<CompletedTransactionSql>(conn)?) 
} pub fn find(tx_id: TxId, conn: &SqliteConnection) -> Result<CompletedTransactionSql, TransactionStorageError> { Ok(completed_transactions::table .filter(completed_transactions::tx_id.eq(tx_id as i64)) .first::<CompletedTransactionSql>(conn)?) } pub fn find_by_cancelled( tx_id: TxId, cancelled: bool, conn: &SqliteConnection, ) -> Result<CompletedTransactionSql, TransactionStorageError> { Ok(completed_transactions::table .filter(completed_transactions::tx_id.eq(tx_id as i64)) .filter(completed_transactions::cancelled.eq(cancelled as i32)) .first::<CompletedTransactionSql>(conn)?) } pub fn delete(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { let num_deleted = diesel::delete(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) .execute(conn)?; if num_deleted == 0 { return Err(TransactionStorageError::ValuesNotFound); } Ok(()) } pub fn update( &self, updated_tx: UpdateCompletedTransactionSql, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { let num_updated = diesel::update(completed_transactions::table.filter(completed_transactions::tx_id.eq(&self.tx_id))) .set(updated_tx) .execute(conn)?; if num_updated == 0 { return Err(TransactionStorageError::UnexpectedResult( "Database update error".to_string(), )); } Ok(()) } pub fn cancel(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: Some(1i32), direction: None, transaction_protocol: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }, conn, )?; Ok(()) } pub fn confirm(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: Some(TransactionStatus::MinedConfirmed as i32), timestamp: None, cancelled: None, direction: None, transaction_protocol: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }, conn, )?; Ok(()) } pub fn unconfirm(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: Some(TransactionStatus::MinedUnconfirmed as i32), timestamp: None, cancelled: None, direction: None, transaction_protocol: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }, conn, )?; Ok(()) } pub fn set_validity(&self, valid: bool, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: None, direction: None, transaction_protocol: None, send_count: None, last_send_timestamp: None, valid: Some(valid as i32), confirmations: None, mined_height: None, }, conn, )?; Ok(()) } pub fn update_encryption(&self, conn: &SqliteConnection) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: None, direction: None, transaction_protocol: Some(self.transaction_protocol.clone()), send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: None, }, conn, )?; Ok(()) } pub fn update_confirmations( &self, confirmations: u64, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: None, direction: None, transaction_protocol: Some(self.transaction_protocol.clone()), send_count: None, last_send_timestamp: 
None, valid: None, confirmations: Some(Some(confirmations as i64)), mined_height: None, }, conn, )?; Ok(()) } pub fn update_mined_height( &self, mined_height: u64, conn: &SqliteConnection, ) -> Result<(), TransactionStorageError> { self.update( UpdateCompletedTransactionSql { status: None, timestamp: None, cancelled: None, direction: None, transaction_protocol: None, send_count: None, last_send_timestamp: None, valid: None, confirmations: None, mined_height: Some(Some(mined_height as i64)), }, conn, )?; Ok(()) } } impl Encryptable<Aes256Gcm> for CompletedTransactionSql { fn encrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let encrypted_protocol = encrypt_bytes_integral_nonce(cipher, self.transaction_protocol.as_bytes().to_vec())?; self.transaction_protocol = encrypted_protocol.to_hex(); Ok(()) } fn decrypt(&mut self, cipher: &Aes256Gcm) -> Result<(), AeadError> { let decrypted_protocol = decrypt_bytes_integral_nonce( cipher, from_hex(self.transaction_protocol.as_str()).map_err(|_| aes_gcm::Error)?, )?; self.transaction_protocol = from_utf8(decrypted_protocol.as_slice()) .map_err(|_| aes_gcm::Error)? .to_string(); Ok(()) } } impl TryFrom<CompletedTransaction> for CompletedTransactionSql { type Error = TransactionStorageError; fn try_from(c: CompletedTransaction) -> Result<Self, Self::Error> { Ok(Self { tx_id: c.tx_id as i64, source_public_key: c.source_public_key.to_vec(), destination_public_key: c.destination_public_key.to_vec(), amount: u64::from(c.amount) as i64, fee: u64::from(c.fee) as i64, transaction_protocol: serde_json::to_string(&c.transaction)?, status: c.status as i32, message: c.message, timestamp: c.timestamp, cancelled: c.cancelled as i32, direction: Some(c.direction as i32), coinbase_block_height: c.coinbase_block_height.map(|b| b as i64), send_count: c.send_count as i32, last_send_timestamp: c.last_send_timestamp, valid: c.valid as i32, confirmations: c.confirmations.map(|ic| ic as i64), mined_height: c.mined_height.map(|ic| ic as i64), }) } } impl TryFrom<CompletedTransactionSql> for CompletedTransaction { type Error = TransactionStorageError; fn try_from(c: CompletedTransactionSql) -> Result<Self, Self::Error> { Ok(Self { tx_id: c.tx_id as u64, source_public_key: PublicKey::from_vec(&c.source_public_key) .map_err(|_| TransactionStorageError::ConversionError("Invalid source Publickey".to_string()))?, destination_public_key: PublicKey::from_vec(&c.destination_public_key) .map_err(|_| TransactionStorageError::ConversionError("Invalid destination PublicKey".to_string()))?, amount: MicroTari::from(c.amount as u64), fee: MicroTari::from(c.fee as u64), transaction: serde_json::from_str(&c.transaction_protocol)?, status: TransactionStatus::try_from(c.status)?, message: c.message, timestamp: c.timestamp, cancelled: c.cancelled != 0, direction: TransactionDirection::try_from(c.direction.unwrap_or(2i32))?, coinbase_block_height: c.coinbase_block_height.map(|b| b as u64), send_count: c.send_count as u32, last_send_timestamp: c.last_send_timestamp, valid: c.valid != 0, confirmations: c.confirmations.map(|ic| ic as u64), mined_height: c.mined_height.map(|ic| ic as u64), }) } } /// These are the fields that can be updated for a Completed Transaction pub struct UpdateCompletedTransaction { status: Option<TransactionStatus>, timestamp: Option<NaiveDateTime>, cancelled: Option<bool>, direction: Option<TransactionDirection>, send_count: Option<u32>, last_send_timestamp: Option<Option<NaiveDateTime>>, valid: Option<bool>, confirmations: Option<Option<u64>>, mined_height: 
Option<Option<u64>>, } #[derive(AsChangeset)] #[table_name = "completed_transactions"] pub struct UpdateCompletedTransactionSql { status: Option<i32>, timestamp: Option<NaiveDateTime>, cancelled: Option<i32>, direction: Option<i32>, transaction_protocol: Option<String>, send_count: Option<i32>, last_send_timestamp: Option<Option<NaiveDateTime>>, valid: Option<i32>, confirmations: Option<Option<i64>>, mined_height: Option<Option<i64>>, } /// Map a Rust friendly UpdateCompletedTransaction to the Sql data type form impl From<UpdateCompletedTransaction> for UpdateCompletedTransactionSql { fn from(u: UpdateCompletedTransaction) -> Self { Self { status: u.status.map(|s| s as i32), timestamp: u.timestamp, cancelled: u.cancelled.map(|c| c as i32), direction: u.direction.map(|d| d as i32), transaction_protocol: None, send_count: u.send_count.map(|c| c as i32), last_send_timestamp: u.last_send_timestamp, valid: u.valid.map(|c| c as i32), confirmations: u.confirmations.map(|c| c.map(|ic| ic as i64)), mined_height: u.mined_height.map(|c| c.map(|ic| ic as i64)), } } } #[cfg(test)] mod test { use std::convert::TryFrom; use aes_gcm::{ aead::{generic_array::GenericArray, NewAead}, Aes256Gcm, }; use chrono::Utc; use diesel::{Connection, SqliteConnection}; use rand::rngs::OsRng; use tari_crypto::{ keys::{PublicKey as PublicKeyTrait, SecretKey as SecretKeyTrait}, script, script::{ExecutionStack, TariScript}, }; use tempfile::tempdir; use tari_common_types::types::{HashDigest, PrivateKey, PublicKey}; use tari_core::transactions::{ helpers::{create_unblinded_output, TestParams}, tari_amount::MicroTari, transaction::{OutputFeatures, Transaction}, transaction_protocol::sender::TransactionSenderMessage, CryptoFactories, ReceiverTransactionProtocol, SenderTransactionProtocol, }; use tari_test_utils::random::string; use crate::{ storage::sqlite_utilities::WalletDbConnection, transaction_service::storage::{ database::{DbKey, TransactionBackend}, models::{ CompletedTransaction, InboundTransaction, OutboundTransaction, TransactionDirection, TransactionStatus, }, sqlite_db::{ CompletedTransactionSql, InboundTransactionSql, OutboundTransactionSql, TransactionServiceSqliteDatabase, }, }, util::encryption::Encryptable, }; #[test] fn test_crud() { let factories = CryptoFactories::default(); let db_name = format!("{}.sqlite3", string(8).as_str()); let temp_dir = tempdir().unwrap(); let db_folder = temp_dir.path().to_str().unwrap().to_string(); let db_path = format!("{}{}", db_folder, db_name); embed_migrations!("./migrations"); let conn = SqliteConnection::establish(&db_path).unwrap_or_else(|_| panic!("Error connecting to {}", db_path)); embedded_migrations::run_with_output(&conn, &mut std::io::stdout()).expect("Migration failed"); conn.execute("PRAGMA foreign_keys = ON").unwrap(); let mut builder = SenderTransactionProtocol::builder(1); let test_params = TestParams::new(); let input = create_unblinded_output( TariScript::default(), OutputFeatures::default(), test_params, MicroTari::from(100_000), ); let amount = MicroTari::from(10_000); builder .with_lock_height(0) .with_fee_per_gram(MicroTari::from(177)) .with_offset(PrivateKey::random(&mut OsRng)) .with_private_nonce(PrivateKey::random(&mut OsRng)) .with_amount(0, amount) .with_message("Yo!".to_string()) .with_input( input .as_transaction_input(&factories.commitment) .expect("Should be able to make transaction input"), input, ) .with_change_secret(PrivateKey::random(&mut OsRng)) .with_recipient_data( 0, script!(Nop), PrivateKey::random(&mut OsRng), Default::default(), 
PrivateKey::random(&mut OsRng), ) .with_change_script(script!(Nop), ExecutionStack::default(), PrivateKey::random(&mut OsRng)); let mut stp = builder.build::<HashDigest>(&factories).unwrap(); let outbound_tx1 = OutboundTransaction { tx_id: 1u64, destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: stp.get_fee_amount().unwrap(), sender_protocol: stp.clone(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let outbound_tx2 = OutboundTransactionSql::try_from(OutboundTransaction { tx_id: 2u64, destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: stp.get_fee_amount().unwrap(), sender_protocol: stp.clone(), status: TransactionStatus::Pending, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }) .unwrap(); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); outbound_tx2.commit(&conn).unwrap(); let outbound_txs = OutboundTransactionSql::index_by_cancelled(&conn, false).unwrap(); assert_eq!(outbound_txs.len(), 2); let returned_outbound_tx = OutboundTransaction::try_from(OutboundTransactionSql::find_by_cancelled(1u64, false, &conn).unwrap()) .unwrap(); assert_eq!( OutboundTransactionSql::try_from(returned_outbound_tx).unwrap(), OutboundTransactionSql::try_from(outbound_tx1.clone()).unwrap() ); let rtp = ReceiverTransactionProtocol::new( TransactionSenderMessage::Single(Box::new(stp.build_single_round_message().unwrap())), PrivateKey::random(&mut OsRng), PrivateKey::random(&mut OsRng), OutputFeatures::default(), &factories, ); let inbound_tx1 = InboundTransaction { tx_id: 2, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, receiver_protocol: rtp.clone(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let inbound_tx2 = InboundTransaction { tx_id: 3, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, receiver_protocol: rtp, status: TransactionStatus::Pending, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); InboundTransactionSql::try_from(inbound_tx2) .unwrap() .commit(&conn) .unwrap(); let inbound_txs = InboundTransactionSql::index_by_cancelled(&conn, false).unwrap(); assert_eq!(inbound_txs.len(), 2); let returned_inbound_tx = InboundTransaction::try_from(InboundTransactionSql::find_by_cancelled(2u64, false, &conn).unwrap()) .unwrap(); assert_eq!( InboundTransactionSql::try_from(returned_inbound_tx).unwrap(), InboundTransactionSql::try_from(inbound_tx1.clone()).unwrap() ); let tx = Transaction::new( vec![], vec![], vec![], PrivateKey::random(&mut OsRng), PrivateKey::random(&mut OsRng), ); let completed_tx1 = CompletedTransaction { tx_id: 2, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: MicroTari::from(100), transaction: tx.clone(), status: TransactionStatus::MinedUnconfirmed, message: 
"Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: None, send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; let completed_tx2 = CompletedTransaction { tx_id: 3, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: MicroTari::from(100), transaction: tx.clone(), status: TransactionStatus::Broadcast, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: None, send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); assert!(CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .commit(&conn) .is_err()); CompletedTransactionSql::try_from(completed_tx2) .unwrap() .commit(&conn) .unwrap(); let completed_txs = CompletedTransactionSql::index_by_cancelled(&conn, false).unwrap(); assert_eq!(completed_txs.len(), 2); let returned_completed_tx = CompletedTransaction::try_from(CompletedTransactionSql::find_by_cancelled(2u64, false, &conn).unwrap()) .unwrap(); assert_eq!( CompletedTransactionSql::try_from(returned_completed_tx).unwrap(), CompletedTransactionSql::try_from(completed_tx1.clone()).unwrap() ); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .delete(&conn) .unwrap(); assert!(InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .delete(&conn) .is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .delete(&conn) .unwrap(); assert!(OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .delete(&conn) .is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_err()); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, false, &conn).is_ok()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .delete(&conn) .unwrap(); assert!(CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .delete(&conn) .is_err()); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, false, &conn).is_err()); InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .set_cancelled(true, &conn) .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_ok()); InboundTransactionSql::try_from(inbound_tx1.clone()) .unwrap() .set_cancelled(false, &conn) .unwrap(); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, true, &conn).is_err()); assert!(InboundTransactionSql::find_by_cancelled(inbound_tx1.tx_id, false, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); 
assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .set_cancelled(true, &conn) .unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_ok()); OutboundTransactionSql::try_from(outbound_tx1.clone()) .unwrap() .set_cancelled(false, &conn) .unwrap(); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, true, &conn).is_err()); assert!(OutboundTransactionSql::find_by_cancelled(outbound_tx1.tx_id, false, &conn).is_ok()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .commit(&conn) .unwrap(); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_err()); CompletedTransactionSql::try_from(completed_tx1.clone()) .unwrap() .cancel(&conn) .unwrap(); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, false, &conn).is_err()); assert!(CompletedTransactionSql::find_by_cancelled(completed_tx1.tx_id, true, &conn).is_ok()); let coinbase_tx1 = CompletedTransaction { tx_id: 101, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: MicroTari::from(100), transaction: tx.clone(), status: TransactionStatus::Coinbase, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: Some(2), send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; let coinbase_tx2 = CompletedTransaction { tx_id: 102, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: MicroTari::from(100), transaction: tx.clone(), status: TransactionStatus::Coinbase, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: Some(2), send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; let coinbase_tx3 = CompletedTransaction { tx_id: 103, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount, fee: MicroTari::from(100), transaction: tx, status: TransactionStatus::Coinbase, message: "Hey!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: Some(3), send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; CompletedTransactionSql::try_from(coinbase_tx1) .unwrap() .commit(&conn) .unwrap(); CompletedTransactionSql::try_from(coinbase_tx2) .unwrap() .commit(&conn) .unwrap(); CompletedTransactionSql::try_from(coinbase_tx3) .unwrap() .commit(&conn) .unwrap(); let coinbase_txs = CompletedTransactionSql::index_coinbase_at_block_height(2, &conn).unwrap(); assert_eq!(coinbase_txs.len(), 2); assert!(coinbase_txs.iter().any(|c| c.tx_id == 101)); assert!(coinbase_txs.iter().any(|c| c.tx_id == 102)); assert!(!coinbase_txs.iter().any(|c| c.tx_id == 103)); } #[test] fn test_encryption_crud() { let db_name = format!("{}.sqlite3", string(8).as_str()); let temp_dir = tempdir().unwrap(); let db_folder = 
temp_dir.path().to_str().unwrap().to_string(); let db_path = format!("{}{}", db_folder, db_name); embed_migrations!("./migrations"); let conn = SqliteConnection::establish(&db_path).unwrap_or_else(|_| panic!("Error connecting to {}", db_path)); embedded_migrations::run_with_output(&conn, &mut std::io::stdout()).expect("Migration failed"); conn.execute("PRAGMA foreign_keys = ON").unwrap(); let key = GenericArray::from_slice(b"an example very very secret key."); let cipher = Aes256Gcm::new(key); let inbound_tx = InboundTransaction { tx_id: 1, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), receiver_protocol: ReceiverTransactionProtocol::new_placeholder(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let mut inbound_tx_sql = InboundTransactionSql::try_from(inbound_tx.clone()).unwrap(); inbound_tx_sql.commit(&conn).unwrap(); inbound_tx_sql.encrypt(&cipher).unwrap(); inbound_tx_sql.update_encryption(&conn).unwrap(); let mut db_inbound_tx = InboundTransactionSql::find_by_cancelled(1, false, &conn).unwrap(); db_inbound_tx.decrypt(&cipher).unwrap(); let decrypted_inbound_tx = InboundTransaction::try_from(db_inbound_tx).unwrap(); assert_eq!(inbound_tx, decrypted_inbound_tx); let outbound_tx = OutboundTransaction { tx_id: 2u64, destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), fee: MicroTari::from(10), sender_protocol: SenderTransactionProtocol::new_placeholder(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let mut outbound_tx_sql = OutboundTransactionSql::try_from(outbound_tx.clone()).unwrap(); outbound_tx_sql.commit(&conn).unwrap(); outbound_tx_sql.encrypt(&cipher).unwrap(); outbound_tx_sql.update_encryption(&conn).unwrap(); let mut db_outbound_tx = OutboundTransactionSql::find_by_cancelled(2, false, &conn).unwrap(); db_outbound_tx.decrypt(&cipher).unwrap(); let decrypted_outbound_tx = OutboundTransaction::try_from(db_outbound_tx).unwrap(); assert_eq!(outbound_tx, decrypted_outbound_tx); let completed_tx = CompletedTransaction { tx_id: 3, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), fee: MicroTari::from(100), transaction: Transaction::new( vec![], vec![], vec![], PrivateKey::random(&mut OsRng), PrivateKey::random(&mut OsRng), ), status: TransactionStatus::MinedUnconfirmed, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: None, send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; let mut completed_tx_sql = CompletedTransactionSql::try_from(completed_tx.clone()).unwrap(); completed_tx_sql.commit(&conn).unwrap(); completed_tx_sql.encrypt(&cipher).unwrap(); completed_tx_sql.update_encryption(&conn).unwrap(); let mut db_completed_tx = CompletedTransactionSql::find_by_cancelled(3, false, &conn).unwrap(); db_completed_tx.decrypt(&cipher).unwrap(); let decrypted_completed_tx = CompletedTransaction::try_from(db_completed_tx).unwrap(); assert_eq!(completed_tx, decrypted_completed_tx); } #[test] fn 
test_apply_remove_encryption() { let db_name = format!("{}.sqlite3", string(8).as_str()); let temp_dir = tempdir().unwrap(); let db_folder = temp_dir.path().to_str().unwrap().to_string(); let db_path = format!("{}{}", db_folder, db_name); embed_migrations!("./migrations"); let conn = SqliteConnection::establish(&db_path).unwrap_or_else(|_| panic!("Error connecting to {}", db_path)); embedded_migrations::run_with_output(&conn, &mut std::io::stdout()).expect("Migration failed"); let inbound_tx = InboundTransaction { tx_id: 1, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), receiver_protocol: ReceiverTransactionProtocol::new_placeholder(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let inbound_tx_sql = InboundTransactionSql::try_from(inbound_tx).unwrap(); inbound_tx_sql.commit(&conn).unwrap(); let outbound_tx = OutboundTransaction { tx_id: 2u64, destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), fee: MicroTari::from(10), sender_protocol: SenderTransactionProtocol::new_placeholder(), status: TransactionStatus::Pending, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direct_send_success: false, send_count: 0, last_send_timestamp: None, }; let outbound_tx_sql = OutboundTransactionSql::try_from(outbound_tx).unwrap(); outbound_tx_sql.commit(&conn).unwrap(); let completed_tx = CompletedTransaction { tx_id: 3, source_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), destination_public_key: PublicKey::from_secret_key(&PrivateKey::random(&mut OsRng)), amount: MicroTari::from(100), fee: MicroTari::from(100), transaction: Transaction::new( vec![], vec![], vec![], PrivateKey::random(&mut OsRng), PrivateKey::random(&mut OsRng), ), status: TransactionStatus::MinedUnconfirmed, message: "Yo!".to_string(), timestamp: Utc::now().naive_utc(), cancelled: false, direction: TransactionDirection::Unknown, coinbase_block_height: None, send_count: 0, last_send_timestamp: None, valid: true, confirmations: None, mined_height: None, }; let completed_tx_sql = CompletedTransactionSql::try_from(completed_tx).unwrap(); completed_tx_sql.commit(&conn).unwrap(); let key = GenericArray::from_slice(b"an example very very secret key."); let cipher = Aes256Gcm::new(key); let connection = WalletDbConnection::new(conn, None); let db1 = TransactionServiceSqliteDatabase::new(connection.clone(), Some(cipher.clone())); assert!(db1.apply_encryption(cipher.clone()).is_err()); let db2 = TransactionServiceSqliteDatabase::new(connection.clone(), None); assert!(db2.remove_encryption().is_ok()); db2.apply_encryption(cipher).unwrap(); assert!(db2.fetch(&DbKey::PendingInboundTransactions).is_ok()); assert!(db2.fetch(&DbKey::PendingOutboundTransactions).is_ok()); assert!(db2.fetch(&DbKey::CompletedTransactions).is_ok()); let db3 = TransactionServiceSqliteDatabase::new(connection, None); assert!(db3.fetch(&DbKey::PendingInboundTransactions).is_err()); assert!(db3.fetch(&DbKey::PendingOutboundTransactions).is_err()); assert!(db3.fetch(&DbKey::CompletedTransactions).is_err()); db2.remove_encryption().unwrap(); assert!(db3.fetch(&DbKey::PendingInboundTransactions).is_ok()); assert!(db3.fetch(&DbKey::PendingOutboundTransactions).is_ok()); assert!(db3.fetch(&DbKey::CompletedTransactions).is_ok()); } }
insert
nodo_publisher.py
#!/usr/bin/env python3 # encoding: utf-8 #Line 1 - “Shebang”, tells the machine which interpreter will run this script. #Line 2 - Python 3 assumes only ASCII is used in the source code; #to use utf-8 it must be declared at the top of the script with encoding: utf-8 import rospy #Import rospy (the Python-ROS interface) from std_msgs.msg import String #Import the String message type def nodo(): #Define a function named nodo rospy.init_node('nodo_publisher') #Initialise our node and give it the name nodo_publisher pub = rospy.Publisher('example', String, queue_size=10) #Define our topic named example with message type String #and a limit of 10 queued messages rate = rospy.Rate(10) #Create a Rate object at 10hz (loop 10 times per second) while not rospy.is_shutdown(): #While loop - runs until Ctrl-C is pressed mensaje = "Nodo Publisher" #Declare a variable mensaje and assign it a string rospy.loginfo(mensaje) #Print Info-level log messages to the screen
rate.sleep() if __name__ == '__main__': #Call the main function try: nodo() # Call the nodo function except rospy.ROSInterruptException: # Catch the Ctrl-C exception to end the node's execution pass
pub.publish(mensaje) #Publish a String message on our example topic
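For context, a minimal companion subscriber sketch follows. It is not part of the original file: the node name 'nodo_subscriber' and the callback are assumptions for illustration; only the topic name 'example' and the String message type come from nodo_publisher.py above.

#!/usr/bin/env python3
# encoding: utf-8
# Hypothetical subscriber sketch that listens on the 'example' topic used by nodo_publisher.py
import rospy
from std_msgs.msg import String

def callback(msg):
    # msg.data carries the string sent by the publisher
    rospy.loginfo("Received: %s", msg.data)

def nodo_suscriptor():
    rospy.init_node('nodo_subscriber')             # assumed node name, for illustration only
    rospy.Subscriber('example', String, callback)  # same topic and message type as the publisher
    rospy.spin()                                   # keep the node alive until Ctrl-C

if __name__ == '__main__':
    try:
        nodo_suscriptor()
    except rospy.ROSInterruptException:
        pass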
build_ext.py
"""Pyrex.Distutils.build_ext Implements a version of the Distutils 'build_ext' command, for building Pyrex extension modules.""" # This module should be kept compatible with Python 2.1. __revision__ = "$Id:$" import sys, os, string, re from types import * from distutils.core import Command from distutils.errors import * from distutils.sysconfig import customize_compiler, get_python_version from distutils.dep_util import newer, newer_group from distutils import log from distutils.dir_util import mkpath try: from Pyrex.Compiler.Main \ import CompilationOptions, \ default_options as pyrex_default_options, \ compile as pyrex_compile from Pyrex.Compiler.Errors import PyrexError except ImportError: PyrexError = None from distutils.command import build_ext as _build_ext extension_name_re = _build_ext.extension_name_re show_compilers = _build_ext.show_compilers class build_ext(_build_ext.build_ext): description = "build C/C++ and Pyrex extensions (compile/link to build directory)" sep_by = _build_ext.build_ext.sep_by user_options = _build_ext.build_ext.user_options boolean_options = _build_ext.build_ext.boolean_options help_options = _build_ext.build_ext.help_options # Add the pyrex specific data. user_options.extend([ ('pyrex-cplus', None, "generate C++ source files"), ('pyrex-create-listing', None, "write errors to a listing file"), ('pyrex-include-dirs=', None, "path to the Pyrex include files" + sep_by), ('pyrex-c-in-temp', None, "put generated C files in temp directory"), ('pyrex-gen-pxi', None, "generate .pxi file for public declarations"), ]) boolean_options.extend([ 'pyrex-cplus', 'pyrex-create-listing', 'pyrex-c-in-temp' ]) def initialize_options(self): _build_ext.build_ext.initialize_options(self) self.pyrex_cplus = 0 self.pyrex_create_listing = 0 self.pyrex_include_dirs = None self.pyrex_c_in_temp = 0 self.pyrex_gen_pxi = 0 def finalize_options (self): _build_ext.build_ext.finalize_options(self) if self.pyrex_include_dirs is None: self.pyrex_include_dirs = [] elif type(self.pyrex_include_dirs) is StringType: self.pyrex_include_dirs = \ string.split(self.pyrex_include_dirs, os.pathsep) # finalize_options () def build_extensions(self): # First, sanity-check the 'extensions' list
def pyrex_sources(self, sources, extension): """ Walk the list of source files in 'sources', looking for Pyrex source (.pyx) files. Run Pyrex on all that are found, and return a modified 'sources' list with Pyrex source files replaced by the generated C (or C++) files. """ if PyrexError == None: raise DistutilsPlatformError, \ ("Pyrex does not appear to be installed " "on platform '%s'") % os.name new_sources = [] pyrex_sources = [] pyrex_targets = {} # Setup create_listing and cplus from the extension options if # Pyrex.Distutils.extension.Extension is used, otherwise just # use what was parsed from the command-line or the configuration file. # cplus will also be set to true if extension.language is equal to # 'C++' or 'c++'. #try: # create_listing = self.pyrex_create_listing or \ # extension.pyrex_create_listing # cplus = self.pyrex_cplus or \ # extension.pyrex_cplus or \ # (extension.language != None and \ # extension.language.lower() == 'c++') #except AttributeError: # create_listing = self.pyrex_create_listing # cplus = self.pyrex_cplus or \ # (extension.language != None and \ # extension.language.lower() == 'c++') create_listing = self.pyrex_create_listing or \ getattr(extension, 'pyrex_create_listing', 0) cplus = self.pyrex_cplus or getattr(extension, 'pyrex_cplus', 0) or \ (extension.language and extension.language.lower() == 'c++') pyrex_gen_pxi = self.pyrex_gen_pxi or getattr(extension, 'pyrex_gen_pxi', 0) # Set up the include_path for the Pyrex compiler: # 1. Start with the command line option. # 2. Add in any (unique) paths from the extension # pyrex_include_dirs (if Pyrex.Distutils.extension is used). # 3. Add in any (unique) paths from the extension include_dirs includes = self.pyrex_include_dirs try: for i in extension.pyrex_include_dirs: if not i in includes: includes.append(i) except AttributeError: pass for i in extension.include_dirs: if not i in includes: includes.append(i) # Set the target_ext to '.c'. Pyrex will change this to '.cpp' if # needed. if cplus: target_ext = '.cpp' else: target_ext = '.c' # Decide whether to drop the generated C files into the temp dir # or the source tree. if not self.inplace and (self.pyrex_c_in_temp or getattr(extension, 'pyrex_c_in_temp', 0)): target_dir = os.path.join(self.build_temp, "pyrex") else: target_dir = "" for source in sources: (base, ext) = os.path.splitext(source) if ext == ".pyx": # Pyrex source file new_sources.append(os.path.join(target_dir, base + target_ext)) pyrex_sources.append(source) pyrex_targets[source] = new_sources[-1] else: new_sources.append(source) if not pyrex_sources: return new_sources for source in pyrex_sources: target = pyrex_targets[source] # source_time = os.stat(source).st_mtime # try: # target_time = os.stat(target).st_mtime # newer = source_time > target_time # except EnvironmentError: # newer = 1 # if newer: if self.force or newer(source, target): log.info("pyrexc %s --> %s", source, target) self.mkpath(os.path.dirname(target)) options = CompilationOptions(pyrex_default_options, use_listing_file = create_listing, include_path = includes, output_file = target, cplus = cplus, generate_pxi = pyrex_gen_pxi) result = pyrex_compile(source, options=options) return new_sources # pyrex_sources () # class build_ext
self.check_extensions_list(self.extensions) for ext in self.extensions: ext.sources = self.pyrex_sources(ext.sources, ext) self.build_extension(ext)
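# A minimal, hypothetical setup.py sketch showing how the build_ext command above
# is typically wired in. The package name "demo" and the source file "demo.pyx"
# are placeholders, and Pyrex itself must be installed (otherwise pyrex_sources()
# raises DistutilsPlatformError as shown above); it assumes the documented
# `from Pyrex.Distutils import build_ext` re-export of the command class.
from distutils.core import setup
from distutils.extension import Extension
from Pyrex.Distutils import build_ext

setup(
    name="demo",
    # Each .pyx source is translated to C by pyrex_sources() before compilation.
    ext_modules=[Extension("demo", ["demo.pyx"])],
    # Route the standard build_ext step through the Pyrex-aware command above.
    cmdclass={"build_ext": build_ext},
)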
swiper.js
import Nerv from 'nervjs'
import classNames from 'classnames'
import './style/swiper.scss'

class Swiper extends Nerv.Component {
  constructor (props) {
    super(props)
    this.state = {
      currentIndex: this.props.current,
      containerWidth: 0,
      containerHeight: 0,
      // touch
      touching: false,
      og: 0,
      ogTranslate: 0,
      touchId: undefined,
      translate: 0,
      animating: false
    }
    this.SwiperTimer = null
    this.handleTouchStart = this.handleTouchStart.bind(this)
    this.handleTouchMove = this.handleTouchMove.bind(this)
    this.handleTouchEnd = this.handleTouchEnd.bind(this)
    this.pauseAutoPlay = this.pauseAutoPlay.bind(this)
    this.autoplay = this.autoplay.bind(this)
    this.computedChangeContainer = this.computedChangeContainer.bind(this)
  }

  componentDidMount () {
    let $container = Nerv.findDOMNode(this.SwiperWp)
    // default offset
    let offsetVal =
      this.props.current <= this.props.children.length + 2
        ? !this.props.vertical
          ? $container.offsetWidth * -this.props.current
          : $container.offsetHeight * -this.props.current
        : 0
    let childLen = this.props.children.length
    // whether seamless (circular) sliding is used
    // if (this.props.circular) {
    offsetVal = this.props.vertical
      ? -$container.offsetHeight * (this.props.current + 1)
      : -$container.offsetWidth * (this.props.current + 1)
    childLen = childLen + 2
    // }
    this.setState({
      containerWidth: $container.offsetWidth, // outer container width
      containerHeight: $container.offsetHeight, // outer container height
      wrapperWidth: !this.props.vertical // slide wrapper: when not sliding vertically, compute the total width
        ? $container.offsetWidth * childLen
        : $container.offsetWidth,
      wrapperHeight: this.props.vertical // slide wrapper: when sliding vertically, compute the total height
        ? $container.offsetHeight * childLen
        : $container.offsetHeight,
      // compute the translate for the given index
      translate: offsetVal
    })
    if (this.props.autoplay) this.autoplay(this.props.autoplay)
  }

  componentWillReceiveProps (nextProps) {
    const { interval, autoplay, circular } = nextProps
    this.pauseAutoPlay()
    // this.updateCurrentIndex(current)
    if (!circular) {
      this.computedChangeContainer()
    }
    if (!autoplay) return
    this.autoplay(interval)
  }

  componentWillUnmount () {
    this.pauseAutoPlay()
  }

  // when seamless sliding is toggled, work out the current index
  computedChangeContainer () {
    if (this.props.children.length - 1 === this.state.currentIndex) {
      this.pauseAutoPlay()
      return true
    }
    return false
  }

  // update the index
  updateCurrentIndex (currentIndex) {
    let cur =
      currentIndex === this.props.children.length - 1
        ? 0
        : currentIndex
    let tr = this.state.translate
    let slideVal
    // scroll distance, vertical or horizontal
    if (!this.props.vertical) {
      slideVal =
        this.state.containerWidth *
        Math.abs(currentIndex - this.state.currentIndex)
    } else {
      slideVal =
        this.state.containerHeight *
        Math.abs(currentIndex - this.state.currentIndex)
    }
    this.setState(
      {
        animating: true,
        translate: tr - slideVal,
        currentIndex: cur
      },
      () => {
        setTimeout(() => {
          this.computedTranslate()
        }, this.props.duration)
      }
    )
  }

  // whether we are currently on the last page
  isLastIndex () {
    return this.state.currentIndex === this.props.children.length - 1
  }

  // whether we are currently on the first page
  isFirstIndex () {
    return this.state.currentIndex === 0
  }

  handleTouchStart (e) {
    if (this.state.animating) return
    if (this.state.touching || this.props.children.length <= 1) return
    if (this.props.autoplay) this.pauseAutoPlay()
    let og = 0
    if (!this.props.vertical) {
      og = e.targetTouches[0].pageX - this.state.translate
    } else {
      og = e.targetTouches[0].pageY - this.state.translate
    }
    this.touchStartX = e.touches[0].pageX
    this.touchStartY = e.touches[0].pageY
    this.setState({
      touching: true,
      ogTranslate: this.state.translate,
      touchId: e.targetTouches[0].identifier,
      og: og,
      animating: false
    })
  }

  handleTouchMove (e) {
    // when not in seamless mode, stop dragging past the first and last pages
    if (!this.props.circular) {
      // compute the offset and work out whether the swipe goes left or right
      let touchEndX = e.touches[0].pageX
      let touchEndY = e.touches[0].pageY
      let offsetMoveX = touchEndX - this.touchStartX
      let offsetMoveY = touchEndY - this.touchStartY
      if (this.isFirstIndex()) {
        if (
          (this.props.vertical && offsetMoveY > 0) ||
          (!this.props.vertical && offsetMoveX > 0)
        ) {
          return
        }
      }
      if (this.isLastIndex()) {
        if (
          (this.props.vertical && offsetMoveY < 0) ||
          (!this.props.vertical && offsetMoveX < 0)
        ) {
          return
        }
      }
    }
    if (!this.state.touching || this.props.children.length <= 1) return
    if (e.targetTouches[0].identifier !== this.state.touchId) return
    e.preventDefault()
    let diff = this.state.translate
    if (!this.props.vertical) {
      const pageX = e.targetTouches[0].pageX
      diff = pageX - this.state.og
    } else {
      // vertical
      const pageY = e.targetTouches[0].pageY
      diff = pageY - this.state.og
    }
    this.setState({ translate: diff })
  }

  handleTouchEnd (e) {
    if (!this.state.touching || this.props.children.length <= 1) return
    let translate = this.state.translate
    let max = !this.props.vertical
      ? this.state.wrapperWidth - this.state.containerWidth
      : this.state.wrapperHeight - this.state.containerHeight
    let currentIndex = this.state.currentIndex
    let ogIndex = currentIndex
    if (translate > 0) {
      // start
      translate = 0
    } else if (translate < -max) {
      translate = -max
    } else {
      // default case
      let changeV = this.isChangeSlide(translate, currentIndex)
      translate = changeV.translate
      currentIndex = changeV.currentIndex
    }
    this.setState(
      {
        touching: false,
        og: 0,
        touchId: undefined,
        ogTranslate: 0,
        animating: true,
        translate,
        currentIndex
      },
      () =>
        setTimeout(() => {
          this.computedTranslate()
        }, this.props.duration)
    )
    if (this.props.onChange) this.props.onChange(ogIndex, currentIndex)
    if (this.props.autoplay) this.pauseAutoPlay()
  }

  computedTranslate () {
    // if (this.props.circular) {
    let prvTranslate = this.state.translate
    // horizontal vs. vertical
    if (!this.props.vertical) {
      if (prvTranslate === 0) {
        prvTranslate = -(
          this.state.wrapperWidth - this.state.containerWidth * 2
        )
      }
      // subtract one width because after the final silent rollback to the front
      // the slider itself still carries one translate offset
      if (
        prvTranslate ===
        -(this.state.wrapperWidth - this.state.containerWidth)
      ) {
        prvTranslate = -this.state.containerWidth
      }
    } else {
      if (prvTranslate === 0) {
        prvTranslate = -(
          this.state.wrapperHeight - this.state.containerHeight * 2
        )
      }
      // subtract one height because after the final silent rollback to the front
      // the slider itself still carries one translate offset
      if (
        prvTranslate ===
        -(this.state.wrapperHeight - this.state.containerHeight)
      ) {
        prvTranslate = -this.state.containerHeight
      }
    }
    this.setState({ animating: false, translate: prvTranslate })
    // } else {
    //   this.setState({animating: false})
    // }
  }

  // compute the next (auto-incremented) index
  addCurrentIndex (currentIndex) {
    return currentIndex === this.props.children.length - 1
      ? 0
      : (currentIndex += 1)
  }

  autoplay (interval) {
    this.SwiperTimer = setInterval(() => {
      this.slideNext()
    }, interval)
  }

  pauseAutoPlay () {
    clearInterval(this.SwiperTimer)
  }

  slideNext () {
    if (!this.props.circular) {
      if (this.computedChangeContainer()) {
        return
      }
    }
    let cur = this.addCurrentIndex(this.state.currentIndex)
    let tr = this.state.translate
    let slideVal
    // scroll distance, vertical or horizontal
    if (!this.props.vertical) {
      slideVal = this.state.containerWidth
    } else {
      slideVal = this.state.containerHeight
    }
    this.setState(
      {
        animating: true,
        translate: tr - slideVal,
        currentIndex: cur
      },
      () => {
        setTimeout(() => {
          this.computedTranslate()
        }, this.props.duration)
      }
    )
  }

  isChangeSlide (translate, currentIndex) {
    // only switch slides when the drag covers more than half the container
    let threshold = !this.props.vertical
      ? this.state.containerWidth / 2
      : this.state.containerHeight / 2
    let diff = Math.abs(translate - this.state.ogTranslate)
    let isNext = translate - this.state.ogTranslate < 0
    if (diff > threshold) {
      if (isNext) {
        // next slide
        translate =
          this.state.ogTranslate -
          (!this.props.vertical
            ? this.state.containerWidth
            : this.state.containerHeight)
        currentIndex = this.addCurrentIndex(currentIndex)
      } else {
        // prev slide
        translate =
          this.state.ogTranslate +
          (!this.props.vertical
            ? this.state.containerWidth
            : this.state.containerHeight)
        currentIndex =
          currentIndex === 0
            ? (currentIndex = this.props.children.length - 1)
            : (currentIndex -= 1)
      }
    } else {
      // revert back
      translate = this.state.ogTranslate
    }
    return { translate, currentIndex }
  }

  renderPagination (indicatorColor, indicatorActiveColor) {
    const childs = this.props.children.map((child, i) => {
      let clx = classNames('swiper__pagination-bullet', {
        active: i === this.state.currentIndex
      })
      let indiStyle = {
        background:
          i === this.state.currentIndex
            ? indicatorActiveColor
            : indicatorColor
      }
      return <span className={clx} key={i} style={indiStyle} />
    })
    return childs
  }

  render () {
    const {
      className,
      indicatorDots,
      vertical,
      children,
      circular,
'swiper__container-vertical': vertical,
      'swiper__container-horizontal': !vertical
    })
    let items = [].concat(children)
    // for seamless sliding, duplicate the first and last slides
    // if (circular) {
    if (items.length !== 0) {
      const firstItem = items[0]
      const lastItem = items[items.length - 1]
      items.push(firstItem)
      items.unshift(lastItem)
    }
    // }
    let wrapperStyle = {
      width: this.state.wrapperWidth,
      height: this.state.wrapperHeight,
      transition: this.state.animating
        ? `transform ${duration}ms ease-in-out`
        : 'none',
      transform: `translate(${!vertical ? this.state.translate : 0}px, ${
        vertical ? this.state.translate : 0
      }px)`
    }
    return (
      <div
        className={cls}
        ref={SwiperWp => {
          this.SwiperWp = SwiperWp
        }}
        onTouchStart={this.handleTouchStart}
        onTouchMove={this.handleTouchMove}
        onTouchEnd={this.handleTouchEnd}
      >
        <div className='swiper__wrapper' style={wrapperStyle}>
          {items.map((child, i) => {
            const c = child.props.children
            // return directly when there are no children
            if (c.length === 0) return <div className='swiper__item' />
            const cls = classNames('swiper__item', c.props.className)
            // inherit and extend the style; it may be an Object or a String
            let sty
            if (typeof c.props.style === 'string') {
              let display = !vertical
                ? ';display: inline-block;'
                : ';display: block;'
              let verticalAlign = !vertical
                ? ';vertical-align: top;'
                : ';vertical-align:bottom;'
              let w = `;width: ${this.state.containerWidth}px;`
              let h = `;height: ${this.state.containerHeight}px;`
              sty = c.props.style + verticalAlign + display + w + h
            } else {
              sty = Object.assign({}, c.props.style, {
                display: !vertical ? 'inline-block' : 'block',
                verticalAlign: !vertical ? 'top' : 'bottom',
                width: this.state.containerWidth,
                height: this.state.containerHeight
              })
            }
            if (circular) i = i - 1
            return Nerv.cloneElement(c, { key: i, className: cls, style: sty })
          })}
        </div>
        {indicatorDots ? (
          <div className='swiper__pagination'>
            {this.renderPagination(
              this.props.indicatorColor,
              this.props.indicatorActiveColor
            )}
          </div>
        ) : (
          false
        )}
      </div>
    )
  }
}

// default configuration
Swiper.defaultProps = {
  indicatorDots: false, // whether to show the indicator dots
  indicatorColor: 'rgba(0, 0, 0, .3)', // indicator dot color
  indicatorActiveColor: '#000000', // color of the currently selected indicator dot
  autoplay: false, // autoplay
  current: 0, // index of the current slide
  interval: 5000, // interval between slide transitions
  duration: 500, // duration of the slide animation
  circular: false, // whether to use seamless (circular) sliding
  vertical: false // whether the slide direction is vertical
}

class SwiperItem extends Nerv.Component {
  constructor (props) {
    super(props)
  }

  render () {
    return ''
  }
}

export { SwiperItem, Swiper }
duration } = this.props const cls = classNames('swiper__container', className, {
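// A hedged usage sketch for the Swiper component above. The './swiper' import
// path and the '#app' mount node are hypothetical; note that componentDidMount
// passes the autoplay prop straight to setInterval as the delay, so a
// millisecond value is used here rather than a boolean.
import Nerv from 'nervjs'
import { Swiper, SwiperItem } from './swiper'

const demo = (
  <Swiper autoplay={3000} duration={300} circular indicatorDots current={0}>
    <SwiperItem>
      <div style={{ background: '#ccc' }}>slide 1</div>
    </SwiperItem>
    <SwiperItem>
      <div style={{ background: '#eee' }}>slide 2</div>
    </SwiperItem>
    <SwiperItem>
      <div style={{ background: '#aaa' }}>slide 3</div>
    </SwiperItem>
  </Swiper>
)

// Each SwiperItem wraps a single element; the Swiper clones that element and
// injects the computed width/height and display styles at render time.
Nerv.render(demo, document.getElementById('app'))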
gcpCatalog.ts
import fs from 'fs'; import axios, { AxiosResponse } from 'axios'; import glob from 'glob'; import config from '../config'; import { Product, Price } from '../db/types'; import { generateProductHash, generatePriceHash } from '../db/helpers'; import { upsertProducts } from '../db/upsert'; const baseUrl = 'https://cloudbilling.googleapis.com/v1'; type ServiceJson = { serviceId: string; displayName: string; }; type SkusJson = { skus: ProductJson[]; nextPageToken: string; }; type ProductJson = { skuId: string; serviceRegions: string[]; category: { serviceDisplayName: string; resourceFamily: string; resourceGroup: string; usageType: string; }; description: string; pricingInfo: PricingJson[]; }; type PricingJson = { pricingExpression: { displayQuantity: number; usageUnitDescription: string; tieredRates: TieredRateJson[]; }; effectiveTime: string; }; type TieredRateJson = { startUsageAmount: number; unitPrice: { currencyCode: string; units: number; nanos: number; }; }; async function scrape(): Promise<void> { await downloadAll(); await loadAll(); } async function downloadAll(): Promise<void> { const services = await getServices(); for (const service of services) { try { await downloadService(service); } catch (e) { config.logger.error( `Skipping service ${service.displayName} due to error ${e}` ); config.logger.error(e.stack); } } } async function getServices(): Promise<ServiceJson[]> { let nextPageToken = ''; const services: ServiceJson[] = []; do { let nextPageParam = ''; if (nextPageToken) { nextPageParam = `&pageToken=${nextPageToken}`; } const resp = await axios.get( `${baseUrl}/services?key=${config.gcpApiKey}${nextPageParam}` ); services.push(...(<ServiceJson[]>resp.data.services)); nextPageToken = resp.data.nextPageToken; } while (nextPageToken); return services; } async function downloadService(service: ServiceJson): Promise<void> { config.logger.info(`Downloading ${service.displayName}`); let nextPageToken = ''; let pageNum = 1; do { let nextPageParam = ''; if (nextPageToken) { nextPageParam = `&pageToken=${nextPageToken}`; } let resp: AxiosResponse | null = null; let success = false; let attempts = 0; do { try { attempts++; resp = await axios({ method: 'get', url: `${baseUrl}/services/${service.serviceId}/skus?key=${config.gcpApiKey}${nextPageParam}`, responseType: 'stream', }); success = true; } catch (err) { // Too many requests, sleep and retry
if (err.response.status === 429) { config.logger.info( 'Too many requests, sleeping for 30s and retrying' ); await sleep(30000); } else { throw err; } } } while (!success && attempts < 3); if (!resp) { return; } let filename = `gcp-${service.displayName}-${pageNum}`; filename = filename.replace(/\//g, '-'); filename = filename.replace(/\./g, '-'); filename = `data/${filename}.json`; const writer = fs.createWriteStream(filename); resp.data.pipe(writer); await new Promise((resolve) => { writer.on('finish', resolve); }); const body = fs.readFileSync(filename); const json = <SkusJson>JSON.parse(body.toString()); nextPageToken = json.nextPageToken; pageNum++; } while (nextPageToken); } function sleep(ms: number) { return new Promise((resolve) => { setTimeout(resolve, ms); }); } async function loadAll(): Promise<void> { for (const filename of glob.sync('data/gcp-*.json')) { config.logger.info(`Processing file: ${filename}`); try { await processFile(filename); } catch (e) { config.logger.error(`Skipping file ${filename} due to error ${e}`); config.logger.error(e.stack); } } } async function processFile(filename: string): Promise<void> { const body = fs.readFileSync(filename); const json = <SkusJson>JSON.parse(body.toString()); const products: Product[] = []; json.skus.forEach((productJson) => { productJson.serviceRegions.forEach((region) => { const product = parseProduct(productJson, region); products.push(product); }); }); await upsertProducts(products); } function parseProduct(productJson: ProductJson, region: string): Product { const product: Product = { productHash: '', sku: productJson.skuId, vendorName: 'gcp', region, service: productJson.category.serviceDisplayName, productFamily: productJson.category.resourceFamily, attributes: { description: productJson.description, resourceGroup: productJson.category.resourceGroup, }, prices: [], }; product.productHash = generateProductHash(product); product.prices = parsePrices(product, productJson); return product; } function parsePrices(product: Product, productJson: ProductJson): Price[] { const prices: Price[] = []; productJson.pricingInfo.forEach((pricingJson) => { for (let i = 0; i < pricingJson.pricingExpression.tieredRates.length; i++) { const tierJson = pricingJson.pricingExpression.tieredRates[i]; const nextTierJson = pricingJson.pricingExpression.tieredRates[i + 1]; const price: Price = { priceHash: '', purchaseOption: productJson.category.usageType, unit: pricingJson.pricingExpression.usageUnitDescription, USD: `${tierJson.unitPrice.units}.${tierJson.unitPrice.nanos .toString() .padStart(9, '0')}`, effectiveDateStart: pricingJson.effectiveTime, startUsageAmount: tierJson.startUsageAmount.toString(), endUsageAmount: nextTierJson ? nextTierJson.startUsageAmount.toString() : undefined, }; price.priceHash = generatePriceHash(product, price); prices.push(price); } }); return prices; } export default { scrape, };
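// A hedged usage sketch for the scraper above: the relative import path is
// hypothetical, and it assumes config.gcpApiKey and config.logger are set up as
// the module expects. scrape() downloads every billing SKU page into
// data/gcp-*.json, then parses the SKUs per region and upserts the products.
import gcpCatalog from './gcpCatalog';

async function run(): Promise<void> {
  await gcpCatalog.scrape();
}

run().catch((err) => {
  console.error(err);
  process.exit(1);
});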
storage.rs
// This file is part of Substrate.

// Copyright (C) 2020-2021 Parity Technologies (UK) Ltd.
// SPDX-License-Identifier: Apache-2.0

// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// 	http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::pallet::Def;
use crate::pallet::parse::storage::{Metadata, QueryKind};
use frame_support_procedural_tools::clean_type_string;

/// Generate the prefix_ident related to the storage.
/// prefix_ident is used for the prefix struct to be given to storage as first generic param.
fn prefix_ident(storage_ident: &syn::Ident) -> syn::Ident {
	syn::Ident::new(&format!("_GeneratedPrefixForStorage{}", storage_ident), storage_ident.span())
}

/// * generate StoragePrefix structs (e.g. for a storage `MyStorage` a struct with the name
///   `_GeneratedPrefixForStorage$NameOfStorage` is generated) and implement the StorageInstance trait
/// * replace the first generic `_` by the generated prefix structure
/// * generate metadata
pub fn expand_storages(def: &mut Def) -> proc_macro2::TokenStream
{ let frame_support = &def.frame_support; let frame_system = &def.frame_system; let pallet_ident = &def.pallet_struct.pallet; // Replace first arg `_` by the generated prefix structure. // Add `#[allow(type_alias_bounds)]` for storage_def in def.storages.iter_mut() { let item = &mut def.item.content.as_mut().expect("Checked by def").1[storage_def.index]; let typ_item = if let syn::Item::Type(t) = item { t } else { unreachable!("Checked by def"); }; typ_item.attrs.push(syn::parse_quote!(#[allow(type_alias_bounds)])); let typ_path = if let syn::Type::Path(p) = &mut *typ_item.ty { p } else { unreachable!("Checked by def"); }; let args = if let syn::PathArguments::AngleBracketed(args) = &mut typ_path.path.segments[0].arguments { args } else { unreachable!("Checked by def"); }; let type_use_gen = if def.config.has_instance { quote::quote_spanned!(storage_def.attr_span => T, I) } else { quote::quote_spanned!(storage_def.attr_span => T) }; let prefix_ident = prefix_ident(&storage_def.ident); args.args[0] = syn::parse_quote!( #prefix_ident<#type_use_gen> ); } let entries = def.storages.iter() .map(|storage| { let docs = &storage.docs; let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); let metadata_trait = match &storage.metadata { Metadata::Value { .. } => quote::quote_spanned!(storage.attr_span => #frame_support::storage::types::StorageValueMetadata ), Metadata::Map { .. } => quote::quote_spanned!(storage.attr_span => #frame_support::storage::types::StorageMapMetadata ), Metadata::DoubleMap { .. } => quote::quote_spanned!(storage.attr_span => #frame_support::storage::types::StorageDoubleMapMetadata ), }; let ty = match &storage.metadata { Metadata::Value { value } => { let value = clean_type_string(&quote::quote!(#value).to_string()); quote::quote_spanned!(storage.attr_span => #frame_support::metadata::StorageEntryType::Plain( #frame_support::metadata::DecodeDifferent::Encode(#value) ) ) }, Metadata::Map { key, value } => { let value = clean_type_string(&quote::quote!(#value).to_string()); let key = clean_type_string(&quote::quote!(#key).to_string()); quote::quote_spanned!(storage.attr_span => #frame_support::metadata::StorageEntryType::Map { hasher: <#full_ident as #metadata_trait>::HASHER, key: #frame_support::metadata::DecodeDifferent::Encode(#key), value: #frame_support::metadata::DecodeDifferent::Encode(#value), unused: false, } ) }, Metadata::DoubleMap { key1, key2, value } => { let value = clean_type_string(&quote::quote!(#value).to_string()); let key1 = clean_type_string(&quote::quote!(#key1).to_string()); let key2 = clean_type_string(&quote::quote!(#key2).to_string()); quote::quote_spanned!(storage.attr_span => #frame_support::metadata::StorageEntryType::DoubleMap { hasher: <#full_ident as #metadata_trait>::HASHER1, key2_hasher: <#full_ident as #metadata_trait>::HASHER2, key1: #frame_support::metadata::DecodeDifferent::Encode(#key1), key2: #frame_support::metadata::DecodeDifferent::Encode(#key2), value: #frame_support::metadata::DecodeDifferent::Encode(#value), } ) } }; quote::quote_spanned!(storage.attr_span => #frame_support::metadata::StorageEntryMetadata { name: #frame_support::metadata::DecodeDifferent::Encode( <#full_ident as #metadata_trait>::NAME ), modifier: <#full_ident as #metadata_trait>::MODIFIER, ty: #ty, default: #frame_support::metadata::DecodeDifferent::Encode( <#full_ident as #metadata_trait>::DEFAULT ), documentation: 
#frame_support::metadata::DecodeDifferent::Encode(&[ #( #docs, )* ]), } ) }); let getters = def.storages.iter() .map(|storage| if let Some(getter) = &storage.getter { let completed_where_clause = super::merge_where_clauses(&[ &storage.where_clause, &def.config.where_clause, ]); let docs = storage.docs.iter() .map(|d| quote::quote_spanned!(storage.attr_span => #[doc = #d])); let ident = &storage.ident; let gen = &def.type_use_generics(storage.attr_span); let type_impl_gen = &def.type_impl_generics(storage.attr_span); let type_use_gen = &def.type_use_generics(storage.attr_span); let full_ident = quote::quote_spanned!(storage.attr_span => #ident<#gen> ); match &storage.metadata { Metadata::Value { value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter() -> #query { < #full_ident as #frame_support::storage::StorageValue<#value> >::get() } } ) }, Metadata::Map { key, value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter<KArg>(k: KArg) -> #query where KArg: #frame_support::codec::EncodeLike<#key>, { < #full_ident as #frame_support::storage::StorageMap<#key, #value> >::get(k) } } ) }, Metadata::DoubleMap { key1, key2, value } => { let query = match storage.query_kind.as_ref().expect("Checked by def") { QueryKind::OptionQuery => quote::quote_spanned!(storage.attr_span => Option<#value> ), QueryKind::ValueQuery => quote::quote!(#value), }; quote::quote_spanned!(storage.attr_span => impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #( #docs )* pub fn #getter<KArg1, KArg2>(k1: KArg1, k2: KArg2) -> #query where KArg1: #frame_support::codec::EncodeLike<#key1>, KArg2: #frame_support::codec::EncodeLike<#key2>, { < #full_ident as #frame_support::storage::StorageDoubleMap<#key1, #key2, #value> >::get(k1, k2) } } ) }, } } else { Default::default() }); let prefix_structs = def.storages.iter().map(|storage_def| { let type_impl_gen = &def.type_impl_generics(storage_def.attr_span); let type_use_gen = &def.type_use_generics(storage_def.attr_span); let prefix_struct_ident = prefix_ident(&storage_def.ident); let prefix_struct_vis = &storage_def.vis; let prefix_struct_const = storage_def.ident.to_string(); let config_where_clause = &def.config.where_clause; quote::quote_spanned!(storage_def.attr_span => #prefix_struct_vis struct #prefix_struct_ident<#type_use_gen>( core::marker::PhantomData<(#type_use_gen,)> ); impl<#type_impl_gen> #frame_support::traits::StorageInstance for #prefix_struct_ident<#type_use_gen> #config_where_clause { fn pallet_prefix() -> &'static str { < <T as #frame_system::Config>::PalletInfo as #frame_support::traits::PalletInfo >::name::<Pallet<#type_use_gen>>() .expect("Every active pallet has a name in the runtime; qed") } const STORAGE_PREFIX: &'static str = #prefix_struct_const; } ) }); let mut where_clauses = vec![&def.config.where_clause]; where_clauses.extend(def.storages.iter().map(|storage| &storage.where_clause)); let completed_where_clause 
= super::merge_where_clauses(&where_clauses); let type_impl_gen = &def.type_impl_generics(proc_macro2::Span::call_site()); let type_use_gen = &def.type_use_generics(proc_macro2::Span::call_site()); quote::quote!( impl<#type_impl_gen> #pallet_ident<#type_use_gen> #completed_where_clause { #[doc(hidden)] pub fn storage_metadata() -> #frame_support::metadata::StorageMetadata { #frame_support::metadata::StorageMetadata { prefix: #frame_support::metadata::DecodeDifferent::Encode( < <T as #frame_system::Config>::PalletInfo as #frame_support::traits::PalletInfo >::name::<#pallet_ident<#type_use_gen>>() .expect("Every active pallet has a name in the runtime; qed") ), entries: #frame_support::metadata::DecodeDifferent::Encode( &[ #( #entries, )* ] ), } } } #( #getters )* #( #prefix_structs )* ) }
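// A hedged sketch of the kind of pallet input the expansion above consumes,
// written in the FRAME v2 attribute-macro syntax of this era; the pallet,
// storage and getter names are hypothetical. The leading `_` generic is the
// slot that expand_storages replaces with the generated
// `_GeneratedPrefixForStorage*` prefix struct, and each `#[pallet::getter(..)]`
// becomes a method on `Pallet<T>` as generated above.
#[frame_support::pallet]
pub mod pallet {
	use frame_support::pallet_prelude::*;
	use frame_system::pallet_prelude::*;

	#[pallet::config]
	pub trait Config: frame_system::Config {}

	#[pallet::pallet]
	pub struct Pallet<T>(core::marker::PhantomData<T>);

	#[pallet::hooks]
	impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {}

	#[pallet::call]
	impl<T: Config> Pallet<T> {}

	// Plain value: corresponds to the Metadata::Value arm and a `my_value()` getter.
	#[pallet::storage]
	#[pallet::getter(fn my_value)]
	pub type MyValue<T> = StorageValue<_, u32, ValueQuery>;

	// Map: corresponds to the Metadata::Map arm and a `my_map(key)` getter.
	#[pallet::storage]
	#[pallet::getter(fn my_map)]
	pub type MyMap<T> = StorageMap<_, Blake2_128Concat, u32, u64>;
}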
360p_038.ts
version https://git-lfs.github.com/spec/v1 oid sha256:0101320e3da6405a88d1f3d3006472d908f8e236671439f0a23a4cb9e40b3101
size 250040