file_name (stringlengths 3-137) | prefix (stringlengths 0-918k) | suffix (stringlengths 0-962k) | middle (stringlengths 0-812k)
---|---|---|---|
resnet50.rs | use crate::{input::*, model::*};
use egg::*;
fn resnet_block(
graph: &mut GraphConverter,
mut input: TensorInfo,
strides: (i32, i32),
out_channels: i32,
input_dim_1: i32,
) -> TensorInfo {
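// Bottleneck residual block: 1x1 conv to out_channels, 3x3 conv (optionally strided),
// then 1x1 conv expanding to 4*out_channels; a 1x1 projection shortcut is applied when
// the stride or channel count changes, and the sum passes through a final ReLU.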
let w1 = graph.new_weight(&[out_channels, input_dim_1, 1, 1]);
let tmp = graph.conv2d(
input, w1, /*stride_h=*/ 1, /*stride_w=*/ 1, /*padding=*/ PSAME,
/*activation=*/ ACTRELU,
);
let w2 = graph.new_weight(&[out_channels, out_channels, 3, 3]);
let tmp = graph.conv2d(
tmp, w2, /*stride_h=*/ strides.0, /*stride_w=*/ strides.1,
/*padding=*/ PSAME, /*activation=*/ ACTRELU,
);
let w3 = graph.new_weight(&[out_channels * 4, out_channels, 1, 1]);
let tmp = graph.conv2d(
tmp, w3, /*stride_h=*/ 1, /*stride_w=*/ 1, /*padding=*/ PSAME,
/*activation=*/ ACTNONE,
);
if (strides.0 > 1) || (input_dim_1 != out_channels * 4) {
let w4 = graph.new_weight(&[out_channels * 4, input_dim_1, 1, 1]);
input = graph.conv2d(
input, w4, /*stride_h=*/ strides.0, /*stride_w=*/ strides.1,
/*padding=*/ PSAME, /*activation=*/ ACTRELU,
);
}
let tmp = graph.add(input, tmp);
graph.relu(tmp)
}
/// Gets the RecExpr of a resnet50 model
pub fn get_resnet50() -> RecExpr<Mdl> | {
// Step 1: create a GraphConverter instance
let mut graph = GraphConverter::default();
// Step 2: define the graph
let input = graph.new_input(&[1, 64, 56, 56]);
let mut tmp = input;
let mut input_dim_1 = 64;
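// ResNet-50 body: four stages of 3, 4, 6 and 3 bottleneck blocks (64/128/256/512 base
// channels); each stage after the first downsamples with stride 2 in its first block.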
for _ in 0..3 {
let out_channels = 64;
tmp = resnet_block(
&mut graph,
tmp,
/*strides=*/ (1, 1),
out_channels,
input_dim_1,
);
input_dim_1 = out_channels * 4;
}
let mut strides = (2, 2);
for _ in 0..4 {
let out_channels = 128;
tmp = resnet_block(&mut graph, tmp, strides, out_channels, input_dim_1);
input_dim_1 = out_channels * 4;
strides = (1, 1);
}
strides = (2, 2);
for _ in 0..6 {
let out_channels = 256;
tmp = resnet_block(&mut graph, tmp, strides, out_channels, input_dim_1);
input_dim_1 = out_channels * 4;
strides = (1, 1);
}
strides = (2, 2);
for _ in 0..3 {
let out_channels = 512;
tmp = resnet_block(&mut graph, tmp, strides, out_channels, input_dim_1);
input_dim_1 = out_channels * 4;
strides = (1, 1);
}
// Step 3: get the RecExpr
graph.rec_expr()
} |
|
get_client_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package clients
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"io/ioutil"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/AccelByte/accelbyte-go-sdk/iam-sdk/pkg/iamclientmodels"
)
// GetClientReader is a Reader for the GetClient structure.
type GetClientReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetClientReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetClientOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 401:
result := NewGetClientUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 403:
result := NewGetClientForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 404:
result := NewGetClientNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
default:
data, err := ioutil.ReadAll(response.Body())
if err != nil {
return nil, err
}
return nil, fmt.Errorf("Requested GET /iam/clients/{clientId} returns an error %d: %s", response.Code(), string(data))
}
}
// NewGetClientOK creates a GetClientOK with default headers values
func NewGetClientOK() *GetClientOK {
return &GetClientOK{}
}
/*GetClientOK handles this case with default header values.
OK
*/
type GetClientOK struct {
Payload *iamclientmodels.ClientmodelClientResponse
}
func (o *GetClientOK) Error() string {
return fmt.Sprintf("[GET /iam/clients/{clientId}][%d] getClientOK %+v", 200, o.Payload)
}
func (o *GetClientOK) GetPayload() *iamclientmodels.ClientmodelClientResponse {
return o.Payload
}
func (o *GetClientOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(iamclientmodels.ClientmodelClientResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetClientUnauthorized creates a GetClientUnauthorized with default headers values
func | () *GetClientUnauthorized {
return &GetClientUnauthorized{}
}
/*GetClientUnauthorized handles this case with default header values.
Unauthorized access
*/
type GetClientUnauthorized struct {
}
func (o *GetClientUnauthorized) Error() string {
return fmt.Sprintf("[GET /iam/clients/{clientId}][%d] getClientUnauthorized ", 401)
}
func (o *GetClientUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewGetClientForbidden creates a GetClientForbidden with default headers values
func NewGetClientForbidden() *GetClientForbidden {
return &GetClientForbidden{}
}
/*GetClientForbidden handles this case with default header values.
Forbidden
*/
type GetClientForbidden struct {
}
func (o *GetClientForbidden) Error() string {
return fmt.Sprintf("[GET /iam/clients/{clientId}][%d] getClientForbidden ", 403)
}
func (o *GetClientForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewGetClientNotFound creates a GetClientNotFound with default headers values
func NewGetClientNotFound() *GetClientNotFound {
return &GetClientNotFound{}
}
/*GetClientNotFound handles this case with default header values.
Data not found
*/
type GetClientNotFound struct {
}
func (o *GetClientNotFound) Error() string {
return fmt.Sprintf("[GET /iam/clients/{clientId}][%d] getClientNotFound ", 404)
}
func (o *GetClientNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
| NewGetClientUnauthorized |
event.go | package midi
import "fmt"
type EventPair struct {
DeltaTime uint32
Event Event
}
func (ep *EventPair) MidiElement() {}
func (ep *EventPair) String() string {
return fmt.Sprintf("Time: %d, Event: %s", ep.DeltaTime, ep.Event)
}
/*
Event --- MidiEvent + MidiEventData
|
|- ... // TODO
*/
// General types
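// Event, MidiEventData, SysexEventData and MetaEventData are marker interfaces:
// concrete message types opt in by defining the corresponding empty method.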
type Event interface {
Event()
}
type MidiEventData interface {
MidiEventData()
}
type SysexEventData interface {
SysexEventData()
}
type MetaEventData interface {
MetaEventData()
}
type MidiEvent struct {
StatusByte byte
Message string
Channel byte
Values []byte
}
type SysexEvent struct {
Type byte | }
type MetaEvent struct {
Type byte
Data MetaEventData
}
type ChannelModeMessage struct {
Channel byte
StatusByte byte
Message string
Value byte
}
func (event *MidiEvent) Event() {}
func (event *SysexEvent) Event() {}
func (event *MetaEvent) Event() {}
func (event *ChannelModeMessage) Event() {}
// MidiEventData
type NoteOff struct {
Channel byte
Key byte
Velocity byte
}
type NoteOn struct {
Channel byte
Key byte
Velocity byte
}
type PolyphonicKeyPressure struct {
Channel byte
Key byte
Pressure byte
}
type ControllerChange struct {
Channel byte
Controller byte
Value byte
}
type ProgramChange struct {
Channel byte
Program byte
}
type ChannelKeyPressure struct {
Channel byte
Pressure byte
}
type PitchBend struct {
Channel byte
Lsb byte
Msb byte
}
func (self *NoteOff) MidiEventData() {}
func (self *NoteOn) MidiEventData() {}
func (self *PolyphonicKeyPressure) MidiEventData() {}
func (self *ControllerChange) MidiEventData() {}
func (self *ProgramChange) MidiEventData() {}
func (self *ChannelKeyPressure) MidiEventData() {}
func (self *PitchBend) MidiEventData() {}
// Sysex events
// TODO
// -- Meta events --
type SetTempo struct {
Tempo int
}
func (self *SetTempo) MetaEventData() {}
func (self *SetTempo) String() string {
return fmt.Sprintf("Tempo: %d", self.Tempo)
}
type TrackName struct {
Name string
}
func (self *TrackName) MetaEventData() {}
func (self *TrackName) String() string {
return fmt.Sprintf("Track name: %s", self.Name)
}
type TimeSignature struct {
Numerator byte
Denominator byte
Clocks byte
Notes byte
}
func (self *TimeSignature) MetaEventData() {}
func (self *TimeSignature) String() string {
return fmt.Sprintf("TimeSignature: %d/%d %d %d",
self.Numerator, self.Denominator, self.Clocks, self.Notes)
}
type Marker struct {
Name string
}
func (self *Marker) MetaEventData() {}
func (self *Marker) String() string {
return fmt.Sprintf("Marker: %s", self.Name)
}
type EndOfTrack struct{}
func (self *EndOfTrack) MetaEventData() {}
func (self *EndOfTrack) String() string {
return "End of Track"
}
// Channel mode messages
// not type safe (Experimental)
// deprecated
func (mes *ChannelModeMessage) String() string {
return fmt.Sprintf("Channel Mode Message Track: %d Type: %X (%s) Value: %d",
mes.Channel, mes.StatusByte, mes.Message, mes.Value)
}
func (me *MidiEvent) String() string {
return fmt.Sprintf("<Channel %d> %X (%s) [%v]", me.Channel+1, me.StatusByte, me.Message, me.Values)
} | IncludeF0 bool
Data SysexEventData |
core.py | # Copyright 2020 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import __main__
import glob
import logging
import os
import threading
import yaml
from klio_core import config
from klio_core.proto import klio_pb2
from klio.metrics import client as metrics_client
from klio.metrics import logger as metrics_logger
from klio.metrics import stackdriver
class RunConfig(object):
_thread_local = threading.local()
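# RunConfig.get() returns a thread-locally cached KlioConfig, loading it from the
# effective klio-job.yaml on first access; set() stores the config on __main__.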
@classmethod
def _load_config_from_file(cls):
# [Klio v2] this may get expensive, to always be reading config
# from a file. Can this be replaced by something in memory
# that's also globally accessible?
klio_job_file = "/usr/src/config/.effective-klio-job.yaml"
# for backwards compatibility, and user is using setup.py and we
# have to find it somewhere...
if not os.path.exists(klio_job_file):
# use iterator so we don't waste time searching everywhere upfront
files = glob.iglob("/usr/**/klio-job.yaml", recursive=True)
for f in files:
klio_job_file = f
# only grab the first one
break
with open(klio_job_file, "r") as f:
all_config_data = yaml.safe_load(f)
return config.KlioConfig(all_config_data)
# NOTE: for now this approach is not being used (and may be removed in the
# future)
@classmethod
def _get_via_main_session(cls):
if hasattr(__main__, "run_config"):
return __main__.run_config
else:
raise Exception(
"Attempt to access RunConfig before it was set. This likely"
" means something was imported before RunConfig was set."
)
@classmethod
def _get_via_thread_local(cls):
klio_config = getattr(cls._thread_local, "klio_config", None)
if not klio_config:
cls._thread_local.klio_config = cls._load_config_from_file()
return cls._thread_local.klio_config
@classmethod
def get(cls):
return cls._get_via_thread_local()
@classmethod
def set(cls, config):
__main__.run_config = config
class KlioContext(object):
"""Context related to the currently running job.
Available to transforms via one of the :ref:`KlioContext decorators
<klio-context-decorators>`.
"""
_thread_local = threading.local()
def __init__(self):
self.__transform_name = None
def _create_klio_job_obj(self):
klio_job = klio_pb2.KlioJob()
klio_job.job_name = self.config.job_name
klio_job.gcp_project = self.config.pipeline_options.project
klio_job_str = klio_job.SerializeToString()
return klio_job_str
def _get_metrics_registry(self):
clients = []
use_logger, use_stackdriver = None, None
metrics_config = self.config.job_config.metrics
# use_logger and use_stackdriver could be False (turn off),
# None (use default config), or a dict of configured values
use_logger = metrics_config.get("logger")
use_stackdriver = metrics_config.get("stackdriver_logger")
# TODO: set runner in OS environment (via klio-exec), since
# the runner defined in config could be overwritten via
# `--direct-runner`.
# i.e.: runner = os.getenv("BEAM_RUNNER", "").lower()
runner = self.config.pipeline_options.runner
if "dataflow" in runner.lower():
# Must explicitly compare to `False` since `None` could be
# the user accepting default config.
# If explicitly false, then just disable logger underneath SD
if use_stackdriver is not False:
sd_client = stackdriver.StackdriverLogMetricsClient(
self.config
)
clients.append(sd_client)
else: | use_logger = False
if not len(clients): # setup default client
disabled = False
# User might disable the logger, but we still need a relay
# client if all other relay clients are disabled. This allows
# folks to silence metrics but not need to remove code that
# interacts with `_klio.metrics`.
# Must explicitly compare to `False` since `None` could be
# the user accepting default config
if use_logger is False:
disabled = True
logger_client = metrics_logger.MetricsLoggerClient(
self.config, disabled=disabled
)
clients.append(logger_client)
return metrics_client.MetricsRegistry(
clients, transform_name=self._transform_name
)
@property
def config(self):
"""A ``KlioConfig`` instance representing the job's configuration."""
return RunConfig.get()
@property
def job(self):
"""An instance of :ref:`kliojob` of the current job."""
klio_job = getattr(self._thread_local, "klio_job", None)
if not klio_job:
self._thread_local.klio_job = self._create_klio_job_obj()
return self._thread_local.klio_job
@property
def logger(self):
"""A namespaced logger.
Equivalent to ``logging.getLogger("klio")``.
"""
klio_logger = getattr(self._thread_local, "klio_logger", None)
if not klio_logger:
self._thread_local.klio_logger = logging.getLogger("klio")
return self._thread_local.klio_logger
@property
def metrics(self):
"""A metrics registry instance.
See :ref:`metrics <metrics>` for more information."""
metrics_registry = getattr(self._thread_local, "klio_metrics", None)
if not metrics_registry:
self._thread_local.klio_metrics = self._get_metrics_registry()
return self._thread_local.klio_metrics
# <-- private/internal attributes -->
@property
def _transform_name(self):
return self.__transform_name
@_transform_name.setter
def _transform_name(self, name):
self.__transform_name = name | # if use_stackdriver is explicitly false, then make sure
# logger client is disabled since the stackdriver client
# inherits the logger client |
vision.py | #!/usr/bin/env python
import requests
import json
from time import sleep
import base64
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
bridge = CvBridge()
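# Flow: each frame from the /image topic is converted to OpenCV, written to disk,
# base64-encoded, and posted to the Google Cloud Vision API as a TEXT_DETECTION request.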
# Fix this to get an image file as a parameter (subscribe to a Node), and also take in coordinates
def getJSON(data):
try:
sign_img_cv2 = bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError:
print("Error: Cannot convert img_msg to cv2")
else:
cv2.imwrite('sign_img.jpeg', sign_img_cv2)
with open("sign_img.jpeg", "rb") as imageFile:
strtest = base64.b64encode(imageFile.read())
str2 = strtest.decode("utf-8", "backslashreplace")
# JSON format expected
dataSend = {
"requests": [
{
"image": {
"content": str2
},
"features": [
{
"type": "TEXT_DETECTION"
}]}]}
r = requests.post("https://vision.googleapis.com/v1/images:annotate?key=AIzaSyAzgApTEy_zJacjx7EgA6AGTcEfxl9Gako", json = dataSend)
# Return the Vision API response rather than the request payload.
return r.json()
def talker():
pub = rospy.Publisher('jsonposter', String, queue_size=50)
rospy.init_node('poster', anonymous=True)
rate = rospy.Rate(10) # 10hz
while not rospy.is_shutdown():
response = json.dumps(getJSON())
rospy.loginfo(response)
pub.publish(response)
rate.sleep()
def | ():
rospy.init_node('listener', anonymous=True)
topic = "/image"
rospy.Subscriber(topic, Image, getJSON)
if __name__ == '__main__':
try:
talker()
except rospy.ROSInterruptException:
pass
| listener |
faucet.rs | //! The `faucet` module provides an object for launching a Solana Faucet,
//! which is the custodian of any remaining lamports in a mint.
//! The Solana Faucet builds and sends airdrop transactions,
//! checking requests against a single-request cap and a per-IP limit
//! for a given time slice.
use {
bincode::{deserialize, serialize, serialized_size},
byteorder::{ByteOrder, LittleEndian},
log::*,
serde_derive::{Deserialize, Serialize},
solana_metrics::datapoint_info,
solana_sdk::{
hash::Hash,
instruction::Instruction,
message::Message,
native_token::lamports_to_sol,
packet::PACKET_DATA_SIZE,
pubkey::Pubkey,
signature::{Keypair, Signer},
system_instruction,
transaction::Transaction,
},
std::{
collections::{HashMap, HashSet},
io::{Read, Write},
net::{IpAddr, Ipv4Addr, SocketAddr, TcpStream},
sync::{mpsc::Sender, Arc, Mutex},
thread,
time::Duration,
},
thiserror::Error,
tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::{TcpListener, TcpStream as TokioTcpStream},
runtime::Runtime,
},
};
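// Convenience macro: builds a SocketAddr either from (ip, port) parts or by parsing
// a "host:port" string literal.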
#[macro_export]
macro_rules! socketaddr {
($ip:expr, $port:expr) => {
SocketAddr::from((Ipv4Addr::from($ip), $port))
};
($str:expr) => {{
let a: SocketAddr = $str.parse().unwrap();
a
}};
}
const ERROR_RESPONSE: [u8; 2] = 0u16.to_le_bytes();
pub const TIME_SLICE: u64 = 60;
pub const REQUEST_CAP: u64 = solana_sdk::native_token::LAMPORTS_PER_VLX * 10_000_000;
pub const FAUCET_PORT: u16 = 9900;
pub const FAUCET_PORT_STR: &str = "9900";
#[derive(Error, Debug)]
pub enum FaucetError {
#[error("IO Error: {0}")]
IoError(#[from] std::io::Error),
#[error("serialization error: {0}")]
Serialize(#[from] bincode::Error),
#[error("transaction_length from faucet exceeds limit: {0}")]
TransactionDataTooLarge(usize),
#[error("transaction_length from faucet: 0")]
NoDataReceived,
#[error("request too large; req: ◎{0}, cap: ◎{1}")]
PerRequestCapExceeded(f64, f64),
#[error("limit reached; req: ◎{0}, to: {1}, current: ◎{2}, cap: ◎{3}")]
PerTimeCapExceeded(f64, String, f64, f64),
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum FaucetRequest {
GetAirdrop {
lamports: u64,
to: Pubkey,
blockhash: Hash,
},
}
impl Default for FaucetRequest {
fn default() -> Self {
Self::GetAirdrop {
lamports: u64::default(),
to: Pubkey::default(),
blockhash: Hash::default(),
}
}
}
pub enum FaucetTransaction {
Airdrop(Transaction),
Memo((Transaction, String)),
}
pub struct Faucet {
faucet_keypair: Keypair,
ip_cache: HashMap<IpAddr, u64>,
address_cache: HashMap<Pubkey, u64>,
pub time_slice: Duration,
per_time_cap: Option<u64>,
per_request_cap: Option<u64>,
allowed_ips: HashSet<IpAddr>,
}
impl Faucet {
pub fn new(
faucet_keypair: Keypair,
time_input: Option<u64>,
per_time_cap: Option<u64>,
per_request_cap: Option<u64>,
) -> Self {
Self::new_with_allowed_ips(
faucet_keypair,
time_input,
per_time_cap,
per_request_cap,
HashSet::new(),
)
}
pub fn new_with_allowed_ips(
faucet_keypair: Keypair,
time_input: Option<u64>,
per_time_cap: Option<u64>,
per_request_cap: Option<u64>,
allowed_ips: HashSet<IpAddr>,
) -> Self {
let time_slice = Duration::new(time_input.unwrap_or(TIME_SLICE), 0);
if let Some((per_request_cap, per_time_cap)) = per_request_cap.zip(per_time_cap) {
if per_time_cap < per_request_cap {
warn!(
"per_time_cap {} SOL < per_request_cap {} SOL; \
maximum single requests will fail",
lamports_to_sol(per_time_cap),
lamports_to_sol(per_request_cap),
);
}
}
Self {
faucet_keypair,
ip_cache: HashMap::new(),
address_cache: HashMap::new(),
time_slice,
per_time_cap,
per_request_cap,
allowed_ips,
}
}
pub fn check_time_request_limit<T: LimitByTime + std::fmt::Display>(
&mut self,
request_amount: u64,
to: T,
) -> Result<(), FaucetError> {
let new_total = to.check_cache(self, request_amount);
to.datapoint_info(request_amount, new_total);
if let Some(cap) = self.per_time_cap {
if new_total > cap {
return Err(FaucetError::PerTimeCapExceeded(
lamports_to_sol(request_amount),
to.to_string(),
lamports_to_sol(new_total),
lamports_to_sol(cap),
));
}
}
Ok(())
}
pub fn clear_caches(&mut self) {
self.ip_cache.clear();
self.address_cache.clear();
}
/// Checks per-request and per-time-ip limits; if both pass, this method returns a signed
/// SystemProgram::Transfer transaction from the faucet keypair to the requested recipient. If
/// the request exceeds this per-request limit, this method returns a signed SPL Memo
/// transaction with the memo: "request too large; req: <REQUEST> SOL cap: <CAP> SOL"
pub fn build_airdrop_transaction(
&mut self,
req: FaucetRequest,
ip: IpAddr,
) -> Result<FaucetTransaction, FaucetError> {
trace!("build_airdrop_transaction: {:?}", req);
match req {
FaucetRequest::GetAirdrop {
lamports,
to,
blockhash,
} => {
let mint_pubkey = self.faucet_keypair.pubkey();
info!(
"Requesting airdrop of {} SOL to {:?}",
lamports_to_sol(lamports),
to
);
if let Some(cap) = self.per_request_cap {
if lamports > cap {
let memo = format!(
"{}",
FaucetError::PerRequestCapExceeded(
lamports_to_sol(lamports),
lamports_to_sol(cap),
)
);
let memo_instruction = Instruction {
program_id: Pubkey::new(&spl_memo::id().to_bytes()),
accounts: vec![],
data: memo.as_bytes().to_vec(),
};
let message = Message::new(&[memo_instruction], Some(&mint_pubkey));
return Ok(FaucetTransaction::Memo((
Transaction::new(&[&self.faucet_keypair], message, blockhash),
memo,
)));
}
}
if !ip.is_loopback() && !self.allowed_ips.contains(&ip) {
self.check_time_request_limit(lamports, ip)?;
}
self.check_time_request_limit(lamports, to)?;
let transfer_instruction =
system_instruction::transfer(&mint_pubkey, &to, lamports);
let message = Message::new(&[transfer_instruction], Some(&mint_pubkey));
Ok(FaucetTransaction::Airdrop(Transaction::new(
&[&self.faucet_keypair],
message,
blockhash,
)))
}
}
}
/// Deserializes a received airdrop request, and returns a serialized transaction
pub fn process_faucet_request(
&mut self,
bytes: &[u8],
ip: IpAddr,
) -> Result<Vec<u8>, FaucetError> {
let req: FaucetRequest = deserialize(bytes)?;
info!("Airdrop transaction requested...{:?}", req);
let res = self.build_airdrop_transaction(req, ip);
match res {
Ok(tx) => {
let tx = match tx {
FaucetTransaction::Airdrop(tx) => {
info!("Airdrop transaction granted");
tx
}
FaucetTransaction::Memo((tx, memo)) => {
warn!("Memo transaction returned: {}", memo);
tx
}
};
let response_vec = bincode::serialize(&tx)?;
let mut response_vec_with_length = vec![0; 2];
LittleEndian::write_u16(&mut response_vec_with_length, response_vec.len() as u16);
response_vec_with_length.extend_from_slice(&response_vec);
Ok(response_vec_with_length)
}
Err(err) => {
warn!("Airdrop transaction failed: {}", err);
Err(err)
}
}
}
}
impl Drop for Faucet {
fn drop(&mut self) {
solana_metrics::flush();
}
}
pub fn request_airdrop_transaction(
faucet_addr: &SocketAddr,
id: &Pubkey,
lamports: u64,
blockhash: Hash,
) -> Result<Transaction, FaucetError> {
info!(
"request_airdrop_transaction: faucet_addr={} id={} lamports={} blockhash={}",
faucet_addr, id, lamports, blockhash
);
let mut stream = TcpStream::connect_timeout(faucet_addr, Duration::new(3, 0))?;
stream.set_read_timeout(Some(Duration::new(10, 0)))?;
let req = FaucetRequest::GetAirdrop {
lamports,
blockhash,
to: *id,
};
let req = serialize(&req).expect("serialize faucet request");
stream.write_all(&req)?;
// Read length of transaction
let mut buffer = [0; 2];
stream.read_exact(&mut buffer).map_err(|err| {
info!(
"request_airdrop_transaction: buffer length read_exact error: {:?}",
err
);
err
})?;
let transaction_length = LittleEndian::read_u16(&buffer) as usize;
if transaction_length > PACKET_DATA_SIZE {
return Err(FaucetError::TransactionDataTooLarge(transaction_length));
} else if transaction_length == 0 {
return Err(FaucetError::NoDataReceived);
}
// Read the transaction
let mut buffer = Vec::new();
buffer.resize(transaction_length, 0);
stream.read_exact(&mut buffer).map_err(|err| {
info!(
"request_airdrop_transaction: buffer read_exact error: {:?}",
err
);
err
})?;
let transaction: Transaction = deserialize(&buffer)?;
Ok(transaction)
}
pub fn run_local_faucet_with_port(
faucet_keypair: Keypair,
sender: Sender<Result<SocketAddr, String>>,
per_time_cap: Option<u64>,
port: u16, // 0 => auto assign
) {
thread::spawn(move || {
let faucet_addr = socketaddr!(0, port);
let faucet = Arc::new(Mutex::new(Faucet::new(
faucet_keypair,
None,
per_time_cap,
None,
)));
let runtime = Runtime::new().unwrap();
runtime.block_on(run_faucet(faucet, faucet_addr, Some(sender)));
});
}
// For integration tests. Listens on random open port and reports port to Sender.
pub fn run_local_faucet(faucet_keypair: Keypair, per_time_cap: Option<u64>) -> SocketAddr {
let | c fn run_faucet(
faucet: Arc<Mutex<Faucet>>,
faucet_addr: SocketAddr,
sender: Option<Sender<Result<SocketAddr, String>>>,
) {
let listener = TcpListener::bind(&faucet_addr).await;
if let Some(sender) = sender {
sender.send(
listener.as_ref().map(|listener| listener.local_addr().unwrap())
.map_err(|err| {
format!(
"Unable to bind faucet to {:?}, check the address is not already in use: {}",
faucet_addr, err
)
})
)
.unwrap();
}
let listener = match listener {
Err(err) => {
error!("Faucet failed to start: {}", err);
return;
}
Ok(listener) => listener,
};
info!("Faucet started. Listening on: {}", faucet_addr);
info!(
"Faucet account address: {}",
faucet.lock().unwrap().faucet_keypair.pubkey()
);
loop {
let _faucet = faucet.clone();
match listener.accept().await {
Ok((stream, _)) => {
tokio::spawn(async move {
if let Err(e) = process(stream, _faucet).await {
info!("failed to process request; error = {:?}", e);
}
});
}
Err(e) => debug!("failed to accept socket; error = {:?}", e),
}
}
}
async fn process(
mut stream: TokioTcpStream,
faucet: Arc<Mutex<Faucet>>,
) -> Result<(), Box<dyn std::error::Error>> {
let mut request = vec![0u8; serialized_size(&FaucetRequest::default()).unwrap() as usize];
while stream.read_exact(&mut request).await.is_ok() {
trace!("{:?}", request);
let response = {
match stream.peer_addr() {
Err(e) => {
info!("{:?}", e.into_inner());
ERROR_RESPONSE.to_vec()
}
Ok(peer_addr) => {
let ip = peer_addr.ip();
info!("Request IP: {:?}", ip);
match faucet.lock().unwrap().process_faucet_request(&request, ip) {
Ok(response_bytes) => {
trace!("Airdrop response_bytes: {:?}", response_bytes);
response_bytes
}
Err(e) => {
info!("Error in request: {}", e);
ERROR_RESPONSE.to_vec()
}
}
}
}
};
stream.write_all(&response).await?;
}
Ok(())
}
pub trait LimitByTime {
fn check_cache(&self, faucet: &mut Faucet, request_amount: u64) -> u64;
fn datapoint_info(&self, request_amount: u64, new_total: u64);
}
impl LimitByTime for IpAddr {
fn check_cache(&self, faucet: &mut Faucet, request_amount: u64) -> u64 {
*faucet
.ip_cache
.entry(*self)
.and_modify(|total| *total = total.saturating_add(request_amount))
.or_insert(request_amount)
}
fn datapoint_info(&self, request_amount: u64, new_total: u64) {
datapoint_info!(
"faucet-airdrop",
("request_amount", request_amount, i64),
("ip", self.to_string(), String),
("new_total", new_total, i64)
);
}
}
impl LimitByTime for Pubkey {
fn check_cache(&self, faucet: &mut Faucet, request_amount: u64) -> u64 {
*faucet
.address_cache
.entry(*self)
.and_modify(|total| *total = total.saturating_add(request_amount))
.or_insert(request_amount)
}
fn datapoint_info(&self, request_amount: u64, new_total: u64) {
datapoint_info!(
"faucet-airdrop",
("request_amount", request_amount, i64),
("address", self.to_string(), String),
("new_total", new_total, i64)
);
}
}
#[cfg(test)]
mod tests {
use super::*;
use solana_sdk::system_instruction::SystemInstruction;
use std::time::Duration;
#[test]
fn test_check_time_request_limit() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, Some(2), None);
let ip = socketaddr!([203, 0, 113, 1], 1234).ip();
assert!(faucet.check_time_request_limit(1, ip).is_ok());
assert!(faucet.check_time_request_limit(1, ip).is_ok());
assert!(faucet.check_time_request_limit(1, ip).is_err());
let address = Pubkey::new_unique();
assert!(faucet.check_time_request_limit(1, address).is_ok());
assert!(faucet.check_time_request_limit(1, address).is_ok());
assert!(faucet.check_time_request_limit(1, address).is_err());
}
#[test]
fn test_clear_caches() {
let keypair = Keypair::new();
let mut faucet = Faucet::new(keypair, None, None, None);
let ip = socketaddr!([127, 0, 0, 1], 0).ip();
assert_eq!(faucet.ip_cache.len(), 0);
faucet.check_time_request_limit(1, ip).unwrap();
assert_eq!(faucet.ip_cache.len(), 1);
faucet.clear_caches();
assert_eq!(faucet.ip_cache.len(), 0);
assert!(faucet.ip_cache.is_empty());
let address = Pubkey::new_unique();
assert_eq!(faucet.address_cache.len(), 0);
faucet.check_time_request_limit(1, address).unwrap();
assert_eq!(faucet.address_cache.len(), 1);
faucet.clear_caches();
assert_eq!(faucet.address_cache.len(), 0);
assert!(faucet.address_cache.is_empty());
}
#[test]
fn test_faucet_default_init() {
let keypair = Keypair::new();
let time_slice: Option<u64> = None;
let per_time_cap: Option<u64> = Some(200);
let per_request_cap: Option<u64> = Some(100);
let faucet = Faucet::new(keypair, time_slice, per_time_cap, per_request_cap);
assert_eq!(faucet.time_slice, Duration::new(TIME_SLICE, 0));
assert_eq!(faucet.per_time_cap, per_time_cap);
assert_eq!(faucet.per_request_cap, per_request_cap);
}
#[test]
fn test_faucet_build_airdrop_transaction() {
let to = Pubkey::new_unique();
let blockhash = Hash::default();
let request = FaucetRequest::GetAirdrop {
lamports: 2,
to,
blockhash,
};
let ip = socketaddr!([203, 0, 113, 1], 1234).ip();
let mint = Keypair::new();
let mint_pubkey = mint.pubkey();
let mut faucet = Faucet::new(mint, None, None, None);
if let FaucetTransaction::Airdrop(tx) =
faucet.build_airdrop_transaction(request, ip).unwrap()
{
let message = tx.message();
assert_eq!(tx.signatures.len(), 1);
assert_eq!(
message.account_keys,
vec![mint_pubkey, to, Pubkey::default()]
);
assert_eq!(message.recent_blockhash, blockhash);
assert_eq!(message.instructions.len(), 1);
let instruction: SystemInstruction =
deserialize(&message.instructions[0].data).unwrap();
assert_eq!(instruction, SystemInstruction::Transfer { lamports: 2 });
} else {
panic!("airdrop should succeed");
}
// Test per-time request cap
let mint = Keypair::new();
faucet = Faucet::new(mint, None, Some(2), None);
let _tx = faucet.build_airdrop_transaction(request, ip).unwrap(); // first request succeeds
let tx = faucet.build_airdrop_transaction(request, ip);
assert!(tx.is_err());
// Test multiple requests from loopback with different addresses succeed
let mint = Keypair::new();
faucet = Faucet::new(mint, None, Some(2), None);
let ip = socketaddr!([127, 0, 0, 1], 0).ip();
let other = Pubkey::new_unique();
let _tx0 = faucet.build_airdrop_transaction(request, ip).unwrap(); // first request succeeds
let request1 = FaucetRequest::GetAirdrop {
lamports: 2,
to: other,
blockhash,
};
let _tx1 = faucet.build_airdrop_transaction(request1, ip).unwrap(); // first request succeeds
let tx0 = faucet.build_airdrop_transaction(request, ip);
assert!(tx0.is_err());
let tx1 = faucet.build_airdrop_transaction(request1, ip);
assert!(tx1.is_err());
// Test multiple requests from allowed ip with different addresses succeed
let mint = Keypair::new();
let ip = socketaddr!([203, 0, 113, 1], 0).ip();
let mut allowed_ips = HashSet::new();
allowed_ips.insert(ip);
faucet = Faucet::new_with_allowed_ips(mint, None, Some(2), None, allowed_ips);
let other = Pubkey::new_unique();
let _tx0 = faucet.build_airdrop_transaction(request, ip).unwrap(); // first request succeeds
let request1 = FaucetRequest::GetAirdrop {
lamports: 2,
to: other,
blockhash,
};
let _tx1 = faucet.build_airdrop_transaction(request1, ip).unwrap(); // first request succeeds
let tx0 = faucet.build_airdrop_transaction(request, ip);
assert!(tx0.is_err());
let tx1 = faucet.build_airdrop_transaction(request1, ip);
assert!(tx1.is_err());
// Test per-request cap
let mint = Keypair::new();
let mint_pubkey = mint.pubkey();
let mut faucet = Faucet::new(mint, None, None, Some(1));
if let FaucetTransaction::Memo((tx, memo)) =
faucet.build_airdrop_transaction(request, ip).unwrap()
{
let message = tx.message();
assert_eq!(tx.signatures.len(), 1);
assert_eq!(
message.account_keys,
vec![mint_pubkey, Pubkey::new(&spl_memo::id().to_bytes())]
);
assert_eq!(message.recent_blockhash, blockhash);
assert_eq!(message.instructions.len(), 1);
let parsed_memo = std::str::from_utf8(&message.instructions[0].data).unwrap();
let expected_memo = "request too large; req: ◎0.000000002, cap: ◎0.000000001";
assert_eq!(parsed_memo, expected_memo);
assert_eq!(memo, expected_memo);
} else {
panic!("airdrop attempt should result in memo tx");
}
}
#[test]
fn test_process_faucet_request() {
let to = solana_sdk::pubkey::new_rand();
let blockhash = Hash::new(to.as_ref());
let lamports = 50;
let req = FaucetRequest::GetAirdrop {
lamports,
blockhash,
to,
};
let ip = socketaddr!([203, 0, 113, 1], 1234).ip();
let req = serialize(&req).unwrap();
let keypair = Keypair::new();
let expected_instruction = system_instruction::transfer(&keypair.pubkey(), &to, lamports);
let message = Message::new(&[expected_instruction], Some(&keypair.pubkey()));
let expected_tx = Transaction::new(&[&keypair], message, blockhash);
let expected_bytes = serialize(&expected_tx).unwrap();
let mut expected_vec_with_length = vec![0; 2];
LittleEndian::write_u16(&mut expected_vec_with_length, expected_bytes.len() as u16);
expected_vec_with_length.extend_from_slice(&expected_bytes);
let mut faucet = Faucet::new(keypair, None, None, None);
let response = faucet.process_faucet_request(&req, ip);
let response_vec = response.unwrap().to_vec();
assert_eq!(expected_vec_with_length, response_vec);
let bad_bytes = "bad bytes".as_bytes();
assert!(faucet.process_faucet_request(bad_bytes, ip).is_err());
}
}
| (sender, receiver) = std::sync::mpsc::channel();
run_local_faucet_with_port(faucet_keypair, sender, per_time_cap, 0);
receiver
.recv()
.expect("run_local_faucet")
.expect("faucet_addr")
}
pub asyn |
helpers_test.go | package jsonapi
import (
"testing"
"github.com/stretchr/testify/assert"
)
func Test_checkIsPublicSuffix(t *testing.T) {
a := assert.New(t)
a.True(isPublicSuffix("co.uk"))
a.False(isPublicSuffix("amazon.co.uk"))
a.True(isPublicSuffix("dyndns.org"))
a.False(isPublicSuffix("foo.dyndns.org"))
}
func | (t *testing.T) {
a := assert.New(t)
a.Equal("blabla", regexSafeLower("BlaBLA"))
a.Equal("\\[injected\\]\\*", regexSafeLower("[injected]*"))
}
| Test_regexSafeLower |
check_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
from mmcv.utils import collect_env as collect_base_env | from mmcv.utils import get_git_hash
import mmdeploy
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDeployment'] = f'{mmdeploy.__version__}+{get_git_hash()[:7]}'
return env_info
def check_backend():
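# Probe each deployment backend (onnxruntime, tensorrt, ncnn, pplnn, openvino) and
# log its installed version and availability.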
try:
import onnxruntime as ort
except ImportError:
ort_version = None
else:
ort_version = ort.__version__
import mmdeploy.apis.onnxruntime as ort_apis
logging.info(f'onnxruntime: {ort_version} ops_is_available : '
f'{ort_apis.is_available()}')
try:
import tensorrt as trt
except ImportError:
trt_version = None
else:
trt_version = trt.__version__
import mmdeploy.apis.tensorrt as trt_apis
logging.info(
f'tensorrt: {trt_version} ops_is_available : {trt_apis.is_available()}'
)
try:
import ncnn
except ImportError:
ncnn_version = None
else:
ncnn_version = ncnn.__version__
import mmdeploy.apis.ncnn as ncnn_apis
logging.info(
f'ncnn: {ncnn_version} ops_is_available : {ncnn_apis.is_available()}')
import mmdeploy.apis.pplnn as pplnn_apis
logging.info(f'pplnn_is_available: {pplnn_apis.is_available()}')
import mmdeploy.apis.openvino as openvino_apis
logging.info(f'openvino_is_available: {openvino_apis.is_available()}')
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
for name, val in collect_env().items():
logging.info('{}: {}'.format(name, val))
check_backend() | |
llrepr.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use middle::trans::context::CrateContext;
use middle::trans::type_::Type;
use lib::llvm::ValueRef;
pub trait LlvmRepr {
fn llrepr(&self, ccx: &CrateContext) -> String;
}
impl<'a, T:LlvmRepr> LlvmRepr for &'a [T] {
fn llrepr(&self, ccx: &CrateContext) -> String {
let reprs: Vec<String> = self.iter().map(|t| t.llrepr(ccx)).collect();
format!("[{}]", reprs.connect(","))
}
}
impl LlvmRepr for Type {
fn llrepr(&self, ccx: &CrateContext) -> String {
ccx.tn.type_to_string(*self)
}
}
impl LlvmRepr for ValueRef {
fn llrepr(&self, ccx: &CrateContext) -> String {
ccx.tn.val_to_string(*self)
}
} | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license |
motos.service.ts | import { HttpException, Injectable } from "@nestjs/common";
import { InjectModel, MongooseModule } from "@nestjs/mongoose";
import { Model } from "mongoose";
import { Moto } from "./motos.interface";
@Injectable()
export class | {
constructor(
@InjectModel('Motos')
private readonly motosModel : Model<Moto>
){}
async find():Promise<Moto[]>{
return await this.motosModel.find().exec()
}
async create (motoDto): Promise<Moto> {
const moto= await this.motosModel.create(motoDto)
return moto;
}
public async deletemotoById(nummoto: number){
const result = await this.motosModel.deleteOne({nummoto}).exec();
if (result.deletedCount === 0){
throw new HttpException('Not found', 404);
}
}
public async getmotoById(nummoto: number){
// Await the query so the not-found check inspects the document, not a pending promise.
const moto = await this.motosModel.findOne({nummoto}).exec();
if (!moto) {
throw new HttpException('Not found', 404);
}
return moto;
}
} | MotosService |
example2.go | // All material is licensed under the Apache License Version 2.0, January 2004
// http://www.apache.org/licenses/LICENSE-2.0
// https://play.golang.org/p/fSMITKsv3p
/*
ValueOf returns a new Value initialized to the concrete value stored in the interface i.
ValueOf(nil) returns the zero Value.
func ValueOf(i interface{}) Value {
*/
// Sample program to show how to reflect on a struct type with tags.
package main
import (
"fmt"
"reflect"
"regexp"
)
// User is a sample struct.
type User struct {
Name string `valid:"exists"`
Email string `valid:"regexp" exp:"[\w.%+-]+@(?:[[:alnum:]-]+\\.)+[[:alpha:]]{2,6}"`
}
// Result provides a detail view of the validation results.
type Result struct {
Field string
Type string
Value string
Test string
Result bool
}
// main is the entry point for the application.
func main() {
// Declare a variable of type user.
user := User{
Name: "Henry Ford",
Email: "[email protected]",
}
// Validate the value and display the results.
results := validate(&user)
for _, result := range results {
fmt.Printf("%+v\n", result)
}
}
// validate performs data validation on any struct type value.
func validate(value interface{}) []Result {
// Declare a nil slice of Result values.
var results []Result
// Retrieve the value that the interface contains or points to.
val := reflect.ValueOf(value).Elem() | // Retrieve the field information.
typeField := val.Type().Field(i)
// Declare a variable of type Result and initialize
// it with all the meta-data.
result := Result{
Field: typeField.Name,
Type: typeField.Type.String(),
Value: val.Field(i).String(),
Test: typeField.Tag.Get("valid"),
}
// Perform the requested tests.
switch result.Test {
case "exists":
if result.Value != "" {
result.Result = true
}
case "regexp":
m, err := regexp.MatchString(typeField.Tag.Get("exp"), result.Value)
if err == nil && m == true {
result.Result = true
}
}
// Append the results to the slice.
results = append(results, result)
}
return results
} |
// Iterate over the fields of the struct value.
for i := 0; i < val.NumField(); i++ { |
get_riot_messaging_service_v1_message_by_a_by_b_by_c_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
package plugins
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetRiotMessagingServiceV1MessageByAByBByCParams creates a new GetRiotMessagingServiceV1MessageByAByBByCParams object
// with the default values initialized.
func NewGetRiotMessagingServiceV1MessageByAByBByCParams() *GetRiotMessagingServiceV1MessageByAByBByCParams {
var ()
return &GetRiotMessagingServiceV1MessageByAByBByCParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithTimeout creates a new GetRiotMessagingServiceV1MessageByAByBByCParams object
// with the default values initialized, and the ability to set a timeout on a request
func | (timeout time.Duration) *GetRiotMessagingServiceV1MessageByAByBByCParams {
var ()
return &GetRiotMessagingServiceV1MessageByAByBByCParams{
timeout: timeout,
}
}
// NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithContext creates a new GetRiotMessagingServiceV1MessageByAByBByCParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithContext(ctx context.Context) *GetRiotMessagingServiceV1MessageByAByBByCParams {
var ()
return &GetRiotMessagingServiceV1MessageByAByBByCParams{
Context: ctx,
}
}
// NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithHTTPClient creates a new GetRiotMessagingServiceV1MessageByAByBByCParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithHTTPClient(client *http.Client) *GetRiotMessagingServiceV1MessageByAByBByCParams {
var ()
return &GetRiotMessagingServiceV1MessageByAByBByCParams{
HTTPClient: client,
}
}
/*GetRiotMessagingServiceV1MessageByAByBByCParams contains all the parameters to send to the API endpoint
for the get riot messaging service v1 message by a by b by c operation typically these are written to a http.Request
*/
type GetRiotMessagingServiceV1MessageByAByBByCParams struct {
/*A*/
A string
/*B*/
B string
/*C*/
C string
timeout time.Duration
Context context.Context
HTTPClient *http.Client
}
// WithTimeout adds the timeout to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithTimeout(timeout time.Duration) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithContext(ctx context.Context) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetContext(ctx context.Context) {
o.Context = ctx
}
// WithHTTPClient adds the HTTPClient to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithHTTPClient(client *http.Client) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithA adds the a to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithA(a string) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetA(a)
return o
}
// SetA adds the a to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetA(a string) {
o.A = a
}
// WithB adds the b to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithB(b string) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetB(b)
return o
}
// SetB adds the b to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetB(b string) {
o.B = b
}
// WithC adds the c to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WithC(c string) *GetRiotMessagingServiceV1MessageByAByBByCParams {
o.SetC(c)
return o
}
// SetC adds the c to the get riot messaging service v1 message by a by b by c params
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) SetC(c string) {
o.C = c
}
// WriteToRequest writes these params to a swagger request
func (o *GetRiotMessagingServiceV1MessageByAByBByCParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param a
if err := r.SetPathParam("a", o.A); err != nil {
return err
}
// path param b
if err := r.SetPathParam("b", o.B); err != nil {
return err
}
// path param c
if err := r.SetPathParam("c", o.C); err != nil {
return err
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
}
| NewGetRiotMessagingServiceV1MessageByAByBByCParamsWithTimeout |
aggregate.go | package idrac_embedded
import (
"context"
"github.com/spf13/viper"
"github.com/superchalupa/sailfish/src/ocp/testaggregate"
"sync"
"github.com/superchalupa/sailfish/src/log"
"github.com/superchalupa/sailfish/src/ocp/view"
domain "github.com/superchalupa/sailfish/src/redfishresource"
eh "github.com/looplab/eventhorizon"
)
func | (s *testaggregate.Service) {
s.RegisterAggregateFunction("idrac_embedded",
func(ctx context.Context, subLogger log.Logger, cfgMgr *viper.Viper, cfgMgrMu *sync.RWMutex, vw *view.View, extra interface{}, params map[string]interface{}) ([]eh.Command, error) {
return []eh.Command{
&domain.CreateRedfishResource{
ResourceURI: vw.GetURI(),
Type: "#Manager.v1_0_2.Manager",
Context: "/redfish/v1/$metadata#Manager.Manager",
Privileges: map[string]interface{}{
"GET": []string{"Login"},
},
Properties: map[string]interface{}{
"@odata.etag": `W/"abc123"`,
// replace with model calls...
"Model": "14G Monolithic",
"DateTimeLocalOffset": "-05:00",
"UUID": "3132334f-c0b7-3480-3510-00364c4c4544",
"Name": "Manager",
"@odata.type": "#Manager.v1_0_2.Manager",
"FirmwareVersion": "3.15.15.15",
"ManagerType": "BMC",
"Oem": map[string]interface{}{
"OemAttributes": []map[string]interface{}{
map[string]interface{}{
"@odata.id": vw.GetURI() + "/Attributes",
},
map[string]interface{}{
"@odata.id": "/redfish/v1/Managers" + "/System.Embedded.1" + "/Attributes",
},
map[string]interface{}{
"@odata.id": "/redfish/v1/Managers" + "/LifecycleController.Embedded.1" + "/Attributes",
},
},
},
}},
}, nil
})
}
| RegisterAggregate |
mod.rs | use format::data::Format;
use std::sync::Arc;
use widget::base::{Sender, Widget};
#[cfg(target_os = "linux")]
pub mod alsa;
#[cfg(target_os = "linux")]
pub use self::alsa::{default_volume, ALSA};
#[cfg(target_os = "freebsd")]
pub mod freebsd;
#[cfg(target_os = "freebsd")]
pub use self::freebsd::{default_volume, FreeBSDSound};
pub struct VolumeState {
pub volume: f32,
pub muted: bool,
}
pub struct Volume<F: Fn(VolumeState) -> Format, B: VolumeBackend<F>> {
updater: Arc<Box<F>>,
backend: B,
}
impl<F, B> Widget for Volume<F, B>
where
F: Fn(VolumeState) -> Format + Sync + Send + 'static,
B: VolumeBackend<F>,
{
fn current_value(&self) -> Format {
self.backend.current_value()
}
fn spawn_notifier(&mut self, tx: Sender<()>) {
self.backend.spawn_notifier(tx, self.updater.clone());
}
}
impl<F, B> Volume<F, B>
where
F: Fn(VolumeState) -> Format,
B: VolumeBackend<F>,
{
pub fn | (backend: B, updater: F) -> Box<Volume<F, B>> {
Box::new(Volume {
updater: Arc::new(Box::new(updater)),
backend,
})
}
}
pub trait VolumeBackend<F: Fn(VolumeState) -> Format> {
fn current_value(&self) -> Format;
fn spawn_notifier(&mut self, tx: Sender<()>, updater: Arc<Box<F>>);
}
| new |
dist_fake.py | # --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Peng Zhou
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from lib.datasets.imdb import imdb
import lib.datasets.ds_utils as ds_utils
import numpy as np
import scipy.sparse
import scipy.io as sio
import lib.utils.cython_bbox
import pickle
import subprocess
import uuid
import pdb
import xml.etree.ElementTree as ET
from .voc_eval import voc_eval
from lib.config import config as cfg
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
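# imdb subclass for a tamper/authentic manipulation-detection image set; image indexes
# and boxes come from a text index file rooted at the NC2016_Test0613 data path.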
class dist_fake(imdb):
def __init__(self, image_set, year, dist_path=None):
imdb.__init__(self, image_set)
self._year = year
self._image_set = image_set.split('dist_')[1]
self._dist_path = self._get_default_path() if dist_path is None \
else dist_path
self._data_path=self._dist_path
self._classes = ('__background__', # always index 0
'tamper','authentic')
self._classes = ('authentic', # always index 0
'tamper')
#self.classes =('authentic', # always index 0
#'splicing','removal')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = {'.png','.jpg','.tif','.bmp','.JPG'}
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(os.path.splitext(self._image_index[i].split(' ')[0])[0])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
for ext in self._image_ext:
#image_path = os.path.join('/home-3/[email protected]/work/xintong/medifor/portrait/test_data',
#index + ext)
image_path = os.path.join(self._data_path,
index + ext)
image_path1=os.path.join('/home-3/[email protected]/work/pengzhou/dataset/NC2016_Test0613',
index + ext)
if os.path.isfile(image_path):
return image_path
elif os.path.isfile(image_path1):
return image_path1
else:
continue
assert os.path.isfile(image_path) and os.path.isfile(image_path1), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path,
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
#print(image_index)
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'NC2016_Test0613')
def | (self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self.roidb_gt(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def roidb_gt(self,image_id):
num_objs = int(len(image_id.split(' ')[1:])/5)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix in range(num_objs):
bbox = image_id.split(' ')[ix*5+1:ix*5+5]
# Make pixel indexes 0-based
x1 = float(bbox[0])
y1 = float(bbox[1])
x2 = float(bbox[2])
y2 = float(bbox[3])
if x1<0:
x1=0
if y1<0:
y1=0
try:
cls=self._class_to_ind[image_id.split(' ')[ix*5+5]]
except:
if int(image_id.split(' ')[ix*5+5])==0:
print('authentic')
cls=2
else:
cls = int(image_id.split(' ')[ix*5+5])
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 ) * (y2 - y1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'JPGed':False,
'noised':False,
'seg_areas': seg_areas}
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = 'nist_' + self._image_set + '_{:s}.txt'
path = os.path.join(
'.',
filename)
return path
def _get_voc_noise_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = 'nist_' + self._image_set + '_{:s}_noise.txt'
path = os.path.join(
'.',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
print(filename)
with open(filename, 'w') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
if dets == []:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
#pdb.set_trace()
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(index.split(' ')[0], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
#pdb.set_trace()
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._dist_path,
'coco_multi' ,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._dist_path,
self._image_set + '.txt')
cachedir = os.path.join(self._dist_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
#use_07_metric = True if int(self._year) < 2010 else False
use_07_metric = False
print('dist metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__' or cls == self.classes[0]:
cls_ind=0
continue
else:
cls_ind=self._class_to_ind[cls]
#elif cls=='median_filtering':
#cls_ind=3
#continue
filename = self._get_voc_results_file_template().format(cls)
filename2 = self._get_voc_noise_results_file_template().format(cls)
print(cls_ind)
rec, prec, ap = voc_eval(
filename,filename2, annopath, imagesetfile, cls_ind, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric,fuse=False)
aps += [ap]
print(('AP for {} = {:.4f},recall = {:.4f}, precision = {:.4f}'.format(cls, ap,rec[-1],prec[-1])))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
fig=plt.figure()
plt.plot(rec,prec)
fig.suptitle('PR curve for {} detection'.format(cls),fontsize=20)
plt.xlabel('recall',fontsize=15)
plt.xlim((0,1.0))
plt.ylim((0,1.0))
plt.ylabel('precision',fontsize=15)
fig.savefig('{}.jpg'.format(cls))
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
#if self.config['matlab_eval']:
#self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
#os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
from datasets.dist_fake import dist_fake
d = dist_fake('trainval', '2007')
res = d.roidb
from IPython import embed;
embed()
| gt_roidb |
subscriptionClient.js | /* jshint latedef:false */
/* jshint forin:false */
/* jshint noempty:false */
//
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Warning: This code was generated by a tool.
//
// Changes to this file may cause incorrect behavior and will be lost if the
// code is regenerated.
'use strict';
var util = require('util');
var azureCommon = require('azure-common');
var xml = azureCommon.xml2js;
var js2xml = azureCommon.js2xml;
var Service = azureCommon.Service;
var WebResource = azureCommon.WebResource;
var SubscriptionClient = ( /** @lends SubscriptionClient */ function() {
/**
* @class
* Initializes a new instance of the SubscriptionClient class.
* @constructor
*
* @param {CloudCredentials} credentials Credentials used to authenticate
* requests.
*
* @param {string} [baseUri] Gets the URI used as the base for all cloud
* service requests.
*
* @param {Array} filters
*/
function SubscriptionClient(credentials, baseUri, filters) {
if (credentials === null || credentials === undefined) {
throw new Error('credentials cannot be null.');
}
SubscriptionClient['super_'].call(this, credentials, filters);
this.credentials = credentials;
this.baseUri = baseUri;
if (this.baseUri === null || this.baseUri === undefined) {
this.baseUri = 'https://management.core.windows.net';
}
if (this.apiVersion === null || this.apiVersion === undefined) {
this.apiVersion = '2013-08-01';
}
if (this.longRunningOperationInitialTimeout === null || this.longRunningOperationInitialTimeout === undefined) {
this.longRunningOperationInitialTimeout = -1;
}
if (this.longRunningOperationRetryTimeout === null || this.longRunningOperationRetryTimeout === undefined) {
this.longRunningOperationRetryTimeout = -1;
}
/**
* Provides an instance of the
* [SubscriptionOperations](-SubscriptionOperations.html) object.
* @type {object}
*/
this.subscriptions = new SubscriptionOperations(this);
}
util.inherits(SubscriptionClient, Service);
return SubscriptionClient;
})();
exports.SubscriptionClient = SubscriptionClient;
var SubscriptionOperations = ( /** @lends SubscriptionOperations */ function() {
/**
* @class
* __NOTE__: An instance of this class is automatically created for an
* instance of the [SubscriptionClient] {@link
* SubscriptionClient~SubscriptionClient}.
* See [subscriptions] {@link
* SubscriptionClient~SubscriptionClient#subscriptions}.
* Initializes a new instance of the SubscriptionOperations class.
* @constructor
*
* @param {SubscriptionClient} client Reference to the service client.
*/
function | (client) {
this.client = client;
}
/**
*
* @param {function} callback
*
* @returns {Stream} The response stream.
*/
SubscriptionOperations.prototype.list = function(callback) {
if (callback === null || callback === undefined) {
throw new Error('callback cannot be null.');
}
// Validate
// Tracing
// Construct URL
var url2 = '';
url2 = url2 + '/subscriptions';
var baseUrl = this.client.baseUri;
// Trim '/' character from the end of baseUrl and beginning of url.
if (baseUrl[baseUrl.length - 1] === '/') {
baseUrl = baseUrl.substring(0, (baseUrl.length - 1) + 0);
}
if (url2[0] === '/') {
url2 = url2.substring(1);
}
url2 = baseUrl + '/' + url2;
url2 = url2.replace(' ', '%20');
// Create HTTP transport objects
var httpRequest = new WebResource();
httpRequest.method = 'GET';
httpRequest.headers = {};
httpRequest.url = url2;
// Set Headers
httpRequest.headers['Content-Type'] = 'application/xml;charset=utf-8';
httpRequest.headers['x-ms-version'] = '2013-08-01';
// Send Request
return this.client.pipeline(httpRequest, function (err, response, body) {
if (err !== null && err !== undefined) {
return callback(err);
}
var statusCode = response.statusCode;
if (statusCode !== 200) {
var error = new Error(body);
error.statusCode = response.statusCode;
return callback(error);
}
// Create Result
var result = null;
// Deserialize Response
if (statusCode === 200) {
var responseContent = body;
result = { subscriptions: [] };
var options = {};
options.trim = false;
options.strict = false;
xml.parseString(responseContent, options, function (err2, responseDoc) {
if (err2 !== null && err2 !== undefined) {
return callback(err2);
}
var subscriptionsElement = js2xml.getElement(responseDoc, responseDoc, 'SUBSCRIPTIONS', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (subscriptionsElement !== null && subscriptionsElement !== undefined) {
if (subscriptionsElement !== null && subscriptionsElement !== undefined) {
for (var loweredIndex1 = 0; loweredIndex1 < js2xml.getElements(responseDoc, subscriptionsElement, 'SUBSCRIPTION', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE').length; loweredIndex1 = loweredIndex1 + 1) {
var subscriptionsElement2 = js2xml.getElements(responseDoc, subscriptionsElement, 'SUBSCRIPTION', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE')[loweredIndex1];
var subscriptionInstance = {};
result.subscriptions.push(subscriptionInstance);
var subscriptionIDElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'SUBSCRIPTIONID', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (subscriptionIDElement !== null && subscriptionIDElement !== undefined) {
var subscriptionIDInstance = subscriptionIDElement;
subscriptionInstance.subscriptionId = subscriptionIDInstance;
}
var subscriptionNameElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'SUBSCRIPTIONNAME', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (subscriptionNameElement !== null && subscriptionNameElement !== undefined) {
var subscriptionNameInstance = subscriptionNameElement;
subscriptionInstance.subscriptionName = subscriptionNameInstance;
}
var subscriptionStatusElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'SUBSCRIPTIONSTATUS', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (subscriptionStatusElement !== null && subscriptionStatusElement !== undefined && subscriptionStatusElement.length !== 0) {
var subscriptionStatusInstance = subscriptionStatusElement;
subscriptionInstance.subscriptionStatus = subscriptionStatusInstance;
}
var accountAdminLiveEmailIdElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'ACCOUNTADMINLIVEEMAILID', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (accountAdminLiveEmailIdElement !== null && accountAdminLiveEmailIdElement !== undefined) {
var accountAdminLiveEmailIdInstance = accountAdminLiveEmailIdElement;
subscriptionInstance.accountAdminLiveEmailId = accountAdminLiveEmailIdInstance;
}
var serviceAdminLiveEmailIdElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'SERVICEADMINLIVEEMAILID', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (serviceAdminLiveEmailIdElement !== null && serviceAdminLiveEmailIdElement !== undefined) {
var serviceAdminLiveEmailIdInstance = serviceAdminLiveEmailIdElement;
subscriptionInstance.serviceAdminLiveEmailId = serviceAdminLiveEmailIdInstance;
}
var maxCoreCountElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXCORECOUNT', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxCoreCountElement !== null && maxCoreCountElement !== undefined && maxCoreCountElement.length !== 0) {
var maxCoreCountInstance = parseInt(maxCoreCountElement, 10);
subscriptionInstance.maximumCoreCount = maxCoreCountInstance;
}
var maxStorageAccountsElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXSTORAGEACCOUNTS', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxStorageAccountsElement !== null && maxStorageAccountsElement !== undefined && maxStorageAccountsElement.length !== 0) {
var maxStorageAccountsInstance = parseInt(maxStorageAccountsElement, 10);
subscriptionInstance.maximumStorageAccounts = maxStorageAccountsInstance;
}
var maxHostedServicesElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXHOSTEDSERVICES', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxHostedServicesElement !== null && maxHostedServicesElement !== undefined && maxHostedServicesElement.length !== 0) {
var maxHostedServicesInstance = parseInt(maxHostedServicesElement, 10);
subscriptionInstance.maximumHostedServices = maxHostedServicesInstance;
}
var currentCoreCountElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'CURRENTCORECOUNT', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (currentCoreCountElement !== null && currentCoreCountElement !== undefined && currentCoreCountElement.length !== 0) {
var currentCoreCountInstance = parseInt(currentCoreCountElement, 10);
subscriptionInstance.currentCoreCount = currentCoreCountInstance;
}
var currentStorageAccountsElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'CURRENTSTORAGEACCOUNTS', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (currentStorageAccountsElement !== null && currentStorageAccountsElement !== undefined && currentStorageAccountsElement.length !== 0) {
var currentStorageAccountsInstance = parseInt(currentStorageAccountsElement, 10);
subscriptionInstance.currentStorageAccounts = currentStorageAccountsInstance;
}
var currentHostedServicesElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'CURRENTHOSTEDSERVICES', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (currentHostedServicesElement !== null && currentHostedServicesElement !== undefined && currentHostedServicesElement.length !== 0) {
var currentHostedServicesInstance = parseInt(currentHostedServicesElement, 10);
subscriptionInstance.currentHostedServices = currentHostedServicesInstance;
}
var maxVirtualNetworkSitesElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXVIRTUALNETWORKSITES', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxVirtualNetworkSitesElement !== null && maxVirtualNetworkSitesElement !== undefined && maxVirtualNetworkSitesElement.length !== 0) {
var maxVirtualNetworkSitesInstance = parseInt(maxVirtualNetworkSitesElement, 10);
subscriptionInstance.maximumVirtualNetworkSites = maxVirtualNetworkSitesInstance;
}
var maxLocalNetworkSitesElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXLOCALNETWORKSITES', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxLocalNetworkSitesElement !== null && maxLocalNetworkSitesElement !== undefined && maxLocalNetworkSitesElement.length !== 0) {
var maxLocalNetworkSitesInstance = parseInt(maxLocalNetworkSitesElement, 10);
subscriptionInstance.maximumLocalNetworkSites = maxLocalNetworkSitesInstance;
}
var maxDnsServersElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXDNSSERVERS', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxDnsServersElement !== null && maxDnsServersElement !== undefined && maxDnsServersElement.length !== 0) {
var maxDnsServersInstance = parseInt(maxDnsServersElement, 10);
subscriptionInstance.maximumDnsServers = maxDnsServersInstance;
}
var maxExtraVIPCountElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'MAXEXTRAVIPCOUNT', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (maxExtraVIPCountElement !== null && maxExtraVIPCountElement !== undefined && maxExtraVIPCountElement.length !== 0) {
var maxExtraVIPCountInstance = parseInt(maxExtraVIPCountElement, 10);
subscriptionInstance.maximumExtraVirtualIPCount = maxExtraVIPCountInstance;
}
var aADTenantIDElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'AADTENANTID', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (aADTenantIDElement !== null && aADTenantIDElement !== undefined) {
var aADTenantIDInstance = aADTenantIDElement;
subscriptionInstance.activeDirectoryTenantId = aADTenantIDInstance;
}
var createdTimeElement = js2xml.getElement(responseDoc, subscriptionsElement2, 'CREATEDTIME', 'HTTP://SCHEMAS.MICROSOFT.COM/WINDOWSAZURE');
if (createdTimeElement !== null && createdTimeElement !== undefined && createdTimeElement.length !== 0) {
var createdTimeInstance = new Date(createdTimeElement);
subscriptionInstance.created = createdTimeInstance;
}
}
}
}
});
}
result.statusCode = statusCode;
result.requestId = response.headers['x-ms-request-id'];
return callback(null, result);
});
};
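// Usage sketch (not part of the generated client): listing subscriptions via the
// callback API above. `credentials` is assumed to be a CloudCredentials object
// accepted by the SubscriptionClient constructor; the result shape mirrors the
// XML parsing code in this method.
//
//   var client = new SubscriptionClient(credentials);
//   client.subscriptions.list(function (err, result) {
//     if (err) { return console.error(err); }
//     result.subscriptions.forEach(function (subscription) {
//       console.log(subscription.subscriptionId, subscription.subscriptionName);
//     });
//   });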
return SubscriptionOperations;
})();
| SubscriptionOperations |
memsource_project.py | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: memsource_project
short_description: Manage a Memsource project
version_added: 0.0.1
description:
- Manage a Memsource project
author: 'Yanis Guenane (@Spredzy)'
options:
uid:
description:
- UID of the project
type: str
name:
description:
- Name of the project
required: false
type: str
template_id:
description:
- ID of the template to create the project from
required: false
type: int
purge_on_delete:
description:
- Whether to purge the content of the project on delete
type: bool
extends_documentation_fragment:
- community.memsource.memsource
requirements: [memsource]
"""
EXAMPLES = """
- name: Create project from template id
community.memsource.memsource_project:
name: My Project
template_id: 12345
- name: Retrieve project information
community.memsource.memsource_project:
uid: uid
- name: Delete project
community.memsource.memsource_project:
uid: uid
state: absent
"""
RETURN = """
project:
returned: on success
description: >
Project's up to date information
type: dict
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.community.memsource.plugins.module_utils.memsource import (
get_action,
get_default_argspec,
get_memsource_client,
)
def main():
argument_spec = get_default_argspec()
argument_spec.update(
dict(
uid=dict(type="str"),
name=dict(type="str"),
template_id=dict(type="int"),
purge_on_delete=dict(type="bool"),
state=dict(type="str", default="present", choices=["absent", "present"]),
),
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
_memsource = get_memsource_client(module.params)
_action = get_action(module.params)
_result = {}
project = {}
if _action == "create":
if module.params.get("template_id"): | )
else:
pass
_result.update({"changed": True})
elif _action == "read":
project = _memsource.get_project_by_id(module.params["uid"])
elif _action == "update":
pass
else:
res = _memsource.delete_project(
module.params["uid"],
purge=module.params.get("purge_on_delete", False),
do_not_fail_on_404=True,
)
if res.status_code == 204:
_result.update({"changed": True})
_result.update({"project": project})
module.exit_json(**_result)
if __name__ == "__main__":
main() | project = _memsource.create_project_from_template(
module.params.get("name"), module.params.get("template_id") |
block_explorer.js | /**
* This module provides functions for creating URLs for Blockstream's
* [block explorer]{@link https://blockstream.info}.
*
* This module does NOT provide implementations of HTTP requests which
* fetch data from these URLs.
*
* @module block_explorer
*/
import {TESTNET} from "./networks";
const BASE_URL_MAINNET = 'https://blockstream.info';
const BASE_URL_TESTNET = 'https://blockstream.info/testnet';
function blockExplorerBaseURL(network) {
return (network === TESTNET ? BASE_URL_TESTNET : BASE_URL_MAINNET);
}
/**
* Returns the block explorer URL for the given path and network.
*
* @param {string} path - the explorer path
* @param {module:networks.NETWORKS} network - bitcoin network
* @returns {string} the block explorer url
* @example
* import {MAINNET, TESTNET, blockExplorerURL} from "unchained-bitcoin";
* const path = "/block/00000000000000000011341d69792271766e4683e29b3ea169eacc59bde10a57";
* console.log(blockExplorerURL(path, MAINNET)) // https://blockstream.info/block/00000000000000000011341d69792271766e4683e29b3ea169eacc59bde10a57
* console.log(blockExplorerURL(path, TESTNET)) // https://blockstream.info/testnet/block/00000000000000000011341d69792271766e4683e29b3ea169eacc59bde10a57
*/
export function blockExplorerURL(path, network) {
return `${blockExplorerBaseURL(network)}${path}`;
}
/**
* Returns the block explorer API URL for the given path and network.
*
* @param {string} path - the API path
* @param {module:networks.NETWORKS} network - bitcoin network
* @returns {string} the full block explorer url
* @example
* import {MAINNET, TESTNET, blockExplorerAPIURL} from "unchained-bitcoin";
* const path = "/tx/1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0";
* console.log(blockExplorerAPIURL(path, MAINNET)); // https://blockstream.info/api/tx/1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0
* console.log(blockExplorerAPIURL(path, TESTNET)); // https://blockstream.info/testnet/api/tx/1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0
*/
export function blockExplorerAPIURL(path, network) {
return `${blockExplorerBaseURL(network)}/api${path}`;
}
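// Not part of the original module: since this module only builds URLs and does
// not perform HTTP requests, here is a minimal sketch of pairing it with an HTTP
// client (the `fetch` API, which is an assumption about the runtime). The
// /tx/<txid> path mirrors the @example above.
//
//   async function fetchTransaction(txid, network) {
//     const response = await fetch(blockExplorerAPIURL(`/tx/${txid}`, network));
//     return response.json();
//   }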
/**
* Return the block explorer URL for the given transaction ID and network.
*
* @param {string} txid - the transaction id
* @param {module:networks.NETWORKS} network - bitcoin network
* @returns {string} the full transaction URL
* @example
* import {MAINNET, TESTNET, blockExplorerTransactionURL} from "unchained-bitcoin";
* const txid = "1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0";
* console.log(blockExplorerTransactionURL(txid, MAINNET)); // https://blockstream.info/tx/1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0
* console.log(blockExplorerTransactionURL(txid, TESTNET)); // https://blockstream.info/testnet/tx/1814a10fb22e9551a17a94a1e68971e19b4f59eaf1689e0af85b97929b3b9ae0
*/
export function blockExplorerTransactionURL(txid, network) { | return blockExplorerURL(`/tx/${txid}`, network);
}
/**
* Return the block explorer URL for the given address and network.
*
* @param {string} address - the address
* @param {module:networks.NETWORKS} network - bitcoin network
* @returns {string} full URL for address lookup
* @example
* import {MAINNET, TESTNET, blockExplorerAddressURL} from "unchained-bitcoin";
* const address = "39YqNoLULDpbjmeCTdGJ42DQhrQLzRcMdX";
* console.log(blockExplorerAddressURL(address, MAINNET)); // https://blockstream.info/address/39YqNoLULDpbjmeCTdGJ42DQhrQLzRcMdX
* console.log(blockExplorerAddressURL(address, TESTNET)); // https://blockstream.info/testnet/address/39YqNoLULDpbjmeCTdGJ42DQhrQLzRcMdX
*/
export function blockExplorerAddressURL(address, network) {
return blockExplorerURL(`/address/${address}`, network);
} | |
module.ts | import {CommonModule} from '@angular/common';
import {NgModule} from '@angular/core';
import {ReactiveFormsModule} from '@angular/forms';
import {MatButtonModule} from '@angular/material/button';
import {MatCheckboxModule} from '@angular/material/checkbox';
import {MatChipsModule} from '@angular/material/chips';
import {MatDialogModule} from '@angular/material/dialog';
import {MatFormFieldModule} from '@angular/material/form-field';
import {MatIconModule} from '@angular/material/icon';
import {MatInputModule} from '@angular/material/input';
import {BrowserAnimationsModule} from '@angular/platform-browser/animations';
import {RouterModule} from '@angular/router';
import {CollectBrowserHistoryForm} from '@app/components/flow_args_form/collect_browser_history_form';
import {CollectMultipleFilesForm} from '@app/components/flow_args_form/collect_multiple_files_form';
import {HelpersModule} from '@app/components/flow_args_form/collect_multiple_files_form_helpers/module';
import {CollectSingleFileForm} from '@app/components/flow_args_form/collect_single_file_form';
import {ByteComponentsModule} from '@app/components/form/byte_input/module';
import {DateTimeInputModule} from '@app/components/form/date_time_input/module';
import {GlobExpressionExplanationModule} from '@app/components/form/glob_expression_form_field/module';
import {CodeEditorModule} from '../code_editor/module';
import {FallbackFlowArgsForm} from './fallback_flow_args_form';
import {FlowArgsForm} from './flow_args_form';
import {OsqueryForm} from './osquery_form';
import {OsqueryQueryHelperModule} from './osquery_query_helper/module';
import {TimelineForm} from './timeline_form';
/** Module for the FlowArgsForm component. */
@NgModule({
imports: [
BrowserAnimationsModule,
RouterModule,
CommonModule,
MatCheckboxModule,
MatChipsModule,
ReactiveFormsModule,
MatFormFieldModule,
MatInputModule,
MatIconModule,
MatButtonModule,
MatDialogModule,
ByteComponentsModule,
GlobExpressionExplanationModule,
CodeEditorModule,
OsqueryQueryHelperModule,
HelpersModule,
DateTimeInputModule,
],
declarations: [
FlowArgsForm,
CollectBrowserHistoryForm,
CollectSingleFileForm,
CollectMultipleFilesForm,
OsqueryForm,
TimelineForm,
FallbackFlowArgsForm,
],
entryComponents: [
CollectBrowserHistoryForm,
CollectSingleFileForm,
CollectMultipleFilesForm,
OsqueryForm,
TimelineForm,
FallbackFlowArgsForm,
],
exports: [
FlowArgsForm,
],
})
export class | {
}
| FlowArgsFormModule |
filekeyvaluestore_test.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package keyvaluestore
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/BSNDA/fabric-sdk-go-gm/pkg/common/providers/core"
"github.com/pkg/errors"
)
var storePath = "/tmp/testkeyvaluestore"
func TestDefaultFKVS(t *testing.T) {
testFKVS(t, nil)
}
func TestFKVSWithCustomKeySerializer(t *testing.T) {
keySerializer := func(key interface{}) (string, error) {
keyString, ok := key.(string)
if !ok {
return "", errors.New("converting key to string failed")
}
return filepath.Join(storePath, fmt.Sprintf("mypath/%s/valuefile", keyString)), nil
}
testFKVS(t, keySerializer)
}
func testFKVS(t *testing.T, KeySerializer KeySerializer) {
var store core.KVStore
var err error
store, err = New(
&FileKeyValueStoreOptions{
Path: storePath,
KeySerializer: KeySerializer,
})
if err != nil {
t.Fatalf("New failed [%s]", err)
}
if err1 := cleanup(storePath); err1 != nil {
t.Fatalf("%s", err1)
}
defer cleanup(storePath)
err = store.Store(nil, []byte("1234"))
if err == nil || err.Error() != "key is nil" {
t.Fatal("SetValue(nil, ...) should throw error")
}
err = store.Store("key", nil)
if err == nil || err.Error() != "value is nil" {
t.Fatal("Store(..., nil should throw error")
}
key1 := "key1"
value1 := []byte("value1")
key2 := "key2"
value2 := []byte("value2")
if err1 := store.Store(key1, value1); err1 != nil {
t.Fatalf("SetValue %s failed [%s]", key1, err1)
}
if err1 := store.Store(key2, value2); err1 != nil {
t.Fatalf("SetValue %s failed [%s]", key1, err1)
}
// Check key1, value1
checkKeyValue(store, key1, value1, t)
// Check ke2, value2
checkKeyValue(store, key2, value2, t)
// Check non-existing key
checkNonExistingKey(store, t)
// Check empty string value
checkEmptyStringValue(store, t)
}
func checkKeyValue(store core.KVStore, key string, value []byte, t *testing.T) {
if err := checkStoreValue(store, key, value); err != nil {
t.Fatalf("checkStoreValue %s failed [%s]", key, err)
}
if err := store.Delete(key); err != nil {
t.Fatalf("Delete %s failed [%s]", key, err)
}
if err := checkStoreValue(store, key, nil); err != nil {
t.Fatalf("checkStoreValue %s failed [%s]", key, err)
}
}
func checkNonExistingKey(store core.KVStore, t *testing.T) {
_, err := store.Load("non-existing")
if err == nil || err != core.ErrKeyValueNotFound {
t.Fatal("fetching value for non-existing key should return ErrNotFound")
}
}
func checkEmptyStringValue(store core.KVStore, t *testing.T) {
keyEmptyString := "empty-string"
valueEmptyString := []byte("")
err := store.Store(keyEmptyString, valueEmptyString)
if err != nil {
t.Fatal("setting an empty string value shouldn't fail")
}
if err := checkStoreValue(store, keyEmptyString, valueEmptyString); err != nil {
t.Fatalf("checkStoreValue %s failed [%s]", keyEmptyString, err)
}
}
func TestCreateNewFileKeyValueStore(t *testing.T) {
_, err := New(
&FileKeyValueStoreOptions{
Path: "",
})
if err == nil || err.Error() != "FileKeyValueStore path is empty" {
t.Fatal("File path validation on NewFileKeyValueStore is not working as expected")
}
_, err = New(nil)
if err == nil || err.Error() != "FileKeyValueStoreOptions is nil" {
t.Fatal("File path validation on NewFileKeyValueStore is not working as expected")
}
var store core.KVStore
store, err = New(
&FileKeyValueStoreOptions{
Path: storePath,
})
if err != nil {
t.Fatal("creating a store shouldn't fail")
}
if store == nil {
t.Fatal("creating a store failed")
}
}
func cleanup(storePath string) error {
err := os.RemoveAll(storePath)
if err != nil {
return errors.Wrapf(err, "Cleaning up directory '%s' failed", storePath)
}
return nil
}
func checkStoreValue(store core.KVStore, key interface{}, expected []byte) error {
v, err := store.Load(key)
if err != nil {
if err == core.ErrKeyValueNotFound && expected == nil {
return nil
}
return err
}
if err = compare(v, expected); err != nil |
file, err := store.(*FileKeyValueStore).keySerializer(key)
if err != nil {
return err
}
if expected == nil {
_, err1 := os.Stat(file)
if err1 == nil {
return fmt.Errorf("path shouldn't exist [%s]", file)
}
if !os.IsNotExist(err1) {
return errors.Wrapf(err1, "stat file failed [%s]", file)
}
// Doesn't exist, OK
return nil
}
v, err = ioutil.ReadFile(file)
if err != nil {
return err
}
return compare(v, expected)
}
func compare(v interface{}, expected []byte) error {
var vbytes []byte
var ok bool
if v == nil {
vbytes = nil
} else {
vbytes, ok = v.([]byte)
if !ok {
return errors.New("value is not []byte")
}
}
if !bytes.Equal(vbytes, expected) {
return errors.New("value from store comparison failed")
}
return nil
}
| {
return err
} |
test_config.py | from identifyPropositions import config
def | ():
assert config.get("model_name") == "en_core_web_sm"
def test_config_get_missing():
assert config.get("i_do_not_exist") is None
| test_config_get_existing |
binding_callback_event.rs | /*
*
*
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 1.0.0
*
* Generated by: https://openapi-generator.tech
*/
/// BindingCallbackEvent : Represents the parameters of a call to a provided callback.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct BindingCallbackEvent {
/// ID of the callback being invoked
#[serde(rename = "id", skip_serializing_if = "Option::is_none")] | #[serde(rename = "parameters", skip_serializing_if = "Option::is_none")]
pub parameters: Option<Vec<serde_json::Value>>,
}
impl BindingCallbackEvent {
/// Represents the parameters of a call to a provided callback.
pub fn new() -> BindingCallbackEvent {
BindingCallbackEvent {
id: None,
parameters: None,
}
}
} | pub id: Option<i32>,
/// Callback parameters |
EditDeployment.tsx | import * as React from 'react';
import { FormikBag, Formik } from 'formik';
import { safeLoad } from 'js-yaml';
import { useTranslation } from 'react-i18next';
import { DeploymentConfigModel, DeploymentModel } from '@console/internal/models';
import { K8sResourceKind, k8sUpdate } from '@console/internal/module/k8s';
import { useExtensions, Perspective, isPerspective } from '@console/plugin-sdk';
import { useActivePerspective } from '@console/shared';
import { EditorType } from '@console/shared/src/components/synced-editor/editor-toggle'; | import { EditDeploymentData, EditDeploymentFormikValues } from './utils/edit-deployment-types';
import {
convertDeploymentToEditForm,
convertEditFormToDeployment,
} from './utils/edit-deployment-utils';
import { validationSchema } from './utils/edit-deployment-validation-utils';
export interface EditDeploymentProps {
heading: string;
resource: K8sResourceKind;
name: string;
namespace: string;
}
const EditDeployment: React.FC<EditDeploymentProps> = ({ heading, resource, namespace, name }) => {
const { t } = useTranslation();
const [perspective] = useActivePerspective();
const perspectiveExtensions = useExtensions<Perspective>(isPerspective);
const initialValues = React.useRef({
editorType: EditorType.Form,
yamlData: '',
formData: convertDeploymentToEditForm(resource),
});
const handleSubmit = (
values: EditDeploymentFormikValues,
actions: FormikBag<any, EditDeploymentData>,
) => {
let deploymentRes: K8sResourceKind;
const resourceType = getResourcesType(resource);
if (values.editorType === EditorType.YAML) {
try {
deploymentRes = safeLoad(values.yamlData);
} catch (err) {
actions.setStatus({
submitSuccess: '',
submitError: t('devconsole~Invalid YAML - {{err}}', { err }),
});
return null;
}
} else {
deploymentRes = convertEditFormToDeployment(values.formData, resource);
}
const resourceCall = k8sUpdate(
resourceType === Resources.OpenShift ? DeploymentConfigModel : DeploymentModel,
deploymentRes,
namespace,
name,
);
return resourceCall
.then((res: K8sResourceKind) => {
const resVersion = res.metadata.resourceVersion;
actions.setStatus({
submitError: '',
submitSuccess: t('devconsole~{{name}} has been updated to version {{resVersion}}', {
name,
resVersion,
}),
});
handleRedirect(namespace, perspective, perspectiveExtensions);
})
.catch((e) => {
const err = e.message;
actions.setStatus({ submitSuccess: '', submitError: t('devconsole~{{err}}', { err }) });
});
};
return (
<Formik
initialValues={initialValues.current}
onSubmit={handleSubmit}
validationSchema={validationSchema()}
enableReinitialize
>
{(formikProps) => {
return <EditDeploymentForm {...formikProps} heading={heading} resource={resource} />;
}}
</Formik>
);
};
export default EditDeployment; | import { getResourcesType } from '../edit-application/edit-application-utils';
import { handleRedirect } from '../import/import-submit-utils';
import { Resources } from '../import/import-types';
import EditDeploymentForm from './EditDeploymentForm'; |
YouTube.tsx | import * as React from 'react';
function | (
props: React.SVGProps<SVGSVGElement>,
svgRef?: React.Ref<SVGSVGElement>
) {
return (
<svg
width="1.5em"
height="1.5em"
strokeWidth={1.5}
viewBox="0 0 24 24"
fill="none"
xmlns="http://www.w3.org/2000/svg"
color="currentColor"
ref={svgRef}
{...props}
>
<path
d="M14 12l-3.5 2v-4l3.5 2z"
fill="currentColor"
stroke="currentColor"
strokeLinecap="round"
strokeLinejoin="round"
/>
<path
d="M2 12.707v-1.415c0-2.895 0-4.343.905-5.274.906-.932 2.332-.972 5.183-1.053C9.438 4.927 10.818 4.9 12 4.9c1.181 0 2.561.027 3.912.065 2.851.081 4.277.121 5.182 1.053.906.931.906 2.38.906 5.274v1.415c0 2.896 0 4.343-.905 5.275-.906.931-2.331.972-5.183 1.052-1.35.039-2.73.066-3.912.066-1.181 0-2.561-.027-3.912-.066-2.851-.08-4.277-.12-5.183-1.052C2 17.05 2 15.602 2 12.708z"
stroke="currentColor"
/>
</svg>
);
}
const ForwardRef = React.forwardRef(SvgYouTube);
export default ForwardRef;
| SvgYouTube |
example_function_order.py | #pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring,no-self-use,too-few-public-methods
def | (): # First should be defined after second, to keep call order
pass
def second():
first()
class Example:
def first(self): # First should be defined after second, to keep call order
pass
def second(self):
self.first()
def before(self): # 'Before' is placed correctly before 'after'
self.after()
def after(self):
pass
class ExampleInner:
def outer(self):
def inner(): # Inner functions are an exception, these must be defined before their usage
pass
inner()
| first |
extendplate_test.go | package extendplate
import (
"bytes"
"path/filepath"
"testing"
)
func | (t *testing.T) {
set, err := ParseDir("testdata", "*.html", nil)
if err != nil {
t.Fatal(err)
}
for _, path := range []string{
"base/dashboard.html",
"base/billing.html",
"base/docs/release.html",
"/base/dashboard.html",
"/base/billing.html",
"/base/docs/release.html",
} {
t.Run(filepath.Base(path), func(t *testing.T) {
path := path
tmpl := set.Lookup(path)
if tmpl == nil {
t.Fatalf("no template found for %s", path)
}
var w bytes.Buffer
if err := tmpl.Execute(&w, nil); err != nil {
t.Error(err)
}
})
}
}
| TestNesting |
admission.go | /*
Copyright 2014 Google Inc. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package autoprovision
import (
"io"
"github.com/GoogleCloudPlatform/kubernetes/pkg/admission"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/cnaize/kubernetes/pkg/api"
"github.com/cnaize/kubernetes/pkg/api/latest"
)
func init() |
// provision is an implementation of admission.Interface.
// It looks at all incoming requests in a namespace context, and if the namespace does not exist, it creates one.
// It is useful in deployments that do not want to restrict creation of a namespace prior to its usage.
type provision struct {
client client.Interface
store cache.Store
}
func (p *provision) Admit(a admission.Attributes) (err error) {
// only handle create requests
if a.GetOperation() != "CREATE" {
return nil
}
defaultVersion, kind, err := latest.RESTMapper.VersionAndKindForResource(a.GetResource())
if err != nil {
return err
}
mapping, err := latest.RESTMapper.RESTMapping(kind, defaultVersion)
if err != nil {
return err
}
if mapping.Scope.Name() != meta.RESTScopeNameNamespace {
return nil
}
namespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
Name: a.GetNamespace(),
Namespace: "",
},
Status: api.NamespaceStatus{},
}
_, exists, err := p.store.Get(namespace)
if err != nil {
return err
}
if exists {
return nil
}
_, err = p.client.Namespaces().Create(namespace)
if err != nil && !errors.IsAlreadyExists(err) {
return err
}
return nil
}
func NewProvision(c client.Interface) admission.Interface {
store := cache.NewStore(cache.MetaNamespaceKeyFunc)
reflector := cache.NewReflector(
&cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return c.Namespaces().List(labels.Everything(), fields.Everything())
},
WatchFunc: func(resourceVersion string) (watch.Interface, error) {
return c.Namespaces().Watch(labels.Everything(), fields.Everything(), resourceVersion)
},
},
&api.Namespace{},
store,
0,
)
reflector.Run()
return &provision{
client: c,
store: store,
}
}
| {
admission.RegisterPlugin("NamespaceAutoProvision", func(client client.Interface, config io.Reader) (admission.Interface, error) {
return NewProvision(client), nil
})
} |
kdf.rs | extern crate wasm_bindgen_test;
use std::str::from_utf8;
use anyhow::*;
use bsv_wasm::{hash::Hash, KDF};
use pbkdf2::{
password_hash::{Ident, PasswordHasher, Salt, SaltString},
Params, Pbkdf2,
};
use wasm_bindgen_test::*;
wasm_bindgen_test::wasm_bindgen_test_configure!();
#[test]
#[wasm_bindgen_test]
fn pbkdf2_sha256_hash_test() {
let password = "stronk-password".as_bytes();
let salt = "snails".as_bytes();
let rounds: u32 = 10000;
let kdf = KDF::pbkdf2(
password.into(),
Some(salt.into()),
bsv_wasm::PBKDF2Hashes::SHA256,
rounds,
32,
);
// validated against twetch/sycamore-pro and https://neurotechnics.com/tools/pbkdf2-test
assert_eq!(
kdf.get_hash().to_hex(),
"ffb5bb1b78211b1d275f32c4ba426f0875e80640fbf313eac06ba6e79225b237"
);
}
#[test]
#[wasm_bindgen_test]
fn pbkdf2_sha256_hash_test_2() {
let password = "stronk-password".as_bytes();
let salt = "1ae0ee429ffca864413b59edd5612c1a86b097411280a6dfa376d91c6eba5a20".as_bytes(); // sha256 of [email protected]
let rounds: u32 = 10000;
let kdf = KDF::pbkdf2(
password.into(),
Some(salt.into()),
bsv_wasm::PBKDF2Hashes::SHA256,
rounds,
32,
);
// validated against twetch/sycamore-pro and https://neurotechnics.com/tools/pbkdf2-test
assert_eq!(
kdf.get_hash().to_hex(),
"f064d740b65941152755829e2b48578b259bc9bfc8c3af7b0d93a5ca677f259d"
);
}
} | #[cfg(test)]
mod kdf_tests { |
|
resource.go | package resource
import (
"context"
"fmt"
"time"
"github.com/kyma-project/rafter/tests/pkg/retry"
"github.com/pkg/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
watchtools "k8s.io/client-go/tools/watch"
)
type Resource struct {
ResCli dynamic.ResourceInterface
namespace string
kind string
log func(format string, args ...interface{})
}
func | (dynamicCli dynamic.Interface, s schema.GroupVersionResource, namespace string, logFn func(format string, args ...interface{})) *Resource {
resCli := dynamicCli.Resource(s).Namespace(namespace)
return &Resource{ResCli: resCli, namespace: namespace, kind: s.Resource, log: logFn}
}
func (r *Resource) Create(res interface{}, callbacks ...func(...interface{})) (string, error) {
var resourceVersion string
u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(res)
if err != nil {
return resourceVersion, errors.Wrapf(err, "while converting resource %s %s to unstructured", r.kind, res)
}
unstructuredObj := &unstructured.Unstructured{
Object: u,
}
err = retry.OnTimeout(retry.DefaultBackoff, func() error {
var resource *unstructured.Unstructured
for _, callback := range callbacks {
callback(fmt.Sprintf("[CREATE]: %s", unstructuredObj))
}
resource, err = r.ResCli.Create(unstructuredObj, metav1.CreateOptions{})
if err != nil {
return err
}
resourceVersion = resource.GetResourceVersion()
return nil
}, callbacks...)
if err != nil {
return resourceVersion, errors.Wrapf(err, "while creating resource %s ", unstructuredObj.GetKind())
}
return resourceVersion, nil
}
func (r *Resource) Get(name string, callbacks ...func(...interface{})) (*unstructured.Unstructured, error) {
var result *unstructured.Unstructured
err := retry.OnTimeout(retry.DefaultBackoff, func() error {
var err error
result, err = r.ResCli.Get(name, metav1.GetOptions{})
return err
}, callbacks...)
if err != nil {
return nil, errors.Wrapf(err, "while getting resource %s '%s'", r.kind, name)
}
for _, callback := range callbacks {
namespace := "-"
if r.namespace != "" {
namespace = r.namespace
}
callback(fmt.Sprintf("GET %s: namespace:%s kind:%s\n%v", name, namespace, r.kind, result))
}
return result, nil
}
func (r *Resource) Delete(name string, timeout time.Duration, callbacks ...func(...interface{})) error {
var initialResourceVersion string
err := retry.OnTimeout(retry.DefaultBackoff, func() error {
u, err := r.ResCli.Get(name, metav1.GetOptions{})
if err != nil {
return err
}
initialResourceVersion = u.GetResourceVersion()
return nil
}, callbacks...)
if apierrors.IsNotFound(err) {
return nil
}
if err != nil {
return err
}
err = retry.WithIgnoreOnNotFound(retry.DefaultBackoff, func() error {
for _, callback := range callbacks {
namespace := "-"
if r.namespace != "" {
namespace = r.namespace
}
callback(fmt.Sprintf("DELETE %s: namespace:%s name:%s", r.kind, namespace, name))
}
return r.ResCli.Delete(name, &metav1.DeleteOptions{})
}, callbacks...)
if err != nil {
return err
}
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
condition := func(event watch.Event) (bool, error) {
if event.Type != watch.Deleted {
return false, nil
}
u, ok := event.Object.(*unstructured.Unstructured)
if !ok || u.GetName() != name {
return false, nil
}
return true, nil
}
_, err = watchtools.Until(ctx, initialResourceVersion, r.ResCli, condition)
if err != nil {
return err
}
return nil
}
| New |
resultset_sort.go | package proxy
import (
"bytes"
"fmt"
"github.com/maxencoder/mixer/hack"
. "github.com/siddontang/go-mysql/mysql"
)
const (
SortAsc = "asc"
SortDesc = "desc"
)
type SortKey struct {
//name of the field
Name string
Direction string
//column index of the field
column int
}
type ResultsetSorter struct {
*Resultset
sk []SortKey
}
func | (r *Resultset, sk []SortKey) (*ResultsetSorter, error) {
s := new(ResultsetSorter)
s.Resultset = r
for i, k := range sk {
if column, ok := r.FieldNames[k.Name]; ok {
sk[i].column = column
} else {
return nil, fmt.Errorf("key %s is not in resultset fields, cannot sort", k.Name)
}
}
s.sk = sk
return s, nil
}
func (r *ResultsetSorter) Len() int {
return r.RowNumber()
}
func (r *ResultsetSorter) Less(i, j int) bool {
v1 := r.Values[i]
v2 := r.Values[j]
for _, k := range r.sk {
v := cmpValue(v1[k.column], v2[k.column])
if k.Direction == SortDesc {
v = -v
}
if v < 0 {
return true
} else if v > 0 {
return false
}
//equal, cmp next key
}
return false
}
//compare value using asc
func cmpValue(v1 interface{}, v2 interface{}) int {
if v1 == nil && v2 == nil {
return 0
} else if v1 == nil {
return -1
} else if v2 == nil {
return 1
}
switch v := v1.(type) {
case string:
s := v2.(string)
return bytes.Compare(hack.Slice(v), hack.Slice(s))
case []byte:
s := v2.([]byte)
return bytes.Compare(v, s)
case int64:
s := v2.(int64)
if v < s {
return -1
} else if v > s {
return 1
} else {
return 0
}
case uint64:
s := v2.(uint64)
if v < s {
return -1
} else if v > s {
return 1
} else {
return 0
}
case float64:
s := v2.(float64)
if v < s {
return -1
} else if v > s {
return 1
} else {
return 0
}
default:
//can not go here
panic(fmt.Sprintf("invalid type %T", v))
}
}
func (r *ResultsetSorter) Swap(i, j int) {
r.Values[i], r.Values[j] = r.Values[j], r.Values[i]
r.RowDatas[i], r.RowDatas[j] = r.RowDatas[j], r.RowDatas[i]
}
| NewResultSetSorter |
facts.py | import random
import mysql.connector
import yaml
from os import path as os_path
config_path = os_path.abspath(os_path.join(os_path.dirname(__file__), 'config.yml'))
data = yaml.safe_load(open(config_path))
def extract_fact(user_id):
| mydb = mysql.connector.connect(
host=data['DB_HOST'],
user=data['DB_USERNAME'],
password=data['DB_PASSWORD'],
database=data['DB_NAME'])
mycursor = mydb.cursor(buffered=True)
mycursor.execute(f"SELECT * FROM facts WHERE user_id = {user_id} OR privacy = 'Public' ORDER BY RAND() LIMIT 1 ")
myresult = mycursor.fetchone()
mycursor.close()
mydb.close()
# title = myresult[5]
# link = myresult[6]
# note = myresult[2]
# code = myresult[3]
title, link, note, code = '', '', '', ''
if len(myresult[5]) > 0:
title = f'<b>{myresult[5]}</b>\n\n'
if len(myresult[6]) > 0:
link = f'<a href="{myresult[6]}">Источник</a>'
if len(myresult[2]) > 0:
note = f'<i>{myresult[2]}</i>\n\n'
if len(myresult[3]) > 0:
code = f'<pre><code class="language-python">{myresult[3]}</code></pre>\n\n'
message = f'{title}{code}{note}{link}'
# message = f'<b>{title}</b>\n\n<pre><code class="language-python">{code}</code></pre>\n\n<i>{note}</i>\n\n<a href="{link}">Источник</a>'
return message
|
|
work_io.py | __all__ = ['Middleware']
from gevent.queue import Queue
# Work stack
class Middle | # cache of redis
zeus = Queue()
# Trash
apollo = Queue()
theseus = {}
# 共享任务队列
poseidon = Queue()
hera = Queue()
# FIXME
# Bug of unknown cause: identical variables generated with the dict(zip()) approach
# unexpectedly produce different output after passing through the same function
cache_redis_queue = {'ssr': {}, 'v2ray': {}}
# cache_redis_queue = dict(zip(CRAWLER_SEQUENCE, [{}] * CRAWLER_SEQUENCE.__len__()))
| ware:
|
player.js | /*! @license
* Shaka Player
* Copyright 2016 Google LLC
* SPDX-License-Identifier: Apache-2.0
*/
goog.provide('shaka.Player');
goog.require('goog.asserts');
goog.require('shaka.Deprecate');
goog.require('shaka.log');
goog.require('shaka.media.AdaptationSetCriteria');
goog.require('shaka.media.BufferingObserver');
goog.require('shaka.media.DrmEngine');
goog.require('shaka.media.ExampleBasedCriteria');
goog.require('shaka.media.ManifestParser');
goog.require('shaka.media.MediaSourceEngine');
goog.require('shaka.media.MediaSourcePlayhead');
goog.require('shaka.media.MetaSegmentIndex');
goog.require('shaka.media.ClosedCaptionParser');
goog.require('shaka.media.PlayRateController');
goog.require('shaka.media.Playhead');
goog.require('shaka.media.PlayheadObserverManager');
goog.require('shaka.media.PreferenceBasedCriteria');
goog.require('shaka.media.RegionObserver');
goog.require('shaka.media.RegionTimeline');
goog.require('shaka.media.SegmentIndex');
goog.require('shaka.media.SrcEqualsPlayhead');
goog.require('shaka.media.StreamingEngine');
goog.require('shaka.media.TimeRangesUtils');
goog.require('shaka.net.NetworkingEngine');
goog.require('shaka.routing.Walker');
goog.require('shaka.text.SimpleTextDisplayer');
goog.require('shaka.text.TextEngine');
goog.require('shaka.text.UITextDisplayer');
goog.require('shaka.text.WebVttGenerator');
goog.require('shaka.util.AbortableOperation');
goog.require('shaka.util.BufferUtils');
goog.require('shaka.util.ConfigUtils');
goog.require('shaka.util.Error');
goog.require('shaka.util.EventManager');
goog.require('shaka.util.FakeEvent');
goog.require('shaka.util.FakeEventTarget');
goog.require('shaka.util.Functional');
goog.require('shaka.util.IDestroyable');
goog.require('shaka.util.LanguageUtils');
goog.require('shaka.util.ManifestParserUtils');
goog.require('shaka.util.MediaReadyState');
goog.require('shaka.util.MimeUtils');
goog.require('shaka.util.ObjectUtils');
goog.require('shaka.util.Platform');
goog.require('shaka.util.PlayerConfiguration');
goog.require('shaka.util.PublicPromise');
goog.require('shaka.util.Stats');
goog.require('shaka.util.StreamUtils');
goog.require('shaka.util.Timer');
goog.requireType('shaka.media.IClosedCaptionParser');
goog.requireType('shaka.media.PresentationTimeline');
goog.requireType('shaka.routing.Node');
goog.requireType('shaka.routing.Payload');
/**
* @event shaka.Player.ErrorEvent
* @description Fired when a playback error occurs.
* @property {string} type
* 'error'
* @property {!shaka.util.Error} detail
* An object which contains details on the error. The error's
* <code>category</code> and <code>code</code> properties will identify the
* specific error that occurred. In an uncompiled build, you can also use the
* <code>message</code> and <code>stack</code> properties to debug.
* @exportDoc
*/
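// Usage sketch (not part of the original source): subscribing to the 'error'
// event documented above. Assumes `player` is an already-constructed
// shaka.Player instance.
//
//   player.addEventListener('error', (event) => {
//     const error = event.detail;
//     console.error('Shaka error', error.category, error.code);
//   });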
/**
* @event shaka.Player.StateChangeEvent
* @description Fired when the player changes load states.
* @property {string} type
* 'onstatechange'
* @property {string} state
* The name of the state that the player just entered.
* @exportDoc
*/
/**
* @event shaka.Player.StateIdleEvent
* @description Fired when the player has stopped changing states and will
* remain idle until a new state change request (e.g. <code>load</code>,
* <code>attach</code>, etc.) is made.
* @property {string} type
* 'onstateidle'
* @property {string} state
* The name of the state that the player stopped in.
* @exportDoc
*/
/**
* @event shaka.Player.EmsgEvent
* @description Fired when a non-typical emsg is found in a segment.
* @property {string} type
* 'emsg'
* @property {shaka.extern.EmsgInfo} detail
* An object which contains the content of the emsg box.
* @exportDoc
*/
/**
* @event shaka.Player.DrmSessionUpdateEvent
* @description Fired when the CDM has accepted the license response.
* @property {string} type
* 'drmsessionupdate'
* @exportDoc
*/
/**
* @event shaka.Player.TimelineRegionAddedEvent
* @description Fired when a media timeline region is added.
* @property {string} type
* 'timelineregionadded'
* @property {shaka.extern.TimelineRegionInfo} detail
* An object which contains a description of the region.
* @exportDoc
*/
/**
* @event shaka.Player.TimelineRegionEnterEvent
* @description Fired when the playhead enters a timeline region.
* @property {string} type
* 'timelineregionenter'
* @property {shaka.extern.TimelineRegionInfo} detail
* An object which contains a description of the region.
* @exportDoc
*/
/**
* @event shaka.Player.TimelineRegionExitEvent
* @description Fired when the playhead exits a timeline region.
* @property {string} type
* 'timelineregionexit'
* @property {shaka.extern.TimelineRegionInfo} detail
* An object which contains a description of the region.
* @exportDoc
*/
/**
* @event shaka.Player.BufferingEvent
* @description Fired when the player's buffering state changes.
* @property {string} type
* 'buffering'
* @property {boolean} buffering
* True when the Player enters the buffering state.
* False when the Player leaves the buffering state.
* @exportDoc
*/
/**
* @event shaka.Player.LoadingEvent
* @description Fired when the player begins loading. The start of loading is
* defined as when the user has communicated intent to load content (i.e.
* <code>Player.load</code> has been called).
* @property {string} type
* 'loading'
* @exportDoc
*/
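// Usage sketch (not part of the original source): the 'loading' event fires once
// intent to load has been signalled via load(). `manifestUri` is a placeholder.
//
//   player.addEventListener('loading', () => console.log('load started'));
//   player.load(manifestUri).then(() => console.log('load finished'));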
/**
* @event shaka.Player.LoadedEvent
* @description Fired when the player ends the load.
* @property {string} type
* 'loaded'
* @exportDoc
*/
/**
* @event shaka.Player.UnloadingEvent
* @description Fired when the player unloads or fails to load.
* Used by the Cast receiver to determine idle state.
* @property {string} type
* 'unloading'
* @exportDoc
*/
/**
* @event shaka.Player.TextTrackVisibilityEvent
* @description Fired when text track visibility changes.
* @property {string} type
* 'texttrackvisibility'
* @exportDoc
*/
/**
* @event shaka.Player.TracksChangedEvent
* @description Fired when the list of tracks changes. For example, this will
* happen when new tracks are added/removed or when track restrictions change.
* @property {string} type
* 'trackschanged'
* @exportDoc
*/
/**
* @event shaka.Player.AdaptationEvent
* @description Fired when an automatic adaptation causes the active tracks
* to change. Does not fire when the application calls
* <code>selectVariantTrack()</code>, <code>selectTextTrack()</code>,
* <code>selectAudioLanguage()</code>, or <code>selectTextLanguage()</code>.
* @property {string} type
* 'adaptation'
* @exportDoc
*/
/**
* @event shaka.Player.VariantChangedEvent
* @description Fired when a call from the application caused a variant change.
* Can be triggered by calls to <code>selectVariantTrack()</code> or
* <code>selectAudioLanguage()</code>. Does not fire when an automatic
* adaptation causes a variant change.
* @property {string} type
* 'variantchanged'
* @exportDoc
*/
| * <code>selectTextLanguage()</code>.
* @property {string} type
* 'textchanged'
* @exportDoc
*/
/**
* @event shaka.Player.ExpirationUpdatedEvent
* @description Fired when there is a change in the expiration times of an
* EME session.
* @property {string} type
* 'expirationupdated'
* @exportDoc
*/
/**
* @event shaka.Player.LargeGapEvent
* @description Fired when the playhead enters a large gap. If the
* <code>streaming.jumpLargeGaps</code> configuration is set, the default
* action of this event is to jump the gap; this can be prevented by calling
* <code>preventDefault()</code> on the event object.
* @property {string} type
* 'largegap'
* @property {number} currentTime
* The current time of the playhead.
* @property {number} gapSize
* The size of the gap, in seconds.
* @exportDoc
*/
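// Usage sketch (not part of the original source): as described above, the default
// gap-jumping behavior for 'largegap' can be suppressed with preventDefault().
//
//   player.addEventListener('largegap', (event) => {
//     console.log('gap at', event.currentTime, 'size', event.gapSize);
//     event.preventDefault();  // keep the playhead where it is
//   });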
/**
* @event shaka.Player.ManifestParsedEvent
* @description Fired after the manifest has been parsed, but before anything
* else happens. The manifest may contain streams that will be filtered out,
* at this stage of the loading process.
* @property {string} type
* 'manifestparsed'
* @exportDoc
*/
/**
* @event shaka.Player.MetadataEvent
* @description Triggers after metadata associated with the stream is found.
* Usually they are metadata of type ID3.
* @property {string} type
* 'metadata'
* @property {number} startTime
* The time that describes the beginning of the range of the metadata to
* which the cue applies.
* @property {?number} endTime
* The time that describes the end of the range of the metadata to which
* the cue applies.
* @property {string} metadataType
* Type of metadata. Eg: org.id3 or org.mp4ra
* @property {shaka.extern.ID3Metadata} payload
* The metadata itself
* @exportDoc
*/
/**
* @event shaka.Player.StreamingEvent
* @description Fired after the manifest has been parsed and track information
* is available, but before streams have been chosen and before any segments
* have been fetched. You may use this event to configure the player based on
* information found in the manifest.
* @property {string} type
* 'streaming'
* @exportDoc
*/
/**
* @event shaka.Player.AbrStatusChangedEvent
* @description Fired when the state of abr has been changed.
* (Enabled or disabled).
* @property {string} type
* 'abrstatuschanged'
* @property {boolean} newStatus
* The new status of the application. True for 'is enabled' and
* false otherwise.
* @exportDoc
*/
/**
* @event shaka.Player.RateChangeEvent
* @description Fired when the video's playback rate changes.
* This allows the PlayRateController to update its internal rate field,
* before the UI updates playback button with the newest playback rate.
* @property {string} type
* 'ratechange'
* @exportDoc
*/
/**
* @event shaka.Player.SessionDataEvent
* @description Fired when the manifest parser find info about session data.
* Specification: https://tools.ietf.org/html/rfc8216#section-4.3.4.4
* @property {string} type
* 'sessiondata'
* @property {string} id
* The id of the session data.
* @property {string} uri
* The uri with the session data info.
* @property {string} language
* The language of the session data.
* @property {string} value
* The value of the session data.
* @exportDoc
*/
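// Illustrative sketch (not part of this file's logic): an application would
// typically subscribe to the events documented above directly on the Player
// instance, which extends FakeEventTarget:
//
//   player.addEventListener('adaptation', () => {
//     // An automatic ABR decision changed the active variant.
//   });
//   player.addEventListener('abrstatuschanged', (event) => {
//     console.log('ABR enabled:', event['newStatus']);
//   });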
/**
* @summary The main player object for Shaka Player.
*
* @implements {shaka.util.IDestroyable}
* @export
*/
shaka.Player = class extends shaka.util.FakeEventTarget {
/**
* @param {HTMLMediaElement=} mediaElement
* When provided, the player will attach to <code>mediaElement</code>,
* similar to calling <code>attach</code>. When not provided, the player
* will remain detached.
* @param {function(shaka.Player)=} dependencyInjector Optional callback
* which is called to inject mocks into the Player. Used for testing.
*/
constructor(mediaElement, dependencyInjector) {
super();
/** @private {shaka.Player.LoadMode} */
this.loadMode_ = shaka.Player.LoadMode.NOT_LOADED;
/** @private {HTMLMediaElement} */
this.video_ = null;
/** @private {HTMLElement} */
this.videoContainer_ = null;
/**
* Since we may not always have a text displayer created (e.g. before |load|
* is called), we need to track what text visibility SHOULD be so that we
* can apply it when we create the text displayer. When we create our
* text displayer, we will use this to show (or not show) text as per the
* user's requests.
*
* @private {boolean}
*/
this.isTextVisible_ = false;
/** @private {shaka.util.EventManager} */
this.eventManager_ = new shaka.util.EventManager();
/** @private {shaka.net.NetworkingEngine} */
this.networkingEngine_ = null;
/** @private {shaka.media.DrmEngine} */
this.drmEngine_ = null;
/** @private {shaka.media.MediaSourceEngine} */
this.mediaSourceEngine_ = null;
/** @private {shaka.media.Playhead} */
this.playhead_ = null;
/**
* The playhead observers are used to monitor the position of the playhead
* and some other source of data (e.g. buffered content), and raise events.
*
* @private {shaka.media.PlayheadObserverManager}
*/
this.playheadObservers_ = null;
/**
* This is our control over the playback rate of the media element. This
* provides the missing functionality that we need to provide trick play,
* for example a negative playback rate.
*
* @private {shaka.media.PlayRateController}
*/
this.playRateController_ = null;
// We use the buffering observer and timer to track when we move from having
// enough buffered content to not enough. They only exist when content has
// been loaded and are not re-used between loads.
/** @private {shaka.util.Timer} */
this.bufferPoller_ = null;
/** @private {shaka.media.BufferingObserver} */
this.bufferObserver_ = null;
/** @private {shaka.media.RegionTimeline} */
this.regionTimeline_ = null;
/** @private {shaka.media.StreamingEngine} */
this.streamingEngine_ = null;
/** @private {shaka.extern.ManifestParser} */
this.parser_ = null;
/** @private {?shaka.extern.ManifestParser.Factory} */
this.parserFactory_ = null;
/** @private {?shaka.extern.Manifest} */
this.manifest_ = null;
/** @private {?string} */
this.assetUri_ = null;
/** @private {shaka.extern.AbrManager} */
this.abrManager_ = null;
/**
* The factory that was used to create the abrManager_ instance.
* @private {?shaka.extern.AbrManager.Factory}
*/
this.abrManagerFactory_ = null;
/**
* Contains an ID for use with creating streams. The manifest parser should
* start with small IDs, so this starts with a large one.
* @private {number}
*/
this.nextExternalStreamId_ = 1e9;
/** @private {?shaka.extern.PlayerConfiguration} */
this.config_ = this.defaultConfig_();
/**
* The TextDisplayerFactory that was last used to make a text displayer.
* Stored so that we can tell if a new type of text displayer is desired.
* @private {?shaka.extern.TextDisplayer.Factory}
*/
this.lastTextFactory_;
/** @private {{width: number, height: number}} */
this.maxHwRes_ = {width: Infinity, height: Infinity};
/** @private {shaka.util.Stats} */
this.stats_ = null;
/** @private {!shaka.media.AdaptationSetCriteria} */
this.currentAdaptationSetCriteria_ =
new shaka.media.PreferenceBasedCriteria(
this.config_.preferredAudioLanguage,
this.config_.preferredVariantRole,
this.config_.preferredAudioChannelCount);
/** @private {string} */
this.currentTextLanguage_ = this.config_.preferredTextLanguage;
/** @private {string} */
this.currentTextRole_ = this.config_.preferredTextRole;
/** @private {boolean} */
this.currentTextForced_ = this.config_.preferForcedSubs;
/** @private {!Array.<function():(!Promise|undefined)>} */
this.cleanupOnUnload_ = [];
if (dependencyInjector) {
dependencyInjector(this);
}
this.networkingEngine_ = this.createNetworkingEngine();
this.networkingEngine_.setForceHTTPS(this.config_.streaming.forceHTTPS);
/** @private {shaka.extern.IAdManager} */
this.adManager_ = null;
if (shaka.Player.adManagerFactory_) {
this.adManager_ =
shaka.util.Functional.callFactory(shaka.Player.adManagerFactory_);
}
// If the browser comes back online after being offline, then try to play
// again.
this.eventManager_.listen(window, 'online', () => {
this.retryStreaming();
});
/** @private {shaka.routing.Node} */
this.detachNode_ = {name: 'detach'};
/** @private {shaka.routing.Node} */
this.attachNode_ = {name: 'attach'};
/** @private {shaka.routing.Node} */
this.unloadNode_ = {name: 'unload'};
/** @private {shaka.routing.Node} */
this.parserNode_ = {name: 'manifest-parser'};
/** @private {shaka.routing.Node} */
this.manifestNode_ = {name: 'manifest'};
/** @private {shaka.routing.Node} */
this.mediaSourceNode_ = {name: 'media-source'};
/** @private {shaka.routing.Node} */
this.drmNode_ = {name: 'drm-engine'};
/** @private {shaka.routing.Node} */
this.loadNode_ = {name: 'load'};
/** @private {shaka.routing.Node} */
this.srcEqualsDrmNode_ = {name: 'src-equals-drm-engine'};
/** @private {shaka.routing.Node} */
this.srcEqualsNode_ = {name: 'src-equals'};
const AbortableOperation = shaka.util.AbortableOperation;
const actions = new Map();
actions.set(this.attachNode_, (has, wants) => {
return AbortableOperation.notAbortable(this.onAttach_(has, wants));
});
actions.set(this.detachNode_, (has, wants) => {
return AbortableOperation.notAbortable(this.onDetach_(has, wants));
});
actions.set(this.unloadNode_, (has, wants) => {
return AbortableOperation.notAbortable(this.onUnload_(has, wants));
});
actions.set(this.mediaSourceNode_, (has, wants) => {
const p = this.onInitializeMediaSourceEngine_(has, wants);
return AbortableOperation.notAbortable(p);
});
actions.set(this.parserNode_, (has, wants) => {
const p = this.onInitializeParser_(has, wants);
return AbortableOperation.notAbortable(p);
});
actions.set(this.manifestNode_, (has, wants) => {
// This action is actually abortable, so unlike the other callbacks, this
// one will return an abortable operation.
return this.onParseManifest_(has, wants);
});
actions.set(this.drmNode_, (has, wants) => {
const p = this.onInitializeDrm_(has, wants);
return AbortableOperation.notAbortable(p);
});
actions.set(this.loadNode_, (has, wants) => {
return AbortableOperation.notAbortable(this.onLoad_(has, wants));
});
actions.set(this.srcEqualsDrmNode_, (has, wants) => {
const p = this.onInitializeSrcEqualsDrm_(has, wants);
return AbortableOperation.notAbortable(p);
});
actions.set(this.srcEqualsNode_, (has, wants) => {
return this.onSrcEquals_(has, wants);
});
/** @private {shaka.routing.Walker.Implementation} */
const walkerImplementation = {
getNext: (at, has, goingTo, wants) => {
return this.getNextStep_(at, has, goingTo, wants);
},
enterNode: (node, has, wants) => {
this.dispatchEvent(this.makeEvent_(
/* name= */ shaka.Player.EventName.OnStateChange,
/* data= */ {'state': node.name}));
const action = actions.get(node);
return action(has, wants);
},
handleError: async (has, error) => {
shaka.log.warning('The walker saw an error:');
if (error instanceof shaka.util.Error) {
shaka.log.warning('Error Code:', error.code);
} else {
shaka.log.warning('Error Message:', error.message);
shaka.log.warning('Error Stack:', error.stack);
}
// Regardless of what state we were in, if there is an error, we unload.
// This ensures that any initialized system will be torn-down and we
// will go back to a safe foundation. We assume that the media element
// is always safe to use after an error.
await this.onUnload_(has, shaka.Player.createEmptyPayload_());
// There are only two nodes that come before we start loading content,
// attach and detach. If we have a media element, it means we were
// attached to the element, and we can safely return to the attach state
// (we assume that the video element is always re-usable). We favor
// returning to the attach node since it means that the app won't need
// to re-attach if it saw an error.
return has.mediaElement ? this.attachNode_ : this.detachNode_;
},
onIdle: (node) => {
this.dispatchEvent(this.makeEvent_(
/* name= */ shaka.Player.EventName.OnStateIdle,
/* data= */ {'state': node.name}));
},
};
/** @private {shaka.routing.Walker} */
this.walker_ = new shaka.routing.Walker(
this.detachNode_,
shaka.Player.createEmptyPayload_(),
walkerImplementation);
// Even though |attach| will start in later interpreter cycles, it should be
// the LAST thing we do in the constructor because conceptually it relies on
// player having been initialized.
if (mediaElement) {
this.attach(mediaElement, /* initializeMediaSource= */ true);
}
}
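// Illustrative construction sketch (assumes an existing <video> element named
// |video|; not executed as part of this file):
//
//   // Attach during construction...
//   const player = new shaka.Player(video);
//   // ...or construct detached and attach later:
//   const detachedPlayer = new shaka.Player();
//   await detachedPlayer.attach(video);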
/**
* @param {!shaka.Player.EventName} name
* @param {Object=} data
* @return {!shaka.util.FakeEvent}
* @private
*/
makeEvent_(name, data) {
return new shaka.util.FakeEvent(name, data);
}
/**
* After destruction, a Player object cannot be used again.
*
* @override
* @export
*/
async destroy() {
// Make sure we only execute the destroy logic once.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return;
}
// Mark as "dead". This should stop external-facing calls from changing our
// internal state any more. This will stop calls to |attach|, |detach|, etc.
// from interrupting our final move to the detached state.
this.loadMode_ = shaka.Player.LoadMode.DESTROYED;
// Because we have set |loadMode_| to |DESTROYED| we can't call |detach|. We
// must talk to |this.walker_| directly.
const events = this.walker_.startNewRoute((currentPayload) => {
return {
node: this.detachNode_,
payload: shaka.Player.createEmptyPayload_(),
interruptible: false,
};
});
// Wait until the detach has finished so that we don't interrupt it by
// calling |destroy| on |this.walker_|. To avoid failing here, we always
// resolve the promise.
await new Promise((resolve) => {
events.onStart = () => {
shaka.log.info('Preparing to destroy walker...');
};
events.onEnd = () => {
resolve();
};
events.onCancel = () => {
goog.asserts.assert(false,
'Our final detach call should never be cancelled.');
resolve();
};
events.onError = () => {
goog.asserts.assert(false,
'Our final detach call should never see an error');
resolve();
};
events.onSkip = () => {
goog.asserts.assert(false,
'Our final detach call should never be skipped');
resolve();
};
});
await this.walker_.destroy();
// Tear-down the event manager to ensure messages stop moving around.
if (this.eventManager_) {
this.eventManager_.release();
this.eventManager_ = null;
}
this.abrManagerFactory_ = null;
this.abrManager_ = null;
this.config_ = null;
this.stats_ = null;
this.videoContainer_ = null;
if (this.networkingEngine_) {
await this.networkingEngine_.destroy();
this.networkingEngine_ = null;
}
}
/**
* Registers a plugin callback that will be called when
* <code>support()</code> is called. The value the callback returns will be
* stored in the return value from <code>support()</code>.
*
* @param {string} name
* @param {function():*} callback
* @export
*/
static registerSupportPlugin(name, callback) {
shaka.Player.supportPlugins_[name] = callback;
}
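// Illustrative sketch of the support-plugin hook above (the plugin name
// 'myFeature' is hypothetical):
//
//   shaka.Player.registerSupportPlugin('myFeature', () => {
//     return {somethingSupported: true};
//   });
//   // The callback's value is surfaced in the probeSupport() results, e.g.
//   // (await shaka.Player.probeSupport())['myFeature'].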
/**
* Set a factory to create an ad manager during player construction time.
* This method needs to be called before instantiating the Player class.
*
* @param {!shaka.extern.IAdManager.Factory} factory
* @export
*/
static setAdManagerFactory(factory) {
shaka.Player.adManagerFactory_ = factory;
}
/**
* Return whether the browser provides basic support. If this returns false,
* Shaka Player cannot be used at all. In this case, do not construct a
* Player instance and do not use the library.
*
* @return {boolean}
* @export
*/
static isBrowserSupported() {
if (!window.Promise) {
shaka.log.alwaysWarn('A Promise implementation or polyfill is required');
}
if (!window.TextDecoder || !window.TextEncoder) {
shaka.log.alwaysWarn(
'A TextDecoder/TextEncoder implementation or polyfill is required');
}
// Basic features needed for the library to be usable.
const basicSupport = !!window.Promise && !!window.Uint8Array &&
!!window.TextDecoder && !!window.TextEncoder &&
// eslint-disable-next-line no-restricted-syntax
!!Array.prototype.forEach;
if (!basicSupport) {
return false;
}
// We do not support IE
if (shaka.util.Platform.isIE()) {
return false;
}
// We do not support iOS 9, 10, or 11, nor those same versions of desktop
// Safari.
const safariVersion = shaka.util.Platform.safariVersion();
if (safariVersion && safariVersion < 12) {
return false;
}
// DRM support is not strictly necessary, but the APIs at least need to be
// there. Our no-op DRM polyfill should handle that.
// TODO(#1017): Consider making even DrmEngine optional.
const drmSupport = shaka.media.DrmEngine.isBrowserSupported();
if (!drmSupport) {
return false;
}
// If we have MediaSource (MSE) support, we should be able to use Shaka.
if (shaka.util.Platform.supportsMediaSource()) {
return true;
}
// If we don't have MSE, we _may_ be able to use Shaka. Look for native HLS
// support, and call this platform usable if we have it.
return shaka.util.Platform.supportsMediaType('application/x-mpegurl');
}
/**
* Probes the browser to determine what features are supported. This makes a
* number of requests to EME/MSE/etc which may result in user prompts. This
* should only be used for diagnostics.
*
* <p>
* NOTE: This may show a request to the user for permission.
*
* @see https://bit.ly/2ywccmH
* @param {boolean=} promptsOkay
* @return {!Promise.<shaka.extern.SupportType>}
* @export
*/
static async probeSupport(promptsOkay=true) {
goog.asserts.assert(shaka.Player.isBrowserSupported(),
'Must have basic support');
let drm = {};
if (promptsOkay) {
drm = await shaka.media.DrmEngine.probeSupport();
}
const manifest = shaka.media.ManifestParser.probeSupport();
const media = shaka.media.MediaSourceEngine.probeSupport();
const ret = {
manifest: manifest,
media: media,
drm: drm,
};
const plugins = shaka.Player.supportPlugins_;
for (const name in plugins) {
ret[name] = plugins[name]();
}
return ret;
}
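// Illustrative sketch combining the two support checks above (within an async
// function): isBrowserSupported() gates construction of the Player, while
// probeSupport() is diagnostic-only since it may prompt the user.
//
//   if (shaka.Player.isBrowserSupported()) {
//     const support = await shaka.Player.probeSupport();
//     console.log('Manifest support:', support.manifest);
//     console.log('DRM support:', support.drm);
//   }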
/**
* Tell the player to use <code>mediaElement</code> for all <code>load</code>
* requests until <code>detach</code> or <code>destroy</code> are called.
*
* <p>
* Calling <code>attach</code> with <code>initializeMediaSource=true</code>
* will tell the player to take the initial load step and initialize media
* source.
*
* <p>
* Calls to <code>attach</code> will interrupt any in-progress calls to
* <code>load</code> but cannot interrupt calls to <code>attach</code>,
* <code>detach</code>, or <code>unload</code>.
*
* @param {!HTMLMediaElement} mediaElement
* @param {boolean=} initializeMediaSource
* @return {!Promise}
* @export
*/
attach(mediaElement, initializeMediaSource = true) {
// Do not allow the player to be used after |destroy| is called.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return Promise.reject(this.createAbortLoadError_());
}
const payload = shaka.Player.createEmptyPayload_();
payload.mediaElement = mediaElement;
// If the platform does not support media source, we will never want to
// initialize media source.
if (!shaka.util.Platform.supportsMediaSource()) {
initializeMediaSource = false;
}
const destination = initializeMediaSource ?
this.mediaSourceNode_ :
this.attachNode_;
// Do not allow this route to be interrupted because calls after this attach
// call will depend on the media element being attached.
const events = this.walker_.startNewRoute((currentPayload) => {
return {
node: destination,
payload: payload,
interruptible: false,
};
});
// Listen to the events that can occur with our request.
events.onStart = () => shaka.log.info('Starting attach...');
return this.wrapWalkerListenersWithPromise_(events);
}
/**
* Tell the player to stop using its current media element. If the player is:
* <ul>
* <li>detached, this will do nothing,
* <li>attached, this will release the media element,
* <li>loading, this will abort loading, unload, and release the media
* element,
* <li>playing content, this will stop playback, unload, and release the
* media element.
* </ul>
*
* <p>
* Calls to <code>detach</code> will interrupt any in-progress calls to
* <code>load</code> but cannot interrupt calls to <code>attach</code>,
* <code>detach</code>, or <code>unload</code>.
*
* @return {!Promise}
* @export
*/
detach() {
// Do not allow the player to be used after |destroy| is called.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return Promise.reject(this.createAbortLoadError_());
}
// Tell the walker to go "detached", but do not allow it to be interrupted.
// If it could be interrupted it means that our media element could fall out
// of sync.
const events = this.walker_.startNewRoute((currentPayload) => {
return {
node: this.detachNode_,
payload: shaka.Player.createEmptyPayload_(),
interruptible: false,
};
});
events.onStart = () => shaka.log.info('Starting detach...');
return this.wrapWalkerListenersWithPromise_(events);
}
/**
* Tell the player to either return to:
* <ul>
* <li>detached (when it does not have a media element),
* <li>attached (when it has a media element and
* <code>initializeMediaSource=false</code>)
* <li>media source initialized (when it has a media element and
* <code>initializeMediaSource=true</code>)
* </ul>
*
* <p>
* Calls to <code>unload</code> will interrupt any in-progress calls to
* <code>load</code> but cannot interrupt calls to <code>attach</code>,
* <code>detach</code>, or <code>unload</code>.
*
* @param {boolean=} initializeMediaSource
* @return {!Promise}
* @export
*/
unload(initializeMediaSource = true) {
// Do not allow the player to be used after |destroy| is called.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return Promise.reject(this.createAbortLoadError_());
}
// If the platform does not support media source, we will never want to
// initialize media source.
if (!shaka.util.Platform.supportsMediaSource()) {
initializeMediaSource = false;
}
// Since we are going either to attached or detached (through unloaded), we
// can't allow it to be interrupted or else we could lose track of what
// media element we are supposed to use.
//
// Using the current payload, we can determine which node we want to go to.
// If we have a media element, we want to go back to attached. If we have no
// media element, we want to go back to detached.
const payload = shaka.Player.createEmptyPayload_();
const events = this.walker_.startNewRoute((currentPayload) => {
// When someone calls |unload| we can either be before attached or
// detached (there is nothing stopping someone from calling |detach| when
// we are already detached).
//
// If we are attached to the correct element, we can tear down the
// previous playback components and go to the attach or media source node
// depending on whether or not the caller wants to pre-init media source.
//
// If we don't have a media element, we assume that we are already at the
// detached node - but only the walker knows that. To ensure we are
// actually there, we tell the walker to go to detach. While this is
// technically unnecessary, it ensures that we are in the state we want
// to be in and ready for the next request.
let destination = null;
if (currentPayload.mediaElement && initializeMediaSource) {
destination = this.mediaSourceNode_;
} else if (currentPayload.mediaElement) {
destination = this.attachNode_;
} else {
destination = this.detachNode_;
}
goog.asserts.assert(destination, 'We should have picked a destination.');
// Copy over the media element because we want to keep using the same
// element - the other values don't matter.
payload.mediaElement = currentPayload.mediaElement;
return {
node: destination,
payload: payload,
interruptible: false,
};
});
events.onStart = () => shaka.log.info('Starting unload...');
return this.wrapWalkerListenersWithPromise_(events);
}
/**
* Tell the player to load the content at <code>assetUri</code> and start
* playback at <code>startTime</code>. Before calling <code>load</code>,
* a call to <code>attach</code> must have succeeded.
*
* <p>
* Calls to <code>load</code> will interrupt any in-progress calls to
* <code>load</code> but cannot interrupt calls to <code>attach</code>,
* <code>detach</code>, or <code>unload</code>.
*
* @param {string} assetUri
* @param {?number=} startTime
* When <code>startTime</code> is <code>null</code> or
* <code>undefined</code>, playback will start at the default start time (0
* for VOD and liveEdge for LIVE).
* @param {string=} mimeType
* @return {!Promise}
* @export
*/
load(assetUri, startTime, mimeType) {
// Do not allow the player to be used after |destroy| is called.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return Promise.reject(this.createAbortLoadError_());
}
// We dispatch the loading event when someone calls |load| because we want
// to surface the user intent.
this.dispatchEvent(this.makeEvent_(shaka.Player.EventName.Loading));
// Right away we know what the asset uri and start-of-load time are. We will
// fill-in the rest of the information later.
const payload = shaka.Player.createEmptyPayload_();
payload.uri = assetUri;
payload.startTimeOfLoad = Date.now() / 1000;
if (mimeType) {
payload.mimeType = mimeType;
}
// Because we allow |startTime| to be optional, it means that it will be
// |undefined| when not provided. This means that we need to re-map
// |undefined| to |null| while preserving |0| as a meaningful value.
if (startTime !== undefined) {
payload.startTime = startTime;
}
// TODO: Refactor to determine whether it's a manifest or not, and whether
// or not we can play it. Then we could return a better error than
// UNABLE_TO_GUESS_MANIFEST_TYPE for WebM in Safari.
const useSrcEquals = this.shouldUseSrcEquals_(payload);
const destination = useSrcEquals ? this.srcEqualsNode_ : this.loadNode_;
// Allow this request to be interrupted, this will allow other requests to
// cancel a load and quickly start a new load.
const events = this.walker_.startNewRoute((currentPayload) => {
if (currentPayload.mediaElement == null) {
// Because we return null, this "new route" will not be used.
return null;
}
// Keep using whatever media element we have right now.
payload.mediaElement = currentPayload.mediaElement;
return {
node: destination,
payload: payload,
interruptible: true,
};
});
// Stats are for a single playback/load session. Stats must be initialized
// before we allow calls to |updateStateHistory|.
this.stats_ = new shaka.util.Stats();
// Load's request is a little different, so we can't use our normal
// listeners-to-promise method. It is the only request that may be skipped,
// so we need to set the onSkip callback to reject with a specific
// error.
events.onStart =
() => shaka.log.info('Starting load of ' + assetUri + '...');
return new Promise((resolve, reject) => {
events.onSkip = () => reject(new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.PLAYER,
shaka.util.Error.Code.NO_VIDEO_ELEMENT));
events.onEnd = () => {
resolve();
// We dispatch the loaded event when the load promise is resolved
this.dispatchEvent(this.makeEvent_(shaka.Player.EventName.Loaded));
};
events.onCancel = () => reject(this.createAbortLoadError_());
events.onError = (e) => reject(e);
});
}
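// Illustrative lifecycle sketch for the public API above (the manifest URI is
// hypothetical; not executed as part of this file):
//
//   await player.attach(video);
//   try {
//     await player.load('https://example.com/manifest.mpd');
//   } catch (error) {
//     // load() rejects with a shaka.util.Error, e.g. when another call to
//     // load(), unload(), or detach() interrupts it.
//   }
//   // Later, release the content but keep the media element attached:
//   await player.unload();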
/**
* Check if src= should be used to load the asset at |uri|. Assume that media
* source is the default option, and that src= is for special cases.
*
* @param {shaka.routing.Payload} payload
* @return {boolean}
* |true| if the content should be loaded with src=, |false| if the content
* should be loaded with MediaSource.
* @private
*/
shouldUseSrcEquals_(payload) {
const Platform = shaka.util.Platform;
// If we are using a platform that does not support media source, we will
// fall back to src= to handle all playback.
if (!Platform.supportsMediaSource()) {
return true;
}
// The most accurate way to tell the player how to load the content is via
// MIME type. We can fall back to features of the URI if needed.
let mimeType = payload.mimeType;
const uri = payload.uri || '';
// If we don't have a MIME type, try to guess based on the file extension.
// TODO: Too generic to belong to ManifestParser now. Refactor.
if (!mimeType) {
// Try using the uri extension.
const extension = shaka.media.ManifestParser.getExtension(uri);
mimeType = {
'mp4': 'video/mp4',
'm4v': 'video/mp4',
'm4a': 'audio/mp4',
'webm': 'video/webm',
'weba': 'audio/webm',
'mkv': 'video/webm', // Chromium browsers support it.
'ts': 'video/mp2t',
'ogv': 'video/ogg',
'ogg': 'audio/ogg',
'mpg': 'video/mpeg',
'mpeg': 'video/mpeg',
'm3u8': 'application/x-mpegurl',
'mp3': 'audio/mpeg',
'aac': 'audio/aac',
'flac': 'audio/flac',
'wav': 'audio/wav',
}[extension];
}
// TODO: The load graph system has a design limitation that requires routing
// destination to be chosen synchronously. This means we can only make the
// right choice about src= consistently if we have a well-known file
// extension or API-provided MIME type. Detection of MIME type from a HEAD
// request (as is done for manifest types) can't be done yet.
if (mimeType) {
// If we have a MIME type, check if the browser can play it natively.
// This will cover both single files and native HLS.
const mediaElement = payload.mediaElement || Platform.anyMediaElement();
const canPlayNatively = mediaElement.canPlayType(mimeType) != '';
// If we can't play natively, then src= isn't an option.
if (!canPlayNatively) {
return false;
}
const canPlayMediaSource =
shaka.media.ManifestParser.isSupported(uri, mimeType);
// If MediaSource isn't an option, the native option is our only chance.
if (!canPlayMediaSource) {
return true;
}
// If we land here, both are feasible.
goog.asserts.assert(canPlayNatively && canPlayMediaSource,
'Both native and MSE playback should be possible!');
// We would prefer MediaSource in some cases, and src= in others. For
// example, Android has native HLS, but we'd prefer our own MediaSource
// version there.
// Native HLS can be preferred on any platform via this flag:
if (this.config_.streaming.preferNativeHls) {
return true;
}
// For Safari, we have an older flag which only applies to this one
// browser:
if (Platform.isApple()) {
return this.config_.streaming.useNativeHlsOnSafari;
}
// In all other cases, we prefer MediaSource.
return false;
}
// Unless there are good reasons to use src= (single-file playback or native
// HLS), we prefer MediaSource. So the final return value for choosing src=
// is false.
return false;
}
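// Worked example for the decision above (URI is hypothetical): for
// 'https://example.com/master.m3u8' with no explicit MIME type, the extension
// maps to 'application/x-mpegurl'.
//   - Safari: canPlayType() reports native support and the HLS parser is also
//     available, so config_.streaming.useNativeHlsOnSafari decides.
//   - Chrome (no native HLS): canPlayType() returns '', so src= is rejected
//     and MediaSource is used.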
/**
* This should only be called by the load graph when it is time to attach to
* a media element. The only times this may be called are when we are being
* asked to re-attach to the current media element, or attach to a new media
* element while not attached to a media element.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* Attaching to a media element is defined as:
* - Registering error listeners to the media element.
* - Caching the video element for use outside of the load graph.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
onAttach_(has, wants) {
// If we don't have a media element yet, it means we are entering
// "attach" from another node.
//
// If we have a media element, it should match |wants.mediaElement|
// because it means we are going from "attach" to "attach".
//
// These constraints should be maintained and guaranteed by the routing
// logic in |getNextStep_|.
goog.asserts.assert(
has.mediaElement == null || has.mediaElement == wants.mediaElement,
'The routing logic failed. MediaElement requirement failed.');
if (has.mediaElement == null) {
has.mediaElement = wants.mediaElement;
const onError = (error) => this.onVideoError_(error);
this.eventManager_.listen(has.mediaElement, 'error', onError);
}
this.video_ = has.mediaElement;
return Promise.resolve();
}
/**
* This should only be called by the load graph when it is time to detach from
* a media element. The only times this may be called are when we are being
* asked to detach from the current media element, or detach when we are
* already detached.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* Detaching from a media element is defined as:
* - Removing error listeners from the media element.
* - Dropping the cached reference to the video element.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
onDetach_(has, wants) {
// If we are going from "detached" to "detached" we wouldn't have
// a media element to detach from.
if (has.mediaElement) {
this.eventManager_.unlisten(has.mediaElement, 'error');
has.mediaElement = null;
}
// Clear our cached copy of the media element.
this.video_ = null;
return Promise.resolve();
}
/**
* This should only be called by the load graph when it is time to unload all
* currently initialized playback components. Unlike the other load actions,
* this action is built to be more general. We need to do this because we
* don't know what state the player will be in before unloading (including
* after an error occurred in the middle of a transition).
*
* This method assumes that any component could be |null| and should be safe
* to call from any point in the load graph.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
async onUnload_(has, wants) {
// Set the load mode to unload right away so that all the public methods
// will stop using the internal components. We need to make sure that we
// are not overriding the destroyed state because we will unload when we are
// destroying the player.
if (this.loadMode_ != shaka.Player.LoadMode.DESTROYED) {
this.loadMode_ = shaka.Player.LoadMode.NOT_LOADED;
}
// Run any general cleanup tasks now. This should be here at the top, right
// after setting loadMode_, so that internal components still exist as they
// did when the cleanup tasks were registered in the array.
const cleanupTasks = this.cleanupOnUnload_.map((cb) => cb());
this.cleanupOnUnload_ = [];
await Promise.all(cleanupTasks);
// Dispatch the unloading event.
this.dispatchEvent(this.makeEvent_(shaka.Player.EventName.Unloading));
// Remove everything that has to do with loading content from our payload
// since we are releasing everything that depended on it.
has.mimeType = null;
has.startTime = null;
has.uri = null;
// In most cases we should have a media element. The one exception would
// be if there was an error and we, by chance, did not have a media element.
if (has.mediaElement) {
this.eventManager_.unlisten(has.mediaElement, 'loadedmetadata');
this.eventManager_.unlisten(has.mediaElement, 'playing');
this.eventManager_.unlisten(has.mediaElement, 'pause');
this.eventManager_.unlisten(has.mediaElement, 'ended');
this.eventManager_.unlisten(has.mediaElement, 'ratechange');
}
// Some observers use some playback components, shutting down the observers
// first ensures that they don't try to use the playback components
// mid-destroy.
if (this.playheadObservers_) {
this.playheadObservers_.release();
this.playheadObservers_ = null;
}
if (this.bufferPoller_) {
this.bufferPoller_.stop();
this.bufferPoller_ = null;
}
// Stop the parser early. Since it is at the start of the pipeline, it
// should be stopped early to avoid it pushing new data downstream.
if (this.parser_) {
await this.parser_.stop();
this.parser_ = null;
this.parserFactory_ = null;
}
// Abr Manager will tell streaming engine what to do, so we need to stop
// it before we destroy streaming engine. Unlike with the other components,
// we do not release the instance, we will reuse it in later loads.
if (this.abrManager_) {
await this.abrManager_.stop();
}
// Streaming engine will push new data to media source engine, so we need
// to shut it down before we destroy media source engine.
if (this.streamingEngine_) {
await this.streamingEngine_.destroy();
this.streamingEngine_ = null;
}
if (this.playRateController_) {
this.playRateController_.release();
this.playRateController_ = null;
}
// Playhead is used by StreamingEngine, so we can't destroy this until after
// StreamingEngine has stopped.
if (this.playhead_) {
this.playhead_.release();
this.playhead_ = null;
}
// Media source engine holds onto the media element, and in order to detach
// the media keys (with drm engine), we need to break the connection between
// media source engine and the media element.
if (this.mediaSourceEngine_) {
await this.mediaSourceEngine_.destroy();
this.mediaSourceEngine_ = null;
}
if (this.adManager_) {
this.adManager_.onAssetUnload();
}
// In order to unload a media element, we need to remove the src attribute
// and then load again. When we destroy media source engine, this will be
// done for us, but for src=, we need to do it here.
//
// DrmEngine requires this to be done before we destroy DrmEngine itself.
if (has.mediaElement && has.mediaElement.src) {
// TODO: Investigate this more. Only reproduces on Firefox 69.
// Introduce a delay before detaching the video source. We are seeing
// spurious Promise rejections involving an AbortError in our tests
// otherwise.
await new Promise(
(resolve) => new shaka.util.Timer(resolve).tickAfter(0.1));
has.mediaElement.removeAttribute('src');
has.mediaElement.load();
// Remove all track nodes
while (has.mediaElement.lastChild) {
has.mediaElement.removeChild(has.mediaElement.firstChild);
}
}
if (this.drmEngine_) {
await this.drmEngine_.destroy();
this.drmEngine_ = null;
}
this.assetUri_ = null;
this.bufferObserver_ = null;
if (this.manifest_) {
for (const variant of this.manifest_.variants) {
for (const stream of [variant.audio, variant.video]) {
if (stream && stream.segmentIndex) {
stream.segmentIndex.release();
}
}
}
for (const stream of this.manifest_.textStreams) {
if (stream.segmentIndex) {
stream.segmentIndex.release();
}
}
}
this.manifest_ = null;
this.stats_ = new shaka.util.Stats(); // Replace with a clean stats object.
this.lastTextFactory_ = null;
// Make sure that the app knows of the new buffering state.
this.updateBufferState_();
}
/**
* This should only be called by the load graph when it is time to initialize
* media source engine. The only time this may be called is when we are
* attached to the same media element as in the request.
*
* This method assumes that it is safe for it to execute. The load-graph is
* responsible for ensuring all assumptions are true.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
*
* @return {!Promise}
* @private
*/
async onInitializeMediaSourceEngine_(has, wants) {
goog.asserts.assert(
shaka.util.Platform.supportsMediaSource(),
'We should not be initializing media source on a platform that does ' +
'not support media source.');
goog.asserts.assert(
has.mediaElement,
'We should have a media element when initializing media source.');
goog.asserts.assert(
has.mediaElement == wants.mediaElement,
'|has| and |wants| should have the same media element when ' +
'initializing media source.');
goog.asserts.assert(
this.mediaSourceEngine_ == null,
'We should not have a media source engine yet.');
const closedCaptionsParser = new shaka.media.ClosedCaptionParser();
// When changing text visibility we need to update both the text displayer
// and streaming engine because we don't always stream text. To ensure that
// text displayer and streaming engine are always in sync, wait until they
// are both initialized before setting the initial value.
const textDisplayerFactory = this.config_.textDisplayFactory;
const textDisplayer =
shaka.util.Functional.callFactory(textDisplayerFactory);
this.lastTextFactory_ = textDisplayerFactory;
const mediaSourceEngine = this.createMediaSourceEngine(
has.mediaElement,
closedCaptionsParser,
textDisplayer,
(metadata, offset, endTime) => {
this.processTimedMetadataMediaSrc_(metadata, offset, endTime);
});
// Wait for media source engine to finish opening. This promise should
// NEVER be rejected as per the media source engine implementation.
await mediaSourceEngine.open();
// Wait until it is ready to actually store the reference.
this.mediaSourceEngine_ = mediaSourceEngine;
}
/**
* Create the parser for the asset located at |wants.uri|. This should only be
* called as part of the load graph.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
async onInitializeParser_(has, wants) {
goog.asserts.assert(
has.mediaElement,
'We should have a media element when initializing the parser.');
goog.asserts.assert(
has.mediaElement == wants.mediaElement,
'|has| and |wants| should have the same media element when ' +
'initializing the parser.');
goog.asserts.assert(
this.networkingEngine_,
'Need networking engine when initializing the parser.');
goog.asserts.assert(
this.config_,
'Need player config when initializing the parser.');
// We are going to "lock-in" the mime type and uri since they are
// what we are going to use to create our parser and parse the manifest.
has.mimeType = wants.mimeType;
has.uri = wants.uri;
goog.asserts.assert(
has.uri,
'We should have an asset uri when initializing the parsing.');
// Store references to things we asserted so that we don't need to reassert
// them again later.
const assetUri = has.uri;
const networkingEngine = this.networkingEngine_;
// Save the uri so that it can be used outside of the load-graph.
this.assetUri_ = assetUri;
// Create the parser that we will use to parse the manifest.
this.parserFactory_ = await shaka.media.ManifestParser.getFactory(
assetUri,
networkingEngine,
this.config_.manifest.retryParameters,
has.mimeType);
goog.asserts.assert(this.parserFactory_, 'Must have manifest parser');
this.parser_ = shaka.util.Functional.callFactory(this.parserFactory_);
const manifestConfig =
shaka.util.ObjectUtils.cloneObject(this.config_.manifest);
// Don't read video segments if the player is attached to an audio element
if (wants.mediaElement && wants.mediaElement.nodeName === 'AUDIO') {
manifestConfig.disableVideo = true;
}
this.parser_.configure(manifestConfig);
}
/**
* Parse the manifest at |has.uri| using the parser that should have already
* been created. This should only be called as part of the load graph.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!shaka.util.AbortableOperation}
* @private
*/
onParseManifest_(has, wants) {
goog.asserts.assert(
has.mimeType == wants.mimeType,
'|has| and |wants| should have the same mime type when parsing.');
goog.asserts.assert(
has.uri == wants.uri,
'|has| and |wants| should have the same uri when parsing.');
goog.asserts.assert(
has.uri,
'|has| should have a valid uri when parsing.');
goog.asserts.assert(
has.uri == this.assetUri_,
'|has.uri| should match the cached asset uri.');
goog.asserts.assert(
this.networkingEngine_,
'Need networking engine to parse manifest.');
goog.asserts.assert(
this.config_,
'Need player config to parse manifest.');
goog.asserts.assert(
this.parser_,
'|this.parser_| should have been set in an earlier step.');
// Store references to things we asserted so that we don't need to reassert
// them again later.
const assetUri = has.uri;
const networkingEngine = this.networkingEngine_;
// This will be needed by the parser once it starts parsing, so we will
// initialize it now even though it appears a little out-of-place.
this.regionTimeline_ =
new shaka.media.RegionTimeline(() => this.seekRange());
this.regionTimeline_.setListeners(/* onRegionAdded= */ (region) => {
this.onRegionEvent_(shaka.Player.EventName.TimelineRegionAdded, region);
if (this.adManager_) {
this.adManager_.onDashTimedMetadata(region);
}
});
// TODO (#1391): Once filterManifest_ is async, remove this eslint disable.
/* eslint-disable require-await */
const playerInterface = {
networkingEngine: networkingEngine,
filter: async (manifest) => this.filterManifest_(manifest),
makeTextStreamsForClosedCaptions: (manifest) => {
return this.makeTextStreamsForClosedCaptions_(manifest);
},
// Called when the parser finds a timeline region. This can be called
// before we start playback or during playback (live/in-progress
// manifest).
onTimelineRegionAdded: (region) => this.regionTimeline_.addRegion(region),
onEvent: (event) => this.dispatchEvent(event),
onError: (error) => this.onError_(error),
isLowLatencyMode: () => this.isLowLatencyMode_(),
isAutoLowLatencyMode: () => this.isAutoLowLatencyMode_(),
enableLowLatencyMode: () => {
this.configure('streaming.lowLatencyMode', true);
},
};
/* eslint-enable require-await */
const startTime = Date.now() / 1000;
return new shaka.util.AbortableOperation(/* promise= */ (async () => {
this.manifest_ = await this.parser_.start(assetUri, playerInterface);
// This event is fired after the manifest is parsed, but before any
// filtering takes place.
const event = this.makeEvent_(shaka.Player.EventName.ManifestParsed);
this.dispatchEvent(event);
// We require all manifests to have at least one variant.
if (this.manifest_.variants.length == 0) {
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.MANIFEST,
shaka.util.Error.Code.NO_VARIANTS);
}
// Make sure that all variants are either: audio-only, video-only, or
// audio-video.
shaka.Player.filterForAVVariants_(this.manifest_);
const now = Date.now() / 1000;
const delta = now - startTime;
this.stats_.setManifestTime(delta);
})(), /* onAbort= */ () => {
shaka.log.info('Aborting parser step...');
return this.parser_.stop();
});
}
/**
* This should only be called by the load graph when it is time to initialize
* drmEngine. The only time this may be called is when we are attached to a
* media element and have parsed a manifest.
*
* The load-graph is responsible for ensuring all assumptions made by this
* method are valid before executing it.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
async onInitializeDrm_(has, wants) {
goog.asserts.assert(
has.mimeType == wants.mimeType,
'The load graph should have ensured the mime types matched.');
goog.asserts.assert(
has.uri == wants.uri,
'The load graph should have ensured the uris matched');
goog.asserts.assert(
this.networkingEngine_,
'|onInitializeDrm_| should never be called after |destroy|');
goog.asserts.assert(
this.config_,
'|onInitializeDrm_| should never be called after |destroy|');
goog.asserts.assert(
this.manifest_,
'|this.manifest_| should have been set in an earlier step.');
goog.asserts.assert(
has.mediaElement,
'We should have a media element when initializing the DRM Engine.');
const startTime = Date.now() / 1000;
let firstEvent = true;
this.drmEngine_ = this.createDrmEngine({
netEngine: this.networkingEngine_,
onError: (e) => {
this.onError_(e);
},
onKeyStatus: (map) => {
this.onKeyStatus_(map);
},
onExpirationUpdated: (id, expiration) => {
this.onExpirationUpdated_(id, expiration);
},
onEvent: (e) => {
this.dispatchEvent(e);
if (e.type == shaka.Player.EventName.DrmSessionUpdate && firstEvent) {
firstEvent = false;
const now = Date.now() / 1000;
const delta = now - startTime;
this.stats_.setDrmTime(delta);
}
},
});
// TODO: remove once MediaCap implementation is complete.
if (!this.config_.useMediaCapabilities) {
shaka.util.StreamUtils.filterManifestByMediaSource(this.manifest_);
}
this.drmEngine_.configure(this.config_.drm);
await this.drmEngine_.initForPlayback(
this.manifest_.variants,
this.manifest_.offlineSessionIds,
this.config_.useMediaCapabilities);
await this.drmEngine_.attach(has.mediaElement);
// Now that we have drm information, filter the manifest (again) so that we
// can ensure we only use variants with the selected key system.
await this.filterManifest_(this.manifest_);
}
/**
* This should only be called by the load graph when it is time to load all
* playback components needed for playback. The only time this may be called
* is when we are attached to the same media element as in the request.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* Loading is defined as:
* - Attaching all playback-related listeners to the media element
* - Initializing playback and observers
* - Initializing ABR Manager
* - Initializing Streaming Engine
* - Starting playback at |wants.startTime|
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @private
*/
async onLoad_(has, wants) {
goog.asserts.assert(
has.mimeType == wants.mimeType,
'|has| and |wants| should have the same mime type when loading.');
goog.asserts.assert(
has.uri == wants.uri,
'|has| and |wants| should have the same uri when loading.');
goog.asserts.assert(
has.mediaElement,
'We should have a media element when loading.');
goog.asserts.assert(
!isNaN(wants.startTimeOfLoad),
'|wants| should tell us when the load was originally requested');
// Since we are about to start playback, we will lock in the start time as
// something we are now depending on.
has.startTime = wants.startTime;
// Store a reference to values in |has| after asserting so that closure will
// know that they will still be non-null between calls to await.
const mediaElement = has.mediaElement;
const assetUri = has.uri;
// Save the uri so that it can be used outside of the load-graph.
this.assetUri_ = assetUri;
this.playRateController_ = new shaka.media.PlayRateController({
getRate: () => has.mediaElement.playbackRate,
getDefaultRate: () => has.mediaElement.defaultPlaybackRate,
setRate: (rate) => { has.mediaElement.playbackRate = rate; },
movePlayhead: (delta) => { has.mediaElement.currentTime += delta; },
});
const updateStateHistory = () => this.updateStateHistory_();
const onRateChange = () => this.onRateChange_();
this.eventManager_.listen(mediaElement, 'playing', updateStateHistory);
this.eventManager_.listen(mediaElement, 'pause', updateStateHistory);
this.eventManager_.listen(mediaElement, 'ended', updateStateHistory);
this.eventManager_.listen(mediaElement, 'ratechange', onRateChange);
const abrFactory = this.config_.abrFactory;
if (!this.abrManager_ || this.abrManagerFactory_ != abrFactory) {
this.abrManagerFactory_ = abrFactory;
this.abrManager_ = shaka.util.Functional.callFactory(abrFactory);
if (typeof this.abrManager_.playbackRateChanged != 'function') {
shaka.Deprecate.deprecateFeature(4,
'AbrManager',
'Please use an AbrManager with playbackRateChanged function.');
this.abrManager_.playbackRateChanged = (rate) => {};
}
this.abrManager_.configure(this.config_.abr);
}
// Copy preferred languages from the config again, in case the config was
// changed between construction and playback.
this.currentAdaptationSetCriteria_ =
new shaka.media.PreferenceBasedCriteria(
this.config_.preferredAudioLanguage,
this.config_.preferredVariantRole,
this.config_.preferredAudioChannelCount);
this.currentTextLanguage_ = this.config_.preferredTextLanguage;
shaka.Player.applyPlayRange_(this.manifest_.presentationTimeline,
this.config_.playRangeStart,
this.config_.playRangeEnd);
this.abrManager_.init((variant, clearBuffer, safeMargin) => {
return this.switch_(variant, clearBuffer, safeMargin);
});
this.playhead_ = this.createPlayhead(has.startTime);
this.playheadObservers_ = this.createPlayheadObserversForMSE_();
// We need to start the buffer management code near the end because it will
// set the initial buffering state and that depends on other components
// being initialized.
const rebufferThreshold = Math.max(
this.manifest_.minBufferTime, this.config_.streaming.rebufferingGoal);
this.startBufferManagement_(rebufferThreshold);
// If the content is multi-codec and the browser can play more than one of
// them, choose codecs now before we initialize streaming.
shaka.util.StreamUtils.chooseCodecsAndFilterManifest(
this.manifest_, this.config_.preferredAudioChannelCount);
this.streamingEngine_ = this.createStreamingEngine();
this.streamingEngine_.configure(this.config_.streaming);
// Set the load mode to "loaded with media source" as late as possible so
// that public methods won't try to access internal components until
// they're all initialized. We MUST switch to loaded before calling
// "streaming" so that they can access internal information.
this.loadMode_ = shaka.Player.LoadMode.MEDIA_SOURCE;
// The event must be fired after we filter by restrictions but before the
// active stream is picked to allow those listening for the "streaming"
// event to make changes before streaming starts.
this.dispatchEvent(this.makeEvent_(shaka.Player.EventName.Streaming));
// Pick the initial streams to play.
const initialVariant = this.chooseVariant_();
goog.asserts.assert(initialVariant, 'Must choose an initial variant!');
this.addVariantToSwitchHistory_(
initialVariant, /* fromAdaptation= */ true);
this.streamingEngine_.switchVariant(
initialVariant, /* clearBuffer= */ false, /* safeMargin= */ 0);
// Decide if text should be shown automatically.
const initialTextStream = this.chooseTextStream_();
if (initialTextStream) {
this.addTextStreamToSwitchHistory_(
initialTextStream, /* fromAdaptation= */ true);
}
this.setInitialTextState_(initialVariant, initialTextStream);
// Don't initialize with a text stream unless we should be streaming text.
if (initialTextStream && this.shouldStreamText_()) {
this.streamingEngine_.switchTextStream(initialTextStream);
}
// Now that we have initial streams, we may adjust the start time to align
// to a segment boundary.
if (this.config_.streaming.startAtSegmentBoundary) {
const startTime = this.playhead_.getTime();
const adjustedTime =
await this.adjustStartTime_(initialVariant, startTime);
this.playhead_.setStartTime(adjustedTime);
}
// Start streaming content. This will start the flow of content down to
// media source.
await this.streamingEngine_.start();
if (this.config_.abr.enabled) {
this.abrManager_.enable();
this.onAbrStatusChanged_();
}
// Re-filter the manifest after streams have been chosen.
this.filterManifestByCurrentVariant_();
// Dispatch a 'trackschanged' event now that all initial filtering is done.
this.onTracksChanged_();
// Since the first streams just became active, send an adaptation event.
this.onAdaptation_();
// Now that we've filtered out variants that aren't compatible with the
// active one, update abr manager with filtered variants.
// NOTE: This may be unnecessary. We've already chosen one codec in
// chooseCodecsAndFilterManifest_ before we started streaming. But it
// doesn't hurt, and this will all change when we start using
// MediaCapabilities and codec switching.
// TODO(#1391): Re-evaluate with MediaCapabilities and codec switching.
this.updateAbrManagerVariants_();
const hasPrimary = this.manifest_.variants.some((v) => v.primary);
if (!this.config_.preferredAudioLanguage && !hasPrimary) {
shaka.log.warning('No preferred audio language set. We have chosen an ' +
'arbitrary language initially');
}
// Wait for the 'loadedmetadata' event to measure load() latency.
this.eventManager_.listenOnce(mediaElement, 'loadedmetadata', () => {
const now = Date.now() / 1000;
const delta = now - wants.startTimeOfLoad;
this.stats_.setLoadLatency(delta);
});
}
/**
* This should only be called by the load graph when it is time to initialize
* drmEngine for src= playbacks.
*
* The load-graph is responsible for ensuring all assumptions made by this
* method are valid before executing it.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!Promise}
* @private
*/
async onInitializeSrcEqualsDrm_(has, wants) {
const ContentType = shaka.util.ManifestParserUtils.ContentType;
goog.asserts.assert(
this.networkingEngine_,
'|onInitializeSrcEqualsDrm_| should never be called after |destroy|');
goog.asserts.assert(
this.config_,
'|onInitializeSrcEqualsDrm_| should never be called after |destroy|');
const startTime = Date.now() / 1000;
let firstEvent = true;
this.drmEngine_ = this.createDrmEngine({
netEngine: this.networkingEngine_,
onError: (e) => {
this.onError_(e);
},
onKeyStatus: (map) => {
this.onKeyStatus_(map);
},
onExpirationUpdated: (id, expiration) => {
this.onExpirationUpdated_(id, expiration);
},
onEvent: (e) => {
this.dispatchEvent(e);
if (e.type == shaka.Player.EventName.DrmSessionUpdate && firstEvent) {
firstEvent = false;
const now = Date.now() / 1000;
const delta = now - startTime;
this.stats_.setDrmTime(delta);
}
},
});
this.drmEngine_.configure(this.config_.drm);
// TODO: Instead of feeding DrmEngine with Variants, we should refactor
// DrmEngine so that it takes a minimal config derived from Variants. In
// cases like this one or in removal of stored content, the details are
// largely unimportant. We should have a saner way to initialize DrmEngine.
// That would also insulate DrmEngine from manifest changes in the future.
// For now, that is time-consuming and this synthetic Variant is easy, so
// I'm putting it off. Since this is only expected to be used for native
// HLS in Safari, this should be safe. -JCP
/** @type {shaka.extern.Variant} */
const variant = {
id: 0,
language: 'und',
primary: false,
audio: null,
video: {
id: 0,
originalId: null,
createSegmentIndex: () => Promise.resolve(),
segmentIndex: null,
mimeType: 'video/mp4',
codecs: '',
encrypted: true,
drmInfos: [], // Filled in by DrmEngine config.
keyIds: new Set(),
language: 'und',
label: null,
type: ContentType.VIDEO,
primary: false,
trickModeVideo: null,
emsgSchemeIdUris: null,
roles: [],
forced: false,
channelsCount: null,
audioSamplingRate: null,
spatialAudio: false,
closedCaptions: null,
},
bandwidth: 100,
allowedByApplication: true,
allowedByKeySystem: true,
decodingInfos: [],
};
await this.drmEngine_.initForPlayback(
[variant], /* offlineSessionIds= */ []);
await this.drmEngine_.attach(has.mediaElement);
}
/**
* This should only be called by the load graph when it is time to set-up the
* media element to play content using src=. The only time this may be called
* is when we are attached to the same media element as in the request.
*
* This method assumes that it is safe for it to execute, the load-graph is
* responsible for ensuring all assumptions are true.
*
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {!shaka.util.AbortableOperation}
*
* @private
*/
onSrcEquals_(has, wants) {
goog.asserts.assert(
has.mediaElement,
'We should have a media element when loading.');
goog.asserts.assert(
wants.uri,
'|wants| should have a valid uri when loading.');
goog.asserts.assert(
!isNaN(wants.startTimeOfLoad),
'|wants| should tell us when the load was originally requested');
goog.asserts.assert(
this.video_ == has.mediaElement,
'The video element should match our media element');
// Lock-in the values that we are using so that the routing logic knows what
// we have.
has.uri = wants.uri;
has.startTime = wants.startTime;
// Save the uri so that it can be used outside of the load-graph.
this.assetUri_ = has.uri;
this.playhead_ = new shaka.media.SrcEqualsPlayhead(has.mediaElement);
if (has.startTime != null) {
this.playhead_.setStartTime(has.startTime);
}
this.playRateController_ = new shaka.media.PlayRateController({
getRate: () => has.mediaElement.playbackRate,
getDefaultRate: () => has.mediaElement.defaultPlaybackRate,
setRate: (rate) => { has.mediaElement.playbackRate = rate; },
movePlayhead: (delta) => { has.mediaElement.currentTime += delta; },
});
// We need to start the buffer management code near the end because it will
// set the initial buffering state and that depends on other components
// being initialized.
const rebufferThreshold = this.config_.streaming.rebufferingGoal;
this.startBufferManagement_(rebufferThreshold);
// Add all media element listeners.
const updateStateHistory = () => this.updateStateHistory_();
const onRateChange = () => this.onRateChange_();
this.eventManager_.listen(has.mediaElement, 'playing', updateStateHistory);
this.eventManager_.listen(has.mediaElement, 'pause', updateStateHistory);
this.eventManager_.listen(has.mediaElement, 'ended', updateStateHistory);
this.eventManager_.listen(has.mediaElement, 'ratechange', onRateChange);
// Wait for the 'loadedmetadata' event to measure load() latency, but only
// if preload is set in a way that would result in this event firing
// automatically. See https://github.com/google/shaka-player/issues/2483
if (this.video_.preload != 'none') {
this.eventManager_.listenOnce(this.video_, 'loadedmetadata', () => {
const now = Date.now() / 1000;
const delta = now - wants.startTimeOfLoad;
this.stats_.setLoadLatency(delta);
});
}
// The audio tracks are only available on Safari at the moment, but this
// drives the tracks API for Safari's native HLS. So when they change,
// fire the corresponding Shaka Player event.
if (this.video_.audioTracks) {
this.eventManager_.listen(
this.video_.audioTracks, 'addtrack', () => this.onTracksChanged_());
this.eventManager_.listen(
this.video_.audioTracks, 'removetrack',
() => this.onTracksChanged_());
this.eventManager_.listen(
this.video_.audioTracks, 'change', () => this.onTracksChanged_());
}
if (this.video_.textTracks) {
this.eventManager_.listen(this.video_.textTracks, 'addtrack', (e) => {
this.onTracksChanged_();
this.processTimedMetadataSrcEqls_(/** @type {!TrackEvent} */(e));
});
this.eventManager_.listen(
this.video_.textTracks, 'removetrack', () => this.onTracksChanged_());
this.eventManager_.listen(
this.video_.textTracks, 'change', () => this.onTracksChanged_());
}
// By setting |src| we are done "loading" with src=. We don't need to set
// the current time because |playhead| will do that for us.
has.mediaElement.src = has.uri;
// Tizen 3 / WebOS won't load anything unless you call load() explicitly,
// no matter the value of the preload attribute. This is harmful on some
// other platforms by triggering unbounded loading of media data, but is
// necessary here.
if (shaka.util.Platform.isTizen() || shaka.util.Platform.isWebOS()) {
has.mediaElement.load();
}
// Set the load mode last so that we know that all our components are
// initialized.
this.loadMode_ = shaka.Player.LoadMode.SRC_EQUALS;
// The event doesn't mean as much for src= playback, since we don't control
// streaming. But we should fire it in this path anyway since some
// applications may be expecting it as a life-cycle event.
this.dispatchEvent(this.makeEvent_(shaka.Player.EventName.Streaming));
// The "load" Promise is resolved when we have loaded the metadata. If we
// wait for the full data, that won't happen on Safari until the play button
// is hit.
const fullyLoaded = new shaka.util.PublicPromise();
shaka.util.MediaReadyState.waitForReadyState(this.video_,
HTMLMediaElement.HAVE_METADATA,
this.eventManager_,
() => {
fullyLoaded.resolve();
});
// This flag is used below in the language preference setup to check if this
// load was canceled before the necessary events fire.
let unloaded = false;
this.cleanupOnUnload_.push(() => {
unloaded = true;
});
// We can't switch to preferred languages, though, until the data is loaded.
shaka.util.MediaReadyState.waitForReadyState(this.video_,
HTMLMediaElement.HAVE_CURRENT_DATA,
this.eventManager_,
async () => {
// If we have moved on to another piece of content while waiting for
// the above event, we should not change tracks here.
if (unloaded) {
return;
}
this.setupPreferredAudioOnSrc_();
// Applying the text preference too soon can result in it being
// reverted. Wait for native HLS to pick something first.
const textTracks = this.getFilteredTextTracks_();
if (!textTracks.find((t) => t.mode != 'disabled')) {
await new Promise((resolve) => {
this.eventManager_.listenOnce(
this.video_.textTracks, 'change', resolve);
// We expect the event to fire because it does on Safari.
// But in case it doesn't on some other platform or future
// version, move on in 1 second no matter what. This keeps the
// language settings from being completely ignored if something
// goes wrong.
new shaka.util.Timer(resolve).tickAfter(1);
});
}
// If we have moved on to another piece of content while waiting for
// the above event/timer, we should not change tracks here.
if (unloaded) {
return;
}
this.setupPreferredTextOnSrc_();
});
if (this.video_.error) {
// Already failed!
fullyLoaded.reject(this.videoErrorToShakaError_());
} else if (this.video_.preload == 'none') {
shaka.log.alwaysWarn(
'With <video preload="none">, the browser will not load anything ' +
'until play() is called. We are unable to measure load latency in ' +
'a meaningful way, and we cannot provide track info yet. Please do ' +
'not use preload="none" with Shaka Player.');
      // We can't wait for the loadedmetadata event, since that will be
      // blocked until a user interaction. So resolve the Promise now.
fullyLoaded.resolve();
}
this.eventManager_.listenOnce(this.video_, 'error', () => {
fullyLoaded.reject(this.videoErrorToShakaError_());
});
return new shaka.util.AbortableOperation(fullyLoaded, /* onAbort= */ () => {
const abortedError = new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.PLAYER,
shaka.util.Error.Code.OPERATION_ABORTED);
fullyLoaded.reject(abortedError);
return Promise.resolve(); // Abort complete.
});
}
/**
   * This method sets up the preferred audio language when using src=.
*
* @private
*/
setupPreferredAudioOnSrc_() {
const preferredAudioLanguage = this.config_.preferredAudioLanguage;
    // If the user has not selected a preference, the browser's preference is
    // left in place.
if (preferredAudioLanguage == '') {
return;
}
this.selectAudioLanguage(preferredAudioLanguage);
const preferredVariantRole = this.config_.preferredVariantRole;
// If the user has not selected a role preference, the previous match is
// selected.
if (preferredVariantRole == '') {
return;
}
this.selectAudioLanguage(preferredAudioLanguage, preferredVariantRole);
}
/**
   * This method sets up the preferred text language when using src=.
*
* @private
*/
setupPreferredTextOnSrc_() {
const preferredTextLanguage = this.config_.preferredTextLanguage;
const preferForcedSubs = this.config_.preferForcedSubs;
    // If the user has not selected a preference, the browser's preference is
    // left in place.
if (preferredTextLanguage == '') {
return;
}
this.selectTextLanguage(preferredTextLanguage, '', preferForcedSubs);
const preferredTextRole = this.config_.preferredTextRole;
// If the user has not selected a role preference, the previous match is
// selected.
if (preferredTextRole == '') {
return;
}
this.selectTextLanguage(preferredTextLanguage, preferredTextRole,
preferForcedSubs);
}
/**
   * We're looking for metadata tracks to process ID3 tags. One of the uses is
   * for ad info on LIVE streams.
*
* @param {!TrackEvent} event
* @private
*/
processTimedMetadataSrcEqls_(event) {
const track = event.track;
goog.asserts.assert(track instanceof TextTrack, 'Wrong track type!');
if (track.kind != 'metadata') {
return;
}
    // Hidden mode is required for the cuechange event to fire correctly.
track.mode = 'hidden';
this.eventManager_.listen(track, 'cuechange', () => {
if (!track.activeCues) {
return;
}
for (const cue of track.activeCues) {
this.dispatchMetadataEvent_(cue.startTime, cue.endTime,
cue.type, cue.value);
if (this.adManager_) {
this.adManager_.onCueMetadataChange(cue.value);
}
}
});
// In Safari the initial assignment does not always work, so we schedule
// this process to be repeated several times to ensure that it has been put
// in the correct mode.
new shaka.util.Timer(() => {
const textTracks = this.getMetadataTracks_();
for (const textTrack of textTracks) {
textTrack.mode = 'hidden';
}
}).tickNow().tickAfter(/* seconds= */ 0.5);
}
/**
* @param {!Array.<shaka.extern.ID3Metadata>} metadata
* @param {number} offset
* @param {?number} segmentEndTime
* @private
*/
processTimedMetadataMediaSrc_(metadata, offset, segmentEndTime) {
for (const sample of metadata) {
if (sample['data'] && sample['cueTime'] && sample['frames']) {
const start = sample['cueTime'] + offset;
const end = segmentEndTime;
const metadataType = 'ID3';
for (const frame of sample['frames']) {
const payload = frame;
this.dispatchMetadataEvent_(start, end, metadataType, payload);
}
if (this.adManager_) {
this.adManager_.onHlsTimedMetadata(sample, start);
}
}
}
}
/**
* Construct and fire a Player.Metadata event
*
* @param {number} startTime
* @param {?number} endTime
* @param {string} metadataType
* @param {shaka.extern.ID3Metadata} payload
* @private
*/
dispatchMetadataEvent_(startTime, endTime, metadataType, payload) {
goog.asserts.assert(!endTime || startTime <= endTime,
        'Metadata start time should be less than or equal to the end time!');
const eventName = shaka.Player.EventName.Metadata;
const data = {
startTime: startTime,
endTime: endTime,
metadataType: metadataType,
payload: payload,
};
this.dispatchEvent(this.makeEvent_(eventName, data));
}
/**
* Take a series of variants and ensure that they only contain one type of
* variant. The different options are:
* 1. Audio-Video
* 2. Audio-Only
* 3. Video-Only
*
* A manifest can only contain a single type because once we initialize media
* source to expect specific streams, it must always have content for those
* streams. If we were to start with audio+video and switch to an audio-only
* variant, media source would block waiting for video content.
*
* @param {shaka.extern.Manifest} manifest
* @private
*/
static filterForAVVariants_(manifest) {
const isAVVariant = (variant) => {
// Audio-video variants may include both streams separately or may be
// single multiplexed streams with multiple codecs.
return (variant.video && variant.audio) ||
(variant.video && variant.video.codecs.includes(','));
};
if (manifest.variants.some(isAVVariant)) {
shaka.log.debug('Found variant with audio and video content, ' +
'so filtering out audio-only content.');
manifest.variants = manifest.variants.filter(isAVVariant);
}
}
/**
* Create a new DrmEngine instance. This may be replaced by tests to create
* fake instances. Configuration and initialization will be handled after
* |createDrmEngine|.
*
* @param {shaka.media.DrmEngine.PlayerInterface} playerInterface
* @return {!shaka.media.DrmEngine}
*/
createDrmEngine(playerInterface) {
const updateExpirationTime = this.config_.drm.updateExpirationTime;
return new shaka.media.DrmEngine(playerInterface, updateExpirationTime);
}
/**
* Creates a new instance of NetworkingEngine. This can be replaced by tests
* to create fake instances instead.
*
* @return {!shaka.net.NetworkingEngine}
*/
createNetworkingEngine() {
/** @type {function(number, number)} */
const onProgressUpdated_ = (deltaTimeMs, bytesDownloaded) => {
// In some situations, such as during offline storage, the abr manager
// might not yet exist. Therefore, we need to check if abr manager has
// been initialized before using it.
if (this.abrManager_) {
this.abrManager_.segmentDownloaded(deltaTimeMs, bytesDownloaded);
}
};
return new shaka.net.NetworkingEngine(onProgressUpdated_);
}
/**
* Creates a new instance of Playhead. This can be replaced by tests to
* create fake instances instead.
*
* @param {?number} startTime
* @return {!shaka.media.Playhead}
*/
createPlayhead(startTime) {
goog.asserts.assert(this.manifest_, 'Must have manifest');
goog.asserts.assert(this.video_, 'Must have video');
return new shaka.media.MediaSourcePlayhead(
this.video_,
this.manifest_,
this.config_.streaming,
startTime,
() => this.onSeek_(),
(event) => this.dispatchEvent(event));
}
/**
* Create the observers for MSE playback. These observers are responsible for
* notifying the app and player of specific events during MSE playback.
*
* @return {!shaka.media.PlayheadObserverManager}
* @private
*/
createPlayheadObserversForMSE_() {
goog.asserts.assert(this.manifest_, 'Must have manifest');
goog.asserts.assert(this.regionTimeline_, 'Must have region timeline');
goog.asserts.assert(this.video_, 'Must have video element');
// Create the region observer. This will allow us to notify the app when we
// move in and out of timeline regions.
const regionObserver = new shaka.media.RegionObserver(this.regionTimeline_);
const onEnterRegion = (region, seeking) => {
this.onRegionEvent_(shaka.Player.EventName.TimelineRegionEnter, region);
};
const onExitRegion = (region, seeking) => {
this.onRegionEvent_(shaka.Player.EventName.TimelineRegionExit, region);
};
const onSkipRegion = (region, seeking) => {
      // If we are seeking, we don't want to surface the enter/exit events
      // since the user didn't play through them.
if (!seeking) {
this.onRegionEvent_(shaka.Player.EventName.TimelineRegionEnter, region);
this.onRegionEvent_(shaka.Player.EventName.TimelineRegionExit, region);
}
};
regionObserver.setListeners(onEnterRegion, onExitRegion, onSkipRegion);
// Now that we have all our observers, create a manager for them.
const manager = new shaka.media.PlayheadObserverManager(this.video_);
manager.manage(regionObserver);
return manager;
}
/**
* Initialize and start the buffering system (observer and timer) so that we
* can monitor our buffer lead during playback.
*
* @param {number} rebufferingGoal
* @private
*/
startBufferManagement_(rebufferingGoal) {
goog.asserts.assert(
!this.bufferObserver_,
'No buffering observer should exist before initialization.');
goog.asserts.assert(
!this.bufferPoller_,
'No buffer timer should exist before initialization.');
// Give dummy values, will be updated below.
this.bufferObserver_ = new shaka.media.BufferingObserver(1, 2);
    // Force us back to a buffering state. This ensures everything starts in
    // the same state.
this.bufferObserver_.setState(shaka.media.BufferingObserver.State.STARVING);
this.updateBufferingSettings_(rebufferingGoal);
this.updateBufferState_();
// TODO: We should take some time to look into the effects of our
// quarter-second refresh practice. We often use a quarter-second
// but we have no documentation about why.
this.bufferPoller_ = new shaka.util.Timer(() => {
this.pollBufferState_();
}).tickEvery(/* seconds= */ 0.25);
}
/**
* Updates the buffering thresholds based on the new rebuffering goal.
*
* @param {number} rebufferingGoal
* @private
*/
updateBufferingSettings_(rebufferingGoal) {
// The threshold to transition back to satisfied when starving.
const starvingThreshold = rebufferingGoal;
// The threshold to transition into starving when satisfied.
    // We use a "typical" threshold, unless the rebufferingGoal is unusually
    // low, in which case we force the value down to half the rebufferingGoal,
    // since starvingThreshold must be strictly larger than satisfiedThreshold
    // for the logic in BufferingObserver to work correctly.
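    // For example (illustrative numbers only): with a rebufferingGoal of 2
    // seconds, the satisfied threshold is capped at 1 second.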
const satisfiedThreshold = Math.min(
shaka.Player.TYPICAL_BUFFERING_THRESHOLD_, rebufferingGoal / 2);
this.bufferObserver_.setThresholds(starvingThreshold, satisfiedThreshold);
}
/**
* This method is called periodically to check what the buffering observer
* says so that we can update the rest of the buffering behaviours.
*
* @private
*/
pollBufferState_() {
goog.asserts.assert(
this.video_,
'Need a media element to update the buffering observer');
goog.asserts.assert(
this.bufferObserver_,
'Need a buffering observer to update');
let bufferedToEnd;
switch (this.loadMode_) {
case shaka.Player.LoadMode.SRC_EQUALS:
bufferedToEnd = this.isBufferedToEndSrc_();
break;
case shaka.Player.LoadMode.MEDIA_SOURCE:
bufferedToEnd = this.isBufferedToEndMS_();
break;
default:
bufferedToEnd = false;
break;
}
const bufferLead = shaka.media.TimeRangesUtils.bufferedAheadOf(
this.video_.buffered,
this.video_.currentTime);
const stateChanged = this.bufferObserver_.update(bufferLead, bufferedToEnd);
// If the state changed, we need to surface the event.
if (stateChanged) {
this.updateBufferState_();
}
}
/**
* Create a new media source engine. This will ONLY be replaced by tests as a
* way to inject fake media source engine instances.
*
* @param {!HTMLMediaElement} mediaElement
* @param {!shaka.media.IClosedCaptionParser} closedCaptionsParser
* @param {!shaka.extern.TextDisplayer} textDisplayer
* @param {!function(!Array.<shaka.extern.ID3Metadata>, number, ?number)}
* onMetadata
*
* @return {!shaka.media.MediaSourceEngine}
*/
createMediaSourceEngine(mediaElement, closedCaptionsParser, textDisplayer,
onMetadata) {
return new shaka.media.MediaSourceEngine(
mediaElement, closedCaptionsParser, textDisplayer, onMetadata);
}
/**
* Creates a new instance of StreamingEngine. This can be replaced by tests
* to create fake instances instead.
*
* @return {!shaka.media.StreamingEngine}
*/
createStreamingEngine() {
goog.asserts.assert(
this.playhead_ && this.abrManager_ && this.mediaSourceEngine_ &&
this.manifest_,
'Must not be destroyed');
/** @type {shaka.media.StreamingEngine.PlayerInterface} */
const playerInterface = {
getPresentationTime: () => this.playhead_.getTime(),
getBandwidthEstimate: () => this.abrManager_.getBandwidthEstimate(),
mediaSourceEngine: this.mediaSourceEngine_,
netEngine: this.networkingEngine_,
onError: (error) => this.onError_(error),
onEvent: (event) => this.dispatchEvent(event),
onManifestUpdate: () => this.onManifestUpdate_(),
onSegmentAppended: () => this.onSegmentAppended_(),
};
return new shaka.media.StreamingEngine(this.manifest_, playerInterface);
}
/**
* Changes configuration settings on the Player. This checks the names of
* keys and the types of values to avoid coding errors. If there are errors,
* this logs them to the console and returns false. Correct fields are still
* applied even if there are other errors. You can pass an explicit
* <code>undefined</code> value to restore the default value. This has two
* modes of operation:
*
* <p>
* First, this can be passed a single "plain" object. This object should
* follow the {@link shaka.extern.PlayerConfiguration} object. Not all fields
* need to be set; unset fields retain their old values.
*
* <p>
* Second, this can be passed two arguments. The first is the name of the key
* to set. This should be a '.' separated path to the key. For example,
* <code>'streaming.alwaysStreamText'</code>. The second argument is the
* value to set.
*
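   * <p>
   * Illustrative usage (the value shown is an example only):
   * <pre>
   *   // Object form:
   *   player.configure({streaming: {alwaysStreamText: true}});
   *   // Key path form:
   *   player.configure('streaming.alwaysStreamText', true);
   * </pre>
   *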
* @param {string|!Object} config This should either be a field name or an
* object.
* @param {*=} value In the second mode, this is the value to set.
* @return {boolean} True if the passed config object was valid, false if
* there were invalid entries.
* @export
*/
configure(config, value) {
goog.asserts.assert(this.config_, 'Config must not be null!');
goog.asserts.assert(typeof(config) == 'object' || arguments.length == 2,
'String configs should have values!');
// ('fieldName', value) format
if (arguments.length == 2 && typeof(config) == 'string') {
config = shaka.util.ConfigUtils.convertToConfigObject(config, value);
}
goog.asserts.assert(typeof(config) == 'object', 'Should be an object!');
// Deprecate 'manifest.dash.defaultPresentationDelay' configuration.
if (config['manifest'] && config['manifest']['dash'] &&
'defaultPresentationDelay' in config['manifest']['dash']) {
shaka.Deprecate.deprecateFeature(4,
'manifest.dash.defaultPresentationDelay configuration',
          'Please use manifest.defaultPresentationDelay instead.');
config['manifest']['defaultPresentationDelay'] =
config['manifest']['dash']['defaultPresentationDelay'];
delete config['manifest']['dash']['defaultPresentationDelay'];
}
// If lowLatencyMode is enabled, and inaccurateManifestTolerance and
// rebufferingGoal are not specified, set inaccurateManifestTolerance to 0
// and rebufferingGoal to 0.01 by default for low latency streaming.
if (config['streaming'] && config['streaming']['lowLatencyMode']) {
if (config['streaming']['inaccurateManifestTolerance'] == undefined) {
config['streaming']['inaccurateManifestTolerance'] = 0;
}
if (config['streaming']['rebufferingGoal'] == undefined) {
config['streaming']['rebufferingGoal'] = 0.01;
}
}
const ret = shaka.util.PlayerConfiguration.mergeConfigObjects(
this.config_, config, this.defaultConfig_());
this.applyConfig_();
return ret;
}
/**
* Apply config changes.
* @private
*/
applyConfig_() {
if (this.parser_) {
const manifestConfig =
shaka.util.ObjectUtils.cloneObject(this.config_.manifest);
// Don't read video segments if the player is attached to an audio element
if (this.video_ && this.video_.nodeName === 'AUDIO') {
manifestConfig.disableVideo = true;
}
this.parser_.configure(manifestConfig);
}
if (this.drmEngine_) {
this.drmEngine_.configure(this.config_.drm);
}
if (this.streamingEngine_) {
this.streamingEngine_.configure(this.config_.streaming);
// Need to apply the restrictions.
try {
// this.filterManifestWithRestrictions_() may throw.
this.filterManifestWithRestrictions_(this.manifest_);
} catch (error) {
this.onError_(error);
}
if (this.abrManager_) {
// Update AbrManager variants to match these new settings.
this.updateAbrManagerVariants_();
}
// If the streams we are playing are restricted, we need to switch.
const activeVariant = this.streamingEngine_.getCurrentVariant();
if (activeVariant) {
if (!activeVariant.allowedByApplication ||
!activeVariant.allowedByKeySystem) {
shaka.log.debug('Choosing new variant after changing configuration');
this.chooseVariantAndSwitch_();
}
}
}
if (this.networkingEngine_) {
this.networkingEngine_.setForceHTTPS(this.config_.streaming.forceHTTPS);
}
if (this.mediaSourceEngine_) {
const textDisplayerFactory = this.config_.textDisplayFactory;
if (this.lastTextFactory_ != textDisplayerFactory) {
const displayer =
shaka.util.Functional.callFactory(textDisplayerFactory);
this.mediaSourceEngine_.setTextDisplayer(displayer);
this.lastTextFactory_ = textDisplayerFactory;
if (this.streamingEngine_) {
// Reload the text stream, so the cues will load again.
this.streamingEngine_.reloadTextStream();
}
}
}
if (this.abrManager_) {
this.abrManager_.configure(this.config_.abr);
      // Simply enable/disable ABR with each call, since repeated calls to
      // these methods have no additional effect.
if (this.config_.abr.enabled) {
this.abrManager_.enable();
} else {
this.abrManager_.disable();
}
this.onAbrStatusChanged_();
}
if (this.bufferObserver_) {
let rebufferThreshold = this.config_.streaming.rebufferingGoal;
if (this.manifest_) {
rebufferThreshold =
Math.max(rebufferThreshold, this.manifest_.minBufferTime);
}
this.updateBufferingSettings_(rebufferThreshold);
}
if (this.manifest_) {
shaka.Player.applyPlayRange_(this.manifest_.presentationTimeline,
this.config_.playRangeStart,
this.config_.playRangeEnd);
}
}
/**
* Return a copy of the current configuration. Modifications of the returned
* value will not affect the Player's active configuration. You must call
* <code>player.configure()</code> to make changes.
*
* @return {shaka.extern.PlayerConfiguration}
* @export
*/
getConfiguration() {
goog.asserts.assert(this.config_, 'Config must not be null!');
const ret = this.defaultConfig_();
shaka.util.PlayerConfiguration.mergeConfigObjects(
ret, this.config_, this.defaultConfig_());
return ret;
}
/**
* Return a reference to the current configuration. Modifications to the
* returned value will affect the Player's active configuration. This method
* is not exported as sharing configuration with external objects is not
* supported.
*
* @return {shaka.extern.PlayerConfiguration}
*/
getSharedConfiguration() {
goog.asserts.assert(
        this.config_,
        'Cannot call getSharedConfiguration after calling destroy()!');
return this.config_;
}
/**
* Reset configuration to default.
* @export
*/
resetConfiguration() {
goog.asserts.assert(this.config_, 'Cannot be destroyed');
    // Remove the old keys so that open-ended dictionaries like drm.servers are
    // cleared, but keep the same object reference.
for (const key in this.config_) {
delete this.config_[key];
}
shaka.util.PlayerConfiguration.mergeConfigObjects(
this.config_, this.defaultConfig_(), this.defaultConfig_());
this.applyConfig_();
}
/**
* Get the current load mode.
*
* @return {shaka.Player.LoadMode}
* @export
*/
getLoadMode() {
return this.loadMode_;
}
/**
* Get the media element that the player is currently using to play loaded
* content. If the player has not loaded content, this will return
* <code>null</code>.
*
* @return {HTMLMediaElement}
* @export
*/
getMediaElement() {
return this.video_;
}
/**
* @return {shaka.net.NetworkingEngine} A reference to the Player's networking
* engine. Applications may use this to make requests through Shaka's
* networking plugins.
* @export
*/
getNetworkingEngine() {
return this.networkingEngine_;
}
/**
* Get the uri to the asset that the player has loaded. If the player has not
* loaded content, this will return <code>null</code>.
*
* @return {?string}
* @export
*/
getAssetUri() {
return this.assetUri_;
}
/**
* Returns a shaka.ads.AdManager instance, responsible for Dynamic
* Ad Insertion functionality.
*
* @return {shaka.extern.IAdManager}
* @export
*/
getAdManager() {
// NOTE: this clause is redundant, but it keeps the compiler from
// inlining this function. Inlining leads to setting the adManager
// not taking effect in the compiled build.
// Closure has a @noinline flag, but apparently not all cases are
// supported by it, and ours isn't.
// If they expand support, we might be able to get rid of this
// clause.
if (!this.adManager_) {
return null;
}
return this.adManager_;
}
/**
* Get if the player is playing live content. If the player has not loaded
* content, this will return <code>false</code>.
*
* @return {boolean}
* @export
*/
isLive() {
if (this.manifest_) {
return this.manifest_.presentationTimeline.isLive();
}
// For native HLS, the duration for live streams seems to be Infinity.
if (this.video_ && this.video_.src) {
return this.video_.duration == Infinity;
}
return false;
}
/**
* Get if the player is playing in-progress content. If the player has not
* loaded content, this will return <code>false</code>.
*
* @return {boolean}
* @export
*/
isInProgress() {
return this.manifest_ ?
this.manifest_.presentationTimeline.isInProgress() :
false;
}
/**
* Check if the manifest contains only audio-only content. If the player has
* not loaded content, this will return <code>false</code>.
*
* <p>
   * The player does not support content that contains more than one type of
   * variant (i.e. mixing audio-only, video-only, audio-video). Content will be
* filtered to only contain one type of variant.
*
* @return {boolean}
* @export
*/
isAudioOnly() {
if (this.manifest_) {
const variants = this.manifest_.variants;
if (!variants.length) {
return false;
}
// Note that if there are some audio-only variants and some audio-video
// variants, the audio-only variants are removed during filtering.
// Therefore if the first variant has no video, that's sufficient to say
// it is audio-only content.
return !variants[0].video;
} else if (this.video_ && this.video_.src) {
// If we have video track info, use that. It will be the least
// error-prone way with native HLS. In contrast, videoHeight might be
// unset until the first frame is loaded. Since isAudioOnly is queried
// by the UI on the 'trackschanged' event, the videoTracks info should be
// up-to-date.
if (this.video_.videoTracks) {
return this.video_.videoTracks.length == 0;
}
// We cast to the more specific HTMLVideoElement to access videoHeight.
// This might be an audio element, though, in which case videoHeight will
// be undefined at runtime. For audio elements, this will always return
// true.
const video = /** @type {HTMLVideoElement} */(this.video_);
return video.videoHeight == 0;
} else {
return false;
}
}
/**
* Return the value of lowLatencyMode configuration.
* @return {boolean}
* @private
*/
isLowLatencyMode_() {
return this.config_.streaming.lowLatencyMode;
}
/**
* Return the value of autoLowLatencyMode configuration.
* @return {boolean}
* @private
*/
isAutoLowLatencyMode_() {
return this.config_.streaming.autoLowLatencyMode;
}
/**
* Get the range of time (in seconds) that seeking is allowed. If the player
* has not loaded content, this will return a range from 0 to 0.
*
* @return {{start: number, end: number}}
* @export
*/
seekRange() {
if (this.manifest_) {
const timeline = this.manifest_.presentationTimeline;
return {
'start': timeline.getSeekRangeStart(),
'end': timeline.getSeekRangeEnd(),
};
}
// If we have loaded content with src=, we ask the video element for its
// seekable range. This covers both plain mp4s and native HLS playbacks.
if (this.video_ && this.video_.src) {
const seekable = this.video_.seekable;
if (seekable.length) {
return {
'start': seekable.start(0),
'end': seekable.end(seekable.length - 1),
};
}
}
return {'start': 0, 'end': 0};
}
/**
* Get the key system currently used by EME. If EME is not being used, this
* will return an empty string. If the player has not loaded content, this
* will return an empty string.
*
* @return {string}
* @export
*/
keySystem() {
return shaka.media.DrmEngine.keySystem(this.drmInfo());
}
/**
* Get the drm info used to initialize EME. If EME is not being used, this
* will return <code>null</code>. If the player is idle or has not initialized
* EME yet, this will return <code>null</code>.
*
* @return {?shaka.extern.DrmInfo}
* @export
*/
drmInfo() {
return this.drmEngine_ ? this.drmEngine_.getDrmInfo() : null;
}
/**
* Get the next known expiration time for any EME session. If the session
* never expires, this will return <code>Infinity</code>. If there are no EME
* sessions, this will return <code>Infinity</code>. If the player has not
* loaded content, this will return <code>Infinity</code>.
*
* @return {number}
* @export
*/
getExpiration() {
return this.drmEngine_ ? this.drmEngine_.getExpiration() : Infinity;
}
/**
* Gets a map of EME key ID to the current key status.
*
* @return {!Object<string, string>}
* @export
*/
getKeyStatuses() {
return this.drmEngine_ ? this.drmEngine_.getKeyStatuses() : {};
}
/**
* Check if the player is currently in a buffering state (has too little
* content to play smoothly). If the player has not loaded content, this will
* return <code>false</code>.
*
* @return {boolean}
* @export
*/
isBuffering() {
const State = shaka.media.BufferingObserver.State;
return this.bufferObserver_ ?
this.bufferObserver_.getState() == State.STARVING :
false;
}
/**
* Get the playback rate of what is playing right now. If we are using trick
* play, this will return the trick play rate.
* If no content is playing, this will return 0.
* If content is buffering, this will return the expected playback rate once
* the video starts playing.
*
* <p>
* If the player has not loaded content, this will return a playback rate of
* 0.
*
* @return {number}
* @export
*/
getPlaybackRate() {
if (!this.video_) {
return 0;
}
return this.playRateController_ ?
this.playRateController_.getRealRate() :
1;
}
/**
* Enable trick play to skip through content without playing by repeatedly
* seeking. For example, a rate of 2.5 would result in 2.5 seconds of content
* being skipped every second. A negative rate will result in moving
* backwards.
*
* <p>
* If the player has not loaded content or is still loading content this will
* be a no-op. Wait until <code>load</code> has completed before calling.
*
* <p>
* Trick play will be canceled automatically if the playhead hits the
* beginning or end of the seekable range for the content.
*
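   * <p>
   * Illustrative usage (example rates only):
   * <pre>
   *   player.trickPlay(2.5);    // fast-forward through content
   *   player.trickPlay(-4);     // move backwards through content
   *   player.cancelTrickPlay(); // return to normal playback
   * </pre>
   *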
* @param {number} rate
* @export
*/
trickPlay(rate) {
// A playbackRate of 0 is used internally when we are in a buffering state,
// and doesn't make sense for trick play. If you set a rate of 0 for trick
// play, we will reject it and issue a warning. If it happens during a
// test, we will fail the test through this assertion.
goog.asserts.assert(rate != 0, 'Should never set a trick play rate of 0!');
if (rate == 0) {
shaka.log.alwaysWarn('A trick play rate of 0 is unsupported!');
return;
}
if (this.video_.paused) {
// Our fast forward is implemented with playbackRate and needs the video
// to be playing (to not be paused) to take immediate effect.
// If the video is paused, "unpause" it.
this.video_.play();
}
this.playRateController_.set(rate);
if (this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE) {
this.abrManager_.playbackRateChanged(rate);
this.streamingEngine_.setTrickPlay(Math.abs(rate) > 1);
}
}
/**
* Cancel trick-play. If the player has not loaded content or is still loading
* content this will be a no-op.
*
* @export
*/
cancelTrickPlay() {
const defaultPlaybackRate = this.playRateController_.getDefaultRate();
if (this.loadMode_ == shaka.Player.LoadMode.SRC_EQUALS) {
this.playRateController_.set(defaultPlaybackRate);
}
if (this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE) {
this.playRateController_.set(defaultPlaybackRate);
this.abrManager_.playbackRateChanged(defaultPlaybackRate);
this.streamingEngine_.setTrickPlay(false);
}
}
/**
* Return a list of variant tracks that can be switched to.
*
* <p>
* If the player has not loaded content, this will return an empty list.
*
* @return {!Array.<shaka.extern.Track>}
* @export
*/
getVariantTracks() {
if (this.manifest_) {
const currentVariant = this.streamingEngine_ ?
this.streamingEngine_.getCurrentVariant() : null;
const tracks = [];
// Convert each variant to a track.
for (const variant of this.manifest_.variants) {
if (!shaka.util.StreamUtils.isPlayable(variant)) {
continue;
}
const track = shaka.util.StreamUtils.variantToTrack(variant);
track.active = variant == currentVariant;
tracks.push(track);
}
return tracks;
} else if (this.video_ && this.video_.audioTracks) {
// Safari's native HLS always shows a single element in videoTracks.
// You can't use that API to change resolutions. But we can use
// audioTracks to generate a variant list that is usable for changing
// languages.
const audioTracks = Array.from(this.video_.audioTracks);
return audioTracks.map((audio) =>
shaka.util.StreamUtils.html5AudioTrackToTrack(audio));
} else {
return [];
}
}
/**
* Return a list of text tracks that can be switched to.
*
* <p>
* If the player has not loaded content, this will return an empty list.
*
* @return {!Array.<shaka.extern.Track>}
* @export
*/
getTextTracks() {
if (this.manifest_) {
const currentTextStream = this.streamingEngine_ ?
this.streamingEngine_.getCurrentTextStream() : null;
const tracks = [];
// Convert all selectable text streams to tracks.
for (const text of this.manifest_.textStreams) {
const track = shaka.util.StreamUtils.textStreamToTrack(text);
track.active = text == currentTextStream;
tracks.push(track);
}
return tracks;
} else if (this.video_ && this.video_.src && this.video_.textTracks) {
const textTracks = this.getFilteredTextTracks_();
const StreamUtils = shaka.util.StreamUtils;
return textTracks.map((text) => StreamUtils.html5TextTrackToTrack(text));
} else {
return [];
}
}
/**
* Return a list of image tracks that can be switched to.
*
* If the player has not loaded content, this will return an empty list.
*
* @return {!Array.<shaka.extern.Track>}
* @export
*/
getImageTracks() {
if (this.manifest_) {
const imageStreams = this.manifest_.imageStreams;
const StreamUtils = shaka.util.StreamUtils;
return imageStreams.map((image) => StreamUtils.imageStreamToTrack(image));
} else {
return [];
}
}
/**
   * Return a Thumbnail object from an image track ID and time.
   *
   * If the player has not loaded content, this will return null.
*
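   * Illustrative usage (assumes an image track from getImageTracks()):
   * <pre>
   *   const imageTracks = player.getImageTracks();
   *   const thumbnail = await player.getThumbnails(imageTracks[0].id, 10);
   * </pre>
   *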
* @param {number} trackId
* @param {number} time
* @return {!Promise.<?shaka.extern.Thumbnail>}
* @export
*/
async getThumbnails(trackId, time) {
if (this.manifest_) {
const imageStream = this.manifest_.imageStreams.find(
(stream) => stream.id == trackId);
if (!imageStream) {
return null;
}
if (!imageStream.segmentIndex) {
await imageStream.createSegmentIndex();
}
const referencePosition = imageStream.segmentIndex.find(time);
if (referencePosition == null) {
return null;
}
const reference = imageStream.segmentIndex.get(referencePosition);
      // This expression is used to detect one or more digits (0-9), followed
      // by an 'x', followed by one or more digits (0-9).
const match = /(\d+)x(\d+)/.exec(imageStream.tilesLayout);
if (!match) {
shaka.log.warning('Tiles layout does not contain a valid format ' +
            '(columns x rows)');
return null;
}
const fullImageWidth = imageStream.width || 0;
const fullImageHeight = imageStream.height || 0;
const columns = parseInt(match[1], 10);
const rows = parseInt(match[2], 10);
const width = fullImageWidth / columns;
const height = fullImageHeight / rows;
let positionX = 0;
let positionY = 0;
const totalImages = columns * rows;
// If the number of images in the segment is greater than 1, we have to
// find the correct image. For that we will return to the app the
// coordinates of the position of the correct image.
// Image search is always from left to right and top to bottom.
// Note: The time between images within the segment is always
// equidistant.
//
// Eg: Total images 5, tileLayout 5x1, segmentTime 5, thumbnailTime 2
// positionX = 0.4 * fullImageWidth
// positionY = 0
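      // Eg: Total images 25, tileLayout 5x5, segmentTime 5, thumbnailTime 2
      //     thumbnailPosition = floor(2 * 25 / 5) = 10
      //     positionX = (10 % 5) / 5 * fullImageWidth = 0
      //     positionY = floor(10 / 5) / 5 * fullImageHeight
      //               = 0.4 * fullImageHeight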
if (totalImages > 1) {
const thumbnailTime = time - reference.startTime;
const segmentTime = reference.endTime - reference.startTime;
const thumbnailPosition =
Math.floor(thumbnailTime * totalImages / segmentTime);
positionX = (thumbnailPosition % columns) / columns * fullImageWidth;
        positionY =
            Math.floor(thumbnailPosition / columns) / rows * fullImageHeight;
}
return {
height: height,
positionX: positionX,
positionY: positionY,
uris: reference.getUris(),
width: width,
};
}
return null;
}
/**
* Select a specific text track. <code>track</code> should come from a call to
* <code>getTextTracks</code>. If the track is not found, this will be a
* no-op. If the player has not loaded content, this will be a no-op.
*
* <p>
* Note that <code>AdaptationEvents</code> are not fired for manual track
* selections.
*
* @param {shaka.extern.Track} track
* @export
*/
selectTextTrack(track) {
if (this.manifest_ && this.streamingEngine_) {
const stream = this.manifest_.textStreams.find(
(stream) => stream.id == track.id);
if (!stream) {
shaka.log.error('No stream with id', track.id);
return;
}
if (stream == this.streamingEngine_.getCurrentTextStream()) {
shaka.log.debug('Text track already selected.');
return;
}
// Add entries to the history.
this.addTextStreamToSwitchHistory_(stream, /* fromAdaptation= */ false);
this.streamingEngine_.switchTextStream(stream);
this.onTextChanged_();
// Workaround for https://github.com/google/shaka-player/issues/1299
// When track is selected, back-propagate the language to
// currentTextLanguage_.
this.currentTextLanguage_ = stream.language;
} else if (this.video_ && this.video_.src && this.video_.textTracks) {
const textTracks = this.getFilteredTextTracks_();
for (const textTrack of textTracks) {
if (shaka.util.StreamUtils.html5TrackId(textTrack) == track.id) {
// Leave the track in 'hidden' if it's selected but not showing.
textTrack.mode = this.isTextVisible_ ? 'showing' : 'hidden';
} else {
// Safari allows multiple text tracks to have mode == 'showing', so be
// explicit in resetting the others.
textTrack.mode = 'disabled';
}
}
this.onTextChanged_();
}
}
/**
* Select a specific variant track to play. <code>track</code> should come
* from a call to <code>getVariantTracks</code>. If <code>track</code> cannot
* be found, this will be a no-op. If the player has not loaded content, this
* will be a no-op.
*
* <p>
* Changing variants will take effect once the currently buffered content has
* been played. To force the change to happen sooner, use
* <code>clearBuffer</code> with <code>safeMargin</code>. Setting
* <code>clearBuffer</code> to <code>true</code> will clear all buffered
* content after <code>safeMargin</code>, allowing the new variant to start
* playing sooner.
*
* <p>
* Note that <code>AdaptationEvents</code> are not fired for manual track
* selections.
*
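   * <p>
   * Illustrative usage (assumes a track obtained from getVariantTracks()):
   * <pre>
   *   const tracks = player.getVariantTracks();
   *   // Switch, keeping only a 2-second safe margin of buffered content:
   *   player.selectVariantTrack(tracks[0], true, 2);
   * </pre>
   *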
* @param {shaka.extern.Track} track
* @param {boolean=} clearBuffer
   * @param {number=} safeMargin Optional amount of buffer (in seconds) to
   *   retain when clearing the buffer. Useful for switching variants quickly
   *   without causing a buffering event. Defaults to 0 if not provided.
   *   Ignored if clearBuffer is false. Can cause hiccups on some browsers if
   *   chosen too small; the duration of two segments is a fair minimum value
   *   to consider as a safeMargin.
* @export
*/
selectVariantTrack(track, clearBuffer = false, safeMargin = 0) {
if (this.manifest_ && this.streamingEngine_) {
if (this.config_.abr.enabled) {
shaka.log.alwaysWarn('Changing tracks while abr manager is enabled ' +
'will likely result in the selected track ' +
          'being overridden. Consider disabling abr before ' +
'calling selectVariantTrack().');
}
const variant = this.manifest_.variants.find(
(variant) => variant.id == track.id);
if (!variant) {
shaka.log.error('No variant with id', track.id);
return;
}
// Double check that the track is allowed to be played. The track list
// should only contain playable variants, but if restrictions change and
// |selectVariantTrack| is called before the track list is updated, we
// could get a now-restricted variant.
if (!shaka.util.StreamUtils.isPlayable(variant)) {
shaka.log.error('Unable to switch to restricted track', track.id);
return;
}
if (variant == this.streamingEngine_.getCurrentVariant()) {
shaka.log.debug('Variant already selected.');
return;
}
// Add entries to the history.
this.addVariantToSwitchHistory_(variant, /* fromAdaptation= */ false);
this.streamingEngine_.switchVariant(variant, clearBuffer, safeMargin);
// Dispatch a 'variantchanged' event
this.onVariantChanged_();
// Workaround for https://github.com/google/shaka-player/issues/1299
// When track is selected, back-propagate the language to
// currentAudioLanguage_.
this.currentAdaptationSetCriteria_ = new shaka.media.ExampleBasedCriteria(
variant);
// Update AbrManager variants to match these new settings.
this.updateAbrManagerVariants_();
} else if (this.video_ && this.video_.audioTracks) {
// Safari's native HLS won't let you choose an explicit variant, though
// you can choose audio languages this way.
const audioTracks = Array.from(this.video_.audioTracks);
for (const audioTrack of audioTracks) {
if (shaka.util.StreamUtils.html5TrackId(audioTrack) == track.id) {
// This will reset the "enabled" of other tracks to false.
audioTrack.enabled = true;
}
}
this.onVariantChanged_();
}
}
/**
* Return a list of audio language-role combinations available. If the
* player has not loaded any content, this will return an empty list.
*
* @return {!Array.<shaka.extern.LanguageRole>}
* @export
*/
getAudioLanguagesAndRoles() {
return shaka.Player.getLanguageAndRolesFrom_(this.getVariantTracks());
}
/**
* Return a list of text language-role combinations available. If the player
   * has not loaded any content, this will return an empty list.
*
* @return {!Array.<shaka.extern.LanguageRole>}
* @export
*/
getTextLanguagesAndRoles() {
return shaka.Player.getLanguageAndRolesFrom_(this.getTextTracks());
}
/**
* Return a list of audio languages available. If the player has not loaded
* any content, this will return an empty list.
*
* @return {!Array.<string>}
* @export
*/
getAudioLanguages() {
return Array.from(shaka.Player.getLanguagesFrom_(this.getVariantTracks()));
}
/**
* Return a list of text languages available. If the player has not loaded
* any content, this will return an empty list.
*
* @return {!Array.<string>}
* @export
*/
getTextLanguages() {
return Array.from(shaka.Player.getLanguagesFrom_(this.getTextTracks()));
}
/**
* Sets the current audio language and current variant role to the selected
* language and role, and chooses a new variant if need be. If the player has
* not loaded any content, this will be a no-op.
*
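   * Illustrative usage (example language/role values only):
   * <pre>
   *   player.selectAudioLanguage('es');
   *   player.selectAudioLanguage('en', 'commentary');
   * </pre>
   *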
* @param {string} language
* @param {string=} role
* @export
*/
selectAudioLanguage(language, role) {
const LanguageUtils = shaka.util.LanguageUtils;
if (this.manifest_ && this.playhead_) {
this.currentAdaptationSetCriteria_ =
new shaka.media.PreferenceBasedCriteria(language, role || '',
/* channelCount= */ 0, /* label= */ '');
if (!this.config_.abr.enabled) {
const diff = (a, b) => {
if (!a.video && !b.video) {
return 0;
} else if (!a.video || !b.video) {
return Infinity;
} else {
return Math.abs((a.video.height || 0) - (b.video.height || 0)) +
Math.abs((a.video.width || 0) - (b.video.width || 0));
}
};
// Find the variant whose size is closest to the active variant. This
// ensures we stay at about the same resolution when just changing the
// language/role.
const active = this.streamingEngine_.getCurrentVariant();
const set =
this.currentAdaptationSetCriteria_.create(this.manifest_.variants);
let bestVariant = null;
for (const curVariant of set.values()) {
if (!bestVariant ||
diff(bestVariant, active) > diff(curVariant, active)) {
bestVariant = curVariant;
}
}
if (bestVariant) {
const track = shaka.util.StreamUtils.variantToTrack(bestVariant);
this.selectVariantTrack(track, /* clearBuffer= */ true);
return;
}
}
// If we haven't switched yet, just use ABR to find a new track.
this.chooseVariantAndSwitch_();
} else if (this.video_ && this.video_.audioTracks) {
const audioTracks = Array.from(this.video_.audioTracks);
const selectedLanguage = LanguageUtils.normalize(language);
let languageMatch = null;
let languageAndRoleMatch = null;
for (const audioTrack of audioTracks) {
const track = shaka.util.StreamUtils.html5AudioTrackToTrack(audioTrack);
if (LanguageUtils.normalize(track.language) == selectedLanguage) {
languageMatch = audioTrack;
if (role) {
if (track.roles.includes(role)) {
languageAndRoleMatch = audioTrack;
}
} else { // no role
if (track.roles.length == 0) {
languageAndRoleMatch = audioTrack;
}
}
}
}
// This will reset the "enabled" of other tracks to false.
if (languageAndRoleMatch) {
languageAndRoleMatch.enabled = true;
this.onVariantChanged_();
} else if (languageMatch) {
languageMatch.enabled = true;
this.onVariantChanged_();
}
}
}
/**
   * Sets the current text language and current text role to the selected
   * language and role, and chooses a new text stream if need be. If the
   * player has not loaded any content, this will be a no-op.
*
* @param {string} language
* @param {string=} role
* @param {boolean=} forced
* @export
*/
selectTextLanguage(language, role, forced = false) {
const LanguageUtils = shaka.util.LanguageUtils;
if (this.manifest_ && this.playhead_) {
this.currentTextLanguage_ = language;
this.currentTextRole_ = role || '';
this.currentTextForced_ = forced;
const chosenText = this.chooseTextStream_();
if (chosenText) {
if (chosenText == this.streamingEngine_.getCurrentTextStream()) {
shaka.log.debug('Text track already selected.');
return;
}
this.addTextStreamToSwitchHistory_(
chosenText, /* fromAdaptation= */ false);
if (this.shouldStreamText_()) {
this.streamingEngine_.switchTextStream(chosenText);
this.onTextChanged_();
}
}
} else {
const selectedLanguage = LanguageUtils.normalize(language);
const track = this.getTextTracks().find((t) => {
return LanguageUtils.normalize(t.language) == selectedLanguage &&
(!role || t.roles.includes(role)) && t.forced == forced;
});
if (track) {
this.selectTextTrack(track);
}
}
}
/**
   * Select variant tracks that have a given label. This assumes the
   * label uniquely identifies an audio stream, so all the variants with that
   * label are expected to have the same variant.audio.
*
* @param {string} label
* @export
*/
selectVariantsByLabel(label) {
if (this.manifest_ && this.playhead_) {
let firstVariantWithLabel = null;
for (const variant of this.manifest_.variants) {
if (variant.audio.label == label) {
firstVariantWithLabel = variant;
break;
}
}
if (firstVariantWithLabel == null) {
shaka.log.warning('No variants were found with label: ' +
label + '. Ignoring the request to switch.');
return;
}
// Label is a unique identifier of a variant's audio stream.
// Because of that we assume that all the variants with the same
// label have the same language.
this.currentAdaptationSetCriteria_ =
new shaka.media.PreferenceBasedCriteria(
firstVariantWithLabel.language, '', 0, label);
this.chooseVariantAndSwitch_();
}
}
/**
* Check if the text displayer is enabled.
*
* @return {boolean}
* @export
*/
isTextTrackVisible() {
const expected = this.isTextVisible_;
if (this.mediaSourceEngine_) {
// Make sure our values are still in-sync.
const actual = this.mediaSourceEngine_.getTextDisplayer().isTextVisible();
goog.asserts.assert(
actual == expected, 'text visibility has fallen out of sync');
// Always return the actual value so that the app has the most accurate
// information (in the case that the values come out of sync in prod).
return actual;
} else if (this.video_ && this.video_.src && this.video_.textTracks) {
const textTracks = this.getFilteredTextTracks_();
return textTracks.some((t) => t.mode == 'showing');
}
return expected;
}
/**
* Ignore the TextTracks with the 'metadata' or 'chapters' kind, or the one
* generated by the SimpleTextDisplayer.
*
* @return {!Array.<TextTrack>}
* @private
*/
getFilteredTextTracks_() {
goog.asserts.assert(this.video_.textTracks,
'TextTracks should be valid.');
return Array.from(this.video_.textTracks)
.filter((t) => t.kind != 'metadata' && t.kind != 'chapters' &&
t.label != shaka.Player.TextTrackLabel);
}
/**
* Get the TextTracks with the 'metadata' kind.
*
* @return {!Array.<TextTrack>}
* @private
*/
getMetadataTracks_() {
goog.asserts.assert(this.video_.textTracks,
'TextTracks should be valid.');
return Array.from(this.video_.textTracks)
.filter((t) => t.kind == 'metadata');
}
/**
* Enable or disable the text displayer. If the player is in an unloaded
* state, the request will be applied next time content is loaded.
*
* @param {boolean} isVisible
* @export
*/
setTextTrackVisibility(isVisible) {
    const oldVisibility = this.isTextVisible_;
    // Convert to boolean in case apps pass 0/1 instead of false/true.
    const newVisibility = !!isVisible;
    if (oldVisibility == newVisibility) {
return;
}
this.isTextVisible_ = newVisibility;
    // Hold off on setting the text visibility until we have all the components
// we need. This ensures that they stay in-sync.
if (this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE) {
this.mediaSourceEngine_.getTextDisplayer()
.setTextVisibility(newVisibility);
// When the user wants to see captions, we stream captions. When the user
// doesn't want to see captions, we don't stream captions. This is to
// avoid bandwidth consumption by an unused resource. The app developer
// can override this and configure us to always stream captions.
if (!this.config_.streaming.alwaysStreamText) {
if (newVisibility) {
if (this.streamingEngine_.getCurrentTextStream()) {
// We already have a selected text stream.
} else {
// Find the text stream that best matches the user's preferences.
const streams =
shaka.util.StreamUtils.filterStreamsByLanguageAndRole(
this.manifest_.textStreams,
this.currentTextLanguage_,
this.currentTextRole_,
this.currentTextForced_);
// It is possible that there are no streams to play.
if (streams.length > 0) {
this.streamingEngine_.switchTextStream(streams[0]);
this.onTextChanged_();
}
}
} else {
this.streamingEngine_.unloadTextStream();
}
}
} else if (this.video_ && this.video_.src && this.video_.textTracks) {
const textTracks = this.getFilteredTextTracks_();
// Find the active track by looking for one which is not disabled. This
// is the only way to identify the track which is currently displayed.
// Set it to 'showing' or 'hidden' based on newVisibility.
for (const textTrack of textTracks) {
if (textTrack.mode != 'disabled') {
textTrack.mode = newVisibility ? 'showing' : 'hidden';
}
}
}
// We need to fire the event after we have updated everything so that
// everything will be in a stable state when the app responds to the
// event.
this.onTextTrackVisibility_();
}
/**
* Get the current playhead position as a date. This should only be called
* when the player has loaded a live stream. If the player has not loaded a
* live stream, this will return <code>null</code>.
*
* @return {Date}
* @export
*/
getPlayheadTimeAsDate() {
if (!this.isLive()) {
shaka.log.warning('getPlayheadTimeAsDate is for live streams!');
return null;
}
const walkerPayload = this.walker_.getCurrentPayload();
let presentationTime = 0;
if (this.playhead_) {
presentationTime = this.playhead_.getTime();
} else if (walkerPayload) {
if (walkerPayload.startTime == null) {
// A live stream with no requested start time and no playhead yet. We
// would start at the live edge, but we don't have that yet, so return
// the current date & time.
return new Date();
} else {
// A specific start time has been requested. This is what Playhead will
// use once it is created.
presentationTime = walkerPayload.startTime;
}
}
if (this.manifest_) {
const timeline = this.manifest_.presentationTimeline;
const startTime = timeline.getPresentationStartTime();
return new Date(/* ms= */ (startTime + presentationTime) * 1000);
} else if (this.video_ && this.video_.getStartDate) {
// Apple's native HLS gives us getStartDate(), which is only available if
// EXT-X-PROGRAM-DATETIME is in the playlist.
const startDate = this.video_.getStartDate();
if (isNaN(startDate.getTime())) {
shaka.log.warning(
'EXT-X-PROGRAM-DATETIME required to get playhead time as Date!');
return null;
}
return new Date(startDate.getTime() + (presentationTime * 1000));
} else {
shaka.log.warning('No way to get playhead time as Date!');
return null;
}
}
/**
* Get the presentation start time as a date. This should only be called when
* the player has loaded a live stream. If the player has not loaded a live
* stream, this will return <code>null</code>.
*
* @return {Date}
* @export
*/
getPresentationStartTimeAsDate() {
if (!this.isLive()) {
shaka.log.warning('getPresentationStartTimeAsDate is for live streams!');
return null;
}
if (this.manifest_) {
const timeline = this.manifest_.presentationTimeline;
const startTime = timeline.getPresentationStartTime();
goog.asserts.assert(startTime != null,
'Presentation start time should not be null!');
return new Date(/* ms= */ startTime * 1000);
} else if (this.video_ && this.video_.getStartDate) {
// Apple's native HLS gives us getStartDate(), which is only available if
// EXT-X-PROGRAM-DATETIME is in the playlist.
const startDate = this.video_.getStartDate();
if (isNaN(startDate.getTime())) {
shaka.log.warning(
'EXT-X-PROGRAM-DATETIME required to get presentation start time ' +
'as Date!');
return null;
}
return startDate;
} else {
shaka.log.warning('No way to get presentation start time as Date!');
return null;
}
}
/**
* Get information about what the player has buffered. If the player has not
* loaded content or is currently loading content, the buffered content will
* be empty.
*
* @return {shaka.extern.BufferedInfo}
* @export
*/
getBufferedInfo() {
if (this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE) {
return this.mediaSourceEngine_.getBufferedInfo();
}
const info = {
total: [],
audio: [],
video: [],
text: [],
};
if (this.loadMode_ == shaka.Player.LoadMode.SRC_EQUALS) {
const TimeRangesUtils = shaka.media.TimeRangesUtils;
info.total = TimeRangesUtils.getBufferedInfo(this.video_.buffered);
}
return info;
}
/**
* Get statistics for the current playback session. If the player is not
* playing content, this will return an empty stats object.
*
* @return {shaka.extern.Stats}
* @export
*/
getStats() {
// If the Player is not in a fully-loaded state, then return an empty stats
// blob so that this call will never fail.
const loaded = this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE ||
this.loadMode_ == shaka.Player.LoadMode.SRC_EQUALS;
if (!loaded) {
return shaka.util.Stats.getEmptyBlob();
}
this.updateStateHistory_();
goog.asserts.assert(this.video_, 'If we have stats, we should have video_');
const element = /** @type {!HTMLVideoElement} */ (this.video_);
const completionRatio = element.currentTime / element.duration;
if (!isNaN(completionRatio)) {
this.stats_.setCompletionPercent(Math.round(100 * completionRatio));
}
if (element.getVideoPlaybackQuality) {
const info = element.getVideoPlaybackQuality();
this.stats_.setDroppedFrames(
Number(info.droppedVideoFrames),
Number(info.totalVideoFrames));
this.stats_.setCorruptedFrames(Number(info.corruptedVideoFrames));
}
const licenseSeconds =
this.drmEngine_ ? this.drmEngine_.getLicenseTime() : NaN;
this.stats_.setLicenseTime(licenseSeconds);
if (this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE) {
      // Even though we are loaded, it is still possible that we don't have a
// variant yet because we set the load mode before we select the first
// variant to stream.
const variant = this.streamingEngine_.getCurrentVariant();
if (variant) {
const rate = this.playRateController_ ?
this.playRateController_.getRealRate() : 1;
const variantBandwidth = rate * variant.bandwidth;
        // TODO: Should include text bandwidth if it is enabled.
const currentStreamBandwidth = variantBandwidth;
this.stats_.setCurrentStreamBandwidth(currentStreamBandwidth);
}
if (variant && variant.video) {
this.stats_.setResolution(
/* width= */ variant.video.width || NaN,
/* height= */ variant.video.height || NaN);
}
if (this.isLive()) {
const now = this.getPresentationStartTimeAsDate().valueOf() +
this.seekRange().end * 1000;
const latency = (Date.now() - now) / 1000;
this.stats_.setLiveLatency(latency);
}
if (this.manifest_ && this.manifest_.presentationTimeline) {
const maxSegmentDuration =
this.manifest_.presentationTimeline.getMaxSegmentDuration();
this.stats_.setMaxSegmentDuration(maxSegmentDuration);
}
const estimate = this.abrManager_.getBandwidthEstimate();
this.stats_.setBandwidthEstimate(estimate);
}
return this.stats_.getBlob();
}
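// Illustrative usage sketch (assumes a loaded `player`; field names follow
// shaka.extern.Stats; not part of the original source):
//
//   const stats = player.getStats();
//   console.log('estimated bandwidth (bits/s):', stats.estimatedBandwidth);
//   console.log('current resolution:', stats.width, 'x', stats.height);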
/**
* Adds the given text track to the loaded manifest. <code>load()</code> must
* resolve before calling. The presentation must have a duration.
*
* This returns the created track, which can immediately be selected by the
* application. The track will not be automatically selected.
*
* @param {string} uri
* @param {string} language
* @param {string} kind
* @param {string=} mimeType
* @param {string=} codec
* @param {string=} label
* @param {boolean=} forced
* @return {shaka.extern.Track}
* @export
*/
addTextTrack(uri, language, kind, mimeType, codec, label, forced = false) {
shaka.Deprecate.deprecateFeature(4,
'addTextTrack',
'Please use addTextTrackAsync.');
if (this.loadMode_ != shaka.Player.LoadMode.MEDIA_SOURCE &&
this.loadMode_ != shaka.Player.LoadMode.SRC_EQUALS) {
shaka.log.error(
'Must call load() and wait for it to resolve before adding text ' +
'tracks.');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.PLAYER,
shaka.util.Error.Code.CONTENT_NOT_LOADED);
}
if (!mimeType) {
// Try using the uri extension.
const extension = shaka.media.ManifestParser.getExtension(uri);
mimeType = {
'sbv': 'text/x-subviewer',
'srt': 'text/srt',
'vtt': 'text/vtt',
'webvtt': 'text/vtt',
'ttml': 'application/ttml+xml',
'lrc': 'application/x-subtitle-lrc',
'ssa': 'text/x-ssa',
'ass': 'text/x-ssa',
}[extension];
if (!mimeType) {
shaka.log.error(
'The mimeType has not been provided and it could not be deduced ' +
'from its extension.');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.TEXT_COULD_NOT_GUESS_MIME_TYPE,
extension);
}
}
if (this.loadMode_ == shaka.Player.LoadMode.SRC_EQUALS) {
if (mimeType != 'text/vtt') {
shaka.log.error('Only WebVTT is supported when using src=');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.TEXT_ONLY_WEBVTT_SRC_EQUALS,
mimeType);
}
if (forced) {
// See: https://github.com/whatwg/html/issues/4472
kind = 'forced';
}
const trackElement = document.createElement('track');
trackElement.setAttribute('src', uri);
trackElement.setAttribute('label', label || '');
trackElement.setAttribute('kind', kind);
trackElement.setAttribute('srclang', language);
// Because we're pulling in the text track file via JavaScript, the
// same-origin policy applies. If you'd like to have a player served
// from one domain, but the text track served from another, you'll
// need to enable CORS in order to do so. In addition to enabling CORS
// on the server serving the text tracks, you will need to add the
// crossorigin attribute to the video element itself.
if (!this.video_.getAttribute('crossorigin')) {
this.video_.setAttribute('crossorigin', 'anonymous');
}
this.video_.appendChild(trackElement);
const textTracks = this.getTextTracks();
const srcTrack = textTracks.find((t) => {
return t.language == language &&
t.label == (label || '') &&
t.kind == kind;
});
if (srcTrack) {
this.onTracksChanged_();
return srcTrack;
}
// This should not happen, but there are browser implementations that may
// not support the Track element.
shaka.log.error('Cannot add this text when loaded with src=');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.CANNOT_ADD_EXTERNAL_TEXT_TO_SRC_EQUALS);
}
const ContentType = shaka.util.ManifestParserUtils.ContentType;
const duration = this.manifest_.presentationTimeline.getDuration();
if (duration == Infinity) {
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.MANIFEST,
shaka.util.Error.Code.CANNOT_ADD_EXTERNAL_TEXT_TO_LIVE_STREAM);
}
/** @type {shaka.extern.Stream} */
const stream = {
id: this.nextExternalStreamId_++,
originalId: null,
createSegmentIndex: () => Promise.resolve(),
segmentIndex: shaka.media.SegmentIndex.forSingleSegment(
/* startTime= */ 0,
/* duration= */ duration,
/* uris= */ [uri]),
mimeType: mimeType || '',
codecs: codec || '',
kind: kind,
encrypted: false,
drmInfos: [],
keyIds: new Set(),
language: language,
label: label || null,
type: ContentType.TEXT,
primary: false,
trickModeVideo: null,
emsgSchemeIdUris: null,
roles: [],
forced: !!forced,
channelsCount: null,
audioSamplingRate: null,
spatialAudio: false,
closedCaptions: null,
};
const fullMimeType = shaka.util.MimeUtils.getFullType(
stream.mimeType, stream.codecs);
const supported = shaka.text.TextEngine.isTypeSupported(fullMimeType);
if (!supported) {
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.MISSING_TEXT_PLUGIN,
mimeType);
}
this.manifest_.textStreams.push(stream);
this.onTracksChanged_();
return shaka.util.StreamUtils.textStreamToTrack(stream);
}
/**
* Adds the given text track to the loaded manifest. <code>load()</code> must
* resolve before calling. The presentation must have a duration.
*
* This returns the created track, which can immediately be selected by the
* application. The track will not be automatically selected.
*
* @param {string} uri
* @param {string} language
* @param {string} kind
* @param {string=} mimeType
* @param {string=} codec
* @param {string=} label
* @param {boolean=} forced
* @return {!Promise.<shaka.extern.Track>}
* @export
*/
async addTextTrackAsync(uri, language, kind, mimeType, codec, label,
forced = false) {
if (this.loadMode_ != shaka.Player.LoadMode.MEDIA_SOURCE &&
this.loadMode_ != shaka.Player.LoadMode.SRC_EQUALS) {
shaka.log.error(
'Must call load() and wait for it to resolve before adding text ' +
'tracks.');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.PLAYER,
shaka.util.Error.Code.CONTENT_NOT_LOADED);
}
if (!mimeType) {
// Try using the uri extension.
const extension = shaka.media.ManifestParser.getExtension(uri);
mimeType = {
'sbv': 'text/x-subviewer',
'srt': 'text/srt',
'vtt': 'text/vtt',
'webvtt': 'text/vtt',
'ttml': 'application/ttml+xml',
'lrc': 'application/x-subtitle-lrc',
'ssa': 'text/x-ssa',
'ass': 'text/x-ssa',
}[extension];
if (!mimeType) {
try {
goog.asserts.assert(
this.networkingEngine_, 'Need networking engine.');
// eslint-disable-next-line require-atomic-updates
mimeType = await shaka.media.ManifestParser.getMimeType(uri,
this.networkingEngine_,
this.config_.streaming.retryParameters);
} catch (error) {}
}
if (!mimeType) {
shaka.log.error(
'The mimeType has not been provided and it could not be deduced ' +
'from its extension.');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.TEXT_COULD_NOT_GUESS_MIME_TYPE,
extension);
}
}
if (this.loadMode_ == shaka.Player.LoadMode.SRC_EQUALS) {
if (mimeType != 'text/vtt') {
goog.asserts.assert(
this.networkingEngine_, 'Need networking engine.');
const data = await this.getTextData_(uri,
this.networkingEngine_,
this.config_.streaming.retryParameters);
const vttText = this.convertToWebVTT_(data, mimeType);
const blob = new Blob([vttText], {type: 'text/vtt'});
uri = URL.createObjectURL(blob);
mimeType = 'text/vtt';
}
if (forced) {
// See: https://github.com/whatwg/html/issues/4472
kind = 'forced';
}
const trackElement = document.createElement('track');
trackElement.setAttribute('src', uri);
trackElement.setAttribute('label', label || '');
trackElement.setAttribute('kind', kind);
trackElement.setAttribute('srclang', language);
// Because we're pulling in the text track file via JavaScript, the
// same-origin policy applies. If you'd like to have a player served
// from one domain, but the text track served from another, you'll
// need to enable CORS in order to do so. In addition to enabling CORS
// on the server serving the text tracks, you will need to add the
// crossorigin attribute to the video element itself.
if (!this.video_.getAttribute('crossorigin')) {
this.video_.setAttribute('crossorigin', 'anonymous');
}
this.video_.appendChild(trackElement);
const textTracks = this.getTextTracks();
const srcTrack = textTracks.find((t) => {
return t.language == language &&
t.label == (label || '') &&
t.kind == kind;
});
if (srcTrack) {
this.onTracksChanged_();
return srcTrack;
}
// This should not happen, but there are browser implementations that may
// not support the Track element.
shaka.log.error('Cannot add this text when loaded with src=');
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.CANNOT_ADD_EXTERNAL_TEXT_TO_SRC_EQUALS);
}
const ContentType = shaka.util.ManifestParserUtils.ContentType;
const duration = this.manifest_.presentationTimeline.getDuration();
if (duration == Infinity) {
throw new shaka.util.Error(
shaka.util.Error.Severity.RECOVERABLE,
shaka.util.Error.Category.MANIFEST,
shaka.util.Error.Code.CANNOT_ADD_EXTERNAL_TEXT_TO_LIVE_STREAM);
}
/** @type {shaka.extern.Stream} */
const stream = {
id: this.nextExternalStreamId_++,
originalId: null,
createSegmentIndex: () => Promise.resolve(),
segmentIndex: shaka.media.SegmentIndex.forSingleSegment(
/* startTime= */ 0,
/* duration= */ duration,
/* uris= */ [uri]),
mimeType: mimeType || '',
codecs: codec || '',
kind: kind,
encrypted: false,
drmInfos: [],
keyIds: new Set(),
language: language,
label: label || null,
type: ContentType.TEXT,
primary: false,
trickModeVideo: null,
emsgSchemeIdUris: null,
roles: [],
forced: !!forced,
channelsCount: null,
audioSamplingRate: null,
spatialAudio: false,
closedCaptions: null,
};
const fullMimeType = shaka.util.MimeUtils.getFullType(
stream.mimeType, stream.codecs);
const supported = shaka.text.TextEngine.isTypeSupported(fullMimeType);
if (!supported) {
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.MISSING_TEXT_PLUGIN,
mimeType);
}
this.manifest_.textStreams.push(stream);
this.onTracksChanged_();
return shaka.util.StreamUtils.textStreamToTrack(stream);
}
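// Illustrative usage sketch (assumes a loaded `player` and an async calling
// context; the URI is a placeholder; not part of the original source):
//
//   const track = await player.addTextTrackAsync(
//       'https://example.com/subs.vtt', 'en', 'subtitles', 'text/vtt');
//   player.selectTextTrack(track);
//   await player.setTextTrackVisibility(true);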
/**
* @param {string} uri
* @param {!shaka.net.NetworkingEngine} netEngine
* @param {shaka.extern.RetryParameters} retryParams
* @return {!Promise.<BufferSource>}
* @private
*/
async getTextData_(uri, netEngine, retryParams) {
const type = shaka.net.NetworkingEngine.RequestType.SEGMENT;
const request = shaka.net.NetworkingEngine.makeRequest([uri], retryParams);
request.method = 'GET';
const response = await netEngine.request(type, request).promise;
return response.data;
}
/**
* Converts an input string to a WebVTT format string.
*
* @param {BufferSource} buffer
* @param {string} mimeType
* @return {string}
* @private
*/
convertToWebVTT_(buffer, mimeType) {
const factory = shaka.text.TextEngine.findParser(mimeType);
if (factory) {
const obj = factory();
const time = {
periodStart: 0,
segmentStart: 0,
segmentEnd: this.video_.duration,
};
const data = shaka.util.BufferUtils.toUint8(buffer);
const cues = obj.parseMedia(data, time);
return shaka.text.WebVttGenerator.convert(cues);
}
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.TEXT,
shaka.util.Error.Code.MISSING_TEXT_PLUGIN,
mimeType);
}
/**
* Set the maximum resolution that the platform's hardware can handle.
* This will be called automatically by <code>shaka.cast.CastReceiver</code>
* to enforce limitations of the Chromecast hardware.
*
* @param {number} width
* @param {number} height
* @export
*/
setMaxHardwareResolution(width, height) {
this.maxHwRes_.width = width;
this.maxHwRes_.height = height;
}
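// Illustrative usage (not part of the original source): cap variant
// selection at 1080p on constrained hardware:
//
//   player.setMaxHardwareResolution(1920, 1080);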
/**
* Retry streaming after a streaming failure has occurred. When the player has
* not loaded content or is loading content, this will be a no-op and will
* return <code>false</code>.
*
* <p>
* If the player has loaded content, and streaming has not seen an error, this
* will return <code>false</code>.
*
* <p>
* If the player has loaded content, and streaming has seen an error, but the
* player could not resume streaming, this will return <code>false</code>.
*
* @return {boolean}
* @export
*/
retryStreaming() {
return this.loadMode_ == shaka.Player.LoadMode.MEDIA_SOURCE ?
this.streamingEngine_.retry() :
false;
}
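// Illustrative usage sketch (not part of the original source): retry from an
// application-level error handler when streaming has failed:
//
//   player.addEventListener('error', () => {
//     if (!player.retryStreaming()) {
//       console.warn('Streaming could not be retried.');
//     }
//   });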
/**
* Get the manifest that the player has loaded. If the player has not loaded
* any content, this will return <code>null</code>.
*
* NOTE: This structure is NOT covered by semantic versioning compatibility
* guarantees. It may change at any time!
*
* This is marked as deprecated to warn Closure Compiler users at compile-time
* to avoid using this method.
*
* @return {?shaka.extern.Manifest}
* @export
* @deprecated
*/
getManifest() {
shaka.log.alwaysWarn(
'Shaka Player\'s internal Manifest structure is NOT covered by ' +
'semantic versioning compatibility guarantees. It may change at any ' +
'time! Please consider filing a feature request for whatever you ' +
'use getManifest() for.');
return this.manifest_;
}
/**
* Get the type of manifest parser that the player is using. If the player has
* not loaded any content, this will return <code>null</code>.
*
* @return {?shaka.extern.ManifestParser.Factory}
* @export
*/
getManifestParserFactory() {
return this.parserFactory_;
}
/**
* @param {shaka.extern.Variant} variant
* @param {boolean} fromAdaptation
* @private
*/
addVariantToSwitchHistory_(variant, fromAdaptation) {
const switchHistory = this.stats_.getSwitchHistory();
switchHistory.updateCurrentVariant(variant, fromAdaptation);
}
/**
* @param {shaka.extern.Stream} textStream
* @param {boolean} fromAdaptation
* @private
*/
addTextStreamToSwitchHistory_(textStream, fromAdaptation) {
const switchHistory = this.stats_.getSwitchHistory();
switchHistory.updateCurrentText(textStream, fromAdaptation);
}
/**
* @return {shaka.extern.PlayerConfiguration}
* @private
*/
defaultConfig_() {
const config = shaka.util.PlayerConfiguration.createDefault();
config.streaming.failureCallback = (error) => {
this.defaultStreamingFailureCallback_(error);
};
// Because this.video_ may not be set when the config is built, the default
// TextDisplay factory must capture a reference to "this".
config.textDisplayFactory = () => {
if (this.videoContainer_) {
return new shaka.text.UITextDisplayer(
this.video_, this.videoContainer_);
} else {
return new shaka.text.SimpleTextDisplayer(this.video_);
}
};
return config;
}
/**
* Set the videoContainer to construct UITextDisplayer.
* @param {HTMLElement} videoContainer
* @export
*/
setVideoContainer(videoContainer) {
this.videoContainer_ = videoContainer;
}
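// Illustrative usage (not part of the original source): render text into a
// container element so that UITextDisplayer is used instead of the simple
// displayer:
//
//   player.setVideoContainer(document.getElementById('video-container'));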
/**
* @param {!shaka.util.Error} error
* @private
*/
defaultStreamingFailureCallback_(error) {
const retryErrorCodes = [
shaka.util.Error.Code.BAD_HTTP_STATUS,
shaka.util.Error.Code.HTTP_ERROR,
shaka.util.Error.Code.TIMEOUT,
];
if (this.isLive() && retryErrorCodes.includes(error.code)) {
error.severity = shaka.util.Error.Severity.RECOVERABLE;
shaka.log.warning('Live streaming error. Retrying automatically...');
this.retryStreaming();
}
}
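// Illustrative configuration sketch (not part of the original source;
// assumes the field-path form of configure()): applications can replace this
// default behavior with their own policy:
//
//   player.configure('streaming.failureCallback', (error) => {
//     // e.g. also retry after failures in non-live content.
//     player.retryStreaming();
//   });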
/**
* For CEA closed captions embedded in the video streams, create dummy text
* stream. This can be safely called again on existing manifests, for
* manifest updates.
* @param {!shaka.extern.Manifest} manifest
* @private
*/
makeTextStreamsForClosedCaptions_(manifest) {
const ContentType = shaka.util.ManifestParserUtils.ContentType;
const TextStreamKind = shaka.util.ManifestParserUtils.TextStreamKind;
const CEA608_MIME = shaka.util.MimeUtils.CEA608_CLOSED_CAPTION_MIMETYPE;
const CEA708_MIME = shaka.util.MimeUtils.CEA708_CLOSED_CAPTION_MIMETYPE;
// A set, to make sure we don't create two text streams for the same video.
const closedCaptionsSet = new Set();
for (const textStream of manifest.textStreams) {
if (textStream.mimeType == CEA608_MIME ||
textStream.mimeType == CEA708_MIME) {
// This function might be called on a manifest update, so don't make a
// new text stream for closed caption streams we have seen before.
closedCaptionsSet.add(textStream.originalId);
}
}
for (const variant of manifest.variants) {
const video = variant.video;
if (video && video.closedCaptions) {
for (const id of video.closedCaptions.keys()) {
if (!closedCaptionsSet.has(id)) {
const mimeType = id.startsWith('CC') ? CEA608_MIME : CEA708_MIME;
// Add an empty segmentIndex, for the benefit of the period combiner
// in our builtin DASH parser.
const segmentIndex = new shaka.media.MetaSegmentIndex();
const textStream = {
id: this.nextExternalStreamId_++, // A globally unique ID.
originalId: id, // The CC ID string, like 'CC1', 'CC3', etc.
createSegmentIndex: () => Promise.resolve(),
segmentIndex,
mimeType,
codecs: '',
kind: TextStreamKind.CLOSED_CAPTION,
encrypted: false,
drmInfos: [],
keyIds: new Set(),
language: video.closedCaptions.get(id),
label: null,
type: ContentType.TEXT,
primary: false,
trickModeVideo: null,
emsgSchemeIdUris: null,
roles: video.roles,
forced: false,
channelsCount: null,
audioSamplingRate: null,
spatialAudio: false,
closedCaptions: null,
};
manifest.textStreams.push(textStream);
closedCaptionsSet.add(id);
}
}
}
}
}
/**
* Filters a manifest, removing unplayable streams/variants.
*
* @param {?shaka.extern.Manifest} manifest
* @private
*/
async filterManifest_(manifest) {
await this.filterManifestWithStreamUtils_(manifest);
this.filterManifestWithRestrictions_(manifest);
}
/**
* Filters a manifest, removing unplayable streams/variants.
*
* @param {?shaka.extern.Manifest} manifest
* @private
*/
async filterManifestWithStreamUtils_(manifest) {
goog.asserts.assert(manifest, 'Manifest should exist!');
goog.asserts.assert(this.video_, 'Must not be destroyed');
/** @type {?shaka.extern.Variant} */
const currentVariant = this.streamingEngine_ ?
this.streamingEngine_.getCurrentVariant() : null;
await shaka.util.StreamUtils.filterManifest(
this.drmEngine_, currentVariant, manifest,
this.config_.useMediaCapabilities);
this.checkPlayableVariants_(manifest);
}
/**
* Apply the restrictions configuration to the manifest, and check if there's
* a variant that meets the restrictions.
*
* @param {?shaka.extern.Manifest} manifest
* @private
*/
filterManifestWithRestrictions_(manifest) {
// Return if |destroy| is called.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return;
}
const tracksChanged = shaka.util.StreamUtils.applyRestrictions(
manifest.variants, this.config_.restrictions, this.maxHwRes_);
if (tracksChanged && this.streamingEngine_) {
this.onTracksChanged_();
}
// We may need to create new sessions for any new init data.
const curDrmInfo = this.drmEngine_ ? this.drmEngine_.getDrmInfo() : null;
// DrmEngine.newInitData() requires mediaKeys to be available.
if (curDrmInfo && this.drmEngine_.getMediaKeys()) {
for (const variant of manifest.variants) {
const videoDrmInfos = variant.video ? variant.video.drmInfos : [];
const audioDrmInfos = variant.audio ? variant.audio.drmInfos : [];
const drmInfos = videoDrmInfos.concat(audioDrmInfos);
for (const drmInfo of drmInfos) {
// Ignore any data for different key systems.
if (drmInfo.keySystem == curDrmInfo.keySystem) {
for (const initData of (drmInfo.initData || [])) {
this.drmEngine_.newInitData(
initData.initDataType, initData.initData);
}
}
}
}
}
this.checkRestrictedVariants_(manifest);
}
/**
* @private
*/
filterManifestByCurrentVariant_() {
goog.asserts.assert(this.manifest_, 'Manifest should be valid');
goog.asserts.assert(this.streamingEngine_,
'StreamingEngine should be valid');
const currentVariant = this.streamingEngine_ ?
this.streamingEngine_.getCurrentVariant() : null;
shaka.util.StreamUtils.filterManifestByCurrentVariant(currentVariant,
this.manifest_);
this.checkPlayableVariants_(this.manifest_);
}
/**
* @param {shaka.extern.Variant} initialVariant
* @param {number} time
* @return {!Promise.<number>}
* @private
*/
async adjustStartTime_(initialVariant, time) {
/** @type {?shaka.extern.Stream} */
const activeAudio = initialVariant.audio;
/** @type {?shaka.extern.Stream} */
const activeVideo = initialVariant.video;
/**
* @param {?shaka.extern.Stream} stream
* @param {number} time
* @return {!Promise.<?number>}
*/
const getAdjustedTime = async (stream, time) => {
if (!stream) {
return null;
}
await stream.createSegmentIndex();
const ref = stream.segmentIndex[Symbol.iterator]().seek(time);
if (!ref) {
return null;
}
const refTime = ref.startTime;
goog.asserts.assert(refTime <= time,
'Segment should start before target time!');
return refTime;
};
const audioStartTime = await getAdjustedTime(activeAudio, time);
const videoStartTime = await getAdjustedTime(activeVideo, time);
// If we have both video and audio times, pick the larger one. If we picked
// the smaller one, that one will download an entire segment to buffer the
// difference.
if (videoStartTime != null && audioStartTime != null) {
return Math.max(videoStartTime, audioStartTime);
} else if (videoStartTime != null) {
return videoStartTime;
} else if (audioStartTime != null) {
return audioStartTime;
} else {
return time;
}
}
/**
* Update the buffering state to be either "we are buffering" or "we are not
* buffering", firing events to the app as needed.
*
* @private
*/
updateBufferState_() {
const isBuffering = this.isBuffering();
shaka.log.v2('Player changing buffering state to', isBuffering);
// Make sure we have all the components we need before we consider ourselves
// as being loaded.
// TODO: Make the check for "loaded" simpler.
const loaded = this.stats_ && this.bufferObserver_ && this.playhead_;
if (loaded) {
this.playRateController_.setBuffering(isBuffering);
this.updateStateHistory_();
}
// Surface the buffering event so that the app knows if/when we are
// buffering.
const eventName = shaka.Player.EventName.Buffering;
this.dispatchEvent(this.makeEvent_(eventName, {'buffering': isBuffering}));
}
/**
* A callback for when the playback rate changes. We need to watch the
* playback rate so that if the playback rate on the media element changes
* (that was not caused by our play rate controller) we can notify the
* controller so that it can stay in-sync with the change.
*
* @private
*/
onRateChange_() {
/** @type {number} */
const newRate = this.video_.playbackRate;
// On Edge, when someone seeks using the native controls, it will set the
// playback rate to zero until they finish seeking, after which it will
// restore the playback rate.
//
// If the playback rate changes while seeking, Edge will cache the playback
// rate and use it after seeking.
//
// https://github.com/google/shaka-player/issues/951
if (newRate == 0) {
return;
}
if (this.playRateController_) {
// The playback rate has changed. This could be us or someone else.
// If this was us, setting the rate again will be a no-op.
this.playRateController_.set(newRate);
}
const event = this.makeEvent_(shaka.Player.EventName.RateChange);
this.dispatchEvent(event);
}
/**
* Try updating the state history. If the player has not finished
* initializing, this will be a no-op.
*
* @private
*/
updateStateHistory_() {
// If we have not finished initializing, this will be a no-op.
if (!this.stats_) {
return;
}
if (!this.bufferObserver_) {
return;
}
const State = shaka.media.BufferingObserver.State;
const history = this.stats_.getStateHistory();
if (this.bufferObserver_.getState() == State.STARVING) {
history.update('buffering');
} else if (this.video_.paused) {
history.update('paused');
} else if (this.video_.ended) {
history.update('ended');
} else {
history.update('playing');
}
}
/**
* Callback from Playhead.
*
* @private
*/
onSeek_() {
if (this.playheadObservers_) {
this.playheadObservers_.notifyOfSeek();
}
if (this.streamingEngine_) {
this.streamingEngine_.seeked();
}
if (this.bufferObserver_) {
// If we seek into an unbuffered range, we should fire a 'buffering' event
// immediately. If StreamingEngine can buffer fast enough, we may not
// update our buffering tracking otherwise.
this.pollBufferState_();
}
}
/**
* Update AbrManager with variants while taking into account restrictions,
* preferences, and ABR.
*
* On error, this dispatches an error event and returns false.
*
* @return {boolean} True if successful.
* @private
*/
updateAbrManagerVariants_() {
try {
goog.asserts.assert(this.manifest_, 'Manifest should exist by now!');
this.checkRestrictedVariants_(this.manifest_);
} catch (e) {
this.onError_(e);
return false;
}
const playableVariants = this.manifest_.variants.filter((variant) => {
return shaka.util.StreamUtils.isPlayable(variant);
});
// Update the abr manager with newly filtered variants.
const adaptationSet = this.currentAdaptationSetCriteria_.create(
playableVariants);
this.abrManager_.setVariants(Array.from(adaptationSet.values()));
return true;
}
/**
* Chooses a variant from all possible variants while taking into account
* restrictions, preferences, and ABR.
*
* On error, this dispatches an error event and returns null.
*
* @return {?shaka.extern.Variant}
* @private
*/
chooseVariant_() {
if (this.updateAbrManagerVariants_()) {
return this.abrManager_.chooseVariant();
} else {
return null;
}
}
/**
* Choose a text stream from all possible text streams while taking into
* account user preference.
*
* @return {?shaka.extern.Stream}
* @private
*/
chooseTextStream_() {
const subset = shaka.util.StreamUtils.filterStreamsByLanguageAndRole(
this.manifest_.textStreams,
this.currentTextLanguage_,
this.currentTextRole_,
this.currentTextForced_);
return subset[0] || null;
}
/**
* Chooses a new Variant. If the new variant differs from the old one, it
* adds the new one to the switch history and switches to it.
*
* Called after a config change, a key status event, or an explicit language
* change.
*
* @private
*/
chooseVariantAndSwitch_() {
goog.asserts.assert(this.config_, 'Must not be destroyed');
// Because we're running this after a config change (manual language
// change) or a key status event, it is always okay to clear the buffer
// here.
const chosenVariant = this.chooseVariant_();
if (chosenVariant) {
if (chosenVariant == this.streamingEngine_.getCurrentVariant()) {
shaka.log.debug('Variant already selected.');
return;
}
this.addVariantToSwitchHistory_(
chosenVariant, /* fromAdaptation= */ true);
this.streamingEngine_.switchVariant(
chosenVariant, /* clearBuffers= */ true, /* safeMargin= */ 0);
// Dispatch a 'variantchanged' event
this.onVariantChanged_();
}
// Send an adaptation event so that the UI can show the new
// language/tracks.
this.onAdaptation_();
}
/**
* Decide during startup if text should be streamed/shown.
* @private
*/
setInitialTextState_(initialVariant, initialTextStream) {
// Check if we should show text (based on difference between audio and text
// languages).
if (initialTextStream) {
if (initialVariant.audio && this.shouldInitiallyShowText_(
initialVariant.audio, initialTextStream)) {
this.isTextVisible_ = true;
}
if (this.isTextVisible_) {
// If the cached value says to show text, then update the text displayer
// since it defaults to not shown.
this.mediaSourceEngine_.getTextDisplayer().setTextVisibility(true);
goog.asserts.assert(this.shouldStreamText_(),
'Should be streaming text');
}
this.onTextTrackVisibility_();
} else {
this.isTextVisible_ = false;
}
}
/**
* Check if we should show text on screen automatically.
*
* The text should automatically be shown if the text is language-compatible
* with the user's text language preference, but not compatible with the
* audio.
*
* For example:
* preferred | chosen | chosen |
* text | text | audio | show
* -----------------------------------
* en-CA | en | jp | true
* en | en-US | fr | true
* fr-CA | en-US | jp | false
* en-CA | en-US | en-US | false
*
* @param {shaka.extern.Stream} audioStream
* @param {shaka.extern.Stream} textStream
* @return {boolean}
* @private
*/
shouldInitiallyShowText_(audioStream, textStream) {
const LanguageUtils = shaka.util.LanguageUtils;
/** @type {string} */
const preferredTextLocale =
LanguageUtils.normalize(this.config_.preferredTextLanguage);
/** @type {string} */
const audioLocale = LanguageUtils.normalize(audioStream.language);
/** @type {string} */
const textLocale = LanguageUtils.normalize(textStream.language);
return (
LanguageUtils.areLanguageCompatible(textLocale, preferredTextLocale) &&
!LanguageUtils.areLanguageCompatible(audioLocale, textLocale));
}
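// Illustrative configuration sketch (not part of the original source): the
// preferences this check compares against are set by the application, e.g.:
//
//   player.configure({
//     preferredAudioLanguage: 'ja',
//     preferredTextLanguage: 'en',
//   });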
/**
* Callback from StreamingEngine.
*
* @private
*/
onManifestUpdate_() {
if (this.parser_ && this.parser_.update) {
this.parser_.update();
}
}
/**
* Callback from StreamingEngine.
*
* @private
*/
onSegmentAppended_() {
// When we append a segment to media source (via streaming engine) we are
// changing what data we have buffered, so notify the playhead of the
// change.
if (this.playhead_) {
this.playhead_.notifyOfBufferingChange();
}
this.pollBufferState_();
}
/**
* Callback from AbrManager.
*
* @param {shaka.extern.Variant} variant
* @param {boolean=} clearBuffer
* @param {number=} safeMargin Optional amount of buffer (in seconds) to
* retain when clearing the buffer.
* Defaults to 0 if not provided. Ignored if clearBuffer is false.
* @private
*/
switch_(variant, clearBuffer = false, safeMargin = 0) {
shaka.log.debug('switch_');
goog.asserts.assert(this.config_.abr.enabled,
'AbrManager should not call switch while disabled!');
goog.asserts.assert(this.manifest_, 'We need a manifest to switch ' +
'variants.');
if (!this.streamingEngine_) {
// There's no way to change it.
return;
}
if (variant == this.streamingEngine_.getCurrentVariant()) {
// This isn't a change.
return;
}
this.addVariantToSwitchHistory_(variant, /* fromAdaptation= */ true);
this.streamingEngine_.switchVariant(variant, clearBuffer, safeMargin);
this.onAdaptation_();
}
/**
* Dispatches an 'adaptation' event.
* @private
*/
onAdaptation_() {
// Delay the 'adaptation' event so that StreamingEngine has time to absorb
// the changes before the user tries to query it.
const event = this.makeEvent_(shaka.Player.EventName.Adaptation);
this.delayDispatchEvent_(event);
}
/**
* Dispatches a 'trackschanged' event.
* @private
*/
onTracksChanged_() {
// Delay the 'trackschanged' event so StreamingEngine has time to absorb the
// changes before the user tries to query it.
const event = this.makeEvent_(shaka.Player.EventName.TracksChanged);
this.delayDispatchEvent_(event);
}
/**
* Dispatches a 'variantchanged' event.
* @private
*/
onVariantChanged_() {
// Delay the 'variantchanged' event so StreamingEngine has time to absorb
// the changes before the user tries to query it.
const event = this.makeEvent_(shaka.Player.EventName.VariantChanged);
this.delayDispatchEvent_(event);
}
/**
* Dispatches a 'textchanged' event.
* @private
*/
onTextChanged_() {
// Delay the 'textchanged' event so StreamingEngine has time to absorb the
// changes before the user tries to query it.
const event = this.makeEvent_(shaka.Player.EventName.TextChanged);
this.delayDispatchEvent_(event);
}
/** @private */
onTextTrackVisibility_() {
const event = this.makeEvent_(shaka.Player.EventName.TextTrackVisibility);
this.delayDispatchEvent_(event);
}
/** @private */
onAbrStatusChanged_() {
const event = this.makeEvent_(shaka.Player.EventName.AbrStatusChanged, {
newStatus: this.config_.abr.enabled,
});
this.delayDispatchEvent_(event);
}
/**
* @param {!shaka.util.Error} error
* @private
*/
onError_(error) {
goog.asserts.assert(error instanceof shaka.util.Error, 'Wrong error type!');
// Errors dispatched after |destroy| is called are not meaningful and should
// be safe to ignore.
if (this.loadMode_ == shaka.Player.LoadMode.DESTROYED) {
return;
}
const eventName = shaka.Player.EventName.Error;
const event = this.makeEvent_(eventName, {'detail': error});
this.dispatchEvent(event);
if (event.defaultPrevented) {
error.handled = true;
}
}
/**
* When we fire region events, we need to copy the information out of the
* region to break the connection with the player's internal data. We do the
* copy here because this is the transition point between the player and the
* app.
*
* @param {!shaka.Player.EventName} eventName
* @param {shaka.extern.TimelineRegionInfo} region
*
* @private
*/
onRegionEvent_(eventName, region) {
// Always make a copy to avoid exposing our internal data to the app.
const clone = {
schemeIdUri: region.schemeIdUri,
value: region.value,
startTime: region.startTime,
endTime: region.endTime,
id: region.id,
eventElement: region.eventElement,
};
this.dispatchEvent(this.makeEvent_(eventName, {detail: clone}));
}
/**
* Turn the media element's error object into a Shaka Player error object.
*
* @return {shaka.util.Error}
* @private
*/
videoErrorToShakaError_() {
goog.asserts.assert(this.video_.error,
'Video error expected, but missing!');
if (!this.video_.error) {
return null;
}
const code = this.video_.error.code;
if (code == 1 /* MEDIA_ERR_ABORTED */) {
// Ignore this error code, which should only occur when navigating away or
// deliberately stopping playback of HTTP content.
return null;
}
// Extra error information from MS Edge:
let extended = this.video_.error.msExtendedCode;
if (extended) {
// Convert to unsigned:
if (extended < 0) {
extended += Math.pow(2, 32);
}
// Format as hex:
extended = extended.toString(16);
}
// Extra error information from Chrome:
const message = this.video_.error.message;
return new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.MEDIA,
shaka.util.Error.Code.VIDEO_ERROR,
code, extended, message);
}
/**
* @param {!Event} event
* @private
*/
onVideoError_(event) {
const error = this.videoErrorToShakaError_();
if (!error) {
return;
}
this.onError_(error);
}
/**
* @param {!Object.<string, string>} keyStatusMap A map of hex key IDs to
* statuses.
* @private
*/
onKeyStatus_(keyStatusMap) {
if (!this.streamingEngine_) {
// We can't use this info to manage restrictions in src= mode, so ignore
// it.
return;
}
const keyIds = Object.keys(keyStatusMap);
if (keyIds.length == 0) {
shaka.log.warning(
'Got a key status event without any key statuses, so we don\'t ' +
'know the real key statuses. If we don\'t have all the keys, ' +
'you\'ll need to set restrictions so we don\'t select those tracks.');
}
// If EME is using a synthetic key ID, the only key ID is '00' (a single 0
// byte). In this case, it is only used to report global success/failure.
// See note about old platforms in: https://bit.ly/2tpez5Z
const isGlobalStatus = keyIds.length == 1 && keyIds[0] == '00';
if (isGlobalStatus) {
shaka.log.warning(
'Got a synthetic key status event, so we don\'t know the real key ' +
'statuses. If we don\'t have all the keys, you\'ll need to set ' +
'restrictions so we don\'t select those tracks.');
}
const restrictedStatuses = shaka.Player.restrictedStatuses_;
let tracksChanged = false;
// Only filter tracks for keys if we have some key statuses to look at.
if (keyIds.length) {
for (const variant of this.manifest_.variants) {
const streams = shaka.util.StreamUtils.getVariantStreams(variant);
for (const stream of streams) {
const originalAllowed = variant.allowedByKeySystem;
// Only update if we have key IDs for the stream. If the keys aren't
// all present, then the track should be restricted.
if (stream.keyIds.size) {
variant.allowedByKeySystem = true;
for (const keyId of stream.keyIds) {
const keyStatus = keyStatusMap[isGlobalStatus ? '00' : keyId];
variant.allowedByKeySystem = variant.allowedByKeySystem &&
!!keyStatus && !restrictedStatuses.includes(keyStatus);
}
}
if (originalAllowed != variant.allowedByKeySystem) {
tracksChanged = true;
}
} // for (const stream of streams)
} // for (const variant of this.manifest_.variants)
} // if (keyIds.length)
if (tracksChanged) {
this.updateAbrManagerVariants_();
}
const currentVariant = this.streamingEngine_.getCurrentVariant();
if (currentVariant && !currentVariant.allowedByKeySystem) {
shaka.log.debug('Choosing new streams after key status changed');
this.chooseVariantAndSwitch_();
}
if (tracksChanged) {
this.onTracksChanged_();
}
}
/**
* Callback from DrmEngine
* @param {string} keyId
* @param {number} expiration
* @private
*/
onExpirationUpdated_(keyId, expiration) {
if (this.parser_ && this.parser_.onExpirationUpdated) {
this.parser_.onExpirationUpdated(keyId, expiration);
}
const event = this.makeEvent_(shaka.Player.EventName.ExpirationUpdated);
this.dispatchEvent(event);
}
/**
* @return {boolean} true if we should stream text right now.
* @private
*/
shouldStreamText_() {
return this.config_.streaming.alwaysStreamText || this.isTextTrackVisible();
}
/**
* Applies playRangeStart and playRangeEnd to the given timeline. This will
* only affect non-live content.
*
* @param {shaka.media.PresentationTimeline} timeline
* @param {number} playRangeStart
* @param {number} playRangeEnd
*
* @private
*/
static applyPlayRange_(timeline, playRangeStart, playRangeEnd) {
if (playRangeStart > 0) {
if (timeline.isLive()) {
shaka.log.warning(
'|playRangeStart| has been configured for live content. ' +
'Ignoring the setting.');
} else {
timeline.setUserSeekStart(playRangeStart);
}
}
// If the playback has been configured to end before the end of the
// presentation, update the duration unless it's live content.
const fullDuration = timeline.getDuration();
if (playRangeEnd < fullDuration) {
if (timeline.isLive()) {
shaka.log.warning(
'|playRangeEnd| has been configured for live content. ' +
'Ignoring the setting.');
} else {
timeline.setDuration(playRangeEnd);
}
}
}
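// Illustrative configuration sketch (not part of the original source;
// assumes the matching top-level config fields): restrict playback to a
// window of a VOD presentation:
//
//   player.configure({
//     playRangeStart: 30,  // seconds
//     playRangeEnd: 300,   // seconds
//   });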
/**
* Checks if the variants are all restricted, and throw an appropriate
* exception if so.
*
* @param {shaka.extern.Manifest} manifest
*
* @private
*/
checkRestrictedVariants_(manifest) {
const restrictedStatuses = shaka.Player.restrictedStatuses_;
const keyStatusMap =
this.drmEngine_ ? this.drmEngine_.getKeyStatuses() : {};
const keyIds = Object.keys(keyStatusMap);
const isGlobalStatus = keyIds.length && keyIds[0] == '00';
let hasPlayable = false;
let hasAppRestrictions = false;
/** @type {!Set.<string>} */
const missingKeys = new Set();
/** @type {!Set.<string>} */
const badKeyStatuses = new Set();
for (const variant of manifest.variants) {
// TODO: Combine with onKeyStatus_.
const streams = [];
if (variant.audio) {
streams.push(variant.audio);
}
if (variant.video) {
streams.push(variant.video);
}
for (const stream of streams) {
if (stream.keyIds.size) {
for (const keyId of stream.keyIds) {
const keyStatus = keyStatusMap[isGlobalStatus ? '00' : keyId];
if (!keyStatus) {
missingKeys.add(keyId);
} else if (restrictedStatuses.includes(keyStatus)) {
badKeyStatuses.add(keyStatus);
}
}
} // if (stream.keyIds.size)
}
if (!variant.allowedByApplication) {
hasAppRestrictions = true;
} else if (variant.allowedByKeySystem) {
hasPlayable = true;
}
}
if (!hasPlayable) {
/** @type {shaka.extern.RestrictionInfo} */
const data = {
hasAppRestrictions,
missingKeys: Array.from(missingKeys),
restrictedKeyStatuses: Array.from(badKeyStatuses),
};
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.MANIFEST,
shaka.util.Error.Code.RESTRICTIONS_CANNOT_BE_MET,
data);
}
}
/**
* Confirm some variants are playable. Otherwise, throw an exception.
* @param {!shaka.extern.Manifest} manifest
* @private
*/
checkPlayableVariants_(manifest) {
const valid = manifest.variants.some(shaka.util.StreamUtils.isPlayable);
// If none of the variants are playable, throw
// CONTENT_UNSUPPORTED_BY_BROWSER.
if (!valid) {
throw new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.MANIFEST,
shaka.util.Error.Code.CONTENT_UNSUPPORTED_BY_BROWSER);
}
}
/**
* Fire an event, but wait a little bit so that the immediate execution can
* complete before the event is handled.
*
* @param {!shaka.util.FakeEvent} event
* @private
*/
async delayDispatchEvent_(event) {
// Wait until the next interpreter cycle.
await Promise.resolve();
// Only dispatch the event if we are still alive.
if (this.loadMode_ != shaka.Player.LoadMode.DESTROYED) {
this.dispatchEvent(event);
}
}
/**
* Get the normalized languages for a group of tracks.
*
* @param {!Array.<?shaka.extern.Track>} tracks
* @return {!Set.<string>}
* @private
*/
static getLanguagesFrom_(tracks) {
const languages = new Set();
for (const track of tracks) {
if (track.language) {
languages.add(shaka.util.LanguageUtils.normalize(track.language));
} else {
languages.add('und');
}
}
return languages;
}
/**
* Get all permutations of normalized languages and role for a group of
* tracks.
*
* @param {!Array.<?shaka.extern.Track>} tracks
* @return {!Array.<shaka.extern.LanguageRole>}
* @private
*/
static getLanguageAndRolesFrom_(tracks) {
/** @type {!Map.<string, !Set>} */
const languageToRoles = new Map();
/** @type {!Map.<string, !Map.<string, string>>} */
const languageRoleToLabel = new Map();
for (const track of tracks) {
let language = 'und';
let roles = [];
if (track.language) {
language = shaka.util.LanguageUtils.normalize(track.language);
}
if (track.type == 'variant') {
roles = track.audioRoles;
} else {
roles = track.roles;
}
if (!roles || !roles.length) {
// We must have an empty role so that we will still get a language-role
// entry from our Map.
roles = [''];
}
if (!languageToRoles.has(language)) {
languageToRoles.set(language, new Set());
}
for (const role of roles) {
languageToRoles.get(language).add(role);
if (track.label) {
if (!languageRoleToLabel.has(language)) {
languageRoleToLabel.set(language, new Map());
}
languageRoleToLabel.get(language).set(role, track.label);
}
}
}
// Flatten our map to an array of language-role pairs.
const pairings = [];
languageToRoles.forEach((roles, language) => {
for (const role of roles) {
let label = null;
if (languageRoleToLabel.has(language) &&
languageRoleToLabel.get(language).has(role)) {
label = languageRoleToLabel.get(language).get(role);
}
pairings.push({language, role, label});
}
});
return pairings;
}
/**
* Assuming the player is playing content with media source, check if the
* player has buffered enough content to make it to the end of the
* presentation.
*
* @return {boolean}
* @private
*/
isBufferedToEndMS_() {
goog.asserts.assert(
this.video_,
'We need a video element to get buffering information');
goog.asserts.assert(
this.mediaSourceEngine_,
'We need a media source engine to get buffering information');
goog.asserts.assert(
this.manifest_,
'We need a manifest to get buffering information');
// This is a strong guarantee that we are buffered to the end, because it
// means the playhead is already at that end.
if (this.video_.ended) {
return true;
}
// This means that MediaSource has buffered the final segment in all
// SourceBuffers and is no longer accepting additional segments.
if (this.mediaSourceEngine_.ended()) {
return true;
}
// Live streams are "buffered to the end" when they have buffered to the
// live edge or beyond (into the region covered by the presentation delay).
if (this.manifest_.presentationTimeline.isLive()) {
const liveEdge =
this.manifest_.presentationTimeline.getSegmentAvailabilityEnd();
const bufferEnd =
shaka.media.TimeRangesUtils.bufferEnd(this.video_.buffered);
if (bufferEnd != null && bufferEnd >= liveEdge) {
return true;
}
}
return false;
}
/**
* Assuming the player is playing content with src=, check if the player has
* buffered enough content to make it to the end of the presentation.
*
* @return {boolean}
* @private
*/
isBufferedToEndSrc_() {
goog.asserts.assert(
this.video_,
'We need a video element to get buffering information');
// This is a strong guarantee that we are buffered to the end, because it
// means the playhead is already at that end.
if (this.video_.ended) {
return true;
}
// If we have buffered to the duration of the content, it means we will have
// enough content to buffer to the end of the presentation.
const bufferEnd =
shaka.media.TimeRangesUtils.bufferEnd(this.video_.buffered);
// Because Safari's native HLS reports slightly inaccurate values for
// bufferEnd here, we use a fudge factor. Without this, we can end up in a
// buffering state at the end of the stream. See issue #2117.
// TODO: Try to remove the fudge here once we no longer manage buffering
// state above the browser with playbackRate=0.
const fudge = 1; // 1000 ms
return bufferEnd != null && bufferEnd >= this.video_.duration - fudge;
}
/**
* Create an error for when we purposely interrupt a load operation.
*
* @return {!shaka.util.Error}
* @private
*/
createAbortLoadError_() {
return new shaka.util.Error(
shaka.util.Error.Severity.CRITICAL,
shaka.util.Error.Category.PLAYER,
shaka.util.Error.Code.LOAD_INTERRUPTED);
}
/**
* Key
* ----------------------
* D : Detach Node
* A : Attach Node
* MS : Media Source Node
* P : Manifest Parser Node
* M : Manifest Node
* DRM : Drm Engine Node
* L : Load Node
* U : Unloading Node
* SRC : Src Equals Node
*
* Graph Topology
* ----------------------
*
* [SRC]-----+
* ^ |
* | v
* [D]<-->[A]<-----[U]
* | ^
* v |
* [MS]------+
* | |
* v |
* [P]-------+
* | |
* v |
* [M]-------+
* | |
* v |
* [DRM]-----+
* | |
* v |
* [L]-------+
*
* @param {!shaka.routing.Node} currentlyAt
* @param {shaka.routing.Payload} currentlyWith
* @param {!shaka.routing.Node} wantsToBeAt
* @param {shaka.routing.Payload} wantsToHave
* @return {?shaka.routing.Node}
* @private
*/
getNextStep_(currentlyAt, currentlyWith, wantsToBeAt, wantsToHave) {
let next = null;
// Detach is very simple, either stay in detach (because |detach| was called
// while in detached) or go somewhere that requires us to attach to an
// element.
if (currentlyAt == this.detachNode_) {
next = wantsToBeAt == this.detachNode_ ?
this.detachNode_ :
this.attachNode_;
}
if (currentlyAt == this.attachNode_) {
next = this.getNextAfterAttach_(wantsToBeAt, currentlyWith, wantsToHave);
}
if (currentlyAt == this.mediaSourceNode_) {
next = this.getNextAfterMediaSource_(
wantsToBeAt, currentlyWith, wantsToHave);
}
if (currentlyAt == this.parserNode_) {
next = this.getNextMatchingAllDependencies_(
/* destination= */ this.loadNode_,
/* next= */ this.manifestNode_,
/* reset= */ this.unloadNode_,
/* goingTo= */ wantsToBeAt,
/* has= */ currentlyWith,
/* wants= */ wantsToHave);
}
if (currentlyAt == this.manifestNode_) {
next = this.getNextMatchingAllDependencies_(
/* destination= */ this.loadNode_,
/* next= */ this.drmNode_,
/* reset= */ this.unloadNode_,
/* goingTo= */ wantsToBeAt,
/* has= */ currentlyWith,
/* wants= */ wantsToHave);
}
// For DRM, we have two options "load" or "unload". If all our constraints
// are met, we can go to "load". If anything is off, we must go back to
// "unload" to reset.
if (currentlyAt == this.drmNode_) {
next = this.getNextMatchingAllDependencies_(
/* destination= */ this.loadNode_,
/* next= */ this.loadNode_,
/* reset= */ this.unloadNode_,
/* goingTo= */ wantsToBeAt,
/* has= */ currentlyWith,
/* wants= */ wantsToHave);
}
// For DRM w/ src= playback, we only care about destination and media
// element.
if (currentlyAt == this.srcEqualsDrmNode_) {
if (wantsToBeAt == this.srcEqualsNode_ &&
currentlyWith.mediaElement == wantsToHave.mediaElement) {
next = this.srcEqualsNode_;
} else {
next = this.unloadNode_;
}
}
// After we load content, always go through unload because we can't safely
// use components after we have started playback.
if (currentlyAt == this.loadNode_ || currentlyAt == this.srcEqualsNode_) {
next = this.unloadNode_;
}
if (currentlyAt == this.unloadNode_) {
next = this.getNextAfterUnload_(wantsToBeAt, currentlyWith, wantsToHave);
}
goog.asserts.assert(next, 'Missing next step!');
return next;
}
/**
* @param {!shaka.routing.Node} goingTo
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {?shaka.routing.Node}
* @private
*/
getNextAfterAttach_(goingTo, has, wants) {
// Attach and detach are the only two nodes that we can directly go
// back-and-forth between.
if (goingTo == this.detachNode_) {
return this.detachNode_;
}
// If we are going anywhere other than detach, then we need the media
// element to match, if they don't match, we need to go through detach
// first.
if (has.mediaElement != wants.mediaElement) {
return this.detachNode_;
}
// If we are already in attached, and someone calls |attach| again (to the
// same video element), we can handle the redundant request by re-entering
// our current state.
if (goingTo == this.attachNode_) {
return this.attachNode_;
}
// The next step from attached to loaded is through media source.
if (goingTo == this.mediaSourceNode_ || goingTo == this.loadNode_) {
return this.mediaSourceNode_;
}
// If we are going to src=, then we should set up DRM first. This will
// support cases like FairPlay HLS on Safari.
if (goingTo == this.srcEqualsNode_) {
return this.srcEqualsDrmNode_;
}
// We are missing a rule; the null will get caught by a common check in
// the routing system.
return null;
}
/**
* @param {!shaka.routing.Node} goingTo
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {?shaka.routing.Node}
* @private
*/
getNextAfterMediaSource_(goingTo, has, wants) {
// We can only go to parse manifest or unload. If we want to go to load and
// we have the right media element, we can go to parse manifest. If we
// don't, no matter where we want to go, we must go through unload.
if (goingTo == this.loadNode_ && has.mediaElement == wants.mediaElement) {
return this.parserNode_;
}
// Right now the unload node is responsible for tearing down all playback
// components (including media source). So since we have created media
// source, we need to unload since our dependencies are not compatible.
//
// TODO: We are structured this way to maintain a historic structure. Going
// forward, there is no reason to restrict ourselves to this. Going
// forward we should explore breaking apart |onUnload| and develop
// more meaningful terminology around tearing down playback resources.
return this.unloadNode_;
}
/**
* After unload there are only two options, attached or detached. This choice
* is based on whether or not we have a media element. If we have a media
* element, then we go to attach. If we don't have a media element, we go to
* detach.
*
* @param {!shaka.routing.Node} goingTo
* @param {shaka.routing.Payload} has
* @param {shaka.routing.Payload} wants
* @return {?shaka.routing.Node}
* @private
*/
getNextAfterUnload_(goingTo, has, wants) {
// If we don't want a media element, detach.
// If we have the wrong media element, detach.
// Otherwise it means we want to attach to a media element and it is safe to
// do so.
return !wants.mediaElement || has.mediaElement != wants.mediaElement ?
this.detachNode_ :
this.attachNode_;
}
/**
* A general method used to handle routing when we can either take one step
* toward our destination (while all our dependencies match) or go to a node
* that will reset us so we can try again.
*
* @param {!shaka.routing.Node} destinationNode
* What |goingTo| must be for us to step toward |nextNode|. Otherwise we
* will go to |resetNode|.
* @param {!shaka.routing.Node} nextNode
* The node we will go to next if |goingTo == destinationNode| and all
* dependencies match.
* @param {!shaka.routing.Node} resetNode
* The node we will go to next if |goingTo != destinationNode| or any
* dependency does not match.
* @param {!shaka.routing.Node} goingTo
* The node that the walker is trying to go to.
* @param {shaka.routing.Payload} has
* The payload that the walker currently has.
* @param {shaka.routing.Payload} wants
* The payload that the walker wants to have when it gets to |goingTo|.
* @return {shaka.routing.Node}
* @private
*/
getNextMatchingAllDependencies_(destinationNode, nextNode, resetNode, goingTo,
has, wants) {
if (goingTo == destinationNode &&
has.mediaElement == wants.mediaElement &&
has.uri == wants.uri &&
has.mimeType == wants.mimeType) {
return nextNode;
}
return resetNode;
}
/**
* @return {shaka.routing.Payload}
* @private
*/
static createEmptyPayload_() {
return {
mediaElement: null,
mimeType: null,
startTime: null,
startTimeOfLoad: NaN,
uri: null,
};
}
/**
* Using a promise, wrap the listeners returned by |Walker.startNewRoute|.
* This will work for most usages in |Player| but should not be used for
* special cases.
*
* This will connect |onCancel|, |onEnd|, |onError|, and |onSkip| with
* |resolve| and |reject| but will leave |onStart| unset.
*
* @param {shaka.routing.Walker.Listeners} listeners
* @return {!Promise}
* @private
*/
wrapWalkerListenersWithPromise_(listeners) {
return new Promise((resolve, reject) => {
listeners.onCancel = () => reject(this.createAbortLoadError_());
listeners.onEnd = () => resolve();
listeners.onError = (e) => reject(e);
listeners.onSkip = () => reject(this.createAbortLoadError_());
});
}
};
/**
* An internal enum that contains the string values of all of the player events.
* This exists primarily to act as an implicit list of events, for tests.
*
* @enum {string}
*/
shaka.Player.EventName = {
AbrStatusChanged: 'abrstatuschanged',
Adaptation: 'adaptation',
Buffering: 'buffering',
DrmSessionUpdate: 'drmsessionupdate',
Emsg: 'emsg',
Error: 'error',
ExpirationUpdated: 'expirationupdated',
LargeGap: 'largegap',
Loaded: 'loaded',
Loading: 'loading',
ManifestParsed: 'manifestparsed',
Metadata: 'metadata',
OnStateChange: 'onstatechange',
OnStateIdle: 'onstateidle',
RateChange: 'ratechange',
SessionDataEvent: 'sessiondata',
Streaming: 'streaming',
TextChanged: 'textchanged',
TextTrackVisibility: 'texttrackvisibility',
TimelineRegionAdded: 'timelineregionadded',
TimelineRegionEnter: 'timelineregionenter',
TimelineRegionExit: 'timelineregionexit',
TracksChanged: 'trackschanged',
Unloading: 'unloading',
VariantChanged: 'variantchanged',
};
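// Illustrative usage sketch (not part of the original source): applications
// listen for these events by their string values, e.g.:
//
//   player.addEventListener('buffering', (event) => {
//     console.log('buffering:', event.buffering);
//   });
//   player.addEventListener('error', (event) => {
//     console.error('Shaka error code:', event.detail.code);
//   });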
/**
* In order to know what method of loading the player used for some content, we
* have this enum. It lets us know if content has not been loaded, loaded with
* media source, or loaded with src equals.
*
* This enum has a low resolution, because it is only meant to express the
* outer limits of the various states that the player is in. For example, when
* someone calls a public method on the player, it should not matter whether
* DrmEngine has been initialized; it should only matter whether content has
* finished loading.
*
* @enum {number}
* @export
*/
shaka.Player.LoadMode = {
'DESTROYED': 0,
'NOT_LOADED': 1,
'MEDIA_SOURCE': 2,
'SRC_EQUALS': 3,
};
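// Illustrative usage sketch (not part of the original source; assumes
// player.getLoadMode() is exported in this version):
//
//   if (player.getLoadMode() == shaka.Player.LoadMode.SRC_EQUALS) {
//     console.log('Playing via src=; some features are unavailable.');
//   }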
/**
* The typical buffering threshold. When we have less than this buffered (in
* seconds), we enter a buffering state. This specific value is based on manual
* testing and evaluation across a variety of platforms.
*
* To make the buffering logic work in all cases, this "typical" threshold will
* be overridden if the rebufferingGoal configuration is too low.
*
* @const {number}
* @private
*/
shaka.Player.TYPICAL_BUFFERING_THRESHOLD_ = 0.5;
/**
* @define {string} A version number taken from git at compile time.
* @export
*/
shaka.Player.version = 'v3.1.0-pre-uncompiled';
// Initialize the deprecation system using the version string we just set
// on the player.
shaka.Deprecate.init(shaka.Player.version);
/**
* These are the EME key statuses that represent restricted playback.
* 'usable', 'released', 'output-downscaled', and 'status-pending' are
* statuses of usable keys; the 'expired' status is handled separately in
* DrmEngine.
*
* @const {!Array.<string>}
* @private
*/
shaka.Player.restrictedStatuses_ = ['output-restricted', 'internal-error'];
/** @private {!Object.<string, function():*>} */
shaka.Player.supportPlugins_ = {};
/** @private {?shaka.extern.IAdManager.Factory} */
shaka.Player.adManagerFactory_ = null;
/**
* @const {string}
*/
shaka.Player.TextTrackLabel = 'Shaka Player TextTrack'; | /**
* @event shaka.Player.TextChangedEvent
* @description Fired when a call from the application caused a text stream
* change. Can be triggered by calls to <code>selectTextTrack()</code> or |
check-doc.py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
This checks if all command line args are documented.
Return value is 0 to indicate no error.
Author: @MarcoFalke
'''
from subprocess import check_output
import re
import sys
FOLDER_GREP = 'src'
FOLDER_TEST = 'src/test/'
CMD_ROOT_DIR = '`git rev-parse --show-toplevel`/{}'.format(FOLDER_GREP)
CMD_GREP_ARGS = r"egrep -r -I '(map(Multi)?Args(\.count\(|\[)|Get(Bool)?Arg\()\"\-[^\"]+?\"' {} | grep -v '{}'".format(CMD_ROOT_DIR, FOLDER_TEST)
CMD_GREP_DOCS = r"egrep -r -I 'HelpMessageOpt\(\"\-[^\"=]+?(=|\")' {}".format(CMD_ROOT_DIR)
REGEX_ARG = re.compile(r'(?:map(?:Multi)?Args(?:\.count\(|\[)|Get(?:Bool)?Arg\()\"(\-[^\"]+?)\"')
REGEX_DOC = re.compile(r'HelpMessageOpt\(\"(\-[^\"=]+?)(?:=|\")')
# list unsupported, deprecated and duplicate args as they need no documentation
SET_DOC_OPTIONAL = set(['-rpcssl', '-benchmark', '-h', '-help', '-socks', '-tor', '-debugnet', '-whitelistalwaysrelay', '-prematurewitness', '-walletprematurewitness', '-promiscuousmempoolflags', '-blockminsize', '-sendfreetransactions', '-checklevel', '-liquidityprovider', '-anonymizebdsamount'])
def main(): | args_used = set(re.findall(re.compile(REGEX_ARG), used))
args_docd = set(re.findall(re.compile(REGEX_DOC), docd)).union(SET_DOC_OPTIONAL)
args_need_doc = args_used.difference(args_docd)
args_unknown = args_docd.difference(args_used)
print("Args used : {}".format(len(args_used)))
print("Args documented : {}".format(len(args_docd)))
print("Args undocumented: {}".format(len(args_need_doc)))
print(args_need_doc)
print("Args unknown : {}".format(len(args_unknown)))
print(args_unknown)
sys.exit(len(args_need_doc))
if __name__ == "__main__":
main() | used = check_output(CMD_GREP_ARGS, shell=True, universal_newlines=True)
docd = check_output(CMD_GREP_DOCS, shell=True, universal_newlines=True)
|
coldfusion-brush.js | /**
* SyntaxHighlighter
* http://alexgorbatchev.com/SyntaxHighlighter
*
* SyntaxHighlighter is donationware. If you are using it, please donate.
* http://alexgorbatchev.com/SyntaxHighlighter/donate.html
*
* @version
* 3.0.83 (July 02 2010)
*
* @copyright
* Copyright (C) 2004-2010 Alex Gorbatchev.
*
* @license
* Dual licensed under the MIT and GPL licenses.
*/
;(function()
{
// CommonJS
typeof(require) != 'undefined' ? SyntaxHighlighter = require('shCore').SyntaxHighlighter : null;
function Brush()
{
// Contributed by Jen
// http://www.jensbits.com/2009/05/14/coldfusion-brush-for-syntaxhighlighter-plus
var funcs = 'Abs ACos AddSOAPRequestHeader AddSOAPResponseHeader AjaxLink AjaxOnLoad ArrayAppend ArrayAvg ArrayClear ArrayDeleteAt ' +
'ArrayInsertAt ArrayIsDefined ArrayIsEmpty ArrayLen ArrayMax ArrayMin ArraySet ArraySort ArraySum ArraySwap ArrayToList ' +
'Asc ASin Atn BinaryDecode BinaryEncode BitAnd BitMaskClear BitMaskRead BitMaskSet BitNot BitOr BitSHLN BitSHRN BitXor ' +
'Ceiling CharsetDecode CharsetEncode Chr CJustify Compare CompareNoCase Cos CreateDate CreateDateTime CreateObject ' +
'CreateODBCDate CreateODBCDateTime CreateODBCTime CreateTime CreateTimeSpan CreateUUID DateAdd DateCompare DateConvert ' +
'DateDiff DateFormat DatePart Day DayOfWeek DayOfWeekAsString DayOfYear DaysInMonth DaysInYear DE DecimalFormat DecrementValue ' +
'Decrypt DecryptBinary DeleteClientVariable DeserializeJSON DirectoryExists DollarFormat DotNetToCFType Duplicate Encrypt ' +
'EncryptBinary Evaluate Exp ExpandPath FileClose FileCopy FileDelete FileExists FileIsEOF FileMove FileOpen FileRead ' +
'FileReadBinary FileReadLine FileSetAccessMode FileSetAttribute FileSetLastModified FileWrite Find FindNoCase FindOneOf ' +
'FirstDayOfMonth Fix FormatBaseN GenerateSecretKey GetAuthUser GetBaseTagData GetBaseTagList GetBaseTemplatePath ' +
'GetClientVariablesList GetComponentMetaData GetContextRoot GetCurrentTemplatePath GetDirectoryFromPath GetEncoding ' +
'GetException GetFileFromPath GetFileInfo GetFunctionList GetGatewayHelper GetHttpRequestData GetHttpTimeString ' +
'GetK2ServerDocCount GetK2ServerDocCountLimit GetLocale GetLocaleDisplayName GetLocalHostIP GetMetaData GetMetricData ' +
'GetPageContext GetPrinterInfo GetProfileSections GetProfileString GetReadableImageFormats GetSOAPRequest GetSOAPRequestHeader ' +
'GetSOAPResponse GetSOAPResponseHeader GetTempDirectory GetTempFile GetTemplatePath GetTickCount GetTimeZoneInfo GetToken ' +
'GetUserRoles GetWriteableImageFormats Hash Hour HTMLCodeFormat HTMLEditFormat IIf ImageAddBorder ImageBlur ImageClearRect ' +
'ImageCopy ImageCrop ImageDrawArc ImageDrawBeveledRect ImageDrawCubicCurve ImageDrawLine ImageDrawLines ImageDrawOval ' +
'ImageDrawPoint ImageDrawQuadraticCurve ImageDrawRect ImageDrawRoundRect ImageDrawText ImageFlip ImageGetBlob ImageGetBufferedImage ' +
'ImageGetEXIFTag ImageGetHeight ImageGetIPTCTag ImageGetWidth ImageGrayscale ImageInfo ImageNegative ImageNew ImageOverlay ImagePaste ' +
'ImageRead ImageReadBase64 ImageResize ImageRotate ImageRotateDrawingAxis ImageScaleToFit ImageSetAntialiasing ImageSetBackgroundColor ' +
'ImageSetDrawingColor ImageSetDrawingStroke ImageSetDrawingTransparency ImageSharpen ImageShear ImageShearDrawingAxis ImageTranslate ' +
'ImageTranslateDrawingAxis ImageWrite ImageWriteBase64 ImageXORDrawingMode IncrementValue InputBaseN Insert Int IsArray IsBinary ' +
'IsBoolean IsCustomFunction IsDate IsDDX IsDebugMode IsDefined IsImage IsImageFile IsInstanceOf IsJSON IsLeapYear IsLocalHost ' +
'IsNumeric IsNumericDate IsObject IsPDFFile IsPDFObject IsQuery IsSimpleValue IsSOAPRequest IsStruct IsUserInAnyRole IsUserInRole ' +
'IsUserLoggedIn IsValid IsWDDX IsXML IsXmlAttribute IsXmlDoc IsXmlElem IsXmlNode IsXmlRoot JavaCast JSStringFormat LCase Left Len ' +
'ListAppend ListChangeDelims ListContains ListContainsNoCase ListDeleteAt ListFind ListFindNoCase ListFirst ListGetAt ListInsertAt ' +
'ListLast ListLen ListPrepend ListQualify ListRest ListSetAt ListSort ListToArray ListValueCount ListValueCountNoCase LJustify Log ' +
'Log10 LSCurrencyFormat LSDateFormat LSEuroCurrencyFormat LSIsCurrency LSIsDate LSIsNumeric LSNumberFormat LSParseCurrency LSParseDateTime ' +
'LSParseEuroCurrency LSParseNumber LSTimeFormat LTrim Max Mid Min Minute Month MonthAsString Now NumberFormat ParagraphFormat ParseDateTime ' +
'Pi PrecisionEvaluate PreserveSingleQuotes Quarter QueryAddColumn QueryAddRow QueryConvertForGrid QueryNew QuerySetCell QuotedValueList Rand ' +
'Randomize RandRange REFind REFindNoCase ReleaseComObject REMatch REMatchNoCase RemoveChars RepeatString Replace ReplaceList ReplaceNoCase ' +
'REReplace REReplaceNoCase Reverse Right RJustify Round RTrim Second SendGatewayMessage SerializeJSON SetEncoding SetLocale SetProfileString ' +
'SetVariable Sgn Sin Sleep SpanExcluding SpanIncluding Sqr StripCR StructAppend StructClear StructCopy StructCount StructDelete StructFind ' +
'StructFindKey StructFindValue StructGet StructInsert StructIsEmpty StructKeyArray StructKeyExists StructKeyList StructKeyList StructNew ' +
'StructSort StructUpdate Tan TimeFormat ToBase64 ToBinary ToScript ToString Trim UCase URLDecode URLEncodedFormat URLSessionFormat Val ' +
'ValueList VerifyClient Week Wrap Wrap WriteOutput XmlChildPos XmlElemNew XmlFormat XmlGetNodeType XmlNew XmlParse XmlSearch XmlTransform ' +
'XmlValidate Year YesNoFormat';
var keywords = 'cfabort cfajaximport cfajaxproxy cfapplet cfapplication cfargument cfassociate cfbreak cfcache cfcalendar ' +
'cfcase cfcatch cfchart cfchartdata cfchartseries cfcol cfcollection cfcomponent cfcontent cfcookie cfdbinfo ' +
'cfdefaultcase cfdirectory cfdiv cfdocument cfdocumentitem cfdocumentsection cfdump cfelse cfelseif cferror ' +
'cfexchangecalendar cfexchangeconnection cfexchangecontact cfexchangefilter cfexchangemail cfexchangetask ' +
'cfexecute cfexit cffeed cffile cfflush cfform cfformgroup cfformitem cfftp cffunction cfgrid cfgridcolumn ' +
'cfgridrow cfgridupdate cfheader cfhtmlhead cfhttp cfhttpparam cfif cfimage cfimport cfinclude cfindex ' +
'cfinput cfinsert cfinterface cfinvoke cfinvokeargument cflayout cflayoutarea cfldap cflocation cflock cflog ' +
'cflogin cfloginuser cflogout cfloop cfmail cfmailparam cfmailpart cfmenu cfmenuitem cfmodule cfNTauthenticate ' +
'cfobject cfobjectcache cfoutput cfparam cfpdf cfpdfform cfpdfformparam cfpdfparam cfpdfsubform cfpod cfpop ' +
'cfpresentation cfpresentationslide cfpresenter cfprint cfprocessingdirective cfprocparam cfprocresult ' +
'cfproperty cfquery cfqueryparam cfregistry cfreport cfreportparam cfrethrow cfreturn cfsavecontent cfschedule ' +
'cfscript cfsearch cfselect cfset cfsetting cfsilent cfslider cfsprydataset cfstoredproc cfswitch cftable ' +
'cftextarea cfthread cfthrow cftimer cftooltip cftrace cftransaction cftree cftreeitem cftry cfupdate cfwddx ' +
'cfwindow cfxml cfzip cfzipparam';
var operators = 'all and any between cross in join like not null or outer some';
this.regexList = [
{ regex: new RegExp('--(.*)$', 'gm'), css: 'comments' }, // one line comments
{ regex: SyntaxHighlighter.regexLib.xmlComments, css: 'comments' }, // multiline comments
{ regex: SyntaxHighlighter.regexLib.doubleQuotedString, css: 'string' }, // double quoted strings
{ regex: SyntaxHighlighter.regexLib.singleQuotedString, css: 'string' }, // single quoted strings
{ regex: new RegExp(this.getKeywords(funcs), 'gmi'), css: 'functions' }, // functions
{ regex: new RegExp(this.getKeywords(operators), 'gmi'), css: 'color1' }, // operators and such
{ regex: new RegExp(this.getKeywords(keywords), 'gmi'), css: 'keyword' } // keyword
];
}
|
SyntaxHighlighter.brushes.ColdFusion = Brush;
// CommonJS
typeof(exports) != 'undefined' ? exports.Brush = Brush : null;
})(); | Brush.prototype = new SyntaxHighlighter.Highlighter();
Brush.aliases = ['coldfusion','cf']; |
unsafe-fn-used-as-value.rs | // -*- rust -*-
| fn main() {
let x = f; //~ ERROR access to unsafe function requires unsafe function or block
x();
} | unsafe fn f() { return; }
|
linkedlist.py | # LinkedList implementation using a helper Element class
class | (object):
def __init__(self, value):
self.value = value
self.next = None
class LinkedList(object):
def __init__(self, head=None):
self.head = head
def append(self, new_element):
current = self.head
if self.head:
while current.next:
current = current.next
current.next = new_element
else:
self.head = new_element
def get_position(self, position):
index = 1
current = self.head
if self.head:
while current.next and index < position:
current = current.next
index += 1
if index == position:
return current
else:
return None
def insert(self, new_element, position):
index = 1
current = self.head
previous = current
if position != 1:
while current.next and index < position:
previous = current
current = current.next
index += 1
if index == position:
new_element.next = current
previous.next = new_element
else:
if self.head:
new_element.next = current
self.head = new_element
else:
self.head = new_element
def delete(self, value):
current = self.head
if self.head:
if current.value == value:
self.head = current.next
current.next = None
else:
while current.next:
previous = current
current = current.next
if current.value == value:
previous.next = current.next
current.next = None
# Test cases
# Set up some Elements
e1 = Element(1)
e2 = Element(2)
e3 = Element(3)
e4 = Element(4)
# Start setting up a LinkedList
ll = LinkedList(e1)
ll.append(e2)
ll.append(e3)
# Test get_position
# Output should print 3
print(ll.head.next.next.value)
# Output should also print 3
print(ll.get_position(3).value)
# Test insert
ll.insert(e4, 3)
# Output should print 4 now
print(ll.get_position(3).value)
# Test delete
ll.delete(1)
# Output should print 2 now
print(ll.get_position(1).value)
# Output should print 4 now
print(ll.get_position(2).value)
# Should print 3 now
print(ll.get_position(3).value)
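# Final list state after the insert and delete above (added for clarity): 2 -> 4 -> 3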
| Element |
day14.rs | use lazy_static::lazy_static;
use regex::Regex;
use std::collections::HashMap;
static INPUT: &str = include_str!("../../inputs/day14.txt");
lazy_static! {
static ref U64_REGEX: Regex = Regex::new(r"\d+").unwrap();
}
type Memory = HashMap<u64, u64>;
type Mask = String;
type Assignment = (u64, u64);
type MaskAndAddrs = (Mask, Vec<Assignment>);
pub fn run() {
let masks_and_addrs = transform_input(INPUT);
println!("Part 1: {}", part1(&masks_and_addrs));
println!("Part 2: {}", part2(&masks_and_addrs));
}
fn transform_input(input: &'static str) -> Vec<MaskAndAddrs> {
let mut masks_and_addrs: Vec<MaskAndAddrs> = Vec::new();
let mut cur_mask: Option<Mask> = None;
let mut cur_assignments: Vec<Assignment> = Vec::new();
for line in input.lines() {
if line.starts_with("mask") {
if let Some(mask) = cur_mask {
masks_and_addrs.push((mask, cur_assignments.clone()));
cur_assignments.clear();
}
cur_mask = Some(String::from(line.split_at(7).1));
} else {
let m: Vec<u64> = U64_REGEX
.find_iter(line)
.map(|x| x.as_str().parse().unwrap())
.collect();
cur_assignments.push((m[0], m[1]));
}
}
masks_and_addrs.push((cur_mask.unwrap(), cur_assignments));
masks_and_addrs
}
fn apply_mask(mask: &str, value: u64) -> u64 {
let value_bits: Vec<char> = format!("{:036b}", value).chars().collect();
let masked_bits = mask
.chars()
.enumerate()
.map(|(idx, c)| match c {
'X' => value_bits[idx],
c => c,
})
.collect::<String>();
u64::from_str_radix(&masked_bits, 2).unwrap()
}
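// Illustrative check of apply_mask, using the mask and values from the Advent
// of Code 2020 day 14 part 1 example (assumed from the puzzle text, not from
// this repository's input file).
#[test]
fn apply_mask_example() {
    let mask = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X";
    assert_eq!(apply_mask(mask, 11), 73);
    assert_eq!(apply_mask(mask, 101), 101);
    assert_eq!(apply_mask(mask, 0), 64);
}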
fn part1(masks_and_addrs: &[MaskAndAddrs]) -> u64 {
let mut memory: Memory = HashMap::new();
for (mask, assignments) in masks_and_addrs.iter() {
for (location, value) in assignments.iter() {
memory.insert(*location, apply_mask(&mask, *value));
}
}
memory.values().sum()
}
fn part2(masks_and_addrs: &[MaskAndAddrs]) -> u64 {
let mut memory: Memory = HashMap::new();
for (mask, assignments) in masks_and_addrs.iter() {
for (base_location, value) in assignments.iter() {
let locations = generate_nondeterministic_locations(*base_location, &mask);
for location in locations.iter() {
memory.insert(*location, *value);
}
}
}
memory.values().sum()
}
fn generate_nondeterministic_locations(location: u64, mask: &str) -> Vec<u64> {
let location_bits: Vec<char> = format!("{:036b}", location).chars().collect();
let locations: Vec<String> = mask.chars().enumerate().fold(
vec![String::from("")],
|locs: Vec<String>, (idx, c)| {
let next_chars: Vec<char> = match c {
'0' => vec![location_bits[idx]],
'1' => vec!['1'],
'X' => vec!['0', '1'],
_ => panic!("I WANT TO SLEEP."),
};
let mut new_locs = Vec::new();
for s in locs.into_iter() {
// To be quite frank, this is an atrocity, but it's been an hour and I
// want to sleep.
for c in next_chars.iter() {
let mut new_str = s.clone();
new_str.push(*c);
new_locs.push(new_str);
} | }
new_locs
},
);
locations
.iter()
.map(|x| u64::from_str_radix(x, 2).unwrap())
.collect()
} | |
rcserver_test.go | package rcserver
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"os"
"regexp"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
_ "github.com/rclone/rclone/backend/local"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/accounting"
"github.com/rclone/rclone/fs/config/configfile"
"github.com/rclone/rclone/fs/rc"
)
const (
testBindAddress = "localhost:0"
testTemplate = "testdata/golden/testindex.html"
testFs = "testdata/files"
remoteURL = "[" + testFs + "]/" // initial URL path to fetch from that remote
)
func TestMain(m *testing.M) {
// Pretend to be rclone version if we have a version string parameter
if os.Args[len(os.Args)-1] == "version" {
fmt.Printf("rclone %s\n", fs.Version)
os.Exit(0)
}
// Pretend to error if we have an unknown command
if os.Args[len(os.Args)-1] == "unknown_command" {
fmt.Printf("rclone %s\n", fs.Version)
fmt.Fprintf(os.Stderr, "Unknown command\n")
os.Exit(1)
}
os.Exit(m.Run())
}
// Test the RC server runs and we can do HTTP fetches from it.
// We'll do the majority of the testing with the httptest framework
func TestRcServer(t *testing.T) {
opt := rc.DefaultOpt
opt.HTTPOptions.ListenAddr = testBindAddress
opt.HTTPOptions.Template = testTemplate
opt.Enabled = true
opt.Serve = true
opt.Files = testFs
mux := http.NewServeMux()
rcServer := newServer(context.Background(), &opt, mux)
assert.NoError(t, rcServer.Serve())
defer func() {
rcServer.Close()
rcServer.Wait()
}()
testURL := rcServer.Server.URL()
// Do the simplest possible test to check the server is alive
// Do it a few times to wait for the server to start
var resp *http.Response
var err error
for i := 0; i < 10; i++ {
resp, err = http.Get(testURL + "file.txt")
if err == nil {
break
}
time.Sleep(10 * time.Millisecond)
}
require.NoError(t, err)
body, err := ioutil.ReadAll(resp.Body)
_ = resp.Body.Close()
require.NoError(t, err)
require.NoError(t, resp.Body.Close())
assert.Equal(t, http.StatusOK, resp.StatusCode)
assert.Equal(t, "this is file1.txt\n", string(body))
}
type testRun struct {
Name string
URL string
Status int
Method string
Range string
Body string
ContentType string
Expected string
Contains *regexp.Regexp
Headers map[string]string
}
// Run a suite of tests
func testServer(t *testing.T, tests []testRun, opt *rc.Options) {
ctx := context.Background()
configfile.Install()
mux := http.NewServeMux()
opt.HTTPOptions.Template = testTemplate
rcServer := newServer(ctx, opt, mux)
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
method := test.Method
if method == "" |
var inBody io.Reader
if test.Body != "" {
buf := bytes.NewBufferString(test.Body)
inBody = buf
}
req, err := http.NewRequest(method, "http://1.2.3.4/"+test.URL, inBody)
require.NoError(t, err)
if test.Range != "" {
req.Header.Add("Range", test.Range)
}
if test.ContentType != "" {
req.Header.Add("Content-Type", test.ContentType)
}
w := httptest.NewRecorder()
rcServer.handler(w, req)
resp := w.Result()
assert.Equal(t, test.Status, resp.StatusCode)
body, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
if test.Contains == nil {
assert.Equal(t, test.Expected, string(body))
} else {
assert.True(t, test.Contains.Match(body), fmt.Sprintf("body didn't match: %v: %v", test.Contains, string(body)))
}
for k, v := range test.Headers {
assert.Equal(t, v, resp.Header.Get(k), k)
}
})
}
}
// return an enabled rc
func newTestOpt() rc.Options {
opt := rc.DefaultOpt
opt.Enabled = true
return opt
}
func TestFileServing(t *testing.T) {
tests := []testRun{{
Name: "index",
URL: "",
Status: http.StatusOK,
Expected: `<pre>
<a href="dir/">dir/</a>
<a href="file.txt">file.txt</a>
</pre>
`,
}, {
Name: "notfound",
URL: "notfound",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dirnotfound",
URL: "dirnotfound/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusOK,
Expected: `<pre>
<a href="file2.txt">file2.txt</a>
</pre>
`,
}, {
Name: "file",
URL: "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRemoteServing(t *testing.T) {
tests := []testRun{
// Test serving files from the test remote
{
Name: "index",
URL: remoteURL + "",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /</title>
</head>
<body>
<h1>Directory listing of /</h1>
<a href="dir/">dir/</a><br />
<a href="file.txt">file.txt</a><br />
</body>
</html>
`,
}, {
Name: "notfound-index",
URL: "[notfound]/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "",
"status": 404
}
`,
}, {
Name: "notfound",
URL: remoteURL + "notfound",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to find object: object not found",
"input": null,
"path": "notfound",
"status": 404
}
`,
}, {
Name: "dirnotfound",
URL: remoteURL + "dirnotfound/",
Status: http.StatusNotFound,
Expected: `{
"error": "failed to list directory: directory not found",
"input": null,
"path": "dirnotfound",
"status": 404
}
`,
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusOK,
Expected: `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Directory listing of /dir</title>
</head>
<body>
<h1>Directory listing of /dir</h1>
<a href="file2.txt">file2.txt</a><br />
</body>
</html>
`,
}, {
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file with no slash after ]",
URL: strings.TrimRight(remoteURL, "/") + "file.txt",
Status: http.StatusOK,
Expected: "this is file1.txt\n",
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file2",
URL: remoteURL + "dir/file2.txt",
Status: http.StatusOK,
Expected: "this is dir/file2.txt\n",
}, {
Name: "file-head",
URL: remoteURL + "file.txt",
Method: "HEAD",
Status: http.StatusOK,
Expected: ``,
Headers: map[string]string{
"Content-Length": "18",
},
}, {
Name: "file-range",
URL: remoteURL + "file.txt",
Status: http.StatusPartialContent,
Range: "bytes=8-12",
Expected: `file1`,
}, {
Name: "bad-remote",
URL: "[notfoundremote:]/",
Status: http.StatusInternalServerError,
Expected: `{
"error": "failed to make Fs: didn't find section in config file",
"input": null,
"path": "/",
"status": 500
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRC(t *testing.T) {
tests := []testRun{{
Name: "rc-root",
URL: "",
Method: "POST",
Status: http.StatusNotFound,
Expected: `{
"error": "couldn't find method \"\"",
"input": {},
"path": "",
"status": 404
}
`,
}, {
Name: "rc-noop",
URL: "rc/noop",
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "rc-error",
URL: "rc/error",
Method: "POST",
Status: http.StatusInternalServerError,
Expected: `{
"error": "arbitrary error on input map[]",
"input": {},
"path": "rc/error",
"status": 500
}
`,
}, {
Name: "core-gc",
URL: "core/gc", // returns nil, nil so check it is made into {}
Method: "POST",
Status: http.StatusOK,
Expected: "{}\n",
}, {
Name: "url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage"
}
`,
}, {
Name: "json",
URL: "rc/noop",
Method: "POST",
Body: `{ "param1":"string", "param2":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": true
}
`,
}, {
Name: "json-and-url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `{ "param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "sausage",
"param3": true
}
`,
}, {
Name: "json-bad",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `{ param1":"string", "param3":true }`,
ContentType: "application/json",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to read input JSON: invalid character 'p' looking for beginning of object key string",
"input": {
"param1": "potato",
"param2": "sausage"
},
"path": "rc/noop",
"status": 400
}
`,
}, {
Name: "form",
URL: "rc/noop",
Method: "POST",
Body: `param1=string¶m2=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "string",
"param2": "true"
}
`,
}, {
Name: "form-and-url-params",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `param1=string¶m3=true`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: `{
"param1": "potato",
"param2": "sausage",
"param3": "true"
}
`,
}, {
Name: "form-bad",
URL: "rc/noop?param1=potato¶m2=sausage",
Method: "POST",
Body: `%zz`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusBadRequest,
Expected: `{
"error": "failed to parse form/URL parameters: invalid URL escape \"%zz\"",
"input": null,
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestRCWithAuth(t *testing.T) {
tests := []testRun{{
Name: "core-command",
URL: "core/command",
Method: "POST",
Body: `command=version`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`{
"error": false,
"result": "rclone %s\n"
}
`, fs.Version),
}, {
Name: "core-command-bad-returnType",
URL: "core/command",
Method: "POST",
Body: `command=version&returnType=POTATO`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusInternalServerError,
Expected: `{
"error": "Unknown returnType \"POTATO\"",
"input": {
"command": "version",
"returnType": "POTATO"
},
"path": "core/command",
"status": 500
}
`,
}, {
Name: "core-command-stream",
URL: "core/command",
Method: "POST",
Body: `command=version&returnType=STREAM`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`rclone %s
{}
`, fs.Version),
}, {
Name: "core-command-stream-error",
URL: "core/command",
Method: "POST",
Body: `command=unknown_command&returnType=STREAM`,
ContentType: "application/x-www-form-urlencoded",
Status: http.StatusOK,
Expected: fmt.Sprintf(`rclone %s
Unknown command
{
"error": "exit status 1",
"input": {
"command": "unknown_command",
"returnType": "STREAM"
},
"path": "core/command",
"status": 500
}
`, fs.Version),
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
opt.NoAuth = true
testServer(t, tests, &opt)
}
func TestMethods(t *testing.T) {
tests := []testRun{{
Name: "options",
URL: "",
Method: "OPTIONS",
Status: http.StatusOK,
Expected: "",
Headers: map[string]string{
"Access-Control-Allow-Origin": "http://localhost:5572/",
"Access-Control-Request-Method": "POST, OPTIONS, GET, HEAD",
"Access-Control-Allow-Headers": "authorization, Content-Type",
},
}, {
Name: "bad",
URL: "",
Method: "POTATO",
Status: http.StatusMethodNotAllowed,
Expected: `{
"error": "method \"POTATO\" not allowed",
"input": null,
"path": "",
"status": 405
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestMetrics(t *testing.T) {
stats := accounting.GlobalStats()
tests := makeMetricsTestCases(stats)
opt := newTestOpt()
opt.EnableMetrics = true
testServer(t, tests, &opt)
// Test changing a couple options
stats.Bytes(500)
stats.Deletes(30)
stats.Errors(2)
stats.Bytes(324)
tests = makeMetricsTestCases(stats)
testServer(t, tests, &opt)
}
func makeMetricsTestCases(stats *accounting.StatsInfo) (tests []testRun) {
tests = []testRun{{
Name: "Bytes Transferred Metric",
URL: "/metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_bytes_transferred_total %d", stats.GetBytes())),
}, {
Name: "Checked Files Metric",
URL: "/metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_checked_files_total %d", stats.GetChecks())),
}, {
Name: "Errors Metric",
URL: "/metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_errors_total %d", stats.GetErrors())),
}, {
Name: "Deleted Files Metric",
URL: "/metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_deleted_total %d", stats.Deletes(0))),
}, {
Name: "Files Transferred Metric",
URL: "/metrics",
Method: "GET",
Status: http.StatusOK,
Contains: regexp.MustCompile(fmt.Sprintf("rclone_files_transferred_total %d", stats.GetTransfers())),
},
}
return
}
var matchRemoteDirListing = regexp.MustCompile(`<title>Directory listing of /</title>`)
func TestServingRoot(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "*",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestServingRootNoFiles(t *testing.T) {
tests := []testRun{{
Name: "rootlist",
URL: "",
Status: http.StatusOK,
Contains: matchRemoteDirListing,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoFiles(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: "file.txt",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}, {
Name: "dir",
URL: "dir/",
Status: http.StatusNotFound,
Expected: "Not Found\n",
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
func TestNoServe(t *testing.T) {
tests := []testRun{{
Name: "file",
URL: remoteURL + "file.txt",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}, {
Name: "dir",
URL: remoteURL + "dir/",
Status: http.StatusNotFound,
Expected: "404 page not found\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = testFs
testServer(t, tests, &opt)
}
func TestAuthRequired(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusForbidden,
Expected: `{
"error": "authentication must be set up on the rc server to use \"rc/noopauth\" or the --rc-no-auth flag must be in use",
"input": {},
"path": "rc/noopauth",
"status": 403
}
`,
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
testServer(t, tests, &opt)
}
func TestNoAuth(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = true
testServer(t, tests, &opt)
}
func TestWithUserPass(t *testing.T) {
tests := []testRun{{
Name: "auth",
URL: "rc/noopauth",
Method: "POST",
Body: `{}`,
ContentType: "application/javascript",
Status: http.StatusOK,
Expected: "{}\n",
}}
opt := newTestOpt()
opt.Serve = false
opt.Files = ""
opt.NoAuth = false
opt.HTTPOptions.BasicUser = "user"
opt.HTTPOptions.BasicPass = "pass"
testServer(t, tests, &opt)
}
func TestRCAsync(t *testing.T) {
tests := []testRun{{
Name: "ok",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":true }`,
Status: http.StatusOK,
Contains: regexp.MustCompile(`(?s)\{.*\"jobid\":.*\}`),
}, {
Name: "bad",
URL: "rc/noop",
Method: "POST",
ContentType: "application/json",
Body: `{ "_async":"truthy" }`,
Status: http.StatusBadRequest,
Expected: `{
"error": "couldn't parse key \"_async\" (truthy) as bool: strconv.ParseBool: parsing \"truthy\": invalid syntax",
"input": {
"_async": "truthy"
},
"path": "rc/noop",
"status": 400
}
`,
}}
opt := newTestOpt()
opt.Serve = true
opt.Files = ""
testServer(t, tests, &opt)
}
| {
method = "GET"
} |
coredump_config_modify_iter_info.py | from netapp.coredump.coredump_config_info import CoredumpConfigInfo
from netapp.netapp_object import NetAppObject
class CoredumpConfigModifyIterInfo(NetAppObject):
"""
Information about the modify operation that was
attempted/performed against the coredump-config object,
including details of objects that were not modified
due to some error.
This element will be returned only if the input element
'return-failure-list' is true.
"""
_error_code = None
@property
def error_code(self):
"""
Error code, if the modify operation caused an error.
"""
return self._error_code
@error_code.setter
def error_code(self, val):
if val != None:
self.validate('error_code', val)
self._error_code = val
_error_message = None
@property
def error_message(self):
"""
Error description, if the modify operation caused an
error.
"""
return self._error_message
@error_message.setter
def error_message(self, val):
if val != None:
self.validate('error_message', val)
self._error_message = val
_coredump_config_key = None
@property
def coredump_config_key(self):
"""
The keys for the coredump-config object to which the
modify operation applies.
"""
return self._coredump_config_key | self._coredump_config_key = val
@staticmethod
def get_api_name():
return "coredump-config-modify-iter-info"
@staticmethod
def get_desired_attrs():
return [
'error-code',
'error-message',
'coredump-config-key',
]
def describe_properties(self):
return {
'error_code': { 'class': int, 'is_list': False, 'required': 'optional' },
'error_message': { 'class': basestring, 'is_list': False, 'required': 'optional' },
'coredump_config_key': { 'class': CoredumpConfigInfo, 'is_list': False, 'required': 'required' },
} | @coredump_config_key.setter
def coredump_config_key(self, val):
if val != None:
self.validate('coredump_config_key', val) |
layerSourceUtils.js | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See https://js.arcgis.com/4.11/esri/copyright.txt for details.
//>>built | leftTableKey:a.leftTableKey,rightTableKey:a.rightTableKey,joinType:r.fromJSON(a.joinType)};break;case "raster":e={workspaceId:a.workspaceId,dataSourceName:a.dataSourceName}}e.type=t.fromJSON(a.type);a=g.fixJson(e);c.dataSource=a}c.type=h.fromJSON(b.type);return g.fixJson(c)}Object.defineProperty(d,"__esModule",{value:!0});d.MAPLAYER="map-layer";d.DATALAYER="data-layer";var q=new f.default({esriGeometryPoint:"point",esriGeometryMultipoint:"multipoint",esriGeometryPolyline:"polyline",esriGeometryPolygon:"polygon",
esriGeometryMultiPatch:"multipatch"}),h=new f.default({mapLayer:d.MAPLAYER,dataLayer:d.DATALAYER}),t=new f.default({joinTable:"join-table",queryTable:"query-table"}),r=new f.default({esriLeftOuterJoin:"left-outer-join",esriLeftInnerJoin:"left-inner-join"});d.isMapLayerSource=n;d.isDataLayerSource=p;d.castSource=k;d.sourceToJSON=l;d.sourceFromJSON=m}); | define(["require","exports","../../core/kebabDictionary","../../core/lang"],function(u,d,f,g){function n(b){return null!=b&&b.hasOwnProperty("mapLayerId")}function p(b){return null!=b&&b.hasOwnProperty("dataSource")}function k(b){if(!b)return b;n(b)&&(b.type=d.MAPLAYER);if(p(b)&&(b.type=d.DATALAYER,!b.dataSource.type)){var c=b.dataSource;c.workspaceId?c.type=c.gdbVersion?"table":c.query||c.oidFields?"query-table":"raster":c.leftTableKey&&c.rightTableKey&&c.leftTableSource&&c.rightTableSource&&(c.type=
"join-table",c.leftTableSource=k(c.leftTableSource),c.rightTableSource=k(c.rightTableSource))}return b}function l(b){var c={};if(b.type===d.MAPLAYER)c.mapLayerId=b.mapLayerId,b.gdbVersion&&(c.gdbVersion=b.gdbVersion);else if(b.type===d.DATALAYER){b.fields&&(c.fields=b.fields);var a;a=b.dataSource;var e;switch(a.type){case "table":e={dataSourceName:a.dataSourceName,workspaceId:a.workspaceId,gdbVersion:a.gdbVersion};break;case "query-table":e={geometryType:q.toJSON(a.geometryType),workspaceId:a.workspaceId,
query:a.query,oidFields:a.oidFields,spatialReference:a.spatialReference};break;case "join-table":e={leftTableSource:l(a.leftTableSource),rightTableSource:l(a.rightTableSource),leftTableKey:a.leftTableKey,rightTableKey:a.rightTableKey,joinType:r.toJSON(a.joinType)};break;case "raster":e={workspaceId:a.workspaceId,dataSourceName:a.dataSourceName}}e.type=t.toJSON(a.type);a=g.fixJson(e);c.dataSource=a}c.type=h.toJSON(b.type);return g.fixJson(c)}function m(b){var c={};if(h.fromJSON(b.type)===d.MAPLAYER)c.mapLayerId=
b.mapLayerId,b.gdbVersion&&(c.gdbVersion=b.gdbVersion);else if(h.fromJSON(b.type)===d.DATALAYER){b.fields&&(c.fields=b.fields);var a;a=b.dataSource;var e;switch(a.type){case "table":e={dataSourceName:a.dataSourceName,workspaceId:a.workspaceId,gdbVersion:a.gdbVersion};break;case "queryTable":e={geometryType:q.fromJSON(a.geometryType),workspaceId:a.workspaceId,query:a.query,oidFields:a.oidFields,spatialReference:a.spatialReference};break;case "joinTable":e={leftTableSource:m(a.leftTableSource),rightTableSource:m(a.rightTableSource), |
_840_magic_squares_in_grid.rs | struct Solution;
impl Solution {
fn is_magic(grid: &[Vec<i32>], r: usize, c: usize) -> bool {
let mut xor = 0;
for i in 1..10 {
xor ^= i;
}
for i in 0..3 {
for j in 0..3 {
xor ^= grid[r + i][c + j];
}
}
if xor != 0 {
return false;
}
let r0 = grid[r][c] + grid[r][c + 1] + grid[r][c + 2];
if r0 != 15 {
return false;
}
let r1 = grid[r + 1][c] + grid[r + 1][c + 1] + grid[r + 1][c + 2];
if r1 != 15 {
return false;
}
let r2 = grid[r + 2][c] + grid[r + 2][c + 1] + grid[r + 2][c + 2];
if r2 != 15 {
return false;
}
let c0 = grid[r][c] + grid[r + 1][c] + grid[r + 2][c];
if c0 != 15 {
return false;
}
let c1 = grid[r][c + 1] + grid[r + 1][c + 1] + grid[r + 2][c + 1];
if c1 != 15 {
return false;
}
let c2 = grid[r][c + 2] + grid[r + 1][c + 2] + grid[r + 2][c + 2];
if c2 != 15 {
return false;
}
let d0 = grid[r][c] + grid[r + 1][c + 1] + grid[r + 2][c + 2];
if d0 != 15 {
return false;
}
let d1 = grid[r][c + 2] + grid[r + 1][c + 1] + grid[r + 2][c];
if d1 != 15 |
true
}
fn num_magic_squares_inside(grid: Vec<Vec<i32>>) -> i32 {
let n = grid.len();
let m = grid[0].len();
if n < 3 || m < 3 {
return 0;
}
let mut sum = 0;
for i in 0..=(n - 3) {
for j in 0..=(m - 3) {
if Self::is_magic(&grid, i, j) {
sum += 1;
}
}
}
sum
}
}
#[test]
fn test() {
let grid: Vec<Vec<i32>> = vec_vec_i32![[4, 3, 8, 4], [9, 5, 1, 9], [2, 7, 6, 2]];
assert_eq!(Solution::num_magic_squares_inside(grid), 1);
let grid: Vec<Vec<i32>> = vec_vec_i32![[5, 5, 5], [5, 5, 5], [5, 5, 5]];
assert_eq!(Solution::num_magic_squares_inside(grid), 0);
}
| {
return false;
} |
app.js | // Import controls.
import { MDCTabBar } from '@material/tab-bar';
import { MDCTextField } from '@material/textfield';
import { MDCTextFieldHelperText } from '@material/textfield/helper-text';
import { MDCTextFieldCharacterCounter } from '@material/textfield/character-counter';
import { MDCSnackbar } from '@material/snackbar';
import anime from 'animejs/lib/anime.es.js';
// Init.
const TAB_BAR = new MDCTabBar(document.getElementsByClassName('mdc-tab-bar')[0]);
const CONTACT_FORM = document.getElementsByTagName('form')[0];
const SNACK_BAR = new MDCSnackbar(document.getElementsByClassName('mdc-snackbar')[0]);
const NAME_TEXT_FIELD = new MDCTextField(document.getElementById('ks-name-text-field'));
const EMAIL_TEXT_FIELD = new MDCTextField(document.getElementById('ks-email-text-field'));
const MESSAGE_TEXT_FIELD = new MDCTextField(document.getElementById('ks-message-text-field'));
const MESSAGE_TEXT_FIELD_CHARACTER_COUNTER = new MDCTextFieldCharacterCounter(document.getElementById('ks-message-text-field-character-counter'));
const EMAIL_VALIDATION_MSG = new MDCTextFieldHelperText(document.getElementById('ks-email-validation-msg'));
const PROJECT_DEMO_BUTTONS = document.getElementsByClassName('ks-project-demo-button');
const SEND_CONTACT_FORM_BUTTON = document.getElementById('ks-send-contact-form-button');
const SEND_CONTACT_FORM_BUTTON_LABLE = document.getElementById('ks-send-contact-form-button-label');
// About page animation.
function animateAboutPage() {
anime({
targets: ['.ks-about-content > .mdc-typography', '#ks-social-media-container'],
keyframes: [
{ opacity: 0 },
{ opacity: 1 }
],
duration: 1000,
delay: (_element, i) => { return i * 150 },
easing: 'easeInOutQuad'
});
}
// Card animation.
function animateProjectsCards() {
anime({
targets: '.ks-card',
keyframes: [
{ opacity: 0 },
{ opacity: 1 }
],
duration: 1000,
delay: (_element, i) => { return i * 150 },
easing: 'easeInOutQuad'
});
}
function animateContactForm() {
anime({
targets: 'form',
keyframes: [
{ opacity: 0 },
{ opacity: 1 }
],
duration: 1000,
easing: 'easeInOutQuad'
});
}
// Tab toggle control.
TAB_BAR.listen('MDCTab:interacted', (event) => {
if (event.detail.tabId === 'mdc-tab-1') {
document.getElementsByClassName('ks-section-content')[0].classList.remove('ks-hide');
document.getElementsByClassName('ks-section-content')[1].classList.add('ks-hide');
// Show card animation.
for (const CARDS of document.getElementsByClassName('ks-card')) {
CARDS.style.opacity = 0; | }
animateProjectsCards();
}
if (event.detail.tabId === 'mdc-tab-2') {
document.getElementsByClassName('ks-section-content')[0].classList.add('ks-hide');
document.getElementsByClassName('ks-section-content')[1].classList.remove('ks-hide');
// Show form animation.
document.getElementsByTagName('form')[0].style.opacity = 0;
animateContactForm();
}
});
// Form button control.
CONTACT_FORM.addEventListener('input', (_event) => {
if (NAME_TEXT_FIELD.valid && EMAIL_TEXT_FIELD.valid && MESSAGE_TEXT_FIELD.valid) {
SEND_CONTACT_FORM_BUTTON.removeAttribute('disabled', '');
} else {
SEND_CONTACT_FORM_BUTTON.setAttribute('disabled', '');
}
});
// Form email field validation.
EMAIL_TEXT_FIELD.listen('input', (_event) => {
if ((!/^\w+([\.-]?\w+)*@\w+([\.-]?\w+)*(\.\w{2,3})+$/.test(EMAIL_TEXT_FIELD.value)) && EMAIL_TEXT_FIELD.value !== '') {
EMAIL_TEXT_FIELD.getDefaultFoundation().setValid(false);
EMAIL_VALIDATION_MSG.getDefaultFoundation().setContent('The email is badly formated.');
} else if (EMAIL_TEXT_FIELD.value === '') {
EMAIL_VALIDATION_MSG.getDefaultFoundation().setContent('This field is required.');
} else {
EMAIL_TEXT_FIELD.getDefaultFoundation().setValid(true);
}
});
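// For reference (illustrative note, not part of the original source): the pattern
// above accepts addresses such as 'user@example.com' or 'first.last@sub.domain.org'
// and rejects strings such as 'user@' or 'user@domain' (no dotted TLD part).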
for (const PROJECT_DEMO_BUTTON of PROJECT_DEMO_BUTTONS) {
PROJECT_DEMO_BUTTON.addEventListener('click', (_event) => {
window.location = PROJECT_DEMO_BUTTON.dataset.projectdemolink;
});
}
// Submit control.
SEND_CONTACT_FORM_BUTTON.addEventListener('click', async (event) => {
event.preventDefault();
SEND_CONTACT_FORM_BUTTON.setAttribute('disabled', '');
SEND_CONTACT_FORM_BUTTON_LABLE.textContent = 'Sending';
if (NAME_TEXT_FIELD.valid && EMAIL_TEXT_FIELD.valid && MESSAGE_TEXT_FIELD.valid) {
let sendContactFormResponse;
try {
sendContactFormResponse = await fetch(new Request('https://kynsonszetau.com/api/sendcontactform/'), {
body: JSON.stringify({
name: NAME_TEXT_FIELD.value,
email: EMAIL_TEXT_FIELD.value,
message: MESSAGE_TEXT_FIELD.value
}),
headers: {
'Content-Type': 'application/json'
},
method: 'POST',
mode: 'cors'
});
} catch (_error) {
SNACK_BAR.labelText = 'An error has occurred, please try again.';
SNACK_BAR.open();
SEND_CONTACT_FORM_BUTTON_LABLE.textContent = 'Send';
SEND_CONTACT_FORM_BUTTON.removeAttribute('disabled', '');
return;
}
if (sendContactFormResponse.ok) {
SNACK_BAR.labelText = 'Your message has been sent.';
MESSAGE_TEXT_FIELD_CHARACTER_COUNTER.getDefaultFoundation().setCounterValue(0, 500);
CONTACT_FORM.reset();
} else if (sendContactFormResponse.status === 429) {
SNACK_BAR.labelText = 'Too many requests, please try again in 12 hours.';
SEND_CONTACT_FORM_BUTTON.removeAttribute('disabled', '');
} else {
SNACK_BAR.labelText = 'En error has occoured, please try again.';
SEND_CONTACT_FORM_BUTTON.removeAttribute('disabled', '');
}
SEND_CONTACT_FORM_BUTTON_LABLE.textContent = 'Send';
SNACK_BAR.open();
} else {
SNACK_BAR.labelText = 'Please fill in all fields.';
SEND_CONTACT_FORM_BUTTON_LABLE.textContent = 'Send';
SNACK_BAR.open();
}
});
animateAboutPage();
animateProjectsCards(); | |
map_selection.rs | use amethyst::ecs::{storage::DenseVecStorage, Component};
use asset_model::loaded::AssetId;
use asset_selection_model::play::AssetSelection;
use derivative::Derivative;
/// Selected map ID or random for a particular controller.
#[derive(Clone, Component, Copy, Debug, Derivative, PartialEq)]
#[derivative(Default)]
pub enum MapSelection {
/// No map is currently selected.
#[derivative(Default)]
None,
/// User has selected *Random*.
Random(Option<AssetId>),
/// User has selected a map.
Id(AssetId),
}
impl MapSelection {
/// Returns the `AssetId` of the selection.
pub fn | (self) -> Option<AssetId> {
match self {
MapSelection::None => None,
MapSelection::Random(asset_id) => asset_id,
MapSelection::Id(asset_id) => Some(asset_id),
}
}
}
impl From<AssetSelection> for MapSelection {
fn from(asset_selection: AssetSelection) -> Self {
match asset_selection {
AssetSelection::Random => MapSelection::Random(None),
AssetSelection::Id(asset_id) => MapSelection::Id(asset_id),
}
}
}
| asset_id |
dragndrop.py | """
domonic.webapi.dragndrop
====================================
https://developer.mozilla.org/en-US/docs/Web/API/HTML_Drag_and_Drop_API
"""
from domonic.events import DragEvent
class DataTransfer:
def __init__(self):
self.data = {}
self.types = []
self.files = []
self.items = []
self.dropEffect = ""
self.effectAllowed = ""
def clearData(self, type):
self.data[type] = ""
self.types.remove(type)
def getData(self, type):
return self.data[type]
def setData(self, type, data):
self.data[type] = data
self.types.append(type)
def setDragImage(self, image, x, y):
pass
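# Illustrative usage of the data methods above (added for clarity):
#   dt = DataTransfer()
#   dt.setData("text/plain", "hello")
#   assert dt.getData("text/plain") == "hello"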
def | (self, element):
self.items.append(element)
# def addFile(self, file):
# self.files.append(file)
# class DataTransferItem:
# def __init__(self, type, data):
# self.type = type
# self.data = data
# def getAsString(self):
# return self.data
# def getAsFile(self):
# return self.data
# def getAsFileSystemHandle(self):
# return self.data
# def webkitGetAsEntry(self):
# return self.data
| addElement |
coherence_types.go | // Copyright (c) 2020, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.
package v1
import (
corev1 "k8s.io/api/core/v1"
"time"
)
// Common Coherence API structs
// NOTE: This file is used to generate the CRDs use by the Operator. The CRD files should not be manually edited
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ----- constants ----------------------------------------------------------
const (
// The default number of replicas that will be created for a role if no value is specified in the spec
DefaultReplicas int32 = 3
// The default health check port.
DefaultHealthPort int32 = 6676
// The default role name that will be used for a role if no value is specified in the spec
DefaultRoleName = "storage"
// The suffix appended to a cluster name to give the WKA service name
WKAServiceNameSuffix = "-wka"
// The key of the label used to hold the Coherence cluster name
CoherenceClusterLabel string = "coherenceCluster"
// The key of the label used to hold the Coherence role name
CoherenceRoleLabel string = "coherenceRole"
// The key of the label used to hold the component name
CoherenceComponentLabel string = "component"
)
// ----- helper functions ---------------------------------------------------
// MergeMap returns a map that is the result of merging the two maps.
// If both maps are nil then nil is returned.
// Where there are duplicate keys, those in m1 take precedence.
// Keys that map to "" will not be added to the merged result.
func MergeMap(m1, m2 map[string]string) map[string]string {
if m1 == nil && m2 == nil {
return nil
}
merged := make(map[string]string)
for k, v := range m2 {
if v != "" {
merged[k] = v
}
}
for k, v := range m1 {
if v != "" {
merged[k] = v
} else {
delete(merged, k)
}
}
return merged
}
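// Illustrative example (added for clarity; not part of the original source):
//
//	MergeMap(map[string]string{"a": "1", "b": ""},
//	         map[string]string{"b": "2", "c": "3"})
//
// returns map[string]string{"a": "1", "c": "3"}: keys in m1 take precedence,
// and a value of "" in m1 removes the key from the merged result.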
// ----- ApplicationSpec struct ---------------------------------------------
// The specification of the application deployed into the Coherence
// role members.
// +k8s:openapi-gen=true
type ApplicationSpec struct {
// The application type to execute.
// This field would be set if using the Coherence Graal image and running a non-Java
// application. For example if the application was a Node application this field
// would be set to "node". The default is to run a plain Java application.
// +optional
Type *string `json:"type,omitempty"`
// Main is the Coherence container main class. The default value is
// com.tangosol.net.DefaultCacheServer.
// If the application type is non-Java this would be the name of the corresponding language specific
// runnable, for example if the application type is "node" the main may be a Javascript file.
// +optional
Main *string `json:"main,omitempty"`
// Args is the optional arguments to pass to the main class.
// +listType=atomic
// +optional
Args []string `json:"args,omitempty"`
// The inlined application image definition
ImageSpec `json:",inline"`
// The application folder in the custom artifacts Docker image containing
// application artifacts.
// This will effectively become the working directory of the Coherence container.
// If not set the application directory default value is "/app".
// +optional
AppDir *string `json:"appDir,omitempty"`
// The folder in the custom artifacts Docker image containing jar
// files to be added to the classpath of the Coherence container.
// If not set the lib directory default value is "/app/lib".
// +optional
LibDir *string `json:"libDir,omitempty"`
// The folder in the custom artifacts Docker image containing
// configuration files to be added to the classpath of the Coherence container.
// If not set the config directory default value is "/app/conf".
// +optional
ConfigDir *string `json:"configDir,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this ApplicationSpec struct with any nil or not set
// values set by the corresponding value in the defaults Images struct.
func (in *ApplicationSpec) DeepCopyWithDefaults(defaults *ApplicationSpec) *ApplicationSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ApplicationSpec{}
clone.ImageSpec = *in.ImageSpec.DeepCopyWithDefaults(&defaults.ImageSpec)
if in.Type != nil {
clone.Type = in.Type
} else {
clone.Type = defaults.Type
}
if in.Main != nil {
clone.Main = in.Main
} else {
clone.Main = defaults.Main
}
if in.Args != nil {
clone.Args = in.Args
} else {
clone.Args = defaults.Args
}
if in.AppDir != nil {
clone.AppDir = in.AppDir
} else {
clone.AppDir = defaults.AppDir
}
if in.LibDir != nil {
clone.LibDir = in.LibDir
} else {
clone.LibDir = defaults.LibDir
}
if in.ConfigDir != nil {
clone.ConfigDir = in.ConfigDir
} else {
clone.ConfigDir = defaults.ConfigDir
}
return &clone
}
// ----- CoherenceSpec struct -----------------------------------------------
// The Coherence specific configuration.
// +k8s:openapi-gen=true
type CoherenceSpec struct {
// The Coherence images configuration.
ImageSpec `json:",inline"`
// A boolean flag indicating whether members of this role are storage enabled.
// This value will set the corresponding coherence.distributed.localstorage System property.
// If not specified the default value is true.
// This flag is also used to configure the ScalingPolicy value if a value is not specified. If the
// StorageEnabled field is not specified or is true the scaling will be safe, if StorageEnabled is
// set to false scaling will be parallel.
// +optional
StorageEnabled *bool `json:"storageEnabled,omitempty"`
// CacheConfig is the name of the cache configuration file to use
// +optional
CacheConfig *string `json:"cacheConfig,omitempty"`
// OverrideConfig is the name of the Coherence operational configuration override file,
// the default is tangosol-coherence-override.xml
// +optional
OverrideConfig *string `json:"overrideConfig,omitempty"`
// The Coherence log level, default being 5 (info level).
// +optional
LogLevel *int32 `json:"logLevel,omitempty"`
// Persistence values configure the on-disc data persistence settings.
// The bool Enabled enables or disables on-disc persistence of data.
// +optional
Persistence *PersistentStorageSpec `json:"persistence,omitempty"`
// Snapshot values configure the on-disc persistence data snapshot (backup) settings.
// The bool Enabled enables or disables a different location for
// persistence snapshot data. If set to false then snapshot files will be written
// to the same volume configured for persistence data in the Persistence section.
// +optional
Snapshot *PersistentStorageSpec `json:"snapshot,omitempty"`
// Management configures Coherence management over REST
// Note: Coherence management over REST will be available in 12.2.1.4.
// +optional
Management *PortSpecWithSSL `json:"management,omitempty"`
// Metrics configures Coherence metrics publishing
// Note: Coherence metrics publishing will be available in 12.2.1.4.
// +optional
Metrics *PortSpecWithSSL `json:"metrics,omitempty"`
// Exclude members of this role from being part of the cluster's WKA list.
ExcludeFromWKA *bool `json:"excludeFromWKA,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this CoherenceSpec struct with any nil or not set
// values set by the corresponding value in the defaults CoherenceSpec struct.
func (in *CoherenceSpec) DeepCopyWithDefaults(defaults *CoherenceSpec) *CoherenceSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := CoherenceSpec{}
clone.ImageSpec = *in.ImageSpec.DeepCopyWithDefaults(&defaults.ImageSpec)
clone.Persistence = in.Persistence.DeepCopyWithDefaults(defaults.Persistence)
clone.Snapshot = in.Snapshot.DeepCopyWithDefaults(defaults.Snapshot)
clone.Management = in.Management.DeepCopyWithDefaults(defaults.Management)
clone.Metrics = in.Metrics.DeepCopyWithDefaults(defaults.Metrics)
if in.StorageEnabled != nil {
clone.StorageEnabled = in.StorageEnabled
} else {
clone.StorageEnabled = defaults.StorageEnabled
}
if in.CacheConfig != nil {
clone.CacheConfig = in.CacheConfig
} else {
clone.CacheConfig = defaults.CacheConfig
}
if in.OverrideConfig != nil {
clone.OverrideConfig = in.OverrideConfig
} else {
clone.OverrideConfig = defaults.OverrideConfig
}
if in.LogLevel != nil {
clone.LogLevel = in.LogLevel
} else {
clone.LogLevel = defaults.LogLevel
}
if in.ExcludeFromWKA != nil {
clone.ExcludeFromWKA = in.ExcludeFromWKA
} else {
clone.ExcludeFromWKA = defaults.ExcludeFromWKA
}
return &clone
}
// IsWKAMember returns true if this role is a WKA list member.
func (in *CoherenceSpec) IsWKAMember() bool {
return in != nil && (in.ExcludeFromWKA == nil || !*in.ExcludeFromWKA)
}
// ----- JVMSpec struct -----------------------------------------------------
// The JVM configuration.
// +k8s:openapi-gen=true
type JVMSpec struct {
// Args specifies the options (System properties, -XX: args etc) to pass to the JVM.
// +listType=atomic
// +optional
Args []string `json:"args,omitempty"`
// The settings for enabling debug mode in the JVM.
// +optional
Debug *JvmDebugSpec `json:"debug,omitempty"`
// If set to true, adds the -XX:+UseContainerSupport JVM option to ensure that the JVM
// respects any container resource limits.
// The default value is true
// +optional
UseContainerLimits *bool `json:"useContainerLimits,omitempty"`
// If set to true, enables continuous flight recorder recordings.
// This will add the JVM options -XX:+UnlockCommercialFeatures -XX:+FlightRecorder
// -XX:FlightRecorderOptions=defaultrecording=true,dumponexit=true,dumponexitpath=/dumps
// +optional
FlightRecorder *bool `json:"flightRecorder,omitempty"`
// Set JVM garbage collector options.
// +optional
Gc *JvmGarbageCollectorSpec `json:"gc,omitempty"`
// +optional
DiagnosticsVolume *corev1.VolumeSource `json:"diagnosticsVolume,omitempty"`
// Configure the JVM memory options.
// +optional
Memory *JvmMemorySpec `json:"memory,omitempty"`
// Configure JMX using JMXMP.
// +optional
Jmxmp *JvmJmxmpSpec `json:"jmxmp,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JVMSpec struct with any nil or not set
// values set by the corresponding value in the defaults JVMSpec struct.
func (in *JVMSpec) DeepCopyWithDefaults(defaults *JVMSpec) *JVMSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JVMSpec{}
clone.Debug = in.Debug.DeepCopyWithDefaults(defaults.Debug)
clone.Gc = in.Gc.DeepCopyWithDefaults(defaults.Gc)
clone.Memory = in.Memory.DeepCopyWithDefaults(defaults.Memory)
clone.Jmxmp = in.Jmxmp.DeepCopyWithDefaults(defaults.Jmxmp)
if in.UseContainerLimits != nil {
clone.UseContainerLimits = in.UseContainerLimits
} else {
clone.UseContainerLimits = defaults.UseContainerLimits
}
if in.FlightRecorder != nil {
clone.FlightRecorder = in.FlightRecorder
} else {
clone.FlightRecorder = defaults.FlightRecorder
}
if in.DiagnosticsVolume != nil {
clone.DiagnosticsVolume = in.DiagnosticsVolume
} else {
clone.DiagnosticsVolume = defaults.DiagnosticsVolume
}
// Merge Args
if in.Args != nil {
clone.Args = []string{}
clone.Args = append(clone.Args, defaults.Args...)
clone.Args = append(clone.Args, in.Args...)
} else if defaults.Args != nil {
clone.Args = []string{}
clone.Args = append(clone.Args, defaults.Args...)
}
return &clone
}
// ----- ImageSpec struct ---------------------------------------------------
// CoherenceInternalImageSpec defines the settings for a Docker image
// +k8s:openapi-gen=true
type ImageSpec struct {
// Docker image name.
// More info: https://kubernetes.io/docs/concepts/containers/images
// +optional
Image *string `json:"image,omitempty"`
// Image pull policy.
// One of Always, Never, IfNotPresent.
// More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
// +optional
ImagePullPolicy *corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this ImageSpec struct with any nil or not set values set
// by the corresponding value in the defaults ImageSpec struct.
func (in *ImageSpec) DeepCopyWithDefaults(defaults *ImageSpec) *ImageSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ImageSpec{}
if in.Image != nil {
clone.Image = in.Image
} else {
clone.Image = defaults.Image
}
if in.ImagePullPolicy != nil {
clone.ImagePullPolicy = in.ImagePullPolicy
} else {
clone.ImagePullPolicy = defaults.ImagePullPolicy
}
return &clone
}
// ----- LoggingSpec struct -------------------------------------------------
// LoggingSpec defines the settings for the Coherence Pod logging
// +k8s:openapi-gen=true
type LoggingSpec struct {
// ConfigFile allows the location of the Java util logging configuration file to be overridden.
// If this value is not set the logging.properties file embedded in this chart will be used.
// If this value is set the configuration will be located by trying the following locations in order:
// 1. If store.logging.configMapName is set then the config map will be mounted as a volume and the logging
// properties file will be located as a file location relative to the ConfigMap volume mount point.
// 2. If userArtifacts.imageName is set then using this value as a file name relative to the location of the
// configuration files directory in the user artifacts image.
// 3. Using this value as an absolute file name.
// +optional
ConfigFile *string `json:"configFile,omitempty"`
// ConfigMapName allows a config map to be mounted as a volume containing the logging
// configuration file to use.
// +optional
ConfigMapName *string `json:"configMapName,omitempty"`
// Configures whether Fluentd is enabled and the configuration
// of the Fluentd side-car container
// +optional
Fluentd *FluentdSpec `json:"fluentd,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this LoggingSpec struct with any nil or not set values set
// by the corresponding value in the defaults LoggingSpec struct.
func (in *LoggingSpec) DeepCopyWithDefaults(defaults *LoggingSpec) *LoggingSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := LoggingSpec{}
clone.Fluentd = in.Fluentd.DeepCopyWithDefaults(defaults.Fluentd)
if in.ConfigFile != nil {
clone.ConfigFile = in.ConfigFile
} else {
clone.ConfigFile = defaults.ConfigFile
}
if in.ConfigMapName != nil {
clone.ConfigMapName = in.ConfigMapName
} else {
clone.ConfigMapName = defaults.ConfigMapName
}
return &clone
}
// ----- PersistentStorageSpec struct ---------------------------------------
// PersistentStorageSpec defines the persistence settings for Coherence.
// +k8s:openapi-gen=true
type PersistentStorageSpec struct {
// +optional
Enabled *bool `json:"enabled,omitempty"`
// PersistentVolumeClaim allows the configuration of a normal k8s persistent volume claim
// for persistence data.
// +optional
PersistentVolumeClaim *corev1.PersistentVolumeClaimSpec `json:"persistentVolumeClaim,omitempty"` // from k8s.io/api/core/v1
// Volume allows the configuration of a normal k8s volume mapping
// for persistence data instead of a persistent volume claim. If a value is defined
// for store.persistence.volume then no PVC will be created and persistence data
// will instead be written to this volume. It is up to the deployer to understand
// the consequences of this and how the guarantees given when using PVCs differ
// to the storage guarantees for the particular volume type configured here.
// +optional
Volume *corev1.VolumeSource `json:"volume,omitempty"` // from k8s.io/api/core/v1
}
// DeepCopyWithDefaults returns a copy of this PersistentStorageSpec struct with any nil or not set values set
// by the corresponding value in the defaults PersistentStorageSpec struct.
func (in *PersistentStorageSpec) DeepCopyWithDefaults(defaults *PersistentStorageSpec) *PersistentStorageSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := PersistentStorageSpec{}
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.PersistentVolumeClaim != nil {
clone.PersistentVolumeClaim = in.PersistentVolumeClaim
} else {
clone.PersistentVolumeClaim = defaults.PersistentVolumeClaim
}
if in.Volume != nil {
clone.Volume = in.Volume
} else {
clone.Volume = defaults.Volume
}
return &clone
}
// ----- SSLSpec struct -----------------------------------------------------
// SSLSpec defines the SSL settings for a Coherence component over REST endpoint.
// +k8s:openapi-gen=true
type SSLSpec struct {
	// Enabled is a boolean flag indicating whether SSL is enabled on the Coherence management
	// over REST endpoint, the default is false (disabled).
// +optional
Enabled *bool `json:"enabled,omitempty"`
// Secrets is the name of the k8s secrets containing the Java key stores and password files.
	// This value MUST be provided if SSL is enabled on the Coherence management over REST endpoint.
// +optional
Secrets *string `json:"secrets,omitempty"`
	// KeyStore is the name of the Java key store file in the k8s secret to use as the SSL keystore
// when configuring component over REST to use SSL.
// +optional
KeyStore *string `json:"keyStore,omitempty"`
// KeyStorePasswordFile is the name of the file in the k8s secret containing the keystore
// password when configuring component over REST to use SSL.
// +optional
KeyStorePasswordFile *string `json:"keyStorePasswordFile,omitempty"`
	// KeyPasswordFile is the name of the file in the k8s secret containing the key
// password when configuring component over REST to use SSL.
// +optional
KeyPasswordFile *string `json:"keyPasswordFile,omitempty"`
// KeyStoreAlgorithm is the name of the keystore algorithm for the keystore in the k8s secret
// used when configuring component over REST to use SSL. If not set the default is SunX509
// +optional
KeyStoreAlgorithm *string `json:"keyStoreAlgorithm,omitempty"`
// KeyStoreProvider is the name of the keystore provider for the keystore in the k8s secret
// used when configuring component over REST to use SSL.
// +optional
KeyStoreProvider *string `json:"keyStoreProvider,omitempty"`
// KeyStoreType is the name of the Java keystore type for the keystore in the k8s secret used
// when configuring component over REST to use SSL. If not set the default is JKS.
// +optional
KeyStoreType *string `json:"keyStoreType,omitempty"`
// TrustStore is the name of the Java trust store file in the k8s secret to use as the SSL
// trust store when configuring component over REST to use SSL.
// +optional
TrustStore *string `json:"trustStore,omitempty"`
// TrustStorePasswordFile is the name of the file in the k8s secret containing the trust store
// password when configuring component over REST to use SSL.
// +optional
TrustStorePasswordFile *string `json:"trustStorePasswordFile,omitempty"`
// TrustStoreAlgorithm is the name of the keystore algorithm for the trust store in the k8s
// secret used when configuring component over REST to use SSL. If not set the default is SunX509.
// +optional
TrustStoreAlgorithm *string `json:"trustStoreAlgorithm,omitempty"`
// TrustStoreProvider is the name of the keystore provider for the trust store in the k8s
// secret used when configuring component over REST to use SSL.
// +optional
TrustStoreProvider *string `json:"trustStoreProvider,omitempty"`
// TrustStoreType is the name of the Java keystore type for the trust store in the k8s secret
// used when configuring component over REST to use SSL. If not set the default is JKS.
// +optional
TrustStoreType *string `json:"trustStoreType,omitempty"`
// RequireClientCert is a boolean flag indicating whether the client certificate will be
// authenticated by the server (two-way SSL) when configuring component over REST to use SSL.
// If not set the default is false
// +optional
RequireClientCert *bool `json:"requireClientCert,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this SSLSpec struct with any nil or not set values set
// by the corresponding value in the defaults SSLSpec struct.
func (in *SSLSpec) DeepCopyWithDefaults(defaults *SSLSpec) *SSLSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := SSLSpec{}
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.Secrets != nil {
clone.Secrets = in.Secrets
} else {
clone.Secrets = defaults.Secrets
}
if in.KeyStore != nil {
clone.KeyStore = in.KeyStore
} else {
clone.KeyStore = defaults.KeyStore
}
if in.KeyStorePasswordFile != nil {
clone.KeyStorePasswordFile = in.KeyStorePasswordFile
} else {
clone.KeyStorePasswordFile = defaults.KeyStorePasswordFile
}
if in.KeyPasswordFile != nil {
clone.KeyPasswordFile = in.KeyPasswordFile
} else {
clone.KeyPasswordFile = defaults.KeyPasswordFile
}
if in.KeyStoreAlgorithm != nil {
clone.KeyStoreAlgorithm = in.KeyStoreAlgorithm
} else {
clone.KeyStoreAlgorithm = defaults.KeyStoreAlgorithm
}
if in.KeyStoreProvider != nil {
clone.KeyStoreProvider = in.KeyStoreProvider
} else {
clone.KeyStoreProvider = defaults.KeyStoreProvider
}
if in.KeyStoreType != nil {
clone.KeyStoreType = in.KeyStoreType
} else {
clone.KeyStoreType = defaults.KeyStoreType
}
if in.TrustStore != nil {
clone.TrustStore = in.TrustStore
} else {
clone.TrustStore = defaults.TrustStore
}
if in.TrustStorePasswordFile != nil {
clone.TrustStorePasswordFile = in.TrustStorePasswordFile
} else {
clone.TrustStorePasswordFile = defaults.TrustStorePasswordFile
}
if in.TrustStoreAlgorithm != nil {
clone.TrustStoreAlgorithm = in.TrustStoreAlgorithm
} else {
clone.TrustStoreAlgorithm = defaults.TrustStoreAlgorithm
}
if in.TrustStoreProvider != nil {
clone.TrustStoreProvider = in.TrustStoreProvider
} else {
clone.TrustStoreProvider = defaults.TrustStoreProvider
}
if in.TrustStoreType != nil {
clone.TrustStoreType = in.TrustStoreType
} else {
clone.TrustStoreType = defaults.TrustStoreType
}
if in.RequireClientCert != nil {
clone.RequireClientCert = in.RequireClientCert
} else {
clone.RequireClientCert = defaults.RequireClientCert
}
return &clone
}
// ----- PortSpec struct ----------------------------------------------------
// PortSpec defines the port settings for a Coherence component
// +k8s:openapi-gen=true
type PortSpec struct {
// Port specifies the port used.
// +optional
Port int32 `json:"port,omitempty"`
// Protocol for container port. Must be UDP or TCP. Defaults to "TCP"
// +optional
Protocol *string `json:"protocol,omitempty"`
// Service specifies the service used to expose the port.
// +optional
Service *ServiceSpec `json:"service,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this PortSpec struct with any nil or not set values set
// by the corresponding value in the defaults PortSpec struct.
func (in *PortSpec) DeepCopyWithDefaults(defaults *PortSpec) *PortSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := PortSpec{}
if in.Port != 0 {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
if in.Protocol != nil {
clone.Protocol = in.Protocol
} else {
clone.Protocol = defaults.Protocol
}
if in.Service != nil {
clone.Service = in.Service
} else {
clone.Service = defaults.Service
}
return &clone
}
// ----- NamedPortSpec struct ----------------------------------------------------
// NamedPortSpec defines a named port for a Coherence component
// +k8s:openapi-gen=true
type NamedPortSpec struct {
	// Name specifies the name of the port.
// +optional
Name string `json:"name,omitempty"`
PortSpec `json:",inline"`
}
// DeepCopyWithDefaults returns a copy of this NamedPortSpec struct with any nil or not set values set
// by the corresponding value in the defaults NamedPortSpec struct.
func (in *NamedPortSpec) DeepCopyWithDefaults(defaults *NamedPortSpec) *NamedPortSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := NamedPortSpec{}
if in.Name != "" {
clone.Name = in.Name
} else {
clone.Name = defaults.Name
}
if in.Port != 0 {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
if in.Protocol != nil {
clone.Protocol = in.Protocol
} else {
clone.Protocol = defaults.Protocol
}
if in.Service != nil {
clone.Service = in.Service.DeepCopyWithDefaults(defaults.Service)
} else {
clone.Service = defaults.Service
}
return &clone
}
// MergeNamedPortSpecs merges two arrays of NamedPortSpec structs.
// Any NamedPortSpec instances in both arrays that share the same name will be merged;
// the fields set in the primary NamedPortSpec take precedence over those in the
// secondary NamedPortSpec.
func MergeNamedPortSpecs(primary, secondary []NamedPortSpec) []NamedPortSpec {
if primary == nil {
return secondary
}
if secondary == nil {
return primary
}
if len(primary) == 0 && len(secondary) == 0 {
return []NamedPortSpec{}
}
var mr []NamedPortSpec
mr = append(mr, primary...)
for _, p := range secondary {
found := false
for i, pp := range primary {
if pp.Name == p.Name {
clone := pp.DeepCopyWithDefaults(&p)
mr[i] = *clone
found = true
break
}
}
if !found {
mr = append(mr, p)
}
}
return mr
}
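// exampleMergeNamedPortSpecs is a hypothetical sketch (not part of the Operator API)
// showing the merge semantics above: ports sharing a name are combined with the primary
// values winning, and ports that only exist in the secondary slice are appended.
func exampleMergeNamedPortSpecs() []NamedPortSpec {
	tcp := "TCP"
	primary := []NamedPortSpec{{Name: "metrics", PortSpec: PortSpec{Port: 9612}}}
	secondary := []NamedPortSpec{
		{Name: "metrics", PortSpec: PortSpec{Port: 9095, Protocol: &tcp}},
		{Name: "extend", PortSpec: PortSpec{Port: 20000}},
	}
	// Result: "metrics" keeps Port 9612 from primary but inherits Protocol "TCP",
	// and "extend" from the secondary slice is appended unchanged.
	return MergeNamedPortSpecs(primary, secondary)
}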
// ----- JvmDebugSpec struct ---------------------------------------------------
// The JVM Debug specific configuration.
// See:
// +k8s:openapi-gen=true
type JvmDebugSpec struct {
// Enabled is a flag to enable or disable running the JVM in debug mode. Default is disabled.
// +optional
Enabled *bool `json:"enabled,omitempty"`
// A boolean true if the target VM is to be suspended immediately before the main class is loaded;
// false otherwise. The default value is false.
// +optional
Suspend *bool `json:"suspend,omitempty"`
// Attach specifies the address of the debugger that the JVM should attempt to connect back to
// instead of listening on a port.
// +optional
Attach *string `json:"attach,omitempty"`
// The port that the debugger will listen on; the default is 5005.
// +optional
Port *int32 `json:"port,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JvmDebugSpec struct with any nil or not set values set
// by the corresponding value in the defaults JvmDebugSpec struct.
func (in *JvmDebugSpec) DeepCopyWithDefaults(defaults *JvmDebugSpec) *JvmDebugSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JvmDebugSpec{}
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.Suspend != nil {
clone.Suspend = in.Suspend
} else {
clone.Suspend = defaults.Suspend
}
if in.Port != nil {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
if in.Attach != nil {
clone.Attach = in.Attach
} else {
clone.Attach = defaults.Attach
}
return &clone
}
// ----- JVM GC struct ------------------------------------------------------
// Options for managing the JVM garbage collector.
type JvmGarbageCollectorSpec struct {
// The name of the JVM garbage collector to use.
// G1 - adds the -XX:+UseG1GC option
// CMS - adds the -XX:+UseConcMarkSweepGC option
// Parallel - adds the -XX:+UseParallelGC
// Default - use the JVMs default collector
// The field value is case insensitive
// If not set G1 is used.
// If set to a value other than those above then
// the default collector for the JVM will be used.
// +optional
	Collector *string `json:"collector,omitempty"`
// Args specifies the GC options to pass to the JVM.
// +optional
Args []string `json:"args,omitempty"`
// Enable the following GC logging args -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps
// -XX:+PrintHeapAtGC -XX:+PrintTenuringDistribution -XX:+PrintGCApplicationStoppedTime
// -XX:+PrintGCApplicationConcurrentTime
// Default is true
// +optional
Logging *bool `json:"logging,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JvmGarbageCollectorSpec struct with any nil or not set values set
// by the corresponding value in the defaults JvmGarbageCollectorSpec struct.
func (in *JvmGarbageCollectorSpec) DeepCopyWithDefaults(defaults *JvmGarbageCollectorSpec) *JvmGarbageCollectorSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JvmGarbageCollectorSpec{}
if in.Collector != nil {
clone.Collector = in.Collector
} else {
clone.Collector = defaults.Collector
}
if in.Args != nil {
clone.Args = in.Args
} else {
clone.Args = defaults.Args
}
if in.Logging != nil {
clone.Logging = in.Logging
} else {
clone.Logging = defaults.Logging
}
return &clone
}
// ----- JVM MemoryGC struct ------------------------------------------------
// Options for managing the JVM memory.
type JvmMemorySpec struct {
// HeapSize is the min/max heap value to pass to the JVM.
// The format should be the same as that used for Java's -Xms and -Xmx JVM options.
// If not set the JVM defaults are used.
// +optional
HeapSize *string `json:"heapSize,omitempty"`
	// StackSize is the stack size value to pass to the JVM.
// The format should be the same as that used for Java's -Xss JVM option.
// If not set the JVM defaults are used.
// +optional
StackSize *string `json:"stackSize,omitempty"`
// MetaspaceSize is the min/max metaspace size to pass to the JVM.
// This sets the -XX:MetaspaceSize and -XX:MaxMetaspaceSize=size JVM options.
// If not set the JVM defaults are used.
// +optional
MetaspaceSize *string `json:"metaspaceSize,omitempty"`
// DirectMemorySize sets the maximum total size (in bytes) of the New I/O (the java.nio package) direct-buffer
// allocations. This value sets the -XX:MaxDirectMemorySize JVM option.
// If not set the JVM defaults are used.
// +optional
DirectMemorySize *string `json:"directMemorySize,omitempty"`
	// Adds the -XX:NativeMemoryTracking=mode JVM options
	// where mode is one of "off", "summary" or "detail", the default is "summary".
	// If not set to "off" the -XX:+PrintNMTStatistics option is also added.
// +optional
NativeMemoryTracking *string `json:"nativeMemoryTracking,omitempty"`
// Configure the JVM behaviour when an OutOfMemoryError occurs.
// +optional
OnOutOfMemory *JvmOutOfMemorySpec `json:"onOutOfMemory,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JvmMemorySpec struct with any nil or not set values set
// by the corresponding value in the defaults JvmMemorySpec struct.
func (in *JvmMemorySpec) DeepCopyWithDefaults(defaults *JvmMemorySpec) *JvmMemorySpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JvmMemorySpec{}
clone.OnOutOfMemory = in.OnOutOfMemory.DeepCopyWithDefaults(defaults.OnOutOfMemory)
if in.HeapSize != nil {
clone.HeapSize = in.HeapSize
} else {
clone.HeapSize = defaults.HeapSize
}
if in.StackSize != nil {
clone.StackSize = in.StackSize
} else {
clone.StackSize = defaults.StackSize
}
if in.MetaspaceSize != nil {
clone.MetaspaceSize = in.MetaspaceSize
} else {
clone.MetaspaceSize = defaults.MetaspaceSize
}
if in.DirectMemorySize != nil {
clone.DirectMemorySize = in.DirectMemorySize
} else {
clone.DirectMemorySize = defaults.DirectMemorySize
}
if in.NativeMemoryTracking != nil {
clone.NativeMemoryTracking = in.NativeMemoryTracking
} else {
clone.NativeMemoryTracking = defaults.NativeMemoryTracking
}
return &clone
}
// ----- JVM Out Of Memory struct -------------------------------------------
// Options for managing the JVM behaviour when an OutOfMemoryError occurs.
type JvmOutOfMemorySpec struct {
// If set to true the JVM will exit when an OOM error occurs.
// Default is true
// +optional
Exit *bool `json:"exit,omitempty"`
// If set to true adds the -XX:+HeapDumpOnOutOfMemoryError JVM option to cause a heap dump
// to be created when an OOM error occurs.
// Default is true
// +optional
HeapDump *bool `json:"heapDump,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JvmOutOfMemorySpec struct with any nil or not set values set
// by the corresponding value in the defaults JvmOutOfMemorySpec struct.
func (in *JvmOutOfMemorySpec) DeepCopyWithDefaults(defaults *JvmOutOfMemorySpec) *JvmOutOfMemorySpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JvmOutOfMemorySpec{}
if in.Exit != nil {
clone.Exit = in.Exit
} else {
clone.Exit = defaults.Exit
}
if in.HeapDump != nil {
clone.HeapDump = in.HeapDump
} else {
clone.HeapDump = defaults.HeapDump
}
return &clone
}
// ----- JvmJmxmpSpec struct -------------------------------------------------------
// Options for configuring JMX using JMXMP.
type JvmJmxmpSpec struct {
// If set to true the JMXMP support will be enabled.
// Default is false
// +optional
Enabled *bool `json:"enabled,omitempty"`
	// The port that the JMXMP MBeanServer should bind to.
	// If not set the default port is 9099.
// +optional
Port *int32 `json:"port,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this JvmJmxmpSpec struct with any nil or not set values set
// by the corresponding value in the defaults JvmJmxmpSpec struct.
func (in *JvmJmxmpSpec) DeepCopyWithDefaults(defaults *JvmJmxmpSpec) *JvmJmxmpSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := JvmJmxmpSpec{}
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.Port != nil {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
return &clone
}
// ----- PortSpecWithSSL struct ----------------------------------------------------
// PortSpecWithSSL defines a port with SSL settings for a Coherence component
// +k8s:openapi-gen=true
type PortSpecWithSSL struct {
// Enable or disable flag.
// +optional
Enabled *bool `json:"enabled,omitempty"`
// The port to bind to.
// +optional
Port *int32 `json:"port,omitempty"`
// SSL configures SSL settings for a Coherence component
// +optional
SSL *SSLSpec `json:"ssl,omitempty"`
}
// IsSSLEnabled returns true if this port is SSL enabled
func (in *PortSpecWithSSL) IsSSLEnabled() bool {
return in != nil && in.Enabled != nil && *in.Enabled
}
// DeepCopyWithDefaults returns a copy of this PortSpecWithSSL struct with any nil or not set values set
// by the corresponding value in the defaults PortSpecWithSSL struct.
func (in *PortSpecWithSSL) DeepCopyWithDefaults(defaults *PortSpecWithSSL) *PortSpecWithSSL {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy() | if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.Port != nil {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
if in.SSL != nil {
clone.SSL = in.SSL
} else {
clone.SSL = defaults.SSL
}
return &clone
}
// ----- ServiceSpec struct -------------------------------------------------
// ServiceSpec defines the settings for a Service
// +k8s:openapi-gen=true
type ServiceSpec struct {
// Enabled controls whether to create the service yaml or not
// +optional
Enabled *bool `json:"enabled,omitempty"`
// An optional name to use to override the generated service name.
// +optional
Name *string `json:"name,omitempty"`
// The service port value
// +optional
Port *int32 `json:"port,omitempty"`
// Type is the K8s service type (typically ClusterIP or LoadBalancer)
// The default is "ClusterIP".
// +optional
Type *corev1.ServiceType `json:"type,omitempty"`
// LoadBalancerIP is the IP address of the load balancer
// +optional
LoadBalancerIP *string `json:"loadBalancerIP,omitempty"`
// The extra labels to add to the service.
// More info: http://kubernetes.io/docs/user-guide/labels
// +optional
Labels map[string]string `json:"labels,omitempty"`
// Annotations is free form yaml that will be added to the service annotations
// +optional
Annotations map[string]string `json:"annotations,omitempty"`
// Supports "ClientIP" and "None". Used to maintain session affinity.
// Enable client IP based session affinity.
// Must be ClientIP or None.
// Defaults to None.
// More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
// +optional
SessionAffinity *corev1.ServiceAffinity `json:"sessionAffinity,omitempty"`
	// If specified and supported by the platform, traffic through the cloud-provider
	// load-balancer will be restricted to the specified client IPs. This field will be
	// ignored if the cloud-provider does not support the feature.
// More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
// +listType=atomic
// +optional
LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty"`
// externalName is the external reference that kubedns or equivalent will
// return as a CNAME record for this service. No proxying will be involved.
// Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
// and requires Type to be ExternalName.
// +optional
ExternalName *string `json:"externalName,omitempty"`
// externalTrafficPolicy denotes if this Service desires to route external
// traffic to node-local or cluster-wide endpoints. "Local" preserves the
	// client source IP and avoids a second hop for LoadBalancer and NodePort
// type services, but risks potentially imbalanced traffic spreading.
// "Cluster" obscures the client source IP and may cause a second hop to
// another node, but should have good overall load-spreading.
// +optional
ExternalTrafficPolicy *corev1.ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty"`
// healthCheckNodePort specifies the healthcheck nodePort for the service.
// If not specified, HealthCheckNodePort is created by the service api
// backend with the allocated nodePort. Will use user-specified nodePort value
	// if specified by the client. Only takes effect when Type is set to LoadBalancer
// and ExternalTrafficPolicy is set to Local.
// +optional
HealthCheckNodePort *int32 `json:"healthCheckNodePort,omitempty"`
// publishNotReadyAddresses, when set to true, indicates that DNS implementations
// must publish the notReadyAddresses of subsets for the Endpoints associated with
// the Service. The default value is false.
// The primary use case for setting this field is to use a StatefulSet's Headless Service
// to propagate SRV records for its Pods without respect to their readiness for purpose
// of peer discovery.
// +optional
PublishNotReadyAddresses *bool `json:"publishNotReadyAddresses,omitempty"`
// sessionAffinityConfig contains the configurations of session affinity.
// +optional
SessionAffinityConfig *corev1.SessionAffinityConfig `json:"sessionAffinityConfig,omitempty"`
}
// Set the Type of the service.
func (in *ServiceSpec) SetServiceType(t corev1.ServiceType) {
if in != nil {
in.Type = &t
}
}
// DeepCopyWithDefaults returns a copy of this ServiceSpec struct with any nil or not set values set
// by the corresponding value in the defaults ServiceSpec struct.
func (in *ServiceSpec) DeepCopyWithDefaults(defaults *ServiceSpec) *ServiceSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ServiceSpec{}
// Annotations are a map and are merged
clone.Annotations = MergeMap(in.Annotations, defaults.Annotations)
// Labels are a map and are merged
clone.Labels = MergeMap(in.Labels, defaults.Labels)
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.Type != nil {
clone.Type = in.Type
} else {
clone.Type = defaults.Type
}
if in.Name != nil {
clone.Name = in.Name
} else {
clone.Name = defaults.Name
}
if in.Port != nil {
clone.Port = in.Port
} else {
clone.Port = defaults.Port
}
if in.LoadBalancerIP != nil {
clone.LoadBalancerIP = in.LoadBalancerIP
} else {
clone.LoadBalancerIP = defaults.LoadBalancerIP
}
if in.SessionAffinity != nil {
clone.SessionAffinity = in.SessionAffinity
} else {
clone.SessionAffinity = defaults.SessionAffinity
}
if in.LoadBalancerSourceRanges != nil {
clone.LoadBalancerSourceRanges = in.LoadBalancerSourceRanges
} else {
clone.LoadBalancerSourceRanges = defaults.LoadBalancerSourceRanges
}
if in.ExternalName != nil {
clone.ExternalName = in.ExternalName
} else {
clone.ExternalName = defaults.ExternalName
}
if in.ExternalTrafficPolicy != nil {
clone.ExternalTrafficPolicy = in.ExternalTrafficPolicy
} else {
clone.ExternalTrafficPolicy = defaults.ExternalTrafficPolicy
}
if in.HealthCheckNodePort != nil {
clone.HealthCheckNodePort = in.HealthCheckNodePort
} else {
clone.HealthCheckNodePort = defaults.HealthCheckNodePort
}
if in.PublishNotReadyAddresses != nil {
clone.PublishNotReadyAddresses = in.PublishNotReadyAddresses
} else {
clone.PublishNotReadyAddresses = defaults.PublishNotReadyAddresses
}
if in.SessionAffinityConfig != nil {
clone.SessionAffinityConfig = in.SessionAffinityConfig
} else {
clone.SessionAffinityConfig = defaults.SessionAffinityConfig
}
return &clone
}
// ----- ScalingSpec -----------------------------------------------------
// The configuration to control safe scaling.
type ScalingSpec struct {
// ScalingPolicy describes how the replicas of the cluster role will be scaled.
// The default if not specified is based upon the value of the StorageEnabled field.
// If StorageEnabled field is not specified or is true the default scaling will be safe, if StorageEnabled is
// set to false the default scaling will be parallel.
// +optional
Policy *ScalingPolicy `json:"policy,omitempty"`
// The probe to use to determine whether a role is Status HA.
// If not set the default handler will be used.
	// In most use-cases the default handler would suffice, but in
	// advanced use-cases, where the application code has a different
	// concept of Status HA than just checking Coherence services,
	// a different handler may be specified.
// +optional
Probe *ScalingProbe `json:"probe,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this ScalingSpec struct with any nil or not set values set
// by the corresponding value in the defaults ScalingSpec struct.
func (in *ScalingSpec) DeepCopyWithDefaults(defaults *ScalingSpec) *ScalingSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ScalingSpec{}
clone.Probe = in.Probe.DeepCopyWithDefaults(defaults.Probe)
if in.Policy != nil {
clone.Policy = in.Policy
} else {
clone.Policy = defaults.Policy
}
return &clone
}
// ----- ScalingProbe ----------------------------------------------------
// ScalingProbe is the handler that will be used to determine how to check for StatusHA in a CoherenceRole.
// StatusHA checking is primarily used during scaling of a role; a role must be in a safe Status HA state
// before scaling takes place. If the StatusHA handler is disabled for a role (by specifically setting Enabled
// to false) then no check will take place and the role will be assumed to be safe.
// +k8s:openapi-gen=true
type ScalingProbe struct {
corev1.Handler `json:",inline"`
// Number of seconds after which the handler times out (only applies to http and tcp handlers).
// Defaults to 1 second. Minimum value is 1.
// +optional
TimeoutSeconds *int `json:"timeoutSeconds,omitempty"`
}
// GetTimeout returns the timeout value as a time.Duration, defaulting to one second.
func (in *ScalingProbe) GetTimeout() time.Duration {
if in == nil || in.TimeoutSeconds == nil || *in.TimeoutSeconds <= 0 {
return time.Second
}
return time.Second * time.Duration(*in.TimeoutSeconds)
}
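// exampleScalingProbeTimeout is a hypothetical sketch (not part of the Operator API)
// showing the defaulting above: a nil probe, or an unset or non-positive TimeoutSeconds,
// yields the one second default.
func exampleScalingProbeTimeout() (time.Duration, time.Duration) {
	var unset *ScalingProbe
	ten := 10
	return unset.GetTimeout(), (&ScalingProbe{TimeoutSeconds: &ten}).GetTimeout() // 1s, 10s
}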
// DeepCopyWithDefaults returns a copy of this ScalingProbe struct with any nil or not set values set
// by the corresponding value in the defaults ScalingProbe struct.
func (in *ScalingProbe) DeepCopyWithDefaults(defaults *ScalingProbe) *ScalingProbe {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ScalingProbe{}
if in.TimeoutSeconds != nil {
clone.TimeoutSeconds = in.TimeoutSeconds
} else {
clone.TimeoutSeconds = defaults.TimeoutSeconds
}
if in.Handler.HTTPGet != nil {
clone.Handler.HTTPGet = in.Handler.HTTPGet
} else {
clone.Handler.HTTPGet = defaults.Handler.HTTPGet
}
if in.Handler.TCPSocket != nil {
clone.Handler.TCPSocket = in.Handler.TCPSocket
} else {
clone.Handler.TCPSocket = defaults.Handler.TCPSocket
}
if in.Handler.Exec != nil {
clone.Handler.Exec = in.Handler.Exec
} else {
clone.Handler.Exec = defaults.Handler.Exec
}
return &clone
}
// ----- ReadinessProbeSpec struct ------------------------------------------
// ReadinessProbeSpec defines the settings for the Coherence Pod readiness probe
// +k8s:openapi-gen=true
type ReadinessProbeSpec struct {
// The action taken to determine the health of a container
ProbeHandler `json:",inline"`
	// Number of seconds after the container has started before the probe is initiated.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
InitialDelaySeconds *int32 `json:"initialDelaySeconds,omitempty"`
// Number of seconds after which the probe times out.
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes
// +optional
TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty"`
// How often (in seconds) to perform the probe.
// +optional
PeriodSeconds *int32 `json:"periodSeconds,omitempty"`
// Minimum consecutive successes for the probe to be considered successful after having failed.
// +optional
SuccessThreshold *int32 `json:"successThreshold,omitempty"`
// Minimum consecutive failures for the probe to be considered failed after having succeeded.
// +optional
FailureThreshold *int32 `json:"failureThreshold,omitempty"`
}
type ProbeHandler struct {
// One and only one of the following should be specified.
// Exec specifies the action to take.
// +optional
Exec *corev1.ExecAction `json:"exec,omitempty"`
// HTTPGet specifies the http request to perform.
// +optional
HTTPGet *corev1.HTTPGetAction `json:"httpGet,omitempty"`
// TCPSocket specifies an action involving a TCP port.
// TCP hooks not yet supported
// +optional
TCPSocket *corev1.TCPSocketAction `json:"tcpSocket,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this ReadinessProbeSpec struct with any nil or not set values set
// by the corresponding value in the defaults ReadinessProbeSpec struct.
func (in *ReadinessProbeSpec) DeepCopyWithDefaults(defaults *ReadinessProbeSpec) *ReadinessProbeSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := ReadinessProbeSpec{}
if in.InitialDelaySeconds != nil {
clone.InitialDelaySeconds = in.InitialDelaySeconds
} else {
clone.InitialDelaySeconds = defaults.InitialDelaySeconds
}
if in.TimeoutSeconds != nil {
clone.TimeoutSeconds = in.TimeoutSeconds
} else {
clone.TimeoutSeconds = defaults.TimeoutSeconds
}
if in.PeriodSeconds != nil {
clone.PeriodSeconds = in.PeriodSeconds
} else {
clone.PeriodSeconds = defaults.PeriodSeconds
}
if in.SuccessThreshold != nil {
clone.SuccessThreshold = in.SuccessThreshold
} else {
clone.SuccessThreshold = defaults.SuccessThreshold
}
if in.FailureThreshold != nil {
clone.FailureThreshold = in.FailureThreshold
} else {
clone.FailureThreshold = defaults.FailureThreshold
}
return &clone
}
// ----- FluentdSpec struct -------------------------------------------------
// FluentdSpec defines the settings for the fluentd image
// +k8s:openapi-gen=true
type FluentdSpec struct {
ImageSpec `json:",inline"`
// Controls whether or not log capture via a Fluentd sidecar container to an EFK stack is enabled.
	// If this flag is set to true it is expected that the coherence-monitoring-config secret exists in
// the namespace that the cluster is being deployed to. This secret is either created by the
// Coherence Operator Helm chart if it was installed with the correct parameters or it should
// have already been created manually.
Enabled *bool `json:"enabled,omitempty"`
	// The Fluentd configuration file configuring the source for application logs.
// +optional
ConfigFile *string `json:"configFile,omitempty"`
// This value should be source.tag from fluentd.application.configFile.
// +optional
Tag *string `json:"tag,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this FluentdSpec struct with any nil or not set values set
// by the corresponding value in the defaults FluentdSpec struct.
func (in *FluentdSpec) DeepCopyWithDefaults(defaults *FluentdSpec) *FluentdSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := FluentdSpec{}
clone.ImageSpec = *in.ImageSpec.DeepCopyWithDefaults(&defaults.ImageSpec)
if in.Enabled != nil {
clone.Enabled = in.Enabled
} else {
clone.Enabled = defaults.Enabled
}
if in.ConfigFile != nil {
clone.ConfigFile = in.ConfigFile
} else {
clone.ConfigFile = defaults.ConfigFile
}
if in.Tag != nil {
clone.Tag = in.Tag
} else {
clone.Tag = defaults.Tag
}
return &clone
}
// ----- ScalingPolicy type -------------------------------------------------
// ScalingPolicy describes a policy for scaling a cluster role
type ScalingPolicy string
// Scaling policy constants
const (
// Safe means that a role will be scaled up or down in a safe manner to ensure no data loss.
SafeScaling ScalingPolicy = "Safe"
// Parallel means that a role will be scaled up or down by adding or removing members in parallel.
// If the members of the role are storage enabled then this could cause data loss
ParallelScaling ScalingPolicy = "Parallel"
// ParallelUpSafeDown means that a role will be scaled up by adding or removing members in parallel
// but will be scaled down in a safe manner to ensure no data loss.
ParallelUpSafeDownScaling ScalingPolicy = "ParallelUpSafeDown"
)
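// exampleScalingPolicy is a hypothetical sketch (not part of the Operator API) showing how
// one of the policies above would be selected on a ScalingSpec; ParallelUpSafeDown scales
// up in parallel for speed but scales down safely to avoid data loss.
func exampleScalingPolicy() *ScalingSpec {
	policy := ParallelUpSafeDownScaling
	return &ScalingSpec{Policy: &policy}
}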
// ----- LocalObjectReference -----------------------------------------------
// LocalObjectReference contains enough information to let you locate the
// referenced object inside the same namespace.
type LocalObjectReference struct {
// Name of the referent.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
// +optional
Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"`
}
// ----- NetworkSpec --------------------------------------------------------
// NetworkSpec configures various networking and DNS settings for Pods in a role.
// +k8s:openapi-gen=true
type NetworkSpec struct {
// Specifies the DNS parameters of a pod. Parameters specified here will be merged to the
// generated DNS configuration based on DNSPolicy.
// +optional
DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty"`
// Set DNS policy for the pod. Defaults to "ClusterFirst". Valid values are 'ClusterFirstWithHostNet',
// 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy
// selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS
// policy explicitly to 'ClusterFirstWithHostNet'.
// +optional
DNSPolicy *string `json:"dnsPolicy,omitempty"`
// HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified.
// This is only valid for non-hostNetwork pods.
// +listType=map
// +listMapKey=ip
// +optional
HostAliases []corev1.HostAlias `json:"hostAliases,omitempty"`
// Host networking requested for this pod. Use the host's network namespace. If this option is set,
// the ports that will be used must be specified. Default to false.
// +optional
HostNetwork *bool `json:"hostNetwork,omitempty"`
	// Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
// +optional
Hostname *string `json:"hostname,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this NetworkSpec struct with any nil or not set values set
// by the corresponding value in the defaults NetworkSpec struct.
func (in *NetworkSpec) DeepCopyWithDefaults(defaults *NetworkSpec) *NetworkSpec {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := NetworkSpec{}
clone.DNSConfig = in.DNSConfig.DeepCopyWithDefaults(defaults.DNSConfig)
if in.DNSPolicy != nil {
clone.DNSPolicy = in.DNSPolicy
} else {
clone.DNSPolicy = defaults.DNSPolicy
}
// merge HostAlias list
m := make(map[string]corev1.HostAlias)
if defaults.HostAliases != nil {
for _, h := range defaults.HostAliases {
m[h.IP] = h
}
}
if in.HostAliases != nil {
for _, h := range in.HostAliases {
m[h.IP] = h
}
}
if len(m) > 0 {
i := 0
clone.HostAliases = make([]corev1.HostAlias, len(m))
for _, h := range m {
clone.HostAliases[i] = h
i++
}
}
if in.HostNetwork != nil {
clone.HostNetwork = in.HostNetwork
} else {
clone.HostNetwork = defaults.HostNetwork
}
if in.Hostname != nil {
clone.Hostname = in.Hostname
} else {
clone.Hostname = defaults.Hostname
}
return &clone
}
// ----- PodDNSConfig -------------------------------------------------------
// PodDNSConfig defines the DNS parameters of a pod in addition to
// those generated from DNSPolicy.
// +k8s:openapi-gen=true
type PodDNSConfig struct {
// A list of DNS name server IP addresses.
// This will be appended to the base nameservers generated from DNSPolicy.
// Duplicated nameservers will be removed.
// +listType=atomic
// +optional
Nameservers []string `json:"nameservers,omitempty"`
// A list of DNS search domains for host-name lookup.
// This will be appended to the base search paths generated from DNSPolicy.
// Duplicated search paths will be removed.
// +listType=atomic
// +optional
Searches []string `json:"searches,omitempty"`
// A list of DNS resolver options.
// This will be merged with the base options generated from DNSPolicy.
// Duplicated entries will be removed. Resolution options given in Options
// will override those that appear in the base DNSPolicy.
// +listType=map
// +listMapKey=name
// +optional
Options []corev1.PodDNSConfigOption `json:"options,omitempty"`
}
// DeepCopyWithDefaults returns a copy of this PodDNSConfig struct with any nil or not set values set
// by the corresponding value in the defaults PodDNSConfig struct.
func (in *PodDNSConfig) DeepCopyWithDefaults(defaults *PodDNSConfig) *PodDNSConfig {
if in == nil {
if defaults != nil {
return defaults.DeepCopy()
}
return nil
}
if defaults == nil {
return in.DeepCopy()
}
clone := PodDNSConfig{}
// merge Options list
m := make(map[string]corev1.PodDNSConfigOption)
if defaults.Options != nil {
for _, opt := range defaults.Options {
m[opt.Name] = opt
}
}
if in.Options != nil {
for _, opt := range in.Options {
m[opt.Name] = opt
}
}
if len(m) > 0 {
i := 0
clone.Options = make([]corev1.PodDNSConfigOption, len(m))
for _, opt := range m {
clone.Options[i] = opt
i++
}
}
if in.Nameservers != nil {
clone.Nameservers = []string{}
clone.Nameservers = append(clone.Nameservers, defaults.Nameservers...)
clone.Nameservers = append(clone.Nameservers, in.Nameservers...)
} else if defaults.Nameservers != nil {
clone.Nameservers = []string{}
clone.Nameservers = append(clone.Nameservers, defaults.Nameservers...)
}
if in.Searches != nil {
clone.Searches = []string{}
clone.Searches = append(clone.Searches, defaults.Searches...)
clone.Searches = append(clone.Searches, in.Searches...)
} else if defaults.Searches != nil {
clone.Searches = []string{}
clone.Searches = append(clone.Searches, defaults.Searches...)
}
return &clone
}
// ----- StartQuorum --------------------------------------------------------
// StartQuorum defines the order that roles will be created when initially
// creating a new cluster.
// +k8s:openapi-gen=true
type StartQuorum struct {
	// The role to start first.
// +optional
Role string `json:"role"`
// The number of the dependency Pods that should have been started
	// before this role will be started.
// +optional
PodCount int32 `json:"podCount,omitempty"`
}
// ----- StartStatus --------------------------------------------------------
// StartQuorumStatus tracks the state of a role's start quorums.
// +k8s:openapi-gen=true
type StartQuorumStatus struct {
// The inlined start quorum.
StartQuorum `json:",inline"`
// Whether this quorum's condition has been met
Ready bool `json:"ready"`
} | }
clone := PortSpecWithSSL{}
|
test_utils.go | package go_tests
import (
"errors"
"fmt"
"github.com/cloudevents/sdk-go/v2"
"github.com/google/uuid"
"github.com/imroc/req"
"github.com/keptn/go-utils/pkg/api/models"
"github.com/keptn/go-utils/pkg/common/osutils"
keptncommon "github.com/keptn/go-utils/pkg/lib/keptn"
keptnv2 "github.com/keptn/go-utils/pkg/lib/v0_2_0"
"github.com/keptn/kubernetes-utils/pkg"
"io/ioutil"
"net/http"
"os"
"strings"
"testing"
)
const (
KeptnSpecVersion = "0.2.0"
KeptnNamespaceEnvVar = "KEPTN_NAMESPACE"
DefaultKeptnNamespace = "keptn"
)
type APIEventSender struct {
}
func (sender *APIEventSender) SendEvent(event v2.Event) error {
_, err := ApiPOSTRequest("/v1/event", event)
return err
}
func CreateProject(projectName, shipyardFilePath string, recreateIfAlreadyThere bool) error {
resp, err := ApiGETRequest("/controlPlane/v1/project/" + projectName)
if err != nil {
return err
}
if resp.Response().StatusCode != http.StatusNotFound {
if recreateIfAlreadyThere {
// delete project if it exists
_, err = ExecuteCommand(fmt.Sprintf("keptn delete project %s", projectName))
if err != nil {
return err
}
} else {
return errors.New("project already exists")
}
}
_, err = ExecuteCommand(fmt.Sprintf("keptn create project %s --shipyard=./%s", projectName, shipyardFilePath))
return err
}
func TriggerSequence(projectName, serviceName, stageName, sequenceName string, eventData keptncommon.EventProperties) (string, error) {
source := "golang-test"
eventType := keptnv2.GetTriggeredEventType(stageName + "." + sequenceName)
if eventData == nil {
eventData = &keptnv2.EventData{}
}
eventData.SetProject(projectName)
eventData.SetService(serviceName)
eventData.SetStage(stageName)
resp, err := ApiPOSTRequest("/v1/event", models.KeptnContextExtendedCE{
Contenttype: "application/json",
Data: eventData,
ID: uuid.NewString(),
Shkeptnspecversion: KeptnSpecVersion,
Source: &source,
Specversion: "1.0",
Type: &eventType,
})
if err != nil {
return "", err
}
context := &models.EventContext{}
err = resp.ToJSON(context)
if err != nil {
return "", err
}
return *context.KeptnContext, nil
}
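// Hypothetical usage sketch (the project, service, stage and sequence names are
// illustrative only): trigger a sequence and then look up the triggered event.
//
//	keptnContext, err := TriggerSequence("my-project", "my-service", "dev", "delivery", nil)
//	if err != nil {
//		t.Fatal(err)
//	}
//	event, err := GetLatestEventOfType(keptnContext, "my-project", "dev",
//		keptnv2.GetTriggeredEventType("dev.delivery"))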
func ApiDELETERequest(path string) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken) | r, err := req.Delete(keptnAPIURL+path, authHeader)
if err != nil {
return nil, err
}
return r, nil
}
func getAuthHeader(apiToken string) req.Header {
authHeader := req.Header{
"Accept": "application/json",
"x-token": apiToken,
}
return authHeader
}
func ApiGETRequest(path string) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken)
r, err := req.Get(keptnAPIURL+path, authHeader)
if err != nil {
return nil, err
}
return r, nil
}
func ApiPOSTRequest(path string, payload interface{}) (*req.Resp, error) {
apiToken, keptnAPIURL, err := GetApiCredentials()
if err != nil {
return nil, err
}
authHeader := getAuthHeader(apiToken)
r, err := req.Post(keptnAPIURL+path, authHeader, req.BodyJSON(payload))
if err != nil {
return nil, err
}
return r, nil
}
func GetApiCredentials() (string, string, error) {
apiToken, err := keptnkubeutils.GetKeptnAPITokenFromSecret(false, GetKeptnNameSpaceFromEnv(), "keptn-api-token")
if err != nil {
return "", "", err
}
keptnAPIURL := os.Getenv("KEPTN_ENDPOINT")
if keptnAPIURL == "" {
serviceIP, err := keptnkubeutils.GetKeptnEndpointFromService(false, GetKeptnNameSpaceFromEnv(), "api-gateway-nginx")
if err != nil {
return "", "", err
}
keptnAPIURL = "http://" + serviceIP + "/api"
}
return apiToken, keptnAPIURL, nil
}
func ScaleDownUniform(deployments []string) error {
for _, deployment := range deployments {
if err := keptnkubeutils.ScaleDeployment(false, deployment, GetKeptnNameSpaceFromEnv(), 0); err != nil {
// log the error but continue
fmt.Println("could not scale down deployment: " + err.Error())
}
}
return nil
}
func ScaleUpUniform(deployments []string) error {
for _, deployment := range deployments {
if err := keptnkubeutils.ScaleDeployment(false, deployment, GetKeptnNameSpaceFromEnv(), 1); err != nil {
// log the error but continue
fmt.Println("could not scale up deployment: " + err.Error())
}
}
return nil
}
func RestartPod(deploymentName string) error {
return keptnkubeutils.RestartPodsWithSelector(false, GetKeptnNameSpaceFromEnv(), "app.kubernetes.io/name="+deploymentName)
}
func WaitForPodOfDeployment(deploymentName string) error {
return keptnkubeutils.WaitForDeploymentToBeRolledOut(false, deploymentName, GetKeptnNameSpaceFromEnv())
}
func CreateTmpShipyardFile(shipyardContent string) (string, error) {
return CreateTmpFile("shipyard-*.yaml", shipyardContent)
}
func CreateTmpFile(fileNamePattern, fileContent string) (string, error) {
file, err := ioutil.TempFile(".", fileNamePattern)
if err != nil {
return "", err
}
if err := ioutil.WriteFile(file.Name(), []byte(fileContent), os.ModeAppend); err != nil {
os.Remove(file.Name())
return "", err
}
return file.Name(), nil
}
func ExecuteCommand(cmd string) (string, error) {
split := strings.Split(cmd, " ")
if len(split) == 0 {
return "", errors.New("invalid command")
}
return keptnkubeutils.ExecuteCommand(split[0], split[1:])
}
func GetKeptnNameSpaceFromEnv() string {
return osutils.GetOSEnvOrDefault(KeptnNamespaceEnvVar, DefaultKeptnNamespace)
}
func GetLatestEventOfType(keptnContext, projectName, stage, eventType string) (*models.KeptnContextExtendedCE, error) {
resp, err := ApiGETRequest("/mongodb-datastore/event?project=" + projectName + "&keptnContext=" + keptnContext + "&stage=" + stage + "&type=" + eventType)
if err != nil {
return nil, err
}
events := &models.Events{}
if err := resp.ToJSON(events); err != nil {
return nil, err
}
if len(events.Events) > 0 {
return events.Events[0], nil
}
return nil, nil
}
func GetEventTraceForContext(keptnContext, projectName string) ([]*models.KeptnContextExtendedCE, error) {
resp, err := ApiGETRequest("/mongodb-datastore/event?project=" + projectName + "&keptnContext=" + keptnContext)
if err != nil {
return nil, err
}
events := &models.Events{}
if err := resp.ToJSON(events); err != nil {
return nil, err
}
if len(events.Events) > 0 {
return events.Events, nil
}
return nil, nil
}
func IsEqual(t *testing.T, expected, actual interface{}, property string) bool {
if expected != actual {
t.Logf("%s: expected %v, got %v", property, expected, actual)
return false
}
return true
}
func StringArr(el ...string) []string {
return el
} | |
manager.go | package manager
import (
"context"
"k8s.io/apimachinery/pkg/runtime"
"yunion.io/x/log"
"yunion.io/x/onecloud/pkg/cloudcommon/db"
"yunion.io/x/onecloud/pkg/mcclient"
"yunion.io/x/kubecomps/pkg/kubeserver/api"
)
type ICluster interface {
GetName() string
GetId() string
RealDelete(ctx context.Context, userCred mcclient.TokenCredential) error
SetStatus(userCred mcclient.TokenCredential, status string, reason string) error
//SetKubeconfig(kubeconfig string) error
GetAPIServer() (string, error)
GetKubeconfig() (string, error)
GetStatus() string
GetK8sResourceManager(kindName string) IK8sResourceManager
}
// bidirectional sync callbacks
type IK8sResourceManager interface {
db.IModelManager
OnRemoteObjectCreate(ctx context.Context, userCred mcclient.TokenCredential, cluster ICluster, resMan IK8sResourceManager, obj runtime.Object)
OnRemoteObjectUpdate(ctx context.Context, userCred mcclient.TokenCredential, cluster ICluster, resMan IK8sResourceManager, oldObj, newObj runtime.Object)
OnRemoteObjectDelete(ctx context.Context, userCred mcclient.TokenCredential, cluster ICluster, resMan IK8sResourceManager, obj runtime.Object)
}
type IClusterManager interface {
IsClusterExists(userCred mcclient.TokenCredential, id string) (ICluster, bool, error)
FetchClusterByIdOrName(userCred mcclient.TokenCredential, id string) (ICluster, error)
CreateCluster(ctx context.Context, userCred mcclient.TokenCredential, data api.ClusterCreateInput) (ICluster, error)
//GetNonSystemClusters() ([]ICluster, error)
GetRunningClusters() ([]ICluster, error)
}
type IMachine interface {
GetId() string
GetName() string
IsFirstNode() bool
GetResourceId() string
IsControlplane() bool
IsRunning() bool
GetPrivateIP() (string, error)
RealDelete(ctx context.Context, userCred mcclient.TokenCredential) error
GetStatus() string
SetStatus(userCred mcclient.TokenCredential, status string, reason string) error
SetPrivateIP(address string) error
GetRole() string
}
type IMachineManager interface {
FetchMachineByIdOrName(userCred mcclient.TokenCredential, id string) (IMachine, error)
GetMachines(clusterId string) ([]IMachine, error)
IsMachineExists(userCred mcclient.TokenCredential, id string) (IMachine, bool, error)
CreateMachine(ctx context.Context, userCred mcclient.TokenCredential, data *api.CreateMachineData) (IMachine, error)
}
var (
clusterManager IClusterManager
machineManager IMachineManager
)
func RegisterClusterManager(man IClusterManager) |
func RegisterMachineManager(man IMachineManager) {
if machineManager != nil {
log.Fatalf("MachineManager already registered")
}
machineManager = man
}
func ClusterManager() IClusterManager {
return clusterManager
}
func MachineManager() IMachineManager {
return machineManager
}
| {
if clusterManager != nil {
log.Fatalf("ClusterManager already registered")
}
clusterManager = man
} |
pairwise.rs | use super::*; | use crate::{CString, CommandHandle, Error, WalletHandle};
extern "C" {
pub fn indy_is_pairwise_exists(
command_handle: CommandHandle,
wallet_handle: WalletHandle,
their_did: CString,
cb: Option<ResponseBoolCB>,
) -> Error;
pub fn indy_create_pairwise(
command_handle: CommandHandle,
wallet_handle: WalletHandle,
their_did: CString,
my_did: CString,
metadata: CString,
cb: Option<ResponseEmptyCB>,
) -> Error;
pub fn indy_list_pairwise(
command_handle: CommandHandle,
wallet_handle: WalletHandle,
cb: Option<ResponseStringCB>,
) -> Error;
pub fn indy_get_pairwise(
command_handle: CommandHandle,
wallet_handle: WalletHandle,
their_did: CString,
cb: Option<ResponseStringCB>,
) -> Error;
pub fn indy_set_pairwise_metadata(
command_handle: CommandHandle,
wallet_handle: WalletHandle,
their_did: CString,
metadata: CString,
cb: Option<ResponseEmptyCB>,
) -> Error;
} | |
__init__.py | # Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from ._criterion import Criterion, RegressionCriterion, MSE
from ._splitter import Splitter, BestSplitter
from ._tree import DepthFirstTreeBuilder
from ._tree import Tree
__all__ = ["Tree",
"Splitter",
| "MSE"] | "BestSplitter",
"DepthFirstTreeBuilder",
"Criterion",
"RegressionCriterion",
|
multicore_sorted.py | """
>>> DRAFT "py_merge"! <<<
Builtin "sorted()" function, but using all CPU cores available for speedup!
It supports all kwargs of "sorted()": "cmp", "key" and "reverse",
however items of "iterable" and all of these kwargs should be picklable:
https://docs.python.org/2/library/pickle.html#what-can-be-pickled-and-unpickled
Under the hood it uses map-reduce via "multiprocessing.Pool().map()" with builtin "sorted()"
and then merges sorted chunks as in merge-sort.
"processes" kwarg allows to set number of processes different from "cpu_count()".
Usage:
pip install multicore_sorted
cat <<END >test.py
from multicore_sorted import multicore_sorted
in_data = [1, 5, 2, 4, 3]
out_data = [1, 2, 3, 4, 5]
def cmp(a, b):
return b - a
def key(a):
return -a
if __name__ == '__main__':
assert multicore_sorted(in_data) == sorted(in_data) == out_data
# But N times faster, given Big data and N CPU cores!
assert (
multicore_sorted(in_data, cmp=cmp) ==
multicore_sorted(in_data, key=key) ==
multicore_sorted(in_data, reverse=True) ==
list(reversed(out_data))
)
print('OK')
END
python test.py
drafts/py_merge/multicore_sorted version 0.1.0
Copyright (C) 2014 by Denis Ryzhkov <[email protected]>
MIT License, see http://opensource.org/licenses/MIT
"""
#### export
__all__ = ['multicore_sorted']
#### import
from bn import Bn
from functools import cmp_to_key
from multiprocessing import cpu_count, Pool
#### multicore_sorted
def multicore_sorted(iterable, **kwargs):
bn = Bn()
#### processes
bn('processes')
processes = kwargs.pop('processes', None)
if processes is None:
try:
processes = cpu_count() # Yes, "Pool()" does the same, but we need "processes" before calling "Pool()".
except NotImplementedError:
processes = 1
if processes < 2:
return sorted(iterable, **kwargs)
# No need for multiprocessing if less than 2 processes!
# It is tempting to do the same for small enough "len(iterable)",
# but then the code below would be not efficient for generators having no "__len__".
#### chunks
bn('chunks')
chunks = [[] for _ in xrange(processes)]
# "[[]] * processes" would have created N links to the same list,
# while we need separate lists.
for i, item in enumerate(iterable): # Efficient even if "iterable" is a generator.
chunks[i % processes].append(item) # Round-robin chunking.
chunks = [ # Packing for "picklable_sorted" below.
(chunk, kwargs) # "chunk" here is just a ref to one of big lists created above. So it is efficient.
for chunk in chunks
]
#### map-reduce
bn('pool')
pool = Pool(processes=processes) # No "maxtasksperchild" - the pool will be GC-ed after the sort.
bn('map')
chunks = pool.map(picklable_sorted, chunks)
#bn('pool')
#pool.close() # Test!
#bn('merge_sorted')
result = merge_sorted(chunks, **kwargs) # Alas "heapq.merge()" does not support "key=lambda", etc.
#bn('test_import')
#from itertools import chain
#bn('test_timsort')
#result = sorted(chain(*chunks), **kwargs)
print(bn)
return result
#### picklable_sorted
def picklable_sorted(chunk):
# "Pool().map()" does not support additional kwargs like "key=lambda" for the "func".
# Natural closure inside "multicore_sorted" is not picklable.
# This is a picklable single-argument workaround.
chunk, kwargs = chunk # Unpacking via efficient refs.
#print((chunk, kwargs))
return sorted(chunk, **kwargs)
#### merge_sorted
def merge_sorted(chunks, cmp=None, key=None, reverse=False):
#bn = Bn()
#bn('init')
#### K - combined key.
if cmp:
cmp_key = cmp_to_key(cmp)
K = (lambda a: cmp_key(key(a))) if key else cmp_key
elif key:
K = key
else:
K = lambda a: a
# NOTE: "reverse" is processed below.
#### init
chunks = [iter(chunk) for chunk in chunks] # Prepare to fetch from each chunk.
items = [chunk.next() for chunk in chunks] # Fetch first item from each chunk. Should be no empty chunks here.
skip_me = object() # Unique marker.
result = []
while True:
min_item = min_key = min_index = None
#### Find "min".
#bn('min')
        for chunk_index, item in enumerate(items): # Builtin "min()" does not fit, even with its "key" kwarg.
if item is not skip_me and (
min_index is None or # First not "skip_me" chunk becomes "min" chunk.
not reverse and K(item) < min_key or # Default case "reverse=False" should be the first one.
reverse and K(item) > min_key # Attempt to use "not <" would lead to extra computations below on "==".
):
min_item = item
min_key = K(item)
min_index = chunk_index
if min_index is None: # All chunks are "skip_me".
break
#bn('append')
result.append(min_item)
#### Fetch next item instead of "min".
#bn('fetch')
try:
items[min_index] = chunks[min_index].next()
except StopIteration:
items[min_index] = skip_me
#print(bn)
return result
#### tests
def cmp(a, b):
return b - a
def key(a):
|
def tests():
from random import randint
in_data = [randint(-100, 100) for _ in xrange(4 * 10**6)]
out_data = sorted(in_data)
reversed_out_data = list(reversed(out_data))
bn = Bn()
bn('sorted')
assert sorted(in_data) == out_data
bn('multicore_sorted')
assert multicore_sorted(in_data) == out_data
print(bn)
#"""
assert multicore_sorted(in_data) == sorted(in_data) == out_data
assert multicore_sorted(in_data, cmp=cmp) == reversed_out_data
assert multicore_sorted(in_data, key=key) == reversed_out_data
assert multicore_sorted(in_data, reverse=True) == reversed_out_data
assert multicore_sorted(in_data, cmp=cmp, key=key) == out_data
assert multicore_sorted(in_data, cmp=cmp, reverse=True) == out_data
assert multicore_sorted(in_data, key=key, reverse=True) == out_data
assert multicore_sorted(in_data, cmp=cmp, key=key, reverse=True) == reversed_out_data
#"""
print('OK')
if __name__ == '__main__':
tests()
| return -a |
callbacks_available.py | """Dataclasses just to initialize and return Callback objects"""
from typing import Optional, TYPE_CHECKING
from omegaconf import DictConfig
from dataclasses import dataclass
from pytorch_lightning.callbacks import Callback, EarlyStopping, LearningRateMonitor, ModelCheckpoint
from prostate_cancer_segmentation.callbacks.log_media import LogMedia
from prostate_cancer_segmentation.config_parse.conf_utils import asdict_filtered
if TYPE_CHECKING:
pydantic_dataclass = dataclass
else:
from pydantic.dataclasses import dataclass as pydantic_dataclass
@pydantic_dataclass(frozen=True)
class EarlyStopConf:
monitor: str
min_delta: float
patience: int
mode: str
verbose: bool = False
def get_callback(self) -> Callback:
return EarlyStopping(**asdict_filtered(self))
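# Example with illustrative values (the monitored metric name is an assumption):
#   EarlyStopConf(monitor="val_loss", min_delta=0.0, patience=5, mode="min").get_callback()
# returns a configured EarlyStopping callback ready to be passed to the Trainer.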
@pydantic_dataclass(frozen=True)
class CheckpointConf:
filename: Optional[str]
monitor: Optional[str]
mode: str
save_last: Optional[bool]
period: int
save_top_k: Optional[int]
verbose: bool = False
def get_callback(self, logs_dir) -> Callback:
return ModelCheckpoint(dirpath=logs_dir, **asdict_filtered(self))
@pydantic_dataclass(frozen=True)
class LogMediaConf:
max_samples: int
period_epoch: int
period_step: int
save_to_disk: bool
save_latest_only: bool
verbose: bool = False
def get_callback(self, exp_dir: str, cfg: DictConfig) -> Callback:
|
@pydantic_dataclass(frozen=True)
class LearningRateMonitorConf:
logging_interval: Optional[str]
log_momentum: bool = False
def get_callback(self) -> Callback:
return LearningRateMonitor(**asdict_filtered(self))
| return LogMedia(exp_dir=exp_dir, cfg=cfg, **asdict_filtered(self)) |
allennlp_coreference_resolution.py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from allennlp.predictors.predictor import Predictor
# ## Instantiate AllenNLP `Predictor`
# 1. Load the same model that is used in the [demo](https://demo.allennlp.org/coreference-resolution) (*don't get alarmed by the warning - we don't need to fine-tune the model to use it*).
# 2. Get the prediction :)
# In[2]:
model_url = 'https://storage.googleapis.com/allennlp-public-models/coref-spanbert-large-2020.02.27.tar.gz'
predictor = Predictor.from_path(model_url)
# In[3]:
text = "Eva and Martha didn't want their friend Jenny to feel lonely so they invited her to the party in Las Vegas."
prediction = predictor.predict(document=text)
# ## Coreference resolution with `Allen Institute`
# What we get as a result (`prediction`) is a dictionary, as Allen outputs multiple pieces of information at once.
# The ones that we found ourselves using the most are:
#
# | Key | Type | Description
# |:--------------------------|:-------------------|:----------------------------------------------------
# | `top_spans` | `List[List[int]]` | List of `spaCy` token indices pairs representing spans
# | `document` | `List[str]` | Document's tokens (from `spaCy`; but represented as string not Token)
# | `clusters` | `List[List[List[int]]]` | Clusters of spans (represented by token indices pairs)
# In[4]:
# it's our original text (with extra whitespace, as we trivially just joined tokens with ' ')
' '.join(prediction['document'])
# In[5]:
# and the found clusters - however, they are not easily understood...
prediction['clusters']
# In[6]:
# but that's how it looks after coreference resolution (notice the possessive!)
predictor.coref_resolved(text)
# As Allen's coreference resolution `Predictor` has quite a limited set of functionalities, we need to write some functions manually in order to turn its output into a more readable form:
# In[7]:
def get_span_words(span, document):
return ' '.join(document[span[0]:span[1]+1])
def | (prediction):
document, clusters = prediction['document'], prediction['clusters']
for cluster in clusters:
print(get_span_words(cluster[0], document) + ': ', end='')
print(f"[{'; '.join([get_span_words(span, document) for span in cluster])}]")
# In[8]:
print_clusters(prediction)
| print_clusters |
__init__.py | # -*- coding: utf-8 -*-
"""
@file |
* ``irsocsd2014_G10.xlsx``: ?
* ``fm-fecondite-age-mere.csv``: `INSEE Bilan Démographique 2016 <https://www.insee.fr/fr/statistiques/1892259?sommaire=1912926>`_
* ``pop-totale-france.xlsx``: `INED Population totale
<https://www.ined.fr/fr/tout-savoir-population/chiffres/france/evolution-population/population-totale/>`_
* ``TF00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
* ``TH00-02_D.xls``: `spac-actuaires, tables de mortalité <http://www.spac-actuaires.fr/glossaire/Table_de_mortalit%C3%A9>`_
""" # pragma: no cover | @brief Data from INSEE
**Source** |
digital_twin_interfaces_patch_interfaces_value.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class DigitalTwinInterfacesPatchInterfacesValue(Model):
"""DigitalTwinInterfacesPatchInterfacesValue.
:param properties: List of properties to update in an interface.
:type properties: dict[str,
~service.models.DigitalTwinInterfacesPatchInterfacesValuePropertiesValue]
"""
_attribute_map = {
'properties': {'key': 'properties', 'type': '{DigitalTwinInterfacesPatchInterfacesValuePropertiesValue}'},
}
def __init__(self, **kwargs):
| super(DigitalTwinInterfacesPatchInterfacesValue, self).__init__(**kwargs)
self.properties = kwargs.get('properties', None) |
|
script.py | import click
from flask.cli import FlaskGroup
from app import app, db
@click.group(cls=FlaskGroup, create_app=lambda: app)
def cli():
"""Management script for the flask application."""
@cli.command()
def init_db():
"""Creates database tables"""
db.create_all()
|
if __name__ == '__main__':
cli() | @cli.command()
def delete_db():
"""Deletes all database tables"""
db.drop_all() |
main.go | package main
import (
"bytes"
"context"
"crypto/tls"
"html/template"
"io"
"io/ioutil"
"log"
"net/http"
"net/http/httputil"
"os"
"os/signal"
"strings"
"syscall"
"time"
)
var (
tmpl *template.Template
)
func main() {
port := os.Getenv("PORT")
if port == "" {
log.Fatal("$PORT must be set")
}
// disable HTTP/2 client
tr := http.DefaultTransport.(*http.Transport)
tr.TLSClientConfig = &tls.Config{
ClientSessionCache: tls.NewLRUClientSessionCache(0),
}
tr.MaxIdleConns = 0
tr.MaxIdleConnsPerHost = int(^uint(0) >> 1) // unlimited
tr.IdleConnTimeout = 300 * time.Second
tr.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{}
tmpl = template.Must(template.ParseFiles("app.gohtml"))
http.Handle("/", &httputil.ReverseProxy{Director: director, ModifyResponse: modifyResponse})
http.Handle("/md/", http.NotFoundHandler())
http.Handle("/assets/", addHeaders(http.HandlerFunc(assetsHandler)))
http.Handle("/assets/md/", http.StripPrefix("/assets/md", addHeaders(http.FileServer(http.Dir("static")))))
srv := &http.Server{Addr: ":" + port}
shutdown := make(chan os.Signal, 1)
signal.Notify(shutdown, os.Interrupt, syscall.SIGTERM)
idleConnsClosed := make(chan struct{})
go func() {
<-shutdown
// We received an interrupt/termination signal, shut down.
if err := srv.Shutdown(context.Background()); err != nil {
// Error from closing listeners, or context timeout:
log.Printf("HTTP server Shutdown: %v", err)
}
close(idleConnsClosed)
}()
if err := srv.ListenAndServe(); err != http.ErrServerClosed {
// Error starting or closing listener:
log.Fatalf("HTTP server ListenAndServe: %v", err)
}
<-idleConnsClosed
}
func getOrigin(req *http.Request) string {
if strings.HasPrefix(req.Host, "canary") {
return "canary.discord.com"
} else if strings.HasPrefix(req.Host, "ptb") {
return "ptb.discord.com"
} else {
return "discord.com"
}
}
func director(req *http.Request) {
req.URL.Scheme = "https"
req.URL.Host = getOrigin(req)
req.Host = req.URL.Host
if !strings.HasPrefix(req.URL.Path, "/assets/") {
// read uncompressed response
delete(req.Header, "Accept-Encoding")
}
// remove Cloudflare headers (Cloudflare rejects requests with Cf-Connecting-Ip)
for k := range req.Header {
if strings.HasPrefix(k, "Cf-") {
delete(req.Header, k)
}
}
if _, ok := req.Header["User-Agent"]; !ok {
// explicitly disable User-Agent so it's not set to default value
req.Header.Set("User-Agent", "")
}
}
func modifyResponse(res *http.Response) error {
// remove __cfduid cookie to let Cloudflare cache
delete(res.Header, "Set-Cookie")
if res.StatusCode >= 500 {
return nil
} | res.Header.Set("X-Robots-Tag", "noindex, nofollow, noarchive, nocache, noimageindex, noodp")
if strings.HasPrefix(res.Request.URL.Path, "/assets/") {
return nil
}
// prevent caching HTML (assets might not load while offline)
if cc := res.Header.Get("Cache-Control"); !strings.Contains(cc, "no-cache") &&
!strings.Contains(cc, "no-store") &&
!strings.Contains(cc, "max-age") {
res.Header.Add("Cache-Control", "max-age=0")
}
if !strings.HasPrefix(res.Header.Get("Content-Type"), "text/html") {
return nil
}
// inject links and scripts
b, err := ioutil.ReadAll(res.Body)
if err != nil {
return err
}
res.Body.Close()
s := string(b)
s = strings.ReplaceAll(s, " integrity=", " data-integrity=")
if iMeta := strings.Index(s, "<meta "); iMeta == -1 {
log.Print("modifyResponse: missing <meta> tag")
} else if iHead := strings.Index(s[iMeta:], "</head>"); iHead == -1 {
log.Print("modifyResponse: missing </head> tag")
} else if iScript := strings.Index(s[iMeta+iHead:], "<script src="); iScript == -1 {
log.Print("modifyResponse: missing <script> tag")
} else {
iHead += iMeta
iScript += iHead
origin := "https://" + res.Request.URL.Host
r, w := io.Pipe()
go func() {
data := struct {
Meta template.HTML
Head template.HTML
Body template.HTML
Scripts template.HTML
Origin string
}{
Meta: template.HTML(s[:iMeta]),
Head: template.HTML(s[iMeta:iHead]),
Body: template.HTML(s[iHead:iScript]),
Scripts: template.HTML(s[iScript:]),
Origin: origin,
}
err := tmpl.Execute(w, data)
w.CloseWithError(err)
}()
res.Body = r
res.Header.Del("Content-Length")
res.Header.Del("Etag")
for _, key := range []string{"Content-Security-Policy", "Content-Security-Policy-Report-Only"} {
v := res.Header[key]
for i, csp := range v {
// add origin for API requests
csp = strings.Replace(csp, "'self'", "'self' "+origin, -1)
// icons use wrong protocol
if res.Request.Header.Get("X-Forwarded-Proto") != "https" {
csp = strings.Replace(csp, "https://*.discord.com", "*.discord.com", -1)
csp = strings.Replace(csp, "wss://*.discord.media", "ws://*.discord.media", -1)
}
v[i] = csp
}
}
return nil
}
res.Body = ioutil.NopCloser(bytes.NewReader(b))
return nil
}
func addHeaders(h http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Cache-Control", "public, max-age=600, stale-if-error=1200")
h.ServeHTTP(w, r)
})
}
func assetsHandler(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "https://"+getOrigin(r)+r.RequestURI, http.StatusFound)
} |
// hide from search engines |
index.ts | export { getTheme, loadTheme, BUNDLED_THEMES } from 'shiki-themes'
export { BUNDLED_LANGUAGES } from 'shiki-languages'
export { getHighlighter } from './highlighter'
export { renderToHtml } from './renderer' | export { IThemedToken } from './themedTokenizer' |
|
__init__.py | from .custom_user import User |
||
05_mutexlock.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import threading
import time
g_num = 0
def work1(num):
global g_num
for i in range(num):
        mutex.acquire() # acquire the lock
g_num += 1
        mutex.release() # release the lock
# print("----in work1, g_num is %d---" % g_num)
def work2(num):
global g_num
for i in range(num):
        mutex.acquire() # acquire the lock
g_num += 1
        mutex.release() # release the lock
# print("----in work2, g_num is %d---" % g_num)
# Create a mutex lock
# It is unlocked by default
mutex = threading.Lock()
t1 = threading.Thread(target=work1, args=(1000000,))
t2 = threading.Thread(target=work2, args=(1000000,))
t1.start()
t2.start()
# If t1 and t2 have not finished yet, sleep for 1 second
while len(threading.enumerate()) != 1:
time.sleep(1) |
print("g_num最终值:%d" % g_num) | |
swarm.rs | // Copyright 2018 Parity Technologies (UK) Ltd.
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use futures::stream::StreamFuture;
use futures::sync::oneshot;
use futures::{Async, Future, IntoFuture, Poll, Stream};
use futures::task;
use parking_lot::Mutex;
use std::fmt;
use std::io::{Error as IoError, ErrorKind as IoErrorKind};
use std::sync::Arc;
use {Multiaddr, MuxedTransport, Transport};
/// Creates a swarm.
///
/// Requires an upgraded transport, and a function or closure that will turn the upgrade into a
/// `Future` that produces a `()`.
///
/// Produces a `SwarmController` and a `SwarmEvents` stream. The controller can be used to
/// control the swarm, and the events stream must be continuously polled in order for things to work.
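///
/// # Example (illustrative sketch)
///
/// A minimal outline of the intended usage, mirroring the tests at the bottom of this
/// module; the `transport` value and the `/memory` multiaddress are assumptions borrowed
/// from those tests, not part of this function's contract:
///
/// ```ignore
/// let (swarm_ctrl, swarm_events) = swarm(transport.with_dummy_muxing(), |_socket, _client_addr| {
///     // Handle the fully upgraded connection here.
///     future::empty()
/// });
/// swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
/// // The events stream must be polled for anything to progress.
/// current_thread::Runtime::new().unwrap()
///     .block_on(swarm_events.for_each(|_event| Ok(())))
///     .unwrap();
/// ```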
pub fn swarm<T, H, F>(
transport: T,
handler: H,
) -> (SwarmController<T, F::Future>, SwarmEvents<T, F::Future, H>)
where
T: MuxedTransport + Clone + 'static, // TODO: 'static :-/
H: FnMut(T::Output, Box<Future<Item = Multiaddr, Error = IoError> + Send>) -> F,
F: IntoFuture<Item = (), Error = IoError>,
{
let shared = Arc::new(Mutex::new(Shared {
next_incoming: transport.clone().next_incoming(),
listeners: Vec::new(),
listeners_upgrade: Vec::new(),
dialers: Vec::new(),
to_process: Vec::new(),
task_to_notify: None,
}));
let future = SwarmEvents {
transport: transport.clone(),
shared: shared.clone(),
handler: handler,
};
let controller = SwarmController {
transport,
shared,
};
(controller, future)
}
/// Allows control of what the swarm is doing.
pub struct SwarmController<T, F>
where
T: MuxedTransport + 'static, // TODO: 'static :-/
{
/// Shared between the swarm infrastructure.
shared: Arc<Mutex<Shared<T, F>>>,
/// Transport used to dial or listen.
transport: T,
}
impl<T, F> fmt::Debug for SwarmController<T, F>
where
T: fmt::Debug + MuxedTransport + 'static, // TODO: 'static :-/
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_tuple("SwarmController")
.field(&self.transport)
.finish()
}
}
impl<T, F> Clone for SwarmController<T, F>
where
T: MuxedTransport + Clone + 'static, // TODO: 'static :-/
{
fn clone(&self) -> Self {
SwarmController {
transport: self.transport.clone(),
shared: self.shared.clone(),
}
}
}
impl<T, F> SwarmController<T, F>
where
T: MuxedTransport + Clone + 'static, // TODO: 'static :-/
T::Dial: Send,
T::MultiaddrFuture: Send,
T::Listener: Send,
T::ListenerUpgrade: Send,
T::Output: Send,
F: 'static,
{
/// Asks the swarm to dial the node with the given multiaddress. The connection is then
/// upgraded using the `upgrade`, and the output is sent to the handler that was passed when
/// calling `swarm`.
///
/// Returns a future that is signalled once the closure in the `swarm` has returned its future.
/// Therefore if the closure in the swarm has some side effect (eg. write something in a
/// variable), this side effect will be observable when this future succeeds.
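///
/// A sketch of a call site (borrowed from this module's tests; `tx` is an assumed
/// dialing transport):
///
/// ```ignore
/// let dial_done = swarm_ctrl.dial("/memory".parse().unwrap(), tx.clone()).unwrap();
/// // `dial_done` resolves once the handler closure has been invoked for this connection.
/// ```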
#[inline]
pub fn dial<Du>(&self, multiaddr: Multiaddr, transport: Du)
-> Result<impl Future<Item = (), Error = IoError>, Multiaddr>
where
Du: Transport + 'static, // TODO: 'static :-/
Du::Dial: Send,
Du::MultiaddrFuture: Send,
Du::Output: Into<T::Output>,
{
self.dial_then(multiaddr, transport, |v| v)
}
| /// dialing fails or the handler has been called with the resulting future.
///
/// The returned future is filled with the output of `then`.
pub(crate) fn dial_then<Du, TThen>(&self, multiaddr: Multiaddr, transport: Du, then: TThen)
-> Result<impl Future<Item = (), Error = IoError>, Multiaddr>
where
Du: Transport + 'static, // TODO: 'static :-/
Du::Dial: Send,
Du::MultiaddrFuture: Send,
Du::Output: Into<T::Output>,
TThen: FnOnce(Result<(), IoError>) -> Result<(), IoError> + Send + 'static,
{
trace!("Swarm dialing {}", multiaddr);
match transport.dial(multiaddr.clone()) {
Ok(dial) => {
let (tx, rx) = oneshot::channel();
let mut then = Some(move |val| {
let _ = tx.send(then(val));
});
// Unfortunately the `Box<FnOnce(_)>` type is still unusable in Rust right now,
// so we use a `Box<FnMut(_)>` instead and panic if it is called multiple times.
let mut then = Box::new(move |val: Result<(), IoError>| {
let then = then.take().expect("The Boxed FnMut should only be called once");
then(val);
}) as Box<FnMut(_) + Send>;
let dial = dial.then(|result| {
match result {
Ok((output, client_addr)) => {
let client_addr = Box::new(client_addr) as Box<Future<Item = _, Error = _> + Send>;
Ok((output.into(), then, client_addr))
}
Err(err) => {
debug!("Error in dialer upgrade: {:?}", err);
let err_clone = IoError::new(err.kind(), err.to_string());
then(Err(err));
Err(err_clone)
}
}
});
let mut shared = self.shared.lock();
shared.dialers.push((multiaddr, Box::new(dial) as Box<_>));
if let Some(task) = shared.task_to_notify.take() {
task.notify();
}
Ok(rx.then(|result| {
match result {
Ok(Ok(())) => Ok(()),
Ok(Err(err)) => Err(err),
Err(_) => Err(IoError::new(IoErrorKind::ConnectionAborted,
"dial cancelled the swarm future has been destroyed")),
}
}))
}
Err((_, multiaddr)) => Err(multiaddr),
}
}
/// Interrupts all dialing attempts to a specific multiaddress.
///
/// Has no effect if the dialing attempt has already succeeded, in which case it will be
/// dispatched to the handler.
pub fn interrupt_dial(&self, multiaddr: &Multiaddr) {
let mut shared = self.shared.lock();
shared.dialers.retain(|dialer| {
&dialer.0 != multiaddr
});
}
/// Adds a multiaddr to listen on. All the incoming connections will use the `upgrade` that
/// was passed to `swarm`.
// TODO: add a way to cancel a listener
pub fn listen_on(&self, multiaddr: Multiaddr) -> Result<Multiaddr, Multiaddr> {
match self.transport.clone().listen_on(multiaddr) {
Ok((listener, new_addr)) => {
trace!("Swarm listening on {}", new_addr);
let mut shared = self.shared.lock();
let listener = Box::new(
listener.map(|f| {
let f = f.map(|(out, maf)| {
(out, Box::new(maf) as Box<Future<Item = Multiaddr, Error = IoError> + Send>)
});
Box::new(f) as Box<Future<Item = _, Error = _> + Send>
}),
) as Box<Stream<Item = _, Error = _> + Send>;
shared.listeners.push((new_addr.clone(), listener.into_future()));
if let Some(task) = shared.task_to_notify.take() {
task.notify();
}
Ok(new_addr)
}
Err((_, multiaddr)) => Err(multiaddr),
}
}
}
/// Future that must be driven to completion in order for the swarm to work.
#[must_use = "futures do nothing unless polled"]
pub struct SwarmEvents<T, F, H>
where
T: MuxedTransport + 'static, // TODO: 'static :-/
{
/// Shared between the swarm infrastructure.
shared: Arc<Mutex<Shared<T, F>>>,
/// The transport used to dial.
transport: T,
/// Swarm handler.
handler: H,
}
impl<T, H, If, F> Stream for SwarmEvents<T, F, H>
where
T: MuxedTransport + Clone + 'static, // TODO: 'static :-/,
T::MultiaddrFuture: Send,
T::IncomingUpgrade: Send,
H: FnMut(T::Output, Box<Future<Item = Multiaddr, Error = IoError> + Send>) -> If,
If: IntoFuture<Future = F, Item = (), Error = IoError>,
F: Future<Item = (), Error = IoError> + 'static, // TODO: 'static :-/
{
type Item = SwarmEvent<F>;
type Error = IoError;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let mut shared = self.shared.lock();
let handler = &mut self.handler;
loop {
match shared.next_incoming.poll() {
Ok(Async::Ready(connec)) => {
debug!("Swarm received new multiplexed incoming connection");
shared.next_incoming = self.transport.clone().next_incoming();
let connec = connec.map(|(out, maf)| {
(out, Box::new(maf) as Box<Future<Item = Multiaddr, Error = IoError> + Send>)
});
shared.listeners_upgrade.push(Box::new(connec) as Box<_>);
}
Ok(Async::NotReady) => break,
Err(err) => {
// TODO: should that stop everything?
debug!("Error in multiplexed incoming connection: {:?}", err);
shared.next_incoming = self.transport.clone().next_incoming();
return Ok(Async::Ready(Some(SwarmEvent::IncomingError(err))));
}
}
}
// We remove each element from `shared.listeners` one by one and add them back only
// if relevant.
for n in (0 .. shared.listeners.len()).rev() {
let (listen_addr, mut listener) = shared.listeners.swap_remove(n);
loop {
match listener.poll() {
Ok(Async::Ready((Some(upgrade), remaining))) => {
trace!("Swarm received new connection on listener socket");
shared.listeners_upgrade.push(upgrade);
listener = remaining.into_future();
}
Ok(Async::Ready((None, _))) => {
debug!("Listener closed gracefully");
return Ok(Async::Ready(Some(SwarmEvent::ListenerClosed {
listen_addr
})));
},
Err((error, _)) => {
debug!("Error in listener: {:?}", error);
return Ok(Async::Ready(Some(SwarmEvent::ListenerError {
listen_addr,
error,
})));
}
Ok(Async::NotReady) => {
shared.listeners.push((listen_addr, listener));
break;
}
}
}
}
// We remove each element from `shared.listeners_upgrade` one by one and add them back
// only if relevant.
for n in (0 .. shared.listeners_upgrade.len()).rev() {
let mut listener_upgrade = shared.listeners_upgrade.swap_remove(n);
match listener_upgrade.poll() {
Ok(Async::Ready((output, client_addr))) => {
debug!("Successfully upgraded incoming connection");
// TODO: unlock mutex before calling handler, in order to avoid deadlocks if
// the user does something stupid
shared.to_process.push(handler(output, client_addr).into_future());
}
Err(err) => {
debug!("Error in listener upgrade: {:?}", err);
return Ok(Async::Ready(Some(SwarmEvent::ListenerUpgradeError(err))));
}
Ok(Async::NotReady) => {
shared.listeners_upgrade.push(listener_upgrade);
},
}
}
// We remove each element from `shared.dialers` one by one and add them back only
// if relevant.
for n in (0 .. shared.dialers.len()).rev() {
let (client_addr, mut dialer) = shared.dialers.swap_remove(n);
match dialer.poll() {
Ok(Async::Ready((output, mut notifier, addr))) => {
trace!("Successfully upgraded dialed connection");
// TODO: unlock mutex before calling handler, in order to avoid deadlocks if
// the user does something stupid
shared.to_process.push(handler(output, addr).into_future());
notifier(Ok(()));
}
Err(error) => {
return Ok(Async::Ready(Some(SwarmEvent::DialFailed {
client_addr,
error,
})));
},
Ok(Async::NotReady) => {
shared.dialers.push((client_addr, dialer));
},
}
}
// We remove each element from `shared.to_process` one by one and add them back only
// if relevant.
for n in (0 .. shared.to_process.len()).rev() {
let mut to_process = shared.to_process.swap_remove(n);
match to_process.poll() {
Ok(Async::Ready(())) => {
trace!("Future returned by swarm handler driven to completion");
return Ok(Async::Ready(Some(SwarmEvent::HandlerFinished {
handler_future: to_process,
})));
}
Err(error) => {
debug!("Error in processing: {:?}", error);
return Ok(Async::Ready(Some(SwarmEvent::HandlerError {
handler_future: to_process,
error,
})));
}
Ok(Async::NotReady) => {
shared.to_process.push(to_process);
}
}
}
// TODO: we never return `Ok(Ready)` because there's no way to know whether
// `next_incoming()` can produce anything more in the future ; also we would need to
// know when the controller has been dropped
shared.task_to_notify = Some(task::current());
Ok(Async::NotReady)
}
}
// TODO: stronger typing
struct Shared<T, F> where T: MuxedTransport + 'static {
/// Next incoming substream on the transport.
next_incoming: T::Incoming,
/// All the active listeners.
listeners: Vec<(
Multiaddr,
StreamFuture<
Box<
Stream<
Item = Box<Future<Item = (T::Output, Box<Future<Item = Multiaddr, Error = IoError> + Send>), Error = IoError> + Send>,
Error = IoError,
> + Send,
>,
>,
)>,
/// Futures that upgrade an incoming listening connection to a full connection.
listeners_upgrade:
Vec<Box<Future<Item = (T::Output, Box<Future<Item = Multiaddr, Error = IoError> + Send>), Error = IoError> + Send>>,
/// Futures that dial a remote address.
///
/// Contains the address we dial, so that we can cancel it if necessary.
dialers: Vec<(Multiaddr, Box<Future<Item = (T::Output, Box<FnMut(Result<(), IoError>) + Send>, Box<Future<Item = Multiaddr, Error = IoError> + Send>), Error = IoError> + Send>)>,
/// List of futures produced by the swarm closure. Must be processed to the end.
to_process: Vec<F>,
/// The task to notify whenever we add a new element in one of the lists.
/// Necessary so that the task wakes up and the element gets polled.
task_to_notify: Option<task::Task>,
}
/// Event that happens in the swarm.
#[derive(Debug)]
pub enum SwarmEvent<F> {
/// An error has happened while polling the muxed transport for incoming connections.
IncomingError(IoError),
/// A listener has gracefully closed.
ListenerClosed {
/// Address the listener was listening on.
listen_addr: Multiaddr,
},
/// A listener has stopped because it produced an error.
ListenerError {
/// Address the listener was listening on.
listen_addr: Multiaddr,
/// The error that happened.
error: IoError,
},
/// An error happened while upgrading an incoming connection.
ListenerUpgradeError(IoError),
/// Failed to dial a remote address.
DialFailed {
/// Address we were trying to dial.
client_addr: Multiaddr,
/// Error that happened.
error: IoError,
},
/// A future returned by the handler has finished.
HandlerFinished {
/// The future originally returned by the handler.
handler_future: F,
},
/// A future returned by the handler has produced an error.
HandlerError {
/// The future originally returned by the handler.
handler_future: F,
/// The error that happened.
error: IoError,
},
}
#[cfg(test)]
mod tests {
use futures::{Future, Stream, future};
use rand;
use transport::{self, DeniedTransport, Transport};
use std::io::Error as IoError;
use std::sync::{atomic, Arc};
use swarm;
use tokio::runtime::current_thread;
#[test]
fn transport_error_propagation_listen() {
let (swarm_ctrl, _swarm_future) = swarm(DeniedTransport, |_, _| future::empty());
assert!(swarm_ctrl.listen_on("/ip4/127.0.0.1/tcp/10000".parse().unwrap()).is_err());
}
#[test]
fn transport_error_propagation_dial() {
let (swarm_ctrl, _swarm_future) = swarm(DeniedTransport, |_, _| future::empty());
let addr = "/ip4/127.0.0.1/tcp/10000".parse().unwrap();
assert!(swarm_ctrl.dial(addr, DeniedTransport).is_err());
}
#[test]
fn basic_dial() {
let (tx, rx) = transport::connector();
let reached_tx = Arc::new(atomic::AtomicBool::new(false));
let reached_tx2 = reached_tx.clone();
let reached_rx = Arc::new(atomic::AtomicBool::new(false));
let reached_rx2 = reached_rx.clone();
let (swarm_ctrl1, swarm_future1) = swarm(rx.with_dummy_muxing(), |_, _| {
reached_rx2.store(true, atomic::Ordering::SeqCst);
future::empty()
});
swarm_ctrl1.listen_on("/memory".parse().unwrap()).unwrap();
let (swarm_ctrl2, swarm_future2) = swarm(tx.clone().with_dummy_muxing(), |_, _| {
reached_tx2.store(true, atomic::Ordering::SeqCst);
future::empty()
});
let dial_success = swarm_ctrl2.dial("/memory".parse().unwrap(), tx).unwrap();
let future = swarm_future2.for_each(|_| Ok(()))
.select(swarm_future1.for_each(|_| Ok(()))).map(|_| ()).map_err(|(err, _)| err)
.select(dial_success).map(|_| ()).map_err(|(err, _)| err);
current_thread::Runtime::new().unwrap().block_on(future).unwrap();
assert!(reached_tx.load(atomic::Ordering::SeqCst));
assert!(reached_rx.load(atomic::Ordering::SeqCst));
}
#[test]
fn dial_multiple_times() {
let (tx, rx) = transport::connector();
let reached = Arc::new(atomic::AtomicUsize::new(0));
let reached2 = reached.clone();
let (swarm_ctrl, swarm_future) = swarm(rx.with_dummy_muxing(), |_, _| {
reached2.fetch_add(1, atomic::Ordering::SeqCst);
future::empty()
});
swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
let num_dials = 20000 + rand::random::<usize>() % 20000;
let mut dials = Vec::new();
for _ in 0 .. num_dials {
let f = swarm_ctrl.dial("/memory".parse().unwrap(), tx.clone()).unwrap();
dials.push(f);
}
let future = future::join_all(dials)
.map(|_| ())
.select(swarm_future.for_each(|_| Ok(())))
.map_err(|(err, _)| err);
current_thread::Runtime::new().unwrap().block_on(future).unwrap();
assert_eq!(reached.load(atomic::Ordering::SeqCst), num_dials);
}
#[test]
fn future_isnt_dropped() {
// Tests that the future in the closure isn't being dropped.
let (tx, rx) = transport::connector();
let (swarm_ctrl, swarm_future) = swarm(rx.with_dummy_muxing(), |_, _| {
future::empty()
.then(|_: Result<(), ()>| -> Result<(), IoError> { panic!() }) // <-- the test
});
swarm_ctrl.listen_on("/memory".parse().unwrap()).unwrap();
let dial_success = swarm_ctrl.dial("/memory".parse().unwrap(), tx).unwrap();
let future = dial_success.select(swarm_future.for_each(|_| Ok(())))
.map_err(|(err, _)| err);
current_thread::Runtime::new().unwrap().block_on(future).unwrap();
}
} | /// Internal version of `dial` that allows adding a closure that is called after either the |
device.rs | use core::{self as c, device as d, handle, texture as t, format, shade, pso, buffer, mapping};
use core::memory::Bind;
use core::ShaderSet;
use native;
use std::sync::Arc;
use {Resources as R, Device};
// TODO: dummy only
impl d::Device<R> for Device {
fn get_capabilities(&self) -> &c::Capabilities { unimplemented!() }
fn create_buffer_raw(&mut self, _: buffer::Info) -> Result<handle::RawBuffer<R>, buffer::CreationError> { unimplemented!() }
fn create_buffer_immutable_raw(&mut self, data: &[u8], stride: usize, _: buffer::Role, _: Bind)
-> Result<handle::RawBuffer<R>, buffer::CreationError> { unimplemented!() }
fn create_pipeline_state_raw(&mut self, _: &handle::Program<R>, _: &pso::Descriptor)
-> Result<handle::RawPipelineState<R>, pso::CreationError> { unimplemented!() }
fn create_program(&mut self, shader_set: &ShaderSet<R>)
-> Result<handle::Program<R>, shade::CreateProgramError> { unimplemented!() }
fn create_shader(&mut self, stage: shade::Stage, code: &[u8]) ->
Result<handle::Shader<R>, shade::CreateShaderError> { unimplemented!() }
fn create_sampler(&mut self, _: t::SamplerInfo) -> handle::Sampler<R> { unimplemented!() }
fn create_semaphore(&mut self) -> handle::Semaphore<R> { unimplemented!() }
fn create_fence(&mut self, _signalled: bool) -> handle::Fence<R> {
unimplemented!()
}
fn reset_fences(&mut self, fences: &[&handle::Fence<R>]) {
unimplemented!()
}
fn wait_for_fences(&mut self, _fences: &[&handle::Fence<R>], _wait: d::WaitFor, _timeout_ms: u32) -> bool {
unimplemented!()
}
fn read_mapping<'a, 'b, T>(&'a mut self, buf: &'b handle::Buffer<R, T>)
-> Result<mapping::Reader<'b, R, T>,
mapping::Error>
where T: Copy { unimplemented!() }
fn write_mapping<'a, 'b, T>(&'a mut self, buf: &'b handle::Buffer<R, T>)
-> Result<mapping::Writer<'b, R, T>,
mapping::Error>
where T: Copy { unimplemented!() }
fn create_texture_raw(&mut self, _: t::Info, _: Option<format::ChannelType>, _: Option<&[&[u8]]>)
-> Result<handle::RawTexture<R>, t::CreationError> { unimplemented!() }
fn view_buffer_as_shader_resource_raw(&mut self, _: &handle::RawBuffer<R>, _: format::Format)
-> Result<handle::RawShaderResourceView<R>, d::ResourceViewError> { unimplemented!() }
fn view_buffer_as_unordered_access_raw(&mut self, _: &handle::RawBuffer<R>)
-> Result<handle::RawUnorderedAccessView<R>, d::ResourceViewError> { unimplemented!() }
fn view_texture_as_shader_resource_raw(&mut self, _: &handle::RawTexture<R>, _: t::ResourceDesc)
-> Result<handle::RawShaderResourceView<R>, d::ResourceViewError> |
fn view_texture_as_unordered_access_raw(&mut self, _: &handle::RawTexture<R>)
-> Result<handle::RawUnorderedAccessView<R>, d::ResourceViewError> { unimplemented!() }
fn view_texture_as_render_target_raw(&mut self, _: &handle::RawTexture<R>, _: t::RenderDesc)
-> Result<handle::RawRenderTargetView<R>, d::TargetViewError> { unimplemented!() }
fn view_texture_as_depth_stencil_raw(&mut self, _: &handle::RawTexture<R>, _: t::DepthStencilDesc)
-> Result<handle::RawDepthStencilView<R>, d::TargetViewError> { unimplemented!() }
} | { unimplemented!() } |
container.rs | //! The primary module containing the implementations of the transaction pool
//! and its top-level members.
use crate::{component::entry::TxEntry, error::Reject};
use ckb_types::{
core::Capacity,
packed::{OutPoint, ProposalShortId},
};
use std::borrow::Cow;
use std::cmp::Ordering;
use std::collections::hash_map::Entry as HashMapEntry;
use std::collections::{BTreeSet, HashMap, HashSet};
/// A struct to use as a sorted key
#[derive(Eq, PartialEq, Clone, Debug)]
pub struct AncestorsScoreSortKey {
pub fee: Capacity,
pub vbytes: u64,
pub id: ProposalShortId,
pub ancestors_fee: Capacity,
pub ancestors_vbytes: u64,
pub ancestors_size: usize,
}
impl AncestorsScoreSortKey {
/// compare tx fee rate with ancestors fee rate and return the min one
pub(crate) fn min_fee_and_vbytes(&self) -> (Capacity, u64) {
// avoid division a_fee/a_vbytes > b_fee/b_vbytes
let tx_weight = u128::from(self.fee.as_u64()) * u128::from(self.ancestors_vbytes);
let ancestors_weight = u128::from(self.ancestors_fee.as_u64()) * u128::from(self.vbytes);
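        // Worked example of the cross-multiplication above (illustrative numbers):
        // comparing rates 3/200 and 5/300 becomes 3 * 300 = 900 vs 5 * 200 = 1000,
        // so 3/200 < 5/300 is decided without division or precision loss.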
if tx_weight < ancestors_weight {
(self.fee, self.vbytes)
} else {
(self.ancestors_fee, self.ancestors_vbytes)
}
}
}
impl PartialOrd for AncestorsScoreSortKey {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
| // avoid division a_fee/a_vbytes > b_fee/b_vbytes
let (fee, vbytes) = self.min_fee_and_vbytes();
let (other_fee, other_vbytes) = other.min_fee_and_vbytes();
let self_weight = u128::from(fee.as_u64()) * u128::from(other_vbytes);
let other_weight = u128::from(other_fee.as_u64()) * u128::from(vbytes);
if self_weight == other_weight {
// if fee rate weight is same, then compare with ancestor vbytes
if self.ancestors_vbytes == other.ancestors_vbytes {
self.id.raw_data().cmp(&other.id.raw_data())
} else {
self.ancestors_vbytes.cmp(&other.ancestors_vbytes)
}
} else {
self_weight.cmp(&other_weight)
}
}
}
#[derive(Default, Debug, Clone)]
pub struct TxLinks {
pub parents: HashSet<ProposalShortId>,
pub children: HashSet<ProposalShortId>,
}
#[derive(Clone, Copy)]
enum Relation {
Parents,
Children,
}
impl TxLinks {
fn get_direct_ids(&self, relation: Relation) -> &HashSet<ProposalShortId> {
match relation {
Relation::Parents => &self.parents,
Relation::Children => &self.children,
}
}
}
fn calc_relation_ids(
stage: Cow<HashSet<ProposalShortId>>,
map: &TxLinksMap,
relation: Relation,
) -> HashSet<ProposalShortId> {
let mut stage = stage.into_owned();
let mut relation_ids = HashSet::with_capacity(stage.len());
while let Some(id) = stage.iter().next().cloned() {
relation_ids.insert(id.clone());
stage.remove(&id);
//recursively
for id in map
.inner
.get(&id)
.map(|link| link.get_direct_ids(relation))
.cloned()
.unwrap_or_default()
{
if !relation_ids.contains(&id) {
stage.insert(id);
}
}
}
relation_ids
}
#[derive(Default, Debug, Clone)]
pub struct TxLinksMap {
pub(crate) inner: HashMap<ProposalShortId, TxLinks>,
}
impl TxLinksMap {
fn new() -> Self {
TxLinksMap {
inner: Default::default(),
}
}
fn calc_relative_ids(
&self,
short_id: &ProposalShortId,
relation: Relation,
) -> HashSet<ProposalShortId> {
let direct = self
.inner
.get(short_id)
.map(|link| link.get_direct_ids(relation))
.cloned()
.unwrap_or_default();
calc_relation_ids(Cow::Owned(direct), self, relation)
}
pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet<ProposalShortId> {
self.calc_relative_ids(short_id, Relation::Parents)
}
pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet<ProposalShortId> {
self.calc_relative_ids(short_id, Relation::Children)
}
pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet<ProposalShortId>> {
self.inner.get(short_id).map(|link| &link.children)
}
pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet<ProposalShortId>> {
self.inner.get(short_id).map(|link| &link.parents)
}
pub fn remove(&mut self, short_id: &ProposalShortId) -> Option<TxLinks> {
self.inner.remove(short_id)
}
fn remove_child(
&mut self,
short_id: &ProposalShortId,
child: &ProposalShortId,
) -> Option<bool> {
self.inner
.get_mut(short_id)
.map(|links| links.children.remove(child))
}
fn remove_parent(
&mut self,
short_id: &ProposalShortId,
parent: &ProposalShortId,
) -> Option<bool> {
self.inner
.get_mut(short_id)
.map(|links| links.parents.remove(parent))
}
fn add_child(&mut self, short_id: &ProposalShortId, child: ProposalShortId) -> Option<bool> {
self.inner
.get_mut(short_id)
.map(|links| links.children.insert(child))
}
fn clear(&mut self) {
self.inner.clear();
}
}
#[derive(Debug, Clone)]
pub(crate) struct SortedTxMap {
entries: HashMap<ProposalShortId, TxEntry>,
sorted_index: BTreeSet<AncestorsScoreSortKey>,
deps: HashMap<OutPoint, HashSet<ProposalShortId>>,
/// A map track transaction ancestors and descendants
links: TxLinksMap,
max_ancestors_count: usize,
}
impl SortedTxMap {
pub fn new(max_ancestors_count: usize) -> Self {
SortedTxMap {
entries: Default::default(),
sorted_index: Default::default(),
links: TxLinksMap::new(),
deps: Default::default(),
max_ancestors_count,
}
}
pub fn size(&self) -> usize {
self.entries.len()
}
pub fn iter(&self) -> impl Iterator<Item = (&ProposalShortId, &TxEntry)> {
self.entries.iter()
}
pub fn add_entry(&mut self, mut entry: TxEntry) -> Result<bool, Reject> {
let short_id = entry.proposal_short_id();
if self.contains_key(&short_id) {
return Ok(false);
};
// find in pool parents
let mut parents: HashSet<ProposalShortId> = HashSet::with_capacity(
entry.transaction().inputs().len() + entry.transaction().cell_deps().len(),
);
for input in entry.transaction().inputs() {
let input_pt = input.previous_output();
if let Some(deps) = self.deps.get(&input_pt) {
parents.extend(deps.iter().cloned());
}
let parent_hash = &input_pt.tx_hash();
let id = ProposalShortId::from_tx_hash(parent_hash);
if self.links.inner.contains_key(&id) {
parents.insert(id);
}
}
for cell_dep in entry.transaction().cell_deps() {
let dep_pt = cell_dep.out_point();
let id = ProposalShortId::from_tx_hash(&dep_pt.tx_hash());
if self.links.inner.contains_key(&id) {
parents.insert(id);
}
// insert dep-ref map
self.deps
.entry(dep_pt)
.or_insert_with(HashSet::new)
.insert(short_id.clone());
}
let ancestors = calc_relation_ids(Cow::Borrowed(&parents), &self.links, Relation::Parents);
// update parents references
for ancestor_id in &ancestors {
let ancestor = self.entries.get(ancestor_id).expect("pool consistent");
entry.add_entry_weight(ancestor);
}
if entry.ancestors_count > self.max_ancestors_count {
return Err(Reject::ExceededMaximumAncestorsCount);
}
for parent in &parents {
self.links.add_child(parent, short_id.clone());
}
// insert links
let links = TxLinks {
parents,
children: Default::default(),
};
self.links.inner.insert(short_id.clone(), links);
self.sorted_index.insert(entry.as_sorted_key());
self.entries.insert(short_id, entry);
Ok(true)
}
pub fn contains_key(&self, id: &ProposalShortId) -> bool {
self.entries.contains_key(id)
}
pub fn get(&self, id: &ProposalShortId) -> Option<&TxEntry> {
self.entries.get(id)
}
fn update_deps_for_remove(&mut self, entry: &TxEntry) {
for cell_dep in entry.transaction().cell_deps() {
let dep_pt = cell_dep.out_point();
if let HashMapEntry::Occupied(mut o) = self.deps.entry(dep_pt) {
let set = o.get_mut();
if set.remove(&entry.proposal_short_id()) && set.is_empty() {
o.remove_entry();
}
}
}
}
fn update_children_for_remove(&mut self, id: &ProposalShortId) {
if let Some(children) = self.get_children(id).cloned() {
for child in children {
self.links.remove_parent(&child, id);
}
}
}
fn update_parents_for_remove(&mut self, id: &ProposalShortId) {
if let Some(parents) = self.get_parents(id).cloned() {
for parent in parents {
self.links.remove_child(&parent, id);
}
}
}
fn remove_unchecked(&mut self, id: &ProposalShortId) -> Option<TxEntry> {
self.entries.remove(id).map(|entry| {
self.sorted_index.remove(&entry.as_sorted_key());
self.update_deps_for_remove(&entry);
entry
})
}
pub fn remove_entry_and_descendants(&mut self, id: &ProposalShortId) -> Vec<TxEntry> {
let mut removed_ids = vec![id.to_owned()];
let mut removed = vec![];
let descendants = self.calc_descendants(id);
removed_ids.extend(descendants);
// update links state for remove
for id in &removed_ids {
self.update_parents_for_remove(id);
self.update_children_for_remove(id);
}
for id in removed_ids {
if let Some(entry) = self.remove_unchecked(&id) {
self.links.remove(&id);
removed.push(entry);
}
}
removed
}
// notice:
    // we are sure that all in-pool ancestors have already been processed.
// otherwise `links` will differ from the set of parents we'd calculate by searching
pub fn remove_entry(&mut self, id: &ProposalShortId) -> Option<TxEntry> {
let descendants = self.calc_descendants(id);
self.remove_unchecked(id).map(|entry| {
// We're not recursively removing a tx and all its descendants
            // so we need to update the statistics state
for desc_id in &descendants {
if let Some(desc_entry) = self.entries.get_mut(desc_id) {
let deleted = self.sorted_index.remove(&desc_entry.as_sorted_key());
debug_assert!(deleted, "pool inconsistent");
desc_entry.sub_entry_weight(&entry);
self.sorted_index.insert(desc_entry.as_sorted_key());
}
}
self.update_parents_for_remove(id);
self.update_children_for_remove(id);
self.links.remove(id);
entry
})
}
/// calculate all ancestors from pool
pub fn calc_ancestors(&self, short_id: &ProposalShortId) -> HashSet<ProposalShortId> {
self.links.calc_ancestors(short_id)
}
/// calculate all descendants from pool
pub fn calc_descendants(&self, short_id: &ProposalShortId) -> HashSet<ProposalShortId> {
self.links.calc_descendants(short_id)
}
/// find children from pool
pub fn get_children(&self, short_id: &ProposalShortId) -> Option<&HashSet<ProposalShortId>> {
self.links.get_children(short_id)
}
/// find parents from pool
pub fn get_parents(&self, short_id: &ProposalShortId) -> Option<&HashSet<ProposalShortId>> {
self.links.get_parents(short_id)
}
/// sorted by ancestor score from higher to lower
pub fn score_sorted_iter(&self) -> impl Iterator<Item = &TxEntry> {
self.sorted_index
.iter()
.rev()
.map(move |key| self.entries.get(&key.id).expect("consistent"))
}
pub(crate) fn clear(&mut self) {
self.sorted_index.clear();
self.deps.clear();
self.links.clear();
self.entries.clear();
}
} | impl Ord for AncestorsScoreSortKey {
fn cmp(&self, other: &Self) -> Ordering { |
lib.rs | //! # crate2nix
//!
//! Internal library for the crate2nix binary. This is not meant to be used separately, I just enjoy
//! writing doc tests ;)
//#![deny(missing_docs)]
use std::collections::{BTreeMap, HashMap, HashSet, VecDeque};
use std::env;
use std::path::PathBuf;
use cargo_metadata::Metadata;
use cargo_metadata::PackageId;
use failure::format_err;
use failure::Error;
use failure::ResultExt;
use serde::Deserialize;
use serde::Serialize;
use crate::metadata::IndexedMetadata;
use crate::resolve::{CrateDerivation, ResolvedSource};
use itertools::Itertools;
mod lock;
mod metadata;
pub mod nix_build;
mod prefetch;
pub mod render;
mod resolve;
mod target_cfg;
pub mod util;
/// The resolved build info and the input for rendering the build.nix.tera template.
#[derive(Debug, Deserialize, Serialize)]
pub struct BuildInfo {
// The package ID of the root crate.
pub root_package_id: Option<PackageId>,
// Workspaces member package IDs by package names.
pub workspace_members: BTreeMap<String, PackageId>,
// Build info for all crates needed for this build.
pub crates: Vec<CrateDerivation>,
// For convenience include the source for tests.
pub indexed_metadata: IndexedMetadata,
// The generation configuration.
pub info: GenerateInfo,
// The generation configuration.
pub config: GenerateConfig,
}
impl BuildInfo {
/// Return the `NixBuildInfo` data ready for rendering the nix build file.
pub fn for_config(info: &GenerateInfo, config: &GenerateConfig) -> Result<BuildInfo, Error> {
let metadata = cargo_metadata(config)?;
let indexed_metadata = IndexedMetadata::new_from(metadata).map_err(|e| {
format_err!(
"while indexing metadata for {}: {}",
config.cargo_toml.to_string_lossy(),
e
)
})?;
let mut default_nix = BuildInfo::new(info, config, indexed_metadata)?;
default_nix.prune_unneeded_crates();
prefetch_and_fill_crates_sha256(config, &mut default_nix)?;
Ok(default_nix)
}
fn prune_unneeded_crates(&mut self) {
let mut queue: VecDeque<&PackageId> = self
.root_package_id
.iter()
.chain(self.workspace_members.values())
.collect();
let mut reachable = HashSet::new();
let indexed_crates: BTreeMap<_, _> =
self.crates.iter().map(|c| (&c.package_id, c)).collect();
while let Some(next_package_id) = queue.pop_back() {
if !reachable.insert(next_package_id.clone()) {
continue;
}
queue.extend(
indexed_crates
.get(next_package_id)
.iter()
.flat_map(|c| {
c.dependencies
.iter()
.chain(c.build_dependencies.iter())
.chain(c.dev_dependencies.iter())
})
.map(|d| &d.package_id),
);
}
self.crates.retain(|c| reachable.contains(&c.package_id));
}
fn new(
info: &GenerateInfo,
config: &GenerateConfig,
metadata: IndexedMetadata,
) -> Result<BuildInfo, Error> {
Ok(BuildInfo {
root_package_id: metadata.root.clone(),
workspace_members: metadata
.workspace_members
.iter()
.flat_map(|pkg_id| {
metadata
.pkgs_by_id
.get(pkg_id)
.map(|pkg| (pkg.name.clone(), pkg_id.clone()))
})
.collect(),
crates: metadata
.pkgs_by_id
.values()
.map(|package| CrateDerivation::resolve(config, &metadata, package))
.collect::<Result<_, Error>>()?,
indexed_metadata: metadata,
info: info.clone(),
config: config.clone(),
})
}
}
/// Call `cargo metadata` and return result.
fn cargo_metadata(config: &GenerateConfig) -> Result<Metadata, Error> {
let mut cmd = cargo_metadata::MetadataCommand::new();
let mut other_options = config.other_metadata_options.clone();
other_options.push("--locked".into());
cmd.manifest_path(&config.cargo_toml)
.other_options(&other_options);
cmd.exec().map_err(|e| {
format_err!(
"while retrieving metadata about {}: {}",
&config.cargo_toml.to_string_lossy(),
e
)
})
}
/// Prefetch hashes when necessary.
fn prefetch_and_fill_crates_sha256(
config: &GenerateConfig,
default_nix: &mut BuildInfo,
) -> Result<(), Error> {
let mut from_lock_file: HashMap<PackageId, String> =
extract_hashes_from_lockfile(&config, default_nix)?;
for (_package_id, hash) in from_lock_file.iter_mut() {
let bytes =
hex::decode(&hash).map_err(|e| format_err!("while decoding '{}': {}", hash, e))?;
*hash = nix_base32::to_nix_base32(&bytes);
}
let prefetched = prefetch::prefetch(config, &from_lock_file, &default_nix.crates)
.map_err(|e| format_err!("while prefetching crates for calculating sha256: {}", e))?;
for package in default_nix.crates.iter_mut() {
if package.source.sha256().is_none() |
}
Ok(())
}
fn extract_hashes_from_lockfile(
config: &GenerateConfig,
default_nix: &mut BuildInfo,
) -> Result<HashMap<PackageId, String>, Error> {
if !config.use_cargo_lock_checksums {
return Ok(HashMap::new());
}
let lock_file = crate::lock::EncodableResolve::load_lock_file(
&config.cargo_toml.parent().unwrap().join("Cargo.lock"),
)?;
let hashes = lock_file
.get_hashes_by_package_id()
.context("while parsing checksums from Lockfile")?;
let mut missing_hashes = Vec::new();
for package in default_nix.crates.iter_mut().filter(|c| match &c.source {
ResolvedSource::CratesIo { .. } => !hashes.contains_key(&c.package_id),
_ => false,
}) {
missing_hashes.push(format!("{} {}", package.crate_name, package.version));
}
if !missing_hashes.is_empty() {
eprintln!(
"Did not find all crates.io hashes in Cargo.lock. Hashes for e.g. {} are missing.\n\
This is probably a bug.",
missing_hashes.iter().take(10).join(", ")
);
}
Ok(hashes)
}
/// Some info about the crate2nix invocation.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GenerateInfo {
pub crate2nix_version: String,
pub crate2nix_arguments: Vec<String>,
}
impl Default for GenerateInfo {
fn default() -> GenerateInfo {
GenerateInfo {
crate2nix_version: env!("CARGO_PKG_VERSION").to_string(),
crate2nix_arguments: env::args().skip(1).collect(),
}
}
}
/// Configuration for the default.nix generation.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct GenerateConfig {
pub cargo_toml: PathBuf,
pub use_cargo_lock_checksums: bool,
pub output: PathBuf,
pub crate_hashes_json: PathBuf,
pub nixpkgs_path: String,
pub other_metadata_options: Vec<String>,
pub read_crate_hashes: bool,
}
| {
if let Some(hash) = prefetched
.get(&package.package_id)
.or_else(|| from_lock_file.get(&package.package_id))
{
package.source = package.source.with_sha256(hash.clone());
}
} |
converters.py | def convert_to_bool(string):
"""
Converts string to bool
:param string: String
    :type string: str
:return: True or False
"""
if isinstance(string, bool): | return string
return string in ['true', 'True', '1'] | |
sessions.rs | use std::os::unix::fs::FileTypeExt;
use std::time::SystemTime;
use std::{fs, io, process};
use suggest::Suggest;
use zellij_utils::{
consts::ZELLIJ_SOCK_DIR,
envs,
interprocess::local_socket::LocalSocketStream,
ipc::{ClientToServerMsg, IpcReceiverWithContext, IpcSenderWithContext, ServerToClientMsg},
};
pub(crate) fn get_sessions() -> Result<Vec<String>, io::ErrorKind> {
match fs::read_dir(&*ZELLIJ_SOCK_DIR) {
Ok(files) => {
let mut sessions = Vec::new();
files.for_each(|file| {
let file = file.unwrap();
let file_name = file.file_name().into_string().unwrap();
if file.file_type().unwrap().is_socket() && assert_socket(&file_name) {
sessions.push(file_name);
}
});
Ok(sessions)
}
Err(err) if io::ErrorKind::NotFound != err.kind() => Err(err.kind()),
Err(_) => Ok(Vec::with_capacity(0)),
}
}
pub(crate) fn get_sessions_sorted_by_mtime() -> anyhow::Result<Vec<String>> {
match fs::read_dir(&*ZELLIJ_SOCK_DIR) {
Ok(files) => {
let mut sessions_with_mtime: Vec<(String, SystemTime)> = Vec::new();
for file in files {
let file = file?;
let file_name = file.file_name().into_string().unwrap();
let file_modified_at = file.metadata()?.modified()?;
if file.file_type()?.is_socket() && assert_socket(&file_name) {
sessions_with_mtime.push((file_name, file_modified_at));
}
}
sessions_with_mtime.sort_by_key(|x| x.1); // the oldest one will be the first
let sessions = sessions_with_mtime.iter().map(|x| x.0.clone()).collect();
Ok(sessions)
}
Err(err) if io::ErrorKind::NotFound != err.kind() => Err(err.into()),
Err(_) => Ok(Vec::with_capacity(0)),
}
}
fn assert_socket(name: &str) -> bool {
let path = &*ZELLIJ_SOCK_DIR.join(name);
match LocalSocketStream::connect(path) {
Ok(stream) => {
let mut sender = IpcSenderWithContext::new(stream);
sender.send(ClientToServerMsg::ConnStatus);
let mut receiver: IpcReceiverWithContext<ServerToClientMsg> = sender.get_receiver();
match receiver.recv() {
Some((instruction, _)) => {
matches!(instruction, ServerToClientMsg::Connected)
}
None => false,
}
}
Err(e) if e.kind() == io::ErrorKind::ConnectionRefused => {
drop(fs::remove_file(path));
false
}
Err(_) => false,
}
}
pub(crate) fn print_sessions(sessions: Vec<String>) {
let curr_session = envs::get_session_name().unwrap_or_else(|_| "".into());
sessions.iter().for_each(|session| {
let suffix = if curr_session == *session {
" (current)"
} else {
""
};
println!("{}{}", session, suffix);
})
}
pub(crate) fn print_sessions_with_index(sessions: Vec<String>) {
let curr_session = envs::get_session_name().unwrap_or_else(|_| "".into());
for (i, session) in sessions.iter().enumerate() {
let suffix = if curr_session == *session {
" (current)"
} else {
""
};
println!("{}: {}{}", i, session, suffix);
}
}
pub(crate) enum ActiveSession {
None,
One(String),
Many,
}
pub(crate) fn get_active_session() -> ActiveSession {
match get_sessions() {
Ok(sessions) if sessions.is_empty() => ActiveSession::None,
Ok(mut sessions) if sessions.len() == 1 => ActiveSession::One(sessions.pop().unwrap()),
Ok(_) => ActiveSession::Many,
Err(e) => {
eprintln!("Error occurred: {:?}", e);
process::exit(1);
}
}
}
pub(crate) fn kill_session(name: &str) {
let path = &*ZELLIJ_SOCK_DIR.join(name);
match LocalSocketStream::connect(path) {
Ok(stream) => {
IpcSenderWithContext::new(stream).send(ClientToServerMsg::KillSession);
}
Err(e) => {
eprintln!("Error occurred: {:?}", e);
process::exit(1);
}
};
}
pub(crate) fn list_sessions() {
let exit_code = match get_sessions() {
Ok(sessions) if !sessions.is_empty() => {
print_sessions(sessions);
0
}
Ok(_) => {
eprintln!("No active zellij sessions found.");
1
}
Err(e) => {
eprintln!("Error occurred: {:?}", e);
1
}
};
process::exit(exit_code);
}
#[derive(Debug, Clone)]
pub enum SessionNameMatch {
AmbiguousPrefix(Vec<String>),
UniquePrefix(String),
Exact(String),
None,
}
pub(crate) fn | (prefix: &str) -> Result<SessionNameMatch, io::ErrorKind> {
return match get_sessions() {
Ok(sessions) => Ok({
let filtered_sessions: Vec<String> = sessions
.iter()
.filter(|s| s.starts_with(prefix))
.cloned()
.collect();
if filtered_sessions.iter().any(|s| s == prefix) {
return Ok(SessionNameMatch::Exact(prefix.to_string()));
}
match &filtered_sessions[..] {
[] => SessionNameMatch::None,
[s] => SessionNameMatch::UniquePrefix(s.to_string()),
_ => SessionNameMatch::AmbiguousPrefix(filtered_sessions),
}
}),
Err(e) => Err(e),
};
}
pub(crate) fn session_exists(name: &str) -> Result<bool, io::ErrorKind> {
match match_session_name(name) {
Ok(SessionNameMatch::Exact(_)) => Ok(true),
Ok(_) => Ok(false),
Err(e) => Err(e),
}
}
pub(crate) fn assert_session(name: &str) {
match session_exists(name) {
Ok(result) => {
if result {
return;
} else {
println!("No session named {:?} found.", name);
if let Some(sugg) = get_sessions().unwrap().suggest(name) {
println!(" help: Did you mean `{}`?", sugg);
}
}
}
Err(e) => {
eprintln!("Error occurred: {:?}", e);
}
};
process::exit(1);
}
pub(crate) fn assert_session_ne(name: &str) {
match session_exists(name) {
Ok(result) if !result => return,
Ok(_) => println!("Session with name {:?} already exists. Use attach command to connect to it or specify a different name.", name),
Err(e) => eprintln!("Error occurred: {:?}", e),
};
process::exit(1);
}
input_preprocessors.py | # Lint as: python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Input preprocessors."""
from lingvo import compat as tf
from lingvo.core import base_layer
from lingvo.core import py_utils
from lingvo.core import schedule
from lingvo.tasks.car import car_lib
from lingvo.tasks.car import detection_3d_lib
from lingvo.tasks.car import geometry
from lingvo.tasks.car import ops
import numpy as np
# pylint:disable=g-direct-tensorflow-import
from tensorflow.python.ops import inplace_ops
# pylint:enable=g-direct-tensorflow-import
def _ConsistentShuffle(tensors, seed):
"""Shuffle multiple tensors with the same shuffle order."""
shuffled_idx = tf.range(tf.shape(tensors[0])[0])
shuffled_idx = tf.random.shuffle(shuffled_idx, seed=seed)
return tuple([tf.gather(t, shuffled_idx) for t in tensors])
def _GetApplyPointMaskFn(points_mask):
"""Returns a function that applies a mask to one of our points tensors."""
def _ApplyPointMaskFn(points_tensor):
"""Applies a mask to the points tensor."""
if points_tensor is None:
return points_tensor
return tf.boolean_mask(points_tensor, points_mask)
return _ApplyPointMaskFn
def _Dense(sparse):
return tf.sparse_to_dense(
sparse_indices=sparse.indices,
output_shape=sparse.dense_shape,
sparse_values=sparse.values,
default_value=0)
class Preprocessor(base_layer.BaseLayer):
"""Base class for input preprocessor.
Input preprocessors expect the combined output of all extractors and perform
a transformation on it. Input preprocessors can add/edit/remove fields
from the NestedMap of features.
Note: Features correspond to that for one example (no batch dimension).
Sub-classes need to implement the following three functions:
1) TransformFeatures(features): Given a NestedMap of features representing the
output of all the extractors, apply a transformation on the features.
2) TransformShapes(shapes): Given a corresponding NestedMap of shapes,
produce a NestedMap of shapes that corresponds to the transformation of the
features after TransformFeatures.
3) TransformDTypes(dtypes): Given a corresponding NestedMap of dtypes,
produce a NestedMap of dtypes that corresponds to the transformation of the
features after TransformFeatures.
The preprocessor is expected to explicitly pass through untouched fields.
For example, a preprocessor that does data augmentation should modify the
features NestedMap on the fields it cares about augmenting, and then return
the features NestedMap.
"""
@classmethod
def Params(cls):
"""Default params."""
p = super().Params()
p.name = cls.__name__
return p
def FProp(self, theta, features):
"""Performs TransformFeatures."""
del theta # unused
return self.TransformFeatures(features)
def TransformFeatures(self, features):
"""Transforms the features for one example.
Args:
features: A `NestedMap` of tensors.
Returns:
A `NestedMap` of tensors corresponding to the transformed features.
"""
raise NotImplementedError()
def TransformShapes(self, shapes):
"""Sets correct shapes corresponding to TransformFeatures.
Args:
shapes: A `NestedMap` of TensorShapes, corresponding to the
pre-transformed features.
Returns:
A `NestedMap` of TensorShapes corresponding to the transformed features.
"""
raise NotImplementedError()
def TransformDTypes(self, dtypes):
"""Sets correct dtypes corresponding to TransformFeatures.
Args:
dtypes: A `NestedMap` of DTypes, corresponding to the pre-transformed
features.
Returns:
A `NestedMap` of DTypes corresponding to the transformed features.
"""
raise NotImplementedError()
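# A minimal sketch of the three-method contract described in the class
# docstring above: an identity preprocessor that passes features, shapes, and
# dtypes through unchanged. Illustrative only; nothing below depends on it.
class _IdentityPreprocessorExample(Preprocessor):
  """Example preprocessor that leaves all features untouched."""

  def TransformFeatures(self, features):
    return features

  def TransformShapes(self, shapes):
    return shapes

  def TransformDTypes(self, dtypes):
    return dtypes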
class EntryPreprocessor(Preprocessor):
"""A Preprocessor that transforms a NestedMap sub-structure.
Some preprocessors want to apply a function to any NestedMap whose key matches
a specific prefix. An EntryPreprocessor provides an interface for specifying
the function transformation for a NestedMap of inputs, adding, modifying, or
deleting the entries in that NestedMap.
For example, if an input contains a nested structure such as:
- lasers.front.xyz
.features
- lasers.side.xyz
.features
and one wants to apply a transform that modifies the .xyz features
on both structures, one can define an EntryPreprocessor that implements:
UpdateEntry(entry):
UpdateEntryShape(shapes):
UpdateEntryDType(dtypes):
and set self.params.prefixes = ['lasers.front', 'lasers.side']
where the prefixes refer to a fully-qualified NestedMap sub-structure.
The arguments to these functions will contain just the NestedMap structure
whose key prefix can be found in self.params.prefixes. One can then modify
these structures as desired.
Example:
def UpdateEntry(self, entry):
# entry is a NestedMap.
assert 'xyz' in entry
entry.xyz = self._ApplyFn(entry.xyz)
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('prefixes', ['pseudo_ri'], 'List of keys to apply to.')
return p
def _ApplyToMatchingStructure(self, nested_map, fn):
"""Apply fn to any NestedMap sub-structure whose prefix is in p.prefixes."""
p = self.params
# Don't mutate the original.
nested_map = nested_map.DeepCopy()
updated_entries = []
for prefix in p.prefixes:
entry = nested_map.GetItem(prefix)
if not isinstance(entry, py_utils.NestedMap):
raise TypeError('Prefix key {} selected a {}, not a NestedMap!'.format(
prefix, type(entry)))
fn(entry)
updated_entries.append(entry)
return nested_map, updated_entries
def UpdateEntry(self, entry):
"""Update the Tensors in a NestedMap entry.
Args:
entry: A NestedMap of Tensors.
"""
raise NotImplementedError()
def UpdateEntryShape(self, shapes):
"""Update the shapes in a NestedMap entry.
Args:
shapes: A NestedMap of TensorShapes.
"""
raise NotImplementedError()
def UpdateEntryDType(self, dtypes):
"""Transform the dtypes in a NestedMap entry.
Args:
dtypes: A NestedMap of dtypes.
"""
raise NotImplementedError()
def TransformFeatures(self, features):
features, _ = self._ApplyToMatchingStructure(features, self.UpdateEntry)
return features
def TransformShapes(self, shapes):
shapes, _ = self._ApplyToMatchingStructure(shapes, self.UpdateEntryShape)
return shapes
def TransformDTypes(self, dtypes):
dtypes, _ = self._ApplyToMatchingStructure(dtypes, self.UpdateEntryDType)
return dtypes
class CreateDecoderCopy(Preprocessor):
"""Creates references to current lasers, images, and labels.
This is useful if the data is further transformed.
If desired, the keys that are copied can be customized by overriding the
default keys param.
This preprocessor expects features to optionally contain the following keys:
- lasers - a NestedMap of tensors
- images - a NestedMap of tensors
- labels - a NestedMap of tensors
Adds the following features (if the features existed):
- decoder_copy.lasers - a copy of the lasers NestedMap
- decoder_copy.images - a copy of the images NestedMap
- decoder_copy.labels - a copy of the labels NestedMap
The processor also by default pads the laser features; this can be disabled
by setting the pad_lasers param to None.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keys', ['lasers', 'labels', 'images'],
'Keys to look for and copy if exists.')
p.Define('parent_key', 'decoder_copy', 'The key to nest the copies under.')
p.Define('pad_lasers', PadLaserFeatures.Params(),
'Params for a layer that pads the laser features.')
p.name = 'create_decoder_copy'
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.pad_lasers is not None:
self.CreateChild('pad_lasers', p.pad_lasers)
def _DeepCopyIfExists(self, keys, nested_map, parent_key):
    """Copies each key in `keys` under `parent_key` if it exists in `nested_map`."""
    for key in keys:
      if key in nested_map:
        if parent_key not in nested_map:
          nested_map[parent_key] = py_utils.NestedMap()
        nested_map[parent_key][key] = nested_map[key].DeepCopy()
    return nested_map
def TransformFeatures(self, features):
p = self.params
features = self._DeepCopyIfExists(p.keys, features, p.parent_key)
if p.pad_lasers is not None:
features[p.parent_key] = self.pad_lasers.TransformFeatures(
features[p.parent_key])
return features
def TransformShapes(self, shapes):
p = self.params
shapes = self._DeepCopyIfExists(p.keys, shapes, p.parent_key)
if p.pad_lasers is not None:
shapes[p.parent_key] = self.pad_lasers.TransformShapes(
shapes[p.parent_key])
return shapes
def TransformDTypes(self, dtypes):
p = self.params
dtypes = self._DeepCopyIfExists(p.keys, dtypes, p.parent_key)
if p.pad_lasers is not None:
dtypes[p.parent_key] = self.pad_lasers.TransformDTypes(
dtypes[p.parent_key])
return dtypes
class FilterByKey(Preprocessor):
"""Filters features to keep only specified keys.
This keeps only feature entries that are specified. This allows us to reduce
the number of fields returned. For example, during training, one may not
need the actual laser points if training with a pillars-based model that
has a preprocessor that already maps the points to a grid.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'keep_key_prefixes', [''], 'Prefixes of keys to keep. If this '
'contains the empty string, then it will keep all the keys.')
return p
def _FilterFn(self, key, entry):
"""Filter a nested map."""
del entry # unused
p = self.params
for prefix in p.keep_key_prefixes:
if key.startswith(prefix):
return True
return False
def TransformFeatures(self, features):
return features.FilterKeyVal(self._FilterFn)
def TransformShapes(self, shapes):
return shapes.FilterKeyVal(self._FilterFn)
def TransformDTypes(self, dtypes):
return dtypes.FilterKeyVal(self._FilterFn)
class FilterGroundTruthByNumPoints(Preprocessor):
"""Removes ground truth boxes with less than params.min_num_points points.
This preprocessor expects features to contain the following keys::
labels.labels of shape [..., L]
labels.bboxes_3d of shape [..., L, 7]
labels.bboxes_3d_mask of shape [..., L]
labels.unfiltered_bboxes_3d_mask of shape [..., L]
labels.bboxes_3d_num_points of shape [..., L].
Modifies the bounding box data to turn off ground truth objects that don't
meet the params.min_num_points point filter:
labels.labels: Boxes with less than params.min_num_points have their label
set to params.background_id (defaults to 0).
labels.bboxes_3d_mask: Boxes with less than params.min_num_points are set
to 0.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'min_num_points', 1, 'The minimum number of points allowed before '
'the associated ground truth box is turned off. Defaults to 1.')
p.Define(
'background_id', 0, 'The ID of the background class we set '
'filtered boxes to. Defaults to 0.')
return p
def TransformFeatures(self, features):
p = self.params
bbox_is_valid = tf.greater_equal(features.labels.bboxes_3d_num_points,
p.min_num_points)
features.labels.labels = tf.where(
bbox_is_valid, features.labels.labels,
p.background_id * tf.ones_like(features.labels.labels))
features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class FilterGroundTruthByDifficulty(Preprocessor):
"""Removes groundtruth boxes based on detection difficulty.
This preprocessor expects features to contain the following keys::
labels.single_frame_detection_difficulties of shape [..., L]
labels.labels of shape [..., L]
labels.bboxes_3d_mask of shape [..., L]
labels.unfiltered_bboxes_3d_mask of shape [..., L]
The preprocessor masks out the bboxes_3d_mask / labels based on whether
single_frame_detection_difficulties is greater than p.difficulty_threshold.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'background_id', 0, 'The ID of the background class we set '
'filtered boxes to. Defaults to 0.')
p.Define(
'difficulty_threshold', 1,
'Filter groundtruth bounding boxes whose detection difficulty is '
'greater than `difficulty_threshold`')
return p
def TransformFeatures(self, features):
p = self.params
bbox_is_valid = tf.less_equal(
features.labels.single_frame_detection_difficulties,
p.difficulty_threshold)
features.labels.labels = tf.where(
bbox_is_valid, features.labels.labels,
p.background_id * tf.ones_like(features.labels.labels))
features.labels.bboxes_3d_mask *= tf.cast(bbox_is_valid, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class CountNumberOfPointsInBoxes3D(Preprocessor):
"""Computes bboxes_3d_num_points.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
- labels.bboxes_3d_mask of shape [L]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Adds the following features:
labels.bboxes_3d_num_points: [L] - integer tensor containing the number of
laser points for each corresponding bbox.
"""
def TransformFeatures(self, features):
points_xyz = features.lasers.points_xyz
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz,
features.labels.bboxes_3d)
bboxes_3d_num_points = tf.reduce_sum(
tf.cast(points_in_bboxes_mask, tf.int32), axis=0, keepdims=False)
bboxes_3d_num_points *= tf.cast(features.labels.bboxes_3d_mask, tf.int32)
features.labels.bboxes_3d_num_points = bboxes_3d_num_points
return features
def TransformShapes(self, shapes):
num_bboxes = shapes.labels.bboxes_3d[0]
shapes.labels.bboxes_3d_num_points = tf.TensorShape([num_bboxes])
return shapes
def TransformDTypes(self, dtypes):
dtypes.labels.bboxes_3d_num_points = tf.int32
return dtypes
class AddPerPointLabels(Preprocessor):
"""Computes the class and bbox id of each point.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
- labels.labels of shape [L]
This makes the assumption that each point is in only one box, which should
almost always be true in 3D. In cases where this is not true, the largest
label integer and largest bbox_id will be assigned.
NOTE: Be very careful that this is performed after any modifications
to the semantic labels of each point in the pointcloud. Examples of this
would be operators like GroundTruthAugmentation, or DropBoxesOutOfRange.
Adds the following features:
lasers.points_label: [P] - integer tensor containing the class id of each
point.
lasers.points_bbox_id: [P] - integer tensor containing box id of each
point from 0 to num_bboxes, where an id of num_bboxes indicates a
background point.
lasers.points_bbox_3d: [P, 7] - float tensor containing bounding box of
each point.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'per_dimension_adjustment', None,
'A list of len 3 of floats with the amount (in meters) to add to '
'each dimension of the box before using it to select points. '
'If enabled, this is designed to protect against overly tight box '
'annotations that appear in KITTI.')
return p
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
bboxes_3d = features.labels.bboxes_3d
num_points, _ = py_utils.GetShape(points_xyz)
num_bboxes, _ = py_utils.GetShape(bboxes_3d)
if p.per_dimension_adjustment:
if len(p.per_dimension_adjustment) != 3:
raise ValueError(
'param `per_dimension_adjustment` expected to be len 3.')
dims_adjustment = tf.constant([0, 0, 0] + p.per_dimension_adjustment +
[0])
bboxes_3d = bboxes_3d + dims_adjustment
# Find which points are in each box and what class each box is.
points_in_bboxes_mask = geometry.IsWithinBBox3D(points_xyz, bboxes_3d)
points_in_bboxes_mask = tf.cast(points_in_bboxes_mask, tf.int32)
points_in_bboxes_mask = py_utils.HasShape(points_in_bboxes_mask,
[num_points, num_bboxes])
# points_in_bboxes_mask is a [num_points, num_bboxes] 0/1 tensor
# indicating whether that point is in a given box.
# Each point should only be in one box, so after broadcasting the label
# across the binary mask, we do a reduce_max to get the max label id
# for each point. Since each point only belongs to one box, it will be
# the only non-zero (background) label in that box.
# Note: We assume background to be class_id == 0
points_label = tf.reduce_max(
points_in_bboxes_mask * features.labels.labels, axis=1)
points_bbox_id = tf.argmax(
points_in_bboxes_mask, axis=1, output_type=tf.int32)
# If the class is background, make its id == num_bboxes
points_bbox_id = tf.where(points_label > 0, points_bbox_id,
tf.broadcast_to(num_bboxes, [num_points]))
# For each point, get the bbox_3d data.
dummy_bbox = tf.constant([[0, 0, 0, 0, 0, 0, 0]], dtype=tf.float32)
bboxes_3d = tf.concat([bboxes_3d, dummy_bbox], axis=0)
points_bbox_3d = tf.gather(bboxes_3d, points_bbox_id)
points_label = tf.reshape(points_label, [num_points])
points_bbox_id = tf.reshape(points_bbox_id, [num_points])
features.lasers.points_label = points_label
features.lasers.points_bbox_id = points_bbox_id
features.lasers.points_bbox_3d = points_bbox_3d
return features
def TransformShapes(self, shapes):
num_points = shapes.lasers.points_xyz[0]
shapes.lasers.points_label = tf.TensorShape([num_points])
shapes.lasers.points_bbox_id = tf.TensorShape([num_points])
shapes.lasers.points_bbox_3d = tf.TensorShape([num_points, 7])
return shapes
def TransformDTypes(self, dtypes):
dtypes.lasers.points_label = tf.int32
dtypes.lasers.points_bbox_id = tf.int32
dtypes.lasers.points_bbox_3d = tf.float32
return dtypes
class PointsToGrid(Preprocessor):
"""Bins points to a 3D-grid using custom op: ops.point_to_grid.
Expects features to have keys:
- lasers.points_xyz of shape [P, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
If normalizing the labels is enabled, then also expects:
- labels.weights
- labels.bboxes_td
- labels.bboxes_td_mask
- labels.bboxes_3d_mask
Let:
gx, gy, gz = p.grid_size
F = 3 + num_laser_features
Adds the following features:
grid_centers: [gx, gy, gz, 3]: For each grid cell, the (x,y,z)
floating point coordinate of its center.
grid_num_points: [gx, gy, gz]: The number of points in each grid
cell (integer).
laser_grid: [gx, gy, gz, num_points_per_cell, F] - A 5D floating
point Tensor containing the laser data placed into a fixed grid.
Modifies the bboxes in labels to also be within the grid range x/y by default.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 100,
'The maximum number of points per cell.')
p.Define('grid_size', (40, 40, 1), 'Grid size along x,y,z axis.')
# The max range of x and y is [-80, 80].
p.Define('grid_range_x', (-80, 80), 'The X-axis Range covered by the grid')
p.Define('grid_range_y', (-80, 80), 'The Y-axis Range covered by the grid')
p.Define('grid_range_z', (-2, 4), 'The Z-axis Range covered by the grid')
p.Define('normalize_td_labels', True,
'Whether to clip the labels to the grid limits.')
return p
def _NormalizeLabels(self, ymin, xmin, ymax, xmax, x_range, y_range):
"""Normalizes the bboxes within a given range."""
assert x_range, 'Must specify x_range if clipping.'
assert y_range, 'Must specify y_range if clipping.'
assert len(x_range) == 2, 'x_range %s must be 2 elements.' % x_range
assert len(y_range) == 2, 'y_range %s must be 2 elements.' % y_range
x_range_min = x_range[0]
x_range_len = x_range[1] - x_range[0]
y_range_min = y_range[0]
y_range_len = y_range[1] - y_range[0]
xmin = tf.cast(xmin - x_range_min, tf.float32) / tf.cast(
x_range_len, tf.float32)
xmax = tf.cast(xmax - x_range_min, tf.float32) / tf.cast(
x_range_len, tf.float32)
ymin = tf.cast(ymin - y_range_min, tf.float32) / tf.cast(
y_range_len, tf.float32)
ymax = tf.cast(ymax - y_range_min, tf.float32) / tf.cast(
y_range_len, tf.float32)
return ymin, xmin, ymax, xmax
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if ('points_padding' in features.lasers and
features.lasers.points_padding is not None):
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_feature = tf.boolean_mask(points_feature, points_mask)
points_full = tf.concat([points_xyz, points_feature], axis=-1)
points_grid_full, grid_centers, num_points = ops.point_to_grid(
points_full, p.num_points_per_cell, p.grid_size[0], p.grid_size[1],
p.grid_size[2], p.grid_range_x, p.grid_range_y, p.grid_range_z)
features.laser_grid = points_grid_full
features.grid_centers = grid_centers
features.grid_num_points = num_points
if p.normalize_td_labels:
# Normalize bboxes_td w.r.t grid range.
obb = features.labels
x_range = p.grid_range_x
y_range = p.grid_range_y
ymin, xmin, ymax, xmax = tf.unstack(obb.bboxes_td[..., :4], axis=-1)
ymin, xmin, ymax, xmax = self._NormalizeLabels(
ymin, xmin, ymax, xmax, x_range=x_range, y_range=y_range)
obb.bboxes_td = tf.concat(
[tf.stack([ymin, xmin, ymax, xmax], axis=-1), obb.bboxes_td[..., 4:]],
axis=-1)
return features
def TransformShapes(self, shapes):
p = self.params
shapes.grid_centers = tf.TensorShape(list(p.grid_size) + [3])
shapes.grid_num_points = tf.TensorShape(list(p.grid_size))
shapes.laser_grid = tf.TensorShape(
list(p.grid_size) +
[p.num_points_per_cell, 3 + shapes.lasers.points_feature[-1]])
return shapes
def TransformDTypes(self, dtypes):
dtypes.grid_centers = tf.float32
dtypes.grid_num_points = tf.int32
dtypes.laser_grid = tf.float32
return dtypes
class _PointPillarGridSettings:
"""Settings for PointPillars model defined in paper.
https://arxiv.org/abs/1812.05784
"""
# Chooses grid sizes that are a multiple of 16 to support point pillars
# model requirements. These also happen to match the values
# in the PointPillars paper (voxel width of 0.16m in x, y)
GRID_X = 432
GRID_Y = 496
GRID_Z = 1
# These fields are set in the subclasses.
GRID_X_RANGE = None
GRID_Y_RANGE = None
GRID_Z_RANGE = None
@classmethod
def UpdateGridParams(cls, grid_params):
"""Apply PointPillars settings to grid_params."""
grid_params.grid_size = (cls.GRID_X, cls.GRID_Y, cls.GRID_Z)
grid_params.grid_range_x = cls.GRID_X_RANGE
grid_params.grid_range_y = cls.GRID_Y_RANGE
grid_params.grid_range_z = cls.GRID_Z_RANGE
@classmethod
def UpdateAnchorGridParams(cls, anchor_params, output_stride=2):
"""Apply PointPillars settings to anchor_params."""
# Set anchor settings to match grid settings.
# Grid size for anchors is half the resolution.
anchor_params.grid_size = (cls.GRID_X // output_stride,
cls.GRID_Y // output_stride, cls.GRID_Z)
anchor_params.grid_range_x = cls.GRID_X_RANGE
anchor_params.grid_range_y = cls.GRID_Y_RANGE
# Grid along z axis should be pinned to 0.
anchor_params.grid_range_z = (0, 0)
def MakeGridSettings(grid_x_range, grid_y_range, grid_z_range, grid_x, grid_y,
grid_z):
"""Returns configured class for PointPillar grid settings."""
class GridSettings(_PointPillarGridSettings):
GRID_X_RANGE = grid_x_range
GRID_Y_RANGE = grid_y_range
GRID_Z_RANGE = grid_z_range
GRID_X = grid_x
GRID_Y = grid_y
GRID_Z = grid_z
return GridSettings
PointPillarGridCarSettings = MakeGridSettings(
grid_x_range=(0, 69.12),
grid_y_range=(-39.68, 39.68),
grid_z_range=(-3, 1),
grid_x=432,
grid_y=496,
grid_z=1)
PointPillarGridPedCycSettings = MakeGridSettings(
grid_x_range=(0, 47.36),
grid_y_range=(-19.84, 19.84),
grid_z_range=(-2.5, 0.5),
grid_x=432,
grid_y=496,
grid_z=1)
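# A usage sketch for the grid settings classes above: they are applied to a
# params object rather than instantiated. Here the car settings are copied
# onto a PointsToGrid params instance; the helper itself is illustrative and
# not called elsewhere in this file.
def _ExamplePointPillarCarGridParams():
  grid_p = PointsToGrid.Params()
  PointPillarGridCarSettings.UpdateGridParams(grid_p)
  # grid_p.grid_size is now (432, 496, 1), with x range (0, 69.12),
  # y range (-39.68, 39.68) and z range (-3, 1).
  return grid_p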
class GridToPillars(Preprocessor):
"""Create pillars from a grid of points.
Expects features to have keys:
grid_centers: [gx, gy, gz, 3]
grid_num_points: [gx, gy, gz]
laser_grid: [gx, gy, gz, num_points_per_cell, F]
Adds the following features:
point_count: [num_pillars]. The number of points in the pillar.
point_locations: [num_pillars, 3]. The grid location of each pillar.
pillar_points: [num_pillars, num_points_per_cell, F]. Points of each
pillar.
Drops the following features by default:
laser_grid
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 100,
'The maximum number of points per cell.')
p.Define('num_pillars', 12000, 'The maximum number of pillars to produce.')
p.Define('drop_laser_grid', True, 'Whether to drop the laser_grid feature.')
# The density based sampler is more expensive.
p.Define('use_density_sampler', False,
'Use a density based sampler during pillar selection.')
return p
def _GumbelTransform(self, probs):
"""Adds gumbel noise to log probabilities for multinomial sampling.
This enables fast sampling from a multinomial distribution without
replacement. See https://arxiv.org/abs/1611.01144 for details.
A colab that demonstrates this in practice is here:
http://colab/drive/1iuMt2n_r7dKPQG9T0UVMuK3fkbBayKjd
Args:
probs: A 1-D float tensor containing probabilities, summing to 1.
Returns:
A 1-D float tensor of the same size of probs, with gumbel noise added to
log probabilities. Taking the top k elements from this provides a
multinomial sample without replacement.
"""
p = self.params
log_prob = tf.math.log(probs)
probs_shape = tf.shape(probs)
uniform_samples = tf.random.uniform(
shape=probs_shape,
dtype=probs.dtype,
seed=p.random_seed,
name='uniform_samples')
gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))
return gumbel_noise + log_prob
def _DensitySample(self, num_points):
p = self.params
# Flatten to [nx * ny * nz] for convenience during sampling.
num_grid_points = np.prod(p.grid_size)
flattened_num_points = tf.reshape(num_points, [num_grid_points])
# Normalize flattened_num_points to sum to 1.
flattened_num_points = tf.cast(flattened_num_points, tf.float32)
flattened_num_points /= tf.reduce_sum(flattened_num_points)
# TODO(jngiam): Consider generalizing this to enable other methods of
# sampling: e.g., use largest deviation in z-axis. The gumbel transform
# can still be applied regardless.
# Add gumbel noise for multinomial sampling.
sampling_logits = self._GumbelTransform(flattened_num_points)
_, locations = tf.nn.top_k(
sampling_logits, k=min(p.num_pillars, num_grid_points))
# Unravel coordinates back to grid locations.
locations = tf.unravel_index(locations, p.grid_size)
# Unravel index will return a 3 x num_locations tensor, this needs to be
# transposed so that we have it as num_locations x 3.
locations = py_utils.HasShape(locations, [3, -1])
locations = tf.transpose(locations)
return locations
def TransformFeatures(self, features):
p = self.params
num_points = features.grid_num_points
if p.use_density_sampler:
locations = self._DensitySample(num_points)
else:
# Select non-empty cells uniformly at random.
locations = tf.random.shuffle(tf.cast(tf.where(num_points > 0), tf.int32))
num_features = py_utils.GetShape(features.laser_grid)[-1]
# [nx, ny, nz, np, 4] (x, y, z, f)
points = features.laser_grid
# [K, np, 4] (x, y, z, f)
points = tf.gather_nd(points, locations)
# [nx, ny, nz, 1, 3] (cx, cy, cz)
centers = features.grid_centers[..., tf.newaxis, :]
# [K, 1, 3] (cx, cy, cz)
centers = tf.gather_nd(centers, locations)
# NOTE: If there are fewer pillars than p.num_pillars, the following
# padding creates many 'fake' pillars at grid cell (0, 0, 0) with
# an all-zero pillar. Hopefully, the model can learn to ignore these.
#
# pillar_points[i, :, :] is the pillar located at pillar_locations[i, :3],
# and pillar_points[i, :, :] == points_grid_full[pillar_locations[i, :3]].
# for 0 <= i < pillar_count;
# pillar_locations[i, :3] are zero-ed, for i >= pillar_count.
features.pillar_count = tf.shape(locations)[0]
features.pillar_locations = py_utils.PadOrTrimTo(locations,
[p.num_pillars, 3])
features.pillar_points = py_utils.PadOrTrimTo(
points, [p.num_pillars, p.num_points_per_cell, num_features])
features.pillar_centers = py_utils.PadOrTrimTo(centers,
[p.num_pillars, 1, 3])
if p.drop_laser_grid:
del features['laser_grid']
return features
def TransformShapes(self, shapes):
p = self.params
num_features = shapes.laser_grid[-1]
shapes.pillar_count = tf.TensorShape([])
shapes.pillar_locations = tf.TensorShape([p.num_pillars, 3])
shapes.pillar_points = tf.TensorShape(
[p.num_pillars, p.num_points_per_cell, num_features])
shapes.pillar_centers = tf.TensorShape([p.num_pillars, 1, 3])
if p.drop_laser_grid:
del shapes['laser_grid']
return shapes
def TransformDTypes(self, dtypes):
p = self.params
dtypes.pillar_count = tf.int32
dtypes.pillar_locations = tf.int32
dtypes.pillar_points = tf.float32
dtypes.pillar_centers = tf.float32
if p.drop_laser_grid:
del dtypes['laser_grid']
return dtypes
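# A standalone sketch of the Gumbel-top-k trick used by
# GridToPillars._GumbelTransform above: adding Gumbel noise to log
# probabilities and taking the top k indices yields a multinomial sample
# without replacement. The probabilities below are illustrative assumptions.
def _ExampleGumbelTopKSample(k=2, seed=None):
  probs = tf.constant([0.1, 0.2, 0.3, 0.4])
  uniform_samples = tf.random.uniform(
      tf.shape(probs), dtype=probs.dtype, seed=seed)
  gumbel_noise = -tf.math.log(-tf.math.log(uniform_samples))
  _, indices = tf.nn.top_k(tf.math.log(probs) + gumbel_noise, k=k)
  return indices  # k distinct indices, drawn proportionally to probs.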
class GridAnchorCenters(Preprocessor):
"""Create anchor centers on a grid.
Anchors are placed in the middle of each grid cell. For example, on a 2D grid
range (0 -> 10, 0 -> 10) with a 10 x 5 grid size, the anchors will be placed
at [(0.5, 1), (0.5, 3), ... , (9.5, 7), (9.5, 9)].
Adds the following features:
anchor_centers: [num_locations, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'grid_size', (20, 20, 1), 'Grid size along x,y,z axis. This will '
'be used to generate the anchor center locations. Note that this '
'would likely be different from the grid_* parameters in '
'LaserGridExtractor: the grid extractor may choose to extract '
'points more densely. Instead, this should correspond to the '
'model\'s prediction layer: the predicted anchor box residuals '
'should match this grid.')
p.Define('grid_range_x', (-25, 25), 'The x-axis range covered by the grid.')
p.Define('grid_range_y', (-25, 25), 'The y-axis range covered by the grid.')
p.Define('grid_range_z', (0, 0), 'The z-axis range covered by the grid.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
# Compute the grid cell size and adjust the range sent to dense coordinates
# by half a cell size so as to ensure that the anchors are placed in the
# center of each grid cell.
grid_size_x, grid_size_y, grid_size_z = p.grid_size
grid_cell_sizes = [
float(p.grid_range_x[1] - p.grid_range_x[0]) / grid_size_x,
float(p.grid_range_y[1] - p.grid_range_y[0]) / grid_size_y,
float(p.grid_range_z[1] - p.grid_range_z[0]) / grid_size_z,
]
half_size_x, half_size_y, half_size_z = np.asarray(grid_cell_sizes) / 2.0
grid_shape = list(p.grid_size) + [3]
anchor_centers = utils_3d.CreateDenseCoordinates([
[
p.grid_range_x[0] + half_size_x,
p.grid_range_x[1] - half_size_x,
grid_size_x
],
[
p.grid_range_y[0] + half_size_y,
p.grid_range_y[1] - half_size_y,
grid_size_y
],
[
p.grid_range_z[0] + half_size_z,
p.grid_range_z[1] - half_size_z,
grid_size_z
],
]) # pyformat: disable
features.anchor_centers = tf.reshape(anchor_centers, grid_shape)
return features
def TransformShapes(self, shapes):
p = self.params
shapes.anchor_centers = tf.TensorShape(list(p.grid_size) + [3])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
return dtypes
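# A short numpy sketch of the half-cell-offset placement documented in
# GridAnchorCenters: splitting the range (0, 10) into 10 cells puts centers at
# 0.5, 1.5, ..., 9.5, matching the example in the class docstring. The helper
# is illustrative only.
def _ExampleGridCenters1D(range_min=0.0, range_max=10.0, num_cells=10):
  cell_size = (range_max - range_min) / num_cells
  return np.linspace(
      range_min + cell_size / 2.0, range_max - cell_size / 2.0, num_cells)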
class SparseCenterSelector(Preprocessor):
"""Select centers for anchors and cells.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
If lasers.num_seeded_points of shape [] is provided, it indicates that the
first num_seeded_points of lasers.points_xyz should be used as seeds for
farthest point sampling (e.g., always chosen). Currently the concept
of seeding is not implemented for anything but farthest point sampling.
Adds the following features:
anchor_centers: [num_cell_centers, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
cell_center_xyz: [num_cell_centers, 3] - Floating point output containing
the center (x, y, z) locations for each cell to featurize.
"""
_SAMPLING_METHODS = ['farthest_point', 'random_uniform']
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_cell_centers', 256, 'Number of centers.')
p.Define(
'features_preparation_layers', [],
'A list of Params for layers to run on the features before '
'performing farthest point sampling. For example, one may wish to '
'drop points out of frustum for KITTI before selecting centers. '
'Note that these layers will not mutate the original features, '
'instead, a copy will be made.')
p.Define(
'sampling_method', 'farthest_point',
'Which sampling method to use. One of {}'.format(cls._SAMPLING_METHODS))
p.Define(
'fix_z_to_zero', True, 'Whether to fix z to 0 when retrieving the '
'center xyz coordinates.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.sampling_method not in self._SAMPLING_METHODS:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
if p.features_preparation_layers is not None:
self.CreateChildren('features_preparation_layers',
p.features_preparation_layers)
def _FarthestPointSampleCenters(self, points_xyz, num_seeded_points):
"""Samples centers with Farthest Point Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
num_seeded_points: integer indicating how many of the first
num_seeded_points points in points_xyz should be considered
as seeds for FPS (always chosen).
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
num_points = tf.shape(points_xyz)[0]
points_padding = tf.zeros((num_points,), dtype=tf.float32)
padded_num_points = tf.maximum(num_points, p.num_cell_centers)
# Pad both the points and padding if for some reason the input pointcloud
# has less points than p.num_cell_centers.
points_xy = py_utils.PadOrTrimTo(points_xyz[:, :2], [padded_num_points, 2])
points_padding = py_utils.PadOrTrimTo(
points_padding, [padded_num_points], pad_val=1.0)
sampled_idx, _ = car_lib.FarthestPointSampler(
points_xy[tf.newaxis, ...],
points_padding[tf.newaxis, ...],
p.num_cell_centers,
num_seeded_points=num_seeded_points,
random_seed=p.random_seed)
sampled_idx = sampled_idx[0, :]
# Gather centers.
if p.fix_z_to_zero:
centers = tf.concat([
tf.gather(points_xy, sampled_idx),
tf.zeros((p.num_cell_centers, 1)),
], axis=-1) # pyformat: disable
else:
centers = tf.gather(points_xyz, sampled_idx)
return centers
def _RandomUniformSampleCenters(self, points_xyz):
"""Samples centers with Random Uniform Sampling.
Args:
points_xyz: An unpadded tf.float32 Tensor of shape [P, 3] with per point
(x, y, z) locations. We expect any padded points to be removed before
this function is called.
Returns:
A tf.float32 Tensor of shape [p.num_cell_centers, 3] with selected centers
to use as anchors.
"""
p = self.params
# We want the center Z value to be 0 so just exclude it
centers_xy = tf.random.shuffle(points_xyz[:, :2], seed=p.random_seed)
selected_centers_xy = py_utils.PadOrTrimTo(centers_xy,
[p.num_cell_centers, 2])
return tf.concat([selected_centers_xy,
tf.zeros((p.num_cell_centers, 1))],
axis=-1)
def _SampleCenters(self, points_xyz, num_seeded_points):
p = self.params
if p.sampling_method == 'farthest_point':
return self._FarthestPointSampleCenters(points_xyz, num_seeded_points)
elif p.sampling_method == 'random_uniform':
if num_seeded_points > 0:
raise NotImplementedError(
'Random sampling with seeded points not yet implemented.')
return self._RandomUniformSampleCenters(points_xyz)
else:
raise ValueError('Param `sampling_method` must be one of {}.'.format(
self._SAMPLING_METHODS))
def TransformFeatures(self, features):
p = self.params
prepared_features = features.DeepCopy()
for prep_layer in self.features_preparation_layers:
prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
num_seeded_points = prepared_features.lasers.get('num_seeded_points', 0)
points_data = prepared_features.lasers
points_xyz = points_data.points_xyz
if 'points_padding' in points_data:
points_padding = points_data.points_padding
points_mask = 1 - points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
centers = self._SampleCenters(points_xyz, num_seeded_points)
centers = py_utils.HasShape(centers, [p.num_cell_centers, 3])
features.anchor_centers = centers
features.cell_center_xyz = centers
return features
def TransformShapes(self, shapes):
p = self.params
shapes.anchor_centers = tf.TensorShape([p.num_cell_centers, 3])
shapes.cell_center_xyz = tf.TensorShape([p.num_cell_centers, 3])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
dtypes.cell_center_xyz = tf.float32
return dtypes
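# A hedged configuration sketch for SparseCenterSelector: switching from the
# default farthest point sampling to random uniform sampling. The value of
# num_cell_centers is an illustrative assumption.
def _ExampleRandomUniformCenterSelectorParams():
  p = SparseCenterSelector.Params()
  p.num_cell_centers = 128
  p.sampling_method = 'random_uniform'
  return p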
class SparseCellGatherFeatures(Preprocessor):
"""Select local features for each cell.
This preprocessor expects features to contain:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
- cell_center_xyz of shape [C, 3]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Adds the following features:
cell_points_xyz: [num_centers, num_points_per_cell, 3] - Floating point
output containing the (x, y, z) locations for each point for a given
center.
cell_feature: [num_centers, num_points_per_cell, F] - Floating point output
containing the features for each point for a given center.
cell_points_padding: [num_centers, num_points_per_cell] - 0/1 padding
for the points in each cell.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_points_per_cell', 128, 'The number of points per cell.')
p.Define('max_distance', 3.0, 'Max distance of point to cell center.')
p.Define(
'sample_neighbors_uniformly', False,
'Whether to sample the neighbor points for every cell center '
'uniformly at random. If False, this will default to selecting by '
'distance.')
return p
def TransformFeatures(self, features):
p = self.params
num_centers = py_utils.GetShape(features.cell_center_xyz, 1)[0]
num_features = py_utils.GetShape(features.lasers.points_feature)[-1]
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(points_xyz, points_mask)
points_feature = tf.boolean_mask(points_feature, points_mask)
# Note: points_xyz and points_feature must be unpadded as we pass
# padding=None to neighborhood indices. Ensuring that it is unpadded
# helps improve performance.
# Get nearby points using kNN.
sample_indices, sample_indices_padding = car_lib.NeighborhoodIndices(
tf.expand_dims(points_xyz, 0),
tf.expand_dims(features.cell_center_xyz, 0),
p.num_points_per_cell,
points_padding=None,
max_distance=p.max_distance,
sample_neighbors_uniformly=p.sample_neighbors_uniformly)
# Take first example since NeighborhoodIndices expects batch dimension.
sample_indices = sample_indices[0, :, :]
sample_indices_padding = sample_indices_padding[0, :, :]
sample_indices = py_utils.HasShape(sample_indices,
[num_centers, p.num_points_per_cell])
cell_points_xyz = tf.gather(points_xyz, sample_indices)
cell_points_xyz = py_utils.HasShape(cell_points_xyz,
[num_centers, p.num_points_per_cell, 3])
cell_feature = tf.gather(points_feature, sample_indices)
cell_feature = py_utils.HasShape(
cell_feature, [num_centers, p.num_points_per_cell, num_features])
cell_points_padding = py_utils.HasShape(
sample_indices_padding, [num_centers, p.num_points_per_cell])
features.update({
'cell_points_xyz': cell_points_xyz,
'cell_feature': cell_feature,
'cell_points_padding': cell_points_padding,
})
return features
def TransformShapes(self, shapes):
p = self.params
num_centers = shapes.cell_center_xyz[0]
base_shape = [num_centers, p.num_points_per_cell]
num_features = shapes.lasers.points_feature[-1]
shapes.cell_points_xyz = tf.TensorShape(base_shape + [3])
shapes.cell_feature = tf.TensorShape(base_shape + [num_features])
shapes.cell_points_padding = tf.TensorShape(base_shape)
return shapes
def TransformDTypes(self, dtypes):
dtypes.cell_points_xyz = tf.float32
dtypes.cell_feature = tf.float32
dtypes.cell_points_padding = tf.float32
return dtypes
class SparseCellCentersTopK(Preprocessor):
"""Given selected centers and gathered points/features, apply a filter.
This preprocessor expects features to contain `cell_center_xyz` and all
entries in params.features_to_modify, and that the leading dimension should
all be the same (num_cell_centers from SparseCenterSelector).
We then modify all values in features that are specified in
params.features_to_modify by sorting them with the specified sort function
(specified by params.sort_by) operating on features.cell_center_xyz, and then
taking the top K (specified by params.num_cell_centers) along the first
dimension.
"""
_REGISTERED_SORT_FUNCTIONS = ['distance']
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_cell_centers', 512, 'The number of centers after filtering.')
p.Define(
'sort_by', 'distance', 'A string specifying which sort function '
'to use. Currently we just support `distance`.')
p.Define('features_to_modify', [
'cell_center_xyz', 'anchor_centers', 'cell_points_xyz', 'cell_feature',
'cell_points_padding'
], 'A list of keys from the features dict to modify.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.sort_by not in self._REGISTERED_SORT_FUNCTIONS:
raise ValueError('{} not supported. We only support {}.'.format(
p.sort_by, self._REGISTERED_SORT_FUNCTIONS))
if len(p.features_to_modify) < 1:
raise ValueError('Need to modify at least one feature.')
def _SortByDistance(self, features):
dist = tf.linalg.norm(features.cell_center_xyz, axis=-1)
return tf.argsort(dist, axis=-1, direction='ASCENDING')
def _Sort(self, features):
p = self.params
if p.sort_by == 'distance':
return self._SortByDistance(features)
else:
raise ValueError('Unsupported sort function: {}.'.format(p.sort_by))
def TransformFeatures(self, features):
p = self.params
sort_indices = self._Sort(features)
sort_indices_top_k = sort_indices[:p.num_cell_centers, ...]
# Gather each of the relevant items
for key in p.features_to_modify:
shape = py_utils.GetShape(features[key])
output_shape = [p.num_cell_centers] + shape[1:]
features[key] = py_utils.PadOrTrimTo(
tf.gather(features[key], sort_indices_top_k), output_shape)
return features
def TransformShapes(self, shapes):
p = self.params
for key in p.features_to_modify:
shapes[key] = tf.TensorShape([p.num_cell_centers] + shapes[key][1:])
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class TileAnchorBBoxes(Preprocessor):
"""Creates anchor_bboxes given anchor_centers.
This preprocessor expects features to contain the following keys:
- anchor_centers of shape [...base shape..., 3]
Adds the following features:
anchor_bboxes: base_shape + [7] - Floating point anchor box
output containing the anchor boxes and the 7 floating point
values for each box that define the box (x, y, z, dx, dy, dz, phi).
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('anchor_box_dimensions', [],
'List of anchor box sizes per center.')
p.Define('anchor_box_offsets', [], 'List of anchor box offsets per center.')
p.Define('anchor_box_rotations', [],
'List of anchor box rotations per center.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
assert p.anchor_box_dimensions
assert p.anchor_box_offsets
assert p.anchor_box_rotations
base_shape = py_utils.GetShape(features.anchor_centers)[:-1]
num_box_per_center = len(p.anchor_box_dimensions)
anchor_centers = tf.reshape(features.anchor_centers, [-1, 3])
anchor_bboxes = utils_3d.MakeAnchorBoxes(
anchor_centers, tf.identity(p.anchor_box_dimensions),
tf.identity(p.anchor_box_offsets), tf.identity(p.anchor_box_rotations))
features.anchor_bboxes = tf.reshape(anchor_bboxes,
base_shape + [num_box_per_center, 7])
return features
def TransformShapes(self, shapes):
p = self.params
base_shape = shapes.anchor_centers[:-1]
num_box_per_center = len(p.anchor_box_dimensions)
shapes.anchor_bboxes = base_shape.concatenate([num_box_per_center, 7])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_bboxes = tf.float32
return dtypes
class _AnchorBoxSettings:
"""Helper class to parameterize and update anchor box settings."""
# Implementations should fill out the following class members.
DIMENSION_PRIORS = []
ROTATIONS = []
CENTER_X_OFFSETS = []
CENTER_Y_OFFSETS = []
CENTER_Z_OFFSETS = []
@classmethod
def NumAnchors(cls):
return np.prod([
len(cls.DIMENSION_PRIORS),
len(cls.ROTATIONS),
len(cls.CENTER_X_OFFSETS),
len(cls.CENTER_Y_OFFSETS),
len(cls.CENTER_Z_OFFSETS)
])
@classmethod
def GenerateAnchorSettings(cls):
"""Generate anchor settings.
Returns:
A `NestedMap` containing three lists of the same length:
- anchor_box_dimensions
- anchor_box_rotations
- anchor_box_offsets
These can be used with the TileAnchorBBoxes preprocessor.
"""
anchor_box_dimensions = []
anchor_box_rotations = []
anchor_box_offsets = []
# The following is equivalent to a formulation of itertools.product, but
# is explicitly listed for readability.
# *Please note*: The ordering is important for ModelV2, which makes
# assumptions that the offset dimensions come first.
for cx in cls.CENTER_X_OFFSETS:
for cy in cls.CENTER_Y_OFFSETS:
for cz in cls.CENTER_Z_OFFSETS:
for rot in cls.ROTATIONS:
for dims in cls.DIMENSION_PRIORS:
anchor_box_dimensions += [dims]
anchor_box_rotations += [rot]
anchor_box_offsets += [(cx, cy, cz)]
# Check one of the lists has entries.
assert anchor_box_dimensions
return py_utils.NestedMap(
anchor_box_dimensions=anchor_box_dimensions,
anchor_box_rotations=anchor_box_rotations,
anchor_box_offsets=anchor_box_offsets)
@classmethod
def Update(cls, params):
"""Updates anchor box settings from input configuration lists.
Given dimensions priors, rotations, and offsets, computes the cartesian
product of the settings.
Args:
params: The KITTIAnchorExtractorBase.Params() object to update.
Returns:
Params updated with the anchor settings.
In total there are N combinations, where each (anchor_box_dimensions[i],
anchor_box_rotations[i], anchor_box_offsets[i]) for i in range(N) is an
option.
"""
p = params
settings = cls.GenerateAnchorSettings()
p.anchor_box_dimensions = settings.anchor_box_dimensions
p.anchor_box_rotations = settings.anchor_box_rotations
p.anchor_box_offsets = settings.anchor_box_offsets
return p
def MakeAnchorBoxSettings(dimension_priors, rotations, center_x_offsets,
center_y_offsets, center_z_offsets):
"""Returns a configured class for setting anchor box settings."""
class CustomAnchorBoxSettings(_AnchorBoxSettings):
DIMENSION_PRIORS = dimension_priors
ROTATIONS = rotations
CENTER_X_OFFSETS = center_x_offsets
CENTER_Y_OFFSETS = center_y_offsets
CENTER_Z_OFFSETS = center_z_offsets
return CustomAnchorBoxSettings
class SparseCarV1AnchorBoxSettings(_AnchorBoxSettings):
"""Anchor box settings for training on Cars for Sparse models."""
# Borrowed from PointPillar dimension prior for cars.
DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
# 4 Rotations with axis aligned and both diagonals.
ROTATIONS = [0, np.pi / 2, np.pi / 4, 3 * np.pi / 4]
# 25 offsets per anchor box with fixed z offset at -1.
CENTER_X_OFFSETS = np.linspace(-1.5, 1.5, 5)
CENTER_Y_OFFSETS = np.linspace(-1.5, 1.5, 5)
CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsCar(_AnchorBoxSettings):
DIMENSION_PRIORS = [(1.6, 3.9, 1.56)]
ROTATIONS = [0, np.pi / 2]
# Fixed offset for every anchor box, based on a reading of the paper / code
# 0 offsets for x and y, and -1 for z.
CENTER_X_OFFSETS = [0.]
CENTER_Y_OFFSETS = [0.]
CENTER_Z_OFFSETS = [-1.]
class PointPillarAnchorBoxSettingsPed(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 0.8, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsCyc(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 1.76, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
class PointPillarAnchorBoxSettingsPedCyc(PointPillarAnchorBoxSettingsCar):
DIMENSION_PRIORS = [(0.6, 0.8, 1.7), (0.6, 1.76, 1.73)]
CENTER_Z_OFFSETS = [-0.6]
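# A small worked check of the cartesian-product arithmetic behind
# _AnchorBoxSettings.NumAnchors(): SparseCarV1AnchorBoxSettings defines
# 1 dimension prior x 4 rotations x 5 x-offsets x 5 y-offsets x 1 z-offset,
# i.e. 100 anchors per center, while PointPillarAnchorBoxSettingsCar defines
# 1 x 2 x 1 x 1 x 1 = 2 anchors per center.
def _ExampleNumAnchors():
  return (SparseCarV1AnchorBoxSettings.NumAnchors(),  # 100
          PointPillarAnchorBoxSettingsCar.NumAnchors())  # 2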
class AnchorAssignment(Preprocessor):
"""Perform anchor assignment on the features.
This preprocessor expects features to contain the following keys:
- anchor_bboxes of shape [...base shape..., 7]
- labels.bboxes_3d
- labels.labels
- labels.bboxes_3d_mask
Adds the following features:
anchor_localization_residuals: base_shape + [7] floating point tensor of
residuals. The model is expected to regress against these residuals as
targets. The residuals can be converted back into bboxes using
detection_3d_lib.Utils3D.ResidualsToBBoxes.
assigned_gt_idx: base_shape - The corresponding index of the ground
truth bounding box for each anchor box in anchor_bboxes, anchors not
assigned will have idx be set to -1.
assigned_gt_bbox: base_shape + [7] - The corresponding ground
truth bounding box for each anchor box in anchor_bboxes.
assigned_gt_labels: base_shape - The assigned groundtruth label
for each anchor box.
assigned_gt_similarity_score: base_shape - The similarity score
for each assigned anchor box.
assigned_cls_mask: base_shape mask for classification loss per anchor.
This should be 1.0 if the anchor has a foreground or background
assignment; otherwise, it will be assigned to 0.0.
assigned_reg_mask: base_shape mask for regression loss per anchor.
This should be 1.0 if the anchor has a foreground assignment;
otherwise, it will be assigned to 0.0.
Note: background anchors do not have regression targets.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'foreground_assignment_threshold', 0.5,
'Score (usually IOU) threshold for assigning a box as foreground.')
p.Define(
'background_assignment_threshold', 0.35,
'Score (usually IOU) threshold for assigning a box as background.')
return p
def TransformFeatures(self, features):
p = self.params
utils_3d = detection_3d_lib.Utils3D()
# anchor_bboxes will be returned with shape [#centers, #boxes_per_center, 7]
# flatten boxes here for matching.
base_shape = py_utils.GetShape(features.anchor_bboxes)[:-1]
anchor_bboxes = tf.reshape(features.anchor_bboxes, [-1, 7])
assigned_anchors = utils_3d.AssignAnchors(
anchor_bboxes,
features.labels.bboxes_3d,
features.labels.labels,
features.labels.bboxes_3d_mask,
foreground_assignment_threshold=p.foreground_assignment_threshold,
background_assignment_threshold=p.background_assignment_threshold)
# Add new features.
features.assigned_gt_idx = tf.reshape(assigned_anchors.assigned_gt_idx,
base_shape)
features.assigned_gt_bbox = tf.reshape(assigned_anchors.assigned_gt_bbox,
base_shape + [7])
features.assigned_gt_labels = tf.reshape(
assigned_anchors.assigned_gt_labels, base_shape)
features.assigned_gt_similarity_score = tf.reshape(
assigned_anchors.assigned_gt_similarity_score, base_shape)
features.assigned_cls_mask = tf.reshape(assigned_anchors.assigned_cls_mask,
base_shape)
features.assigned_reg_mask = tf.reshape(assigned_anchors.assigned_reg_mask,
base_shape)
# Compute residuals.
features.anchor_localization_residuals = utils_3d.LocalizationResiduals(
features.anchor_bboxes, features.assigned_gt_bbox)
return features
def TransformShapes(self, shapes):
base_shape = shapes.anchor_bboxes[:-1]
box_shape = base_shape.concatenate([7])
shapes.anchor_localization_residuals = box_shape
shapes.assigned_gt_idx = base_shape
shapes.assigned_gt_bbox = box_shape
shapes.assigned_gt_labels = base_shape
shapes.assigned_gt_similarity_score = base_shape
shapes.assigned_cls_mask = base_shape
shapes.assigned_reg_mask = base_shape
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_localization_residuals = tf.float32
dtypes.assigned_gt_idx = tf.int32
dtypes.assigned_gt_bbox = tf.float32
dtypes.assigned_gt_labels = tf.int32
dtypes.assigned_gt_similarity_score = tf.float32
dtypes.assigned_cls_mask = tf.float32
dtypes.assigned_reg_mask = tf.float32
return dtypes
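# One illustrative way to chain the anchor-related preprocessors above:
# generate centers on a grid, tile anchor boxes at each center using the
# PointPillars car settings, then assign ground truth to every anchor. The
# exact composition used by a given model may differ; this is a sketch only.
def _ExampleAnchorPipelineParams():
  center_p = GridAnchorCenters.Params()
  tile_p = PointPillarAnchorBoxSettingsCar.Update(TileAnchorBBoxes.Params())
  assign_p = AnchorAssignment.Params()
  return [center_p, tile_p, assign_p]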
class DropLaserPointsOutOfRange(Preprocessor):
"""Drops laser points that are out of pre-defined x/y/z ranges.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Modifies the following features:
Removes or sets padding to 1 for all points outside a given range. Modifies
all items in the lasers subdictionary like lasers.points_xyz,
lasers.points_feature, lasers.points_padding, and optionally
lasers.points_label, lasers.points_bbox_id.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_x_range', (-np.inf, np.inf),
'Only points that have x coordinates within this range are kept.')
p.Define('keep_y_range', (-np.inf, np.inf),
'Only points that have y coordinates within this range are kept.')
p.Define(
'keep_z_range', (-np.inf, np.inf),
'Only points that have z coordinates within this range are kept. '
'Approximate ground-removal can be performed by specifying a '
'lower-bound on the z-range.')
return p
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
if 'points_padding' in features.lasers:
points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
else:
# All points are real, we keep points unpadded by applying boolean_mask
# on points_mask later.
points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
min_x, max_x = p.keep_x_range
min_y, max_y = p.keep_y_range
min_z, max_z = p.keep_z_range
# Short-circuit if all ranges are set to -inf, inf.
if (np.all(np.isneginf([min_x, min_y, min_z])) and
np.all(np.isposinf([max_x, max_y, max_z]))):
return features
if min_x != -np.inf:
points_mask &= points_xyz[:, 0] >= min_x
if min_y != -np.inf:
points_mask &= points_xyz[:, 1] >= min_y
if min_z != -np.inf:
points_mask &= points_xyz[:, 2] >= min_z
if max_x != np.inf:
points_mask &= points_xyz[:, 0] <= max_x
if max_y != np.inf:
points_mask &= points_xyz[:, 1] <= max_y
if max_z != np.inf:
points_mask &= points_xyz[:, 2] <= max_z
if 'points_padding' in features.lasers:
# Suffices to just update the padding.
features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
else:
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(points_mask))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
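# A hedged configuration sketch for the approximate ground removal mentioned
# in the DropLaserPointsOutOfRange docstring: keep only points above a fixed
# z threshold. The -1.35m value is an illustrative assumption, not a value
# used elsewhere in this file.
def _ExampleGroundRemovalParams():
  p = DropLaserPointsOutOfRange.Params()
  p.keep_z_range = (-1.35, np.inf)
  return p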
class KITTIDropPointsOutOfFrustum(Preprocessor):
"""Drops laser points that are outside of the camera frustum.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
- images.velo_to_image_plane of shape [3, 4]
- images.width of shape [1]
- images.height of shape [1]
and optionally points_padding of shape [P] corresponding to the padding.
if points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz, lasers.points_feature, lasers.points_padding, and
optionally lasers.points_label, lasers.points_bbox_id so that
points outside the frustum have padding set to 1 or are removed.
"""
def TransformFeatures(self, features):
# Drop points behind the car (behind x-axis = 0).
images = features.images
front_indices = features.lasers.points_xyz[:, 0] >= 0
if 'points_padding' not in features.lasers:
# Keep tensors unpadded and small using boolean_mask.
features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
front_indices)
features.lasers.points_feature = tf.boolean_mask(
features.lasers.points_feature, front_indices)
# Drop those points outside the image plane.
points_image = geometry.PointsToImagePlane(features.lasers.points_xyz,
images.velo_to_image_plane)
in_image_plane = (
(points_image[:, 0] >= 0) &
(points_image[:, 0] <= tf.cast(images.width, tf.float32)) &
(points_image[:, 1] >= 0) &
(points_image[:, 1] <= tf.cast(images.height, tf.float32)))
if 'points_padding' in features.lasers:
# Update padding to only include front indices and in image plane.
points_mask = tf.cast(1 - features.lasers.points_padding, tf.bool)
points_mask &= front_indices
points_mask &= in_image_plane
features.lasers.points_padding = 1. - tf.cast(points_mask, tf.float32)
else:
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(in_image_plane))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomWorldRotationAboutZAxis(Preprocessor):
"""Rotates the world randomly as a form of data augmentation.
Rotations are performed around the *z-axis*. This assumes that the car is
always level. In general, we'd like to instead rotate the car on the spot,
this would then make sense for cases where the car is on a slope.
When there are leading dimensions, this will rotate the boxes with the same
transformation across all the frames. This is useful when the input is a
sequence of frames from the same run segment.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [..., 3]
- labels.bboxes_3d of shape [..., 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same rotation applied to both.
Adds the following features:
world_rot_z which contains the rotation applied to the example.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'max_rotation', None,
'The rotation amount will be randomly picked from '
'[-max_rotation, max_rotation).')
p.Define(
'include_world_rot_z', True,
'Whether to include the applied rotation as an additional tensor. '
'It can be helpful to disable this when using the preprocessor in a '
'way that expects the structure of the features to be the same '
'(e.g., as a branch in tf.cond).')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.max_rotation is None:
raise ValueError('max_rotation needs to be specified, instead of None.')
def TransformFeatures(self, features):
p = self.params
rot = tf.random.uniform((),
minval=-p.max_rotation,
maxval=p.max_rotation,
seed=p.random_seed)
    # Rotating about the z-axis corresponds to a change in yaw.
pose = [0., 0., 0., rot, 0., 0.]
# Rotate points.
features.lasers.points_xyz = geometry.CoordinateTransform(
features.lasers.points_xyz, pose)
# Rotate bboxes, note that heading has a special case.
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_dims = features.labels.bboxes_3d[..., 3:6]
bboxes_rot = features.labels.bboxes_3d[..., 6:]
bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)
# The heading correction should subtract rot from the bboxes rotations.
bboxes_rot = geometry.WrapAngleRad(bboxes_rot - rot)
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
if p.include_world_rot_z:
features.world_rot_z = rot
return features
def TransformShapes(self, shapes):
if self.params.include_world_rot_z:
shapes.world_rot_z = tf.TensorShape([])
return shapes
def TransformDTypes(self, dtypes):
if self.params.include_world_rot_z:
dtypes.world_rot_z = tf.float32
return dtypes
class DropPointsOutOfFrustum(Preprocessor):
"""Drops points outside of pre-defined theta / phi ranges.
  Note that the ranges for keep_phi_range can be negative; this is because the
phi values wrap around 2*pi. Thus, a valid range that filters the 90 deg
frontal field of view of the car can be specified as [-pi/4, pi/4].
This preprocessor expects features to contain the following keys:
    - lasers.points_xyz of shape [P, 3]
    - lasers.points_feature of shape [P, K]
Modifies the following features:
- lasers.points_xyz removing any points out of frustum.
- lasers.points_feature removing any points out of frustum.
Note: We expect a downstream processor that filters out boxes with few points
to drop the corresponding bboxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_theta_range', (0., np.pi),
'Only points that have theta coordinates within this range.')
p.Define('keep_phi_range', (0., 2. * np.pi),
'Only points that have phi coordinates within this range.')
return p
def TransformFeatures(self, features):
p = self.params
if 'points_padding' in features.lasers:
raise ValueError('DropPointsOutOfFrustum preprocessor does not support '
'padded lasers.')
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
min_theta, max_theta = p.keep_theta_range
if (min_theta < 0. or min_theta > np.pi or max_theta < 0. or
max_theta > np.pi):
raise ValueError('Valid values for theta are between 0 and pi, '
'keep_theta_range={}'.format(p.keep_theta_range))
if min_theta > max_theta:
raise ValueError('min_theta must be <= max_theta, '
'keep_theta_range={}'.format(p.keep_theta_range))
min_phi, max_phi = p.keep_phi_range
if (min_phi < -2. * np.pi or min_phi > 2. * np.pi or
max_phi < -2. * np.pi or max_phi > 2. * np.pi):
raise ValueError('Valid values for phi are between -2*pi and 2*pi,'
'keep_phi_range={}'.format(p.keep_phi_range))
if min_phi > max_phi:
raise ValueError('min_phi must be <= max_phi, '
'keep_phi_range={}'.format(p.keep_phi_range))
_, theta, phi = tf.unstack(
geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
# phi is returned in range [-pi, pi], we shift the values which are between
# [-pi, 0] to be [pi, 2pi] instead to make the logic below easier to follow.
# Hence, all phi values after this will be [0, 2pi].
phi = tf.where(phi >= 0., phi, 2. * np.pi + phi)
    # Theta does not have circular boundary conditions, so a simple check
    # suffices.
points_mask = (theta >= min_theta) & (theta <= max_theta)
if min_phi < 0. and max_phi < 0.:
      # Both are less than zero; we just add 2pi and use the regular
      # check.
min_phi += 2. * np.pi
max_phi += 2. * np.pi
if min_phi < 0.:
# The minimum threshold is below 0, so we split into checking between
# (0 to min_phi) and (0 to max_phi). Note that min_phi is negative, but
# phi is always positive, so we take 2*pi + min_phi to get the range of
# appropriate values.
points_mask &= (phi >= (2. * np.pi + min_phi)) | (phi <= max_phi)
else:
# Both must be greater than 0 if we get to this condition.
assert min_phi >= 0.
assert max_phi >= 0.
points_mask &= (phi >= min_phi) & (phi <= max_phi)
features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
features.lasers.points_feature = tf.boolean_mask(points_feature,
points_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class DropBoxesOutOfRange(Preprocessor):
"""Drops boxes outside of pre-defined x/y/z ranges (boundaries inclusive).
This preprocessor expects features to contain the following keys:
- labels.bboxes_3d of shape [N, 7]
- labels.bboxes_3d_mask of shape [N]
Modifies the following features:
- labels.bboxes_3d_mask to mask out any additional boxes.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_x_range', (-np.inf, np.inf),
'Only boxes that have x coordinates within this range are kept.')
p.Define('keep_y_range', (-np.inf, np.inf),
'Only boxes that have y coordinates within this range are kept.')
p.Define('keep_z_range', (-np.inf, np.inf),
'Only boxes that have z coordinates within this range are kept.')
return p
def TransformFeatures(self, features):
p = self.params
min_x, max_x = p.keep_x_range
min_y, max_y = p.keep_y_range
min_z, max_z = p.keep_z_range
# Short-circuit if all ranges are set to -inf, inf.
if (np.all(np.isneginf([min_x, min_y, min_z])) and
np.all(np.isposinf([max_x, max_y, max_z]))):
return features
# For each bounding box, compute whether any of its extrema
# fall outside of the range.
bboxes_3d_corners = geometry.BBoxCorners(
features.labels.bboxes_3d[tf.newaxis, ...])[0]
bboxes_3d_corners = py_utils.HasShape(bboxes_3d_corners, [-1, 8, 3])
min_bbox_x = tf.reduce_min(bboxes_3d_corners[:, :, 0], axis=-1)
max_bbox_x = tf.reduce_max(bboxes_3d_corners[:, :, 0], axis=-1)
min_bbox_y = tf.reduce_min(bboxes_3d_corners[:, :, 1], axis=-1)
max_bbox_y = tf.reduce_max(bboxes_3d_corners[:, :, 1], axis=-1)
min_bbox_z = tf.reduce_min(bboxes_3d_corners[:, :, 2], axis=-1)
max_bbox_z = tf.reduce_max(bboxes_3d_corners[:, :, 2], axis=-1)
mask = (
tf.math.logical_and(min_bbox_x >= min_x, max_bbox_x <= max_x)
& tf.math.logical_and(min_bbox_y >= min_y, max_bbox_y <= max_y)
& tf.math.logical_and(min_bbox_z >= min_z, max_bbox_z <= max_z))
max_num_boxes = py_utils.GetShape(features.labels.bboxes_3d_mask)
mask = py_utils.HasShape(mask, max_num_boxes)
features.labels.bboxes_3d_mask *= tf.cast(mask, tf.float32)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class PadLaserFeatures(Preprocessor):
"""Pads laser features so that the dimensions are fixed.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
and optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz and lasers.points_feature to add padding.
Optionally also modifies lasers.points_label and lasers.points_bbox_id
if they exist to add padding.
Modifies/adds the following features:
labels.points_padding of shape [P] representing the padding.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('max_num_points', 128500,
'Max number of points to pad the points to.')
return p
def TransformFeatures(self, features):
p = self.params
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_mask = tf.cast(points_mask, tf.bool)
features.lasers = features.lasers.Transform(
_GetApplyPointMaskFn(points_mask))
npoints = tf.shape(features.lasers.points_xyz)[0]
features.lasers.points_padding = tf.ones([npoints])
shuffled_idx = tf.range(npoints)
shuffled_idx = tf.random.shuffle(shuffled_idx, seed=p.random_seed)
def _PadOrTrimFn(points_tensor):
      # Shuffle before trimming so that we keep a random sample of points.
points_tensor = tf.gather(points_tensor, shuffled_idx)
return py_utils.PadOrTrimTo(points_tensor, [p.max_num_points] +
points_tensor.shape[1:].as_list())
features.lasers = features.lasers.Transform(_PadOrTrimFn)
features.lasers.points_padding = 1.0 - features.lasers.points_padding
return features
def TransformShapes(self, shapes):
p = self.params
def _TransformShape(points_shape):
return tf.TensorShape([p.max_num_points] + points_shape[1:].as_list())
shapes.lasers = shapes.lasers.Transform(_TransformShape)
shapes.lasers.points_padding = tf.TensorShape([p.max_num_points])
return shapes
def TransformDTypes(self, dtypes):
dtypes.lasers.points_padding = tf.float32
return dtypes
class WorldScaling(Preprocessor):
"""Scale the world randomly as a form of data augmentation.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same scaling applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('scaling', None, 'The scaling range.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.scaling is None:
raise ValueError('scaling needs to be specified, instead of None.')
if len(p.scaling) != 2:
raise ValueError('scaling needs to be a list of two elements.')
def TransformFeatures(self, features):
p = self.params
scaling = tf.random.uniform((),
minval=p.scaling[0],
maxval=p.scaling[1],
seed=p.random_seed,
dtype=features.lasers.points_xyz.dtype)
# Scale points [num_points, 3].
features.lasers.points_xyz *= scaling
# Scaling bboxes (location and dimensions).
bboxes_xyz = features.labels.bboxes_3d[..., :3] * scaling
bboxes_dims = features.labels.bboxes_3d[..., 3:6] * scaling
bboxes_rot = features.labels.bboxes_3d[..., 6:]
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomDropLaserPoints(Preprocessor):
"""Randomly dropout laser points and the corresponding features.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, K]
Modifies the following features:
lasers.points_xyz, lasers.points_feature.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('keep_prob', 0.95, 'Probability for keeping points.')
return p
def TransformFeatures(self, features):
p = self.params
num_points, _ = py_utils.GetShape(features.lasers.points_xyz)
pts_keep_sample_prob = tf.random.uniform([num_points],
minval=0,
maxval=1,
seed=p.random_seed)
pts_keep_mask = pts_keep_sample_prob < p.keep_prob
if 'points_padding' in features.lasers:
# Update points_padding so that where pts_keep_mask is True,
# points_padding remains 0.
points_mask = 1 - features.lasers.points_padding
points_mask *= tf.cast(pts_keep_mask, tf.float32)
features.lasers.points_padding = 1 - points_mask
else:
features.lasers.points_xyz = tf.boolean_mask(features.lasers.points_xyz,
pts_keep_mask)
features.lasers.points_feature = tf.boolean_mask(
features.lasers.points_feature, pts_keep_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomFlipY(Preprocessor):
"""Flip the world along axis Y as a form of data augmentation.
When there are leading dimensions, this will flip the boxes with the same
transformation across all the frames. This is useful when the input is a
sequence of frames from the same run segment.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [..., 3]
- labels.bboxes_3d of shape [..., 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same flipping applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('flip_probability', 0.5, 'Probability of flipping.')
return p
def TransformFeatures(self, features):
p = self.params
threshold = 1. - p.flip_probability
choice = tf.random.uniform(
(), minval=0.0, maxval=1.0, seed=p.random_seed) >= threshold
# Flip points
points_xyz = features.lasers.points_xyz
points_y = tf.where(choice, -points_xyz[..., 1:2], points_xyz[..., 1:2])
features.lasers.points_xyz = tf.concat(
[points_xyz[..., 0:1], points_y, points_xyz[..., 2:3]], axis=-1)
# Flip boxes
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_y = tf.where(choice, -bboxes_xyz[..., 1:2], bboxes_xyz[..., 1:2])
bboxes_xyz = tf.concat(
[bboxes_xyz[..., 0:1], bboxes_y, bboxes_xyz[..., 2:3]], axis=-1)
# Compensate rotation.
bboxes_dims = features.labels.bboxes_3d[..., 3:6]
bboxes_rot = features.labels.bboxes_3d[..., 6:]
bboxes_rot = tf.where(choice, geometry.WrapAngleRad(-bboxes_rot),
bboxes_rot)
features.labels.bboxes_3d = tf.concat([bboxes_xyz, bboxes_dims, bboxes_rot],
axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class GlobalTranslateNoise(Preprocessor):
"""Add global translation noise of xyz coordinates to points and boxes.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- labels.bboxes_3d of shape [L, 7]
Modifies the following features:
lasers.points_xyz, labels.bboxes_3d with the same
random translation noise applied to both.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('noise_std', [0.2, 0.2, 0.2],
'Standard deviation of translation noise per axis.')
return p
def TransformFeatures(self, features):
p = self.params
    # Derive three different seeds from the same base seed so that the noise
    # values differ across the x, y, and z axes.
base_seed = p.random_seed
x_seed = base_seed
y_seed = None if base_seed is None else base_seed + 1
z_seed = None if base_seed is None else base_seed + 2
random_translate_x = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[0],
seed=x_seed)
random_translate_y = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[1],
seed=y_seed)
random_translate_z = tf.random.normal((),
mean=0.0,
stddev=p.noise_std[2],
seed=z_seed)
pose = tf.stack([
random_translate_x, random_translate_y, random_translate_z, 0.0, 0.0,
0.0
],
axis=0)
# Translate points.
points_xyz = features.lasers.points_xyz
features.lasers.points_xyz = geometry.CoordinateTransform(points_xyz, pose)
# Translate boxes
bboxes_xyz = features.labels.bboxes_3d[..., :3]
bboxes_xyz = geometry.CoordinateTransform(bboxes_xyz, pose)
features.labels.bboxes_3d = tf.concat(
[bboxes_xyz, features.labels.bboxes_3d[..., 3:]], axis=-1)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomBBoxTransform(Preprocessor):
"""Randomly transform bounding boxes and the points inside them.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, K]
- lasers.points_padding of shape [P]
- labels.bboxes_3d of shape [L, 7]
- labels.bboxes_3d_mask of shape [L]
Modifies the following features:
lasers.points_{xyz,feature,padding}, labels.bboxes_3d with the
transformed bounding boxes and points.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'max_rotation', None,
'The rotation amount will be randomly picked from '
'[-max_rotation, max_rotation).')
# At the moment we don't use this because it can cause boxes to collide with
# each other. We need to compute box intersections when deciding whether to
# apply the translation jitter. Theoretically we should also do this for
# rotation.
p.Define('noise_std', [0.0, 0.0, 0.0],
'Standard deviation of translation noise per axis.')
p.Define(
'max_scaling', None,
'When max_scaling is not none, delta parameters s_x, s_y, s_z are '
        'drawn from [-max_scaling[i], max_scaling[i]] where i is in [0, 2].')
p.Define(
'max_shearing', None,
'When max_shearing is not none, shearing parameters sh_x^y, sh_x^z, '
'sh_y^x, sh_y^z, sh_z^x, sh_z^y are drawn from '
'[-max_shearing[i], max_shearing[i]], where i is in [0, 5].')
p.Define(
'max_num_points_per_bbox', 16384,
'The maximum number of points that fall within a bounding box. '
'Bounding boxes with more points than this value will '
        'have some points dropped.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.max_rotation is None:
raise ValueError('max_rotation needs to be specified, instead of None.')
if p.max_scaling is not None:
if len(p.max_scaling) != 3:
raise ValueError('max_scaling needs to be specified as either None or '
'list of 3 floating point numbers, instead of {}.'
''.format(p.max_scaling))
if p.max_shearing is not None:
if len(p.max_shearing) != 6:
raise ValueError('max_shearing needs to be specified as either None or '
'list of 6 floating point numbers, instead of {}.'
''.format(p.max_shearing))
def _Foreground(self, features, points_xyz, points_feature, real_bboxes_3d,
points_in_bbox_mask, rotation, translate_pose, transform_fn):
"""Extract and transform foreground points and features."""
out_bbox_xyz, out_bbox_feature, out_bbox_mask = self._ForLoopBuffers(
features)
# Only iterate over the actual number of boxes in the scene.
actual_num_bboxes = tf.reduce_sum(
tf.cast(features.labels.bboxes_3d_mask, tf.int32))
ret = py_utils.ForLoop(
body=transform_fn,
start=0,
limit=actual_num_bboxes,
delta=1,
loop_state=py_utils.NestedMap(
points_xyz=points_xyz,
points_feature=points_feature,
bboxes_3d=real_bboxes_3d,
points_in_bbox_mask=points_in_bbox_mask,
rotation=rotation,
translate_pose=translate_pose,
out_bbox_points=out_bbox_xyz,
out_bbox_feature=out_bbox_feature,
out_bbox_mask=out_bbox_mask))
# Gather all of the transformed points and features
out_bbox_xyz = tf.reshape(ret.out_bbox_points, [-1, 3])
num_features = features.lasers.points_feature.shape[-1]
out_bbox_feature = tf.reshape(ret.out_bbox_feature, [-1, num_features])
out_bbox_mask = tf.cast(tf.reshape(ret.out_bbox_mask, [-1]), tf.bool)
fg_xyz = tf.boolean_mask(out_bbox_xyz, out_bbox_mask)
fg_feature = tf.boolean_mask(out_bbox_feature, out_bbox_mask)
return fg_xyz, fg_feature
def _Background(self, points_xyz, points_feature, points_in_bbox_mask):
# If a point is in any bounding box, it is a foreground point.
foreground_points_mask = tf.reduce_any(points_in_bbox_mask, axis=-1)
# All others are background. We rotate all of the foreground points to
# final_points_* and keep the background points unchanged
background_points_mask = tf.math.logical_not(foreground_points_mask)
background_points_xyz = tf.boolean_mask(points_xyz, background_points_mask)
background_points_feature = tf.boolean_mask(points_feature,
background_points_mask)
return background_points_xyz, background_points_feature
def _ForLoopBuffers(self, features):
"""Create and return the buffers for the for loop."""
p = self.params
bboxes_3d = features.labels.bboxes_3d
# Compute the shapes and create the buffers for the For loop.
max_num_bboxes = tf.shape(bboxes_3d)[0]
per_box_shape = [max_num_bboxes, p.max_num_points_per_bbox, 3]
out_bbox_points = inplace_ops.empty(
per_box_shape, dtype=tf.float32, init=True)
num_features = features.lasers.points_feature.shape[-1]
bbox_feature_shape = [
max_num_bboxes, p.max_num_points_per_bbox, num_features
]
out_bbox_feature = inplace_ops.empty(
bbox_feature_shape, dtype=tf.float32, init=True)
per_box_mask_shape = [max_num_bboxes, p.max_num_points_per_bbox]
out_bbox_mask = inplace_ops.empty(
per_box_mask_shape, dtype=tf.float32, init=True)
return out_bbox_points, out_bbox_feature, out_bbox_mask
def TransformFeatures(self, features):
p = self.params
num_features = features.lasers.points_feature.shape[-1]
def Transform(i, state):
"""Transform the points in bounding box `i`."""
state.points_xyz = tf.reshape(state.points_xyz, [-1, 3])
bbox_mask = tf.reshape(state.points_in_bbox_mask[:, i], [-1])
# Fetch only the points in the bounding box.
points_xyz_masked = tf.boolean_mask(state.points_xyz, bbox_mask)
points_feature_masked = tf.boolean_mask(state.points_feature, bbox_mask)
num_points = tf.shape(points_xyz_masked)[0]
# TODO(vrv): Fold the following into a single transformation
# matrix.
#
# Translate the box to the origin, then rotate the desired
# rotation angle.
translation_vec = state.bboxes_3d[i, 0:3]
rotation_vec = [state.rotation[i], 0., 0.]
pose = tf.concat([-translation_vec, rotation_vec], axis=0)
points_xyz_adj = geometry.CoordinateTransform(points_xyz_masked, pose)
if p.max_scaling is not None or p.max_shearing is not None:
# Translate the points in the bounding box by moving dz/2 so that the
        # bottom of the bounding box is at Z = 0 when either max_scaling or
        # max_shearing is not None.
translation_scale_or_shear = tf.stack(
[0., 0., state.bboxes_3d[i, 5] / 2], axis=0)
pose1 = tf.concat([translation_scale_or_shear, [0., 0., 0.]], axis=0)
points_xyz_adj = geometry.CoordinateTransform(points_xyz_adj, pose1)
else:
translation_scale_or_shear = tf.stack([0., 0., 0.], axis=0)
if p.max_scaling is not None:
# Perform scaling to the point cloud
# Scaling matrix
# [[s_x+1 0 0]
# [ 0 s_y+1 0]
# [ 0 0 s_z+1]]
sx = tf.random.uniform([],
minval=-p.max_scaling[0],
maxval=p.max_scaling[0],
seed=p.random_seed)
sy = tf.random.uniform([],
minval=-p.max_scaling[1],
maxval=p.max_scaling[1],
seed=p.random_seed)
sz = tf.random.uniform([],
minval=-p.max_scaling[2],
maxval=p.max_scaling[2],
seed=p.random_seed)
scaling_matrix = tf.stack(
[[sx + 1., 0., 0.], [0., sy + 1., 0.], [0., 0., sz + 1.]], axis=0)
points_xyz_adj = tf.einsum('ij,kj->ki', scaling_matrix, points_xyz_adj)
if p.max_shearing is not None:
# Perform shearing to the point cloud
# Shearing matrix
# [[1 sh_x^y sh_x^z]
# [sh_y^x 1 sh_y^z]
# [sh_z^x sh_z^y 1 ]]
sxy = tf.random.uniform([],
minval=-p.max_shearing[0],
maxval=p.max_shearing[0],
seed=p.random_seed)
sxz = tf.random.uniform([],
minval=-p.max_shearing[1],
maxval=p.max_shearing[1],
seed=p.random_seed)
syx = tf.random.uniform([],
minval=-p.max_shearing[2],
maxval=p.max_shearing[2],
seed=p.random_seed)
syz = tf.random.uniform([],
minval=-p.max_shearing[3],
maxval=p.max_shearing[3],
seed=p.random_seed)
szx = tf.random.uniform([],
minval=-p.max_shearing[4],
maxval=p.max_shearing[4],
seed=p.random_seed)
szy = tf.random.uniform([],
minval=-p.max_shearing[5],
maxval=p.max_shearing[5],
seed=p.random_seed)
shearing_matrix = tf.stack(
[[1., sxy, sxz], [syx, 1., syz], [szx, szy, 1.]], axis=0)
points_xyz_adj = tf.einsum('ij,kj->ki', shearing_matrix, points_xyz_adj)
# Translate the points back, adding noise if needed.
translation_with_noise = (
translation_vec - translation_scale_or_shear +
state.translate_pose[i])
pose2 = tf.concat([translation_with_noise, [0., 0., 0.]], axis=0)
final_points_xyz = geometry.CoordinateTransform(points_xyz_adj, pose2)
# final_points_xyz is an [M, 3] Tensor where M is the number of points in
# the box.
points_mask = tf.ones([num_points], dtype=tf.float32)
final_points_xyz = py_utils.PadOrTrimTo(final_points_xyz,
[p.max_num_points_per_bbox, 3])
final_points_feature = py_utils.PadOrTrimTo(
points_feature_masked, [p.max_num_points_per_bbox, num_features])
points_mask = py_utils.PadOrTrimTo(points_mask,
[p.max_num_points_per_bbox])
state.out_bbox_points = inplace_ops.alias_inplace_update(
state.out_bbox_points, [i], tf.expand_dims(final_points_xyz, 0))
state.out_bbox_feature = inplace_ops.alias_inplace_update(
state.out_bbox_feature, [i], tf.expand_dims(final_points_feature, 0))
state.out_bbox_mask = inplace_ops.alias_inplace_update(
state.out_bbox_mask, [i], tf.expand_dims(points_mask, 0))
return state
# Get the points and features that reside in boxes.
if 'points_padding' in features.lasers:
points_mask = 1 - features.lasers.points_padding
points_xyz = tf.boolean_mask(features.lasers.points_xyz, points_mask)
points_feature = tf.boolean_mask(features.lasers.points_feature,
points_mask)
else:
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
# Fetch real bounding boxes and compute point mask.
real_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d,
features.labels.bboxes_3d_mask)
points_in_bbox_mask = geometry.IsWithinBBox3D(points_xyz, real_bboxes_3d)
# Choose a random rotation for every real box.
num_boxes = tf.shape(real_bboxes_3d)[0]
rotation = tf.random.uniform([num_boxes],
minval=-p.max_rotation,
maxval=p.max_rotation,
seed=p.random_seed)
base_seed = p.random_seed
x_seed = base_seed
y_seed = None if base_seed is None else base_seed + 1
z_seed = None if base_seed is None else base_seed + 2
random_translate_x = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[0],
seed=x_seed)
random_translate_y = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[1],
seed=y_seed)
random_translate_z = tf.random.normal([num_boxes],
mean=0.0,
stddev=p.noise_std[2],
seed=z_seed)
translate_pose = tf.stack(
[random_translate_x, random_translate_y, random_translate_z], axis=1)
fg_xyz, fg_feature = self._Foreground(features, points_xyz, points_feature,
real_bboxes_3d, points_in_bbox_mask,
rotation, translate_pose, Transform)
# Concatenate them with the background points and features.
bg_xyz, bg_feature = self._Background(points_xyz, points_feature,
points_in_bbox_mask)
all_points = tf.concat([bg_xyz, fg_xyz], axis=0)
all_features = tf.concat([bg_feature, fg_feature], axis=0)
# Shuffle the points/features randomly.
all_points, all_features = _ConsistentShuffle((all_points, all_features),
p.random_seed)
# Padding should technically be unnecessary: the number of points before and
# after should be the same, but in practice we sometimes seem to drop a few
# points, and so we pad to make the shape fixed.
#
# TODO(vrv): Identify the source of this problem and then assert a shape
# matching check.
if 'points_padding' in features.lasers:
features.lasers.points_xyz = py_utils.PadOrTrimTo(
all_points, tf.shape(features.lasers.points_xyz))
features.lasers.points_feature = py_utils.PadOrTrimTo(
all_features, tf.shape(features.lasers.points_feature))
total_points = tf.shape(all_points)[0]
features.lasers.points_padding = 1.0 - py_utils.PadOrTrimTo(
tf.ones([total_points]), tf.shape(features.lasers.points_padding))
else:
features.lasers.points_xyz = all_points
features.lasers.points_feature = all_features
# Translate noise.
bboxes_xyz = real_bboxes_3d[..., :3]
bboxes_xyz += translate_pose[..., :3]
bboxes_dim = real_bboxes_3d[..., 3:6]
# Rotate bboxes by their corresponding rotation.
bboxes_rot = real_bboxes_3d[..., 6:]
bboxes_rot -= rotation[:, tf.newaxis]
features.labels.bboxes_3d = py_utils.PadOrTrimTo(
tf.concat([bboxes_xyz, bboxes_dim, bboxes_rot], axis=-1),
tf.shape(features.labels.bboxes_3d))
features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
tf.ones(tf.shape(real_bboxes_3d)[0]),
tf.shape(features.labels.bboxes_3d_mask))
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class GroundTruthAugmentor(Preprocessor):
"""Augment bounding box labels and points from a database.
This preprocessor expects features to contain the following keys:
lasers.points_xyz of shape [P, 3]
lasers.points_feature of shape [P, K]
lasers.points_padding of shape [P]
labels.bboxes_3d of shape [L, 7]
labels.bboxes_3d_mask of shape [L]
labels.labels of shape [L]
Modifies the above features so that additional objects from
a groundtruth database are added.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'groundtruth_database', None,
'If not None, loads groundtruths from this database and adds '
'them to the current scene. Groundtruth database is expected '
'to be a TFRecord of KITTI or Waymo crops.')
p.Define(
'num_db_objects', None,
        'Number of objects in the database. Because we use TFRecord, '
        'we cannot easily query the number of objects efficiently.')
p.Define('max_num_points_per_bbox', 2048,
'Maximum number of points in each bbox to augment with.')
p.Define(
'filter_min_points', 0,
'Minimum number of points each database object must have '
'to be included in an example.')
p.Define(
'filter_max_points', None,
'Maximum number of points each database object must have '
'to be included in an example.')
p.Define(
'difficulty_sampling_probability', None,
'Probability for sampling ground truth example whose difficulty '
'equals {0, 1, 2, 3, ...}. Example: [1.0, 1.0, 1.0, 1.0] for '
        'uniformly sampling 4 different difficulties. Default value is '
'None = uniform sampling for all difficulties.')
p.Define(
'class_sampling_probability', None,
        'Probability for sampling ground truth examples based on their class '
        'index. Example: for KITTI, the classes are [Background, Car, Van, '
        'Truck, Pedestrian, Person_sitting, Cyclist, Tram, Misc, DontCare]; '
        'using the probability vector [0., 1.0, 1.0, 0., 0., 0., 0., 0., 0., '
        '0.], we uniformly sample Car and Van. Default value is None: uses '
        'the label_filter flag and does not sample based on class.')
p.Define('filter_min_difficulty', 0,
'Filter ground truth boxes whose difficulty is < this value.')
p.Define('max_augmented_bboxes', 15,
'Maximum number of augmented bounding boxes per scene.')
p.Define(
'label_filter', [],
'A list where if specified, only examples of these label integers will '
'be included in an example.')
p.Define(
        'batch_mode', False, 'Bool value to control whether the whole '
        'groundtruth database is loaded or partially loaded to save memory '
        'usage. Setting to False loads the whole ground truth database into '
'memory. Otherwise, only a fraction of the data will be loaded into '
'the memory.')
return p
def _ReadDB(self, file_patterns):
"""Read the groundtruth database and return as a NestedMap of Tensors."""
p = self.params
def Process(record):
"""Process a groundtruth record."""
feature_map = {
'num_points': tf.io.FixedLenFeature((), tf.int64, 0),
'points': tf.io.VarLenFeature(dtype=tf.float32),
'points_feature': tf.io.VarLenFeature(dtype=tf.float32),
'bbox_3d': tf.io.VarLenFeature(dtype=tf.float32),
'label': tf.io.FixedLenFeature((), tf.int64, 0),
'difficulty': tf.io.FixedLenFeature((), tf.int64, 0),
'text': tf.io.VarLenFeature(dtype=tf.string),
}
example_data = tf.io.parse_single_example(record, feature_map)
num_points = example_data['num_points']
points = tf.reshape(_Dense(example_data['points']), [num_points, 3])
features = tf.reshape(
_Dense(example_data['points_feature']), [num_points, 1])
points_mask = tf.ones(num_points, dtype=tf.bool)
# TODO(vrv): Use random selection instead of first N points.
points = py_utils.PadOrTrimTo(points, [p.max_num_points_per_bbox, 3])
features = py_utils.PadOrTrimTo(features, [p.max_num_points_per_bbox, 1])
points_mask = py_utils.PadOrTrimTo(points_mask,
[p.max_num_points_per_bbox])
bboxes_3d = tf.reshape(_Dense(example_data['bbox_3d']), [7])
label = tf.cast(example_data['label'], tf.int32)
difficulty = tf.cast(example_data['difficulty'], tf.int32)
return (points, features, points_mask, bboxes_3d, label, difficulty)
if p.batch_mode:
# Prepare dataset for ground truth bounding boxes. Randomly shuffle the
# file patterns.
file_count = len(tf.io.gfile.glob(file_patterns))
dataset = tf.stateless_list_files(file_patterns)
dataset = dataset.apply(tf.stateless_cache_dataset())
dataset = dataset.apply(
tf.stateless_shuffle_dataset(
buffer_size=file_count, reshuffle_each_iteration=True))
dataset = dataset.interleave(
tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
dataset = dataset.repeat()
# Only prefetch a few objects from the database to reduce memory
# consumption.
dataset = dataset.map(Process, num_parallel_calls=10)
# We need more bboxes than max_augmented_bboxes in a batch, because some
# of the boxes are filtered out.
dataset = dataset.batch(p.max_augmented_bboxes * 10)
dataset = dataset.apply(tf.stateless_cache_dataset()).prefetch(
p.max_augmented_bboxes * 30)
else:
# Prepare dataset for ground truth bounding boxes.
dataset = tf.stateless_list_files(file_patterns)
dataset = dataset.interleave(
tf.data.TFRecordDataset, cycle_length=10, num_parallel_calls=10)
# Read the entire dataset into memory.
dataset = dataset.take(p.num_db_objects)
dataset = dataset.map(Process, num_parallel_calls=10)
# We batch the output of the dataset into a very large Tensor, then cache
# it in memory.
dataset = dataset.batch(p.num_db_objects)
dataset = dataset.apply(tf.stateless_cache_dataset()).repeat()
iterator = dataset.make_one_shot_iterator()
input_batch = iterator.get_next()
(db_points_xyz, db_points_feature, db_points_mask, db_bboxes, db_labels,
db_difficulties) = input_batch
return py_utils.NestedMap(
points_xyz=db_points_xyz,
points_feature=db_points_feature,
points_mask=db_points_mask,
bboxes_3d=db_bboxes,
labels=db_labels,
difficulties=db_difficulties)
def _CreateExampleFilter(self, db):
"""Construct db example filter.
Args:
      db: NestedMap of the following Tensors:
        points_mask - [N, P] - The points mask for every object in the
          database, where N is the number of objects and P is the maximum
          number of points per object.
        labels - [N] - int32 Label for each object in the database.
        difficulties - [N] - int32 Difficulty for each label in the database.
Returns:
A [N] boolean Tensor for each object in the database, True if
that corresponding object passes the filter.
"""
p = self.params
db_points_mask = db.points_mask
db_label = db.labels
db_difficulty = db.difficulties
num_objects_in_database = tf.shape(db_points_mask)[0]
# Filter number of objects.
points_per_object = tf.reduce_sum(tf.cast(db_points_mask, tf.int32), axis=1)
example_filter = points_per_object >= p.filter_min_points
if p.filter_max_points:
example_filter = tf.math.logical_and(
example_filter, points_per_object <= p.filter_max_points)
if p.difficulty_sampling_probability is not None:
      # Sample db based on difficulty of each example.
sampling_prob = p.difficulty_sampling_probability
db_difficulty_probability = tf.zeros_like(db_difficulty, dtype=tf.float32)
for difficulty_idx, difficulty_prob in enumerate(sampling_prob):
db_difficulty_probability += (
tf.cast(tf.equal(db_difficulty, difficulty_idx), tf.float32) *
difficulty_prob)
sampled_filter = tf.random.uniform(
tf.shape(example_filter),
minval=0,
maxval=1,
dtype=tf.float32,
seed=p.random_seed)
sampled_filter = sampled_filter < db_difficulty_probability
example_filter &= sampled_filter
else:
# Filter out db examples below min difficulty
example_filter = tf.math.logical_and(
example_filter, db_difficulty >= p.filter_min_difficulty)
example_filter = tf.reshape(example_filter, [num_objects_in_database])
db_label = tf.reshape(db_label, [num_objects_in_database])
if p.class_sampling_probability is not None:
# Sample example based on its class probability.
sampling_prob = p.class_sampling_probability
db_class_probability = tf.zeros_like(db_label, dtype=tf.float32)
for class_idx, class_prob in enumerate(sampling_prob):
db_class_probability += (
tf.cast(tf.equal(db_label, class_idx), tf.float32) * class_prob)
sampled_filter = tf.random.uniform(
tf.shape(example_filter),
minval=0,
maxval=1,
dtype=tf.float32,
seed=p.random_seed)
sampled_filter = sampled_filter < db_class_probability
example_filter &= sampled_filter
elif p.label_filter:
# Filter based on labels.
# Create a label filter where all is false
valid_labels = tf.constant(p.label_filter)
label_mask = tf.reduce_any(
tf.equal(db_label[..., tf.newaxis], valid_labels), axis=1)
example_filter = tf.math.logical_and(example_filter, label_mask)
return example_filter
# TODO(vrv): Create an overlap filter that also ensures that boxes don't
# overlap with groundtruth points, so that the scenes are more plausible.
def _FilterIndices(self, gt_bboxes_3d, db_bboxes, db_idx):
"""Identify database boxes that don't overlap with other boxes."""
# We accomplish overlap filtering by first computing the pairwise 3D IoU of
# all boxes (concatenated) as a way of computing pairwise box overlaps.
num_gt_bboxes = tf.shape(gt_bboxes_3d)[0]
filtered_bboxes = tf.gather(db_bboxes, db_idx)
all_bboxes = tf.concat([gt_bboxes_3d, filtered_bboxes], axis=0)
pairwise_overlap = ops.pairwise_iou3d(all_bboxes, all_bboxes)
# We now have an M x M matrix with 1s on the diagonal and non-zero entries
# whenever a box collides with another.
#
# To increase the number of boxes selected, we filter the upper triangular
# entries so that the boxes are chosen greedily: boxes with smaller indices
# will be selected before later boxes, because earlier boxes will not appear
# to collide with later boxes, but later boxes may collide with earlier
# ones.
pairwise_overlap = tf.linalg.band_part(pairwise_overlap, -1, 0)
# We compute the sum of the IoU overlaps for all database boxes.
db_overlap_sums = tf.reduce_sum(pairwise_overlap[num_gt_bboxes:], axis=1)
# Those boxes that don't overlap with any other boxes will only have
    # a 1.0 IoU with themselves.
non_overlapping_boxes = tf.reshape(db_overlap_sums <= 1., [-1])
# Filter to select only those object ids that pass this filter.
db_idx = tf.boolean_mask(db_idx, non_overlapping_boxes)
return db_idx
def TransformFeatures(self, features):
p = self.params
tf.logging.info('Loading groundtruth database at %s' %
(p.groundtruth_database))
db = p.groundtruth_database.Instantiate().BuildDataSource(self._ReadDB).data
original_features_shape = tf.shape(features.lasers.points_feature)
# Compute the number of bboxes to augment.
num_bboxes_in_scene = tf.reduce_sum(
tf.cast(features.labels.bboxes_3d_mask, tf.int32))
max_bboxes = tf.shape(features.labels.bboxes_3d_mask)[0]
num_augmented_bboxes = tf.minimum(max_bboxes - num_bboxes_in_scene,
p.max_augmented_bboxes)
# Compute an object index over all objects in the database.
num_objects_in_database = tf.shape(db.points_xyz)[0]
db_idx = tf.range(num_objects_in_database)
# Find those indices whose examples pass the filters, and select only those
# indices.
example_filter = self._CreateExampleFilter(db)
db_idx = tf.boolean_mask(db_idx, example_filter)
# At this point, we might still have a large number of object candidates,
# from which we only need a sample.
# To reduce the amount of computation, we randomly subsample to slightly
# more than we want to augment.
db_idx = tf.random.shuffle(
db_idx, seed=p.random_seed)[0:num_augmented_bboxes * 5]
    # After filtering, further filter out the db boxes that would overlap with
# other boxes (including other database boxes).
#
# Gather the filtered ground truth bounding boxes according to the mask, so
# we can compute overlaps below.
gt_bboxes_3d_mask = tf.cast(features.labels.bboxes_3d_mask, tf.bool)
gt_bboxes_3d = tf.boolean_mask(features.labels.bboxes_3d, gt_bboxes_3d_mask)
gt_bboxes_3d = py_utils.HasShape(gt_bboxes_3d, [num_bboxes_in_scene, 7])
db_idx = self._FilterIndices(gt_bboxes_3d, db.bboxes_3d, db_idx)
# From the filtered object ids, select only as many boxes as we need.
shuffled_idx = db_idx[0:num_augmented_bboxes]
num_augmented_bboxes = tf.shape(shuffled_idx)[0]
# Gather based off the indices.
sampled_points_xyz = tf.gather(db.points_xyz, shuffled_idx)
sampled_points_feature = tf.gather(db.points_feature, shuffled_idx)
sampled_mask = tf.reshape(
tf.gather(db.points_mask, shuffled_idx),
[num_augmented_bboxes, p.max_num_points_per_bbox])
sampled_bboxes = tf.gather(db.bboxes_3d, shuffled_idx)
sampled_labels = tf.gather(db.labels, shuffled_idx)
# Mask points/features.
sampled_points_xyz = tf.boolean_mask(sampled_points_xyz, sampled_mask)
sampled_points_feature = tf.boolean_mask(sampled_points_feature,
sampled_mask)
# Flatten before concatenation with ground truths.
sampled_points_xyz = tf.reshape(sampled_points_xyz, [-1, 3])
sampled_points_feature = tf.reshape(sampled_points_feature,
[-1, original_features_shape[-1]])
sampled_bboxes = tf.reshape(sampled_bboxes, [-1, 7])
# Concatenate the samples with the ground truths.
if 'points_padding' in features.lasers:
points_mask = tf.cast(1. - features.lasers.points_padding, tf.bool)
# Densify the original points.
dense_points_xyz = tf.boolean_mask(features.lasers.points_xyz,
points_mask)
dense_points_feature = tf.boolean_mask(features.lasers.points_feature,
points_mask)
      # Concatenate the dense original points with our new sampled points.
points_xyz = tf.concat([dense_points_xyz, sampled_points_xyz], axis=0)
points_feature = tf.concat([dense_points_feature, sampled_points_feature],
axis=0)
original_points_shape = tf.shape(features.lasers.points_xyz)
features.lasers.points_xyz = py_utils.PadOrTrimTo(points_xyz,
original_points_shape)
features.lasers.points_feature = py_utils.PadOrTrimTo(
points_feature, original_features_shape)
# Compute the modified mask / padding.
final_points_mask = py_utils.PadOrTrimTo(
tf.ones(tf.shape(points_xyz)[0]),
tf.shape(features.lasers.points_padding))
features.lasers.points_padding = 1. - final_points_mask
else:
points_xyz = tf.concat([features.lasers.points_xyz, sampled_points_xyz],
axis=0)
points_feature = tf.concat(
[features.lasers.points_feature, sampled_points_feature], axis=0)
features.lasers.points_xyz = points_xyz
features.lasers.points_feature = points_feature
# Reconstruct a new, dense, bboxes_3d vector that includes the filtered
# groundtruth bounding boxes followed by the database augmented boxes.
bboxes_3d = tf.concat([gt_bboxes_3d, sampled_bboxes], axis=0)
bboxes_3d = py_utils.PadOrTrimTo(bboxes_3d, [max_bboxes, 7])
features.labels.bboxes_3d = bboxes_3d
bboxes_3d_mask = tf.ones(
num_bboxes_in_scene + num_augmented_bboxes, dtype=tf.float32)
features.labels.bboxes_3d_mask = py_utils.PadOrTrimTo(
bboxes_3d_mask, [max_bboxes])
gt_labels = tf.boolean_mask(features.labels.labels, gt_bboxes_3d_mask)
gt_labels = py_utils.HasShape(gt_labels, [num_bboxes_in_scene])
labels = tf.concat([gt_labels, sampled_labels], axis=0)
features.labels.labels = py_utils.PadOrTrimTo(labels, [max_bboxes])
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class FrustumDropout(Preprocessor):
"""Randomly drops out points in a frustum.
All points are first converted to spherical coordinates, and then a point
  is randomly selected. All points in the frustum around that point, within a
  given phi and theta angle width and with distance to the origin greater than
  a given value, are dropped with probability = 1 - keep_prob.
Here, we can specify whether the dropped frustum is the union or intersection
of the phi and theta angle filters.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, K]
Optionally points_padding of shape [P] corresponding to the padding.
  If points_padding is None, then all points are considered valid.
Modifies the following features:
lasers.points_xyz, lasers.points_feature, lasers.points_padding with points
randomly dropped out.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('theta_width', 0.03, 'Theta angle width for dropping points.')
p.Define('phi_width', 0.0, 'Phi angle width for dropping points.')
p.Define(
        'distance', 0.0, 'Drop points that have larger distance to the '
        'origin than the value given here.')
p.Define(
        'keep_prob', 0.0, 'keep_prob: 1. = drop no points in the frustum, '
        '0 = drop all points, between 0 and 1 = downsample the points.')
p.Define(
'drop_type', 'union', 'Drop either the union or intersection of '
'phi width and theta width.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.phi_width < 0:
raise ValueError('phi_width must be >= 0, phi_width={}'.format(
p.phi_width))
if p.theta_width < 0:
raise ValueError('theta_width must be >= 0, theta_width={}'.format(
p.theta_width))
if p.distance < 0:
raise ValueError('distance must be >= 0, distance={}'.format(p.distance))
if p.keep_prob < 0 or p.keep_prob > 1:
raise ValueError('keep_prob must be >= 0 and <=1, keep_prob={}'.format(
p.keep_prob))
if p.drop_type not in ['union', 'intersection']:
      raise ValueError('drop_type must be union or intersection, '
                       'drop_type={}'.format(p.drop_type))
def TransformFeatures(self, features):
p = self.params
points_xyz = features.lasers.points_xyz
points_feature = features.lasers.points_feature
if 'points_padding' in features.lasers:
points_padding = features.lasers.points_padding
else:
points_padding = None
if points_padding is not None:
points_mask = tf.cast(1 - points_padding, tf.bool)
num_total_points = py_utils.GetShape(points_mask)[0]
real_points_idx = tf.boolean_mask(
tf.range(0, num_total_points, dtype=tf.int32), points_mask)
num_points = py_utils.GetShape(real_points_idx)[0]
else:
points_mask = tf.ones_like(points_xyz[:, 0], dtype=tf.bool)
num_total_points = py_utils.GetShape(points_mask)[0]
num_points = py_utils.GetShape(points_xyz)[0]
r, theta, phi = tf.unstack(
geometry.SphericalCoordinatesTransform(points_xyz), axis=-1)
def _PickRandomPoint():
point_idx = tf.random.uniform((),
minval=0,
maxval=num_points,
dtype=tf.int32)
if points_padding is not None:
point_idx = real_points_idx[point_idx]
return point_idx
    # Pick a point at random and drop all points that are near that point in
    # the frustum and farther from the origin than p.distance; repeat this for
    # both theta and phi.
if p.theta_width > 0:
theta_half_width = p.theta_width / 2.
point_idx = _PickRandomPoint()
# Points within theta width and further than distance will be dropped.
theta_drop_filter = ((theta < (theta[point_idx] + theta_half_width)) &
(theta > (theta[point_idx] - theta_half_width)) &
(r > p.distance))
else:
theta_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
if p.phi_width > 0:
phi_half_width = p.phi_width / 2.
point_idx = _PickRandomPoint()
# Points within phi width and further than distance will be dropped.
phi_drop_filter = ((phi < (phi[point_idx] + phi_half_width)) &
(phi >
(phi[point_idx] - phi_half_width)) & (r > p.distance))
else:
phi_drop_filter = tf.zeros_like(points_mask, dtype=tf.bool)
# Create drop_filter by combining filters. This contains a filter for the
# points to be removed. One can use the intersection method to limit the
    # dropped points to be within both phi and theta ranges.
if p.drop_type == 'union':
drop_filter = theta_drop_filter | phi_drop_filter
elif p.drop_type == 'intersection':
drop_filter = theta_drop_filter & phi_drop_filter
if p.keep_prob == 0:
# Drop all points in drop_filter.
down_sampling_filter = drop_filter
else:
# Randomly drop points in drop_filter based on keep_prob.
sampling_drop_filter = tf.random.uniform([num_total_points],
minval=0,
maxval=1,
dtype=tf.float32)
# Points greater than the threshold (keep_prob) will be dropped.
sampling_drop_filter = sampling_drop_filter > p.keep_prob
# Instead of dropping all points in the frustum, we drop out points
# that are in the selected frustum (drop_filter).
down_sampling_filter = drop_filter & sampling_drop_filter
points_mask &= ~down_sampling_filter
if points_padding is not None:
features.lasers.points_padding = 1 - tf.cast(points_mask, tf.float32)
else:
features.lasers.points_xyz = tf.boolean_mask(points_xyz, points_mask)
features.lasers.points_feature = tf.boolean_mask(points_feature,
points_mask)
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RepeatPreprocessor(Preprocessor):
"""Repeat a preprocessor multiple times.
This preprocessor takes a preprocessor as a subprocessor and apply the
subprocessor to features multiple times (repeat_count).
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('repeat_count', 1, 'Number of times the subprocessor is applied to'
' features.')
p.Define('subprocessor', None, 'One of the input preprocessors.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.subprocessor is None:
raise ValueError('No subprocessor was specified for RepeatPreprocessor.')
if p.repeat_count < 0 or not isinstance(p.repeat_count, int):
raise ValueError(
'repeat_count must be >= 0 and int, repeat_count={}'.format(
p.repeat_count))
self.CreateChild('subprocessor', p.subprocessor)
def TransformFeatures(self, features):
p = self.params
for _ in range(p.repeat_count):
features = self.subprocessor.FPropDefaultTheta(features)
return features
def TransformShapes(self, shapes):
p = self.params
for _ in range(p.repeat_count):
shapes = self.subprocessor.TransformShapes(shapes)
return shapes
def TransformDTypes(self, dtypes):
p = self.params
for _ in range(p.repeat_count):
dtypes = self.subprocessor.TransformDTypes(dtypes)
return dtypes
class RandomApplyPreprocessor(Preprocessor):
"""Randomly apply a preprocessor with certain probability.
This preprocessor takes a preprocessor as a subprocessor and apply the
subprocessor to features with certain probability.
"""
@classmethod
def Params(cls):
p = super().Params()
    p.Define('prob', 1.0, 'The probability of executing the subprocessor.')
p.Define('subprocessor', None, 'Params for an input preprocessor.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.subprocessor is None:
      raise ValueError(
          'No subprocessor was specified for RandomApplyPreprocessor.')
if p.prob < 0 or p.prob > 1 or not isinstance(p.prob, float):
raise ValueError(
'prob must be >= 0 and <=1 and float type, prob={}'.format(p.prob))
self.CreateChild('subprocessor', p.subprocessor)
def TransformFeatures(self, features):
p = self.params
choice = tf.random.uniform(
(), minval=0.0, maxval=1.0, seed=p.random_seed) <= p.prob
# Features is passed downstream and may be modified, we make deep copies
# here to use with tf.cond to avoid having tf.cond access updated
# versions. Note that we need one copy for each branch in case the branches
# further modify features.
features_0, features_1 = features.DeepCopy(), features.DeepCopy()
features = tf.cond(choice,
lambda: self.subprocessor.TransformFeatures(features_0),
lambda: features_1)
return features
def TransformShapes(self, shapes):
shapes_transformed = self.subprocessor.TransformShapes(shapes)
if not shapes.IsCompatible(shapes_transformed):
raise ValueError(
          'NestedMap structures are different between shapes and transformed '
          'shapes. Original shapes: {}. Transformed shapes: {}'.format(
shapes, shapes_transformed))
def IsCompatibleWith(a, b):
return a.is_compatible_with(b)
if not all(
py_utils.Flatten(
py_utils.Transform(IsCompatibleWith, shapes, shapes_transformed))):
raise ValueError(
'Shapes after transformation - {} are different from original '
'shapes - {}.'.format(shapes_transformed, shapes))
return shapes
def TransformDTypes(self, dtypes):
transformed_dtypes = self.subprocessor.TransformDTypes(dtypes)
if transformed_dtypes != dtypes:
raise ValueError(
'DTypes after transformation of preprocessor - {} should be '
          'the same as {}, but got {}.'.format(self.params.subprocessor, dtypes,
transformed_dtypes))
return dtypes
class ConstantPreprocessor(Preprocessor):
"""Preprocessor that produces specified constant values in a nested output."""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'constants', py_utils.NestedMap(),
'Map of key names to numpy arrays of constant values to use. '
'Must be a NestedMap or dict convertible to NestedMap.')
return p
def TransformFeatures(self, features):
constants = py_utils.NestedMap(self.params.constants)
features.update(constants.Transform(tf.constant))
return features
def TransformShapes(self, shapes):
constants = py_utils.NestedMap(self.params.constants)
shapes.update(
constants.Transform(lambda x: tf.TensorShape(np.array(x).shape)))
return shapes
def TransformDTypes(self, dtypes):
constants = py_utils.NestedMap(self.params.constants)
dtypes.update(constants.Transform(lambda x: tf.as_dtype(np.array(x).dtype)))
return dtypes
class IdentityPreprocessor(Preprocessor):
"""Preprocessor that passes all inputs through.
This may be useful for situations where one wants a 'no-op' preprocessor, such
as being able to randomly choose to do nothing among a set of preprocessor
choices.
"""
def TransformFeatures(self, features):
return features
def TransformShapes(self, shapes):
return shapes
def TransformDTypes(self, dtypes):
return dtypes
class RandomChoicePreprocessor(Preprocessor):
"""Randomly applies a preprocessor with specified weights.
  Each subprocessor is paired with a schedule; at each step the schedules are
  evaluated to produce a vector of relative weights, and one subprocessor is
  sampled in proportion to those weights.
For example, if p.subprocessors = [preprocessor1, preprocessor2] and the
weights are [1., 2.], then preprocessor1 will be applied with probability 1/3,
and preprocessor2 will be applied with probability 2/3.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'subprocessors', [],
'Params for preprocessors. Each value should be a tuple of '
'(Preprocessor.Params(), BaseSchedule.Params()), where the schedule '
'defines the weights to use over time.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if not p.subprocessors:
raise ValueError('No subprocessors were specified.')
subprocessors, schedules = zip(*p.subprocessors)
def _FilterNonSchedules(v):
return not issubclass(getattr(v, 'cls', False), schedule.BaseSchedule)
invalid_values = [_FilterNonSchedules(s) for s in schedules]
if any(invalid_values):
raise TypeError('Not all schedule values were schedules: '
f'{invalid_values}')
self.CreateChildren('subprocessors', list(subprocessors))
self.CreateChildren('schedules', list(schedules))
def TransformFeatures(self, features):
p = self.params
choice_list = []
weight_list = []
# Pass a unique copy of the input to each branch, in case the
# subprocessor destructively modifies the features in unexpected ways.
for subp, sched in zip(self.subprocessors, self.schedules):
choice_list.append(
lambda subp=subp: subp.TransformFeatures(features.DeepCopy()))
weight_list.append(sched.Value())
weight_tensor = tf.stack(weight_list)
chosen_bin = tf.random.categorical(
tf.math.log(weight_tensor[tf.newaxis]),
1,
seed=p.random_seed,
dtype=tf.int32)[0, 0]
features = tf.switch_case(chosen_bin, branch_fns=choice_list)
return features
def TransformShapes(self, shapes):
transformed_shapes = [
subp.TransformShapes(shapes.DeepCopy()) for subp in self.subprocessors
]
if not all(transformed_shapes[0] == curr for curr in transformed_shapes):
raise ValueError('Shapes after transformations were not identical: '
f'{transformed_shapes}')
return transformed_shapes[0]
def TransformDTypes(self, dtypes):
transformed_dtypes = [
subp.TransformDTypes(dtypes.DeepCopy()) for subp in self.subprocessors
]
if not all(transformed_dtypes[0] == curr for curr in transformed_dtypes):
raise ValueError('DTypes after transformations were not identical: '
f'{transformed_dtypes}')
return transformed_dtypes[0]
class SparseSampler(Preprocessor):
"""Fused SparseCenterSelector and SparseCellGatherFeatures.
This preprocessor expects features to contain the following keys:
- lasers.points_xyz of shape [P, 3]
- lasers.points_feature of shape [P, F]
Adds the following features:
anchor_centers - [num_centers, 3] - Floating point output containing the
center (x, y, z) locations for tiling anchor boxes.
cell_center_xyz - [num_centers, 3] - Floating point output containing
the center (x, y, z) locations for each cell to featurize.
cell_center_padding - [num_centers] - 0/1 padding for each center.
cell_points_xyz - [num_centers, num_neighbors, 3] - Floating point
output containing the (x, y, z) locations for each point for a given
center.
cell_feature - [num_centers, num_neighbors, F] - Floating point output
containing the features for each point for a given center.
cell_points_padding - [num_centers, num_neighbors] - 0/1 padding
for the points in each cell.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('center_selector', 'farthest', 'Method to sample centers. '
'Valid options - uniform, farthest.')
p.Define('neighbor_sampler', 'uniform', 'Method to select neighbors. '
'Valid options - uniform, closest.')
p.Define('num_centers', 16, 'The number of centers to sample.')
p.Define(
'features_preparation_layers', [],
'A list of Params for layers to run on the features before '
'performing farthest point sampling. For example, one may wish to '
'drop points out of frustum for KITTI before selecting centers. '
'Note that these layers will not mutate the original features, '
'instead, a copy will be made.')
p.Define(
'keep_z_range', (-np.inf, np.inf),
'Only points that have z coordinates within this range are kept. '
'Approximate ground-removal can be performed by specifying a '
'lower-bound on the z-range.')
p.Define('num_neighbors', 64, 'Sample this many points within the '
'neighborhood.')
p.Define(
'max_distance', 1.0, 'Points with L2 distances from a center '
'larger than this threshold are not considered to be in the '
'neighborhood.')
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if p.features_preparation_layers:
self.CreateChildren('features_preparation_layers',
p.features_preparation_layers)
def TransformFeatures(self, features):
p = self.params
n, m = p.num_centers, p.num_neighbors
prepared_features = features.DeepCopy()
if p.features_preparation_layers:
for prep_layer in self.features_preparation_layers:
prepared_features = prep_layer.FPropDefaultTheta(prepared_features)
points_data = prepared_features.lasers
points = py_utils.HasShape(points_data.points_xyz, [-1, 3])
if 'points_padding' in points_data:
points_mask = 1 - points_data.points_padding
points = tf.boolean_mask(points, points_mask)
# If num_points < num_centers, pad points to have at least num_centers
# points.
num_points = tf.shape(points)[0]
required_num_points = tf.maximum(num_points, p.num_centers)
zeros = tf.zeros([required_num_points - num_points, 3])
points = tf.concat([points, zeros], axis=0)
num_seeded_points = points_data.get('num_seeded_points', 0)
neighbor_algorithm = 'auto'
# Based on benchmarks, the hash solution works better when the number of
# centers is >= 16 and there are at least 10k points per point cloud.
if p.num_centers >= 16:
neighbor_algorithm = 'hash'
centers, center_paddings, indices, indices_paddings = ops.sample_points(
points=tf.expand_dims(points, 0),
points_padding=tf.zeros([1, required_num_points], tf.float32),
num_seeded_points=num_seeded_points,
center_selector=p.center_selector,
neighbor_sampler=p.neighbor_sampler,
neighbor_algorithm=neighbor_algorithm,
num_centers=p.num_centers,
center_z_min=p.keep_z_range[0],
center_z_max=p.keep_z_range[1],
num_neighbors=p.num_neighbors,
max_distance=p.max_distance,
random_seed=p.random_seed if p.random_seed else -1)
centers = py_utils.HasShape(centers, [1, n])[0, :]
center_paddings = py_utils.HasShape(center_paddings, [1, n])[0, :]
indices = py_utils.HasShape(indices, [1, n, m])[0, :]
indices_paddings = py_utils.HasShape(indices_paddings, [1, n, m])[0, :]
features.cell_center_padding = center_paddings
features.cell_center_xyz = py_utils.HasShape(
tf.gather(points, centers), [n, 3])
features.anchor_centers = features.cell_center_xyz
features.cell_points_xyz = py_utils.HasShape(
tf.gather(points, indices), [n, m, 3])
features.cell_feature = tf.gather(points_data.points_feature, indices)
features.cell_points_padding = indices_paddings
return features
def TransformShapes(self, shapes):
p = self.params
n, m, f = p.num_centers, p.num_neighbors, shapes.lasers.points_feature[-1]
shapes.anchor_centers = tf.TensorShape([n, 3])
shapes.cell_center_padding = tf.TensorShape([n])
shapes.cell_center_xyz = tf.TensorShape([n, 3])
shapes.cell_points_xyz = tf.TensorShape([n, m, 3])
shapes.cell_feature = tf.TensorShape([n, m, f])
shapes.cell_points_padding = tf.TensorShape([n, m])
return shapes
def TransformDTypes(self, dtypes):
dtypes.anchor_centers = tf.float32
dtypes.cell_center_padding = tf.float32
dtypes.cell_center_xyz = tf.float32
dtypes.cell_points_xyz = tf.float32
dtypes.cell_feature = tf.float32
dtypes.cell_points_padding = tf.float32
return dtypes
| """Deep copy a specific key to a parent key if it exists."""
for key in keys:
if key in nested_map:
if parent_key not in nested_map:
nested_map[parent_key] = py_utils.NestedMap()
nested_map[parent_key][key] = nested_map[key].DeepCopy()
return nested_map |
API.js | import IconService, { IconConverter } from 'icon-sdk-js'
import { SCORE_NETWORK, SCORE_ENDPOINT, Networks, ICX_TOKEN_CONTRACT, ICX_TOKEN_DECIMALS, MAX_ITERATION_LOOP } from './constants'
// ================================================
// Constants
// ================================================
const SwapCreatedEvent = 'SwapCreatedEvent(int,int,int)'
/*
const SwapSuccessEvent = 'SwapSuccessEvent(int)'
const SwapCancelledEvent = 'SwapCancelledEvent(int)'
const OrderFilledEvent = 'OrderFilledEvent(int)'
const OrderTransferedEvent = 'OrderTransferedEvent(int,Address,int,Address)'
const OrderRefundedEvent = 'OrderRefundedEvent(int)'
*/
// ================================================
// Exceptions
// ================================================
export class SCOREUnhandledError extends Error {
constructor(error, txHash) {
super(error, txHash)
this.name = 'SCOREUnhandledError'
this.txHash = txHash
this.error = error
}
toString() {
console.log(this.error)
return 'Internal Error, please report this transaction hash on Telegram (https://t.me/iconationteam) : ' + this.txHash + ' - Thank you!'
}
}
export class UnconfirmedTransaction extends Error {
constructor(txHash) {
super(txHash)
this.name = 'UnconfirmedTransaction'
}
toString() {
return 'The transaction cannot be confirmed.'
}
}
export class WrongEventSignature extends Error {
constructor(txHash) {
super(txHash)
this.name = 'WrongEventSignature'
}
toString() {
return 'The event received was not expected.'
}
}
// ================================================
// API Implementation
// ================================================
class API {
constructor(network, scoreAddress) {
const iconNetworkInfo = this._getNetworkInfo(network)
this._nid = iconNetworkInfo.nid
this._network = network
this._scoreAddress = scoreAddress
}
_getIconService() {
return new IconService(new IconService.HttpProvider(this.getAPIEndpoint() + '/api/v3'))
}
_getDebugIconService() {
return new IconService(new IconService.HttpProvider(this.getAPIEndpoint() + '/api/debug/v3'))
}
_getNetworkInfo(network) {
const iconNetworksInfo = []
iconNetworksInfo[Networks.LOCALHOST] = {
name: 'localhost',
api: 'http://127.0.0.1:9000',
tracker: 'http://127.0.0.1:9000',
nid: 0
}
iconNetworksInfo[Networks.MAINNET] = {
name: 'MainNet',
api: [
'https://ctz.solidwallet.io'
],
tracker: 'https://tracker.icon.foundation',
nid: 1
}
iconNetworksInfo[Networks.EULJIRO] = {
name: 'Euljiro (TestNet)',
api: 'https://test-ctz.solidwallet.io',
tracker: 'https://trackerdev.icon.foundation',
nid: 2
}
iconNetworksInfo[Networks.YEOUIDO] = {
name: 'Yeouido (TestNet)',
api: 'https://bicon.net.solidwallet.io',
tracker: 'https://bicon.tracker.solidwallet.io',
nid: 3
}
return iconNetworksInfo[network]
}
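/**
 * Behaves like Promise.all, but additionally invokes tickCallback(progress, total)
 * each time one of the given promises resolves.
 */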
progressPromiseAll(promises, tickCallback) {
var len = promises.length;
var progress = 0;
function tick(promise) {
promise.then(function () {
progress++;
tickCallback(progress, len);
});
return promise;
}
return Promise.all(promises.map(tick));
}
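// Returns one API endpoint chosen at random from the configured network's endpoint list.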
getAPIEndpoint() {
const apis = [].concat(this._getNetworkInfo(this._network).api)
return apis[Math.floor(Math.random() * apis.length)];
}
getTrackerEndpoint() {
return this._getNetworkInfo(this._network).tracker
}
getNetworkName() {
return this._getNetworkInfo(this._network).name
}
getWhitelist() {
return this.__call(this._scoreAddress, 'get_whitelist').then(whitelist => {
return whitelist
})
}
getSwap(swapId) {
return this.__call(this._scoreAddress, 'get_swap', { swap_id: IconConverter.toHex(swapId) }).then(swap => {
swap['id'] = swapId
return swap
})
}
getOrder(orderId) {
return this.__call(this._scoreAddress, 'get_order', { order_id: IconConverter.toHex(orderId) }).then(swap => {
return swap
})
}
getTokenDetails(wallet, contract) {
return api.__getBalance(wallet, contract).then(balance => {
if (contract === ICX_TOKEN_CONTRACT) {
return new Promise((resolve, reject) => {
resolve({
name: 'ICX',
symbol: 'ICX',
contract: contract,
decimals: ICX_TOKEN_DECIMALS,
balance: balance
})
})
}
return api.tokenName(contract).then(name => {
return api.tokenSymbol(contract).then(symbol => {
return api.getDecimals(contract).then(decimals => {
return {
name: name,
symbol: symbol,
contract: contract,
decimals: parseInt(decimals, 16),
balance: balance
}
})
})
})
})
}
getDecimals(contract) {
if (contract === ICX_TOKEN_CONTRACT) {
return new Promise((resolve, reject) => {
resolve(ICX_TOKEN_DECIMALS)
})
}
return this.__call(contract, 'decimals').then(decimals => {
return decimals
})
}
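// Calls a paginated read-only SCORE method repeatedly, advancing the 'offset'
// parameter by MAX_ITERATION_LOOP until an empty page is returned, and merges the pages.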
async __callWithOffset(contract, method, params) {
let result = {}
let offset = 0
while (true) {
params['offset'] = IconConverter.toHex(offset)
const orders = await this.__call(contract, method, params)
offset += MAX_ITERATION_LOOP
if (Object.keys(orders).length === 0) {
break
}
result = Object.assign({}, result, orders)
}
return result
}
getPendingOrdersByAddress(walletAddress) {
return this.__callWithOffset(this._scoreAddress, 'get_pending_orders_by_address', { address: walletAddress })
.then(orders => {
return orders
})
}
getFilledOrdersByAddress(walletAddress) {
return this.__callWithOffset(this._scoreAddress, 'get_filled_orders_by_address', { address: walletAddress })
.then(orders => {
return orders
})
}
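// Fills a pending order: ICX orders via a payable 'fill_icx_order' call, IRC2 orders
// by transferring tokens to the SCORE with an embedded 'fill_irc2_order' action.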
fillOrder(walletAddress, swapId, taker_contract, taker_amount) {
swapId = IconConverter.toHex(IconConverter.toBigNumber(swapId))
if (taker_contract === ICX_TOKEN_CONTRACT) {
const value = IconConverter.toHex(taker_amount)
return this.__iconexCallTransaction(
walletAddress,
this._scoreAddress,
'fill_icx_order',
value, { swap_id: swapId }
).then(tx => {
return tx
})
} else {
const value = IconConverter.toHex(taker_amount)
const data = {
'action': 'fill_irc2_order',
'swap_id': swapId
}
const params = {
'_to': this._scoreAddress,
'_value': value,
'_data': IconConverter.toHex(JSON.stringify(data))
}
return this.__iconexCallTransaction(
walletAddress,
taker_contract,
'transfer',
0,
params
).then(tx => {
return tx
})
}
}
cancelSwap(walletAddress, swapId) {
return this.__iconexCallTransaction(walletAddress, this._scoreAddress, 'cancel_swap', 0, { swap_id: IconConverter.toHex(swapId) }).then(txHash => {
return txHash
})
}
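// Creates a new swap (ICX or IRC2 on the maker side) and extracts the swap id
// from the SwapCreatedEvent emitted by the SCORE.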
createSwap(walletAddress, maker_contract, maker_amount, taker_contract, taker_amount) {
const getSwapIdFromTx = async (tx) => {
if (!tx) return null;
const txHash = tx['result']
const txResult = await this.__txResult(txHash)
const eventLogs = txResult['eventLogs'][0]
if (eventLogs['indexed'][0] !== SwapCreatedEvent) {
throw new WrongEventSignature(eventLogs['indexed']);
}
const swapId = parseInt(eventLogs['indexed'][1], 16)
const maker = parseInt(eventLogs['data'][0], 16)
const taker = parseInt(eventLogs['data'][1], 16)
return { swapId: swapId, maker: maker, taker: taker }
}
if (maker_contract === ICX_TOKEN_CONTRACT) {
const params = {
taker_contract: taker_contract,
taker_amount: IconConverter.toHex(IconConverter.toBigNumber(taker_amount)),
}
return this.__iconexCallTransaction(walletAddress, this._scoreAddress, 'create_icx_swap', maker_amount, params)
.then(async tx => {
return getSwapIdFromTx(tx)
})
} else {
const value = IconConverter.toHex(maker_amount)
const data = {
'action': 'create_irc2_swap',
'taker_contract': taker_contract,
'taker_amount': IconConverter.toHex(IconConverter.toBigNumber(taker_amount)),
}
const params = {
'_to': this._scoreAddress,
'_value': value,
'_data': IconConverter.toHex(JSON.stringify(data))
}
return this.__iconexCallTransaction(walletAddress, maker_contract, 'transfer', 0, params).then(async tx => {
return getSwapIdFromTx(tx)
})
}
}
balanceToFloat(balance, contract) {
return this.getDecimals(contract).then(decimals => {
const digits = IconConverter.toBigNumber('10').exponentiatedBy(decimals)
return IconConverter.toBigNumber(balance).dividedBy(digits).toString()
})
}
// IRC2 Token Interface ============================================================
tokenName(contract) {
return this.__call(contract, 'name').then(name => {
return name
})
}
tokenSymbol(contract) {
return this.__call(contract, 'symbol').then(symbol => {
return symbol
})
}
// ICONex Connect Extension =============================================================
iconexHasAccount() {
return this.__iconexConnectRequest('REQUEST_HAS_ACCOUNT').then(payload => {
return payload
})
}
iconexHasAddress(address) {
return this.__iconexConnectRequest('REQUEST_HAS_ADDRESS', address).then(payload => {
return payload
})
}
iconexAskAddress() {
return this.__iconexConnectRequest('REQUEST_ADDRESS').then(payload => {
return payload
})
}
// ======================================================================================
// Following classes are private because they are lower level methods at a protocol level
__iconexCallTransactionEx(from, to, method, value, stepLimit, params) {
const transaction = this.__icxCallTransactionBuild(from, to, method, value, stepLimit, params)
const jsonRpcQuery = {
jsonrpc: '2.0',
method: 'icx_sendTransaction',
params: IconConverter.toRawTransaction(transaction),
id: 1234
}
return this.__iconexJsonRpc(jsonRpcQuery)
}
__iconexCallTransaction(from, to, method, value, params) {
return this.__estimateCallStep(from, to, method, value, params).then(stepLimit => {
return this.__iconexCallTransactionEx(from, to, method, value, stepLimit, params)
})
}
__iconexConnectRequest(requestType, payload) {
return new Promise((resolve, reject) => {
function eventHandler(event) {
const { payload } = event.detail
window.removeEventListener('ICONEX_RELAY_RESPONSE', eventHandler)
resolve(payload)
}
window.addEventListener('ICONEX_RELAY_RESPONSE', eventHandler)
window.dispatchEvent(new window.CustomEvent('ICONEX_RELAY_REQUEST', {
detail: {
type: requestType,
payload
}
}))
})
}
__iconexIcxTransaction(from, to, value) {
const transaction = this.__icxTransactionBuild(from, to, value, 100000)
const jsonRpcQuery = {
jsonrpc: '2.0',
method: 'icx_sendTransaction',
params: IconConverter.toRawTransaction(transaction),
id: 1234
}
return this.__iconexJsonRpc(jsonRpcQuery)
}
__iconexJsonRpc(jsonRpcQuery) {
return this.__iconexConnectRequest('REQUEST_JSON-RPC', jsonRpcQuery).then(payload => {
return payload
})
}
// ======================================================================================
__getIcxBalance(address) {
const digits = IconConverter.toBigNumber('10').exponentiatedBy(18)
return this._getIconService().getBalance(address).execute().then(balance => {
return balance / digits;
})
}
__getIRC2Balance(address, contract) {
return this.__call(contract, 'balanceOf', { '_owner': address }).then(balance => {
return this.getDecimals(contract).then(decimals => {
const digits = IconConverter.toBigNumber('10').exponentiatedBy(decimals)
return balance / digits
})
})
}
__getBalance(address, contract) {
if (contract === ICX_TOKEN_CONTRACT) {
return this.__getIcxBalance(address)
} else {
return this.__getIRC2Balance(address, contract)
}
}
__call(to, method, params = {}) {
return new Promise((resolve, reject) => {
try {
let callBuilder = new IconService.IconBuilder.CallBuilder()
.from(null)
.to(to)
.method(method)
// Optional "params" field
if (Object.keys(params).length !== 0) {
callBuilder = callBuilder.params(params)
}
const call = callBuilder.build()
const result = this._getIconService().call(call).execute()
resolve(result)
} catch (err) {
reject(err)
}
})
}
__callTx(to, method, wallet, stepLimit, value = 0, params = {}) {
return new Promise((resolve, reject) => {
try {
let callTransactionBuilder = new IconService.IconBuilder.CallTransactionBuilder()
.from(wallet.getAddress())
.to(to)
.value(IconConverter.toHex(IconService.IconAmount.of(value, IconService.IconAmount.Unit.ICX).toLoop()))
.stepLimit(IconConverter.toBigNumber(stepLimit)) // 40000000
.nid(IconConverter.toBigNumber(this._nid))
.nonce(IconConverter.toBigNumber(1))
.version(IconConverter.toBigNumber(3))
.timestamp((new Date()).getTime() * 1000)
.method(method)
// Optional "params" field
if (Object.keys(params).length !== 0) {
callTransactionBuilder = callTransactionBuilder.params(params)
}
const transaction = new IconService.SignedTransaction(callTransactionBuilder.build(), wallet)
const result = this._getIconService().sendTransaction(transaction).execute()
resolve(result)
} catch (err) {
reject(err)
}
})
}
__estimateCallStep(from, to, method, value, params = {}) {
const transaction = {
"jsonrpc": "2.0",
"method": "debug_estimateStep",
"id": 1,
"params": {
"version": "0x3",
"from": from,
"to": to,
"value": IconConverter.toHex(IconConverter.toBigNumber(value)),
"timestamp": IconConverter.toHex((new Date()).getTime() * 1000),
"nid": IconConverter.toHex(IconConverter.toBigNumber(this._nid)),
"nonce": "0x1",
"dataType": "call",
"data": {
"method": method,
"params": params
}
}
}
return new Promise((resolve, reject) => {
try {
const result = this._getDebugIconService().provider.request(transaction).execute()
resolve(result)
} catch (err) {
reject(err)
}
})
}
__icxCallTransactionBuild(from, to, method, value, stepLimit, params = {}) {
let callTransactionBuilder = new IconService.IconBuilder.CallTransactionBuilder()
.from(from)
.to(to)
.value(IconConverter.toHex(value))
.stepLimit(IconConverter.toBigNumber(stepLimit))
.nid(IconConverter.toBigNumber(this._nid))
.nonce(IconConverter.toBigNumber(1))
.version(IconConverter.toBigNumber(3))
.timestamp((new Date()).getTime() * 1000)
.method(method)
// Optional "params" field
if (Object.keys(params).length !== 0) {
callTransactionBuilder = callTransactionBuilder.params(params)
}
return callTransactionBuilder.build()
}
__icxTransactionBuild(from, to, value, stepLimit) {
return new IconService.IconBuilder.IcxTransactionBuilder()
.from(from)
.to(to)
.value(IconConverter.toBigNumber(value))
.stepLimit(IconConverter.toBigNumber(stepLimit))
.nid(IconConverter.toBigNumber(this._nid))
.version(IconConverter.toBigNumber(3))
.timestamp((new Date()).getTime() * 1000)
.build()
}
async __txResult(txHash, retriesLeft = 1000, interval = 100) {
try {
return await this._getIconService().getTransactionResult(txHash).execute()
} catch (error) {
if (retriesLeft) {
await new Promise((resolve, reject) => setTimeout(resolve, interval))
return this.__txResult(txHash, retriesLeft - 1, interval)
} else throw new UnconfirmedTransaction(txHash)
} |
export const api = new API(SCORE_NETWORK, SCORE_ENDPOINT) | }
} |
jobrunner_service.go | /*
Copyright 2020 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0 | See the License for the specific language governing permissions and
limitations under the License.
*/
package resources
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
)
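// MakeJobRunnerService returns a Service that exposes the job runner pods selected
// by jobRunnerLabels on port 80, targeting container port 8080.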
func MakeJobRunnerService(name, namespace string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "Service",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
Labels: jobRunnerLabels,
},
Spec: corev1.ServiceSpec{
Selector: jobRunnerLabels,
Ports: []corev1.ServicePort{
{
Protocol: corev1.ProtocolTCP,
Port: 80,
TargetPort: intstr.IntOrString{IntVal: 8080},
},
},
},
}
} |
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
fshc_test.go | // Package health provides a basic mountpath health monitor.
/*
* Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
*/
package health
import (
"fmt"
"os"
"testing"
"github.com/NVIDIA/aistore/cmn"
"github.com/NVIDIA/aistore/fs"
"github.com/NVIDIA/aistore/memsys"
)
const (
fsCheckerTmpDir = "/tmp/fshc"
)
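// testCheckerMountPaths registers four temporary mountpaths, removes the folder
// backing one of them and disables another, returning the resulting MountedFS.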
func testCheckerMountPaths() *fs.MountedFS {
cmn.CreateDir(fsCheckerTmpDir)
cmn.CreateDir(fsCheckerTmpDir + "/1")
cmn.CreateDir(fsCheckerTmpDir + "/2")
cmn.CreateDir(fsCheckerTmpDir + "/3")
cmn.CreateDir(fsCheckerTmpDir + "/4")
config := cmn.GCO.BeginUpdate()
config.TestFSP.Count = 1
cmn.GCO.CommitUpdate(config)
fs.InitMountedFS()
fs.Mountpaths.DisableFsIDCheck()
for i := 1; i <= 4; i++ {
name := fmt.Sprintf("%s/%d", fsCheckerTmpDir, i)
fs.Mountpaths.Add(name)
}
os.RemoveAll(fsCheckerTmpDir + "/3") // one folder is deleted
fs.Mountpaths.Disable(fsCheckerTmpDir + "/4")
return fs.Mountpaths
}
func updateTestConfig() {
config := cmn.GCO.BeginUpdate()
config.FSHC.Enabled = true
config.FSHC.ErrorLimit = 2
cmn.GCO.CommitUpdate(config)
}
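// MockFSDispatcher records whether the health checker asked to disable the
// pre-configured faulty mountpath.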
type MockFSDispatcher struct {
faultyPath string
faultDetected bool
}
func newMockFSDispatcher(mpathToFail string) *MockFSDispatcher {
return &MockFSDispatcher{
faultyPath: mpathToFail,
}
}
func (d *MockFSDispatcher) DisableMountpath(path, reason string) (disabled bool, err error) {
d.faultDetected = path == d.faultyPath
if d.faultDetected {
return false, fmt.Errorf("fault detected: %s", reason)
}
return true, nil
}
func testCheckerCleanup() {
os.RemoveAll(fsCheckerTmpDir)
}
func TestFSChecker(t *testing.T) {
mm := memsys.DefaultPageMM()
defer mm.Terminate()
updateTestConfig()
var (
failedMpath = fsCheckerTmpDir + "/3"
dispatcher = newMockFSDispatcher(failedMpath)
fshc = NewFSHC(dispatcher, testCheckerMountPaths(), mm, fs.CSM)
)
// initial state = 3 available mountpaths and 1 disabled - must pass
availablePaths, disabledPaths := fshc.mountpaths.Get()
if len(availablePaths) != 3 || len(disabledPaths) != 1 {
t.Errorf("Invalid number of mountpaths at start: %v - %v",
availablePaths, disabledPaths)
}
// inaccessible mountpath
_, _, exists := fshc.testMountpath(
fsCheckerTmpDir+"/3/testfile", fsCheckerTmpDir+"/3", 4, 1024)
if exists {
t.Error("Testing non-existing mountpath must fail")
}
// failed mountpath must be disabled
fshc.runMpathTest(failedMpath, failedMpath+"/dir/testfile")
if !dispatcher.faultDetected {
t.Errorf("Faulty mountpath %s was not detected", failedMpath)
}
// decision making function
type tstInfo struct {
title string
readErrs, writeErrs int
avail, result bool
}
testList := []tstInfo{
{"Inaccessible mountpath", 0, 0, false, false},
{"Healthy mountpath", 0, 0, true, true},
{"Unstable but OK mountpath", 1, 1, true, true},
{"Reads failed", 3, 0, true, false},
{"Writes failed", 1, 3, true, false},
{"Reads and writes failed", 3, 3, true, false},
}
for _, tst := range testList {
t.Run(tst.title, func(t *testing.T) {
res, _ := fshc.isTestPassed("/tmp", tst.readErrs, tst.writeErrs, tst.avail)
if res != tst.result {
t.Errorf("%s failed. %v expected but %v got", tst.title, tst.result, res)
}
})
}
| } | testCheckerCleanup() |
MirrorTool.py | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Tool import Tool
from UM.Event import Event, MouseEvent
from UM.Math.Vector import Vector
from UM.Operations.MirrorOperation import MirrorOperation
from UM.Operations.GroupedOperation import GroupedOperation
from UM.Scene.Selection import Selection
from UM.Scene.ToolHandle import ToolHandle
from PyQt5.QtCore import Qt
from . import MirrorToolHandle
## Provides the tool to mirror meshes and groups
class MirrorTool(Tool):
def | (self):
super().__init__()
self._handle = MirrorToolHandle.MirrorToolHandle()
self._shortcut_key = Qt.Key_M
self._operation_started = False
## Handle mouse and keyboard events
#
# \param event type(Event)
def event(self, event):
super().event(event)
if event.type == Event.MousePressEvent and self._controller.getToolsEnabled():
# Initialise a mirror operation
if MouseEvent.LeftButton not in event.buttons:
return False
id = self._selection_pass.getIdAtPosition(event.x, event.y)
if not id:
return False
if self._handle.isAxis(id):
self.setLockedAxis(id)
self._operation_started = True
self.operationStarted.emit(self)
return True
if event.type == Event.MouseReleaseEvent:
if self._operation_started:
self._operation_started = False
self.operationStopped.emit(self)
# Perform a mirror operation
if self.getLockedAxis() != ToolHandle.NoAxis:
if Selection.getCount() == 1:
node = Selection.getSelectedObject(0)
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op = MirrorOperation(node, mirror, mirror_around_center = True)
else:
op = GroupedOperation()
for node in self._getSelectedObjectsWithoutSelectedAncestors():
if self.getLockedAxis() == ToolHandle.XAxis:
mirror = Vector(-1, 1, 1)
elif self.getLockedAxis() == ToolHandle.YAxis:
mirror = Vector(1, -1, 1)
elif self.getLockedAxis() == ToolHandle.ZAxis:
mirror = Vector(1, 1, -1)
else:
mirror = Vector(1, 1, 1)
op.addOperation(MirrorOperation(node, mirror, mirror_around_center = True))
op.push()
self.setLockedAxis(ToolHandle.NoAxis)
return True
return False
| __init__ |
uvm_object.py | #
#-----------------------------------------------------------------------------
# Copyright 2007-2011 Mentor Graphics Corporation
# Copyright 2007-2011 Cadence Design Systems, Inc.
# Copyright 2010 Synopsys, Inc.
# Copyright 2013 NVIDIA Corporation
# Copyright 2019-2020 Tuomas Poikela (tpoikela)
# All Rights Reserved Worldwide
#
# Licensed under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in
# writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See
# the License for the specific language governing
# permissions and limitations under the License.
#-----------------------------------------------------------------------------
from typing import Dict, Any
from .sv import sv, sv_obj
from .uvm_misc import UVMStatusContainer
from .uvm_object_globals import (UVM_PRINT, UVM_NONE, UVM_COPY, UVM_COMPARE,
UVM_RECORD, UVM_SETINT, UVM_SETOBJ, UVM_SETSTR, UVM_PACK, UVM_UNPACK)
from .uvm_globals import uvm_report_error, uvm_report_warning, uvm_report_info
from typing import Tuple
class UVMObject(sv_obj):
"""
The `UVMObject` class is the base class for all UVM data and hierarchical classes.
Its primary role is to define a set of methods for such common operations as `create`,
`copy`, `compare`, `print`, and `record`.
Classes deriving from `UVMObject` must implement methods such as
`create` and `get_type_name`.
:ivar str name: Name of the object
:ivar int inst_id: Unique instance ID for this object
Group: Seeding
:cvar bool use_uvm_seeding: This bit enables or disables the UVM seeding
mechanism. It globally affects the operation of the `reseed` method.
When enabled, UVM-based objects are seeded based on their type and full
hierarchical name rather than allocation order. This improves random
stability for objects whose instance names are unique across each type.
The `UVMComponent` class is an example of a type that has a unique
instance name.
"""
# Should be set by uvm_*_utils macro
type_id = None # type: Any
depth = 0
m_inst_count = 0
use_uvm_seeding = True
uvm_global_copy_map = {} # type: Dict['UVMObject', 'UVMObject']
_m_uvm_status_container = UVMStatusContainer()
def __init__(self, name: str):
""" Creates a new uvm_object with the given instance `name`. If `name` is not
supplied, the object is unnamed.
"""
sv_obj.__init__(self)
self.name = name
self.inst_id = UVMObject.m_inst_count
UVMObject.m_inst_count += 1
self.leaf_name = name
def reseed(self) -> None:
"""
Calls `srandom` on the object to reseed the object using the UVM seeding
mechanism, which sets the seed based on type name and instance name instead
of based on instance position in a thread.
If the `use_uvm_seeding` static variable is set to 0, then reseed() does
not perform any function.
"""
if (UVMObject.use_uvm_seeding):
pass
def set_name(self, name: str):
"""
Group: Identification
Sets the instance name of this object, overwriting any previously
given name.
Args:
name:
"""
self.leaf_name = name
def get_name(self) -> str:
"""
Returns the name of the object, as provided by the `name` argument in the
`new` constructor or `set_name` method.
Returns:
str: Name of the object.
"""
return self.leaf_name
def get_full_name(self) -> str:
"""
Objects possessing hierarchy, such as <uvm_components>, override the default
implementation. Other objects might be associated with component hierarchy
but are not themselves components. For example, <uvm_sequence #(REQ,RSP)>
classes are typically associated with a <uvm_sequencer #(REQ,RSP)>. In this
case, it is useful to override get_full_name to return the sequencer's
full name concatenated with the sequence's name. This provides the sequence
a full context, which is useful when debugging.
Returns:
str: The full hierarchical name of this object. The default
implementation is the same as <get_name>, as uvm_objects do not inherently
possess hierarchy.
"""
return self.get_name()
def get_inst_id(self) -> int:
"""
Returns:
int: The object's unique, numeric instance identifier.
"""
return self.inst_id
@classmethod
def get_inst_count(self) -> int:
"""
Returns:
int: The current value of the instance counter, which represents the
total number of uvm_object-based objects that have been allocated in
simulation. The instance counter is used to form a unique numeric instance
identifier.
"""
return UVMObject.m_inst_count
def get_type(self) -> None:
"""
Returns the type-proxy (wrapper) for this object. The `UVMFactory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments.
The default implementation of this method produces an error and returns
`None`. To enable use of this method, a user's subtype must implement a
version that returns the subtype's wrapper.
For example:
.. code-block:: python
class cmd(UVMObject):
type_id = None
@classmethod
def get_type(cls):
return cls.type_id.get()
Then, to use:
.. code-block:: python
factory.set_type_override(cmd.get_type(), subcmd.get_type())
This function is implemented by the uvm_*_utils functions, if employed.
Returns:
"""
uvm_report_error("NOTYPID", "get_type not implemented in derived class: "
+ str(self), UVM_NONE)
return None
def get_object_type(self) -> Any:
"""
Function: get_object_type
Returns the type-proxy (wrapper) for this object. The `uvm_factory`'s
type-based override and creation methods take arguments of
`uvm_object_wrapper`. This method, if implemented, can be used as convenient
means of supplying those arguments. This method is the same as the static
`get_type` method, but uses an already allocated object to determine
the type-proxy to access (instead of using the static object).
The default implementation of this method does a factory lookup of the
proxy using the return value from `get_type_name`. If the type returned
by `get_type_name` is not registered with the factory, then a `None`
handle is returned.
For example:
.. code-block:: python
class cmd (UVMObject):
type_id = UVMObjectRegistry()
@classmethod
def get_type(cls):
return cls.type_id.get()
def get_object_type(self):
return cmd.type_id.get()
This function is implemented by the `uvm_*_utils macros, if employed.
Returns:
"""
from .uvm_coreservice import UVMCoreService
cs = UVMCoreService.get()
factory = cs.get_factory()
if self.get_type_name() == "<unknown>":
return None
return factory.find_wrapper_by_name(self.get_type_name())
def get_type_name(self) -> str:
"""
This function returns the type name of the object, which is typically the
type identifier enclosed in quotes. It is used for various debugging
functions in the library, and it is used by the factory for creating
objects.
This function must be defined in every derived class.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
type_name = "mytype"
def get_type_name(self):
return my_type.type_name
We define the `type_name` static variable to enable access to the type name
without need of an object of the class, i.e., to enable access via the
scope operator, ~mytype::type_name~.
Returns:
str: Type name of the object.
"""
return "<unknown>"
def create(self, name="") -> 'UVMObject':
|
def clone(self) -> 'UVMObject':
"""
The `clone` method creates and returns an exact copy of this object.
The default implementation calls `create` followed by `copy`. As clone is
virtual, derived classes may override this implementation if desired.
Returns:
UVMObject: Clone of the object.
"""
tmp = self.create(self.get_name())
if tmp is None:
uvm_report_warning("CRFLD", sv.sformatf(
"The create method failed for %s, object cannot be cloned",
self.get_name()), UVM_NONE)
else:
tmp.copy(self)
return tmp
def print_obj(self, printer=None) -> None:
"""
Group: Printing
Function: print
The `print` method deep-prints this object's properties in a format and
manner governed by the given `printer` argument; if the `printer` argument
is not provided, the global `uvm_default_printer` is used. See
`uvm_printer` for more information on printer output formatting. See also
`uvm_line_printer`, `uvm_tree_printer`, and `uvm_table_printer` for details
on the pre-defined printer "policies," or formatters, provided by the UVM.
The `print` method is not virtual and must not be overloaded. To include
custom information in the `print` and `sprint` operations, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if printer is None:
uvm_report_error("NonePRINTER", "uvm_default_printer is None")
sv.fwrite(printer.knobs.mcd, self.sprint(printer))
def sprint(self, printer=None) -> str:
"""
The `sprint` method works just like the `print` method, except the output
is returned in a string rather than displayed.
The `sprint` method is not virtual and must not be overloaded. To include
additional fields in the `print` and `sprint` operation, derived classes
must override the `do_print` method and use the provided printer policy
class to format the output. The printer policy will manage all string
concatenations and provide the string to `sprint` to return to the caller.
Args:
printer (UVMPrinter): Printer that is used in printing.
Returns:
str: String representation of the object.
"""
if printer is None:
from .uvm_global_vars import uvm_default_printer
printer = uvm_default_printer
if not printer.istop():
UVMObject._m_uvm_status_container.printer = printer
self._m_uvm_field_automation(None, UVM_PRINT, "")
self.do_print(printer)
return ""
self._m_uvm_status_container = UVMObject._m_uvm_status_container
printer.print_object(self.get_name(), self)
if printer.m_string != "":
return printer.m_string
return printer.emit()
def do_print(self, printer) -> None:
"""
The `do_print` method is the user-definable hook called by `print` and
`sprint` that allows users to customize what gets printed or sprinted
beyond the field information provided by the `uvm_field_* macros,
<Utility and Field Macros for Components and Objects>.
The `printer` argument is the policy object that governs the format and
content of the output. To ensure correct `print` and `sprint` operation,
and to ensure a consistent output format, the `printer` must be used
by all `do_print` implementations. That is, instead of using ~$display~ or
string concatenations directly, a `do_print` implementation must call
through the ~printer's~ API to add information to be printed or sprinted.
An example implementation of `do_print` is as follows::
class mytype (UVMObject):
# has fields self.data (an object) and self.f1 (an int)
def do_print(self, printer):
super().do_print(printer)
printer.print_field_int("f1", self.f1, 32, UVM_DEC)
printer.print_object("data", self.data)
Then, to print and sprint the object, you could write::
t = mytype()
t.print_obj()
uvm_report_info("Received",t.sprint())
See `UVMPrinter` for information about the printer API.
Args:
printer (UVMPrinter): Printer that is used in printing.
"""
return
def convert2string(self) -> str:
"""
This virtual function is a user-definable hook, called directly by the
user, that allows users to provide object information in the form of
a string. Unlike `sprint`, there is no requirement to use a `uvm_printer`
policy object. As such, the format and content of the output is fully
customizable, which may be suitable for applications not requiring the
consistent formatting offered by the `print`/`sprint`/`do_print`
API.
Fields declared in <Utility Macros> macros (`uvm_field_*), if used, will
not automatically appear in calls to convert2string.
An example implementation of convert2string follows.
.. code-block:: python
class Base(UVMObject):
field = "foo"
def convert2string(self):
return "base_field=" + self.field
class Obj2(UVMObject):
field = "bar"
def convert2string(self):
return "child_field=" + self.field
class Obj(Base):
addr = 0x123
data = 0x456
write = 1
child = Obj2()
def convert2string(self):
return (super().convert2string() +
sv.sformatf(" write=%0d addr=%8h data=%8h ", self.write, self.addr, self.data) +
self.child.convert2string())
Then, to display an object, you could write:
.. code-block:: python
o = Obj()
uvm_report_info("BusMaster", "Sending:\n " + o.convert2string())
The output will look similar to::
UVM_INFO @ 0: reporter [BusMaster] Sending:
base_field=foo write=1 addr=00000123 data=00000456 child_field=bar
Returns:
str: Object converted into string.
"""
return ""
def _m_uvm_field_automation(self, tmp_data__, what__, str__) -> None:
pass
def record(self, recorder=None) -> None:
"""
Group: Recording
The `record` method deep-records this object's properties according to an
optional `recorder` policy. The method is not virtual and must not be
overloaded. To include additional fields in the record operation, derived
classes should override the `do_record` method.
The optional `recorder` argument specifies the recording policy, which
governs how recording takes place. See
`uvm_recorder` for information.
A simulator's recording mechanism is vendor-specific. By providing access
via a common interface, the uvm_recorder policy provides vendor-independent
access to a simulator's recording capabilities.
Args:
recorder (UVMRecorder):
"""
if recorder is None:
return
UVMObject._m_uvm_status_container.recorder = recorder
recorder.recording_depth += 1
self._m_uvm_field_automation(None, UVM_RECORD, "")
self.do_record(recorder)
recorder.recording_depth -= 1
def do_record(self, recorder) -> None:
"""
The `do_record` method is the user-definable hook called by the `record`
method. A derived class should override this method to include its fields
in a record operation.
The `recorder` argument is policy object for recording this object. A
do_record implementation should call the appropriate recorder methods for
each of its fields. Vendor-specific recording implementations are
encapsulated in the `recorder` policy, thereby insulating user-code from
vendor-specific behavior. See `uvm_recorder` for more information.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
data_obj data
int f1
def do_record (self, recorder):
recorder.record_field("f1", f1, sv.bits(f1), UVM_DEC)
recorder.record_object("data", data)
Args:
recorder (UVMRecorder): Recorder policy object.
"""
return
def copy(self, rhs: 'UVMObject'):
"""
The copy makes this object a copy of the specified object.
The `copy` method is not virtual and should not be overloaded in derived
classes. To copy the fields of a derived class, that class should override
the `do_copy` method.
Args:
rhs (UVMObject): An object to be copied.
"""
# For cycle checking
UVMObject.depth = 0
if (rhs is not None) and rhs in UVMObject.uvm_global_copy_map:
return
if rhs is None:
uvm_report_warning("NoneCP",
"A None object was supplied to copy; copy is ignored", UVM_NONE)
return
UVMObject.uvm_global_copy_map[rhs] = self
UVMObject.depth += 1
self._m_uvm_field_automation(rhs, UVM_COPY, "")
self.do_copy(rhs)
UVMObject.depth -= 1
if UVMObject.depth == 0:
UVMObject.uvm_global_copy_map = {}
def do_copy(self, rhs) -> None:
"""
The `do_copy` method is the user-definable hook called by the `copy` method.
A derived class should override this method to include its fields in a `copy`
operation.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
field_1 = 0
def do_copy(self, rhs):
super().do_copy(rhs)
# Optional type checking
self.field_1 = rhs.field_1
The implementation must call `super().do_copy`, and can optionally do
type checking before copying.
Args:
rhs (UVMObject): Object to be copied.
"""
return
def compare(self, rhs, comparer=None) -> bool:
"""
Deep compares members of this data object with those of the object provided
in the `rhs` (right-hand side) argument, returning 1 on a match, 0 otherwise.
The `compare` method is not virtual and should not be overloaded in derived
classes. To compare the fields of a derived class, that class should
override the `do_compare` method.
The optional `comparer` argument specifies the comparison policy. It allows
you to control some aspects of the comparison operation. It also stores the
results of the comparison, such as field-by-field miscompare information
and the total number of miscompares. If a compare policy is not provided,
then the global `uvm_default_comparer` policy is used. See `uvm_comparer`
for more information.
Args:
rhs (UVMObject): Object to be compared against.
comparer (UVMComparer): Comparer policy object.
Returns:
bool: True if objects match, False otherwise.
"""
# t = 0
dc = 0
#static int style
# style = 0
done = 0
cls = UVMObject
if comparer is not None:
cls._m_uvm_status_container.comparer = comparer
else:
from .uvm_global_vars import uvm_default_comparer
cls._m_uvm_status_container.comparer = uvm_default_comparer
comparer = cls._m_uvm_status_container.comparer
if(not cls._m_uvm_status_container.scope.depth()):
comparer.compare_map.delete()
comparer.result = 0
comparer.miscompares = ""
comparer.scope = cls._m_uvm_status_container.scope
if self.get_name() == "":
cls._m_uvm_status_container.scope.down("<object>")
else:
cls._m_uvm_status_container.scope.down(self.get_name())
if(not done and (rhs is None)):
if(cls._m_uvm_status_container.scope.depth()):
comparer.print_msg_object(self, rhs)
else:
comparer.print_msg_object(self, rhs)
uvm_report_info("MISCMP",
sv.sformatf("%0d Miscompare(s) for object %s@%0d vs. None",
comparer.result,
cls._m_uvm_status_container.scope.get(),
self.get_inst_id()),
cls._m_uvm_status_container.comparer.verbosity)
done = 1
if(not done and comparer.compare_map.exists(rhs)):
if(comparer.compare_map[rhs] != self):
comparer.print_msg_object(self, comparer.compare_map[rhs])
done = 1 # don't do any more work after this case, but do cleanup
if(not done and comparer.check_type and (rhs is not None) and
(self.get_type_name() != rhs.get_type_name())):
cls._m_uvm_status_container.stringv = ("lhs type = '" + self.get_type_name()
+ "' : rhs type = '" + rhs.get_type_name() + "'")
comparer.print_msg(cls._m_uvm_status_container.stringv)
if not done:
comparer.compare_map[rhs] = self
self._m_uvm_field_automation(rhs, UVM_COMPARE, "")
dc = self.do_compare(rhs, comparer)
if cls._m_uvm_status_container.scope.depth() == 1:
cls._m_uvm_status_container.scope.up()
if rhs is not None:
comparer.print_rollup(self, rhs)
return (comparer.result == 0 and dc == 1)
def do_compare(self, rhs, comparer) -> bool:
"""
The `do_compare` method is the user-definable hook called by the `compare`
method. A derived class should override this method to include its fields
in a compare operation. It should return 1 if the comparison succeeds, 0
otherwise.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
f1 = 0
def do_compare(self, rhs, comparer):
do_compare = super().do_compare(rhs, comparer)
# Optional type checking
do_compare &= comparer.compare_field_int("f1", self.f1, rhs.f1)
return do_compare
A derived class implementation must call `super().do_compare` to ensure its
base class' properties, if any, are included in the comparison. If type
matching is required instead of duck-typing, the user can also
implemented this checking.
The actual comparison should be implemented using the `UVMComparer` object
rather than direct field-by-field comparison. This enables users of your
class to customize how comparisons are performed and how much miscompare
information is collected. See `UVMComparer` for more details.
Args:
rhs (UVMObject):
comparer (UVMComparer):
Returns:
bool: True if objects match, False otherwise.
"""
return True
# // Group: Packing
# // Function: pack
#
# extern function int pack (ref bit bitstream[],
# input uvm_packer packer=None)
def pack(self, packer=None) -> Tuple[Any, Any]:
packer = self.m_pack(packer)
return packer.get_packed_size(), packer.get_bits()
# // Function: pack_bytes
#
# extern function int pack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def pack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_pack(packer)
packed_bytes = packer.get_bytes()
for b in packed_bytes:
bytestream.append(b)
return packer.get_packed_size()
# // Function: pack_ints
# //
# // The pack methods bitwise-concatenate this object's properties into an array
# // of bits, bytes, or ints. The methods are not virtual and must not be
# // overloaded. To include additional fields in the pack operation, derived
# // classes should override the <do_pack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // the packing operation. If a packer policy is not provided, the global
# // <uvm_default_packer> policy is used. See <uvm_packer> for more information.
# //
# // The return value is the total number of bits packed into the given array.
# // Use the array's built-in `size` method to get the number of bytes or ints
# // consumed during the packing process.
#
# extern function int pack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def pack_ints(self, intstream, packer=None) -> Any:
packer = self.m_pack(packer)
ints = packer.get_ints()
for i in ints:
intstream.append(i)
return packer.get_packed_size()
# // Function: do_pack
# //
# // The `do_pack` method is the user-definable hook called by the <pack> methods.
# // A derived class should override this method to include its fields in a pack
# // operation.
# //
# // The `packer` argument is the policy object for packing. The policy object
# // should be used to pack objects.
# //
# // A typical example of an object packing itself is as follows
# //
# //| class mysubtype extends mysupertype
# //| ...
# //| shortint myshort
# //| obj_type myobj
# //| byte myarray[]
# //| ...
# //| function void do_pack (uvm_packer packer)
# //| super.do_pack(packer); // pack mysupertype properties
# //| packer.pack_field_int(myarray.size(), 32)
# //| foreach (myarray)
# //| packer.pack_field_int(myarray[index], 8)
# //| packer.pack_field_int(myshort, $bits(myshort))
# //| packer.pack_object(myobj)
# //| endfunction
# //
# // The implementation must call ~super.do_pack~ so that base class properties
# // are packed as well.
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to unpack into an equivalent data
# // structure when unpacking, you must include meta-information about the
# // dynamic data when packing as follows.
# //
# // - For queues, dynamic arrays, or associative arrays, pack the number of
# // elements in the array in the 32 bits immediately before packing
# // individual elements, as shown above.
# //
# // - For string data types, append a zero byte after packing the string
# // contents.
# //
# // - For objects, pack 4 bits immediately before packing the object. For `None`
# // objects, pack 4'b0000. For non-`None` objects, pack 4'b0001.
# //
# // When the `uvm_field_* macros are used,
# // <Utility and Field Macros for Components and Objects>,
# // the above meta information is included provided the <uvm_packer::use_metadata>
# // variable is set for the packer.
# //
# // Packing order does not need to match declaration order. However, unpacking
# // order must match packing order.
def do_pack(self, packer) -> None:
return
# // Group: Unpacking
#
# // Function: unpack
#
# extern function int unpack (ref bit bitstream[],
# input uvm_packer packer=None)
def unpack(self, bitstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bits(bitstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_bytes
#
# extern function int unpack_bytes (ref byte unsigned bytestream[],
# input uvm_packer packer=None)
def unpack_bytes(self, bytestream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_bytes(bytestream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: unpack_ints
# //
# // The unpack methods extract property values from an array of bits, bytes, or
# // ints. The method of unpacking `must` exactly correspond to the method of
# // packing. This is assured if (a) the same `packer` policy is used to pack
# // and unpack, and (b) the order of unpacking is the same as the order of
# // packing used to create the input array.
# //
# // The unpack methods are fixed (non-virtual) entry points that are directly
# // callable by the user. To include additional fields in the <unpack>
# // operation, derived classes should override the <do_unpack> method.
# //
# // The optional `packer` argument specifies the packing policy, which governs
# // both the pack and unpack operation. If a packer policy is not provided,
# // then the global `uvm_default_packer` policy is used. See uvm_packer for
# // more information.
# //
# // The return value is the actual number of bits unpacked from the given array.
#
# extern function int unpack_ints (ref int unsigned intstream[],
# input uvm_packer packer=None)
def unpack_ints(self, intstream, packer=None) -> Any:
packer = self.m_unpack_pre(packer)
packer.put_ints(intstream)
self.m_unpack_post(packer)
packer.set_packed_size()
return packer.get_packed_size()
# // Function: do_unpack
# //
# // The `do_unpack` method is the user-definable hook called by the <unpack>
# // method. A derived class should override this method to include its fields
# // in an unpack operation.
# //
# // The `packer` argument is the policy object for both packing and unpacking.
# // It must be the same packer used to pack the object into bits. Also,
# // do_unpack must unpack fields in the same order in which they were packed.
# // See <uvm_packer> for more information.
# //
# // The following implementation corresponds to the example given in do_pack.
# //
# //| function void do_unpack (uvm_packer packer)
# //| int sz
# //| super.do_unpack(packer); // unpack super's properties
# //| sz = packer.unpack_field_int(myarray.size(), 32)
# //| myarray.delete()
# //| for(int index=0; index<sz; index++)
# //| myarray[index] = packer.unpack_field_int(8)
# //| myshort = packer.unpack_field_int($bits(myshort))
# //| packer.unpack_object(myobj)
# //| endfunction
# //
# // If your object contains dynamic data (object, string, queue, dynamic array,
# // or associative array), and you intend to <unpack> into an equivalent data
# // structure, you must have included meta-information about the dynamic data
# // when it was packed.
# //
# // - For queues, dynamic arrays, or associative arrays, unpack the number of
# // elements in the array from the 32 bits immediately before unpacking
# // individual elements, as shown above.
# //
# // - For string data types, unpack into the new string until a `None` byte is
# // encountered.
# //
# // - For objects, unpack 4 bits into a byte or int variable. If the value
# // is 0, the target object should be set to `None` and unpacking continues to
# // the next property, if any. If the least significant bit is 1, then the
# // target object should be allocated and its properties unpacked.
def do_unpack(self, packer) -> None:
return
def set_int_local(self, field_name: str, value: int, recurse=True):
"""
Group: Configuration
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool):
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.bitstream = value
self._m_uvm_field_automation(None, UVM_SETINT, field_name)
if UVMObject._m_uvm_status_container.warning and not self._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name),UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_string_local(self, field_name: str, value: str, recurse=True):
"""
Function: set_string_local
Args:
field_name (str): Variable to set
value: Value for the variable
recurse (bool): If True, recurse into sub-objects.
"""
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.stringv = value
self._m_uvm_field_automation(None, UVM_SETSTR, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s (@%0d)",
field_name, self.get_inst_id()), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
def set_object_local(self, field_name: str, value: 'UVMObject', clone=1, recurse=1):
"""
These methods provide write access to integral, string, and
uvm_object-based properties indexed by a `field_name` string. The object
designer choose which, if any, properties will be accessible, and overrides
the appropriate methods depending on the properties' types. For objects,
the optional `clone` argument specifies whether to clone the `value`
argument before assignment.
The global `uvm_is_match` function is used to match the field names, so
`field_name` may contain wildcards.
An example implementation of all three methods is as follows.
.. code-block:: python
class mytype(UVMObject):
def __init__(self, name):
super().__init__(name)
self.myint = 0
self.mybyte = 0
self.myshort = 0
self.mystring = ""
self.myobj = None
# provide access to integral properties
def set_int_local(self, field_name, value):
if (uvm_is_match (field_name, "myint")):
self.myint = value
elif (uvm_is_match (field_name, "mybyte")):
selef.mybyte = value
# provide access to string properties
def set_string_local(self, field_name, value):
if (uvm_is_match (field_name, "mystring")):
self.mystring = value
# provide access to sub-objects
def set_object_local(self, field_name, value,clone=1):
if (uvm_is_match (field_name, "myobj")):
if value is not None:
# if the provided value is not the correct type, produce an error;
# otherwise store it, cloning first if requested
if clone:
self.myobj = value.clone()
else:
self.myobj = value
else:
self.myobj = None  # value is None, so simply assign None to myobj
...
Although the object designer implements these methods to provide outside
access to one or more properties, they are intended for internal use (e.g.,
for command-line debugging and auto-configuration) and should not be called
directly by the user.
Args:
field_name (str): Variable to set
value: Value for the variable
clone (bool):
recurse (bool):
"""
# cc = None # uvm_object cc
UVMObject._m_uvm_status_container.cycle_check.clear()
UVMObject._m_uvm_status_container.m_uvm_cycle_scopes.clear()
if clone and (value is not None):
cc = value.clone()
if cc is not None:
cc.set_name(field_name)
value = cc
UVMObject._m_uvm_status_container.status = 0
UVMObject._m_uvm_status_container.object = value
UVMObject._m_uvm_status_container.clone = clone
self._m_uvm_field_automation(None, UVM_SETOBJ, field_name)
if UVMObject._m_uvm_status_container.warning and not UVMObject._m_uvm_status_container.status:
uvm_report_error("NOMTC", sv.sformatf("did not find a match for field %s", field_name), UVM_NONE)
UVMObject._m_uvm_status_container.cycle_check.clear()
# //---------------------------------------------------------------------------
# // **** Internal Methods and Properties ***
# // Do not use directly
# //---------------------------------------------------------------------------
#
# extern local function void m_pack (inout uvm_packer packer)
def m_pack(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
packer.scope.down(self.get_name())
self._m_uvm_field_automation(None, UVM_PACK, "")
self.do_pack(packer)
packer.set_packed_size()
packer.scope.up()
return packer
# extern local function void m_unpack_pre (inout uvm_packer packer)
def m_unpack_pre(self, packer) -> Any:
if packer is not None:
UVMObject._m_uvm_status_container.packer = packer
else:
from .uvm_global_vars import uvm_default_packer
UVMObject._m_uvm_status_container.packer = uvm_default_packer
packer = UVMObject._m_uvm_status_container.packer
packer.reset()
return packer
# extern local function void m_unpack_post (uvm_packer packer)
def m_unpack_post(self, packer) -> None:
provided_size = packer.get_packed_size()
# Put this object into the hierarchy
packer.scope.down(self.get_name())
self._m_uvm_field_automation(None, UVM_UNPACK, "")
self.do_unpack(packer)
# Scope back up before leaving
packer.scope.up()
if packer.get_packed_size() != provided_size:
uvm_report_warning("BDUNPK", sv.sformatf(
"Unpack operation unsuccessful: unpacked %0d bits from a total of %0d bits",
packer.get_packed_size(), provided_size), UVM_NONE)
| """
Group: Creation
The `create` method allocates a new object of the same type as this object
and returns it via a base uvm_object handle. Every class deriving from
uvm_object, directly or indirectly, must implement the create method.
A typical implementation is as follows:
.. code-block:: python
class mytype (UVMObject):
...
def create(self, name=""):
t = mytype(name)
return t
Args:
name (str): Name of the created object.
Returns:
obj: New object.
"""
return UVMObject(name) |
offlineQueryProvider.js | class | {
constructor() {
this.queryList_ = [
'There is no internet connection',
'Try checking the network connection',
'Try reconnecting to Wi-Fi',
':-( :-('
];
this.index_ = 0;
}
/**
* Retrieves the next query.
* @return {string} The next query.
*/
next() {
let query = this.queryList_[this.index_];
this.index_ = (this.index_ + 1) % this.queryList_.length;
return query;
}
}
| OfflineQueryProvider |
retree.ts | /**
* Created by ahsanayaz on 08/11/2016.
*/
export class ReTree {
constructor() {
}
public test(string: string , regex: any): any {
let self = this;
if (typeof regex === 'string') {
regex = new RegExp(regex);
}
if (regex instanceof RegExp) {
return regex.test(string);
} else if (regex && Array.isArray(regex.and)) {
return regex.and.every(function (item: any) {
return self.test(string, item);
});
} else if (regex && Array.isArray(regex.or)) {
return regex.or.some(function (item: any) {
return self.test(string, item);
});
} else if (regex && regex.not) {
return !self.test(string, regex.not);
} else {
return false;
}
}
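// A hypothetical usage sketch (the pattern strings are illustrative only, not
// taken from this file): nested and/or/not trees are evaluated recursively by test().
//
//   const tree = new ReTree();
//   tree.test('iPhone OS 13', { and: ['iPhone', { not: 'Android' }] }); // true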
public exec(string: string, regex: any): any { | let self = this;
if (typeof regex === 'string') {
regex = new RegExp(regex);
}
if (regex instanceof RegExp) {
return regex.exec(string);
} else if (regex && Array.isArray(regex)) {
return regex.reduce(function (res: any, item: any) {
return (!!res) ? res : self.exec(string, item);
}, null);
} else {
return null;
}
}
} | |
SVMSemI.py | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
SVMSemI - Support Vector Machine Semantic Decoder
=====================================================================
To use this in pydial, you need to set "semitype = SVMSemI" for a domain in the relevant interface config file
(in the current state it is the CamRestaurants domain).
See texthub_svm.cfg, which can be used for this purpose with the texthub interface.
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: Discriminative Spoken Language Understanding Using Word Confusion Networks
http://mi.eng.cam.ac.uk/~sjy/papers/hgtt12.pdf
.. seealso:: CUED Imports/Dependencies:
import :mod:`semi.SemI` |.|
import :mod:`utils.ContextLogger` |.|
import :mod: `decode.svmdec` |semi/CNetTrain|
************************
Important: please see semi/CNetTrain/README.md
'''
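# A minimal sketch of the interface config entry described above; the section
# name is an assumption for illustration, only the "semitype = SVMSemI" line is
# taken from this module's docstring:
#
#   [semi_CamRestaurants]
#   semitype = SVMSemI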
import os, sys, ConfigParser
from utils import ContextLogger
logger = ContextLogger.getLogger('')
import imp
from semi import SemI
old_path = os.getcwd()
if "semi" not in old_path:
path = old_path+"/semi/CNetTrain/"
else:
path = old_path+"/CNetTrain/"
os.sys.path.insert(1, path)
import decode as svmdec
import math
import time
import RegexSemI
print sys.path
__author__ = "cued_dialogue_systems_group"
class SVMSemI(SemI.SemI):
|
if __name__ == '__main__':
svm=SVMSemI()
#preds=svm.decode([('I am looking for a chinese restaurant in the north',1.0)])
preds=svm.decode([('I am looking for restaurant',1.0)])
print preds
preds=[]
#preds=svm.decode([('something in the north',1.0)])
preds=svm.decode( [(' I am looking for a cheap restaurant', 1.0)])
print preds
preds=svm.decode( [('something in the north', 1.0)])
print preds | def __init__(self):
'''
Initialise some objects, and use RegexSemI to resolve classification errors and to deal with
goodbye and requests for alternatives
:return:
'''
self.RSemI = RegexSemI.RegexSemI() # For goodbye and request alternatives in decode
self.config = ConfigParser.ConfigParser()
self.config.read(path+"/config/eg.cfg")
self.classifiers=svmdec.init_classifier(self.config)
self.sys_act = []
def decode(self, ASR_obs, sys_act=None, turn=None):
'''
Includes os.chdir to change directories from the pydial root to the locally installed CNetTrain directory inside semi.
Directories are changed back to pydial root after prediction. This ensures all the required
config and data files are accessed.
:param ASR_obs: hypothesis with the ASR N-best list
:param sys_act: previous system dialogue act
:param turn: turn id
:return: Semantic representation from the asr output
'''
#Check first general dialogue acts with Regular Expressions
regexpred = self.decode_general_hypothesis(ASR_obs[0][0])
if "bye()" in regexpred:
return [("bye()", 1.0)]
elif "reqalts()" in regexpred:
return [("reqalts()", 1.0)]
elif "affirm()" in regexpred:
return [("affirm()",1.0)]
elif "negate()"in regexpred:
return [("negate()",1.0)]
elif "hello()" in regexpred:
return [("hello()",1.0)]
else:
old_path = os.getcwd()
os.chdir(path)
sentinfo = self.input_json(ASR_obs, self.sys_act, turn)
before = int(round(time.time() * 1000))
predictions = svmdec.decode(self.classifiers,self.config, sentinfo)
after = int(round(time.time() * 1000))
pred_dur = after - before
logger.debug("prediction time: %d" % pred_dur) # Time taken by DLSemI for prediction
os.chdir(old_path)
logger.info(predictions)
self.semActs = self.format_semi_output(predictions)
logger.info(self.semActs)
return self.semActs
def input_json(self, ASR_obs, sys_act, turn):
'''
Format the incoming ASR_obs and sys_act into an input for SVM Classifiers in JSON
:param ASR_obs: ASR hypothesis
:param sys_act: Last system action
:param turn: Turn id
:return:
'''
logger.info(ASR_obs)
sentinfo = {}
asrhyps = []
for obs in ASR_obs:
asrhyps.append(dict([ (u'asr-hyp', unicode(obs[0])), (u'score', math.log(obs[1]))]))
sentinfo['turn-id'] = turn
sentinfo['asr-hyps'] = asrhyps
sentinfo['prevsysacts'] = []
return sentinfo
def format_semi_output(self, sluhyps):
'''
Transform the output of SVM classifier to make it compatible with cued-pydial system
:param sluhyps: output coming from SVMSemI
:return: SVMSemI output in the required format for cued-pydial
'''
prediction_clean=[]
for hyp in sluhyps:
if not hyp["slu-hyp"]:
prediction_clean = [('null()',hyp['score'])]
continue
probability = hyp['score']
slu_hyp=hyp["slu-hyp"]
for sluh in slu_hyp:
dact = sluh['act']
pred_str=unicode(dact)
prediction_string = []
if not sluh['slots']:
prediction_string.append(pred_str+"()")
for slot in sluh['slots']:
prediction_string.append('%s(%s=%s)' % (unicode(dact), unicode(slot[0]), unicode(slot[1])))
prediction_string = '|'.join(prediction_string)
prediction_clean.append((prediction_string, probability))
return prediction_clean
def decode_general_hypothesis(self, obs):
'''
Regular expressions for bye() and reqalts(), affirm and type
:param obs: ASR hypothesis
:return: RegexSemI recognised dialogue act
'''
self.RSemI.semanticActs = []
self.RSemI._decode_reqalts(obs)
self.RSemI._decode_bye(obs)
self.RSemI._decode_type(obs)
self.RSemI._decode_affirm(obs)
return self.RSemI.semanticActs |
config.py | # -*- coding: utf-8 -*-
import os
class Config(object):
DEBUG = True
HOST = '0.0.0.0'
PORT = os.getenv('TESLA_PORT', 8000)
SECRET_KEY = (
'\xc85\x95\x9a\x80\xc1\x93\xd0\xe9\x95\x08\xfb\xbe\x85'
'\xd0\x1aq\xd3\x95\xc9\xad \xc0\x08'
) | #TESLA_CLIENT_ID=e4a9949fcfa04068f59abb5a658f2bac0a3428e4652315490b659d5ab3f35a9e
#TESLA_CLIENT_SECRET=c75f14bbadc8bee3a7594412c31⁄416f8300256d7668ea7e6e7f06727bfb9d220 | #http://docs.timdorr.apiary.io/#reference/authentication/tokens |
loganalytics.go | package compute
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
"net/http"
)
// LogAnalyticsClient is the compute Client
type LogAnalyticsClient struct {
BaseClient
}
// NewLogAnalyticsClient creates an instance of the LogAnalyticsClient client.
func NewLogAnalyticsClient(subscriptionID string) LogAnalyticsClient {
return NewLogAnalyticsClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewLogAnalyticsClientWithBaseURI creates an instance of the LogAnalyticsClient client.
func NewLogAnalyticsClientWithBaseURI(baseURI string, subscriptionID string) LogAnalyticsClient {
return LogAnalyticsClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// ExportRequestRateByInterval export logs that show Api requests made by this subscription in the given time window to
// show throttling activities.
// Parameters:
// parameters - parameters supplied to the LogAnalytics getRequestRateByInterval Api.
// location - the location upon which virtual-machine-sizes is queried.
func (client LogAnalyticsClient) ExportRequestRateByInterval(ctx context.Context, parameters RequestRateByIntervalInput, location string) (result LogAnalyticsExportRequestRateByIntervalFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,
Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil |
req, err := client.ExportRequestRateByIntervalPreparer(ctx, parameters, location)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", nil, "Failure preparing request")
return
}
result, err = client.ExportRequestRateByIntervalSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportRequestRateByInterval", result.Response(), "Failure sending request")
return
}
return
}
// ExportRequestRateByIntervalPreparer prepares the ExportRequestRateByInterval request.
func (client LogAnalyticsClient) ExportRequestRateByIntervalPreparer(ctx context.Context, parameters RequestRateByIntervalInput, location string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"location": autorest.Encode("path", location),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getRequestRateByInterval", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ExportRequestRateByIntervalSender sends the ExportRequestRateByInterval request. The method will close the
// http.Response Body if it receives an error.
func (client LogAnalyticsClient) ExportRequestRateByIntervalSender(req *http.Request) (future LogAnalyticsExportRequestRateByIntervalFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// ExportRequestRateByIntervalResponder handles the response to the ExportRequestRateByInterval request. The method always
// closes the http.Response Body.
func (client LogAnalyticsClient) ExportRequestRateByIntervalResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
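// Hypothetical caller sketch (subscription ID, location and parameter values are
// assumptions, not taken from this file) for the request-rate export defined above:
//
//   client := compute.NewLogAnalyticsClient("<subscription-id>")
//   future, err := client.ExportRequestRateByInterval(ctx, params, "westus")
//   // poll the returned azure.Future until the async operation completes, then
//   // decode the result with ExportRequestRateByIntervalResponder.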
// ExportThrottledRequests export logs that show total throttled Api requests for this subscription in the given time
// window.
// Parameters:
// parameters - parameters supplied to the LogAnalytics getThrottledRequests Api.
// location - the location upon which virtual-machine-sizes is queried.
func (client LogAnalyticsClient) ExportThrottledRequests(ctx context.Context, parameters ThrottledRequestsInput, location string) (result LogAnalyticsExportThrottledRequestsFuture, err error) {
if err := validation.Validate([]validation.Validation{
{TargetValue: location,
Constraints: []validation.Constraint{{Target: "location", Name: validation.Pattern, Rule: `^[-\w\._]+$`, Chain: nil}}}}); err != nil {
return result, validation.NewError("compute.LogAnalyticsClient", "ExportThrottledRequests", err.Error())
}
req, err := client.ExportThrottledRequestsPreparer(ctx, parameters, location)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", nil, "Failure preparing request")
return
}
result, err = client.ExportThrottledRequestsSender(req)
if err != nil {
err = autorest.NewErrorWithError(err, "compute.LogAnalyticsClient", "ExportThrottledRequests", result.Response(), "Failure sending request")
return
}
return
}
// ExportThrottledRequestsPreparer prepares the ExportThrottledRequests request.
func (client LogAnalyticsClient) ExportThrottledRequestsPreparer(ctx context.Context, parameters ThrottledRequestsInput, location string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"location": autorest.Encode("path", location),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-10-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsContentType("application/json; charset=utf-8"),
autorest.AsPost(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/logAnalytics/apiAccess/getThrottledRequests", pathParameters),
autorest.WithJSON(parameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ExportThrottledRequestsSender sends the ExportThrottledRequests request. The method will close the
// http.Response Body if it receives an error.
func (client LogAnalyticsClient) ExportThrottledRequestsSender(req *http.Request) (future LogAnalyticsExportThrottledRequestsFuture, err error) {
var resp *http.Response
resp, err = autorest.SendWithSender(client, req,
azure.DoRetryWithRegistration(client.Client))
if err != nil {
return
}
err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted))
if err != nil {
return
}
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
// ExportThrottledRequestsResponder handles the response to the ExportThrottledRequests request. The method always
// closes the http.Response Body.
func (client LogAnalyticsClient) ExportThrottledRequestsResponder(resp *http.Response) (result LogAnalyticsOperationResult, err error) {
err = autorest.Respond(
resp,
client.ByInspecting(),
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
| {
return result, validation.NewError("compute.LogAnalyticsClient", "ExportRequestRateByInterval", err.Error())
} |
main.js | $(document).ready(function(){
giveHtml('production',1);
$('#production-btn').click(function(){
giveHtml('production',1);
})
$('#social-btn').click(function(){
giveHtml('social',2);
})
$('#about-btn').click(function(){
giveHtml('about',3);
})
$('#siteMusic').click(function(){
var oAudio = document.getElementById('background-audio');
if ($(this).hasClass('activeMusic')){
oAudio.pause();
$(this).removeClass('activeMusic');
}else{
oAudio.play();
$(this).addClass('activeMusic');
}
})
})
function giveHtml(handler,bool){
var height;
var preloader = '<div class="container" id="preloader"><i class="fa fa-spinner fa-pulse fa-3x"></i></div>';
$('#wrapper-content').html(preloader);
$.ajax({
url: "/"+ handler + '?locale=' + $('#_locale').val(),
}).done(function(html) {
$('#wrapper-content').html(html);
if (bool === 3){
ymaps.ready(init());
} | $('.social-img-wrapper').height(height);
}
});
}
init = function() {
var myMap, myPlacemark;
myMap = new ymaps.Map("map", {
center: [44.61660700178595,33.5251130000002],
zoom: 15
});
myPlacemark = new ymaps.Placemark([44.61660700178595,33.5251130000002]);
return myMap.geoObjects.add(myPlacemark);
}; | if (bool === 2){
height = $('.social-img-wrapper').width(); |
main.go | package main
import (
"code.dopame.me/veonik/squircy3/plugin"
"code.dopame.me/veonik/squircy3/plugins/node_compat"
)
func | () {
plugin.Main(node_compat.PluginName)
}
func Initialize(m *plugin.Manager) (plugin.Plugin, error) {
return node_compat.Initialize(m)
}
| main |
secret_store.py | # Copyright (c) 2014 Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from oslo_config import cfg
import six
from stevedore import named
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.util import multiple_backends
from barbican.plugin.util import utils as plugin_utils
_SECRET_STORE = None
CONF = config.new_config()
DEFAULT_PLUGIN_NAMESPACE = 'barbican.secretstore.plugin'
DEFAULT_PLUGINS = ['store_crypto']
store_opt_group = cfg.OptGroup(name='secretstore',
title='Secret Store Plugin Options')
store_opts = [
cfg.StrOpt('namespace',
default=DEFAULT_PLUGIN_NAMESPACE,
help=u._('Extension namespace to search for plugins.')
),
cfg.MultiStrOpt('enabled_secretstore_plugins',
default=DEFAULT_PLUGINS,
help=u._('List of secret store plugins to load.')
),
cfg.BoolOpt('enable_multiple_secret_stores',
default=False,
help=u._('Flag to enable multiple secret store plugin'
' backend support. Default is False')
),
cfg.ListOpt('stores_lookup_suffix',
help=u._('List of suffix to use for looking up plugins which '
'are supported with multiple backend support.')
)
]
CONF.register_group(store_opt_group)
CONF.register_opts(store_opts, group=store_opt_group)
config.parse_args(CONF)
config.set_module_config("secretstore", CONF)
def list_opts():
yield store_opt_group, store_opts
class SecretStorePluginNotFound(exception.BarbicanHTTPException):
"""Raised when no plugins are installed."""
client_message = u._("No plugin was found that could support your request")
status_code = 400
def __init__(self, plugin_name=None):
if plugin_name:
message = u._('Secret store plugin "{name}"'
' not found.').format(name=plugin_name)
else:
message = u._("Secret store plugin not found.")
super(SecretStorePluginNotFound, self).__init__(message)
class SecretStoreSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret store supported plugin is found."""
client_message = u._("Secret store supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for storing "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretStoreSupportedPluginNotFound, self).__init__(
message)
class SecretGenerateSupportedPluginNotFound(exception.BarbicanHTTPException):
"""Raised when no secret generate supported plugin is found."""
client_message = u._("Secret generate supported plugin not found.")
status_code = 400
def __init__(self, key_spec):
message = u._("Could not find a secret store plugin for generating "
"secret with algorithm '{alg}' and bit-length "
"'{len}'.").format(alg=key_spec.alg,
len=key_spec.bit_length)
super(SecretGenerateSupportedPluginNotFound, self).__init__(
message)
class SecretContentTypeNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for payload content type is not available."""
status_code = 400
def __init__(self, content_type):
super(SecretContentTypeNotSupportedException, self).__init__(
u._("A Content-Type of '{content_type}' for secrets is "
"not supported").format(
content_type=content_type)
)
self.content_type = content_type
self.client_message = u._(
"content-type of '{content_type}' not supported").format(
content_type=content_type)
class SecretContentEncodingNotSupportedException(
exception.BarbicanHTTPException):
"""Raised when support for payload content encoding is not available."""
status_code = 400
def __init__(self, content_encoding):
super(SecretContentEncodingNotSupportedException, self).__init__(
u._("Secret Content-Encoding of '{content_encoding}' "
"not supported").format(
content_encoding=content_encoding)
)
self.content_encoding = content_encoding
self.client_message = u._(
"content-encoding of '{content_encoding}' not supported").format(
content_encoding=content_encoding)
class SecretNoPayloadProvidedException(exception.BarbicanException):
"""Raised when secret information is not provided."""
def __init__(self):
super(SecretNoPayloadProvidedException, self).__init__(
u._('No secret information provided to encrypt.')
)
class SecretContentEncodingMustBeBase64(exception.BarbicanHTTPException):
"""Raised when encoding must be base64."""
client_message = u._("Text-based binary secret payloads must "
"specify a content-encoding of 'base64'")
status_code = 400
def __init__(self):
super(SecretContentEncodingMustBeBase64, self).__init__(
u._("Encoding type must be 'base64' for text-based payloads.")
)
class SecretGeneralException(exception.BarbicanException):
"""Raised when a system fault has occurred."""
def __init__(self, reason=u._('Unknown')):
super(SecretGeneralException, self).__init__(
u._('Problem seen during crypto processing - '
'Reason: {reason}').format(reason=reason)
)
self.reason = reason
class SecretPayloadDecodingError(exception.BarbicanHTTPException):
"""Raised when payload could not be decoded."""
client_message = u._("Problem decoding payload")
status_code = 400
def __init__(self):
super(SecretPayloadDecodingError, self).__init__(
u._("Problem decoding payload")
)
class SecretAcceptNotSupportedException(exception.BarbicanHTTPException):
"""Raised when requested decrypted content-type is not available."""
client_message = u._("Wrong payload content-type")
status_code = 406
def __init__(self, accept):
super(SecretAcceptNotSupportedException, self).__init__(
u._("Secret Accept of '{accept}' not supported").format(
accept=accept)
)
self.accept = accept
class SecretNotFoundException(exception.BarbicanHTTPException):
"""Raised when secret information could not be located."""
client_message = u._("Not Found. Sorry but your secret is in another "
"castle")
status_code = 404
def __init__(self):
super(SecretNotFoundException, self).__init__(
u._('No secret information found'))
class SecretAlgorithmNotSupportedException(exception.BarbicanHTTPException):
"""Raised when support for an algorithm is not available."""
client_message = u._("Requested algorithm is not supported")
status_code = 400
def __init__(self, algorithm):
super(SecretAlgorithmNotSupportedException, self).__init__(
u._("Secret algorithm of '{algorithm}' not supported").format(
algorithm=algorithm)
)
self.algorithm = algorithm
class GeneratePassphraseNotSupportedException(exception.BarbicanHTTPException):
"""Raised when generating keys encrypted by passphrase is not supported."""
client_message = (
u._("Generating keys encrypted with passphrases is not supported")
)
status_code = 400
def __init__(self):
super(GeneratePassphraseNotSupportedException, self).__init__(
self.client_message
)
class SecretStorePluginsNotConfigured(exception.BarbicanException):
"""Raised when there are no secret store plugins configured."""
def __init__(self):
super(SecretStorePluginsNotConfigured, self).__init__(
u._('No secret store plugins have been configured')
)
class StorePluginNotAvailableOrMisconfigured(exception.BarbicanException):
"""Raised when a plugin that was previously used can not be found."""
def __init__(self, plugin_name):
super(StorePluginNotAvailableOrMisconfigured, self).__init__(
u._("The requested Store Plugin {plugin_name} is not "
"currently available. This is probably a server "
"misconfiguration.").format(
plugin_name=plugin_name)
)
self.plugin_name = plugin_name
class SecretType(object):
"""Constant to define the symmetric key type.
Used by getSecret to retrieve a symmetric key.
"""
SYMMETRIC = "symmetric"
"""Constant to define the public key type. Used by getSecret to retrieve a
public key.
"""
PUBLIC = "public"
"""Constant to define the private key type. Used by getSecret to retrieve a
private key.
"""
PRIVATE = "private"
"""Constant to define the passphrase type. Used by getSecret to retrieve a
passphrase."""
PASSPHRASE = "passphrase" # nosec
"""Constant to define the certificate type. Used by getSecret to retrieve a
certificate."""
CERTIFICATE = "certificate"
"""Constant to define the opaque date type. Used by getSecret to retrieve
opaque data. Opaque data can be any kind of data. This data type signals to
Barbican to just store the information and do not worry about the format or
encoding. This is the default type if no type is specified by the user."""
OPAQUE = utils.SECRET_TYPE_OPAQUE
class KeyAlgorithm(object):
"""Constant for the Diffie Hellman algorithm."""
DIFFIE_HELLMAN = "diffie_hellman"
"""Constant for the DSA algorithm."""
DSA = "dsa"
"""Constant for the RSA algorithm."""
RSA = "rsa"
"""Constant for the Elliptic Curve algorithm."""
EC = "ec"
"""Constant for the HMACSHA1 algorithm."""
HMACSHA1 = "hmacsha1"
"""Constant for the HMACSHA256 algorithm."""
HMACSHA256 = "hmacsha256"
"""Constant for the HMACSHA384 algorithm."""
HMACSHA384 = "hmacsha384"
"""Constant for the HMACSHA512 algorithm."""
HMACSHA512 = "hmacsha512"
"""List of asymmetric algorithms"""
ASYMMETRIC_ALGORITHMS = [DIFFIE_HELLMAN, DSA, RSA, EC]
"""Constant for the AES algorithm."""
AES = "aes"
"""Constant for the DES algorithm."""
DES = "des"
"""Constant for the DESede (triple-DES) algorithm."""
DESEDE = "desede"
"""List of symmetric algorithms"""
SYMMETRIC_ALGORITHMS = [AES, DES, DESEDE, HMACSHA1,
HMACSHA256, HMACSHA384, HMACSHA512]
class KeySpec(object):
"""This object specifies the algorithm and bit length for a key."""
def __init__(self, alg=None, bit_length=None, mode=None, passphrase=None):
"""Creates a new KeySpec.
:param alg:algorithm for the key
:param bit_length:bit length of the key
:param mode:algorithm mode for the key
:param passphrase:passphrase for the private_key
"""
self.alg = alg
self.bit_length = bit_length
self.mode = mode # TODO(john-wood-w) Paul, is 'mode' required?
self.passphrase = passphrase
class SecretDTO(object):
"""This object is a secret data transfer object (DTO).
This object encapsulates a key and attributes about the key. The attributes
include a KeySpec that contains the algorithm and bit length. The
attributes also include information on the encoding of the key.
"""
# TODO(john-wood-w) Remove 'content_type' once secret normalization work is
# completed.
def __init__(self, type, secret, key_spec, content_type,
transport_key=None):
"""Creates a new SecretDTO.
The secret is stored in the secret parameter. In the future this
DTO may include compression and key wrapping information.
:param type: SecretType for secret
:param secret: secret, as a base64-encoded string
:param key_spec: KeySpec key specifications
:param content_type: Content type of the secret, one of MIME
types such as 'text/plain' or 'application/octet-stream'
:param transport_key: presence of this parameter indicates that the
secret has been encrypted using a transport key. The transport
key is a base64 encoded x509 transport certificate.
"""
self.type = type or SecretType.OPAQUE
self.secret = secret
self.key_spec = key_spec
self.content_type = content_type
self.transport_key = transport_key
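# A hypothetical construction sketch (algorithm, mode and payload values are
# illustrative only): pairing a KeySpec with a SecretDTO as defined above.
#
#   spec = KeySpec(alg=KeyAlgorithm.AES, bit_length=256, mode='cbc')
#   dto = SecretDTO(SecretType.SYMMETRIC, 'c2VjcmV0', spec,
#                   'application/octet-stream')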
class AsymmetricKeyMetadataDTO(object):
"""This DTO encapsulates metadata(s) for asymmetric key components.
These components are private_key_meta, public_key_meta and passphrase_meta.
"""
def __init__(self, private_key_meta=None,
public_key_meta=None,
passphrase_meta=None):
"""Constructor for AsymmetricKeyMetadataDTO
:param private_key_meta: private key metadata
:param public_key_meta: public key metadata
:param passphrase_meta: passphrase key metadata
"""
self.private_key_meta = private_key_meta
self.public_key_meta = public_key_meta
self.passphrase_meta = passphrase_meta
@six.add_metaclass(abc.ABCMeta)
class SecretStoreBase(object):
@abc.abstractmethod
def get_plugin_name(self):
"""Gets user friendly plugin name.
This plugin name is expected to be read from config file.
There will be a default defined for plugin name which can be customized
in specific deployment if needed.
This name needs to be unique across a deployment.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod | A dictionary is returned that contains metadata about the newly created
symmetric key. The dictionary of metadata is stored by Barbican and
passed into other methods to aid the plugins. This can be useful for
plugins that generate a unique ID in the external data store and use it
to retrieve the key in the future. The returned dictionary may be empty
if the SecretStore does not require it.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: an optional dictionary containing metadata about the key
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_asymmetric_key(self, key_spec):
"""Generate a new asymmetric key pair and store it.
Generates a new asymmetric key pair and stores it in the secret
store. An object of type AsymmetricKeyMetadataDTO will be returned
containing attributes of metadata for newly created key pairs.
The metadata is stored by Barbican and passed into other methods
to aid the plugins. This can be useful for plugins that generate
a unique ID in the external data store and use it to retrieve the
key pairs in the future.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: An object of type AsymmetricKeyMetadataDTO containing
metadata about the key pair.
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret(self, secret_dto):
"""Stores a key.
The SecretDTO contains the bytes of the secret and properties of the
secret. The SecretStore retrieves the secret bytes, stores them, and
returns a dictionary of metadata about the secret. This can be
useful for plugins that generate a unique ID in the external data
store and use it to retrieve the secret in the future. The returned
dictionary may be empty if the SecretStore does not require it.
:param secret_dto: SecretDTO for secret
:returns: an optional dictionary containing metadata about the secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def get_secret(self, secret_type, secret_metadata):
"""Retrieves a secret from the secret store.
Retrieves a secret from the secret store and returns a SecretDTO that
contains the secret.
The secret_metadata parameter is the metadata returned from one of the
generate or store methods. This data is used by the plugins to retrieve
the key.
The secret_type parameter may be useful for secret stores to know the
expected format of the secret. For instance if the type is
SecretDTO.PRIVATE then a PKCS8 structure is returned. This way secret
stores do not need to manage the secret type on their own.
:param secret_type: secret type
:param secret_metadata: secret metadata
:returns: SecretDTO that contains secret
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def generate_supports(self, key_spec):
"""Returns a boolean indicating if the secret type is supported.
This checks if the algorithm and bit length are supported by the
generate methods. This is useful to call before calling
generate_symmetric_key or generate_asymetric_key to see if the key type
is supported before trying to generate it.
:param key_spec: KeySpec that contains details on the algorithm and bit
length
:returns: boolean indicating if the algorithm is supported
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def delete_secret(self, secret_metadata):
"""Deletes a secret from the secret store.
Deletes a secret from a secret store. It can no longer be referenced
after this call.
:param secret_metadata: secret_metadata
"""
raise NotImplementedError # pragma: no cover
@abc.abstractmethod
def store_secret_supports(self, key_spec):
"""Returns a boolean indicating if the secret can be stored.
Checks if the secret store can store the secret, give the attributes
of the secret in the KeySpec. For example, some plugins may need to
know the attributes in order to store the secret, but other plugins
may be able to store the secret as a blob if no attributes are given.
:param key_spec: KeySpec for the secret
:returns: a boolean indicating if the secret can be stored
"""
raise NotImplementedError # pragma: no cover
def get_transport_key(self):
"""Gets a transport key.
Returns the current valid transport key associated with this plugin.
The transport key is expected to be a base64 encoded x509 certificate
containing a public key. Admins are responsible for deleting old keys
from the database using the DELETE method on the TransportKey resource.
By default, returns None. Plugins that support transport key
wrapping should override this method.
"""
return None
def is_transport_key_current(self, transport_key):
"""Determines if the provided transport key is the current valid key
Returns true if the transport key is the current valid transport key.
If the key is not valid, then barbican core will request a new
transport key from the plugin.
Returns False by default. Plugins that support transport key wrapping
should override this method.
"""
return False
def _enforce_extensions_configured(plugin_related_function):
def _check_plugins_configured(self, *args, **kwargs):
if not self.extensions:
raise SecretStorePluginsNotConfigured()
return plugin_related_function(self, *args, **kwargs)
return _check_plugins_configured
class SecretStorePluginManager(named.NamedExtensionManager):
def __init__(self, conf=CONF, invoke_args=(), invoke_kwargs={}):
ss_conf = config.get_module_config('secretstore')
plugin_names = self._get_internal_plugin_names(ss_conf)
super(SecretStorePluginManager, self).__init__(
ss_conf.secretstore.namespace,
plugin_names,
invoke_on_load=False, # Defer creating plugins to utility below.
invoke_args=invoke_args,
invoke_kwds=invoke_kwargs,
name_order=True # extensions sorted as per order of plugin names
)
plugin_utils.instantiate_plugins(self, invoke_args, invoke_kwargs)
multiple_backends.sync_secret_stores(self)
@_enforce_extensions_configured
def get_plugin_store(self, key_spec, plugin_name=None,
transport_key_needed=False, project_id=None):
"""Gets a secret store plugin.
:param: plugin_name: set to plugin_name to get specific plugin
:param: key_spec: KeySpec of key that will be stored
:param: transport_key_needed: set to True if a transport
key is required.
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=plugin_name)
if plugin_name is not None:
for plugin in active_plugins:
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise SecretStorePluginNotFound(plugin_name)
if not transport_key_needed:
for plugin in active_plugins:
if plugin.store_secret_supports(key_spec):
return plugin
else:
for plugin in active_plugins:
if (plugin.get_transport_key() is not None and
plugin.store_secret_supports(key_spec)):
return plugin
raise SecretStoreSupportedPluginNotFound(key_spec)
@_enforce_extensions_configured
def get_plugin_retrieve_delete(self, plugin_name):
"""Gets a secret retrieve/delete plugin.
If this function is being called, it is because we are trying to
retrieve or delete an already stored secret. Thus, the plugin name is
actually gotten from the plugin metadata that has already been stored
in the database. So, in this case, if this plugin is not available,
this might be due to a server misconfiguration.
:returns: SecretStoreBase plugin implementation
:raises: StorePluginNotAvailableOrMisconfigured: If the plugin wasn't
found it's because the plugin parameters were not properly
configured on the database side.
"""
for plugin in plugin_utils.get_active_plugins(self):
if utils.generate_fullname_for(plugin) == plugin_name:
return plugin
raise StorePluginNotAvailableOrMisconfigured(plugin_name)
@_enforce_extensions_configured
def get_plugin_generate(self, key_spec, project_id=None):
"""Gets a secret generate plugin.
:param key_spec: KeySpec that contains details on the type of key to
generate
:returns: SecretStoreBase plugin implementation
"""
active_plugins = multiple_backends.get_applicable_store_plugins(
self, project_id=project_id, existing_plugin_name=None)
for plugin in active_plugins:
if plugin.generate_supports(key_spec):
return plugin
raise SecretGenerateSupportedPluginNotFound(key_spec)
def _get_internal_plugin_names(self, secretstore_conf):
"""Gets plugin names used for loading via stevedore.
When multiple secret store support is enabled, then secret store plugin
names are read via updated configuration structure. If not enabled,
then it reads MultiStr property in 'secretstore' config section.
"""
# to cache default global secret store value on first use
self.global_default_store_dict = None
if utils.is_multiple_backends_enabled():
self.parsed_stores = multiple_backends.\
read_multiple_backends_config()
plugin_names = [store.store_plugin for store in self.parsed_stores
if store.store_plugin]
else:
plugin_names = secretstore_conf.secretstore.\
enabled_secretstore_plugins
return plugin_names
def get_manager():
global _SECRET_STORE
if not _SECRET_STORE:
_SECRET_STORE = SecretStorePluginManager()
return _SECRET_STORE | def generate_symmetric_key(self, key_spec):
"""Generate a new symmetric key and store it.
Generates a new symmetric key and stores it in the secret store. |
link_state.py | #################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for link state."""
from typing import Optional
from deepsim.core.pose import Pose
from deepsim.core.twist import Twist
from gazebo_msgs.msg import LinkState as ROSLinkState
class LinkState:
"""
LinkState class
"""
def __init__(self,
link_name: Optional[str] = None,
pose: Optional[Pose] = None,
twist: Optional[Twist] = None,
reference_frame: Optional[str] = None):
"""
Initialize LinkState class
| twist (Optional[Twist]): desired twist in reference frame
reference_frame (Optional[str]): set pose/twist relative to the frame of this entity (Body/Model)
leave empty or "world" or "map" defaults to world-frame
"""
self._link_name = link_name
self._pose = pose.copy() if pose else Pose()
self._twist = twist.copy() if twist else Twist()
self._reference_frame = reference_frame or ''
@property
def link_name(self) -> str:
"""
Returns the link name
Returns:
str: link name
"""
return self._link_name
@link_name.setter
def link_name(self, value: str) -> None:
"""
Set link name
Args:
value (str): link name
"""
self._link_name = value
@property
def pose(self) -> Pose:
"""
Returns the copy of pose.
Returns:
Pose: the copy of pose of the link
"""
return self._pose.copy()
@pose.setter
def pose(self, value: Pose) -> None:
"""
Set the pose.
Args:
value (Pose): the pose
"""
self._pose = value.copy()
@property
def twist(self) -> Twist:
"""
Return the copy of twist.
Returns:
Twist: the copy of twist
"""
return self._twist.copy()
@twist.setter
def twist(self, value: Twist) -> None:
"""
Set the twist.
Args:
value (Twist): the twist
"""
self._twist = value.copy()
@property
def reference_frame(self) -> str:
"""
Returns the reference frame
Returns:
str: the reference frame
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, value: str) -> None:
"""
Set the reference frame
Args:
value (str): the reference frame
"""
self._reference_frame = value
def to_ros(self) -> ROSLinkState:
"""
Return the ROS LinkState object created from this link state.
Returns:
gazebo_msgs.msg.LinkState: ROS LinkState
"""
ros_link_state = ROSLinkState()
if self.link_name:
ros_link_state.link_name = self.link_name
if self._pose:
ros_link_state.pose = self._pose.to_ros()
if self._twist:
ros_link_state.twist = self._twist.to_ros()
if self.reference_frame:
ros_link_state.reference_frame = self.reference_frame
return ros_link_state
@staticmethod
def from_ros(value: ROSLinkState) -> 'LinkState':
"""
Returns new LinkState object created from ROS LinkState
Args:
value (ROSLinkState): ROS LinkState
Returns:
LinkState: new LinkState object created from ROS LinkState
"""
return LinkState(link_name=value.link_name,
pose=Pose.from_ros(value.pose),
twist=Twist.from_ros(value.twist),
reference_frame=value.reference_frame)
def copy(self) -> 'LinkState':
"""
Returns a copy.
Returns:
LinkState: the copied link state
"""
return LinkState(link_name=self.link_name,
pose=self._pose,
twist=self._twist,
reference_frame=self.reference_frame)
def __eq__(self, other: 'LinkState') -> bool:
"""
Equality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: True if the differences of all components are within epsilon, Otherwise False.
"""
return (self.link_name == other.link_name and self.reference_frame == other.reference_frame
and self._pose == other._pose and self._twist == other._twist)
def __ne__(self, other: 'LinkState') -> bool:
"""
Inequality of LinkState.
Args:
other (LinkState): other to compare
Returns:
bool: False if the differences of all components are within epsilon, Otherwise True.
"""
return not self.__eq__(other)
def __str__(self) -> str:
"""
String representation of a link state
Returns:
str: String representation of a link state
"""
return "(link_name=%s, pose=%s, twist=%s, reference_frame=%s)" % (self.link_name,
repr(self._pose),
repr(self._twist),
self.reference_frame)
def __repr__(self) -> str:
"""
String representation including class
Returns:
str: String representation including class
"""
return "LinkState" + str(self) | Args:
link_name (Optional[str]): link name
pose (Optional[Pose]): desired pose in reference frame |
SoundManager.go | package ozsndqueue
import (
"fmt"
"sort"
)
type SoundManager struct {
SoundService SoundService
priorityQueue map[int][]string
isListen bool
isPlay bool
// Channel used to signal that a Put has occurred
putChan chan int
// Channel used to signal that a shutdown request has been issued
endChan chan int
}
func CreateSoundManager(queueSize int) *SoundManager {
soundManager := &SoundManager{}
soundManager.SoundService = NaiveSoundService{}
soundManager.priorityQueue = make(map[int][]string)
soundManager.isListen = true
soundManager.isPlay = true
soundManager.putChan = make(chan int, queueSize)
soundManager.endChan = make(chan int)
return soundManager
}
func (this SoundManager) StartMainLoop() {
this.mainLoop()
}
func (this SoundManager) mainLoop() {
for {
select {
case <-this.putChan:
this.PlayNext()
case <-this.endChan:
return
}
}
}
func (this SoundManager) Stop() {
this.endChan <- 0
}
func (this SoundManager) StartListen() {
this.isListen = true
}
func (this SoundManager) PauseListen() {
this.isListen = false
}
func (this SoundManager) StartPlay() {
this.isPlay = true
}
func (this SoundManager) PausePlay() {
this.isPlay = false
}
func (this *SoundManager) Play(fileUri string) error {
return this.SoundService.Play(fileUri)
}
// TODO: Decide what to return when a Put is ignored.
func (this *SoundManager) Put(fileUri string, queueNumber int) {
// Ignore anything that is Put while not listening
if !this.isListen {
return
}
_, exist := this.priorityQueue[queueNumber]
// Check whether the key exists.
// If it does not, add an empty slice for that key.
if !exist {
this.priorityQueue[queueNumber] = []string{}
}
this.priorityQueue[queueNumber] = append(this.priorityQueue[queueNumber], fileUri)
// Send a Put notification on the channel.
this.putChan <- 0
}
// PlayNext plays the highest-priority file.
func (this *SoundManager) PlayNext() error {
if len(this.priorityQueue) == 0 {
return fmt.Errorf("priority queue is empty")
}
fileUri := this.prioritiedDequeue()
return this.Play(fileUri)
}
// prioritiedDequeue retrieves the highest-priority file name from the queues.
func (this *SoundManager) prioritiedDequeue() string {
// If the map has no elements at all, return an empty string.
if len(this.priorityQueue) == 0 {
return ""
}
// Extract the keys into a slice
keys := []int{} | for key := range this.priorityQueue {
keys = append(keys, key)
}
// Sort by priority
sort.Ints(keys)
// Search in priority order and return the first entry found.
// The returned element is removed from its queue.
for _, value := range keys {
fileUri, fileUris := dequeue(this.priorityQueue[value])
// Update the queue
if len(fileUris) == 0 {
// If the queue is now empty, delete it from the map
delete(this.priorityQueue, value)
} else {
// Otherwise, replace the queue with the dequeued slice
this.priorityQueue[value] = fileUris
}
// Return the dequeued fileUri.
return fileUri
}
// Nothing-found should not actually happen, but
// return an empty string if it does.
return ""
}
// Dequeue an element from the file name list (slice).
func dequeue(fileUris []string) (string, []string) {
// If there are no elements at all, return an empty string.
if len(fileUris) == 0 {
return "", fileUris
}
// Return the first element and a slice of the remaining elements.
// The length was already checked above, so this can be returned as-is.
return fileUris[0], fileUris[1:len(fileUris)]
}
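// Hypothetical usage sketch (the file URIs are illustrative only): lower queue
// numbers are dequeued first when multiple entries are pending, because
// prioritiedDequeue sorts the keys in ascending order.
//
//   sm := CreateSoundManager(16)
//   go sm.StartMainLoop()
//   sm.Put("file:///tmp/low.ogg", 5)
//   sm.Put("file:///tmp/urgent.ogg", 1) // dequeued before queue 5 when both are pending
//   sm.Stop()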
type DBusServiceListenerForSoundManager struct {
SoundManager *SoundManager
}
func (this DBusServiceListenerForSoundManager) StartListen() {
this.SoundManager.StartListen()
}
func (this DBusServiceListenerForSoundManager) PauseListen() {
this.SoundManager.PauseListen()
}
func (this DBusServiceListenerForSoundManager) StartPlay() {
this.SoundManager.StartPlay()
}
func (this DBusServiceListenerForSoundManager) PausePlay() {
this.SoundManager.PausePlay()
}
func (this DBusServiceListenerForSoundManager) Put(fileUri string, queueNumber int32) {
this.SoundManager.Put(fileUri, int(queueNumber))
}
func (this DBusServiceListenerForSoundManager) PlayNow(fileUri string) {
this.SoundManager.Play(fileUri)
} | |
items.py | # https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class Item(scrapy.Item):
|
class File(Item):
data = scrapy.Field()
data_type = scrapy.Field()
# Added by the FilesStore extension, for the KingfisherProcessAPI extension to read the file.
path = scrapy.Field()
files_store = scrapy.Field()
class FileItem(Item):
number = scrapy.Field()
data = scrapy.Field()
data_type = scrapy.Field()
# Added by the FilesStore extension, for the KingfisherProcessAPI extension to read the file.
path = scrapy.Field()
files_store = scrapy.Field()
class FileError(Item):
errors = scrapy.Field()
class PluckedItem(scrapy.Item):
value = scrapy.Field()
| file_name = scrapy.Field()
url = scrapy.Field()
validate = True |
enterprise.go | package quadrant
import "github.com/hculpan/kabtrek/game"
// Enterprise : Information relating to the player ship
type Enterprise struct {
X int
Y int
QuadrantX int
QuadrantY int
Energy int
Shields int
Torpedoes int
}
// NewEnterprise creates a new Enterprise
func | (xloc int, yloc int) *Enterprise {
return &Enterprise{
X: xloc,
Y: yloc,
Energy: game.EnterpriseMaxEnergy,
Shields: 0,
Torpedoes: game.EnterpriseMaxTorpedoes,
}
}
// Move the enterprise
func (e *Enterprise) Move(x int, y int) {
if e.X != x || e.Y != y {
e.Energy -= EnergyToMove + ((e.Shields / 1000) * EnergyToMove)
}
e.X = x
e.Y = y
}
// TakeDamage applies damage to the Enterprise, absorbed first by the shields
func (e *Enterprise) TakeDamage(damage int) {
if damage <= e.Shields {
e.Shields -= damage
} else {
damage -= e.Shields
e.Shields = 0
e.Energy -= damage * 2
}
}
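// Worked example (illustrative numbers): with Shields=100 and Energy=1000, taking
// 150 damage zeroes the shields and costs (150-100)*2 = 100 energy, leaving 900.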
// Location returns the location of the Enterprise
func (e Enterprise) Location() (int, int) {
return e.X, e.Y
}
// IsPlayer indicates if this is the player object or not
func (e *Enterprise) IsPlayer() bool {
return true
}
// GetShields returns the object's shield strength
func (e Enterprise) GetShields() int {
if e.Shields > 0 {
return e.Shields
} else {
return e.Energy
}
}
// Name returns the display-friendly name
func (e Enterprise) Name() string {
return "Enterprise"
}
| NewEnterprise |
expertfund_state.go | package expertfund
import (
"github.com/filecoin-project/go-address"
addr "github.com/filecoin-project/go-address"
"github.com/filecoin-project/go-state-types/abi"
"github.com/filecoin-project/go-state-types/big"
"github.com/filecoin-project/specs-actors/v2/actors/builtin"
"github.com/filecoin-project/specs-actors/v2/actors/util/adt"
cid "github.com/ipfs/go-cid"
xerrors "golang.org/x/xerrors"
)
// State is the state of the expert fund.
type State struct {
// Information for all submit rdf data experts.
Experts cid.Cid // Map, HAMT[expert]ExpertInfo
DisqualifiedExperts cid.Cid // MAP, HAMT[expert]DisqualifiedExpertInfo
PoolInfo cid.Cid
PieceInfos cid.Cid // Map, HAMT[PieceCID]PieceInfo
DataStoreThreshold uint64
// expert not foundation has daily data register size threshold
DailyImportSizeThreshold uint64
}
// ExpertInfo info of expert registered data
type ExpertInfo struct {
// DataSize total deposited data size of expert
DataSize abi.PaddedPieceSize
Active bool
// RewardDebt reward debt
RewardDebt abi.TokenAmount
LockedFunds abi.TokenAmount // Total rewards and added funds locked in vesting table
VestingFunds cid.Cid // VestingFunds (Vesting Funds schedule for the expert).
UnlockedFunds abi.TokenAmount
}
type DisqualifiedExpertInfo struct {
DisqualifiedAt abi.ChainEpoch
}
type PieceInfo struct {
Expert addr.Address
DepositThreshold uint64
}
type PoolInfo struct {
// AccPerShare Accumulated EPK per share, times 1e12.
AccPerShare abi.TokenAmount
// LastRewardBalance should be updated after any funds withdrawal or burning.
LastRewardBalance abi.TokenAmount
PrevEpoch abi.ChainEpoch
PrevTotalDataSize abi.PaddedPieceSize
CurrentEpoch abi.ChainEpoch
CurrentTotalDataSize abi.PaddedPieceSize
}
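// Illustrative note (not taken from this file): rewards follow the usual
// accumulator pattern, pending = DataSize*AccPerShare/AccumulatedMultiplier - RewardDebt,
// matching the RewardDebt updates in Deposit and Claim below.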
// ConstructState expert fund construct
func | (store adt.Store, pool cid.Cid) (*State, error) {
emptyExpertsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to create empty map: %w", err)
}
emptyPisMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to create experts map: %w", err)
}
emptyDisqualifiedExpertsMapCid, err := adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to create tracked experts map: %w", err)
}
return &State{
Experts: emptyExpertsMapCid,
PoolInfo: pool,
PieceInfos: emptyPisMapCid,
DisqualifiedExperts: emptyDisqualifiedExpertsMapCid,
DataStoreThreshold: DefaultDataStoreThreshold,
DailyImportSizeThreshold: DefaultImportThreshold,
}, nil
}
// Returns err if not found
func (st *State) GetPieceInfos(store adt.Store, pieceCIDs ...cid.Cid) (map[cid.Cid]addr.Address, map[cid.Cid]uint64, error) {
pieceInfos, err := adt.AsMap(store, st.PieceInfos, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, nil, err
}
pieceToExpert := make(map[cid.Cid]addr.Address)
pieceToThreshold := make(map[cid.Cid]uint64)
for _, pieceCID := range pieceCIDs {
var out PieceInfo
found, err := pieceInfos.Get(abi.CidKey(pieceCID), &out)
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, xerrors.Errorf("piece not found: %s", pieceCID)
}
pieceToExpert[pieceCID] = out.Expert
pieceToThreshold[pieceCID] = out.DepositThreshold
}
return pieceToExpert, pieceToThreshold, nil
}
func (st *State) PutPieceInfos(store adt.Store, mustAbsent bool, pieceToInfo map[cid.Cid]*PieceInfo) error {
if len(pieceToInfo) == 0 {
return nil
}
pieceInfos, err := adt.AsMap(store, st.PieceInfos, builtin.DefaultHamtBitwidth)
if err != nil {
return err
}
for pieceCid, pieceInfo := range pieceToInfo {
if mustAbsent {
absent, err := pieceInfos.PutIfAbsent(abi.CidKey(pieceCid), pieceInfo)
if err != nil {
return xerrors.Errorf("failed to put absent: %w", err)
}
if !absent {
return xerrors.Errorf("already exists %s", pieceCid)
}
} else {
err := pieceInfos.Put(abi.CidKey(pieceCid), pieceInfo)
if err != nil {
return xerrors.Errorf("failed to put data info: %w", err)
}
}
}
st.PieceInfos, err = pieceInfos.Root()
if err != nil {
return xerrors.Errorf("failed to flush PieceInfos: %w", err)
}
return nil
}
// !!!Only called by BatchStoreData.
func (st *State) Deposit(rt Runtime, expertToSize map[addr.Address]abi.PaddedPieceSize) error {
store := adt.AsStore(rt)
// update Pool
pool, err := st.UpdatePool(rt)
if err != nil {
return err
}
for expertAddr, size := range expertToSize {
deltaSize := AdjustSize(size)
// update ExpertInfo
expertInfo, err := st.GetExpert(store, expertAddr)
if err != nil {
return err
}
if !expertInfo.Active {
return xerrors.Errorf("inactive expert cannot deposit: %s", expertAddr)
}
if _, err := st.updateVestingFunds(store, rt.CurrEpoch(), pool, expertInfo); err != nil {
return err
}
expertInfo.DataSize += deltaSize
expertInfo.RewardDebt = big.Div(
big.Mul(
big.NewIntUnsigned(uint64(expertInfo.DataSize)),
pool.AccPerShare),
AccumulatedMultiplier)
err = st.SetExpert(store, expertAddr, expertInfo, false)
if err != nil {
return err
}
pool.CurrentTotalDataSize += deltaSize
}
return st.SavePool(store, pool)
}
type ExpertReward struct {
ExpertInfo
PendingFunds abi.TokenAmount
TotalReward abi.TokenAmount
}
func (st *State) Reward(store adt.Store, currEpoch abi.ChainEpoch, expertAddr address.Address) (*ExpertReward, error) {
pool, err := st.GetPool(store)
if err != nil {
return nil, err
}
expert, err := st.GetExpert(store, expertAddr)
if err != nil {
return nil, err
}
pending, err := st.updateVestingFunds(store, currEpoch, pool, expert)
if err != nil {
return nil, err
}
total := big.Add(expert.RewardDebt, pending)
total = big.Add(total, expert.UnlockedFunds)
total = big.Add(total, expert.LockedFunds)
return &ExpertReward{
ExpertInfo: *expert,
PendingFunds: pending,
TotalReward: total,
}, nil
}
func (st *State) Claim(rt Runtime, expertAddr address.Address, amount abi.TokenAmount) (abi.TokenAmount, error) {
pool, err := st.UpdatePool(rt)
if err != nil {
return big.Zero(), err
}
store := adt.AsStore(rt)
out, err := st.GetExpert(store, expertAddr)
if err != nil {
return big.Zero(), err
}
if _, err := st.updateVestingFunds(store, rt.CurrEpoch(), pool, out); err != nil {
return big.Zero(), err
}
if out.Active {
out.RewardDebt = big.Div(
big.Mul(
big.NewIntUnsigned(uint64(out.DataSize)),
pool.AccPerShare),
AccumulatedMultiplier)
}
actual := big.Min(out.UnlockedFunds, amount)
out.UnlockedFunds = big.Sub(out.UnlockedFunds, actual)
if err = st.SetExpert(store, expertAddr, out, false); err != nil {
return big.Zero(), err
}
// save pool
if pool.LastRewardBalance.LessThan(actual) {
return big.Zero(), xerrors.Errorf("LastRewardBalance less than expected amount: %s, %s, %s", expertAddr, pool.LastRewardBalance, actual)
}
pool.LastRewardBalance = big.Sub(pool.LastRewardBalance, actual)
if err = st.SavePool(store, pool); err != nil {
return big.Zero(), err
}
return actual, nil
}
func (st *State) ActivateExpert(rt Runtime, expertAddr address.Address) error {
pool, err := st.UpdatePool(rt)
if err != nil {
return err
}
store := adt.AsStore(rt)
expertInfo, err := st.GetExpert(store, expertAddr)
if err != nil {
return err
}
if !expertInfo.Active {
expertInfo.Active = true
// Clear expert's contribution if necessary.
dInfo, found, err := st.GetDisqualifiedExpertInfo(store, expertAddr)
if err != nil {
return xerrors.Errorf("failed to get disqualified for activation: %w", err)
}
if found {
if dInfo.DisqualifiedAt+ClearExpertContributionDelay < rt.CurrEpoch() {
expertInfo.DataSize = 0
}
err = st.DeleteDisqualifiedExpertInfo(store, expertAddr)
if err != nil {
return xerrors.Errorf("failed to delete disqualified for activation: %w", err)
}
}
expertInfo.RewardDebt = big.Div(
big.Mul(
big.NewIntUnsigned(uint64(expertInfo.DataSize)),
pool.AccPerShare),
AccumulatedMultiplier)
if err := st.SetExpert(store, expertAddr, expertInfo, false); err != nil {
return err
}
pool.CurrentTotalDataSize += expertInfo.DataSize
}
return st.SavePool(store, pool)
}
func (st *State) DeactivateExperts(rt Runtime, experts map[addr.Address]bool) (abi.TokenAmount, error) {
pool, err := st.UpdatePool(rt)
if err != nil {
return big.Zero(), err
}
totalBurned := abi.NewTokenAmount(0)
store := adt.AsStore(rt)
for expertAddr, burnVesting := range experts {
expertInfo, err := st.GetExpert(store, expertAddr)
if err != nil {
return big.Zero(), err
}
if !expertInfo.Active {
continue
}
if _, err := st.updateVestingFunds(store, rt.CurrEpoch(), pool, expertInfo); err != nil {
return big.Zero(), err
}
{
if burnVesting && !expertInfo.LockedFunds.IsZero() {
if pool.LastRewardBalance.LessThan(expertInfo.LockedFunds) {
return big.Zero(), xerrors.Errorf("LastRewardBalance %s less than LockedFunds %s", pool.LastRewardBalance, expertInfo.LockedFunds)
}
pool.LastRewardBalance = big.Sub(pool.LastRewardBalance, expertInfo.LockedFunds)
expertInfo.VestingFunds, err = adt.StoreEmptyMap(store, builtin.DefaultHamtBitwidth)
if err != nil {
return big.Zero(), xerrors.Errorf("failed to create empty map: %w", err)
}
totalBurned = big.Add(totalBurned, expertInfo.LockedFunds)
expertInfo.LockedFunds = abi.NewTokenAmount(0)
}
}
pool.CurrentTotalDataSize -= expertInfo.DataSize
		// no need to set expertInfo.RewardDebt, as it will be reset on activation
//
// Set 'false' after st.updateVestingFunds, not before!
expertInfo.Active = false
if err = st.SetExpert(store, expertAddr, expertInfo, false); err != nil {
return big.Zero(), err
}
if err = st.PutDisqualifiedExpertIfAbsent(store, expertAddr, &DisqualifiedExpertInfo{DisqualifiedAt: rt.CurrEpoch()}); err != nil {
return big.Zero(), err
}
}
return totalBurned, st.SavePool(store, pool)
}
func (st *State) updateVestingFunds(store adt.Store, currEpoch abi.ChainEpoch, pool *PoolInfo, out *ExpertInfo) (abi.TokenAmount, error) {
pending := abi.NewTokenAmount(0)
if out.Active {
pending = big.Mul(big.NewIntUnsigned(uint64(out.DataSize)), pool.AccPerShare)
pending = big.Div(pending, AccumulatedMultiplier)
if pending.LessThan(out.RewardDebt) {
return abi.NewTokenAmount(0), xerrors.Errorf("debt greater than pending: %s, %s", out.RewardDebt, pending)
}
pending = big.Sub(pending, out.RewardDebt)
out.LockedFunds = big.Add(out.LockedFunds, pending)
}
vestingFund, err := adt.AsMap(store, out.VestingFunds, builtin.DefaultHamtBitwidth)
if err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to load VestingFunds: %w", err)
}
// add new pending value
if !pending.IsZero() {
k := abi.IntKey(int64(currEpoch))
var old abi.TokenAmount
found, err := vestingFund.Get(k, &old)
if err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to get old vesting at epoch %d: %w", currEpoch, err)
}
if found {
pending = big.Add(pending, old)
}
if err := vestingFund.Put(k, &pending); err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to put new vesting at epoch %d: %w", currEpoch, err)
}
}
unlocked := abi.NewTokenAmount(0)
// calc unlocked amounts
var amount abi.TokenAmount
toDelEpochs := make(map[int64]struct{})
err = vestingFund.ForEach(&amount, func(k string) error {
epoch, err := abi.ParseIntKey(k)
if err != nil {
return xerrors.Errorf("failed to parse vestingFund key: %w", err)
}
if abi.ChainEpoch(epoch)+RewardVestingDelay < currEpoch {
unlocked = big.Add(unlocked, amount)
toDelEpochs[epoch] = struct{}{}
}
return nil
})
if err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to iterate vestingFund: %w", err)
}
for epoch := range toDelEpochs {
if err := vestingFund.Delete(abi.IntKey(epoch)); err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to delete epoch %d in vestingFund: %w", epoch, err)
}
}
out.VestingFunds, err = vestingFund.Root()
if err != nil {
return abi.NewTokenAmount(0), xerrors.Errorf("failed to flush VestingFunds: %w", err)
}
out.LockedFunds = big.Sub(out.LockedFunds, unlocked)
out.UnlockedFunds = big.Add(out.UnlockedFunds, unlocked)
return pending, nil
}
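// Editor's note on the vesting flow above: each call first locks the newly
// accrued `pending` amount under the current epoch, then every bucket older
// than RewardVestingDelay epochs is moved from LockedFunds to UnlockedFunds,
// which is the balance Claim later pays out.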
func (st *State) SavePool(store adt.Store, pool *PoolInfo) error {
c, err := store.Put(store.Context(), pool)
if err == nil {
st.PoolInfo = c
}
return err
}
func (st *State) GetPool(store adt.Store) (*PoolInfo, error) {
var pool PoolInfo
if err := store.Get(store.Context(), st.PoolInfo, &pool); err != nil {
return nil, xerrors.Errorf("failed to get pool: %w", err)
}
return &pool, nil
}
// !!!Must save pool if no error occurs during call
// !!!Should only be called once per actor method
func (st *State) UpdatePool(rt Runtime) (*PoolInfo, error) {
pool, err := st.GetPool(adt.AsStore(rt))
if err != nil {
return nil, err
}
currBalance := rt.CurrentBalance()
{
currEpoch := rt.CurrEpoch()
if currEpoch < pool.CurrentEpoch {
return nil, xerrors.Errorf("unexpected rt.CurrEpoch %d less than pool.CurrentEpoch", currEpoch, pool.CurrentEpoch)
}
// epoch changed
if currEpoch > pool.CurrentEpoch {
pool.PrevEpoch = pool.CurrentEpoch
pool.PrevTotalDataSize = pool.CurrentTotalDataSize
pool.CurrentEpoch = currEpoch
}
}
if pool.PrevTotalDataSize != 0 {
reward := big.Sub(currBalance, pool.LastRewardBalance)
if reward.LessThan(big.Zero()) {
return nil, xerrors.Errorf("unexpected current balance less than last: %s, %s", currBalance, pool.LastRewardBalance)
}
accPerShare := big.Div(big.Mul(reward, AccumulatedMultiplier), big.NewIntUnsigned(uint64(pool.PrevTotalDataSize)))
pool.AccPerShare = big.Add(pool.AccPerShare, accPerShare)
pool.LastRewardBalance = currBalance
}
return pool, nil
}
func (st *State) GetExpert(store adt.Store, expertAddr addr.Address) (*ExpertInfo, error) {
experts, err := adt.AsMap(store, st.Experts, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to load experts: %w", err)
}
var out ExpertInfo
found, err := experts.Get(abi.AddrKey(expertAddr), &out)
if err != nil {
return nil, xerrors.Errorf("failed to get expert for address %v from store %s: %w", expertAddr, st.Experts, err)
}
if !found {
return nil, xerrors.Errorf("expert not found: %s", expertAddr)
}
return &out, nil
}
func (st *State) SetExpert(store adt.Store, ida addr.Address, expert *ExpertInfo, mustAbsent bool) error {
experts, err := adt.AsMap(store, st.Experts, builtin.DefaultHamtBitwidth)
if err != nil {
return xerrors.Errorf("failed to load experts: %w", err)
}
if mustAbsent {
absent, err := experts.PutIfAbsent(abi.AddrKey(ida), expert)
if err != nil {
return xerrors.Errorf("failed to put absent expert %s: %w", ida, err)
}
if !absent {
return xerrors.Errorf("expert already exists: %s", ida)
}
} else {
if err = experts.Put(abi.AddrKey(ida), expert); err != nil {
return xerrors.Errorf("failed to put expert %s: %w", ida, err)
}
}
st.Experts, err = experts.Root()
if err != nil {
return xerrors.Errorf("failed to flush experts: %w", err)
}
return nil
}
func (st *State) ListExperts(store adt.Store) (map[addr.Address]ExpertInfo, error) {
experts, err := adt.AsMap(store, st.Experts, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to load experts: %w", err)
}
ret := make(map[addr.Address]ExpertInfo)
var out ExpertInfo
err = experts.ForEach(&out, func(key string) error {
expertAddr, err := addr.NewFromBytes([]byte(key))
if err != nil {
return err
}
ret[expertAddr] = out
return nil
})
if err != nil {
return nil, xerrors.Errorf("error iterating Experts: %w", err)
}
return ret, nil
}
func (st *State) ListDisqualifiedExperts(s adt.Store) (map[addr.Address]abi.ChainEpoch, error) {
experts, err := adt.AsMap(s, st.DisqualifiedExperts, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, xerrors.Errorf("failed to load DisqualifiedExperts: %w", err)
}
ret := make(map[addr.Address]abi.ChainEpoch)
var info DisqualifiedExpertInfo
err = experts.ForEach(&info, func(k string) error {
expertAddr, err := addr.NewFromBytes([]byte(k))
if err != nil {
return err
}
ret[expertAddr] = info.DisqualifiedAt
return nil
})
if err != nil {
return nil, xerrors.Errorf("failed to iterate DisqualifiedExperts: %w", err)
}
return ret, nil
}
func (st *State) PutDisqualifiedExpertIfAbsent(s adt.Store, expertAddr addr.Address, info *DisqualifiedExpertInfo) error {
tracked, err := adt.AsMap(s, st.DisqualifiedExperts, builtin.DefaultHamtBitwidth)
if err != nil {
return xerrors.Errorf("failed to load disqualified experts: %w", err)
}
absent, err := tracked.PutIfAbsent(abi.AddrKey(expertAddr), info)
if err != nil {
return xerrors.Errorf("failed to put disqualified expert %s", expertAddr)
}
if absent {
st.DisqualifiedExperts, err = tracked.Root()
return err
}
return nil
}
func (st *State) DeleteDisqualifiedExpertInfo(s adt.Store, expertAddr addr.Address) error {
tracked, err := adt.AsMap(s, st.DisqualifiedExperts, builtin.DefaultHamtBitwidth)
if err != nil {
return xerrors.Errorf("failed to load tracked experts: %w", err)
}
present, err := tracked.TryDelete(abi.AddrKey(expertAddr))
if err != nil {
return xerrors.Errorf("failed to delete tracked expert %s", expertAddr)
}
if present {
st.DisqualifiedExperts, err = tracked.Root()
return err
} else {
return nil
}
}
func (st *State) GetDisqualifiedExpertInfo(s adt.Store, expertAddr addr.Address) (*DisqualifiedExpertInfo, bool, error) {
tracked, err := adt.AsMap(s, st.DisqualifiedExperts, builtin.DefaultHamtBitwidth)
if err != nil {
return nil, false, xerrors.Errorf("failed to load tracked experts: %w", err)
}
var info DisqualifiedExpertInfo
found, err := tracked.Get(abi.AddrKey(expertAddr), &info)
if err != nil {
return nil, false, xerrors.Errorf("failed to get tracked expert info %s", expertAddr)
}
if !found {
return nil, false, nil
}
return &info, true, nil
}
func (st *State) ForEachExpert(store adt.Store, f func(addr.Address, *ExpertInfo)) error {
experts, err := adt.AsMap(store, st.Experts, builtin.DefaultHamtBitwidth)
if err != nil {
return err
}
var info ExpertInfo
return experts.ForEach(&info, func(key string) error {
expertAddr, err := addr.NewFromBytes([]byte(key))
if err != nil {
return err
}
f(expertAddr, &info)
return nil
})
}
// Note: Large media files (e.g. audio) would otherwise dwarf small text files, so the size is damped by applying the integer square root three times (roughly the 8th root of the original size).
func AdjustSize(originSize abi.PaddedPieceSize) abi.PaddedPieceSize {
sqrtSize := big.Zero().Sqrt(big.NewIntUnsigned(uint64(originSize)).Int)
sqrtSize = big.Zero().Sqrt(sqrtSize)
sqrtSize = big.Zero().Sqrt(sqrtSize)
return abi.PaddedPieceSize(sqrtSize.Uint64())
}
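// exampleAdjustSize is an editor-added illustration of the damping above: the
// integer square root is applied three times, so a 128 GiB piece (2^37 bytes)
// adjusts to 24 while a 1 MiB piece adjusts to 5 -- a ~5x spread instead of the
// original 131072x.
func exampleAdjustSize() (abi.PaddedPieceSize, abi.PaddedPieceSize) {
	return AdjustSize(1 << 37), AdjustSize(1 << 20) // 24, 5
}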
| ConstructState |
action-philips_hue.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hermes_python.hermes import Hermes
from os.path import expanduser
import os
from snipshue.snipshue import SnipsHue
from snipshelpers.thread_handler import ThreadHandler
from snipshelpers.config_parser import SnipsConfigParser
import queue
CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"
CACHE_INI = expanduser("~/.hue_cache/cache.ini")
CACHE_INI_DIR = expanduser("~/.hue_cache/")
MQTT_IP_ADDR = "localhost"
MQTT_PORT = 1883
MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))
API_KEY = "api_key"
_id = "snips-skill-hue"
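# Editor's note: Skill_Hue.__init__ below reads the bridge address and API key
# from an INI file shaped roughly like the sketch that follows (section and key
# names taken from the code; the concrete values are placeholders):
#
#   [secret]
#   hostname=192.168.1.42
#   api_key=<token issued by the Hue bridge after its link button is pressed>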
class Skill_Hue:
def __init__(self):
try:
config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
except:
config = None
hostname = None
code = None
if config and config.get('secret', None) is not None:
if config.get('secret').get('hostname', None) is not None:
hostname = config.get('secret').get('hostname')
if hostname == "":
hostname = None
if config.get('secret').get(API_KEY, None) is not None:
code = config.get('secret').get(API_KEY)
if code == "":
code = None
elif os.path.isfile(CACHE_INI):
try:
cached_config = SnipsConfigParser.read_configuration_file(CACHE_INI)
except:
cached_config = None
if cached_config and cached_config.get('secret', None) is not None:
if cached_config.get('secret').get(API_KEY, None) is not None:
code = cached_config.get('secret').get(API_KEY)
if code == "":
code = None
if hostname is None or code is None:
print('No configuration')
self.snipshue = SnipsHue(hostname, code)
hostname = self.snipshue.hostname
code = self.snipshue.username
self.update_config(CACHE_INI, config, hostname, code)
self.queue = queue.Queue()
self.thread_handler = ThreadHandler()
self.thread_handler.run(target=self.start_blocking)
self.thread_handler.start_run_loop()
def update_config(self, filename, data, hostname, code):
if not os.path.exists(CACHE_INI_DIR):
os.makedirs(CACHE_INI_DIR)
        if data is None:
            data = {}
        if 'secret' not in data or data['secret'] is None:
data['secret'] = {}
data['secret']['hostname'] = hostname
data['secret'][API_KEY] = code
SnipsConfigParser.write_configuration_file(filename, data)
def start_blocking(self, run_event):
while run_event.is_set():
try:
self.queue.get(False)
except queue.Empty:
with Hermes(MQTT_ADDR) as h:
h.subscribe_intents(self.callback).start()
# section -> extraction of slot value
def extract_house_rooms(self, intent_message):
house_rooms = []
if intent_message.slots.house_room:
for room in intent_message.slots.house_room.all():
print(type(room.value))
house_rooms.append(room.value)
return house_rooms
def extract_percentage(self, intent_message, default_percentage):
percentage = default_percentage
if intent_message.slots.percent:
percentage = intent_message.slots.percent.first().value
if percentage < 0:
percentage = 0
if percentage > 100:
percentage = 100
return percentage
def extract_color(self, intent_message):
color_code = None
if intent_message.slots.color:
color_code = intent_message.slots.color.first().value
return color_code
def extract_scene(self, intent_message):
scene_code = None
if intent_message.slots.scene:
scene_code = intent_message.slots.scene.first().value
return scene_code
# section -> handlers of intents
def callback(self, hermes, intent_message):
print("[HUE] Received")
# all the intents have a house_room slot, extract here
rooms = self.extract_house_rooms(intent_message)
intent_name = intent_message.intent.intent_name
if ':' in intent_name:
intent_name = intent_name.split(":")[1]
if intent_name == 'turnOn':
self.queue.put(self.turn_on(hermes, intent_message, rooms))
if intent_name == 'turnOnLastState':
self.queue.put(self.turn_on(hermes, intent_message, rooms, last_state=True))
if intent_name == 'turnOff':
self.queue.put(self.turn_off(hermes, intent_message, rooms))
if intent_name == 'setBrightness':
self.queue.put(self.set_brightness(hermes, intent_message, rooms))
if intent_name == 'setColor':
self.queue.put(self.set_color(hermes, intent_message, rooms))
if intent_name == 'setScene':
self.queue.put(self.set_scene(hermes, intent_message, rooms))
if intent_name == 'shiftUp':
self.queue.put(self.shift_up(hermes, intent_message, rooms))
if intent_name == 'shiftDown':
self.queue.put(self.shift_down(hermes, intent_message, rooms))
def turn_on(self, hermes, intent_message, rooms, last_state=False):
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_on(room.lower(), last_state)
else:
self.snipshue.light_on_all(last_state)
self.terminate_feedback(hermes, intent_message)
def turn_off(self, hermes, intent_message, rooms):
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_off(room.lower())
else:
self.snipshue.light_off_all()
self.terminate_feedback(hermes, intent_message)
def set_brightness(self, hermes, intent_message, rooms):
percent = self.extract_percentage(intent_message, None)
if percent is None:
self.terminate_feedback(hermes, intent_message)
return
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_brightness(percent, room.lower())
else:
self.snipshue.light_brightness_all(percent)
self.terminate_feedback(hermes, intent_message)
def set_color(self, hermes, intent_message, rooms):
color = self.extract_color(intent_message)
if color is None:
self.terminate_feedback(hermes, intent_message)
return
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_color(color, room.lower())
else:
self.snipshue.light_color_all(color)
self.terminate_feedback(hermes, intent_message)
def set_scene(self, hermes, intent_message, rooms):
scene = self.extract_scene(intent_message)
if scene is None:
self.terminate_feedback(hermes, intent_message)
return
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_scene(scene, room.lower())
else:
self.snipshue.light_scene_all(scene)
self.terminate_feedback(hermes, intent_message)
def shift_up(self, hermes, intent_message, rooms):
percent = self.extract_percentage(intent_message, 20)
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_up(percent, room.lower())
else:
self.snipshue.light_up_all(percent)
self.terminate_feedback(hermes, intent_message)
def | (self, hermes, intent_message, rooms):
percent = self.extract_percentage(intent_message, 20)
if len(rooms) > 0:
for room in rooms:
self.snipshue.light_down(percent, room.lower())
else:
self.snipshue.light_down_all(percent)
self.terminate_feedback(hermes, intent_message)
# section -> feedback reply // future function
def terminate_feedback(self, hermes, intent_message, mode='default'):
if mode == 'default':
hermes.publish_end_session(intent_message.session_id, "")
else:
# more design
hermes.publish_end_session(intent_message.session_id, "")
if __name__ == "__main__":
Skill_Hue()
| shift_down |
config.rs | /*
* Copyright (C) 2019 Josh Gao
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::BTreeMap;
use std::fs::File;
use std::io::Read;
use std::path::Path;
use std::path::PathBuf;
use super::depot::Depot;
use failure::Error;
use failure::ResultExt;
use regex::Regex;
const DEFAULT_CONFIG: &str = "\
autosubmit = false
presubmit = false
[depots.android]
# Path to store the depot.
path = '~/.pore/android'
[[remotes]]
# The name of a remote: used in manifest config and in the actual git repos as the origin name.
name = 'aosp'
# Primary URL used to clone from this remote.
url = 'https://android.googlesource.com/'
# Other URLs that should be mapped onto this remote.
other_urls = ['persistent-https://android.googlesource.com/']
# Name of the depot in which objects from this remote should be stored.
depot = 'android'
# project_renames are used to map remotes with differing directory structures onto the same depot.
# For example, if one remote had repositories at woodly/{foo,bar,baz} and another
# had doodly/{foo,bar,baz}, the following could be used to store all objects at doodly/{foo,bar,baz}.
#
# [[remotes.project_renames]]
# regex = '^woodly/'
# replacement = 'doodly/'
[[manifests]]
# Name of the manifest: used in `pore clone MANIFEST[/BRANCH]`
name = 'aosp'
# Remote from which the manifest project is cloned.
remote = 'aosp'
# Name of the manifest project.
project = 'platform/manifest'
# Default branch to use when `pore clone`d without a specified branch.
default_branch = 'master'
# Default manifest file to use when `pore clone`d without a specified manifest file.
default_manifest_file = 'default.xml'
[[manifests]]
name = 'kernel'
remote = 'aosp'
project = 'kernel/manifest'
";
fn default_autosubmit() -> bool {
false
}
fn default_presubmit() -> bool {
false
}
fn default_project_renames() -> Vec<ProjectRename> {
Vec::new()
}
fn default_branch() -> String |
fn default_manifest_file() -> String {
"default.xml".into()
}
#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
#[serde(default = "default_autosubmit")]
pub autosubmit: bool,
#[serde(default = "default_presubmit")]
pub presubmit: bool,
pub depots: BTreeMap<String, DepotConfig>,
pub remotes: Vec<RemoteConfig>,
pub manifests: Vec<ManifestConfig>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ProjectRename {
#[serde(with = "serde_regex")]
pub regex: Regex,
pub replacement: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct RemoteConfig {
pub name: String,
pub url: String,
pub other_urls: Option<Vec<String>>,
pub depot: String,
#[serde(default = "default_project_renames")]
pub project_renames: Vec<ProjectRename>,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct ManifestConfig {
pub name: String,
pub remote: String,
pub project: String,
#[serde(default = "default_branch")]
pub default_branch: String,
#[serde(default = "default_manifest_file")]
pub default_manifest_file: String,
}
#[derive(Debug, Serialize, Deserialize)]
pub struct DepotConfig {
pub path: String,
}
impl Default for Config {
fn default() -> Config {
toml::from_str(DEFAULT_CONFIG).expect("failed to parse embedded config")
}
}
impl Config {
pub fn default_string() -> &'static str {
DEFAULT_CONFIG
}
pub fn from_path(path: &Path) -> Result<Config, Error> {
let mut file = String::new();
File::open(&path)?.read_to_string(&mut file)?;
let config = toml::from_str(&file).context(format!("failed to parse config file {:?}", path))?;
Ok(config)
}
fn expand_path(path: &str) -> Result<PathBuf, Error> {
let path = shellexpand::full(path).context("shell expansion failed")?;
Ok(path.into_owned().into())
}
pub fn find_depot(&self, depot: &str) -> Result<Depot, Error> {
let depot_config = self
.depots
.get(depot)
.ok_or_else(|| format_err!("unknown depot {}", depot))?;
let path = Config::expand_path(&depot_config.path).context(format!("failed to expand path for depot {}", depot))?;
Depot::new(depot.to_string(), path)
}
pub fn find_remote(&self, remote_name: &str) -> Result<&RemoteConfig, Error> {
for remote in &self.remotes {
if remote.name == remote_name {
return Ok(remote);
}
}
Err(format_err!("unknown remote {}", remote_name))
}
pub fn find_manifest(&self, manifest_name: &str) -> Result<&ManifestConfig, Error> {
for manifest in &self.manifests {
if manifest.name == manifest_name {
return Ok(manifest);
}
}
Err(format_err!("unknown manifest {}", manifest_name))
}
}
| {
"master".into()
} |
init.go | package migrates
import (
"database/sql"
"errors"
"fmt"
"github.com/gokins/gokins/comm"
"github.com/golang-migrate/migrate/v4"
"github.com/golang-migrate/migrate/v4/database/mysql"
"github.com/golang-migrate/migrate/v4/database/postgres"
"github.com/golang-migrate/migrate/v4/database/sqlite3"
_ "github.com/golang-migrate/migrate/v4/source/file"
bindata "github.com/golang-migrate/migrate/v4/source/go_bindata"
"path/filepath"
"strings"
)
func | (host, dbs, user, pass string) (wait bool, rtul string, errs error) {
wait = false
if host == "" || dbs == "" || user == "" {
errs = errors.New("database config not found")
return
}
wait = true
ul := fmt.Sprintf("%s:%s@tcp(%s)/%s?parseTime=true&multiStatements=true",
user,
pass,
host,
dbs)
db, err := sql.Open("mysql", ul)
if err != nil {
errs = err
return
}
err = db.Ping()
if err != nil {
db.Close()
uls := fmt.Sprintf("%s:%s@tcp(%s)/?parseTime=true&multiStatements=true",
user,
pass,
host)
db, err = sql.Open("mysql", uls)
if err != nil {
println("open dbs err:" + err.Error())
errs = err
return
}
defer db.Close()
_, err = db.Exec(fmt.Sprintf("CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8mb4;", dbs))
if err != nil {
println("create dbs err:" + err.Error())
errs = err
return
}
db.Exec(fmt.Sprintf("USE `%s`;", dbs))
err = db.Ping()
}
defer db.Close()
wait = false
if err != nil {
errs = err
return
}
// Run migrations
driver, err := mysql.WithInstance(db, &mysql.Config{})
if err != nil {
println("could not start sql migration... ", err.Error())
errs = err
return
}
defer driver.Close()
var nms []string
tms := comm.AssetNames()
for _, v := range tms {
if strings.HasPrefix(v, "mysql") {
nms = append(nms, strings.Replace(v, "mysql/", "", 1))
}
}
s := bindata.Resource(nms, func(name string) ([]byte, error) {
return comm.Asset("mysql/" + name)
})
sc, err := bindata.WithInstance(s)
if err != nil {
errs = err
return
}
defer sc.Close()
mgt, err := migrate.NewWithInstance(
"bindata", sc,
"mysql", driver)
if err != nil {
errs = err
return
}
defer mgt.Close()
err = mgt.Up()
if err != nil && err != migrate.ErrNoChange {
mgt.Down()
errs = err
return
}
return false, ul, nil
}
func InitSqliteMigrate() (rtul string, errs error) {
ul := filepath.Join(comm.WorkPath, "db.dat")
db, err := sql.Open("sqlite3", ul)
if err != nil {
errs = err
return
}
defer db.Close()
// Run migrations
driver, err := sqlite3.WithInstance(db, &sqlite3.Config{})
if err != nil {
println("could not start sql migration... ", err.Error())
errs = err
return
}
defer driver.Close()
var nms []string
tms := comm.AssetNames()
for _, v := range tms {
if strings.HasPrefix(v, "sqlite") {
nms = append(nms, strings.Replace(v, "sqlite/", "", 1))
}
}
s := bindata.Resource(nms, func(name string) ([]byte, error) {
return comm.Asset("sqlite/" + name)
})
sc, err := bindata.WithInstance(s)
if err != nil {
errs = err
return
}
defer sc.Close()
mgt, err := migrate.NewWithInstance(
"bindata", sc,
"sqlite3", driver)
if err != nil {
errs = err
return
}
defer mgt.Close()
err = mgt.Up()
if err != nil && err != migrate.ErrNoChange {
mgt.Down()
errs = err
return
}
return ul, nil
}
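// Editor's sketch of how these helpers might be selected at startup; the cfg
// struct and its field names are assumptions, not part of this package:
//
//	switch cfg.Driver {
//	case "mysql":
//		_, dbURL, err = InitMysqlMigrate(cfg.Host, cfg.Name, cfg.User, cfg.Pass)
//	case "postgres":
//		_, dbURL, err = InitPostgresMigrate(cfg.Host, cfg.Name, cfg.User, cfg.Pass)
//	default:
//		dbURL, err = InitSqliteMigrate()
//	}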
func InitPostgresMigrate(host, dbs, user, pass string) (wait bool, rtul string, errs error) {
wait = false
if host == "" || dbs == "" || user == "" {
errs = errors.New("database config not found")
return
}
wait = true
ul := fmt.Sprintf("postgres://%s:%s@%s/%s?sslmode=disable", user, pass, host, dbs)
db, err := sql.Open("postgres", ul)
if err != nil {
errs = err
return
}
err = db.Ping()
if err != nil {
db.Close()
errs = err
return
}
defer db.Close()
wait = false
if err != nil {
errs = err
return
}
// Run migrations
driver, err := postgres.WithInstance(db, &postgres.Config{})
if err != nil {
println("could not start sql migration... ", err.Error())
errs = err
return
}
defer driver.Close()
var nms []string
tms := comm.AssetNames()
for _, v := range tms {
if strings.HasPrefix(v, "postgres") {
nms = append(nms, strings.Replace(v, "postgres/", "", 1))
}
}
s := bindata.Resource(nms, func(name string) ([]byte, error) {
return comm.Asset("postgres/" + name)
})
sc, err := bindata.WithInstance(s)
if err != nil {
errs = err
return
}
defer sc.Close()
mgt, err := migrate.NewWithInstance(
"bindata", sc,
"postgres", driver)
if err != nil {
errs = err
return
}
defer mgt.Close()
err = mgt.Up()
if err != nil && err != migrate.ErrNoChange {
mgt.Down()
errs = err
return
}
return false, ul, nil
}
| InitMysqlMigrate |
new-task.rs | use crate::lapin::channel::{BasicProperties, BasicPublishOptions, QueueDeclareOptions};
use crate::lapin::client::ConnectionOptions;
use crate::lapin::types::FieldTable;
use failure::Error;
use futures::future::Future;
use itertools::free::join;
use lapin_futures as lapin;
use tokio;
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use lapin::client::Client as AMQPClient;
fn | () {
let addr = "127.0.0.1:5672".parse().unwrap();
let args: Vec<_> = std::env::args().skip(1).collect();
let message = match args.len() {
0 => "hello".to_string(),
_ => join(args, " "),
};
Runtime::new()
.unwrap()
.block_on_all(
TcpStream::connect(&addr) // try to initiate a TCP connection
.map_err(Error::from)
.and_then(|stream| {
// if successful, pass it to AMQP client
AMQPClient::connect(stream, ConnectionOptions::default()).map_err(Error::from)
})
.and_then(|(client, _)| client.create_channel().map_err(Error::from)) // create a channel
.and_then(|channel| {
channel
// declare a new queue
.queue_declare("hello", QueueDeclareOptions::default(), FieldTable::new())
.and_then(move |_| {
// if successful, send a message
channel
.basic_publish(
"",
"hello",
message.as_bytes().to_vec(),
BasicPublishOptions::default(),
BasicProperties::default(),
)
.map(|_| println!("Sent a message"))
})
.map_err(Error::from)
}),
)
.expect("Failed to create tokio runtime");
}
| main |
interactive.rs | use std::fs;
use std::io::Write;
use std::path::PathBuf;
use std::time::{Duration, Instant};
use ansi_term::Colour::Green;
use ckb_types::{core::service::Request, core::BlockView};
use regex::Regex;
use rustyline::config::Configurer;
use rustyline::error::ReadlineError;
use rustyline::{Cmd, CompletionType, Config, EditMode, Editor, KeyPress};
use serde_json::json;
use crate::subcommands::{
AccountSubCommand, CliSubCommand, IndexController, IndexRequest, MockTxSubCommand,
MoleculeSubCommand, RpcSubCommand, TxSubCommand, UtilSubCommand, WalletSubCommand,
};
use crate::utils::{
completer::CkbCompleter,
config::GlobalConfig,
other::{check_alerts, get_network_type, index_dirname},
printer::{ColorWhen, OutputFormat, Printable},
};
use ckb_sdk::{
rpc::RawHttpRpcClient,
wallet::{KeyStore, ScryptType},
GenesisInfo, HttpRpcClient,
};
const ENV_PATTERN: &str = r"\$\{\s*(?P<key>\S+)\s*\}";
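// Editor's sketch of the substitution ENV_PATTERN drives: `${ key }` tokens in
// an entered line are replaced with values stored via the `set` subcommand
// before the line is parsed (the real logic lives in GlobalConfig::replace_cmd;
// `vars` below is a hypothetical HashMap<String, String>):
//
//     let re = Regex::new(ENV_PATTERN).unwrap();
//     let expanded = re.replace_all("get ${ foo }", |caps: &regex::Captures| {
//         vars.get(&caps["key"]).cloned().unwrap_or_default()
//     });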
/// Interactive command line
pub struct InteractiveEnv {
config: GlobalConfig,
config_file: PathBuf,
history_file: PathBuf,
index_dir: PathBuf,
parser: clap::App<'static, 'static>,
key_store: KeyStore,
rpc_client: HttpRpcClient,
raw_rpc_client: RawHttpRpcClient,
index_controller: IndexController,
genesis_info: Option<GenesisInfo>,
}
impl InteractiveEnv {
pub fn from_config(
ckb_cli_dir: PathBuf,
mut config: GlobalConfig,
index_controller: IndexController,
) -> Result<InteractiveEnv, String> {
if !ckb_cli_dir.as_path().exists() {
fs::create_dir(&ckb_cli_dir).map_err(|err| err.to_string())?;
}
let mut history_file = ckb_cli_dir.clone();
history_file.push("history");
let mut config_file = ckb_cli_dir.clone();
config_file.push("config");
let mut index_dir = ckb_cli_dir.clone();
index_dir.push(index_dirname());
let mut keystore_dir = ckb_cli_dir.clone();
keystore_dir.push("keystore");
let mut env_file = ckb_cli_dir.clone();
env_file.push("env_vars");
if env_file.as_path().exists() {
let file = fs::File::open(&env_file).map_err(|err| err.to_string())?;
let env_vars_json = serde_json::from_reader(file).unwrap_or(json!(null));
match env_vars_json {
serde_json::Value::Object(env_vars) => config.add_env_vars(env_vars),
_ => eprintln!("Parse environment variable file failed."),
}
}
let parser = crate::build_interactive();
let rpc_client = HttpRpcClient::new(config.get_url().to_string());
let raw_rpc_client = RawHttpRpcClient::from_uri(config.get_url());
fs::create_dir_all(&keystore_dir).map_err(|err| err.to_string())?;
let key_store = KeyStore::from_dir(keystore_dir, ScryptType::default())
.map_err(|err| err.to_string())?;
Ok(InteractiveEnv {
config,
config_file,
index_dir,
history_file,
parser,
rpc_client,
raw_rpc_client,
key_store,
index_controller,
genesis_info: None,
})
}
pub fn start(&mut self) -> Result<(), String> {
self.print_logo();
self.config.print();
let env_regex = Regex::new(ENV_PATTERN).unwrap();
let prompt = {
#[cfg(unix)]
{
use ansi_term::Colour::Blue;
Blue.bold().paint("CKB> ").to_string()
}
#[cfg(not(unix))]
{
"CKB> ".to_string()
}
};
let rl_mode = |rl: &mut Editor<CkbCompleter>, is_list: bool, is_emacs: bool| {
if is_list {
rl.set_completion_type(CompletionType::List)
} else {
rl.set_completion_type(CompletionType::Circular)
}
if is_emacs {
rl.set_edit_mode(EditMode::Emacs)
} else {
rl.set_edit_mode(EditMode::Vi)
}
};
let rl_config = Config::builder()
.history_ignore_space(true)
.completion_type(CompletionType::List)
.edit_mode(EditMode::Emacs)
.build();
let helper = CkbCompleter::new(self.parser.clone());
let mut rl = Editor::with_config(rl_config);
rl.set_helper(Some(helper));
rl.bind_sequence(KeyPress::Meta('N'), Cmd::HistorySearchForward);
rl.bind_sequence(KeyPress::Meta('P'), Cmd::HistorySearchBackward);
if rl.load_history(&self.history_file).is_err() {
eprintln!("No previous history.");
}
Request::call(
self.index_controller.sender(),
IndexRequest::UpdateUrl(self.config.get_url().to_string()),
);
let mut last_save_history = Instant::now();
loop {
rl_mode(
&mut rl,
self.config.completion_style(),
self.config.edit_style(),
);
match rl.readline(&prompt) {
Ok(line) => {
match self.handle_command(line.as_str(), &env_regex) {
Ok(true) => {
break;
}
Ok(false) => {}
Err(err) => {
eprintln!("{}", err.to_string());
}
}
rl.add_history_entry(line.as_str());
}
Err(ReadlineError::Interrupted) => {
println!("CTRL-C");
}
Err(ReadlineError::Eof) => {
println!("CTRL-D");
break;
}
Err(err) => {
eprintln!("Error: {:?}", err);
break;
}
}
if last_save_history.elapsed() >= Duration::from_secs(120) {
if let Err(err) = rl.save_history(&self.history_file) {
eprintln!("Save command history failed: {}", err);
break;
}
last_save_history = Instant::now();
}
}
if let Err(err) = rl.save_history(&self.history_file) {
eprintln!("Save command history failed: {}", err);
}
Ok(())
}
fn print_logo(&mut self) {
println!(
"{}",
format!(
r#"
_ _ ______ _____ __ __ {} _____
| \ | | | ____| | __ \ \ \ / / {} / ____|
| \| | | |__ | |__) | \ \ / / {} | (___
| . ` | | __| | _ / \ \/ / {} \___ \
| |\ | | |____ | | \ \ \ / {} ____) |
|_| \_| |______| |_| \_\ \/ {} |_____/
"#,
Green.bold().paint(r#" ____ "#),
Green.bold().paint(r#" / __ \ "#),
Green.bold().paint(r#"| | | |"#),
Green.bold().paint(r#"| | | |"#),
Green.bold().paint(r#"| |__| |"#),
Green.bold().paint(r#" \____/ "#),
)
);
}
fn | (&mut self) -> Result<GenesisInfo, String> {
if self.genesis_info.is_none() {
let genesis_block: BlockView = self
.rpc_client
.get_block_by_number(0)?
.expect("Can not get genesis block?")
.into();
self.genesis_info = Some(GenesisInfo::from_block(&genesis_block)?);
}
Ok(self.genesis_info.clone().unwrap())
}
fn handle_command(&mut self, line: &str, env_regex: &Regex) -> Result<bool, String> {
let args = match shell_words::split(self.config.replace_cmd(&env_regex, line).as_str()) {
Ok(args) => args,
Err(e) => return Err(e.to_string()),
};
let format = self.config.output_format();
let color = ColorWhen::new(self.config.color()).color();
let debug = self.config.debug();
match self.parser.clone().get_matches_from_safe(args) {
Ok(matches) => match matches.subcommand() {
("config", Some(m)) => {
m.value_of("url").and_then(|url| {
let index_sender = self.index_controller.sender();
Request::call(index_sender, IndexRequest::UpdateUrl(url.to_string()));
self.config.set_url(url.to_string());
self.rpc_client = HttpRpcClient::new(self.config.get_url().to_string());
self.raw_rpc_client = RawHttpRpcClient::from_uri(self.config.get_url());
self.config
.set_network(get_network_type(&mut self.rpc_client).ok());
self.genesis_info = None;
Some(())
});
if m.is_present("color") {
self.config.switch_color();
}
if let Some(format) = m.value_of("output-format") {
let output_format =
OutputFormat::from_str(format).unwrap_or(OutputFormat::Yaml);
self.config.set_output_format(output_format);
}
if m.is_present("debug") {
self.config.switch_debug();
}
if m.is_present("edit_style") {
self.config.switch_edit_style();
}
if m.is_present("completion_style") {
self.config.switch_completion_style();
}
self.config.print();
let mut file = fs::File::create(self.config_file.as_path())
.map_err(|err| format!("open config error: {:?}", err))?;
let content = serde_json::to_string_pretty(&json!({
"url": self.config.get_url().to_string(),
"color": self.config.color(),
"debug": self.config.debug(),
"output_format": self.config.output_format().to_string(),
"completion_style": self.config.completion_style(),
"edit_style": self.config.edit_style(),
}))
.unwrap();
file.write_all(content.as_bytes())
.map_err(|err| format!("save config error: {:?}", err))?;
Ok(())
}
("set", Some(m)) => {
let key = m.value_of("key").unwrap().to_owned();
let value = m.value_of("value").unwrap().to_owned();
self.config.set(key, serde_json::Value::String(value));
Ok(())
}
("get", Some(m)) => {
let key = m.value_of("key");
println!("{}", self.config.get(key).render(format, color));
Ok(())
}
("info", _) => {
self.config.print();
Ok(())
}
("rpc", Some(sub_matches)) => {
check_alerts(&mut self.rpc_client);
let output = RpcSubCommand::new(&mut self.rpc_client, &mut self.raw_rpc_client)
.process(&sub_matches, format, color, debug)?;
println!("{}", output);
Ok(())
}
("account", Some(sub_matches)) => {
let output = AccountSubCommand::new(&mut self.key_store).process(
&sub_matches,
format,
color,
debug,
)?;
println!("{}", output);
Ok(())
}
("mock-tx", Some(sub_matches)) => {
let genesis_info = self.genesis_info().ok();
let output = MockTxSubCommand::new(
&mut self.rpc_client,
&mut self.key_store,
genesis_info,
)
.process(&sub_matches, format, color, debug)?;
println!("{}", output);
Ok(())
}
("tx", Some(sub_matches)) => {
let genesis_info = self.genesis_info().ok();
let output =
TxSubCommand::new(&mut self.rpc_client, &mut self.key_store, genesis_info)
.process(&sub_matches, format, color, debug)?;
println!("{}", output);
Ok(())
}
("util", Some(sub_matches)) => {
let output = UtilSubCommand::new(&mut self.rpc_client).process(
&sub_matches,
format,
color,
debug,
)?;
println!("{}", output);
Ok(())
}
("molecule", Some(sub_matches)) => {
let output =
MoleculeSubCommand::new().process(&sub_matches, format, color, debug)?;
println!("{}", output);
Ok(())
}
("wallet", Some(sub_matches)) => {
let genesis_info = self.genesis_info()?;
let output = WalletSubCommand::new(
&mut self.rpc_client,
&mut self.key_store,
Some(genesis_info),
self.index_dir.clone(),
self.index_controller.clone(),
)
.process(&sub_matches, format, color, debug)?;
println!("{}", output);
Ok(())
}
("exit", _) => {
return Ok(true);
}
_ => Ok(()),
},
Err(err) => Err(err.to_string()),
}
.map(|_| false)
}
}
| genesis_info |
oss_snapstore.go | // Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package snapstore
import (
"fmt"
"io"
"io/ioutil"
"math"
"os"
"path"
"sort"
"sync"
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"github.com/sirupsen/logrus"
)
// OSSBucket is an interface for oss.Bucket used in snapstore
type OSSBucket interface {
GetObject(objectKey string, options ...oss.Option) (io.ReadCloser, error)
InitiateMultipartUpload(objectKey string, options ...oss.Option) (oss.InitiateMultipartUploadResult, error)
CompleteMultipartUpload(imur oss.InitiateMultipartUploadResult, parts []oss.UploadPart, options ...oss.Option) (oss.CompleteMultipartUploadResult, error)
ListObjects(options ...oss.Option) (oss.ListObjectsResult, error)
DeleteObject(objectKey string, options ...oss.Option) error
UploadPart(imur oss.InitiateMultipartUploadResult, reader io.Reader, partSize int64, partNumber int, options ...oss.Option) (oss.UploadPart, error)
AbortMultipartUpload(imur oss.InitiateMultipartUploadResult, options ...oss.Option) error
}
const (
ossNoOfChunk int64 = 10000
ossEndPoint = "ALICLOUD_ENDPOINT"
accessKeyID = "ALICLOUD_ACCESS_KEY_ID"
accessKeySecret = "ALICLOUD_ACCESS_KEY_SECRET"
)
type authOptions struct {
endpoint string
accessID string
accessKey string
}
// OSSSnapStore is snapstore with Alicloud OSS object store as backend
type OSSSnapStore struct {
prefix string
bucket OSSBucket
multiPart sync.Mutex
maxParallelChunkUploads uint
tempDir string
}
// NewOSSSnapStore create new OSSSnapStore from shared configuration with specified bucket
func NewOSSSnapStore(bucket, prefix, tempDir string, maxParallelChunkUploads uint) (*OSSSnapStore, error) {
ao, err := authOptionsFromEnv()
if err != nil {
return nil, err
}
return newOSSFromAuthOpt(bucket, prefix, tempDir, maxParallelChunkUploads, ao)
}
func newOSSFromAuthOpt(bucket, prefix, tempDir string, maxParallelChunkUploads uint, ao authOptions) (*OSSSnapStore, error) {
client, err := oss.New(ao.endpoint, ao.accessID, ao.accessKey)
if err != nil {
return nil, err
}
bucketOSS, err := client.Bucket(bucket)
if err != nil {
return nil, err
}
return NewOSSFromBucket(prefix, tempDir, maxParallelChunkUploads, bucketOSS), nil
}
// NewOSSFromBucket will create the new OSS snapstore object from OSS bucket
func NewOSSFromBucket(prefix, tempDir string, maxParallelChunkUploads uint, bucket OSSBucket) *OSSSnapStore {
return &OSSSnapStore{
prefix: prefix,
bucket: bucket,
maxParallelChunkUploads: maxParallelChunkUploads,
tempDir: tempDir,
}
}
// Fetch should open reader for the snapshot file from store
func (s *OSSSnapStore) Fetch(snap Snapshot) (io.ReadCloser, error) {
body, err := s.bucket.GetObject(path.Join(s.prefix, snap.SnapDir, snap.SnapName))
if err != nil {
return nil, err
}
return body, nil
}
// Save will write the snapshot to store
func (s *OSSSnapStore) Save(snap Snapshot, rc io.ReadCloser) error {
tmpfile, err := ioutil.TempFile(s.tempDir, tmpBackupFilePrefix)
if err != nil {
rc.Close()
return fmt.Errorf("failed to create snapshot tempfile: %v", err)
}
defer func() {
tmpfile.Close()
os.Remove(tmpfile.Name())
}()
size, err := io.Copy(tmpfile, rc)
rc.Close()
if err != nil {
return fmt.Errorf("failed to save snapshot to tmpfile: %v", err)
}
_, err = tmpfile.Seek(0, io.SeekStart)
if err != nil {
return err
}
var (
chunkSize = int64(math.Max(float64(minChunkSize), float64(size/ossNoOfChunk)))
noOfChunks = size / chunkSize
)
if size%chunkSize != 0 {
noOfChunks++
}
ossChunks, err := oss.SplitFileByPartNum(tmpfile.Name(), int(noOfChunks))
if err != nil {
return err
}
imur, err := s.bucket.InitiateMultipartUpload(path.Join(s.prefix, snap.SnapDir, snap.SnapName))
if err != nil {
return err
}
var (
completedParts = make([]oss.UploadPart, noOfChunks)
chunkUploadCh = make(chan chunk, noOfChunks)
resCh = make(chan chunkUploadResult, noOfChunks)
cancelCh = make(chan struct{})
wg sync.WaitGroup
)
for i := uint(0); i < s.maxParallelChunkUploads; i++ {
wg.Add(1)
go s.partUploader(&wg, imur, tmpfile, completedParts, chunkUploadCh, cancelCh, resCh)
}
for _, ossChunk := range ossChunks {
chunk := chunk{
offset: ossChunk.Offset,
size: ossChunk.Size,
id: ossChunk.Number,
}
logrus.Debugf("Triggering chunk upload for offset: %d", chunk.offset)
chunkUploadCh <- chunk
}
logrus.Infof("Triggered chunk upload for all chunks, total: %d", noOfChunks)
snapshotErr := collectChunkUploadError(chunkUploadCh, resCh, cancelCh, noOfChunks)
wg.Wait()
if snapshotErr == nil {
_, err := s.bucket.CompleteMultipartUpload(imur, completedParts)
if err != nil {
return err
}
logrus.Infof("Finishing the multipart upload with upload ID : %s", imur.UploadID)
} else {
logrus.Infof("Aborting the multipart upload with upload ID : %s", imur.UploadID)
err := s.bucket.AbortMultipartUpload(imur)
if err != nil {
return snapshotErr.err
}
}
return nil
}
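// Editor's worked example of the chunk sizing in Save (minChunkSize is defined
// elsewhere in this package; 5 MiB is only an assumed figure): a 40 GiB
// snapshot gives size/ossNoOfChunk of roughly 4.1 MiB, which is below the
// assumed 5 MiB floor, so chunkSize becomes 5 MiB and the snapshot is uploaded
// as 40 GiB / 5 MiB = 8192 parts, fanned out over maxParallelChunkUploads workers.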
func (s *OSSSnapStore) partUploader(wg *sync.WaitGroup, imur oss.InitiateMultipartUploadResult, file *os.File, completedParts []oss.UploadPart, chunkUploadCh <-chan chunk, stopCh <-chan struct{}, errCh chan<- chunkUploadResult) {
defer wg.Done()
for {
select {
case <-stopCh:
return
case chunk, ok := <-chunkUploadCh:
if !ok {
return
}
logrus.Infof("Uploading chunk with id: %d, offset: %d, size: %d", chunk.id, chunk.offset, chunk.size)
err := s.uploadPart(imur, file, completedParts, chunk.offset, chunk.size, chunk.id)
errCh <- chunkUploadResult{
err: err,
chunk: &chunk,
}
}
}
}
func (s *OSSSnapStore) uploadPart(imur oss.InitiateMultipartUploadResult, file *os.File, completedParts []oss.UploadPart, offset, chunkSize int64, number int) error {
fd := io.NewSectionReader(file, offset, chunkSize)
part, err := s.bucket.UploadPart(imur, fd, chunkSize, number)
if err == nil {
completedParts[number-1] = part
}
return err
}
// List will list the snapshots from store
func (s *OSSSnapStore) List() (SnapList, error) {
var snapList SnapList
marker := ""
for {
lsRes, err := s.bucket.ListObjects(oss.Marker(marker), oss.Prefix(s.prefix))
if err != nil {
return nil, err
}
for _, object := range lsRes.Objects { | logrus.Warnf("Invalid snapshot found. Ignoring it: %s", object.Key)
} else {
snapList = append(snapList, snap)
}
}
if lsRes.IsTruncated {
marker = lsRes.NextMarker
} else {
break
}
}
sort.Sort(snapList)
return snapList, nil
}
// Delete should delete the snapshot file from store
func (s *OSSSnapStore) Delete(snap Snapshot) error {
return s.bucket.DeleteObject(path.Join(s.prefix, snap.SnapDir, snap.SnapName))
}
func authOptionsFromEnv() (authOptions, error) {
endpoint, err := GetEnvVarOrError(ossEndPoint)
if err != nil {
return authOptions{}, err
}
accessID, err := GetEnvVarOrError(accessKeyID)
if err != nil {
return authOptions{}, err
}
accessKey, err := GetEnvVarOrError(accessKeySecret)
if err != nil {
return authOptions{}, err
}
ao := authOptions{
endpoint: endpoint,
accessID: accessID,
accessKey: accessKey,
}
return ao, nil
} | snap, err := ParseSnapshot(object.Key[len(s.prefix)+1:])
if err != nil {
// Warning |
RemovingData.py | import pandas as pd
import numpy as np
from collections import defaultdict
import RemovingDataSolns as s
# Question 1
def prop_sals_test(prop_sals):
'''
INPUT prop_sals - a float as the percent of missing values in the salary column
Prints statement related to the correctness of the solution of the proportion
'''
if np.allclose(prop_sals, s.prop_sals):
print("Nice job! That looks right!")
else:
print("Oops! Make sure your value is for the proportion of nan values in only the Salary column.")
# Question 2
def sal_rm_test(sal_rm):
'''
    INPUT sal_rm - a pandas dataframe with all rows that are missing a value in the salary column removed. The dataframe should only have the columns of num_vars (quant variables)
Prints statement related to the correctness of the solution of the dataframe
'''
if sal_rm.equals(s.sal_rm):
print("Nice job! That looks right!")
else:
print("That wasn't quite as expected. Try again, this should be the num_vars dataframe with salary removed.")
# Question 3
def question3_check(question3_solution):
'''
    INPUT question3_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question3_solution == s.question3_solution:
print("Nice job! That's right! Those missing values in the X matrix will still not allow us to predict the response.")
else:
print("Oops! That wasn't what we were expecting. Your solution should be either a, b, or c for the string that best relates to what happened.")
# Question 4
def all_rm_test(all_rm):
'''
INPUT all_rm - a pandas dataframe with all rows that are missing a value in any column removed from num_vars (only the numeric columns)
Prints statement related to the correctness of the solution of the dataframe
'''
if all_rm.equals(s.all_rm):
print("Nice job! That looks right. The default is to drop any row with a missing value in any column, so we didn't need to specify any arguments in this case.")
else:
print("Oops! That doesn't look like what we were expecting. Make sure you are working with only the numeric columns, and you have dropped any rows with missing values.")
# Question 5
def question5_check(question5_solution):
'''
    INPUT question5_solution - the letter (a, b, or c) corresponding to the statement that best describes what happened when fitting your model.
Prints statement related to the correctness of the letter chosen.
'''
if question5_solution == s.question5_solution:
print("Nice job! That's right! Python isn't exactly magic, but sometimes it feels like it is!")
else:
print("Oops! Your solution should have worked. In which case, no output should have printed. This solution should follow just as in the screencast.")
# Question 6
def | (r2_test):
'''
INPUT r2_test - the rsquared value from fitting a model with all nan values dropped and only using quantitative variables.
Prints statement related to the correctness rsquared matching solution.
'''
if r2_test == s.r2_test:
print("Nice job! That's right! Your rsquared matches the solution.")
else:
print("Oops! That wasn't the value that was expected. You should fit your model using the training data, predict on the X_test data, and then score comparing the y_test and your predicted values.")
# Question 7
def question7_check(question7_solution):
'''
INPUT question7_solution - a dictionary with statements of takeaways from the rest of the notebook. The values should be the variables a, b, c, d, e, f, or g
Prints statement related to the correctness of the solution of the dictionary
'''
if question7_solution == s.question7_solution:
print("Nice job! That looks right to me! We would really like to predict for anyone who provides a salary, but our model right now definitely has some limitations.")
elif question7_solution['The number of reported salaries in the original dataset'] != s.question7_solution['The number of reported salaries in the original dataset']:
print("The number of reported salaries in the original dataset doesn't look quite right.")
elif question7_solution['The number of test salaries predicted using our model'] != s.question7_solution['The number of test salaries predicted using our model']:
print("The number of salaries predicted using our model doesn't look quite right.")
elif question7_solution['If an individual does not rate stackoverflow, but has a salary'] != s.question7_solution['If an individual does not rate stackoverflow, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['If an individual does not have a a job satisfaction, but has a salary'] != s.question7_solution['If an individual does not have a a job satisfaction, but has a salary']:
print("Whether an individual rates stackoverflow or has a job satisfaction we would still like to predict the salary if we can.")
elif question7_solution['Our model predicts salaries for the two individuals described above.'] != s.question7_solution['Our model predicts salaries for the two individuals described above.']:
print("Unfortunately, our current model will not predict for anyone who has missing values in any column - even if they do have a salary!")
| r2_test_check |
mod.rs | /*
* Licensed to Jim Cooke under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
pub struct | {
pub mean: f64,
pub weight: f64,
pub sort_key1: isize,
pub sort_key2: isize,
}
impl Centroid {
pub fn to_string(&self) -> String {
format!(
"{{\"mean\": \"{mean}\",\"weight\": \"{weight}\"}}",
mean = self.mean,
weight = self.weight
)
}
pub fn add(&mut self, r: &Centroid) -> String {
if r.weight < 0.0 {
return "centroid weight cannot be less than zero".to_string();
}
if self.weight != 0.0 {
self.weight += r.weight;
self.mean += r.weight * (r.mean - self.mean) / self.weight;
} else {
self.weight = r.weight;
self.mean = r.mean;
}
"".to_string()
}
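    // Editor's worked example of the merge above: adding (mean 4.0, weight 1.0)
    // into (mean 10.0, weight 2.0) gives weight 3.0 and
    // mean = 10.0 + 1.0*(4.0 - 10.0)/3.0 = 8.0, the weighted average of the two.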
pub fn clone(&self) -> Centroid {
Centroid {
mean: self.mean,
weight: self.weight,
sort_key1: self.mean.floor() as isize,
sort_key2: (self.mean.signum() * self.mean.fract()) as isize,
}
}
}
| Centroid |
PostForm.js | import React, { useState } from 'react';
import PropTypes from 'prop-types';
import { connect } from 'react-redux';
import { addPost } from '../../actions/post';
const PostForm = ({ addPost }) => {
const [text, setText] = useState('');
return (
<div className='post-form'>
<div className='bg-warning p'>
<h3>Say Something...</h3>
</div>
<form
className='form my-1'
onSubmit={e => {
e.preventDefault();
addPost({ text });
setText('');
}}
>
<textarea
name='text'
cols='30'
rows='5'
placeholder='Create a post'
value={text}
onChange={e => setText(e.target.value)}
required
/>
<input type='submit' className='btn btn-dark my-1' value='Submit' />
</form>
</div>
);
};
| };
export default connect(
null,
{ addPost }
)(PostForm); | PostForm.propTypes = {
addPost: PropTypes.func.isRequired |
utils.py | import asyncio
import discord
from redbot.vendored.discord.ext import menus
from .game import Game
TRANS = {
0: "\N{BLACK LARGE SQUARE}",
1: "\N{RED APPLE}",
2: "\N{LARGE GREEN CIRCLE}",
3: "\N{LARGE GREEN SQUARE}",
}
GET_DIR = {
"w": "up",
"s": "down",
"a": "left",
"d": "right",
None: "Click on a reaction to start",
}
class BoardMenu(menus.Menu):
def __init__(self, player_name, **kwargs):
super().__init__(**kwargs)
self.cur_dir = None
self.player_name = player_name
self.game = Game(12)
# maybe use lock here instead of event?
self.is_started = asyncio.Event()
def edit_board(self, end=False):
        emb = discord.Embed(title="Snake", description=self.make_board())
emb.add_field(name="Score", value=self.game.score)
emb.add_field(name="Player", value=self.player_name)
if end:
emb.add_field(name="Current Direction", value="Game Ended")
else:
emb.add_field(name="Current Direction", value=GET_DIR[self.cur_dir])
return emb
def make_board(self):
return "\n".join("".join(map(lambda x: TRANS[x], i)) for i in self.game.board)
async def loop(self):
await self.is_started.wait()
while True:
await asyncio.sleep(1)
if not self.game.move(self.cur_dir):
await self.message.edit(embed=self.edit_board(end=True))
break
await self.message.edit(embed=self.edit_board())
self.stop()
async def | (self, ctx, channel):
self.task = ctx.bot.loop.create_task(self.loop())
return await ctx.send(embed=self.edit_board())
@menus.button("⬆️")
async def up(self, payload):
self.cur_dir = "w"
self.is_started.set()
@menus.button("⬇️")
async def down(self, payload):
self.cur_dir = "s"
self.is_started.set()
@menus.button("⬅️")
async def left(self, payload):
self.cur_dir = "a"
self.is_started.set()
@menus.button("➡️")
async def right(self, payload):
self.cur_dir = "d"
self.is_started.set()
@menus.button("⏹️")
async def on_stop(self, payload):
self.task.cancel()
await self.message.edit(embed=self.edit_board(end=True))
self.stop()
| send_initial_message |
scorer.rs | use std::rc::Rc;
use std::path::Path;
use std::cell::Cell;
use std::collections::BTreeMap;
use url::Url;
use regex::Regex;
use html5ever::tree_builder::TreeSink;
use markup5ever_rcdom::Node;
use markup5ever_rcdom::NodeData::{Element, Text};
use markup5ever_rcdom::Handle;
use markup5ever_rcdom::NodeData::{
Document,
Doctype,
Comment,
ProcessingInstruction
};
use markup5ever_rcdom::RcDom;
use html5ever::{QualName, LocalName};
use html5ever::tree_builder::{NodeOrText, ElementFlags};
use dom;
pub static PUNCTUATIONS_REGEX: &'static str = r"([、。,.!?]|\.[^A-Za-z0-9]|,[^0-9]|!|\?)";
pub static UNLIKELY_CANDIDATES: &'static str =
"combx|comment|community|disqus|extra|foot|header|menu\
|remark|rss|shoutbox|sidebar|sponsor|ad-break|agegate\
|pagination|pager|popup|tweet|twitter\
|ssba";
pub static LIKELY_CANDIDATES: &'static str = "and|article|body|column|main|shadow\
|content|hentry";
pub static POSITIVE_CANDIDATES: &'static str =
"article|body|content|entry|hentry|main|page\
|pagination|post|text|blog|story";
pub static NEGATIVE_CANDIDATES: &'static str =
"combx|comment|com|contact|foot|footer|footnote\
|masthead|media|meta|outbrain|promo|related\
|scroll|shoutbox|sidebar|sponsor|shopping\
|tags|tool|widget|form|textfield\
|uiScale|hidden";
static BLOCK_CHILD_TAGS: [&'static str; 10] = [
"a", "blockquote", "dl", "div", "img", "ol", "p", "pre", "table", "ul",
];
lazy_static! {
static ref PUNCTUATIONS: Regex = Regex::new(PUNCTUATIONS_REGEX).unwrap();
static ref LIKELY: Regex = Regex::new(LIKELY_CANDIDATES).unwrap();
static ref UNLIKELY: Regex = Regex::new(UNLIKELY_CANDIDATES).unwrap();
static ref POSITIVE: Regex = Regex::new(POSITIVE_CANDIDATES).unwrap();
static ref NEGATIVE: Regex = Regex::new(NEGATIVE_CANDIDATES).unwrap();
}
pub struct Candidate {
pub node: Rc<Node>,
pub score: Cell<f32>,
}
pub fn fix_img_path(handle: Handle, url: &Url) -> bool {
let src = dom::get_attr("src", handle.clone());
if src.is_none() {
return false
}
let s = src.unwrap();
    if !s.starts_with("//") && !s.starts_with("http://") && !s.starts_with("https://") {
ma |
pub fn get_link_density(handle: Handle) -> f32 {
let text_length = dom::text_len(handle.clone()) as f32;
if text_length == 0.0 {
return 0.0;
}
let mut link_length = 0.0;
let mut links: Vec<Rc<Node>> = vec![];
dom::find_node(handle.clone(), "a", &mut links);
for link in links.iter() {
link_length += dom::text_len(link.clone()) as f32;
}
link_length / text_length
}
pub fn is_candidate(handle: Handle) -> bool {
let text_len = dom::text_len(handle.clone());
if text_len < 20 {
return false
}
    let n: &str = &dom::get_tag_name(handle.clone()).unwrap_or_default();
match n {
"p" => true,
"div" | "article" | "center" | "section" =>
!dom::has_nodes(handle.clone(), &BLOCK_CHILD_TAGS.iter().map(|t| *t).collect()),
_ => false
}
}
pub fn init_content_score(handle: Handle) -> f32 {
let tag_name = dom::get_tag_name(handle.clone()).unwrap_or_default();
let score = match tag_name.as_ref() {
"article" => 10.0,
"div" => 5.0,
"blockquote" => 3.0,
"form" => -3.0,
"th" => 5.0,
_ => 0.0,
};
score + get_class_weight(handle.clone())
}
pub fn calc_content_score(handle: Handle) -> f32 {
let mut score: f32 = 1.0;
let mut text = String::new();
dom::extract_text(handle.clone(), &mut text, true);
let mat = PUNCTUATIONS.find_iter(&text);
score += mat.count() as f32;
score += f32::min(f32::floor(text.chars().count() as f32 / 100.0), 3.0);
return score
}
pub fn get_class_weight(handle: Handle) -> f32 {
let mut weight: f32 = 0.0;
match handle.data {
Element { name: _, ref attrs, .. } => {
for name in ["id", "class"].iter() {
if let Some(val) = dom::attr(name, &attrs.borrow()) {
if POSITIVE.is_match(&val) {
weight += 25.0
};
if NEGATIVE.is_match(&val) {
weight -= 25.0
}
}
}
},
_ => (),
};
weight
}
pub fn preprocess(mut dom: &mut RcDom, handle: Handle, mut title: &mut String) -> bool {
match handle.clone().data {
Element { ref name, ref attrs, .. } => {
let tag_name = name.local.as_ref();
match tag_name.to_lowercase().as_ref() {
"script" | "link" | "style" => {
return true
},
"title" => dom::extract_text(handle.clone(), &mut title, true),
_ => (),
}
for name in ["id", "class"].iter() {
if let Some(val) = dom::attr(name, &attrs.borrow()) {
if tag_name != "body" && UNLIKELY.is_match(&val) {
if !LIKELY.is_match(&val) {
return true
}
}
}
}
},
_ => (),
}
let mut useless_nodes = vec![];
let mut paragraph_nodes = vec![];
let mut br_count = 0;
for child in handle.children.borrow().iter() {
if preprocess(&mut dom, child.clone(), &mut title) {
useless_nodes.push(child.clone());
}
let c = child.clone();
match c.data {
Element { ref name, .. } => {
let tag_name = name.local.as_ref();
if "br" == tag_name.to_lowercase() {
br_count += 1
} else {
br_count = 0
}
},
Text { ref contents } => {
let s = contents.borrow();
if br_count >= 2 && s.trim().len() > 0 {
paragraph_nodes.push(child.clone());
br_count = 0
}
},
_ => ()
}
}
for node in useless_nodes.iter() {
dom.remove_from_parent(node);
}
for node in paragraph_nodes.iter() {
let name = QualName::new(None, ns!(), LocalName::from("p"));
let p = dom.create_element(name, vec![], ElementFlags::default());
dom.append_before_sibling(node, NodeOrText::AppendNode(p.clone()));
dom.remove_from_parent(node);
match node.clone().data {
Text { ref contents } => {
let text = contents.clone().into_inner().clone();
dom.append(&p, NodeOrText::AppendText(text))
},
_ => (),
}
}
false
}
pub fn find_candidates(mut dom: &mut RcDom,
id: &Path,
handle: Handle,
candidates: &mut BTreeMap<String, Candidate>,
nodes: &mut BTreeMap<String, Rc<Node>>) {
if let Some(id) = id.to_str().map(|id| id.to_string()) {
nodes.insert(id, handle.clone());
}
if is_candidate(handle.clone()) {
let score = calc_content_score(handle.clone());
if let Some(c) = id.parent()
.and_then(|pid| find_or_create_candidate(pid, candidates, nodes))
{
c.score.set(c.score.get() + score)
}
if let Some(c) = id.parent()
.and_then(|pid| pid.parent())
.and_then(|gpid| find_or_create_candidate(gpid, candidates, nodes))
{
c.score.set(c.score.get() + score / 2.0)
}
}
if is_candidate(handle.clone()) {
let score = calc_content_score(handle.clone());
if let Some(c) = id.to_str()
.map(|id| id.to_string())
.and_then(|id| candidates.get(&id)) {
c.score.set(c.score.get() + score)
}
if let Some(c) = id.parent()
.and_then(|pid| pid.to_str())
.map(|id| id.to_string())
.and_then(|pid| candidates.get(&pid)) {
c.score.set(c.score.get() + score)
}
if let Some(c) = id.parent()
.and_then(|p| p.parent())
.and_then(|pid| pid.to_str())
.map(|id| id.to_string())
.and_then(|pid| candidates.get(&pid)) {
c.score.set(c.score.get() + score)
}
}
for (i, child) in handle.children.borrow().iter().enumerate() {
find_candidates(&mut dom,
id.join(i.to_string()).as_path(),
child.clone(),
candidates,
nodes)
}
}
fn find_or_create_candidate<'a>(id: &Path,
candidates: &'a mut BTreeMap<String, Candidate>,
nodes: &BTreeMap<String, Rc<Node>>) -> Option<&'a Candidate> {
if let Some(id) = id.to_str().map(|id| id.to_string()) {
if let Some(node) = nodes.get(&id) {
if candidates.get(&id).is_none() {
candidates.insert(id.clone(), Candidate {
node: node.clone(),
score: Cell::new(init_content_score(node.clone())),
});
}
return candidates.get(&id)
}
}
None
}
pub fn clean(mut dom: &mut RcDom, id: &Path, handle: Handle, url: &Url, candidates: &BTreeMap<String, Candidate>) -> bool {
let mut useless = false;
match handle.data {
Document => (),
Doctype { .. } => (),
Text { ref contents } => {
let s = contents.borrow();
if s.trim().len() == 0 {
useless = true
}
},
Comment { .. } => useless = true,
Element { ref name, ref attrs, .. } => {
let tag_name = name.local.as_ref();
match tag_name.to_lowercase().as_ref() {
"script" | "link" | "style" | "noscript" | "meta"
| "h1" | "object" | "header" | "footer" | "aside" => {
useless = true
},
"form" | "table" | "ul" | "div" => {
useless = is_useless(id, handle.clone(), candidates)
},
"img" => useless = !fix_img_path(handle.clone(), url),
_ => (),
}
dom::clean_attr("id" , &mut *attrs.borrow_mut());
dom::clean_attr("class", &mut *attrs.borrow_mut());
dom::clean_attr("style", &mut *attrs.borrow_mut());
},
ProcessingInstruction { .. } => unreachable!()
}
let mut useless_nodes = vec![];
for (i, child) in handle.children.borrow().iter().enumerate() {
let pid = id.join(i.to_string());
if clean(&mut dom, pid.as_path(), child.clone(), url, candidates) {
useless_nodes.push(child.clone());
}
}
for node in useless_nodes.iter() {
dom.remove_from_parent(node);
}
if dom::is_empty(handle) {
useless = true
}
useless
}
pub fn is_useless(id: &Path, handle: Handle, candidates: &BTreeMap<String, Candidate>) -> bool {
let tag_name = &dom::get_tag_name(handle.clone()).unwrap_or_default();
let weight = get_class_weight(handle.clone());
let score = id.to_str()
.and_then(|id| candidates.get(id))
.map(|c| c.score.get()).unwrap_or(0.0);
if weight + score < 0.0 {
return true
}
let text_nodes_len = dom::text_children_count(handle.clone());
let mut p_nodes: Vec<Rc<Node>> = vec![];
let mut img_nodes: Vec<Rc<Node>> = vec![];
let mut li_nodes: Vec<Rc<Node>> = vec![];
let mut input_nodes: Vec<Rc<Node>> = vec![];
let mut embed_nodes: Vec<Rc<Node>> = vec![];
dom::find_node(handle.clone(), "p" , &mut p_nodes);
dom::find_node(handle.clone(), "img" , &mut img_nodes);
dom::find_node(handle.clone(), "li" , &mut li_nodes);
dom::find_node(handle.clone(), "input" , &mut input_nodes);
dom::find_node(handle.clone(), "embed" , &mut embed_nodes);
let p_count = p_nodes.len();
let img_count = img_nodes.len();
let li_count = li_nodes.len() as i32 - 100;
let input_count = input_nodes.len();
let embed_count = embed_nodes.len();
let link_density = get_link_density(handle.clone());
let content_length = dom::text_len(handle.clone());
let para_count = text_nodes_len + p_count;
if img_count > para_count + text_nodes_len {
return true
}
if li_count > para_count as i32 && tag_name != "ul" && tag_name != "ol" {
return true
}
if input_count as f32 > f32::floor(para_count as f32 / 3.0) {
return true
}
if content_length < 25 && (img_count == 0 || img_count > 2) {
return true
}
if weight < 25.0 && link_density > 0.2 {
return true
}
if (embed_count == 1 && content_length < 35) || embed_count > 1 {
return true
}
return false
}
| tch url.join(&s) {
Ok(new_url) => dom::set_attr("src", new_url.as_str(), handle),
Err(_) => (),
}
}
true
}
|
carousel-resources.ts | import { ICarouselResourceStrings } from 'igniteui-angular';
export const CarouselResourceStringsES: ICarouselResourceStrings = {
igx_carousel_of: 'of' | }; |
|
scf_utils.py | import numpy as np
import os, sys
sys.path.append(os.path.dirname(__file__))
from diis_solver import diis_solver, diis_solver_uhf
sys.path.pop()
import jk
import xform
def homo_lumo_mix(C, nocc, beta):
"""
Mix a portion of LUMO to HOMO.
Used when generating spin-unrestricted guess.
"""
if beta < 0. or beta > 1.:
raise Exception("Mixing beta must be in [0, 1]")
Cb = C.copy()
homo = C[:, nocc - 1]
lumo = C[:, nocc]
Cb[:, nocc - 1] = (1. - beta) ** 0.5 * homo + beta ** 0.5 * lumo
return Cb
def get_dm(C, nel):
D = C[:, :nel]
D = D @ D.T
return D
def get_JK(is_fitted, g, D):
if(is_fitted):
# FINISH LATER
X = np.einsum("Pls,ls->P", g, D)
J = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Z = np.einsum("Pns,ls->Pnl", g, D)
K = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Z)
return (J, K)
else:
#J = np.einsum("pqrs,rs->pq", g, D)
#K = np.einsum("prqs,rs->pq", g, D)
J, K = jk.getJK_np_Dshift(g, D - np.diag(np.diag(D) * 0.5))
return (J, K)
def get_JK_uhf(is_fitted, g, Ds):
"""
Ds = [Da, Db]
"""
Da, Db = Ds[0], Ds[1]
Dtot = Da + Db
if (is_fitted == True):
X = np.einsum("Pls,ls->P", g, Dtot)
Jtot = np.einsum("mnP,P->mn", np.swapaxes(g, 0, 2), X)
Za = np.einsum("Pns,ls->Pnl", g, Da)
Ka = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Za)
Zb = np.einsum("Pns,ls->Pnl", g, Db)
Kb = np.einsum('mlP,Pnl->mn', np.swapaxes(g, 0, 2), Zb)
return Jtot, Ka, Kb
else:
Jtot = np.einsum("pqrs, rs -> pq", g, Dtot)
Ka = np.einsum("prqs, rs -> pq", g, Da)
Kb = np.einsum("prqs, rs -> pq", g, Db)
return Jtot, Ka, Kb
def get_fock(H, g, D):
J, K = get_JK(len(g.shape) == 3, g, D)
return H + 2 * J - K
def diis_update(F_prev_list, r_prev_list):
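    """
    DIIS update: combine the stored Fock matrices using coefficients
    obtained from the corresponding error vectors.
    """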
c = diis_solver(r_prev_list) # GET THE COEFFICIENTS!!
out = 0 * F_prev_list[0]
for i, element in enumerate(F_prev_list):
out += c[i] * element
return out
def oda_update(dF, dD, dE):
"""
ODA update:
    lbd = 0.5 * (1 - dE / E_deriv)
"""
E_deriv = np.sum(dF * dD)
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1: |
def get_fock_uhf(H, g, Ds):
"""
    Build the UHF Fock matrices from H, the ERIs, and Ds = [Da, Db]:
    Fa = H + Jtot - Ka, Fb = H + Jtot - Kb.
"""
Jtot, Ka, Kb = get_JK_uhf(len(g.shape) == 3, g, Ds)
return H + Jtot - Ka, H + Jtot - Kb
def diis_update_uhf(F_prev_lists, r_prev_lists):
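    """
    DIIS update (UHF): combine the stored alpha/beta Fock matrices using
    coefficients obtained from the alpha/beta error vectors.
    """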
c = diis_solver_uhf(r_prev_lists[0], r_prev_lists[1])
Fa = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[0]):
Fa += c[i] * element
Fb = 0 * F_prev_lists[0][0]
for i, element in enumerate(F_prev_lists[1]):
Fb += c[i] * element
return Fa, Fb
def oda_update_uhf(dFs, dDs, dE):
"""
ODA update:
    lbd = 0.5 * (1 - dE / E_deriv)
"""
if type(dFs) is not list:
raise Exception("arg1 and arg2 are list of alpha/beta matrices.")
E_deriv = np.sum(dFs[0] * dDs[0] + dFs[1] * dDs[1])
lbd = 0.5 * (1. - dE / E_deriv)
if lbd < 0 or lbd > 1:
lbd = 0.9999 if dE < 0 else 1.e-4
return lbd
def diag(F, A):
Fp = A.T @ F @ A
eps, Cp = np.linalg.eigh(Fp)
C = A @ Cp
return eps, C
def get_SCF_err(S, D, F):
err_v = S @ D @ F - F @ D @ S
err = np.mean(err_v ** 2) ** 0.5
return err, err_v
def get_SCF_energy(H, F, D, unrestricted):
"""
Calculates the energy.
"""
if unrestricted == True:
if type(F) is not list or type(D) is not list:
raise Exception("For UHF, F and D must have type list.")
Fa, Fb = F[0], F[1]
Da, Db = D[0], D[1]
Dtot = Da + Db
return np.sum(Dtot * H + Da * Fa + Db * Fb) * 0.5
else:
return np.sum((H + F) * D)
def xform_2(H, A):
"""
Basis xform for 2-tensor
"""
if len(H.shape) != 2:
raise Exception("Dimension error: arg1 should be a matrix")
return A.T @ H @ A
def xform_4(g, A):
"""
Basis xform for 4-tensor
"""
if len(g.shape) != 4:
raise Exception("""
Dimension error: arg1 should be a four-tensor.
Note that you should set is_fitted to be False.
""")
#return np.einsum("pi, qj, pqrs, rk, sl -> ijkl", A, A, g, A, A, optimize=True)
return xform.xform_4_np(g, A) | lbd = 0.9999 if dE < 0 else 1.e-4
return lbd |
event.rs | // This file is generated by rust-protobuf 2.17.0. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![rustfmt::skip]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tensorflow/core/util/event.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_17_0;
#[derive(PartialEq,Clone,Default)]
pub struct Event {
// message fields
pub wall_time: f64,
pub step: i64,
// message oneof groups
pub what: ::std::option::Option<Event_oneof_what>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a Event {
fn default() -> &'a Event {
<Event as ::protobuf::Message>::default_instance()
}
}
#[derive(Clone,PartialEq,Debug)]
pub enum Event_oneof_what {
file_version(::std::string::String),
graph_def(::std::vec::Vec<u8>),
summary(super::summary::Summary),
log_message(LogMessage),
session_log(SessionLog),
tagged_run_metadata(TaggedRunMetadata),
meta_graph_def(::std::vec::Vec<u8>),
}
impl Event {
pub fn new() -> Event {
::std::default::Default::default()
}
// double wall_time = 1;
pub fn get_wall_time(&self) -> f64 {
self.wall_time
}
pub fn clear_wall_time(&mut self) {
self.wall_time = 0.;
}
// Param is passed by value, moved
pub fn set_wall_time(&mut self, v: f64) {
self.wall_time = v;
}
// int64 step = 2;
pub fn get_step(&self) -> i64 {
self.step
}
pub fn clear_step(&mut self) {
self.step = 0;
}
// Param is passed by value, moved
pub fn set_step(&mut self, v: i64) {
self.step = v;
}
// string file_version = 3;
pub fn get_file_version(&self) -> &str {
match self.what {
::std::option::Option::Some(Event_oneof_what::file_version(ref v)) => v,
_ => "",
}
}
pub fn clear_file_version(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_file_version(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::file_version(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_file_version(&mut self, v: ::std::string::String) {
self.what = ::std::option::Option::Some(Event_oneof_what::file_version(v))
}
// Mutable pointer to the field.
pub fn mut_file_version(&mut self) -> &mut ::std::string::String {
if let ::std::option::Option::Some(Event_oneof_what::file_version(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::file_version(::std::string::String::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::file_version(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_file_version(&mut self) -> ::std::string::String {
if self.has_file_version() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::file_version(v)) => v,
_ => panic!(),
}
} else {
::std::string::String::new()
}
}
// bytes graph_def = 4;
pub fn get_graph_def(&self) -> &[u8] {
match self.what {
::std::option::Option::Some(Event_oneof_what::graph_def(ref v)) => v,
_ => &[],
}
}
pub fn clear_graph_def(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_graph_def(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::graph_def(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_graph_def(&mut self, v: ::std::vec::Vec<u8>) {
self.what = ::std::option::Option::Some(Event_oneof_what::graph_def(v))
}
// Mutable pointer to the field.
pub fn mut_graph_def(&mut self) -> &mut ::std::vec::Vec<u8> {
if let ::std::option::Option::Some(Event_oneof_what::graph_def(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::graph_def(::std::vec::Vec::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::graph_def(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_graph_def(&mut self) -> ::std::vec::Vec<u8> {
if self.has_graph_def() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::graph_def(v)) => v,
_ => panic!(),
}
} else {
::std::vec::Vec::new()
}
}
// .tensorflow.Summary summary = 5;
pub fn get_summary(&self) -> &super::summary::Summary {
match self.what {
::std::option::Option::Some(Event_oneof_what::summary(ref v)) => v,
_ => <super::summary::Summary as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_summary(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_summary(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::summary(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_summary(&mut self, v: super::summary::Summary) {
self.what = ::std::option::Option::Some(Event_oneof_what::summary(v))
}
// Mutable pointer to the field.
pub fn mut_summary(&mut self) -> &mut super::summary::Summary {
if let ::std::option::Option::Some(Event_oneof_what::summary(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::summary(super::summary::Summary::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::summary(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_summary(&mut self) -> super::summary::Summary {
if self.has_summary() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::summary(v)) => v,
_ => panic!(),
}
} else {
super::summary::Summary::new()
}
}
// .tensorflow.LogMessage log_message = 6;
pub fn get_log_message(&self) -> &LogMessage {
match self.what {
::std::option::Option::Some(Event_oneof_what::log_message(ref v)) => v,
_ => <LogMessage as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_log_message(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_log_message(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::log_message(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_log_message(&mut self, v: LogMessage) {
self.what = ::std::option::Option::Some(Event_oneof_what::log_message(v))
}
// Mutable pointer to the field.
pub fn mut_log_message(&mut self) -> &mut LogMessage {
if let ::std::option::Option::Some(Event_oneof_what::log_message(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::log_message(LogMessage::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::log_message(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_log_message(&mut self) -> LogMessage {
if self.has_log_message() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::log_message(v)) => v,
_ => panic!(),
}
} else {
LogMessage::new()
}
}
// .tensorflow.SessionLog session_log = 7;
pub fn get_session_log(&self) -> &SessionLog {
match self.what {
::std::option::Option::Some(Event_oneof_what::session_log(ref v)) => v,
_ => <SessionLog as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_session_log(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_session_log(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::session_log(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_session_log(&mut self, v: SessionLog) {
self.what = ::std::option::Option::Some(Event_oneof_what::session_log(v))
}
// Mutable pointer to the field.
pub fn mut_session_log(&mut self) -> &mut SessionLog {
if let ::std::option::Option::Some(Event_oneof_what::session_log(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::session_log(SessionLog::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::session_log(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_session_log(&mut self) -> SessionLog {
if self.has_session_log() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::session_log(v)) => v,
_ => panic!(),
}
} else {
SessionLog::new()
}
}
// .tensorflow.TaggedRunMetadata tagged_run_metadata = 8;
pub fn get_tagged_run_metadata(&self) -> &TaggedRunMetadata {
match self.what {
::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(ref v)) => v,
_ => <TaggedRunMetadata as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_tagged_run_metadata(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_tagged_run_metadata(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_tagged_run_metadata(&mut self, v: TaggedRunMetadata) {
self.what = ::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(v))
}
// Mutable pointer to the field.
pub fn mut_tagged_run_metadata(&mut self) -> &mut TaggedRunMetadata {
if let ::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(TaggedRunMetadata::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_tagged_run_metadata(&mut self) -> TaggedRunMetadata {
if self.has_tagged_run_metadata() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(v)) => v,
_ => panic!(),
}
} else {
TaggedRunMetadata::new()
}
}
// bytes meta_graph_def = 9;
pub fn get_meta_graph_def(&self) -> &[u8] {
match self.what {
::std::option::Option::Some(Event_oneof_what::meta_graph_def(ref v)) => v,
_ => &[],
}
}
pub fn clear_meta_graph_def(&mut self) {
self.what = ::std::option::Option::None;
}
pub fn has_meta_graph_def(&self) -> bool {
match self.what {
::std::option::Option::Some(Event_oneof_what::meta_graph_def(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_meta_graph_def(&mut self, v: ::std::vec::Vec<u8>) {
self.what = ::std::option::Option::Some(Event_oneof_what::meta_graph_def(v))
}
// Mutable pointer to the field.
pub fn mut_meta_graph_def(&mut self) -> &mut ::std::vec::Vec<u8> {
if let ::std::option::Option::Some(Event_oneof_what::meta_graph_def(_)) = self.what {
} else {
self.what = ::std::option::Option::Some(Event_oneof_what::meta_graph_def(::std::vec::Vec::new()));
}
match self.what {
::std::option::Option::Some(Event_oneof_what::meta_graph_def(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_meta_graph_def(&mut self) -> ::std::vec::Vec<u8> {
if self.has_meta_graph_def() {
match self.what.take() {
::std::option::Option::Some(Event_oneof_what::meta_graph_def(v)) => v,
_ => panic!(),
}
} else {
::std::vec::Vec::new()
}
}
}
impl ::protobuf::Message for Event {
fn is_initialized(&self) -> bool {
if let Some(Event_oneof_what::summary(ref v)) = self.what {
if !v.is_initialized() {
return false;
}
}
if let Some(Event_oneof_what::log_message(ref v)) = self.what {
if !v.is_initialized() {
return false;
}
}
if let Some(Event_oneof_what::session_log(ref v)) = self.what {
if !v.is_initialized() {
return false;
}
}
if let Some(Event_oneof_what::tagged_run_metadata(ref v)) = self.what {
if !v.is_initialized() {
return false;
}
}
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeFixed64 {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_double()?;
self.wall_time = tmp;
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.step = tmp;
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::file_version(is.read_string()?));
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::graph_def(is.read_bytes()?));
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::summary(is.read_message()?));
},
6 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::log_message(is.read_message()?));
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::session_log(is.read_message()?));
},
8 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::tagged_run_metadata(is.read_message()?));
},
9 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.what = ::std::option::Option::Some(Event_oneof_what::meta_graph_def(is.read_bytes()?));
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.wall_time != 0. {
my_size += 9;
}
if self.step != 0 {
my_size += ::protobuf::rt::value_size(2, self.step, ::protobuf::wire_format::WireTypeVarint);
}
if let ::std::option::Option::Some(ref v) = self.what {
match v {
&Event_oneof_what::file_version(ref v) => {
my_size += ::protobuf::rt::string_size(3, &v);
},
&Event_oneof_what::graph_def(ref v) => {
my_size += ::protobuf::rt::bytes_size(4, &v);
},
&Event_oneof_what::summary(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Event_oneof_what::log_message(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Event_oneof_what::session_log(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Event_oneof_what::tagged_run_metadata(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&Event_oneof_what::meta_graph_def(ref v) => {
my_size += ::protobuf::rt::bytes_size(9, &v);
},
};
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.wall_time != 0. {
os.write_double(1, self.wall_time)?;
}
if self.step != 0 {
os.write_int64(2, self.step)?;
}
if let ::std::option::Option::Some(ref v) = self.what {
match v {
&Event_oneof_what::file_version(ref v) => {
os.write_string(3, v)?;
},
&Event_oneof_what::graph_def(ref v) => {
os.write_bytes(4, v)?;
},
&Event_oneof_what::summary(ref v) => {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Event_oneof_what::log_message(ref v) => {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Event_oneof_what::session_log(ref v) => {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Event_oneof_what::tagged_run_metadata(ref v) => {
os.write_tag(8, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&Event_oneof_what::meta_graph_def(ref v) => {
os.write_bytes(9, v)?;
},
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> Event {
Event::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeDouble>(
"wall_time",
|m: &Event| { &m.wall_time },
|m: &mut Event| { &mut m.wall_time },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"step",
|m: &Event| { &m.step },
|m: &mut Event| { &mut m.step },
));
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor::<_>(
"file_version",
Event::has_file_version,
Event::get_file_version,
));
fields.push(::protobuf::reflect::accessor::make_singular_bytes_accessor::<_>(
"graph_def",
Event::has_graph_def,
Event::get_graph_def,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, super::summary::Summary>(
"summary",
Event::has_summary,
Event::get_summary,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, LogMessage>(
"log_message",
Event::has_log_message,
Event::get_log_message,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, SessionLog>(
"session_log",
Event::has_session_log,
Event::get_session_log,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, TaggedRunMetadata>(
"tagged_run_metadata",
Event::has_tagged_run_metadata,
Event::get_tagged_run_metadata,
));
fields.push(::protobuf::reflect::accessor::make_singular_bytes_accessor::<_>(
"meta_graph_def",
Event::has_meta_graph_def,
Event::get_meta_graph_def,
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<Event>(
"Event",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static Event {
static instance: ::protobuf::rt::LazyV2<Event> = ::protobuf::rt::LazyV2::INIT;
instance.get(Event::new)
}
}
impl ::protobuf::Clear for Event {
fn clear(&mut self) {
self.wall_time = 0.;
self.step = 0;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.what = ::std::option::Option::None;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for Event {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for Event {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct LogMessage {
// message fields
pub level: LogMessage_Level,
pub message: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a LogMessage {
fn default() -> &'a LogMessage {
<LogMessage as ::protobuf::Message>::default_instance()
}
}
impl LogMessage {
pub fn new() -> LogMessage {
::std::default::Default::default()
}
// .tensorflow.LogMessage.Level level = 1;
pub fn get_level(&self) -> LogMessage_Level {
self.level
}
pub fn clear_level(&mut self) {
self.level = LogMessage_Level::UNKNOWN;
}
// Param is passed by value, moved
pub fn set_level(&mut self, v: LogMessage_Level) {
self.level = v;
}
// string message = 2;
pub fn get_message(&self) -> &str {
&self.message
}
pub fn clear_message(&mut self) {
self.message.clear();
}
// Param is passed by value, moved
pub fn set_message(&mut self, v: ::std::string::String) {
self.message = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_message(&mut self) -> &mut ::std::string::String {
&mut self.message
}
// Take field
pub fn take_message(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.message, ::std::string::String::new())
}
}
impl ::protobuf::Message for LogMessage {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.level, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.message)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.level != LogMessage_Level::UNKNOWN {
my_size += ::protobuf::rt::enum_size(1, self.level);
}
if !self.message.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.message);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.level != LogMessage_Level::UNKNOWN {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.level))?;
}
if !self.message.is_empty() {
os.write_string(2, &self.message)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> LogMessage {
LogMessage::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<LogMessage_Level>>(
"level",
|m: &LogMessage| { &m.level },
|m: &mut LogMessage| { &mut m.level },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"message",
|m: &LogMessage| { &m.message },
|m: &mut LogMessage| { &mut m.message },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<LogMessage>(
"LogMessage",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static LogMessage {
static instance: ::protobuf::rt::LazyV2<LogMessage> = ::protobuf::rt::LazyV2::INIT;
instance.get(LogMessage::new)
}
}
impl ::protobuf::Clear for LogMessage {
fn clear(&mut self) {
self.level = LogMessage_Level::UNKNOWN;
self.message.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for LogMessage {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for LogMessage {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum LogMessage_Level {
UNKNOWN = 0,
DEBUGGING = 10,
INFO = 20,
WARN = 30,
ERROR = 40,
FATAL = 50,
}
impl ::protobuf::ProtobufEnum for LogMessage_Level {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<LogMessage_Level> {
match value {
0 => ::std::option::Option::Some(LogMessage_Level::UNKNOWN),
10 => ::std::option::Option::Some(LogMessage_Level::DEBUGGING),
20 => ::std::option::Option::Some(LogMessage_Level::INFO),
30 => ::std::option::Option::Some(LogMessage_Level::WARN),
40 => ::std::option::Option::Some(LogMessage_Level::ERROR),
50 => ::std::option::Option::Some(LogMessage_Level::FATAL),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [LogMessage_Level] = &[
LogMessage_Level::UNKNOWN,
LogMessage_Level::DEBUGGING,
LogMessage_Level::INFO,
LogMessage_Level::WARN,
LogMessage_Level::ERROR,
LogMessage_Level::FATAL,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<LogMessage_Level>("LogMessage.Level", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for LogMessage_Level {
}
impl ::std::default::Default for LogMessage_Level {
fn default() -> Self {
LogMessage_Level::UNKNOWN
}
}
impl ::protobuf::reflect::ProtobufValue for LogMessage_Level {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(PartialEq,Clone,Default)]
pub struct SessionLog {
// message fields
pub status: SessionLog_SessionStatus,
pub checkpoint_path: ::std::string::String,
pub msg: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SessionLog {
fn default() -> &'a SessionLog {
<SessionLog as ::protobuf::Message>::default_instance()
}
}
impl SessionLog {
pub fn new() -> SessionLog {
::std::default::Default::default()
}
// .tensorflow.SessionLog.SessionStatus status = 1;
pub fn get_status(&self) -> SessionLog_SessionStatus {
self.status
}
pub fn clear_status(&mut self) {
self.status = SessionLog_SessionStatus::STATUS_UNSPECIFIED;
}
// Param is passed by value, moved
pub fn set_status(&mut self, v: SessionLog_SessionStatus) {
self.status = v;
}
// string checkpoint_path = 2;
pub fn get_checkpoint_path(&self) -> &str {
&self.checkpoint_path
}
pub fn clear_checkpoint_path(&mut self) {
self.checkpoint_path.clear();
}
// Param is passed by value, moved
pub fn set_checkpoint_path(&mut self, v: ::std::string::String) {
self.checkpoint_path = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_checkpoint_path(&mut self) -> &mut ::std::string::String {
&mut self.checkpoint_path
}
// Take field
pub fn take_checkpoint_path(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.checkpoint_path, ::std::string::String::new())
}
// string msg = 3;
pub fn get_msg(&self) -> &str {
&self.msg
}
pub fn clear_msg(&mut self) {
self.msg.clear();
}
// Param is passed by value, moved
pub fn set_msg(&mut self, v: ::std::string::String) {
self.msg = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_msg(&mut self) -> &mut ::std::string::String {
&mut self.msg
}
// Take field
pub fn take_msg(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.msg, ::std::string::String::new())
}
}
impl ::protobuf::Message for SessionLog {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.status, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.checkpoint_path)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.msg)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.status != SessionLog_SessionStatus::STATUS_UNSPECIFIED {
my_size += ::protobuf::rt::enum_size(1, self.status);
}
if !self.checkpoint_path.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.checkpoint_path);
}
if !self.msg.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.msg);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.status != SessionLog_SessionStatus::STATUS_UNSPECIFIED {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.status))?;
}
if !self.checkpoint_path.is_empty() {
os.write_string(2, &self.checkpoint_path)?;
}
if !self.msg.is_empty() {
os.write_string(3, &self.msg)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SessionLog {
SessionLog::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<SessionLog_SessionStatus>>(
"status",
|m: &SessionLog| { &m.status },
|m: &mut SessionLog| { &mut m.status },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"checkpoint_path",
|m: &SessionLog| { &m.checkpoint_path },
|m: &mut SessionLog| { &mut m.checkpoint_path },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"msg",
|m: &SessionLog| { &m.msg },
|m: &mut SessionLog| { &mut m.msg },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<SessionLog>(
"SessionLog",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static SessionLog {
static instance: ::protobuf::rt::LazyV2<SessionLog> = ::protobuf::rt::LazyV2::INIT;
instance.get(SessionLog::new)
}
}
impl ::protobuf::Clear for SessionLog {
fn clear(&mut self) {
self.status = SessionLog_SessionStatus::STATUS_UNSPECIFIED;
self.checkpoint_path.clear();
self.msg.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for SessionLog {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for SessionLog {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum SessionLog_SessionStatus {
STATUS_UNSPECIFIED = 0,
START = 1,
STOP = 2,
CHECKPOINT = 3,
}
impl ::protobuf::ProtobufEnum for SessionLog_SessionStatus {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<SessionLog_SessionStatus> {
match value {
0 => ::std::option::Option::Some(SessionLog_SessionStatus::STATUS_UNSPECIFIED),
1 => ::std::option::Option::Some(SessionLog_SessionStatus::START),
2 => ::std::option::Option::Some(SessionLog_SessionStatus::STOP),
3 => ::std::option::Option::Some(SessionLog_SessionStatus::CHECKPOINT),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [SessionLog_SessionStatus] = &[
SessionLog_SessionStatus::STATUS_UNSPECIFIED,
SessionLog_SessionStatus::START,
SessionLog_SessionStatus::STOP,
SessionLog_SessionStatus::CHECKPOINT,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<SessionLog_SessionStatus>("SessionLog.SessionStatus", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for SessionLog_SessionStatus {
}
impl ::std::default::Default for SessionLog_SessionStatus {
fn default() -> Self {
SessionLog_SessionStatus::STATUS_UNSPECIFIED
}
}
impl ::protobuf::reflect::ProtobufValue for SessionLog_SessionStatus {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(PartialEq,Clone,Default)]
pub struct TaggedRunMetadata {
// message fields
pub tag: ::std::string::String,
pub run_metadata: ::std::vec::Vec<u8>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TaggedRunMetadata {
fn default() -> &'a TaggedRunMetadata {
<TaggedRunMetadata as ::protobuf::Message>::default_instance()
}
}
impl TaggedRunMetadata {
pub fn new() -> TaggedRunMetadata {
::std::default::Default::default()
}
// string tag = 1;
pub fn get_tag(&self) -> &str {
&self.tag
}
pub fn clear_tag(&mut self) {
self.tag.clear();
}
// Param is passed by value, moved
pub fn set_tag(&mut self, v: ::std::string::String) {
self.tag = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tag(&mut self) -> &mut ::std::string::String {
&mut self.tag
}
// Take field
pub fn take_tag(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.tag, ::std::string::String::new())
}
// bytes run_metadata = 2;
pub fn get_run_metadata(&self) -> &[u8] {
&self.run_metadata
}
pub fn clear_run_metadata(&mut self) {
self.run_metadata.clear();
}
// Param is passed by value, moved
pub fn set_run_metadata(&mut self, v: ::std::vec::Vec<u8>) {
self.run_metadata = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_run_metadata(&mut self) -> &mut ::std::vec::Vec<u8> {
&mut self.run_metadata
}
// Take field
pub fn take_run_metadata(&mut self) -> ::std::vec::Vec<u8> {
::std::mem::replace(&mut self.run_metadata, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for TaggedRunMetadata {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.tag)?;
},
2 => {
::protobuf::rt::read_singular_proto3_bytes_into(wire_type, is, &mut self.run_metadata)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.tag.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.tag);
}
if !self.run_metadata.is_empty() {
my_size += ::protobuf::rt::bytes_size(2, &self.run_metadata);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.tag.is_empty() {
os.write_string(1, &self.tag)?;
}
if !self.run_metadata.is_empty() {
os.write_bytes(2, &self.run_metadata)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TaggedRunMetadata {
TaggedRunMetadata::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"tag",
|m: &TaggedRunMetadata| { &m.tag },
|m: &mut TaggedRunMetadata| { &mut m.tag },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"run_metadata",
|m: &TaggedRunMetadata| { &m.run_metadata },
|m: &mut TaggedRunMetadata| { &mut m.run_metadata },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TaggedRunMetadata>(
"TaggedRunMetadata",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TaggedRunMetadata {
static instance: ::protobuf::rt::LazyV2<TaggedRunMetadata> = ::protobuf::rt::LazyV2::INIT;
instance.get(TaggedRunMetadata::new)
}
}
impl ::protobuf::Clear for TaggedRunMetadata {
fn clear(&mut self) {
self.tag.clear();
self.run_metadata.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TaggedRunMetadata {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TaggedRunMetadata {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct WatchdogConfig {
// message fields
pub timeout_ms: i64,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a WatchdogConfig {
fn default() -> &'a WatchdogConfig {
<WatchdogConfig as ::protobuf::Message>::default_instance()
}
}
impl WatchdogConfig {
pub fn new() -> WatchdogConfig {
::std::default::Default::default()
}
// int64 timeout_ms = 1;
pub fn get_timeout_ms(&self) -> i64 {
self.timeout_ms
}
pub fn clear_timeout_ms(&mut self) {
self.timeout_ms = 0;
}
// Param is passed by value, moved
pub fn set_timeout_ms(&mut self, v: i64) {
self.timeout_ms = v;
}
}
impl ::protobuf::Message for WatchdogConfig {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_int64()?;
self.timeout_ms = tmp;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.timeout_ms != 0 {
my_size += ::protobuf::rt::value_size(1, self.timeout_ms, ::protobuf::wire_format::WireTypeVarint);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.timeout_ms != 0 {
os.write_int64(1, self.timeout_ms)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> WatchdogConfig {
WatchdogConfig::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"timeout_ms",
|m: &WatchdogConfig| { &m.timeout_ms },
|m: &mut WatchdogConfig| { &mut m.timeout_ms },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<WatchdogConfig>(
"WatchdogConfig",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static WatchdogConfig {
static instance: ::protobuf::rt::LazyV2<WatchdogConfig> = ::protobuf::rt::LazyV2::INIT;
instance.get(WatchdogConfig::new)
}
}
impl ::protobuf::Clear for WatchdogConfig {
fn clear(&mut self) {
self.timeout_ms = 0;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for WatchdogConfig {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result |
}
impl ::protobuf::reflect::ProtobufValue for WatchdogConfig {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct WorkerHeartbeatRequest {
// message fields
pub shutdown_mode: WorkerShutdownMode,
pub watchdog_config: ::protobuf::SingularPtrField<WatchdogConfig>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a WorkerHeartbeatRequest {
fn default() -> &'a WorkerHeartbeatRequest {
<WorkerHeartbeatRequest as ::protobuf::Message>::default_instance()
}
}
impl WorkerHeartbeatRequest {
pub fn new() -> WorkerHeartbeatRequest {
::std::default::Default::default()
}
// .tensorflow.WorkerShutdownMode shutdown_mode = 1;
pub fn get_shutdown_mode(&self) -> WorkerShutdownMode {
self.shutdown_mode
}
pub fn clear_shutdown_mode(&mut self) {
self.shutdown_mode = WorkerShutdownMode::DEFAULT;
}
// Param is passed by value, moved
pub fn set_shutdown_mode(&mut self, v: WorkerShutdownMode) {
self.shutdown_mode = v;
}
// .tensorflow.WatchdogConfig watchdog_config = 2;
pub fn get_watchdog_config(&self) -> &WatchdogConfig {
self.watchdog_config.as_ref().unwrap_or_else(|| <WatchdogConfig as ::protobuf::Message>::default_instance())
}
pub fn clear_watchdog_config(&mut self) {
self.watchdog_config.clear();
}
pub fn has_watchdog_config(&self) -> bool {
self.watchdog_config.is_some()
}
// Param is passed by value, moved
pub fn set_watchdog_config(&mut self, v: WatchdogConfig) {
self.watchdog_config = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_watchdog_config(&mut self) -> &mut WatchdogConfig {
if self.watchdog_config.is_none() {
self.watchdog_config.set_default();
}
self.watchdog_config.as_mut().unwrap()
}
// Take field
pub fn take_watchdog_config(&mut self) -> WatchdogConfig {
self.watchdog_config.take().unwrap_or_else(|| WatchdogConfig::new())
}
}
impl ::protobuf::Message for WorkerHeartbeatRequest {
fn is_initialized(&self) -> bool {
for v in &self.watchdog_config {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.shutdown_mode, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.watchdog_config)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.shutdown_mode != WorkerShutdownMode::DEFAULT {
my_size += ::protobuf::rt::enum_size(1, self.shutdown_mode);
}
if let Some(ref v) = self.watchdog_config.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.shutdown_mode != WorkerShutdownMode::DEFAULT {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.shutdown_mode))?;
}
if let Some(ref v) = self.watchdog_config.as_ref() {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> WorkerHeartbeatRequest {
WorkerHeartbeatRequest::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<WorkerShutdownMode>>(
"shutdown_mode",
|m: &WorkerHeartbeatRequest| { &m.shutdown_mode },
|m: &mut WorkerHeartbeatRequest| { &mut m.shutdown_mode },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<WatchdogConfig>>(
"watchdog_config",
|m: &WorkerHeartbeatRequest| { &m.watchdog_config },
|m: &mut WorkerHeartbeatRequest| { &mut m.watchdog_config },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<WorkerHeartbeatRequest>(
"WorkerHeartbeatRequest",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static WorkerHeartbeatRequest {
static instance: ::protobuf::rt::LazyV2<WorkerHeartbeatRequest> = ::protobuf::rt::LazyV2::INIT;
instance.get(WorkerHeartbeatRequest::new)
}
}
impl ::protobuf::Clear for WorkerHeartbeatRequest {
fn clear(&mut self) {
self.shutdown_mode = WorkerShutdownMode::DEFAULT;
self.watchdog_config.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for WorkerHeartbeatRequest {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for WorkerHeartbeatRequest {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct WorkerHeartbeatResponse {
// message fields
pub health_status: WorkerHealth,
pub worker_log: ::protobuf::RepeatedField<Event>,
pub hostname: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a WorkerHeartbeatResponse {
fn default() -> &'a WorkerHeartbeatResponse {
<WorkerHeartbeatResponse as ::protobuf::Message>::default_instance()
}
}
impl WorkerHeartbeatResponse {
pub fn new() -> WorkerHeartbeatResponse {
::std::default::Default::default()
}
// .tensorflow.WorkerHealth health_status = 1;
pub fn get_health_status(&self) -> WorkerHealth {
self.health_status
}
pub fn clear_health_status(&mut self) {
self.health_status = WorkerHealth::OK;
}
// Param is passed by value, moved
pub fn set_health_status(&mut self, v: WorkerHealth) {
self.health_status = v;
}
// repeated .tensorflow.Event worker_log = 2;
pub fn get_worker_log(&self) -> &[Event] {
&self.worker_log
}
pub fn clear_worker_log(&mut self) {
self.worker_log.clear();
}
// Param is passed by value, moved
pub fn set_worker_log(&mut self, v: ::protobuf::RepeatedField<Event>) {
self.worker_log = v;
}
// Mutable pointer to the field.
pub fn mut_worker_log(&mut self) -> &mut ::protobuf::RepeatedField<Event> {
&mut self.worker_log
}
// Take field
pub fn take_worker_log(&mut self) -> ::protobuf::RepeatedField<Event> {
::std::mem::replace(&mut self.worker_log, ::protobuf::RepeatedField::new())
}
// string hostname = 3;
pub fn get_hostname(&self) -> &str {
&self.hostname
}
pub fn clear_hostname(&mut self) {
self.hostname.clear();
}
// Param is passed by value, moved
pub fn set_hostname(&mut self, v: ::std::string::String) {
self.hostname = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_hostname(&mut self) -> &mut ::std::string::String {
&mut self.hostname
}
// Take field
pub fn take_hostname(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.hostname, ::std::string::String::new())
}
}
impl ::protobuf::Message for WorkerHeartbeatResponse {
fn is_initialized(&self) -> bool {
for v in &self.worker_log {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.health_status, 1, &mut self.unknown_fields)?
},
2 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.worker_log)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.hostname)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.health_status != WorkerHealth::OK {
my_size += ::protobuf::rt::enum_size(1, self.health_status);
}
for value in &self.worker_log {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if !self.hostname.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.hostname);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.health_status != WorkerHealth::OK {
os.write_enum(1, ::protobuf::ProtobufEnum::value(&self.health_status))?;
}
for v in &self.worker_log {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if !self.hostname.is_empty() {
os.write_string(3, &self.hostname)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> WorkerHeartbeatResponse {
WorkerHeartbeatResponse::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<WorkerHealth>>(
"health_status",
|m: &WorkerHeartbeatResponse| { &m.health_status },
|m: &mut WorkerHeartbeatResponse| { &mut m.health_status },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<Event>>(
"worker_log",
|m: &WorkerHeartbeatResponse| { &m.worker_log },
|m: &mut WorkerHeartbeatResponse| { &mut m.worker_log },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"hostname",
|m: &WorkerHeartbeatResponse| { &m.hostname },
|m: &mut WorkerHeartbeatResponse| { &mut m.hostname },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<WorkerHeartbeatResponse>(
"WorkerHeartbeatResponse",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static WorkerHeartbeatResponse {
static instance: ::protobuf::rt::LazyV2<WorkerHeartbeatResponse> = ::protobuf::rt::LazyV2::INIT;
instance.get(WorkerHeartbeatResponse::new)
}
}
impl ::protobuf::Clear for WorkerHeartbeatResponse {
fn clear(&mut self) {
self.health_status = WorkerHealth::OK;
self.worker_log.clear();
self.hostname.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for WorkerHeartbeatResponse {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for WorkerHeartbeatResponse {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum WorkerHealth {
OK = 0,
RECEIVED_SHUTDOWN_SIGNAL = 1,
INTERNAL_ERROR = 2,
SHUTTING_DOWN = 3,
}
impl ::protobuf::ProtobufEnum for WorkerHealth {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<WorkerHealth> {
match value {
0 => ::std::option::Option::Some(WorkerHealth::OK),
1 => ::std::option::Option::Some(WorkerHealth::RECEIVED_SHUTDOWN_SIGNAL),
2 => ::std::option::Option::Some(WorkerHealth::INTERNAL_ERROR),
3 => ::std::option::Option::Some(WorkerHealth::SHUTTING_DOWN),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [WorkerHealth] = &[
WorkerHealth::OK,
WorkerHealth::RECEIVED_SHUTDOWN_SIGNAL,
WorkerHealth::INTERNAL_ERROR,
WorkerHealth::SHUTTING_DOWN,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<WorkerHealth>("WorkerHealth", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for WorkerHealth {
}
impl ::std::default::Default for WorkerHealth {
fn default() -> Self {
WorkerHealth::OK
}
}
impl ::protobuf::reflect::ProtobufValue for WorkerHealth {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
#[derive(Clone,PartialEq,Eq,Debug,Hash)]
pub enum WorkerShutdownMode {
DEFAULT = 0,
NOT_CONFIGURED = 1,
WAIT_FOR_COORDINATOR = 2,
SHUTDOWN_AFTER_TIMEOUT = 3,
}
impl ::protobuf::ProtobufEnum for WorkerShutdownMode {
fn value(&self) -> i32 {
*self as i32
}
fn from_i32(value: i32) -> ::std::option::Option<WorkerShutdownMode> {
match value {
0 => ::std::option::Option::Some(WorkerShutdownMode::DEFAULT),
1 => ::std::option::Option::Some(WorkerShutdownMode::NOT_CONFIGURED),
2 => ::std::option::Option::Some(WorkerShutdownMode::WAIT_FOR_COORDINATOR),
3 => ::std::option::Option::Some(WorkerShutdownMode::SHUTDOWN_AFTER_TIMEOUT),
_ => ::std::option::Option::None
}
}
fn values() -> &'static [Self] {
static values: &'static [WorkerShutdownMode] = &[
WorkerShutdownMode::DEFAULT,
WorkerShutdownMode::NOT_CONFIGURED,
WorkerShutdownMode::WAIT_FOR_COORDINATOR,
WorkerShutdownMode::SHUTDOWN_AFTER_TIMEOUT,
];
values
}
fn enum_descriptor_static() -> &'static ::protobuf::reflect::EnumDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::EnumDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
::protobuf::reflect::EnumDescriptor::new_pb_name::<WorkerShutdownMode>("WorkerShutdownMode", file_descriptor_proto())
})
}
}
impl ::std::marker::Copy for WorkerShutdownMode {
}
impl ::std::default::Default for WorkerShutdownMode {
fn default() -> Self {
WorkerShutdownMode::DEFAULT
}
}
impl ::protobuf::reflect::ProtobufValue for WorkerShutdownMode {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Enum(::protobuf::ProtobufEnum::descriptor(self))
}
}
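// Illustrative usage sketch (not part of the generated file): building and round-tripping a
// WorkerHeartbeatRequest with the accessors generated above. The field values here are made up;
// `write_to_bytes` is a provided method of the `::protobuf::Message` trait, and
// `::protobuf::parse_from_bytes` is the same helper used for the file descriptor below.
//
//     let mut watchdog = WatchdogConfig::new();
//     watchdog.set_timeout_ms(5_000);
//
//     let mut req = WorkerHeartbeatRequest::new();
//     req.set_shutdown_mode(WorkerShutdownMode::WAIT_FOR_COORDINATOR);
//     req.set_watchdog_config(watchdog);
//
//     let bytes = ::protobuf::Message::write_to_bytes(&req).expect("serialize");
//     let parsed: WorkerHeartbeatRequest = ::protobuf::parse_from_bytes(&bytes).expect("parse");
//     assert_eq!(parsed.get_watchdog_config().get_timeout_ms(), 5_000);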
static file_descriptor_proto_data: &'static [u8] = b"\
\n\x20tensorflow/core/util/event.proto\x12\ntensorflow\x1a'tensorflow/co\
re/framework/summary.proto\"\xa4\x03\n\x05Event\x12\x1b\n\twall_time\x18\
\x01\x20\x01(\x01R\x08wallTime\x12\x12\n\x04step\x18\x02\x20\x01(\x03R\
\x04step\x12#\n\x0cfile_version\x18\x03\x20\x01(\tH\0R\x0bfileVersion\
\x12\x1d\n\tgraph_def\x18\x04\x20\x01(\x0cH\0R\x08graphDef\x12/\n\x07sum\
mary\x18\x05\x20\x01(\x0b2\x13.tensorflow.SummaryH\0R\x07summary\x129\n\
\x0blog_message\x18\x06\x20\x01(\x0b2\x16.tensorflow.LogMessageH\0R\nlog\
Message\x129\n\x0bsession_log\x18\x07\x20\x01(\x0b2\x16.tensorflow.Sessi\
onLogH\0R\nsessionLog\x12O\n\x13tagged_run_metadata\x18\x08\x20\x01(\x0b\
2\x1d.tensorflow.TaggedRunMetadataH\0R\x11taggedRunMetadata\x12&\n\x0eme\
ta_graph_def\x18\t\x20\x01(\x0cH\0R\x0cmetaGraphDefB\x06\n\x04what\"\xa9\
\x01\n\nLogMessage\x122\n\x05level\x18\x01\x20\x01(\x0e2\x1c.tensorflow.\
LogMessage.LevelR\x05level\x12\x18\n\x07message\x18\x02\x20\x01(\tR\x07m\
essage\"M\n\x05Level\x12\x0b\n\x07UNKNOWN\x10\0\x12\r\n\tDEBUGGING\x10\n\
\x12\x08\n\x04INFO\x10\x14\x12\x08\n\x04WARN\x10\x1e\x12\t\n\x05ERROR\
\x10(\x12\t\n\x05FATAL\x102\"\xd3\x01\n\nSessionLog\x12<\n\x06status\x18\
\x01\x20\x01(\x0e2$.tensorflow.SessionLog.SessionStatusR\x06status\x12'\
\n\x0fcheckpoint_path\x18\x02\x20\x01(\tR\x0echeckpointPath\x12\x10\n\
\x03msg\x18\x03\x20\x01(\tR\x03msg\"L\n\rSessionStatus\x12\x16\n\x12STAT\
US_UNSPECIFIED\x10\0\x12\t\n\x05START\x10\x01\x12\x08\n\x04STOP\x10\x02\
\x12\x0e\n\nCHECKPOINT\x10\x03\"H\n\x11TaggedRunMetadata\x12\x10\n\x03ta\
g\x18\x01\x20\x01(\tR\x03tag\x12!\n\x0crun_metadata\x18\x02\x20\x01(\x0c\
R\x0brunMetadata\"/\n\x0eWatchdogConfig\x12\x1d\n\ntimeout_ms\x18\x01\
\x20\x01(\x03R\ttimeoutMs\"\xa2\x01\n\x16WorkerHeartbeatRequest\x12C\n\r\
shutdown_mode\x18\x01\x20\x01(\x0e2\x1e.tensorflow.WorkerShutdownModeR\
\x0cshutdownMode\x12C\n\x0fwatchdog_config\x18\x02\x20\x01(\x0b2\x1a.ten\
sorflow.WatchdogConfigR\x0ewatchdogConfig\"\xa6\x01\n\x17WorkerHeartbeat\
Response\x12=\n\rhealth_status\x18\x01\x20\x01(\x0e2\x18.tensorflow.Work\
erHealthR\x0chealthStatus\x120\n\nworker_log\x18\x02\x20\x03(\x0b2\x11.t\
ensorflow.EventR\tworkerLog\x12\x1a\n\x08hostname\x18\x03\x20\x01(\tR\
\x08hostname*[\n\x0cWorkerHealth\x12\x06\n\x02OK\x10\0\x12\x1c\n\x18RECE\
IVED_SHUTDOWN_SIGNAL\x10\x01\x12\x12\n\x0eINTERNAL_ERROR\x10\x02\x12\x11\
\n\rSHUTTING_DOWN\x10\x03*k\n\x12WorkerShutdownMode\x12\x0b\n\x07DEFAULT\
\x10\0\x12\x12\n\x0eNOT_CONFIGURED\x10\x01\x12\x18\n\x14WAIT_FOR_COORDIN\
ATOR\x10\x02\x12\x1a\n\x16SHUTDOWN_AFTER_TIMEOUT\x10\x03B'\n\x13org.tens\
orflow.utilB\x0bEventProtosP\x01\xf8\x01\x01b\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
}
| {
::protobuf::text_format::fmt(self, f)
} |
_metastore_operations.py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Optional, TypeVar
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator_async import distributed_trace_async | from ... import models as _models
from ..._vendor import _convert_request
from ...operations._metastore_operations import build_delete_request, build_get_database_operations_request, build_register_request, build_update_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class MetastoreOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.synapse.artifacts.aio.ArtifactsClient`'s
:attr:`metastore` attribute.
"""
models = _models
def __init__(self, *args, **kwargs) -> None:
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
@distributed_trace_async
async def register(
self,
id: str,
input_folder: str,
**kwargs: Any
) -> _models.MetastoreRegistrationResponse:
"""Register files in Syms.
:param id: The name of the database to be created. The name can contain only alphanumeric
characters and should not exceed 24 characters.
:type id: str
:param input_folder: The input folder containing CDM files.
:type input_folder: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreRegistrationResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreRegistrationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreRegistrationResponse]
_register_body = _models.MetastoreRegisterObject(input_folder=input_folder)
_json = self._serialize.body(_register_body, 'MetastoreRegisterObject')
request = build_register_request(
id=id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.register.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreRegistrationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
register.metadata = {'url': "/metastore/create-database-operations/{id}"} # type: ignore
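    # Hypothetical follow-up (inferred from the shared "create-database-operations/{id}" route,
    # not documented in this file): after register returns, the same id can be polled with
    # get_database_operations to check the status of the created database, e.g.
    #
    #   reg = await client.metastore.register(id="salesdb", input_folder="abfss://...")
    #   status = await client.metastore.get_database_operations(id="salesdb")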
@distributed_trace_async
async def get_database_operations(
self,
id: str,
**kwargs: Any
) -> _models.MetastoreRequestSuccessResponse:
"""Gets status of the database.
        :param id: The name of the database whose operation status is requested.
        :type id: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreRequestSuccessResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreRequestSuccessResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreRequestSuccessResponse]
request = build_get_database_operations_request(
id=id,
api_version=api_version,
template_url=self.get_database_operations.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreRequestSuccessResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_database_operations.metadata = {'url': "/metastore/create-database-operations/{id}"} # type: ignore
@distributed_trace_async
async def update(
self,
id: str,
input_folder: str,
**kwargs: Any
) -> _models.MetastoreUpdationResponse:
"""Update files in Syms.
:param id: The name of the database to be updated.
:type id: str
:param input_folder: The input folder containing CDM files.
:type input_folder: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: MetastoreUpdationResponse, or the result of cls(response)
:rtype: ~azure.synapse.artifacts.models.MetastoreUpdationResponse
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
content_type = kwargs.pop('content_type', _headers.pop('Content-Type', "application/json")) # type: Optional[str]
cls = kwargs.pop('cls', None) # type: ClsType[_models.MetastoreUpdationResponse]
_update_body = _models.MetastoreUpdateObject(input_folder=input_folder)
_json = self._serialize.body(_update_body, 'MetastoreUpdateObject')
request = build_update_request(
id=id,
api_version=api_version,
content_type=content_type,
json=_json,
template_url=self.update.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
deserialized = self._deserialize('MetastoreUpdationResponse', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': "/metastore/update-database-operations/{id}"} # type: ignore
@distributed_trace_async
async def delete( # pylint: disable=inconsistent-return-statements
self,
id: str,
**kwargs: Any
) -> None:
"""Remove files in Syms.
        :param id: The name of the database to be removed.
        :type id: str
:keyword api_version: Api Version. Default value is "2021-07-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}) or {})
_headers = kwargs.pop("headers", {}) or {}
_params = case_insensitive_dict(kwargs.pop("params", {}) or {})
api_version = kwargs.pop('api_version', _params.pop('api-version', "2021-07-01-preview")) # type: str
cls = kwargs.pop('cls', None) # type: ClsType[None]
request = build_delete_request(
id=id,
api_version=api_version,
template_url=self.delete.metadata['url'],
headers=_headers,
params=_params,
)
request = _convert_request(request)
path_format_arguments = {
"endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
request.url = self._client.format_url(request.url, **path_format_arguments) # type: ignore
pipeline_response = await self._client._pipeline.run( # type: ignore # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorContract, pipeline_response)
raise HttpResponseError(response=response, model=error)
if cls:
return cls(pipeline_response, None, {})
delete.metadata = {'url': "/metastore/databases/{id}"} # type: ignore | from azure.core.utils import case_insensitive_dict
|