file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---
v1.rs | //! [PUT /_matrix/federation/v1/invite/{roomId}/{eventId}](https://matrix.org/docs/spec/server_server/r0.1.4#put-matrix-federation-v1-invite-roomid-eventid)
use ruma_api::ruma_api;
use ruma_events::{room::member::MemberEventContent, EventType};
use ruma_identifiers::{EventId, RoomId, ServerName, UserId};
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
use super::{InviteEvent, StrippedState};
ruma_api! {
metadata: {
description: "Invites a remote user to a room.",
method: PUT,
name: "create_invite",
path: "/_matrix/federation/v1/invite/:room_id/:event_id",
rate_limited: false,
authentication: ServerSignatures,
}
request: {
/// The room ID that the user is being invited to.
#[ruma_api(path)]
pub room_id: &'a RoomId,
/// The event ID for the invite event, generated by the inviting server.
#[ruma_api(path)]
pub event_id: &'a EventId,
/// The matrix ID of the user who sent the original `m.room.third_party_invite`.
pub sender: &'a UserId,
/// The name of the inviting homeserver.
pub origin: &'a ServerName,
/// A timestamp added by the inviting homeserver.
#[serde(with = "ruma_serde::time::ms_since_unix_epoch")]
pub origin_server_ts: SystemTime,
/// The value `m.room.member`.
#[serde(rename = "type")]
pub kind: EventType,
/// The user ID of the invited member.
pub state_key: &'a UserId,
/// The content of the event.
pub content: MemberEventContent,
/// Information included alongside the event that is not signed.
#[serde(default, skip_serializing_if = "UnsignedEventContent::is_empty")]
pub unsigned: UnsignedEventContent,
}
response: {
/// The response invite event.
#[ruma_api(body)]
#[serde(with = "crate::serde::v1_pdu")]
pub event: InviteEvent,
}
}
/// Information included alongside an event that is not signed.
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
#[cfg_attr(not(feature = "unstable-exhaustive-types"), non_exhaustive)]
pub struct | {
/// An optional list of simplified events to help the receiver of the invite identify the room.
/// The recommended events to include are the join rules, canonical alias, avatar, and name of
/// the room.
#[serde(skip_serializing_if = "<[_]>::is_empty")]
pub invite_room_state: Vec<StrippedState>,
}
impl UnsignedEventContent {
/// Creates an empty `UnsignedEventContent`.
pub fn new() -> Self {
Default::default()
}
/// Checks whether all of the fields are empty.
pub fn is_empty(&self) -> bool {
self.invite_room_state.is_empty()
}
}
/// Initial set of fields of `Request`.
pub struct RequestInit<'a> {
/// The room ID that the user is being invited to.
pub room_id: &'a RoomId,
/// The event ID for the invite event, generated by the inviting server.
pub event_id: &'a EventId,
/// The matrix ID of the user who sent the original `m.room.third_party_invite`.
pub sender: &'a UserId,
/// The name of the inviting homeserver.
pub origin: &'a ServerName,
/// A timestamp added by the inviting homeserver.
pub origin_server_ts: SystemTime,
/// The user ID of the invited member.
pub state_key: &'a UserId,
/// The content of the event.
pub content: MemberEventContent,
/// Information included alongside the event that is not signed.
pub unsigned: UnsignedEventContent,
}
impl<'a> From<RequestInit<'a>> for Request<'a> {
/// Creates a new `Request` with the given parameters.
fn from(init: RequestInit<'a>) -> Self {
Self {
room_id: init.room_id,
event_id: init.event_id,
sender: init.sender,
origin: init.origin,
origin_server_ts: init.origin_server_ts,
kind: EventType::RoomMember,
state_key: init.state_key,
content: init.content,
unsigned: init.unsigned,
}
}
}
impl Response {
/// Creates a new `Response` with the given invite event.
pub fn new(event: InviteEvent) -> Self {
Self { event }
}
}
| UnsignedEventContent |
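The v1.rs row above defines the federation invite endpoint with ruma's `ruma_api!` macro and pairs the non-exhaustive `Request` with a `RequestInit` initializer. A hedged usage sketch of that `From` conversion follows; it is not self-contained, and `room_id`, `event_id`, `sender`, `origin`, `state_key`, and `content` are assumed to be built elsewhere with ruma's usual parsers.

```rust
// Sketch only: relies on the ruma types shown above.
let request = Request::from(RequestInit {
    room_id: &room_id,
    event_id: &event_id,
    sender: &sender,
    origin: &origin,
    origin_server_ts: SystemTime::now(),
    state_key: &state_key,
    content,
    unsigned: UnsignedEventContent::new(),
});
// `kind` is filled in as `EventType::RoomMember` by the `From` impl above.
```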
621.py | class Solution:
| def XXX(self, x: int) -> int:
if x == 1:
return 1
left = 0
right = x
while right - left > 1:
mid = (left + right) // 2
a = mid ** 2
if a == x:
return mid
if a > x:
right = mid
else:
left = mid
return left |
|
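The 621.py row is a fill-in-the-middle sample of integer square root by binary search; the invariant is that `left` squared never exceeds `x` while `right` squared may. A minimal, self-contained Rust sketch of the same algorithm, widening to `u128` so the squaring cannot overflow:

```rust
use std::cmp::Ordering;

/// Floor of the square root of `x`, found by binary search.
fn isqrt(x: u64) -> u64 {
    if x < 2 {
        return x; // 0 and 1 are their own integer square roots
    }
    let (mut left, mut right) = (0u64, x);
    while right - left > 1 {
        let mid = left + (right - left) / 2;
        // Widen before squaring so large inputs cannot overflow.
        match ((mid as u128) * (mid as u128)).cmp(&(x as u128)) {
            Ordering::Equal => return mid,
            Ordering::Greater => right = mid, // mid is too big
            Ordering::Less => left = mid,     // mid still fits
        }
    }
    left // largest value whose square does not exceed x
}

fn main() {
    assert_eq!(isqrt(8), 2);
    assert_eq!(isqrt(9), 3);
}
```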
alert_watcher_test_response.py | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.8.1 Python SDK
Pure Storage FlashBlade REST 1.8.1 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentation can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.8.1
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class AlertWatcherTestResponse(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
#BEGIN_CUSTOM
# IR-51527: Prevent Pytest from attempting to collect this class based on name.
__test__ = False
#END_CUSTOM
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'pagination_info': 'PaginationInfo',
'items': 'list[AlertWatcherTest]'
}
attribute_map = {
'pagination_info': 'pagination_info',
'items': 'items'
}
def __init__(self, pagination_info=None, items=None): # noqa: E501
"""AlertWatcherTestResponse - a model defined in Swagger""" # noqa: E501
self._pagination_info = None
self._items = None
self.discriminator = None
if pagination_info is not None:
self.pagination_info = pagination_info
if items is not None:
self.items = items
@property
def pagination_info(self):
"""Gets the pagination_info of this AlertWatcherTestResponse. # noqa: E501
pagination information, only available in GET requests # noqa: E501
:return: The pagination_info of this AlertWatcherTestResponse. # noqa: E501
:rtype: PaginationInfo
"""
return self._pagination_info
@pagination_info.setter
def pagination_info(self, pagination_info):
"""Sets the pagination_info of this AlertWatcherTestResponse.
pagination information, only available in GET requests # noqa: E501
:param pagination_info: The pagination_info of this AlertWatcherTestResponse. # noqa: E501
:type: PaginationInfo
"""
self._pagination_info = pagination_info
@property
def items(self):
"""Gets the items of this AlertWatcherTestResponse. # noqa: E501
a list of alert watcher test results # noqa: E501
:return: The items of this AlertWatcherTestResponse. # noqa: E501
:rtype: list[AlertWatcherTest]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this AlertWatcherTestResponse.
a list of alert watcher test results # noqa: E501
:param items: The items of this AlertWatcherTestResponse. # noqa: E501
:type: list[AlertWatcherTest]
"""
self._items = items
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
)) | if issubclass(AlertWatcherTestResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AlertWatcherTestResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | else:
result[attr] = value |
detect.rs | use std::fmt;
use std::collections::BTreeMap;
use once_cell::sync::OnceCell;
use serde::Serialize;
use crate::server::version::Version;
use crate::server::os_trait::CurrentOs;
use crate::server::methods::{self, InstallMethod};
use crate::server::distribution::{DistributionRef};
use anyhow::Context;
#[cfg(target_arch="x86_64")]
pub const ARCH: &str = "x86_64";
#[cfg(not(any(
target_arch="x86_64",
)))]
compile_error!("Unsupported architecture, supported: x86_64");
#[derive(Clone, Debug, Default)]
pub struct Lazy<T>(once_cell::sync::OnceCell<T>);
#[derive(Debug)]
pub enum VersionQuery {
Stable(Option<Version<String>>),
Nightly,
}
#[derive(Clone, Serialize, Debug)]
pub struct VersionResult {
pub package_name: String,
pub major_version: Version<String>,
pub version: Version<String>,
pub revision: String,
}
#[derive(Clone, Serialize, Debug)]
pub struct InstalledPackage {
pub package_name: String,
pub major_version: Version<String>, | }
impl<T: Serialize> Serialize for Lazy<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
S: serde::Serializer
{
self.0.get().serialize(serializer)
}
}
impl<T> Lazy<T> {
pub fn lazy() -> Lazy<T> {
Lazy(OnceCell::new())
}
pub fn eager(val: T) -> Lazy<T> {
let cell = OnceCell::new();
cell.set(val).map_err(|_| "cell failed").unwrap();
Lazy(cell)
}
pub fn get_or_init<F>(&self, f: F) -> &T
where F: FnOnce() -> T
{
self.0.get_or_init(f)
}
pub fn get_or_try_init<F, E>(&self, f: F) -> Result<&T, E>
where F: FnOnce() -> Result<T, E>
{
self.0.get_or_try_init(f)
}
}
pub fn current_os() -> anyhow::Result<Box<dyn CurrentOs>> {
use crate::server::{windows, macos, linux, unknown_os};
if cfg!(windows) {
Ok(Box::new(windows::Windows::new()))
} else if cfg!(target_os="macos") {
Ok(Box::new(macos::Macos::new()))
} else if cfg!(target_os="linux") {
linux::detect_distro()
.context("error detecting linux distribution")
} else {
Ok(Box::new(unknown_os::Unknown::new()))
}
}
pub fn main(_arg: &crate::server::options::Detect)
-> Result<(), anyhow::Error>
{
#[derive(Serialize)]
struct Info {
os_type: &'static str,
os_info: serde_json::Value,
detected: methods::InstallationMethods,
methods: BTreeMap<InstallMethod, serde_json::Value>,
}
let os = current_os()?;
let detected = os.get_available_methods()?;
let methods = detected.instantiate_all(&*os, true)?;
serde_json::to_writer_pretty(std::io::stdout(), &Info {
os_type: os.get_type_name(),
os_info: os.detect_all(),
detected,
methods: methods.iter()
.map(|(mname, meth)| (mname.clone(), meth.detect_all()))
.collect(),
})?;
Ok(())
}
impl VersionQuery {
pub fn new(nightly: bool, version: Option<&Version<String>>)
-> VersionQuery
{
if nightly {
VersionQuery::Nightly
} else {
VersionQuery::Stable(version.cloned())
}
}
pub fn is_nightly(&self) -> bool {
matches!(self, VersionQuery::Nightly)
}
pub fn is_specific(&self) -> bool {
matches!(self, VersionQuery::Stable(Some(..)))
}
pub fn to_arg(&self) -> Option<String> {
use VersionQuery::*;
match self {
Stable(None) => None,
Stable(Some(ver)) => Some(format!("--version={}", ver)),
Nightly => Some("--nightly".into()),
}
}
pub fn installed_matches(&self, pkg: &InstalledPackage) -> bool {
use VersionQuery::*;
match self {
Nightly => pkg.is_nightly(),
Stable(None) => !pkg.is_nightly(),
Stable(Some(v)) => &pkg.major_version == v && !pkg.is_nightly(),
}
}
pub fn distribution_matches(&self, distr: &DistributionRef) -> bool {
use VersionQuery as Q;
use crate::server::distribution::MajorVersion as V;
match (self, distr.major_version()) {
(Q::Nightly, V::Nightly) => true,
(Q::Stable(None), V::Stable(_)) => true,
(Q::Stable(Some(q)), V::Stable(v)) if q == v => true,
_ => false,
}
}
}
impl fmt::Display for VersionQuery {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use VersionQuery::*;
match self {
Stable(None) => "stable".fmt(f),
Stable(Some(ver)) => ver.fmt(f),
Nightly => "nightly".fmt(f),
}
}
}
impl InstalledPackage {
pub fn is_nightly(&self) -> bool {
// TODO(tailhook) get nightly flag from the source index
return self.version.as_ref().contains(".dev")
}
pub fn full_version(&self) -> Version<String> {
Version(format!("{}-{}", self.version, self.revision))
}
} | pub version: Version<String>,
pub revision: String, |
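detect.rs builds its `Lazy` wrapper on `once_cell::sync::OnceCell`, which computes a value once on first access or accepts it eagerly via `set`. A minimal sketch of those semantics, assuming the `once_cell` crate as a dependency:

```rust
use once_cell::sync::OnceCell;

fn main() {
    let cell: OnceCell<String> = OnceCell::new();
    // First access runs the closure and caches the result...
    let v = cell.get_or_init(|| "x86_64".to_string());
    assert_eq!(v, "x86_64");
    // ...later initializers are ignored; the cached value wins.
    let v2 = cell.get_or_init(|| "aarch64".to_string());
    assert_eq!(v2, "x86_64");
    // `set` fails (returns Err) once the cell is filled, which is why
    // `Lazy::eager` above can unwrap on a freshly created cell.
    assert!(cell.set("riscv64".to_string()).is_err());
}
```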
transport.rs | use std::fmt;
use std::io;
use std::io::{Read, Write};
use bytes::buf::FromBuf;
use bytes::BytesMut;
use futures::{Async, Poll, Stream};
use slog;
use tokio_io::AsyncRead;
/// Decoding of items in buffers.
///
/// An implementation of `Decoder` takes a buffer of bytes and is expected
/// to decode exactly one item. Any other result should be considered an error.
///
/// Implementations are able to track state on `self`.
pub trait Decoder {
/// The type of decoded items.
type Item;
/// The type of unrecoverable frame decoding errors.
///
/// If an individual message is ill-formed but can be ignored without
/// interfering with the processing of future messages, it may be more
/// useful to report the failure as an `Item`.
///
/// `From<io::Error>` is required in the interest of making `Error` suitable
/// for returning directly from a `FramedRead`, and to enable the default
/// implementation of `decode_eof` to yield an `io::Error` when the decoder
/// fails to consume all available data.
///
/// Note that implementors of this trait can simply indicate `type Error =
/// io::Error` to use I/O errors as this type.
type Error: From<io::Error>;
/// Decode an item from the provided buffer of bytes.
///
/// The length of the buffer will exactly match the number of bytes
/// returned by the last call made to `read_len`.
///
/// If the bytes in the buffer are malformed then an error is
/// returned indicating why. This indicates the stream is now
/// corrupt and should be terminated.
fn decode(&mut self, src: &mut BytesMut) -> Result<Self::Item, Self::Error>;
fn read_len(&self) -> usize;
}
/// Trait of helper objects to write out items as bytes.
pub trait Encoder {
/// The type of items consumed by the `Encoder`
type Item;
/// The type of encoding errors.
///
/// Required to implement `From<io::Error>` so it can be
/// used as the error type of a Sink that does I/O.
type Error: From<io::Error>;
/// Encodes an item into the buffer provided.
///
/// This method will encode `item` into the byte buffer provided by `buf`.
/// The `buf` provided may be re-used for subsequent encodings.
fn encode(&mut self, item: Self::Item, buf: &mut BytesMut) -> Result<(), Self::Error>;
}
/// Synchronous sink for items
pub trait SyncSink {
type SinkItem;
type SinkError;
fn send(&mut self, item: Self::SinkItem) -> Result<(), Self::SinkError>;
fn close(&mut self) -> Result<(), Self::SinkError> {
Ok(())
}
}
pub struct Transport<T, E, D> {
inner: T,
encoder: E,
decoder: D,
logger: slog::Logger,
}
impl<T, E, D> Transport<T, E, D>
where
T: AsyncRead + Write,
E: Encoder,
D: Decoder,
{
pub fn new(inner: T, encoder: E, decoder: D, logger: slog::Logger) -> Transport<T, E, D> {
Transport {
decoder,
encoder,
inner,
logger,
}
}
}
impl<T: Write, E, D> Write for Transport<T, E, D> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
self.inner.write(src)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<T, E, D> Stream for Transport<T, E, D>
where
T: AsyncRead,
D: Decoder,
{
type Item = D::Item;
type Error = D::Error;
fn | (&mut self) -> Poll<Option<Self::Item>, Self::Error> {
let read_len = self.decoder.read_len();
let mut buffer = vec![0u8; read_len];
match self.inner.read(&mut buffer[..]) {
Ok(0) => {
trace!(self.logger, "CharacterDevice::Stream::poll => Ok");
Ok(Async::Ready(None))
}
Ok(n) => {
if n != read_len {
return Err(io::Error::new(io::ErrorKind::InvalidData, "short read").into());
}
let bytes = &mut BytesMut::from_buf(buffer);
trace!(self.logger, "CharacterDevice::Stream::poll => Ok"; "bytes" => ?&bytes);
let frame = self.decoder.decode(bytes)?;
Ok(Async::Ready(Some(frame)))
}
Err(ref e) if e.kind() == ::std::io::ErrorKind::WouldBlock => {
trace!(self.logger, "CharacterDevice::Stream::poll => WouldBlock");
Ok(Async::NotReady)
}
Err(e) => {
trace!(self.logger, "CharacterDevice::Stream::poll => Err");
Err(e.into())
}
}
}
}
impl<T, E, D> SyncSink for Transport<T, E, D>
where
T: Write,
E: Encoder,
{
type SinkItem = E::Item;
type SinkError = E::Error;
fn send(&mut self, item: Self::SinkItem) -> Result<(), Self::SinkError> {
let mut buffer = BytesMut::new();
self.encoder.encode(item, &mut buffer)?;
let bytes = buffer.take();
trace!(self.logger, "CharacterDevice::SyncSink::send"; "bytes" => ?&bytes);
match self.inner.write(&bytes) {
Ok(0) => Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write item to transport",
)
.into()),
Ok(n) if n == bytes.len() => Ok(()),
Ok(_) => Err(io::Error::new(
io::ErrorKind::Other,
"failed to write entire item to transport",
)
.into()),
Err(e) => Err(e.into()),
}
}
}
impl<T, E, D> fmt::Debug for Transport<T, E, D>
where
T: fmt::Debug,
E: fmt::Debug,
D: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("CharacterDevice")
.field("inner", &self.inner)
.field("encoder", &self.encoder)
.field("decoder", &self.decoder)
.finish()
}
}
| poll |
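transport.rs reads exactly `read_len()` bytes per frame and hands the buffer to `Decoder::decode`. A minimal sketch of a conforming decoder, one that treats every frame as a 4-byte big-endian `u32`; it assumes the `Decoder` trait defined in this row and the bytes-0.4-era `BytesMut` API:

```rust
use std::io;
use bytes::BytesMut;

struct U32Decoder;

impl Decoder for U32Decoder {
    type Item = u32;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<u32, io::Error> {
        // The transport guarantees `src.len() == self.read_len()`, but we
        // check defensively before consuming the frame.
        if src.len() != self.read_len() {
            return Err(io::Error::new(io::ErrorKind::InvalidData, "bad frame length"));
        }
        let b = src.split_to(4);
        Ok(u32::from_be_bytes([b[0], b[1], b[2], b[3]]))
    }

    fn read_len(&self) -> usize {
        4 // fixed-size frames: the transport reads exactly this many bytes
    }
}
```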
config.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// Service config.
///
///
/// Service configuration allows for customization of endpoints, region, credentials providers,
/// and retry configuration. Generally, it is constructed automatically for you from a shared
/// configuration loaded by the `aws-config` crate. For example:
///
/// ```ignore
/// // Load a shared config from the environment
/// let shared_config = aws_config::from_env().load().await;
/// // The client constructor automatically converts the shared config into the service config
/// let client = Client::new(&shared_config);
/// ```
///
/// The service config can also be constructed manually using its builder.
///
pub struct Config {
pub(crate) make_token: crate::idempotency_token::IdempotencyTokenProvider,
app_name: Option<aws_types::app_name::AppName>,
pub(crate) timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
pub(crate) sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
pub(crate) retry_config: Option<aws_smithy_types::retry::RetryConfig>,
pub(crate) endpoint_resolver: ::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>,
pub(crate) region: Option<aws_types::region::Region>,
pub(crate) credentials_provider: aws_types::credentials::SharedCredentialsProvider,
}
impl std::fmt::Debug for Config {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut config = f.debug_struct("Config");
config.finish()
}
}
impl Config {
/// Constructs a config builder.
pub fn builder() -> Builder {
Builder::default()
}
/// Returns the name of the app that is using the client, if it was provided.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn app_name(&self) -> Option<&aws_types::app_name::AppName> {
self.app_name.as_ref()
}
/// Creates a new [service config](crate::Config) from a [shared `config`](aws_types::config::Config).
pub fn new(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
/// The signature version 4 service signing name to use in the credential scope when signing requests.
///
/// The signing service may be overridden by the `Endpoint`, or by specifying a custom
/// [`SigningService`](aws_types::SigningService) during operation construction
pub fn signing_service(&self) -> &'static str {
"resiliencehub"
}
}
/// Builder for creating a `Config`.
#[derive(Default)]
pub struct Builder {
make_token: Option<crate::idempotency_token::IdempotencyTokenProvider>,
app_name: Option<aws_types::app_name::AppName>,
timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
retry_config: Option<aws_smithy_types::retry::RetryConfig>,
endpoint_resolver: Option<::std::sync::Arc<dyn aws_endpoint::ResolveAwsEndpoint>>,
region: Option<aws_types::region::Region>,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
}
impl Builder {
/// Constructs a config builder.
pub fn new() -> Self {
Self::default()
}
/// Sets the idempotency token provider to use for service calls that require tokens.
pub fn make_token(
mut self,
make_token: impl Into<crate::idempotency_token::IdempotencyTokenProvider>,
) -> Self {
self.make_token = Some(make_token.into());
self
}
/// Sets the name of the app that is using the client.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn app_name(mut self, app_name: aws_types::app_name::AppName) -> Self {
self.set_app_name(Some(app_name));
self
}
/// Sets the name of the app that is using the client.
///
/// This _optional_ name is used to identify the application in the user agent that
/// gets sent along with requests.
pub fn set_app_name(&mut self, app_name: Option<aws_types::app_name::AppName>) -> &mut Self {
self.app_name = app_name;
self
}
/// Set the timeout_config for the builder
///
/// # Examples
///
/// ```no_run
/// # use std::time::Duration;
/// use aws_sdk_resiliencehub::config::Config;
/// use aws_smithy_types::timeout::TimeoutConfig;
///
/// let timeout_config = TimeoutConfig::new()
/// .with_api_call_attempt_timeout(Some(Duration::from_secs(1)));
/// let config = Config::builder().timeout_config(timeout_config).build();
/// ```
pub fn timeout_config(
mut self,
timeout_config: aws_smithy_types::timeout::TimeoutConfig,
) -> Self {
self.set_timeout_config(Some(timeout_config));
self
}
/// Set the timeout_config for the builder
///
/// # Examples
///
/// ```no_run
/// # use std::time::Duration;
/// use aws_sdk_resiliencehub::config::{Builder, Config};
/// use aws_smithy_types::timeout::TimeoutConfig;
///
/// fn set_request_timeout(builder: &mut Builder) {
/// let timeout_config = TimeoutConfig::new()
/// .with_api_call_timeout(Some(Duration::from_secs(3)));
/// builder.set_timeout_config(Some(timeout_config));
/// }
///
/// let mut builder = Config::builder();
/// set_request_timeout(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_timeout_config(
&mut self,
timeout_config: Option<aws_smithy_types::timeout::TimeoutConfig>,
) -> &mut Self {
self.timeout_config = timeout_config;
self
}
/// Set the sleep_impl for the builder
///
/// # Examples
///
/// ```no_run
/// use aws_sdk_resiliencehub::config::Config;
/// use aws_smithy_async::rt::sleep::AsyncSleep;
/// use aws_smithy_async::rt::sleep::Sleep;
///
/// #[derive(Debug)]
/// pub struct ForeverSleep;
///
/// impl AsyncSleep for ForeverSleep {
/// fn sleep(&self, duration: std::time::Duration) -> Sleep {
/// Sleep::new(std::future::pending())
/// }
/// }
///
/// let sleep_impl = std::sync::Arc::new(ForeverSleep);
/// let config = Config::builder().sleep_impl(sleep_impl).build();
/// ```
pub fn sleep_impl(
mut self,
sleep_impl: std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>,
) -> Self {
self.set_sleep_impl(Some(sleep_impl));
self
}
/// Set the sleep_impl for the builder
///
/// # Examples
///
/// ```no_run
/// use aws_sdk_resiliencehub::config::{Builder, Config};
/// use aws_smithy_async::rt::sleep::AsyncSleep;
/// use aws_smithy_async::rt::sleep::Sleep;
///
/// #[derive(Debug)]
/// pub struct ForeverSleep;
///
/// impl AsyncSleep for ForeverSleep {
/// fn sleep(&self, duration: std::time::Duration) -> Sleep {
/// Sleep::new(std::future::pending())
/// }
/// }
///
/// fn set_never_ending_sleep_impl(builder: &mut Builder) {
/// let sleep_impl = std::sync::Arc::new(ForeverSleep);
/// builder.set_sleep_impl(Some(sleep_impl));
/// }
///
/// let mut builder = Config::builder();
/// set_never_ending_sleep_impl(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_sleep_impl(
&mut self,
sleep_impl: Option<std::sync::Arc<dyn aws_smithy_async::rt::sleep::AsyncSleep>>,
) -> &mut Self {
self.sleep_impl = sleep_impl;
self
}
/// Set the retry_config for the builder
///
/// # Examples
/// ```no_run
/// use aws_sdk_resiliencehub::config::Config;
/// use aws_smithy_types::retry::RetryConfig;
///
/// let retry_config = RetryConfig::new().with_max_attempts(5);
/// let config = Config::builder().retry_config(retry_config).build();
/// ```
pub fn retry_config(mut self, retry_config: aws_smithy_types::retry::RetryConfig) -> Self {
self.set_retry_config(Some(retry_config));
self
}
/// Set the retry_config for the builder
///
/// # Examples
/// ```no_run
/// use aws_sdk_resiliencehub::config::{Builder, Config};
/// use aws_smithy_types::retry::RetryConfig;
///
/// fn disable_retries(builder: &mut Builder) {
/// let retry_config = RetryConfig::new().with_max_attempts(1);
/// builder.set_retry_config(Some(retry_config));
/// }
///
/// let mut builder = Config::builder();
/// disable_retries(&mut builder);
/// let config = builder.build();
/// ```
pub fn set_retry_config(
&mut self,
retry_config: Option<aws_smithy_types::retry::RetryConfig>,
) -> &mut Self {
self.retry_config = retry_config;
self
}
// TODO(docs): include an example of using a static endpoint
/// Sets the endpoint resolver to use when making requests.
pub fn endpoint_resolver(
mut self,
endpoint_resolver: impl aws_endpoint::ResolveAwsEndpoint + 'static,
) -> Self {
self.endpoint_resolver = Some(::std::sync::Arc::new(endpoint_resolver));
self
}
/// Sets the AWS region to use when making requests.
pub fn region(mut self, region: impl Into<Option<aws_types::region::Region>>) -> Self {
self.region = region.into();
self
}
/// Sets the credentials provider for this service
pub fn credentials_provider(
mut self,
credentials_provider: impl aws_types::credentials::ProvideCredentials + 'static,
) -> Self |
/// Sets the credentials provider for this service
pub fn set_credentials_provider(
&mut self,
credentials_provider: Option<aws_types::credentials::SharedCredentialsProvider>,
) -> &mut Self {
self.credentials_provider = credentials_provider;
self
}
/// Builds a [`Config`].
pub fn build(self) -> Config {
Config {
make_token: self
.make_token
.unwrap_or_else(crate::idempotency_token::default_provider),
app_name: self.app_name,
timeout_config: self.timeout_config,
sleep_impl: self.sleep_impl,
retry_config: self.retry_config,
endpoint_resolver: self
.endpoint_resolver
.unwrap_or_else(|| ::std::sync::Arc::new(crate::aws_endpoint::endpoint_resolver())),
region: self.region,
credentials_provider: self.credentials_provider.unwrap_or_else(|| {
aws_types::credentials::SharedCredentialsProvider::new(
crate::no_credentials::NoCredentials,
)
}),
}
}
}
impl From<&aws_types::config::Config> for Builder {
fn from(input: &aws_types::config::Config) -> Self {
let mut builder = Builder::default();
builder = builder.region(input.region().cloned());
builder.set_retry_config(input.retry_config().cloned());
builder.set_timeout_config(input.timeout_config().cloned());
builder.set_sleep_impl(input.sleep_impl().clone());
builder.set_credentials_provider(input.credentials_provider().cloned());
builder.set_app_name(input.app_name().cloned());
builder
}
}
impl From<&aws_types::config::Config> for Config {
fn from(config: &aws_types::config::Config) -> Self {
Builder::from(config).build()
}
}
| {
self.credentials_provider = Some(aws_types::credentials::SharedCredentialsProvider::new(
credentials_provider,
));
self
} |
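The generated config.rs row exposes a fluent builder whose `build()` supplies fallbacks (idempotency token provider, endpoint resolver, `NoCredentials`). A hedged sketch of manual construction using only the setters documented above; the region string is a placeholder:

```rust
// Sketch only: combines builder methods documented in this row.
let config = Config::builder()
    .region(aws_types::region::Region::new("us-east-1"))
    .retry_config(aws_smithy_types::retry::RetryConfig::new().with_max_attempts(3))
    .build();
// Anything not set falls back to the defaults wired up in `build()`.
```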
gruntfile.js | 'use strict'
var path = require('path')
module.exports = function(grunt) {
var pkg = grunt.file.readJSON('package.json')
var paths = {
app: path.join(path.resolve(), '/'),
test: path.join(path.resolve(), '/test/**/*.js'),
}
var notify = {
test: {
options: {
title: pkg.name + ': Test',
message: 'Finished!',
},
},
success: {
options: {
title: pkg.name + ': Success',
message: '## Already! ##',
},
},
}
var shell = {
exec: {
command: 'node index.js',
},
verpatch: { | command:
'npm version --no-git-tag-version patch && git add -A && git commit -a -m "Version Patch Updated"',
},
verminor: {
command:
'npm version --no-git-tag-version minor && git add -A && git commit -a -m "Version Minor Updated"',
},
vermajor: {
command:
'npm version --no-git-tag-version major && git add -A && git commit -a -m "Version Major Updated"',
},
deploy: {
command: 'npm publish',
},
gitflowrelease: {
command: 'git flow release start ' + pkg.version,
},
gitflowreleasefinish: {
command: 'git flow release finish -m <%= pkg.version %> ' + pkg.version,
},
}
var clean = {
src: [
path.resolve() + '/*.log',
path.resolve() + '/*.txt',
path.resolve() + '/*.zip',
path.resolve() + '/*.heapsnapshot',
],
}
var mochaTest = {
test: {
options: {
reporter: 'spec',
captureFile: 'results.txt',
timeout: 60000,
},
src: ['<%= paths.test %>'],
},
}
var watch = {
debug: {
files: ['<%= paths.app %>/**/*.js', '<%= paths.test %>'],
tasks: ['env:debugtest', 'mochaTest', 'env:debugdev'],
},
js: {
files: ['<%= paths.app %>/**/*.js', '<%= paths.test %>'],
tasks: ['env:test', 'mochaTest', 'env:dev'],
},
}
var nodemon = {
default: {
script: '<%= paths.app %>/index.js',
options: {
cwd: path.resolve(),
watch: ['<%= paths.app %>'],
ignore: ['node_modules'],
},
},
}
var concurrent = {
debug: {
tasks: ['nodemon', 'watch:debug'],
},
default: {
tasks: ['nodemon', 'watch:js'],
},
options: {
logConcurrentOutput: true,
},
}
grunt.initConfig({
pkg: pkg,
env: {
debugdev: {
NODE_ENV: 'development',
DEBUG: path.basename(path.resolve()),
},
debugtest: {
NODE_ENV: 'test',
DEBUG: path.basename(path.resolve()),
},
dev: {
NODE_ENV: 'development',
},
test: {
NODE_ENV: 'test',
},
},
mochaTest: mochaTest,
notify: notify,
shell: shell,
paths: paths,
clean: clean,
watch: watch,
nodemon: nodemon,
concurrent: concurrent,
})
require('load-grunt-tasks')(grunt)
grunt.registerTask('compile', ['clean'])
grunt.registerTask('test', ['env:test', 'compile', 'mochaTest', 'notify:test'])
grunt.registerTask('debug-test', ['env:debugtest', 'compile', 'mochaTest', 'notify:test'])
grunt.registerTask('dev', ['compile', 'env:dev', 'notify:success', 'concurrent:default'])
grunt.registerTask('debug-dev', [
'compile',
'env:debugdev',
'notify:success',
'concurrent:debug',
])
grunt.registerTask('default', ['test', 'env:dev', 'notify:success', 'shell:exec'])
grunt.registerTask('version', ['shell:verpatch'])
grunt.registerTask('version:minor', ['shell:verminor'])
grunt.registerTask('version:major', ['shell:vermajor'])
grunt.registerTask('deploy', ['test', 'shell:deploy'])
grunt.registerTask('release', ['test', 'shell:gitflowrelease', 'notify:success'])
grunt.registerTask('release:finish', ['shell:gitflowreleasefinish', 'deploy', 'notify:success'])
} | |
abstract_data_manager.py | # -*- encoding: utf-8 -*-
import abc
import numpy as np
import scipy.sparse
from autosklearn.pipeline.implementations.OneHotEncoder import OneHotEncoder
from autosklearn.util import predict_RAM_usage
def perform_one_hot_encoding(sparse, categorical, data):
predicted_RAM_usage = float(
predict_RAM_usage(data[0], categorical)) / 1024 / 1024
if predicted_RAM_usage > 1000:
sparse = True
rvals = []
if any(categorical):
encoder = OneHotEncoder(categorical_features=categorical,
dtype=np.float32,
sparse=sparse)
rvals.append(encoder.fit_transform(data[0]))
for d in data[1:]:
rvals.append(encoder.transform(d))
if not sparse and scipy.sparse.issparse(rvals[0]):
for i in range(len(rvals)):
rvals[i] = rvals[i].todense()
else:
rvals = data
return rvals, sparse
class AbstractDataManager():
__metaclass__ = abc.ABCMeta
def __init__(self, name):
self._data = dict()
self._info = dict()
self._name = name
@property
def name(self):
return self._name
@property
def data(self):
return self._data
@property
def info(self):
return self._info
@property
def feat_type(self):
return self._feat_type
@feat_type.setter
def feat_type(self, value):
self._feat_type = value
@property
def encoder(self):
return self._encoder
@encoder.setter
def encoder(self, value):
self._encoder = value
def perform1HotEncoding(self):
sparse = self.info['is_sparse'] == 1
has_missing = bool(self.info['has_missing'])
to_encode = ['categorical']
if has_missing:
to_encode += ['binary']
encoding_mask = [feat_type.lower() in to_encode
for feat_type in self.feat_type]
data = [self.data['X_train']]
if 'X_valid' in self.data:
data.append(self.data['X_valid'])
if 'X_test' in self.data:
data.append(self.data['X_test'])
data, sparse = perform_one_hot_encoding(
sparse=sparse, categorical=encoding_mask,
data=data)
self.info['is_sparse'] = 1 if sparse else 0
self.data['X_train'] = data[0]
if 'X_valid' in self.data and 'X_test' in self.data:
self.data['X_valid'] = data[1]
self.data['X_test'] = data[2]
elif 'X_valid' in self.data:
self.data['X_valid'] = data[1]
elif 'X_test' in self.data:
self.data['X_test'] = data[1]
def __repr__(self):
return 'DataManager : ' + self.name
def | (self):
val = 'DataManager : ' + self.name + '\ninfo:\n'
for item in self.info:
val = val + '\t' + item + ' = ' + str(self.info[item]) + '\n'
val = val + 'data:\n'
for subset in self.data:
val = val + '\t%s = %s %s %s\n' % (subset, type(self.data[subset]),
str(self.data[subset].shape),
str(self.data[subset].dtype))
if isinstance(self.data[subset], scipy.sparse.spmatrix):
val = val + '\tdensity: %f\n' % \
(float(len(self.data[subset].data)) /
self.data[subset].shape[0] /
self.data[subset].shape[1])
val = val + 'feat_type:\t' + str(self.feat_type) + '\n'
return val
| __str__ |
TestingStyleController.js | ({
doInit: function(component, event, helper) {
console.log('doInit');
var eventParams = event.getParams();
if(eventParams.changeType === "LOADED") {
component.get("v.recordInfo");
// record is loaded (render other component which needs record data value)
console.log("Record is loaded successfully.");
} else if(eventParams.changeType === "CHANGED") {
// record is changed
} else if(eventParams.changeType === "REMOVED") {
// record is deleted
} else if(eventParams.changeType === "ERROR") {
// there’s an error while loading, saving, or deleting the record
}
var clsStopwatch = function() {
var startAt = startAt || 0;
var lapTime = lapTime || 0;
var now = function() {
return (new Date()).getTime();
};
this.start = function() {
startAt = startAt ? startAt : now();
};
this.stop = function() {
lapTime = startAt ? lapTime + now() - startAt : lapTime;
startAt = 0; | lapTime = 0;
startAt = 0;
};
this.time = function() {
return lapTime + (startAt ? now() - startAt : 0);
};
};
var x = new clsStopwatch();
component.set("v.stopwatch", x);
var clocktimer = setInterval(function() {
helper.updateStatus(component, event);
}, 1);
var theCase = component.get('v.simpleCase');
if(theCase.Status === 'Closed') {
component.set('v.disabled',true);
$A.util.addClass(component.find("timerDiv"), "slds-hide");
component.set('v.playing',false);
} else {
component.set('v.disabled',false);
$A.util.removeClass(component.find("timerDiv"), "slds-hide");
if(component.get('v.autoStart') === 'True') {
x.start();
component.set('v.playing',true);
component.set('v.recording',true);
} else {
component.set('v.playing',false);
x.stop();
}
}
helper.loadSessions(component, event);
},
onClick : function(component, event, helper) {
var clocktimer;
var id = event.target.id;
var x = component.get("v.stopwatch");
switch(id){
case "start":
component.set('v.playing',true);
component.set('v.recording',true);
clocktimer = setInterval(function() {
helper.updateStatus(component, event);
}, 1);
x.start();
break;
case "stop":
component.set('v.playing',false);
x.stop();
clearInterval(clocktimer);
helper.updateStatus(component, event);
break;
default:
stop();
break;
}
},
update : function (component, event, helper) {
// Get the new hash from the event
var loc = event.getParam("token");
console.log("Creating Session: update");
console.log(loc);
helper.createSession(component, event);
// Do something else
},
handleSaveSession: function(component, event, helper) {
console.log('start save session');
if(helper.validateSessionForm(component)) {
console.log('after conditional helper');
// Prepare the action to create the new session
var saveSessionAction = component.get("c.newSessionManual");
saveSessionAction.setParams({
"caseId" : component.get("v.recordId"),
"timeVal" : component.get("v.manualDuration"),
"theDate" : new Date(component.get("v.manualDate")).toJSON()
});
// Configure the response handler for the action
saveSessionAction.setCallback(this, function(response) {
var state = response.getState();
if(state === "SUCCESS") {
// Prepare a toast UI message
var resultsToast = $A.get("e.force:showToast");
resultsToast.setParams({
"title": "Session Saved",
"message": "The new session was created."
});
// Update the UI: close panel, show toast, refresh case page
$A.get("e.force:closeQuickAction").fire();
resultsToast.fire();
$A.get("e.force:refreshView").fire();
}
else if (state === "ERROR") {
console.log('Problem saving session, response state: ' + state);
}
else {
console.log('Unknown problem, response state: ' + state);
}
});
// Send the request to create the new session
$A.enqueueAction(saveSessionAction);
helper.hideModal(component, event);
}
helper.loadSessions(component, event);
},
newTime : function(component, event, helper) {
$A.util.toggleClass(component.find("myModal"), "slds-hide");
},
cancelSession : function(component, event, helper) {
helper.hideModal(component, event);
}
}) | };
this.reset = function() { |
0002_application_user.py | # Generated by Django 3.1.1 on 2020-12-07 14:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
| dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('api', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='application',
name='user',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
] |
|
service_discovery.rs | use crate::connection_manager::{BrokerAddress, ConnectionManager};
use crate::error::{ConnectionError, ServiceDiscoveryError};
use crate::executor::Executor;
use crate::message::proto::{
command_lookup_topic_response, command_partitioned_topic_metadata_response,
CommandLookupTopicResponse,
};
use futures::{future::try_join_all, FutureExt};
use std::{sync::Arc, time::Duration};
use url::Url;
/// Look up broker addresses for topics and partitioned topics
///
/// The ServiceDiscovery object provides a single interface to start
/// interacting with a cluster. It will automatically follow redirects
/// or use a proxy, and aggregate broker connections
#[derive(Clone)]
pub struct ServiceDiscovery<Exe: Executor> {
manager: Arc<ConnectionManager<Exe>>,
}
impl<Exe: Executor> ServiceDiscovery<Exe> {
pub fn with_manager(manager: Arc<ConnectionManager<Exe>>) -> Self {
ServiceDiscovery { manager }
}
/// get the broker address for a topic
pub async fn lookup_topic<S: Into<String>>(
&self,
topic: S,
) -> Result<BrokerAddress, ServiceDiscoveryError> {
let topic = topic.into();
let mut proxied_query = false;
let mut conn = self.manager.get_base_connection().await?;
let base_url = self.manager.url.clone();
let mut is_authoritative = false;
let mut broker_address = self.manager.get_base_address();
let mut max_retries = 20u8;
loop {
let response = match conn
.sender()
.lookup_topic(topic.to_string(), is_authoritative)
.await
{
Ok(res) => res,
Err(ConnectionError::Disconnected) => {
error!("tried to lookup a topic but connection was closed, reconnecting...");
conn = self.manager.get_connection(&broker_address).await?;
conn.sender()
.lookup_topic(topic.to_string(), is_authoritative)
.await?
}
Err(e) => return Err(e.into()),
};
if response.response.is_none()
|| response.response
== Some(command_lookup_topic_response::LookupType::Failed as i32)
{
let error = response.error.and_then(crate::error::server_error);
if error == Some(crate::message::proto::ServerError::ServiceNotReady)
&& max_retries > 0
{
error!("lookup({}) answered ServiceNotReady, retrying request after 500ms (max_retries = {})", topic, max_retries);
max_retries -= 1;
self.manager.executor.delay(Duration::from_millis(500)).await;
continue;
}
return Err(ServiceDiscoveryError::Query(
error,
response.message.clone(),
));
}
let LookupResponse {
broker_url,
broker_url_tls,
proxy,
redirect,
authoritative,
} = convert_lookup_response(&response)?;
is_authoritative = authoritative;
// use the TLS connection if available
let connection_url = broker_url_tls.clone().unwrap_or_else(|| broker_url.clone());
// if going through a proxy, we use the base URL
let url = if proxied_query || proxy {
base_url.clone()
} else {
connection_url.clone()
};
let broker_url = match broker_url_tls {
Some(u) => format!("{}:{}", u.host_str().unwrap(), u.port().unwrap_or(6651)),
None => format!(
"{}:{}",
broker_url.host_str().unwrap(),
broker_url.port().unwrap_or(6650)
),
};
broker_address = BrokerAddress {
url,
broker_url,
proxy: proxied_query || proxy,
};
// if the response indicated a redirect, do another query
// to the target broker
if redirect {
conn = self.manager.get_connection(&broker_address).await?;
proxied_query = broker_address.proxy;
continue;
} else {
let res = self
.manager
.get_connection(&broker_address)
.await
.map(|_| broker_address)
.map_err(ServiceDiscoveryError::Connection);
break res;
}
}
}
/// get the number of partitions for a partitioned topic
pub async fn lookup_partitioned_topic_number<S: Into<String>>(
&self,
topic: S,
) -> Result<u32, ServiceDiscoveryError> {
let mut connection = self.manager.get_base_connection().await?;
let topic = topic.into();
let mut max_retries = 20u8;
let response = loop {
let response = match connection.sender().lookup_partitioned_topic(&topic).await {
Ok(res) => res, | connection.sender().lookup_partitioned_topic(&topic).await?
}
Err(e) => return Err(e.into()),
};
if response.response.is_none()
|| response.response
== Some(command_partitioned_topic_metadata_response::LookupType::Failed as i32)
{
let error = response.error.and_then(crate::error::server_error);
if error == Some(crate::message::proto::ServerError::ServiceNotReady)
&& max_retries > 0
{
error!("lookup_partitioned_topic_number({}) answered ServiceNotReady, retrying request after 500ms (max_retries = {})", topic, max_retries);
max_retries -= 1;
self.manager.executor.delay(Duration::from_millis(500)).await;
continue;
}
return Err(ServiceDiscoveryError::Query(
error,
response.message.clone(),
));
}
break response;
};
match response.partitions {
Some(partitions) => Ok(partitions),
None => Err(ServiceDiscoveryError::Query(
response.error.and_then(crate::error::server_error),
response.message,
)),
}
}
/// Lookup a topic, returning a list of the partitions (if partitioned) and addresses
/// associated with that topic.
pub async fn lookup_partitioned_topic<S: Into<String>>(
&self,
topic: S,
) -> Result<Vec<(String, BrokerAddress)>, ServiceDiscoveryError> {
let topic = topic.into();
let partitions = self.lookup_partitioned_topic_number(&topic).await?;
trace!("Partitions for topic {}: {}", &topic, &partitions);
let topics = match partitions {
0 => vec![topic],
_ => (0..partitions).map(|n| format!("{}-partition-{}", &topic, n)).collect(),
};
try_join_all(topics.into_iter().map(|topic| {
self.lookup_topic(topic.clone())
.map(move |address_res| match address_res {
Err(e) => Err(e),
Ok(address) => Ok((topic, address)),
})
}))
.await
}
}
struct LookupResponse {
pub broker_url: Url,
pub broker_url_tls: Option<Url>,
pub proxy: bool,
pub redirect: bool,
pub authoritative: bool,
}
/// extracts information from a lookup response
fn convert_lookup_response(
response: &CommandLookupTopicResponse,
) -> Result<LookupResponse, ServiceDiscoveryError> {
let proxy = response.proxy_through_service_url.unwrap_or(false);
let authoritative = response.authoritative.unwrap_or(false);
let redirect =
response.response == Some(command_lookup_topic_response::LookupType::Redirect as i32);
if response.broker_service_url.is_none() {
return Err(ServiceDiscoveryError::NotFound);
}
let broker_url = Url::parse(&response.broker_service_url.clone().unwrap()).map_err(|e| {
error!("error parsing URL: {:?}", e);
ServiceDiscoveryError::NotFound
})?;
let broker_url_tls = match response.broker_service_url_tls.as_ref() {
Some(u) => Some(Url::parse(&u).map_err(|e| {
error!("error parsing URL: {:?}", e);
ServiceDiscoveryError::NotFound
})?),
None => None,
};
Ok(LookupResponse {
broker_url,
broker_url_tls,
proxy,
redirect,
authoritative,
})
} | Err(ConnectionError::Disconnected) => {
error!("tried to lookup a topic but connection was closed, reconnecting...");
connection = self.manager.get_base_connection().await?; |
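Both lookup loops in service_discovery.rs retry up to 20 times on `ServiceNotReady`, pausing 500ms between attempts. The pattern generalizes; here is a hedged, standalone sketch, assuming a tokio runtime rather than this crate's `Executor` abstraction:

```rust
use std::future::Future;
use std::time::Duration;

/// Retry `op` until it succeeds or `retries_left` attempts are exhausted,
/// sleeping briefly between attempts like the 500ms delay above.
async fn retry_with_delay<T, E, F, Fut>(mut retries_left: u8, mut op: F) -> Result<T, E>
where
    F: FnMut() -> Fut,
    Fut: Future<Output = Result<T, E>>,
{
    loop {
        match op().await {
            Ok(v) => return Ok(v),
            Err(_) if retries_left > 0 => {
                retries_left -= 1;
                tokio::time::sleep(Duration::from_millis(500)).await;
            }
            Err(e) => return Err(e),
        }
    }
}
```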
codec.rs | use std::ffi::CStr;
use ffmpeg_sys as ff;
#[derive(Clone, Copy)]
pub struct Codec(*mut ff::AVCodec);
// The codec is basically a reference to an immutable static variable
unsafe impl Send for Codec {}
unsafe impl Sync for Codec {}
impl Codec {
pub fn name(&self) -> &CStr {
unsafe { CStr::from_ptr((*self.0).name) }
}
pub fn default_encoder() -> Option<Self> {
unsafe {
let ptr = ff::avcodec_find_encoder(ff::AVCodecID::AV_CODEC_ID_H264);
Self::from_ptr(ptr)
}
}
pub fn find_encoder(name: &CStr) -> Option<Self> {
unsafe {
let ptr = ff::avcodec_find_encoder_by_name(name.as_ptr());
Self::from_ptr(ptr)
}
}
pub fn default_decoder() -> Option<Self> {
unsafe { | Self::from_ptr(ptr)
}
}
pub fn find_decoder(name: &CStr) -> Option<Self> {
unsafe {
let ptr = ff::avcodec_find_decoder_by_name(name.as_ptr());
Self::from_ptr(ptr)
}
}
unsafe fn from_ptr(ptr: *mut ff::AVCodec) -> Option<Self> {
if ptr.is_null() {
None
} else {
Some(Codec(ptr))
}
}
pub fn as_mut_ptr(&mut self) -> *mut ff::AVCodec {
self.0
}
} | let ptr = ff::avcodec_find_decoder(ff::AVCodecID::AV_CODEC_ID_H264); |
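codec.rs funnels FFmpeg's null-or-valid raw pointers through a single `Option` conversion (`from_ptr`), so every lookup shares the same safety check. A usage sketch that prefers the default H.264 encoder and falls back to one looked up by name; the encoder name is illustrative:

```rust
use std::ffi::CStr;

fn pick_encoder() -> Option<Codec> {
    // Try the default H.264 encoder first, then fall back by name.
    Codec::default_encoder().or_else(|| {
        let name = CStr::from_bytes_with_nul(b"libx264\0").expect("valid C string");
        Codec::find_encoder(name)
    })
}
```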
misc.rs | // Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Miscellaneous builder routines that are not specific to building any particular
//! kind of thing.
use build::Builder;
use rustc_const_math::{ConstInt, ConstUsize, ConstIsize};
use rustc::middle::const_val::ConstVal;
use rustc::ty::{self, Ty};
use rustc::mir::repr::*;
use syntax::ast;
use syntax_pos::Span;
impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
/// Add a new temporary value of type `ty` storing the result of
/// evaluating `expr`.
///
/// NB: **No cleanup is scheduled for this temporary.** You should
/// call `schedule_drop` once the temporary is initialized.
pub fn temp(&mut self, ty: Ty<'tcx>) -> Lvalue<'tcx> {
let temp = self.temp_decls.push(TempDecl { ty: ty });
let lvalue = Lvalue::Temp(temp);
debug!("temp: created temp {:?} with type {:?}",
lvalue, self.temp_decls[temp].ty);
lvalue
}
pub fn literal_operand(&mut self,
span: Span,
ty: Ty<'tcx>,
literal: Literal<'tcx>)
-> Operand<'tcx> {
let constant = Constant {
span: span,
ty: ty,
literal: literal,
};
Operand::Constant(constant)
}
pub fn unit_rvalue(&mut self) -> Rvalue<'tcx> {
Rvalue::Aggregate(AggregateKind::Tuple, vec![])
}
// Returns a zero literal operand for the appropriate type, works for
// bool, char and integers.
pub fn zero_literal(&mut self, span: Span, ty: Ty<'tcx>) -> Operand<'tcx> {
let literal = match ty.sty {
ty::TyBool => {
self.hir.false_literal()
}
ty::TyChar => Literal::Value { value: ConstVal::Char('\0') },
ty::TyUint(ity) => {
let val = match ity {
ast::UintTy::U8 => ConstInt::U8(0),
ast::UintTy::U16 => ConstInt::U16(0),
ast::UintTy::U32 => ConstInt::U32(0),
ast::UintTy::U64 => ConstInt::U64(0),
ast::UintTy::Us => {
let uint_ty = self.hir.tcx().sess.target.uint_type;
let val = ConstUsize::new(0, uint_ty).unwrap();
ConstInt::Usize(val)
}
};
Literal::Value { value: ConstVal::Integral(val) }
}
ty::TyInt(ity) => {
let val = match ity {
ast::IntTy::I8 => ConstInt::I8(0),
ast::IntTy::I16 => ConstInt::I16(0),
ast::IntTy::I32 => ConstInt::I32(0),
ast::IntTy::I64 => ConstInt::I64(0),
ast::IntTy::Is => {
let int_ty = self.hir.tcx().sess.target.int_type;
let val = ConstIsize::new(0, int_ty).unwrap();
ConstInt::Isize(val)
}
};
Literal::Value { value: ConstVal::Integral(val) }
}
_ => {
span_bug!(span, "Invalid type for zero_literal: `{:?}`", ty)
}
};
self.literal_operand(span, ty, literal)
}
pub fn push_usize(&mut self,
block: BasicBlock,
source_info: SourceInfo,
value: u64)
-> Lvalue<'tcx> |
}
| {
let usize_ty = self.hir.usize_ty();
let temp = self.temp(usize_ty);
self.cfg.push_assign_constant(
block, source_info, &temp,
Constant {
span: source_info.span,
ty: self.hir.usize_ty(),
literal: self.hir.usize_literal(value),
});
temp
} |
lib.rs | //! A crate for writing sparql clients in safe Rust
//!
#![warn(missing_docs, missing_debug_implementations, missing_copy_implementations, trivial_casts,
trivial_numeric_casts, unused_import_braces, unused_qualifications
)]
use std::time::Duration;
use actix_web::client::{Client, ClientRequest};
use actix_web::client::Connector;
use actix_web::http::{header, Uri};
use mime::Mime;
use openssl::ssl::{SslConnector, SslMethod};
use serde::Serialize;
/// TODO: Documentation for json module
pub mod json;
/// Some example SPARQL queries
pub mod query_template;
/// Structs for StructOpt-based SPARQL CLI
pub mod cli_model;
/// Default timeout for sparql_client()
pub const TIMEOUT: u64 = 1000;
/// Return a actix_web::client::Client that's configured for a SPARQL query
pub fn sparql_client(timeout: u64) -> Client {
let builder = SslConnector::builder(SslMethod::tls()).unwrap();
Client::builder()
.connector(
Connector::new()
.timeout(Duration::from_millis(timeout))
.ssl(builder.build())
.finish(),
)
.finish()
}
/// Struct used by serde to serialize the ?query= part to the URL
#[derive(Serialize, Debug)]
pub struct GetRequestParams {
/// The ?query= part of the SPARQL GET URL
pub query: String
}
/// Set up a ClientRequest to perform a SPARQL query
pub fn sparql_get(client: Client, host: Uri, accept: Mime, query: &str) -> ClientRequest {
let params = GetRequestParams {
query: (&query).parse().unwrap()
}; | .header(header::ACCEPT, accept)
.query(¶ms)
.unwrap()
} |
client
.get(host)
.header("User-Agent", "agnos-ai/sparql-client-rs") |
form.py | from __future__ import unicode_literals
from wtforms.form import Form
from wtforms.validators import ValidationError
from .fields import CSRFTokenField
class SecureForm(Form):
"""
Form that enables CSRF processing via subclassing hooks.
"""
|
def __init__(self, formdata=None, obj=None, prefix="", csrf_context=None, **kwargs):
"""
:param csrf_context:
Optional extra data which is passed transparently to your
CSRF implementation.
"""
super(SecureForm, self).__init__(formdata, obj, prefix, **kwargs)
self.csrf_token.current_token = self.generate_csrf_token(csrf_context)
def generate_csrf_token(self, csrf_context):
"""
Implementations must override this to provide a method with which one
can get a CSRF token for this form.
A CSRF token should be a string which can be generated
deterministically so that on the form POST, the generated string is
(usually) the same assuming the user is using the site normally.
:param csrf_context:
A transparent object which can be used as contextual info for
generating the token.
"""
raise NotImplementedError()
def validate_csrf_token(self, field):
"""
Override this method to provide custom CSRF validation logic.
The default CSRF validation logic simply checks if the recently
generated token equals the one we received as formdata.
"""
if field.current_token != field.data:
raise ValidationError(field.gettext("Invalid CSRF Token"))
@property
def data(self):
d = super(SecureForm, self).data
d.pop("csrf_token")
return d | csrf_token = CSRFTokenField() |
fleet_test.go | // Copyright 2018 Google LLC All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package e2e
import (
"context"
"fmt"
"strings"
"sync"
"testing"
"time"
"agones.dev/agones/pkg/apis"
agonesv1 "agones.dev/agones/pkg/apis/agones/v1"
allocationv1 "agones.dev/agones/pkg/apis/allocation/v1"
typedagonesv1 "agones.dev/agones/pkg/client/clientset/versioned/typed/agones/v1"
"agones.dev/agones/pkg/util/runtime"
e2e "agones.dev/agones/test/e2e/framework"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
appsv1 "k8s.io/api/apps/v1"
autoscalingv1 "k8s.io/api/autoscaling/v1"
corev1 "k8s.io/api/core/v1"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"
)
const (
key = "test-state"
red = "red"
green = "green"
replicasCount = 3
)
// TestFleetRequestsLimits reproduces an issue where 1000m CPU and 1 CPU were not treated as equal, but should be
// Every fleet should create no more than 2 GameServerSets at once on a simple fleet patch
func TestFleetRequestsLimits(t *testing.T) {
t.Parallel()
ctx := context.Background()
flt := defaultFleet(framework.Namespace)
flt.Spec.Template.Spec.Template.Spec.Containers[0].Resources.Limits[corev1.ResourceCPU] = *resource.NewScaledQuantity(1000, -3)
client := framework.AgonesClient.AgonesV1()
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.NoError(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
newReplicas := int32(5)
patch := fmt.Sprintf(`[{ "op": "replace", "path": "/spec/template/spec/template/spec/containers/0/resources/requests/cpu", "value": "1000m"},
{ "op": "replace", "path": "/spec/replicas", "value": %d}]`, newReplicas)
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Patch(ctx, flt.ObjectMeta.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
assert.Nil(t, err)
// In the bug scenario the fleet kept creating new GameServerSets (5 at a time) indefinitely, because 1000m CPU
// was converted to 1 CPU internally and mistaken for a new, different GameServerSet by the Fleet controller
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(newReplicas))
}
// TestFleetStrategyValidation reproduces an issue when we are trying
// to update a fleet with no strategy in a new one
func TestFleetStrategyValidation(t *testing.T) {
t.Parallel()
ctx := context.Background()
flt := defaultFleet(framework.Namespace)
client := framework.AgonesClient.AgonesV1()
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
flt, err = client.Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.GetName(), metav1.GetOptions{})
assert.NoError(t, err)
// func to check that we receive an expected error
verifyErr := func(err error) {
assert.NotNil(t, err)
statusErr, ok := err.(*k8serrors.StatusError)
assert.True(t, ok)
fmt.Println(statusErr)
CausesMessages := []string{"Strategy Type should be one of: RollingUpdate, Recreate."}
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Contains(t, CausesMessages, statusErr.Status().Details.Causes[0].Message)
}
// Change DeploymentStrategy Type, set it to empty string, which is forbidden
fltCopy := flt.DeepCopy()
fltCopy.Spec.Strategy.Type = appsv1.DeploymentStrategyType("")
_, err = client.Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
verifyErr(err)
// Try to remove whole DeploymentStrategy in a patch
patch := `[{ "op": "remove", "path": "/spec/strategy"},
{ "op": "replace", "path": "/spec/replicas", "value": 3}]`
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Patch(ctx, flt.ObjectMeta.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
verifyErr(err)
}
func TestFleetScaleUpEditAndScaleDown(t *testing.T) {
t.Parallel()
// Use scaleFleetPatch (true) or scaleFleetSubresource (false)
fixtures := []bool{true, false}
for _, usePatch := range fixtures {
usePatch := usePatch
t.Run("Use fleet Patch "+fmt.Sprint(usePatch), func(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = 1
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, int32(1), flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
// scale up
const targetScale = 3
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, targetScale)
assert.Equal(t, int32(targetScale), flt.Spec.Replicas)
} else {
flt = scaleFleetSubresource(ctx, t, flt, targetScale)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(targetScale))
gsa := framework.CreateAndApplyAllocation(t, flt)
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 1
})
flt, err = client.Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.GetName(), metav1.GetOptions{})
assert.Nil(t, err)
// Change ContainerPort to trigger creating a new GSSet
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.Spec.Ports[0].ContainerPort++
flt, err = client.Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
assert.Nil(t, err)
// Wait for one more GSSet to be created and for ReadyReplicas to appear in the new GSSet
err = wait.PollImmediate(1*time.Second, time.Minute, func() (bool, error) {
selector := labels.SelectorFromSet(labels.Set{agonesv1.FleetNameLabel: flt.ObjectMeta.Name})
list, err := framework.AgonesClient.AgonesV1().GameServerSets(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return false, err
}
ready := false
if len(list.Items) == 2 {
for _, v := range list.Items {
if v.Status.ReadyReplicas > 0 && v.Status.AllocatedReplicas == 0 {
ready = true
}
}
}
return ready, nil
})
assert.Nil(t, err)
// scale down, with allocation
const scaleDownTarget = 1
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, scaleDownTarget)
} else {
flt = scaleFleetSubresource(ctx, t, flt, scaleDownTarget)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(0))
// delete the allocated GameServer
gp := int64(1)
err = client.GameServers(framework.Namespace).Delete(ctx, gsa.Status.GameServerName, metav1.DeleteOptions{GracePeriodSeconds: &gp})
assert.Nil(t, err)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(1))
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 0
})
})
}
}
// TestFleetRollingUpdate - test that only a limited number of gameservers are created and deleted at a time,
// checking the maxUnavailable and maxSurge parameters.
func | (t *testing.T) {
t.Parallel()
ctx := context.Background()
// Use scaleFleetPatch (true) or scaleFleetSubresource (false)
fixtures := []bool{true, false}
maxSurge := []string{"25%", "10%"}
for _, usePatch := range fixtures {
for _, maxSurgeParam := range maxSurge {
usePatch := usePatch
maxSurgeParam := maxSurgeParam
t.Run(fmt.Sprintf("Use fleet Patch %t %s", usePatch, maxSurgeParam), func(t *testing.T) {
t.Parallel()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.ApplyDefaults()
flt.Spec.Replicas = 1
rollingUpdatePercent := intstr.FromString(maxSurgeParam)
flt.Spec.Strategy.RollingUpdate.MaxSurge = &rollingUpdatePercent
flt.Spec.Strategy.RollingUpdate.MaxUnavailable = &rollingUpdatePercent
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, int32(1), flt.Spec.Replicas)
assert.Equal(t, maxSurgeParam, flt.Spec.Strategy.RollingUpdate.MaxSurge.StrVal)
assert.Equal(t, maxSurgeParam, flt.Spec.Strategy.RollingUpdate.MaxUnavailable.StrVal)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
// scale up
const targetScale = 8
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, targetScale)
assert.Equal(t, int32(targetScale), flt.Spec.Replicas)
} else {
flt = scaleFleetSubresource(ctx, t, flt, targetScale)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(targetScale))
flt, err = client.Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.GetName(), metav1.GetOptions{})
assert.NoError(t, err)
// Change ContainerPort to trigger creating a new GSSet
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.Spec.Ports[0].ContainerPort++
flt, err = client.Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
assert.NoError(t, err)
selector := labels.SelectorFromSet(labels.Set{agonesv1.FleetNameLabel: flt.ObjectMeta.Name})
// New GSS was created
err = wait.PollImmediate(1*time.Second, 30*time.Second, func() (bool, error) {
gssList, err := framework.AgonesClient.AgonesV1().GameServerSets(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return false, err
}
return len(gssList.Items) == 2, nil
})
assert.NoError(t, err)
// Check that the total number of gameservers in the system does not exceed the RollingUpdate
// parameters (creating no more than maxSurge, deleting no more than maxUnavailable servers at a time)
// Wait for old GSSet to be deleted
err = wait.PollImmediate(1*time.Second, 5*time.Minute, func() (bool, error) {
list, err := framework.AgonesClient.AgonesV1().GameServers(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return false, err
}
maxSurge, err := intstr.GetValueFromIntOrPercent(flt.Spec.Strategy.RollingUpdate.MaxSurge, int(flt.Spec.Replicas), true)
assert.Nil(t, err)
roundUp := true
if runtime.FeatureEnabled(runtime.FeatureRollingUpdateOnReady) {
roundUp = false
}
maxUnavailable, err := intstr.GetValueFromIntOrPercent(flt.Spec.Strategy.RollingUpdate.MaxUnavailable, int(flt.Spec.Replicas), roundUp)
shift := 0
if runtime.FeatureEnabled(runtime.FeatureRollingUpdateOnReady) {
if maxUnavailable == 0 {
maxUnavailable = 1
}
// This difference is inevitable; it can also be seen with Deployments and ReplicaSets
shift = maxUnavailable
}
assert.Nil(t, err)
expectedTotal := targetScale + maxSurge + maxUnavailable + shift
if len(list.Items) > expectedTotal {
err = fmt.Errorf("total number of gameservers should not exceed target + maxSurge + maxUnavailable + shift. Replicas: %d, Expected max: %d", len(list.Items), expectedTotal)
}
if err != nil {
return false, err
}
gssList, err := framework.AgonesClient.AgonesV1().GameServerSets(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return false, err
}
return len(gssList.Items) == 1, nil
})
assert.NoError(t, err)
// scale down, with allocation
const scaleDownTarget = 1
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, scaleDownTarget)
} else {
flt = scaleFleetSubresource(ctx, t, flt, scaleDownTarget)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(1))
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 0
})
})
}
}
}
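// Illustrative sketch (not part of the original suite): how the percentage parameters above
// resolve to absolute counts for a fleet of 8 replicas, using the same intstr helper as the test.
func exampleRollingUpdateMath() {
	surge := intstr.FromString("25%")
	surgeVal, _ := intstr.GetValueFromIntOrPercent(&surge, 8, true) // 25% of 8 = 2 exactly
	unavailable := intstr.FromString("10%")
	unavailableVal, _ := intstr.GetValueFromIntOrPercent(&unavailable, 8, false) // 0.8 rounds down to 0
	fmt.Println(surgeVal, unavailableVal) // prints: 2 0
}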
func TestUpdateFleetReplicaAndSpec(t *testing.T) {
if !runtime.FeatureEnabled(runtime.FeatureRollingUpdateOnReady) {
t.SkipNow()
}
t.Parallel()
client := framework.AgonesClient.AgonesV1()
ctx := context.Background()
flt := defaultFleet(framework.Namespace)
flt.ApplyDefaults()
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
require.NoError(t, err)
logrus.WithField("fleet", flt).Info("Created Fleet")
selector := labels.SelectorFromSet(labels.Set{agonesv1.FleetNameLabel: flt.ObjectMeta.Name})
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
require.Eventuallyf(t, func() bool {
list, err := client.GameServerSets(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
require.NoError(t, err)
return len(list.Items) == 1
}, time.Minute, time.Second, "Wrong number of GameServerSets")
// update both replicas and template at the same time
flt, err = client.Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.GetName(), metav1.GetOptions{})
require.NoError(t, err)
fltCopy := flt.DeepCopy()
fltCopy.Spec.Replicas = 0
fltCopy.Spec.Template.Spec.Ports[0].ContainerPort++
require.NotEqual(t, flt.Spec.Template.Spec.Ports[0].ContainerPort, fltCopy.Spec.Template.Spec.Ports[0].ContainerPort)
flt, err = client.Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
require.NoError(t, err)
require.Empty(t, flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
require.Eventuallyf(t, func() bool {
list, err := client.GameServerSets(framework.Namespace).List(ctx,
metav1.ListOptions{LabelSelector: selector.String()})
require.NoError(t, err)
return len(list.Items) == 1 && list.Items[0].Spec.Replicas == 0
}, time.Minute, time.Second, "Wrong number of GameServerSets")
}
func TestScaleFleetUpAndDownWithGameServerAllocation(t *testing.T) {
t.Parallel()
ctx := context.Background()
fixtures := []bool{false, true}
for _, usePatch := range fixtures {
usePatch := usePatch
t.Run("Use fleet Patch "+fmt.Sprint(usePatch), func(t *testing.T) {
t.Parallel()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = 1
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, int32(1), flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
// scale up
const targetScale = 3
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, targetScale)
assert.Equal(t, int32(targetScale), flt.Spec.Replicas)
} else {
flt = scaleFleetSubresource(ctx, t, flt, targetScale)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(targetScale))
// get an allocation
gsa := &allocationv1.GameServerAllocation{ObjectMeta: metav1.ObjectMeta{GenerateName: "allocation-"},
Spec: allocationv1.GameServerAllocationSpec{
Required: metav1.LabelSelector{MatchLabels: map[string]string{agonesv1.FleetNameLabel: flt.ObjectMeta.Name}},
}}
gsa, err = framework.AgonesClient.AllocationV1().GameServerAllocations(framework.Namespace).Create(ctx, gsa, metav1.CreateOptions{})
assert.Nil(t, err)
assert.Equal(t, allocationv1.GameServerAllocationAllocated, gsa.Status.State)
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 1
})
// scale down, with allocation
const scaleDownTarget = 1
if usePatch {
flt = scaleFleetPatch(ctx, t, flt, scaleDownTarget)
} else {
flt = scaleFleetSubresource(ctx, t, flt, scaleDownTarget)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(0))
// delete the allocated GameServer
gp := int64(1)
err = client.GameServers(framework.Namespace).Delete(ctx, gsa.Status.GameServerName, metav1.DeleteOptions{GracePeriodSeconds: &gp})
assert.Nil(t, err)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(1))
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 0
})
})
}
}
func TestFleetUpdates(t *testing.T) {
t.Parallel()
ctx := context.Background()
fixtures := map[string]func() *agonesv1.Fleet{
"recreate": func() *agonesv1.Fleet {
flt := defaultFleet(framework.Namespace)
flt.Spec.Strategy.Type = appsv1.RecreateDeploymentStrategyType
return flt
},
"rolling": func() *agonesv1.Fleet {
flt := defaultFleet(framework.Namespace)
flt.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType
return flt
},
}
for k, v := range fixtures {
k := k
v := v
t.Run(k, func(t *testing.T) {
t.Parallel()
client := framework.AgonesClient.AgonesV1()
flt := v()
flt.Spec.Template.ObjectMeta.Annotations = map[string]string{key: red}
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
err = framework.WaitForFleetGameServersCondition(flt, func(gs *agonesv1.GameServer) bool {
return gs.ObjectMeta.Annotations[key] == red
})
assert.Nil(t, err)
// if the generation has been updated, it's time to try again.
err = wait.PollImmediate(time.Second, 10*time.Second, func() (bool, error) {
flt, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.ObjectMeta.Annotations[key] = green
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
if err != nil {
logrus.WithError(err).Warn("Could not update fleet, trying again")
return false, nil
}
return true, nil
})
assert.Nil(t, err)
err = framework.WaitForFleetGameServersCondition(flt, func(gs *agonesv1.GameServer) bool {
return gs.ObjectMeta.Annotations[key] == green
})
assert.Nil(t, err)
})
}
}
func TestUpdateGameServerConfigurationInFleet(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
gsSpec := framework.DefaultGameServer(framework.Namespace).Spec
oldPort := int32(7111)
gsSpec.Ports = []agonesv1.GameServerPort{{
ContainerPort: oldPort,
Name: "gameport",
PortPolicy: agonesv1.Dynamic,
Protocol: corev1.ProtocolUDP,
}}
flt := fleetWithGameServerSpec(&gsSpec, framework.Namespace)
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.Nil(t, err, "could not create fleet")
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
assert.Equal(t, int32(replicasCount), flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
// get an allocation
gsa := &allocationv1.GameServerAllocation{ObjectMeta: metav1.ObjectMeta{GenerateName: "allocation-"},
Spec: allocationv1.GameServerAllocationSpec{
Required: metav1.LabelSelector{MatchLabels: map[string]string{agonesv1.FleetNameLabel: flt.ObjectMeta.Name}},
}}
gsa, err = framework.AgonesClient.AllocationV1().GameServerAllocations(framework.Namespace).Create(ctx, gsa, metav1.CreateOptions{})
assert.Nil(t, err, "could not create gameserver allocation")
assert.Equal(t, allocationv1.GameServerAllocationAllocated, gsa.Status.State)
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.AllocatedReplicas == 1
})
flt, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Get(ctx, flt.Name, metav1.GetOptions{})
assert.Nil(t, err, "could not get fleet")
// Update the configuration of the gameservers of the fleet, i.e. container port.
// The changes should only be rolled out to gameservers in ready state, but not the allocated gameserver.
newPort := int32(7222)
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.Spec.Ports[0].ContainerPort = newPort
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
assert.Nil(t, err, "could not update fleet")
err = framework.WaitForFleetGameServersCondition(flt, func(gs *agonesv1.GameServer) bool {
containerPort := gs.Spec.Ports[0].ContainerPort
return (gs.Name == gsa.Status.GameServerName && containerPort == oldPort) ||
(gs.Name != gsa.Status.GameServerName && containerPort == newPort)
})
assert.Nil(t, err, "gameservers don't have expected container port")
}
func TestReservedGameServerInFleet(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = 3
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.NoError(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
gsList, err := framework.ListGameServersFromFleet(flt)
assert.NoError(t, err)
assert.Len(t, gsList, int(flt.Spec.Replicas))
// mark one as reserved
gsCopy := gsList[0].DeepCopy()
gsCopy.Status.State = agonesv1.GameServerStateReserved
_, err = client.GameServers(framework.Namespace).Update(ctx, gsCopy, metav1.UpdateOptions{})
assert.NoError(t, err)
// make sure counts are correct
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
return fleet.Status.ReadyReplicas == 2 && fleet.Status.ReservedReplicas == 1
})
// scale down to 0
flt = scaleFleetSubresource(ctx, t, flt, 0)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(0))
// one should be left behind
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
result := fleet.Status.ReservedReplicas == 1
logrus.WithField("reserved", fleet.Status.ReservedReplicas).WithField("result", result).Info("waiting for 1 reserved replica")
return result
})
// check against gameservers directly too, just to be extra sure
err = wait.PollImmediate(2*time.Second, 5*time.Minute, func() (done bool, err error) {
list, err := framework.ListGameServersFromFleet(flt)
if err != nil {
return true, err
}
l := len(list)
logrus.WithField("len", l).WithField("state", list[0].Status.State).Info("waiting for 1 reserved gs")
return l == 1 && list[0].Status.State == agonesv1.GameServerStateReserved, nil
})
assert.NoError(t, err)
}
// TestFleetGSSpecValidation is built to test a Fleet's underlying GameServer template
// validation. The GameServer Spec contained in a Fleet must be valid for the fleet to be created.
func TestFleetGSSpecValidation(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
// check validation of a GameServer Spec Template with two containers
flt := defaultFleet(framework.Namespace)
containerName := "container2"
flt.Spec.Template.Spec.Template =
corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{{Name: "container", Image: "myImage"}, {Name: containerName, Image: "myImage2"}},
},
}
flt.Spec.Template.Spec.Container = "testing"
_, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok := err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 2)
assert.Equal(t, "Container must be empty or the name of a container in the pod template", statusErr.Status().Details.Causes[1].Message)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Equal(t, "Could not find a container named testing", statusErr.Status().Details.Causes[0].Message)
flt.Spec.Template.Spec.Container = ""
_, err = client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok = err.(*k8serrors.StatusError)
assert.True(t, ok)
causesMessages := []string{agonesv1.ErrContainerRequired, "Could not find a container named "}
if assert.Len(t, statusErr.Status().Details.Causes, 2) {
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[1].Type)
assert.Contains(t, causesMessages, statusErr.Status().Details.Causes[1].Message)
}
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Contains(t, causesMessages, statusErr.Status().Details.Causes[0].Message)
// use valid name for a container, one of two defined above
flt.Spec.Template.Spec.Container = containerName
_, err = client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
// check port configuration validation
fltPort := defaultFleet(framework.Namespace)
fltPort.Spec.Template.Spec.Ports = []agonesv1.GameServerPort{{Name: "Dyn", HostPort: 5555, PortPolicy: agonesv1.Dynamic, ContainerPort: 5555}}
_, err = client.Fleets(framework.Namespace).Create(ctx, fltPort, metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok = err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, agonesv1.ErrHostPort, statusErr.Status().Details.Causes[0].Message)
fltPort.Spec.Template.Spec.Ports[0].PortPolicy = agonesv1.Static
fltPort.Spec.Template.Spec.Ports[0].HostPort = 0
fltPort.Spec.Template.Spec.Ports[0].ContainerPort = 5555
_, err = client.Fleets(framework.Namespace).Create(ctx, fltPort, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, fltPort.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
}
// TestFleetNameValidation is built to test Fleet Name length validation;
// a Fleet Name should have at most 63 chars.
func TestFleetNameValidation(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
nameLen := validation.LabelValueMaxLength + 1
bytes := make([]byte, nameLen)
for i := 0; i < nameLen; i++ {
bytes[i] = 'f'
}
flt.Name = string(bytes)
_, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok := err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.True(t, len(statusErr.Status().Details.Causes) > 0)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
goodFlt := defaultFleet(framework.Namespace)
goodFlt.Name = string(bytes[0 : nameLen-1])
goodFlt, err = client.Fleets(framework.Namespace).Create(ctx, goodFlt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, goodFlt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
}
func assertSuccessOrUpdateConflict(t *testing.T, err error) {
if !k8serrors.IsConflict(err) {
// update conflicts are sometimes ok, we simply lost the race.
assert.Nil(t, err)
}
}
// TestGameServerAllocationDuringGameServerDeletion is built to specifically
// test for race conditions of allocations when doing scale up/down,
// rolling updates, etc. Failures may not happen ALL the time -- as that is the
// nature of race conditions.
func TestGameServerAllocationDuringGameServerDeletion(t *testing.T) {
t.Parallel()
ctx := context.Background()
testAllocationRaceCondition := func(t *testing.T, fleet func(string) *agonesv1.Fleet, deltaSleep time.Duration, delta func(t *testing.T, flt *agonesv1.Fleet)) {
client := framework.AgonesClient.AgonesV1()
flt := fleet(framework.Namespace)
flt.ApplyDefaults()
size := int32(10)
flt.Spec.Replicas = size
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, size, flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
var allocs []string
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
for {
// this gives room for fleet scaling to go down - makes it more likely for the race condition to fire
time.Sleep(100 * time.Millisecond)
gsa := &allocationv1.GameServerAllocation{ObjectMeta: metav1.ObjectMeta{GenerateName: "allocation-"},
Spec: allocationv1.GameServerAllocationSpec{
Required: metav1.LabelSelector{MatchLabels: map[string]string{agonesv1.FleetNameLabel: flt.ObjectMeta.Name}},
}}
gsa, err = framework.AgonesClient.AllocationV1().GameServerAllocations(framework.Namespace).Create(ctx, gsa, metav1.CreateOptions{})
if err != nil || gsa.Status.State == allocationv1.GameServerAllocationUnAllocated {
logrus.WithError(err).Info("Allocation ended")
break
}
logrus.WithField("gs", gsa.Status.GameServerName).Info("Allocated")
allocs = append(allocs, gsa.Status.GameServerName)
}
wg.Done()
}()
go func() {
// this tends to force the scaling to happen as we are fleet allocating
time.Sleep(deltaSleep)
// call the function that makes the change to the fleet
logrus.Info("Applying delta function")
delta(t, flt)
wg.Done()
}()
wg.Wait()
assert.NotEmpty(t, allocs)
for _, name := range allocs {
gsCheck, err := client.GameServers(framework.Namespace).Get(ctx, name, metav1.GetOptions{})
assert.Nil(t, err)
assert.True(t, gsCheck.ObjectMeta.DeletionTimestamp.IsZero())
}
}
t.Run("scale down", func(t *testing.T) {
t.Parallel()
testAllocationRaceCondition(t, defaultFleet, time.Second,
func(t *testing.T, flt *agonesv1.Fleet) {
const targetScale = int32(0)
flt = scaleFleetPatch(ctx, t, flt, targetScale)
assert.Equal(t, targetScale, flt.Spec.Replicas)
})
})
t.Run("recreate update", func(t *testing.T) {
t.Parallel()
fleet := func(ns string) *agonesv1.Fleet {
flt := defaultFleet(ns)
flt.Spec.Strategy.Type = appsv1.RecreateDeploymentStrategyType
flt.Spec.Template.ObjectMeta.Annotations = map[string]string{key: red}
return flt
}
testAllocationRaceCondition(t, fleet, time.Second,
func(t *testing.T, flt *agonesv1.Fleet) {
flt, err := framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.Name, metav1.GetOptions{})
assert.Nil(t, err)
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.ObjectMeta.Annotations[key] = green
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
assertSuccessOrUpdateConflict(t, err)
})
})
t.Run("rolling update", func(t *testing.T) {
t.Parallel()
fleet := func(ns string) *agonesv1.Fleet {
flt := defaultFleet(ns)
flt.Spec.Strategy.Type = appsv1.RollingUpdateDeploymentStrategyType
flt.Spec.Template.ObjectMeta.Annotations = map[string]string{key: red}
return flt
}
testAllocationRaceCondition(t, fleet, time.Duration(0),
func(t *testing.T, flt *agonesv1.Fleet) {
flt, err := framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Get(ctx, flt.ObjectMeta.Name, metav1.GetOptions{})
assert.Nil(t, err)
fltCopy := flt.DeepCopy()
fltCopy.Spec.Template.ObjectMeta.Annotations[key] = green
_, err = framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Update(ctx, fltCopy, metav1.UpdateOptions{})
assertSuccessOrUpdateConflict(t, err)
})
})
}
// TestCreateFleetAndUpdateScaleSubresource is built to
// test scale subresource usage and its ability to change Fleet Replica size.
// Both scaling up and down.
func TestCreateFleetAndUpdateScaleSubresource(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
const initialReplicas int32 = 1
flt.Spec.Replicas = initialReplicas
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, initialReplicas, flt.Spec.Replicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
newReplicas := initialReplicas * 2
scaleFleetSubresource(ctx, t, flt, newReplicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(newReplicas))
scaleFleetSubresource(ctx, t, flt, initialReplicas)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(initialReplicas))
}
// TestScaleUpAndDownInParallelStressTest creates N fleets, half of which start at fleetSize replicas
// (and are first scaled down to zero) and the other half at 1 replica, then scales them up/down 3 times
// in parallel, expecting to reach the desired number of ready replicas each time.
// This test is also used as a stress test with 'make stress-test-e2e', in which case it creates
// many more fleets of bigger sizes and runs many more repetitions.
func TestScaleUpAndDownInParallelStressTest(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
fleetCount := 2
fleetSize := int32(10)
defaultReplicas := int32(1)
repeatCount := 3
deadline := time.Now().Add(1 * time.Minute)
logrus.WithField("fleetCount", fleetCount).
WithField("fleetSize", fleetSize).
WithField("repeatCount", repeatCount).
WithField("deadline", deadline).
Info("starting scale up/down test")
if framework.StressTestLevel > 0 {
fleetSize = 10 * int32(framework.StressTestLevel)
repeatCount = 10
fleetCount = 10
deadline = time.Now().Add(45 * time.Minute)
}
var fleets []*agonesv1.Fleet
scaleUpStats := framework.NewStatsCollector(fmt.Sprintf("fleet_%v_scale_up", fleetSize), framework.Version)
scaleDownStats := framework.NewStatsCollector(fmt.Sprintf("fleet_%v_scale_down", fleetSize), framework.Version)
defer scaleUpStats.Report()
defer scaleDownStats.Report()
for fleetNumber := 0; fleetNumber < fleetCount; fleetNumber++ {
flt := defaultFleet(framework.Namespace)
flt.ObjectMeta.GenerateName = fmt.Sprintf("scale-fleet-%v-", fleetNumber)
if fleetNumber%2 == 0 {
// even-numbered fleets start at fleetSize and are scaled down to zero and back.
flt.Spec.Replicas = fleetSize
} else {
// odd-numbered fleets start at the default 1 replica and are scaled up to fleetSize and back.
flt.Spec.Replicas = defaultReplicas
}
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
fleets = append(fleets, flt)
}
// wait for initial fleet conditions.
for fleetNumber, flt := range fleets {
if fleetNumber%2 == 0 {
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(fleetSize))
} else {
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(defaultReplicas))
}
}
errorsChan := make(chan error)
var wg sync.WaitGroup
finished := make(chan bool, 1)
for fleetNumber, flt := range fleets {
wg.Add(1)
go func(fleetNumber int, flt *agonesv1.Fleet) {
defer wg.Done()
defer func() {
if err := recover(); err != nil {
t.Errorf("recovered panic: %v", err)
}
}()
if fleetNumber%2 == 0 {
duration, err := scaleAndWait(ctx, t, flt, 0)
if err != nil {
fmt.Println(err)
errorsChan <- err
return
}
scaleDownStats.ReportDuration(duration, nil)
}
for i := 0; i < repeatCount; i++ {
if time.Now().After(deadline) {
break
}
duration, err := scaleAndWait(ctx, t, flt, fleetSize)
if err != nil {
fmt.Println(err)
errorsChan <- err
return
}
scaleUpStats.ReportDuration(duration, nil)
duration, err = scaleAndWait(ctx, t, flt, 0)
if err != nil {
fmt.Println(err)
errorsChan <- err
return
}
scaleDownStats.ReportDuration(duration, nil)
}
}(fleetNumber, flt)
}
go func() {
wg.Wait()
close(finished)
}()
select {
case <-finished:
case err := <-errorsChan:
t.Fatalf("Error in waiting for a fleet to scale: %s", err)
}
fmt.Println("We are Done")
}
// Creates a fleet and one GameServer with Packed scheduling.
// Scale to two GameServers with Distributed scheduling.
// The old GameServer has Scheduling set to Packed and the new one has it set to Distributed.
func TestUpdateFleetScheduling(t *testing.T) {
t.Parallel()
ctx := context.Background()
t.Run("Updating Spec.Scheduling on fleet should be updated in GameServer",
func(t *testing.T) {
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = 1
flt.Spec.Scheduling = apis.Packed
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
assert.Equal(t, int32(1), flt.Spec.Replicas)
assert.Equal(t, apis.Packed, flt.Spec.Scheduling)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
const targetScale = 2
flt = schedulingFleetPatch(ctx, t, flt, apis.Distributed, targetScale)
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(targetScale))
assert.Equal(t, int32(targetScale), flt.Spec.Replicas)
assert.Equal(t, apis.Distributed, flt.Spec.Scheduling)
err = framework.WaitForFleetGameServerListCondition(flt,
func(gsList []agonesv1.GameServer) bool {
return countFleetScheduling(gsList, apis.Distributed) == 1 &&
countFleetScheduling(gsList, apis.Packed) == 1
})
assert.Nil(t, err)
})
}
// TestFleetWithZeroReplicas ensures that we can always create 0 replica
// fleets, which is useful!
func TestFleetWithZeroReplicas(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = 0
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.NoError(t, err)
// can't think of a better way to wait for a bit before checking.
time.Sleep(time.Second)
list, err := framework.ListGameServersFromFleet(flt)
assert.NoError(t, err)
assert.Empty(t, list)
}
// TestFleetWithLongLabelsAnnotations ensures that we cannot create a fleet
// with a label value longer than 63 chars or an annotation key longer than 63 chars
func TestFleetWithLongLabelsAnnotations(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
fleetSize := int32(1)
flt := defaultFleet(framework.Namespace)
flt.Spec.Replicas = fleetSize
normalLengthName := strings.Repeat("f", validation.LabelValueMaxLength)
longName := normalLengthName + "f"
flt.Spec.Template.ObjectMeta.Labels = make(map[string]string)
flt.Spec.Template.ObjectMeta.Labels["label"] = longName
_, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.Error(t, err)
statusErr, ok := err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Equal(t, "labels", statusErr.Status().Details.Causes[0].Field)
// Set Label to normal size and add Annotations with an error
flt.Spec.Template.ObjectMeta.Labels["label"] = normalLengthName
flt.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
flt.Spec.Template.ObjectMeta.Annotations[longName] = normalLengthName
_, err = client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
assert.Error(t, err)
statusErr, ok = err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, "annotations", statusErr.Status().Details.Causes[0].Field)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
goodFlt := defaultFleet(framework.Namespace)
goodFlt.Spec.Template.ObjectMeta.Labels = make(map[string]string)
goodFlt.Spec.Template.ObjectMeta.Labels["label"] = normalLengthName
goodFlt, err = client.Fleets(framework.Namespace).Create(ctx, goodFlt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, goodFlt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
err = framework.WaitForFleetCondition(t, goodFlt, e2e.FleetReadyCount(goodFlt.Spec.Replicas))
assert.Nil(t, err)
// Verify validation on Update()
flt, err = client.Fleets(framework.Namespace).Get(ctx, goodFlt.ObjectMeta.GetName(), metav1.GetOptions{})
assert.Nil(t, err)
goodFlt = flt.DeepCopy()
goodFlt.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
goodFlt.Spec.Template.ObjectMeta.Annotations[longName] = normalLengthName
_, err = client.Fleets(framework.Namespace).Update(ctx, goodFlt, metav1.UpdateOptions{})
assert.Error(t, err)
statusErr, ok = err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, "annotations", statusErr.Status().Details.Causes[0].Field)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
// Make sure normal-length annotations pass validation on Update
flt, err = client.Fleets(framework.Namespace).Get(ctx, goodFlt.ObjectMeta.GetName(), metav1.GetOptions{})
assert.Nil(t, err)
goodFlt = flt.DeepCopy()
goodFlt.Spec.Template.ObjectMeta.Annotations = make(map[string]string)
goodFlt.Spec.Template.ObjectMeta.Annotations[normalLengthName] = longName
_, err = client.Fleets(framework.Namespace).Update(ctx, goodFlt, metav1.UpdateOptions{})
assert.Nil(t, err)
}
// TestFleetRecreateGameServers tests various gameserver shutdown scenarios to ensure
// that recreation happens as expected
func TestFleetRecreateGameServers(t *testing.T) {
t.Parallel()
ctx := context.Background()
tests := map[string]struct {
f func(t *testing.T, list *agonesv1.GameServerList)
}{
"pod deletion": {f: func(t *testing.T, list *agonesv1.GameServerList) {
podClient := framework.KubeClient.CoreV1().Pods(framework.Namespace)
for _, gs := range list.Items {
gs := gs
pod, err := podClient.Get(ctx, gs.ObjectMeta.Name, metav1.GetOptions{})
assert.NoError(t, err)
assert.True(t, metav1.IsControlledBy(pod, &gs))
err = podClient.Delete(ctx, pod.ObjectMeta.Name, metav1.DeleteOptions{})
assert.NoError(t, err)
}
}},
"gameserver shutdown": {f: func(t *testing.T, list *agonesv1.GameServerList) {
for _, gs := range list.Items {
gs := gs
var reply string
reply, err := e2e.SendGameServerUDP(&gs, "EXIT")
if err != nil {
t.Fatalf("Could not message GameServer: %v", err)
}
assert.Equal(t, "ACK: EXIT\n", reply)
}
}},
"gameserver unhealthy": {f: func(t *testing.T, list *agonesv1.GameServerList) {
for _, gs := range list.Items {
gs := gs
var reply string
reply, err := e2e.SendGameServerUDP(&gs, "UNHEALTHY")
if err != nil {
t.Fatalf("Could not message GameServer: %v", err)
}
assert.Equal(t, "ACK: UNHEALTHY\n", reply)
}
}},
}
for k, v := range tests {
k := k
v := v
t.Run(k, func(t *testing.T) {
t.Parallel()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
// add more game servers, to hunt for race conditions
flt.Spec.Replicas = 10
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt, metav1.CreateOptions{})
if assert.Nil(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
list, err := listGameServers(ctx, flt, client)
assert.NoError(t, err)
assert.Len(t, list.Items, int(flt.Spec.Replicas))
// apply deletion function
logrus.Info("applying deletion function")
v.f(t, list)
for i, gs := range list.Items {
err = wait.Poll(time.Second, 5*time.Minute, func() (done bool, err error) {
_, err = client.GameServers(framework.Namespace).Get(ctx, gs.ObjectMeta.Name, metav1.GetOptions{})
if err != nil && k8serrors.IsNotFound(err) {
logrus.Infof("gameserver %d/%d not found", i+1, flt.Spec.Replicas)
return true, nil
}
return false, err
})
assert.NoError(t, err)
}
framework.AssertFleetCondition(t, flt, e2e.FleetReadyCount(flt.Spec.Replicas))
})
}
}
// TestFleetResourceValidation - check that we are not able to use an invalid PodTemplate
// for the GameServer Spec, i.e. one with wrong resource requests and limits
func TestFleetResourceValidation(t *testing.T) {
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
// check validation of a GameServer Spec Template with two containers
flt := defaultFleet(framework.Namespace)
containerName := "container2"
resources := corev1.ResourceRequirements{
Requests: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("30m"),
corev1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("30m"),
corev1.ResourceMemory: resource.MustParse("32Mi"),
},
}
flt.Spec.Template.Spec.Template =
corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{Name: "container", Image: framework.GameServerImage, Resources: *(resources.DeepCopy())},
{Name: containerName, Image: framework.GameServerImage, Resources: *(resources.DeepCopy())},
},
},
}
mi128 := resource.MustParse("128Mi")
m50 := resource.MustParse("50m")
flt.Spec.Template.Spec.Container = containerName
containers := flt.Spec.Template.Spec.Template.Spec.Containers
containers[1].Resources.Limits[corev1.ResourceMemory] = resource.MustParse("64Mi")
containers[1].Resources.Requests[corev1.ResourceMemory] = mi128
_, err := client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok := err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 1)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Equal(t, "container", statusErr.Status().Details.Causes[0].Field)
containers[0].Resources.Limits[corev1.ResourceCPU] = resource.MustParse("-50m")
_, err = client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{})
assert.NotNil(t, err)
statusErr, ok = err.(*k8serrors.StatusError)
assert.True(t, ok)
assert.Len(t, statusErr.Status().Details.Causes, 3)
assert.Equal(t, metav1.CauseTypeFieldValueInvalid, statusErr.Status().Details.Causes[0].Type)
assert.Equal(t, "container", statusErr.Status().Details.Causes[0].Field)
causes := statusErr.Status().Details.Causes
assertCausesContainsString(t, causes, "Request must be less than or equal to cpu limit")
assertCausesContainsString(t, causes, "Resource cpu limit value must be non negative")
assertCausesContainsString(t, causes, "Request must be less than or equal to memory limit")
containers[1].Resources.Limits[corev1.ResourceMemory] = mi128
containers[0].Resources.Limits[corev1.ResourceCPU] = m50
flt, err = client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{})
if assert.NoError(t, err) {
defer client.Fleets(framework.Namespace).Delete(ctx, flt.ObjectMeta.Name, metav1.DeleteOptions{}) // nolint:errcheck
}
containers = flt.Spec.Template.Spec.Template.Spec.Containers
assert.Equal(t, mi128, containers[1].Resources.Limits[corev1.ResourceMemory])
assert.Equal(t, m50, containers[0].Resources.Limits[corev1.ResourceCPU])
}
func TestFleetAggregatedPlayerStatus(t *testing.T) {
if !runtime.FeatureEnabled(runtime.FeaturePlayerTracking) {
t.SkipNow()
}
t.Parallel()
ctx := context.Background()
client := framework.AgonesClient.AgonesV1()
flt := defaultFleet(framework.Namespace)
flt.Spec.Template.Spec.Players = &agonesv1.PlayersSpec{
InitialCapacity: 10,
}
flt, err := client.Fleets(framework.Namespace).Create(ctx, flt.DeepCopy(), metav1.CreateOptions{})
assert.NoError(t, err)
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
if fleet.Status.Players == nil {
logrus.WithField("status", fleet.Status).Info("No Players")
return false
}
logrus.WithField("status", fleet.Status).Info("Checking Capacity")
return fleet.Status.Players.Capacity == 30
})
list, err := framework.ListGameServersFromFleet(flt)
assert.NoError(t, err)
// set 3 random capacities, and connect a random number of players
totalCapacity := 0
totalPlayers := 0
for i := range list {
// Do this, otherwise scopelint complains about "using a reference for the variable on range scope"
gs := &list[i]
players := rand.IntnRange(1, 5)
capacity := rand.IntnRange(players, 100)
totalCapacity += capacity
msg := fmt.Sprintf("PLAYER_CAPACITY %d", capacity)
reply, err := e2e.SendGameServerUDP(gs, msg)
if err != nil {
t.Fatalf("Could not message GameServer: %v", err)
}
assert.Equal(t, fmt.Sprintf("ACK: %s\n", msg), reply)
totalPlayers += players
for i := 1; i <= players; i++ {
msg := "PLAYER_CONNECT " + fmt.Sprintf("%d", i)
logrus.WithField("msg", msg).WithField("gs", gs.ObjectMeta.Name).Info("Sending Player Connect")
// retry on failure; this reduces flakiness from UDP packets being dropped
err := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
reply, err := e2e.SendGameServerUDP(gs, msg)
if err != nil {
logrus.WithError(err).Warn("error with udp packet")
return false, nil
}
assert.Equal(t, fmt.Sprintf("ACK: %s\n", msg), reply)
return true, nil
})
assert.NoError(t, err)
}
}
framework.AssertFleetCondition(t, flt, func(fleet *agonesv1.Fleet) bool {
logrus.WithField("players", fleet.Status.Players).WithField("totalCapacity", totalCapacity).
WithField("totalPlayers", totalPlayers).Info("Checking Capacity")
// since UDP packets might fail, we might get an extra player, so we'll check for that.
return (fleet.Status.Players.Capacity == int64(totalCapacity)) && (fleet.Status.Players.Count >= int64(totalPlayers))
})
}
func assertCausesContainsString(t *testing.T, causes []metav1.StatusCause, expected string) {
found := false
for _, v := range causes {
if expected == v.Message {
found = true
break
}
}
assert.True(t, found, "Was not able to find '%s'", expected)
}
func listGameServers(ctx context.Context, flt *agonesv1.Fleet, getter typedagonesv1.GameServersGetter) (*agonesv1.GameServerList, error) {
selector := labels.SelectorFromSet(labels.Set{agonesv1.FleetNameLabel: flt.ObjectMeta.Name})
return getter.GameServers(framework.Namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()})
}
// Counts the number of gameservers with the specified scheduling strategy in a fleet
func countFleetScheduling(gsList []agonesv1.GameServer, scheduling apis.SchedulingStrategy) int {
count := 0
for i := range gsList {
gs := &gsList[i]
if gs.Spec.Scheduling == scheduling {
count++
}
}
return count
}
// Patches fleet with scheduling and scale values
func schedulingFleetPatch(ctx context.Context, t *testing.T, f *agonesv1.Fleet, scheduling apis.SchedulingStrategy, scale int32) *agonesv1.Fleet {
patch := fmt.Sprintf(`[{ "op": "replace", "path": "/spec/scheduling", "value": "%s" },
{ "op": "replace", "path": "/spec/replicas", "value": %d }]`,
scheduling, scale)
logrus.WithField("fleet", f.ObjectMeta.Name).
WithField("scheduling", scheduling).
WithField("scale", scale).
WithField("patch", patch).
Info("updating scheduling")
fltRes, err := framework.AgonesClient.
AgonesV1().
Fleets(framework.Namespace).
Patch(ctx, f.ObjectMeta.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
assert.Nil(t, err)
return fltRes
}
func scaleAndWait(ctx context.Context, t *testing.T, flt *agonesv1.Fleet, fleetSize int32) (duration time.Duration, err error) {
t0 := time.Now()
scaleFleetSubresource(ctx, t, flt, fleetSize)
err = framework.WaitForFleetCondition(t, flt, e2e.FleetReadyCount(fleetSize))
duration = time.Since(t0)
return
}
// scaleFleetPatch creates a patch to apply to a Fleet.
// Easier for testing, as it removes object generational issues.
func scaleFleetPatch(ctx context.Context, t *testing.T, f *agonesv1.Fleet, scale int32) *agonesv1.Fleet {
patch := fmt.Sprintf(`[{ "op": "replace", "path": "/spec/replicas", "value": %d }]`, scale)
logrus.WithField("fleet", f.ObjectMeta.Name).WithField("scale", scale).WithField("patch", patch).Info("Scaling fleet")
fltRes, err := framework.AgonesClient.AgonesV1().Fleets(framework.Namespace).Patch(ctx, f.ObjectMeta.Name, types.JSONPatchType, []byte(patch), metav1.PatchOptions{})
assert.Nil(t, err)
return fltRes
}
// scaleFleetSubresource uses scale subresource to change Replicas size of the Fleet.
// Returns the same f that was passed in as a parameter, just to keep the signature in sync with scaleFleetPatch
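// Note (assumption, for orientation only): because Fleet exposes the scale subresource,
// the same operation can be performed from the CLI, e.g. `kubectl scale fleet <name> --replicas=<n>`.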
func scaleFleetSubresource(ctx context.Context, t *testing.T, f *agonesv1.Fleet, scale int32) *agonesv1.Fleet {
logrus.WithField("fleet", f.ObjectMeta.Name).WithField("scale", scale).Info("Scaling fleet")
err := retry.RetryOnConflict(retry.DefaultBackoff, func() error {
client := framework.AgonesClient.AgonesV1()
// GetScale returns the current Scale object, whose opaque resourceVersion
// is then used to create the new Scale object
opts := metav1.GetOptions{}
sc, err := client.Fleets(framework.Namespace).GetScale(ctx, f.ObjectMeta.Name, opts)
if err != nil {
return err
}
sc2 := newScale(f.Name, scale, sc.ObjectMeta.ResourceVersion)
_, err = client.Fleets(framework.Namespace).UpdateScale(ctx, f.ObjectMeta.Name, sc2, metav1.UpdateOptions{})
return err
})
if err != nil {
t.Fatal("could not update the scale subresource")
}
return f
}
// defaultFleet returns a default fleet configuration
func defaultFleet(namespace string) *agonesv1.Fleet {
gs := framework.DefaultGameServer(namespace)
return fleetWithGameServerSpec(&gs.Spec, namespace)
}
// fleetWithGameServerSpec returns a fleet with specified gameserver spec
func fleetWithGameServerSpec(gsSpec *agonesv1.GameServerSpec, namespace string) *agonesv1.Fleet {
return &agonesv1.Fleet{
ObjectMeta: metav1.ObjectMeta{GenerateName: "simple-fleet-", Namespace: namespace},
Spec: agonesv1.FleetSpec{
Replicas: replicasCount,
Template: agonesv1.GameServerTemplateSpec{
Spec: *gsSpec,
},
},
}
}
// newScale returns a scale with specified Replicas spec
func newScale(fleetName string, newReplicas int32, resourceVersion string) *autoscalingv1.Scale {
return &autoscalingv1.Scale{
ObjectMeta: metav1.ObjectMeta{Name: fleetName, Namespace: framework.Namespace, ResourceVersion: resourceVersion},
Spec: autoscalingv1.ScaleSpec{
Replicas: newReplicas,
},
}
}
| TestFleetRollingUpdate |
azure_firewall_network_rule_py3.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AzureFirewallNetworkRule(Model):
| """Properties of the network rule.
:param name: Name of the network rule.
:type name: str
:param description: Description of the rule.
:type description: str
:param protocols: Array of AzureFirewallNetworkRuleProtocols.
:type protocols: list[str or
~azure.mgmt.network.v2018_11_01.models.AzureFirewallNetworkRuleProtocol]
:param source_addresses: List of source IP addresses for this rule.
:type source_addresses: list[str]
:param destination_addresses: List of destination IP addresses.
:type destination_addresses: list[str]
:param destination_ports: List of destination ports.
:type destination_ports: list[str]
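
Example (illustrative sketch only; this usage is not part of the generated model)::

    rule = AzureFirewallNetworkRule(
        name='allow-dns',
        protocols=['UDP'],
        source_addresses=['10.0.0.0/24'],
        destination_addresses=['8.8.8.8'],
        destination_ports=['53'])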
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
'protocols': {'key': 'protocols', 'type': '[str]'},
'source_addresses': {'key': 'sourceAddresses', 'type': '[str]'},
'destination_addresses': {'key': 'destinationAddresses', 'type': '[str]'},
'destination_ports': {'key': 'destinationPorts', 'type': '[str]'},
}
def __init__(self, *, name: str=None, description: str=None, protocols=None, source_addresses=None, destination_addresses=None, destination_ports=None, **kwargs) -> None:
super(AzureFirewallNetworkRule, self).__init__(**kwargs)
self.name = name
self.description = description
self.protocols = protocols
self.source_addresses = source_addresses
self.destination_addresses = destination_addresses
self.destination_ports = destination_ports |
|
Nat.ts | import AbstractEvent from './AbstractEvent';
class Nat extends AbstractEvent {
| }
export default Nat; |
|
vmtemplateserver.go | package vmtemplateserver
import (
"fmt"
hfv1 "github.com/hobbyfarm/gargantua/pkg/apis/hobbyfarm.io/v1"
"github.com/hobbyfarm/gargantua/pkg/authclient"
hfClientset "github.com/hobbyfarm/gargantua/pkg/client/clientset/versioned"
hfInformers "github.com/hobbyfarm/gargantua/pkg/client/informers/externalversions"
"k8s.io/client-go/tools/cache"
)
const (
idIndex = "vmts.hobbyfarm.io/id-index"
nameIndex = "vmts.hobbyfarm.io/name-index"
)
type VMTemplateServer struct {
auth *authclient.AuthClient
hfClientSet *hfClientset.Clientset
vmTemplateIndexer cache.Indexer
}
func | (authClient *authclient.AuthClient, hfClientset *hfClientset.Clientset, hfInformerFactory hfInformers.SharedInformerFactory) (*VMTemplateServer, error) {
vmts := VMTemplateServer{}
vmts.hfClientSet = hfClientset
vmts.auth = authClient
inf := hfInformerFactory.Hobbyfarm().V1().VirtualMachineTemplates().Informer()
indexers := map[string]cache.IndexFunc{idIndex: vmtIdIndexer, nameIndex: vmtNameIndexer}
inf.AddIndexers(indexers)
vmts.vmTemplateIndexer = inf.GetIndexer()
return &vmts, nil
}
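// Illustrative usage of the server and its lookups (hypothetical wiring; assumes the auth
// client, clientset and informer factory are constructed and started elsewhere):
//
//	vmts, _ := NewVMTemplateServer(authClient, hfClient, hfInformerFactory)
//	tmpl, err := vmts.GetVirtualMachineTemplateById("vmt-ubuntu")
//	if err != nil { /* not found, or not yet synced into the informer cache */ }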
func (vmts VMTemplateServer) GetVirtualMachineTemplateById(id string) (hfv1.VirtualMachineTemplate, error) {
empty := hfv1.VirtualMachineTemplate{}
if len(id) == 0 {
return empty, fmt.Errorf("vm template id passed in was empty")
}
obj, err := vmts.vmTemplateIndexer.ByIndex(idIndex, id)
if err != nil {
return empty, fmt.Errorf("error while retrieving virtualmachinetemplate by id: %s with error: %v", id, err)
}
if len(obj) < 1 {
return empty, fmt.Errorf("virtualmachinetemplate not found by id: %s", id)
}
result, ok := obj[0].(*hfv1.VirtualMachineTemplate)
if !ok {
return empty, fmt.Errorf("error while converting virtualmachinetemplate found by id to object: %s", id)
}
return *result, nil
}
func (vmts VMTemplateServer) GetVirtualMachineTemplateByName(name string) (hfv1.VirtualMachineTemplate, error) {
empty := hfv1.VirtualMachineTemplate{}
if len(name) == 0 {
return empty, fmt.Errorf("vm template name passed in was empty")
}
obj, err := vmts.vmTemplateIndexer.ByIndex(nameIndex, name)
if err != nil {
return empty, fmt.Errorf("error while retrieving virtualmachinetemplate by name: %s with error: %v", name, err)
}
if len(obj) < 1 {
return empty, fmt.Errorf("virtualmachinetemplate not found by name: %s", name)
}
result, ok := obj[0].(*hfv1.VirtualMachineTemplate)
if !ok {
return empty, fmt.Errorf("error while converting virtualmachinetemplate found by name to object: %s", name)
}
return *result, nil
}
func vmtIdIndexer(obj interface{}) ([]string, error) {
vmt, ok := obj.(*hfv1.VirtualMachineTemplate)
if !ok {
return []string{}, nil
}
return []string{vmt.Spec.Id}, nil
}
func vmtNameIndexer(obj interface{}) ([]string, error) {
vmt, ok := obj.(*hfv1.VirtualMachineTemplate)
if !ok {
return []string{}, nil
}
return []string{vmt.Spec.Name}, nil
}
| NewVMTemplateServer |
syntax_highlighting.rs | mod tags;
mod html;
mod injection;
#[cfg(test)]
mod tests;
use hir::{Name, Semantics, VariantDef};
use ra_ide_db::{
defs::{classify_name, classify_name_ref, Definition, NameClass, NameRefClass},
RootDatabase,
};
use ra_prof::profile;
use ra_syntax::{
ast::{self, HasFormatSpecifier},
AstNode, AstToken, Direction, NodeOrToken, SyntaxElement,
SyntaxKind::*,
TextRange, WalkEvent, T,
};
use rustc_hash::FxHashMap;
use crate::FileId;
use ast::FormatSpecifier;
pub(crate) use html::highlight_as_html;
pub use tags::{Highlight, HighlightModifier, HighlightModifiers, HighlightTag};
#[derive(Debug, Clone)]
pub struct HighlightedRange {
pub range: TextRange,
pub highlight: Highlight,
pub binding_hash: Option<u64>,
}
// Feature: Semantic Syntax Highlighting
//
// rust-analyzer highlights the code semantically.
// For example, `bar` in `foo::Bar` might be colored differently depending on whether `Bar` is an enum or a trait.
// rust-analyzer does not specify colors directly, instead it assigns tag (like `struct`) and a set of modifiers (like `declaration`) to each token.
// It's up to the client to map those to specific colors.
//
// The general rule is that a reference to an entity gets colored the same way as the entity itself.
// We also give a special modifier to `mut` and `&mut` local variables.
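//
// For example (illustrative): in `let mut x = 92;` the token `x` could be reported with
// the tag `variable` and the modifiers `declaration` and `mutable`, leaving the concrete
// color entirely to the client's theme.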
pub(crate) fn highlight(
db: &RootDatabase,
file_id: FileId,
range_to_highlight: Option<TextRange>,
syntactic_name_ref_highlighting: bool,
) -> Vec<HighlightedRange> {
let _p = profile("highlight");
let sema = Semantics::new(db);
// Determine the root based on the given range.
let (root, range_to_highlight) = {
let source_file = sema.parse(file_id);
match range_to_highlight {
Some(range) => {
let node = match source_file.syntax().covering_element(range) {
NodeOrToken::Node(it) => it,
NodeOrToken::Token(it) => it.parent(),
};
(node, range)
}
None => (source_file.syntax().clone(), source_file.syntax().text_range()),
}
};
let mut bindings_shadow_count: FxHashMap<Name, u32> = FxHashMap::default();
// We use a stack for the DFS traversal below.
// When we leave a node, we use it to flatten the highlighted ranges.
let mut stack = HighlightedRangeStack::new();
let mut current_macro_call: Option<ast::MacroCall> = None;
let mut format_string: Option<SyntaxElement> = None;
// Walk all nodes, keeping track of whether we are inside a macro or not.
// If in macro, expand it first and highlight the expanded code.
for event in root.preorder_with_tokens() {
match &event {
WalkEvent::Enter(_) => stack.push(),
WalkEvent::Leave(_) => stack.pop(),
};
let event_range = match &event {
WalkEvent::Enter(it) => it.text_range(),
WalkEvent::Leave(it) => it.text_range(),
};
// Element outside of the viewport, no need to highlight
if range_to_highlight.intersect(event_range).is_none() {
continue;
}
// Track "inside macro" state
match event.clone().map(|it| it.into_node().and_then(ast::MacroCall::cast)) {
WalkEvent::Enter(Some(mc)) => {
current_macro_call = Some(mc.clone());
if let Some(range) = macro_call_range(&mc) {
stack.add(HighlightedRange {
range,
highlight: HighlightTag::Macro.into(),
binding_hash: None,
});
}
if let Some(name) = mc.is_macro_rules() {
if let Some((highlight, binding_hash)) = highlight_element(
&sema,
&mut bindings_shadow_count,
syntactic_name_ref_highlighting,
name.syntax().clone().into(),
) {
stack.add(HighlightedRange {
range: name.syntax().text_range(),
highlight,
binding_hash,
});
}
}
continue;
}
WalkEvent::Leave(Some(mc)) => {
assert!(current_macro_call == Some(mc));
current_macro_call = None;
format_string = None;
}
_ => (),
}
// Check for Rust code in documentation
match &event {
WalkEvent::Leave(NodeOrToken::Node(node)) => {
if let Some((doctest, range_mapping, new_comments)) =
injection::extract_doc_comments(node)
{
injection::highlight_doc_comment(
doctest,
range_mapping,
new_comments,
&mut stack,
);
}
}
_ => (),
}
let element = match event {
WalkEvent::Enter(it) => it,
WalkEvent::Leave(_) => continue,
};
let range = element.text_range();
let element_to_highlight = if current_macro_call.is_some() && element.kind() != COMMENT {
// Inside a macro -- expand it first
let token = match element.clone().into_token() {
Some(it) if it.parent().kind() == TOKEN_TREE => it,
_ => continue,
};
let token = sema.descend_into_macros(token.clone());
let parent = token.parent();
// Check if macro takes a format string and remember it for highlighting later.
// Macros that accept a format string expand to the compiler builtin macros
// `format_args` and `format_args_nl`.
if let Some(name) = parent
.parent()
.and_then(ast::MacroCall::cast)
.and_then(|mc| mc.path())
.and_then(|p| p.segment())
.and_then(|s| s.name_ref())
{
match name.text().as_str() {
"format_args" | "format_args_nl" => {
format_string = parent
.children_with_tokens()
.filter(|t| t.kind() != WHITESPACE)
.nth(1)
.filter(|e| {
ast::String::can_cast(e.kind())
|| ast::RawString::can_cast(e.kind())
})
}
_ => {}
}
}
// We only care about `NAME` and `NAME_REF` parents here
match (token.kind(), parent.kind()) {
(IDENT, NAME) | (IDENT, NAME_REF) => parent.into(),
_ => token.into(),
}
} else {
element.clone()
};
if let Some(token) = element.as_token().cloned().and_then(ast::RawString::cast) {
let expanded = element_to_highlight.as_token().unwrap().clone();
if injection::highlight_injection(&mut stack, &sema, token, expanded).is_some() {
continue;
}
}
let is_format_string = format_string.as_ref() == Some(&element_to_highlight);
if let Some((highlight, binding_hash)) = highlight_element(
&sema,
&mut bindings_shadow_count,
syntactic_name_ref_highlighting,
element_to_highlight.clone(),
) {
stack.add(HighlightedRange { range, highlight, binding_hash });
if let Some(string) =
element_to_highlight.as_token().cloned().and_then(ast::String::cast)
{
if is_format_string {
stack.push();
string.lex_format_specifier(|piece_range, kind| {
if let Some(highlight) = highlight_format_specifier(kind) {
stack.add(HighlightedRange {
range: piece_range + range.start(),
highlight: highlight.into(),
binding_hash: None,
});
}
});
stack.pop();
}
// Highlight escape sequences
if let Some(char_ranges) = string.char_ranges() {
stack.push();
for (piece_range, _) in char_ranges.iter().filter(|(_, char)| char.is_ok()) {
if string.text()[piece_range.start().into()..].starts_with('\\') {
stack.add(HighlightedRange {
range: piece_range + range.start(),
highlight: HighlightTag::EscapeSequence.into(),
binding_hash: None,
});
}
}
stack.pop_and_inject(None);
}
} else if let Some(string) =
element_to_highlight.as_token().cloned().and_then(ast::RawString::cast)
{
if is_format_string {
stack.push();
string.lex_format_specifier(|piece_range, kind| {
if let Some(highlight) = highlight_format_specifier(kind) {
stack.add(HighlightedRange {
range: piece_range + range.start(),
highlight: highlight.into(),
binding_hash: None,
});
}
});
stack.pop();
}
}
}
}
stack.flattened()
}
#[derive(Debug)]
struct HighlightedRangeStack {
stack: Vec<Vec<HighlightedRange>>,
}
/// We use a stack to implement the flattening logic for the highlighted
/// syntax ranges.
impl HighlightedRangeStack {
fn new() -> Self {
Self { stack: vec![Vec::new()] }
}
fn push(&mut self) {
self.stack.push(Vec::new());
}
/// Flattens the highlighted ranges.
///
/// For example `#[cfg(feature = "foo")]` contains the nested ranges:
/// 1) parent-range: Attribute [0, 23)
/// 2) child-range: String [16, 21)
///
/// The following code implements the flattening; for our example this results in:
/// `[Attribute [0, 16), String [16, 21), Attribute [21, 23)]`
fn pop(&mut self) {
let children = self.stack.pop().unwrap();
let prev = self.stack.last_mut().unwrap();
let needs_flattening = !children.is_empty()
&& !prev.is_empty()
&& prev.last().unwrap().range.contains_range(children.first().unwrap().range);
if !needs_flattening {
prev.extend(children);
} else {
let mut parent = prev.pop().unwrap();
for ele in children {
assert!(parent.range.contains_range(ele.range));
let cloned = Self::intersect(&mut parent, &ele);
if !parent.range.is_empty() {
prev.push(parent);
}
prev.push(ele);
parent = cloned;
}
if !parent.range.is_empty() {
prev.push(parent);
}
}
}
/// Intersects the `HighlightedRange` `parent` with `child`.
/// `parent` is mutated in place, becoming the range before `child`.
/// Returns the range (of the same type as `parent`) *after* `child`.
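/// For example (hypothetical ranges), a `parent` of `[0, 10)` split around a
/// `child` of `[3, 5)` leaves `parent` as `[0, 3)` and returns `[5, 10)`.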
fn intersect(parent: &mut HighlightedRange, child: &HighlightedRange) -> HighlightedRange {
assert!(parent.range.contains_range(child.range));
let mut cloned = parent.clone();
parent.range = TextRange::new(parent.range.start(), child.range.start());
cloned.range = TextRange::new(child.range.end(), cloned.range.end());
cloned
}
/// Removes the part of the `HighlightedRange` `parent` that is currently covered by `child`.
fn intersect_partial(parent: &mut HighlightedRange, child: &HighlightedRange) {
assert!(
parent.range.start() <= child.range.start()
&& parent.range.end() >= child.range.start()
&& child.range.end() > parent.range.end()
);
parent.range = TextRange::new(parent.range.start(), child.range.start());
}
/// Similar to `pop`, but can modify arbitrary prior ranges (where `pop`
/// can only modify the last range currently on the stack).
/// Can be used to do injections that span multiple ranges, like the
/// doctest injection below.
/// If `overwrite_parent` is `Some`, the highlighting of the parent range
/// is overwritten with the argument.
///
/// Note that `pop` can be simulated by `pop_and_inject(None)` but the
/// latter is computationally more expensive.
fn pop_and_inject(&mut self, overwrite_parent: Option<Highlight>) {
let mut children = self.stack.pop().unwrap();
let prev = self.stack.last_mut().unwrap();
children.sort_by_key(|range| range.range.start());
prev.sort_by_key(|range| range.range.start());
for child in children {
if let Some(idx) =
prev.iter().position(|parent| parent.range.contains_range(child.range))
{
if let Some(tag) = overwrite_parent {
prev[idx].highlight = tag;
}
let cloned = Self::intersect(&mut prev[idx], &child);
let insert_idx = if prev[idx].range.is_empty() {
prev.remove(idx);
idx
} else {
idx + 1
};
prev.insert(insert_idx, child);
if !cloned.range.is_empty() {
prev.insert(insert_idx + 1, cloned);
}
} else {
let maybe_idx =
prev.iter().position(|parent| parent.range.contains(child.range.start()));
match (overwrite_parent, maybe_idx) {
(Some(_), Some(idx)) => {
Self::intersect_partial(&mut prev[idx], &child);
let insert_idx = if prev[idx].range.is_empty() {
prev.remove(idx);
idx
} else {
idx + 1
};
prev.insert(insert_idx, child);
}
(_, None) => {
let idx = prev
.binary_search_by_key(&child.range.start(), |range| range.range.start())
.unwrap_or_else(|x| x);
prev.insert(idx, child);
}
_ => {
unreachable!("child range should be completely contained in parent range");
}
}
}
}
}
fn add(&mut self, range: HighlightedRange) {
self.stack
.last_mut()
.expect("during DFS traversal, the stack must not be empty")
.push(range)
}
fn flattened(mut self) -> Vec<HighlightedRange> {
assert_eq!(
self.stack.len(),
1,
"after DFS traversal, the stack should only contain a single element"
);
let mut res = self.stack.pop().unwrap();
res.sort_by_key(|range| range.range.start());
// Check that ranges are sorted and disjoint
assert!(res
.iter()
.zip(res.iter().skip(1))
.all(|(left, right)| left.range.end() <= right.range.start()));
res
}
}
fn highlight_format_specifier(kind: FormatSpecifier) -> Option<HighlightTag> {
Some(match kind {
FormatSpecifier::Open
| FormatSpecifier::Close
| FormatSpecifier::Colon
| FormatSpecifier::Fill
| FormatSpecifier::Align
| FormatSpecifier::Sign
| FormatSpecifier::NumberSign
| FormatSpecifier::DollarSign
| FormatSpecifier::Dot
| FormatSpecifier::Asterisk
| FormatSpecifier::QuestionMark => HighlightTag::FormatSpecifier,
FormatSpecifier::Integer | FormatSpecifier::Zero => HighlightTag::NumericLiteral,
FormatSpecifier::Identifier => HighlightTag::Local,
})
}
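// Computes the range covering the macro name plus its `!`, e.g. for
// `println!("hi")` the returned range spans `println!`.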
fn macro_call_range(macro_call: &ast::MacroCall) -> Option<TextRange> {
let path = macro_call.path()?;
let name_ref = path.segment()?.name_ref()?;
let range_start = name_ref.syntax().text_range().start();
let mut range_end = name_ref.syntax().text_range().end();
for sibling in path.syntax().siblings_with_tokens(Direction::Next) {
match sibling.kind() {
T![!] | IDENT => range_end = sibling.text_range().end(),
_ => (),
}
}
Some(TextRange::new(range_start, range_end))
}
fn is_possibly_unsafe(name_ref: &ast::NameRef) -> bool {
name_ref
.syntax()
.parent()
.and_then(|parent| {
ast::FieldExpr::cast(parent.clone())
.map(|_| true)
.or_else(|| ast::RecordPatField::cast(parent).map(|_| true))
})
.unwrap_or(false)
}
fn highlight_element(
sema: &Semantics<RootDatabase>,
bindings_shadow_count: &mut FxHashMap<Name, u32>,
syntactic_name_ref_highlighting: bool,
element: SyntaxElement,
) -> Option<(Highlight, Option<u64>)> {
let db = sema.db;
let mut binding_hash = None;
let highlight: Highlight = match element.kind() {
FN => {
bindings_shadow_count.clear();
return None;
}
// Highlight definitions depending on the "type" of the definition.
NAME => {
let name = element.into_node().and_then(ast::Name::cast).unwrap();
let name_kind = classify_name(sema, &name);
if let Some(NameClass::Definition(Definition::Local(local))) = &name_kind {
if let Some(name) = local.name(db) {
let shadow_count = bindings_shadow_count.entry(name.clone()).or_default();
*shadow_count += 1;
binding_hash = Some(calc_binding_hash(&name, *shadow_count))
}
};
match name_kind {
Some(NameClass::Definition(def)) => {
highlight_name(db, def, false) | HighlightModifier::Definition
}
Some(NameClass::ConstReference(def)) => highlight_name(db, def, false),
Some(NameClass::FieldShorthand { field, .. }) => {
let mut h = HighlightTag::Field.into();
if let Definition::Field(field) = field {
if let VariantDef::Union(_) = field.parent_def(db) {
h |= HighlightModifier::Unsafe;
}
}
h
}
None => highlight_name_by_syntax(name) | HighlightModifier::Definition,
}
}
// Highlight references like the definitions they resolve to
NAME_REF if element.ancestors().any(|it| it.kind() == ATTR) => {
Highlight::from(HighlightTag::Function) | HighlightModifier::Attribute
}
NAME_REF => {
let name_ref = element.into_node().and_then(ast::NameRef::cast).unwrap();
let possibly_unsafe = is_possibly_unsafe(&name_ref);
match classify_name_ref(sema, &name_ref) {
Some(name_kind) => match name_kind {
NameRefClass::Definition(def) => {
if let Definition::Local(local) = &def {
if let Some(name) = local.name(db) {
let shadow_count =
bindings_shadow_count.entry(name.clone()).or_default();
binding_hash = Some(calc_binding_hash(&name, *shadow_count))
}
};
highlight_name(db, def, possibly_unsafe)
}
NameRefClass::FieldShorthand { .. } => HighlightTag::Field.into(),
},
None if syntactic_name_ref_highlighting => {
highlight_name_ref_by_syntax(name_ref, sema)
}
None => HighlightTag::UnresolvedReference.into(),
}
}
// Simple token-based highlighting
COMMENT => {
let comment = element.into_token().and_then(ast::Comment::cast)?;
let h = HighlightTag::Comment;
match comment.kind().doc {
Some(_) => h | HighlightModifier::Documentation,
None => h.into(),
}
}
STRING | RAW_STRING | RAW_BYTE_STRING | BYTE_STRING => HighlightTag::StringLiteral.into(),
ATTR => HighlightTag::Attribute.into(),
INT_NUMBER | FLOAT_NUMBER => HighlightTag::NumericLiteral.into(),
BYTE => HighlightTag::ByteLiteral.into(),
CHAR => HighlightTag::CharLiteral.into(),
QUESTION => Highlight::new(HighlightTag::Operator) | HighlightModifier::ControlFlow,
LIFETIME => {
let h = Highlight::new(HighlightTag::Lifetime);
match element.parent().map(|it| it.kind()) {
Some(LIFETIME_PARAM) | Some(LABEL) => h | HighlightModifier::Definition,
_ => h,
}
}
p if p.is_punct() => match p {
T![::] | T![->] | T![=>] | T![&] | T![..] | T![=] | T![@] => {
HighlightTag::Operator.into()
}
T![!] if element.parent().and_then(ast::MacroCall::cast).is_some() => {
HighlightTag::Macro.into()
}
T![*] if element.parent().and_then(ast::PtrType::cast).is_some() => {
HighlightTag::Keyword.into()
}
T![*] if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
let prefix_expr = element.parent().and_then(ast::PrefixExpr::cast)?;
let expr = prefix_expr.expr()?;
let ty = sema.type_of_expr(&expr)?;
if ty.is_raw_ptr() {
HighlightTag::Operator | HighlightModifier::Unsafe
} else if let Some(ast::PrefixOp::Deref) = prefix_expr.op_kind() {
HighlightTag::Operator.into()
} else {
HighlightTag::Punctuation.into()
}
}
T![-] if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
HighlightTag::NumericLiteral.into()
}
_ if element.parent().and_then(ast::PrefixExpr::cast).is_some() => {
HighlightTag::Operator.into()
}
_ if element.parent().and_then(ast::BinExpr::cast).is_some() => {
HighlightTag::Operator.into()
}
_ if element.parent().and_then(ast::RangeExpr::cast).is_some() => {
HighlightTag::Operator.into()
}
_ if element.parent().and_then(ast::RangePat::cast).is_some() => {
HighlightTag::Operator.into()
}
_ if element.parent().and_then(ast::RestPat::cast).is_some() => {
HighlightTag::Operator.into()
}
_ if element.parent().and_then(ast::Attr::cast).is_some() => {
HighlightTag::Attribute.into()
}
_ => HighlightTag::Punctuation.into(),
},
k if k.is_keyword() => {
let h = Highlight::new(HighlightTag::Keyword);
match k {
T![break]
| T![continue]
| T![else]
| T![if]
| T![loop]
| T![match]
| T![return]
| T![while]
| T![in] => h | HighlightModifier::ControlFlow,
T![for] if !is_child_of_impl(&element) => h | HighlightModifier::ControlFlow,
T![unsafe] => h | HighlightModifier::Unsafe,
T![true] | T![false] => HighlightTag::BoolLiteral.into(),
T![self] => {
let self_param_is_mut = element
.parent()
.and_then(ast::SelfParam::cast)
.and_then(|p| p.mut_token())
.is_some();
// closure to enforce laziness
let self_path = || {
sema.resolve_path(&element.parent()?.parent().and_then(ast::Path::cast)?)
};
if self_param_is_mut
|| matches!(self_path(),
Some(hir::PathResolution::Local(local))
if local.is_self(db)
&& (local.is_mut(db) || local.ty(db).is_mutable_reference())
)
{
HighlightTag::SelfKeyword | HighlightModifier::Mutable
} else {
HighlightTag::SelfKeyword.into()
}
}
_ => h,
}
}
_ => return None,
};
return Some((highlight, binding_hash));
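// Hashes `(name, shadow_count)` so shadowing bindings (e.g. two successive
// `let x = ...;` in one body) get distinct, stable hashes a client can use to
// tell them apart.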
fn calc_binding_hash(name: &Name, shadow_count: u32) -> u64 {
fn hash<T: std::hash::Hash + std::fmt::Debug>(x: T) -> u64 {
use std::{collections::hash_map::DefaultHasher, hash::Hasher};
let mut hasher = DefaultHasher::new();
x.hash(&mut hasher);
hasher.finish()
}
hash((name, shadow_count))
}
}
fn | (element: &SyntaxElement) -> bool {
match element.parent() {
Some(e) => e.kind() == IMPL,
_ => false,
}
}
fn highlight_name(db: &RootDatabase, def: Definition, possibly_unsafe: bool) -> Highlight {
match def {
Definition::Macro(_) => HighlightTag::Macro,
Definition::Field(field) => {
let mut h = HighlightTag::Field.into();
if possibly_unsafe {
if let VariantDef::Union(_) = field.parent_def(db) {
h |= HighlightModifier::Unsafe;
}
}
return h;
}
Definition::ModuleDef(def) => match def {
hir::ModuleDef::Module(_) => HighlightTag::Module,
hir::ModuleDef::Function(func) => {
let mut h = HighlightTag::Function.into();
if func.is_unsafe(db) {
h |= HighlightModifier::Unsafe;
}
return h;
}
hir::ModuleDef::Adt(hir::Adt::Struct(_)) => HighlightTag::Struct,
hir::ModuleDef::Adt(hir::Adt::Enum(_)) => HighlightTag::Enum,
hir::ModuleDef::Adt(hir::Adt::Union(_)) => HighlightTag::Union,
hir::ModuleDef::EnumVariant(_) => HighlightTag::EnumVariant,
hir::ModuleDef::Const(_) => HighlightTag::Constant,
hir::ModuleDef::Trait(_) => HighlightTag::Trait,
hir::ModuleDef::TypeAlias(_) => HighlightTag::TypeAlias,
hir::ModuleDef::BuiltinType(_) => HighlightTag::BuiltinType,
hir::ModuleDef::Static(s) => {
let mut h = Highlight::new(HighlightTag::Static);
if s.is_mut(db) {
h |= HighlightModifier::Mutable;
h |= HighlightModifier::Unsafe;
}
return h;
}
},
Definition::SelfType(_) => HighlightTag::SelfType,
Definition::TypeParam(_) => HighlightTag::TypeParam,
Definition::Local(local) => {
let tag =
if local.is_param(db) { HighlightTag::ValueParam } else { HighlightTag::Local };
let mut h = Highlight::new(tag);
if local.is_mut(db) || local.ty(db).is_mutable_reference() {
h |= HighlightModifier::Mutable;
}
return h;
}
}
.into()
}
fn highlight_name_by_syntax(name: ast::Name) -> Highlight {
let default = HighlightTag::UnresolvedReference;
let parent = match name.syntax().parent() {
Some(it) => it,
_ => return default.into(),
};
let tag = match parent.kind() {
STRUCT => HighlightTag::Struct,
ENUM => HighlightTag::Enum,
UNION => HighlightTag::Union,
TRAIT => HighlightTag::Trait,
TYPE_ALIAS => HighlightTag::TypeAlias,
TYPE_PARAM => HighlightTag::TypeParam,
RECORD_FIELD => HighlightTag::Field,
MODULE => HighlightTag::Module,
FN => HighlightTag::Function,
CONST => HighlightTag::Constant,
STATIC => HighlightTag::Static,
VARIANT => HighlightTag::EnumVariant,
IDENT_PAT => HighlightTag::Local,
_ => default,
};
tag.into()
}
fn highlight_name_ref_by_syntax(name: ast::NameRef, sema: &Semantics<RootDatabase>) -> Highlight {
let default = HighlightTag::UnresolvedReference;
let parent = match name.syntax().parent() {
Some(it) => it,
_ => return default.into(),
};
let tag = match parent.kind() {
METHOD_CALL_EXPR => HighlightTag::Function,
FIELD_EXPR => {
let h = HighlightTag::Field;
let is_union = ast::FieldExpr::cast(parent)
.and_then(|field_expr| {
let field = sema.resolve_field(&field_expr)?;
// `matches!` replaces the verbose if-let returning true/false
Some(matches!(field.parent_def(sema.db), VariantDef::Union(_)))
})
.unwrap_or(false);
return if is_union { h | HighlightModifier::Unsafe } else { h.into() };
}
PATH_SEGMENT => {
let path = match parent.parent().and_then(ast::Path::cast) {
Some(it) => it,
_ => return default.into(),
};
let expr = match path.syntax().parent().and_then(ast::PathExpr::cast) {
Some(it) => it,
_ => {
// within a path, decide whether it is a module or an ADT by checking for an uppercase name
return if name.text().chars().next().unwrap_or_default().is_uppercase() {
HighlightTag::Struct
} else {
HighlightTag::Module
}
.into();
}
};
let parent = match expr.syntax().parent() {
Some(it) => it,
None => return default.into(),
};
match parent.kind() {
CALL_EXPR => HighlightTag::Function,
_ => {
if name.text().chars().next().unwrap_or_default().is_uppercase() {
HighlightTag::Struct
} else {
HighlightTag::Constant
}
}
}
}
_ => default,
};
tag.into()
}
| is_child_of_impl |
const-pages.js | export const PAGES = {
LOGIN: 0, | NEW_CAMPAIGN: 2,
SHOW_CAMPAIGN: 3,
}; | SELECT_CAMPAIGN: 1, |
configure-sagas.tsx | import autoresetSaga from '../actions/autoreset'
import chat2Saga from '../actions/chat2'
import configSaga from '../actions/config'
import createSagaMiddleware from 'redux-saga'
import deeplinksSaga from '../actions/deeplinks'
import deviceSaga from '../actions/devices'
import fsSaga from '../actions/fs'
import gitSaga from '../actions/git'
import gregorSaga from '../actions/gregor'
import loginSaga from '../actions/login'
import provisionSaga from '../actions/provision'
import notificationsSaga from '../actions/notifications'
import peopleSaga from '../actions/people'
import pinentrySaga from '../actions/pinentry'
import profileSaga from '../actions/profile'
import tracker2Saga from '../actions/tracker2'
import sagaMonitor from './saga-monitor'
import searchSaga from '../actions/search'
import settingsSaga from '../actions/settings'
import signupSaga from '../actions/signup'
import teamsSaga from '../actions/teams'
import unlockFoldersSaga from '../actions/unlock-folders'
import usersSaga from '../actions/users'
import walletsSaga from '../actions/wallets'
import {reduxSagaLogger} from '../local-debug'
import {sagaTimer} from '../util/user-timings'
import * as Saga from '../util/saga'
function* mainSaga() {
yield Saga.spawn(autoresetSaga)
yield Saga.spawn(chat2Saga)
yield Saga.spawn(configSaga)
yield Saga.spawn(deeplinksSaga)
yield Saga.spawn(deviceSaga)
yield Saga.spawn(fsSaga)
yield Saga.spawn(gregorSaga)
yield Saga.spawn(loginSaga)
yield Saga.spawn(provisionSaga)
yield Saga.spawn(notificationsSaga)
yield Saga.spawn(pinentrySaga)
yield Saga.spawn(profileSaga)
yield Saga.spawn(tracker2Saga)
yield Saga.spawn(searchSaga)
yield Saga.spawn(settingsSaga)
yield Saga.spawn(teamsSaga)
yield Saga.spawn(unlockFoldersSaga)
yield Saga.spawn(usersSaga)
yield Saga.spawn(gitSaga)
yield Saga.spawn(peopleSaga)
yield Saga.spawn(walletsSaga)
yield Saga.spawn(signupSaga)
}
let middleWare
function create(crashHandler: (err: any) => void) {
if (!__DEV__ && middleWare) {
throw new Error('Only create one saga middleware!')
}
middleWare = createSagaMiddleware({
onError: crashHandler, | return middleWare
}
function run() {
middleWare && middleWare.run(mainSaga)
}
export {create, run} | sagaMonitor: sagaTimer || (reduxSagaLogger ? sagaMonitor : undefined),
}) |
ki_KE_test.go | package ki_KE
import (
"testing"
"time"
"github.com/jinycoo/jinygo/text/i18n/locales"
"github.com/jinycoo/jinygo/text/i18n/locales/currency"
)
func TestLocale(t *testing.T) {
trans := New()
expected := "ki_KE"
if trans.Locale() != expected {
t.Errorf("Expected '%s' Got '%s'", expected, trans.Locale())
}
}
func TestPluralsRange(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsRange()
// expected := 1
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsOrdinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleTwo,
// },
// {
// expected: locales.PluralRuleFew,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsOrdinal()
// expected := 4
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsCardinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsCardinal()
// expected := 2
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestRangePlurals(t *testing.T) {
trans := New()
tests := []struct {
num1 float64
v1 uint64
num2 float64
v2 uint64
expected locales.PluralRule
}{
// {
// num1: 1,
// v1: 1,
// num2: 2,
// v2: 2,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.RangePluralRule(tt.num1, tt.v1, tt.num2, tt.v2)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestOrdinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 2,
// v: 0,
// expected: locales.PluralRuleTwo,
// },
// {
// num: 3,
// v: 0,
// expected: locales.PluralRuleFew,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.OrdinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestCardinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.CardinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestDaysAbbreviated(t *testing.T) {
trans := New()
days := trans.WeekdaysAbbreviated()
for i, day := range days {
s := trans.WeekdayAbbreviated(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sun",
// },
// {
// idx: 1,
// expected: "Mon",
// },
// {
// idx: 2,
// expected: "Tue",
// },
// {
// idx: 3,
// expected: "Wed",
// },
// {
// idx: 4,
// expected: "Thu",
// },
// {
// idx: 5,
// expected: "Fri",
// },
// {
// idx: 6,
// expected: "Sat",
// },
}
for _, tt := range tests {
s := trans.WeekdayAbbreviated(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysNarrow(t *testing.T) {
trans := New()
days := trans.WeekdaysNarrow()
for i, day := range days {
s := trans.WeekdayNarrow(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", string(day), s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "S",
// },
// {
// idx: 1,
// expected: "M",
// },
// {
// idx: 2,
// expected: "T",
// },
// {
// idx: 3,
// expected: "W",
// },
// {
// idx: 4,
// expected: "T",
// },
// {
// idx: 5,
// expected: "F",
// },
// {
// idx: 6,
// expected: "S",
// },
}
for _, tt := range tests {
s := trans.WeekdayNarrow(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysShort(t *testing.T) |
func TestDaysWide(t *testing.T) {
trans := New()
days := trans.WeekdaysWide()
for i, day := range days {
s := trans.WeekdayWide(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sunday",
// },
// {
// idx: 1,
// expected: "Monday",
// },
// {
// idx: 2,
// expected: "Tuesday",
// },
// {
// idx: 3,
// expected: "Wednesday",
// },
// {
// idx: 4,
// expected: "Thursday",
// },
// {
// idx: 5,
// expected: "Friday",
// },
// {
// idx: 6,
// expected: "Saturday",
// },
}
for _, tt := range tests {
s := trans.WeekdayWide(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsAbbreviated(t *testing.T) {
trans := New()
months := trans.MonthsAbbreviated()
for i, month := range months {
s := trans.MonthAbbreviated(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "Jan",
// },
// {
// idx: 2,
// expected: "Feb",
// },
// {
// idx: 3,
// expected: "Mar",
// },
// {
// idx: 4,
// expected: "Apr",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "Jun",
// },
// {
// idx: 7,
// expected: "Jul",
// },
// {
// idx: 8,
// expected: "Aug",
// },
// {
// idx: 9,
// expected: "Sep",
// },
// {
// idx: 10,
// expected: "Oct",
// },
// {
// idx: 11,
// expected: "Nov",
// },
// {
// idx: 12,
// expected: "Dec",
// },
}
for _, tt := range tests {
s := trans.MonthAbbreviated(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsNarrow(t *testing.T) {
trans := New()
months := trans.MonthsNarrow()
for i, month := range months {
s := trans.MonthNarrow(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "J",
// },
// {
// idx: 2,
// expected: "F",
// },
// {
// idx: 3,
// expected: "M",
// },
// {
// idx: 4,
// expected: "A",
// },
// {
// idx: 5,
// expected: "M",
// },
// {
// idx: 6,
// expected: "J",
// },
// {
// idx: 7,
// expected: "J",
// },
// {
// idx: 8,
// expected: "A",
// },
// {
// idx: 9,
// expected: "S",
// },
// {
// idx: 10,
// expected: "O",
// },
// {
// idx: 11,
// expected: "N",
// },
// {
// idx: 12,
// expected: "D",
// },
}
for _, tt := range tests {
s := trans.MonthNarrow(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsWide(t *testing.T) {
trans := New()
months := trans.MonthsWide()
for i, month := range months {
s := trans.MonthWide(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "January",
// },
// {
// idx: 2,
// expected: "February",
// },
// {
// idx: 3,
// expected: "March",
// },
// {
// idx: 4,
// expected: "April",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "June",
// },
// {
// idx: 7,
// expected: "July",
// },
// {
// idx: 8,
// expected: "August",
// },
// {
// idx: 9,
// expected: "September",
// },
// {
// idx: 10,
// expected: "October",
// },
// {
// idx: 11,
// expected: "November",
// },
// {
// idx: 12,
// expected: "December",
// },
}
for _, tt := range tests {
s := string(trans.MonthWide(time.Month(tt.idx)))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeFull(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
// fixed := time.FixedZone("OTHER", -4)
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am Eastern Standard Time",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, fixed),
// expected: "8:05:01 pm OTHER",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeLong(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am EST",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, loc),
// expected: "8:05:01 pm EST",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05:01 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05:01 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateFull(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Wednesday, February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateLong(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Feb 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/16",
// },
// {
// t: time.Date(-500, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/500",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtNumber(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// expected: "1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// expected: "1,123,456.6",
// },
// {
// num: 221123456.5643,
// v: 3,
// expected: "221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtNumber(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtCurrency(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "-$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "-CAD 221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtCurrency(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtAccounting(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "($221,123,456.564)",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "(CAD 221,123,456.564)",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtAccounting(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtPercent(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 15,
// v: 0,
// expected: "15%",
// },
// {
// num: 15,
// v: 2,
// expected: "15.00%",
// },
// {
// num: 434.45,
// v: 0,
// expected: "434%",
// },
// {
// num: 34.4,
// v: 2,
// expected: "34.40%",
// },
// {
// num: -34,
// v: 0,
// expected: "-34%",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtPercent(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
| {
trans := New()
days := trans.WeekdaysShort()
for i, day := range days {
s := trans.WeekdayShort(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Su",
// },
// {
// idx: 1,
// expected: "Mo",
// },
// {
// idx: 2,
// expected: "Tu",
// },
// {
// idx: 3,
// expected: "We",
// },
// {
// idx: 4,
// expected: "Th",
// },
// {
// idx: 5,
// expected: "Fr",
// },
// {
// idx: 6,
// expected: "Sa",
// },
}
for _, tt := range tests {
s := trans.WeekdayShort(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
} |
get-data.py | """This script downloads all of the data located in the AWS S3 bucket, given the proper
access key and secret key. Assumes that this script will be run from the root of the repository.
Usage: get-data.py --access_key=<access_key> --secret_key=<secret_key>
Options:
--access_key=<access_key> The AWS access key providing access to the bucket.
--secret_key=<secret_key> The AWS secret key providing access to the bucket.
"""
import boto3
import os
from docopt import docopt
# Code is largely adapted from user Shan
# on StackOverflow: https://stackoverflow.com/questions/31918960/boto3-to-download-all-files-from-a-s3-bucket/33350380#33350380
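# Example invocation from the repository root (keys below are hypothetical):
#   python get-data.py --access_key=AKIAEXAMPLE --secret_key=wJalrEXAMPLE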
opt = docopt(__doc__)
def main(access_key, secret_key):
|
main(
access_key=opt['--access_key'],
secret_key=opt['--secret_key']
) | """
This function downloads all of the data in the S3 bucket, given
an access key and secret key with the right access.
Parameters
----------
access_key: str
The AWS access key.
secret_key: str
The AWS secret key.
Returns
---------
None
Examples
---------
main(
access_key=MY_ACCESS_KEY,
secret_key=MY_SECRET_KEY
)
"""
# Initiate S3 client
s3 = boto3.client(
's3',
aws_access_key_id=access_key,
aws_secret_access_key=secret_key
)
for item in s3.list_objects(Bucket='mds-capstone-assurance')['Contents']:
if not item['Key'].endswith("/"):
print("Downloading file:", item['Key'])
s3.download_file(
'mds-capstone-assurance',
item['Key'],
item['Key']
)
else:
if not os.path.exists(item['Key']):
os.makedirs(item['Key'])
return |
google.go | // Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package google
import (
"encoding/json"
"errors"
"fmt"
"strings"
"time"
"cloud.google.com/go/compute/metadata"
"golang.org/x/net/context"
"golang.org/x/oauth2"
"golang.org/x/oauth2/jwt"
)
// Endpoint is Google's OAuth 2.0 endpoint.
var Endpoint = oauth2.Endpoint{
AuthURL: "https://accounts.google.com/o/oauth2/auth",
TokenURL: "https://accounts.google.com/o/oauth2/token",
}
// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
// ConfigFromJSON uses a Google Developers Console client_credentials.json
// file to construct a config.
// client_credentials.json can be downloaded from
// https://console.developers.google.com, under "Credentials". Download the Web
// application credentials in the JSON format and provide the contents of the
// file as jsonKey.
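//
// A minimal usage sketch (file name and scope below are illustrative
// assumptions, not part of this package):
//
//	jsonKey, _ := ioutil.ReadFile("client_credentials.json")
//	conf, _ := google.ConfigFromJSON(jsonKey, "https://www.googleapis.com/auth/drive")
//	authURL := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)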
func ConfigFromJSON(jsonKey []byte, scope ...string) (*oauth2.Config, error) |
// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
// the credentials that authorize and authenticate the requests.
// Create a service account on "Credentials" for your project at
// https://console.developers.google.com to download a JSON key file.
func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
var f credentialsFile
if err := json.Unmarshal(jsonKey, &f); err != nil {
return nil, err
}
if f.Type != serviceAccountKey {
return nil, fmt.Errorf("google: read JWT from JSON credentials: 'type' field is %q (expected %q)", f.Type, serviceAccountKey)
}
scope = append([]string(nil), scope...) // copy
return f.jwtConfig(scope), nil
}
// JSON key file types.
const (
serviceAccountKey = "service_account"
userCredentialsKey = "authorized_user"
)
// credentialsFile is the unmarshalled representation of a credentials file.
type credentialsFile struct {
Type string `json:"type"` // serviceAccountKey or userCredentialsKey
// Service Account fields
ClientEmail string `json:"client_email"`
PrivateKeyID string `json:"private_key_id"`
PrivateKey string `json:"private_key"`
TokenURL string `json:"token_uri"`
ProjectID string `json:"project_id"`
// User Credential fields
// (These typically come from gcloud auth.)
ClientSecret string `json:"client_secret"`
ClientID string `json:"client_id"`
RefreshToken string `json:"refresh_token"`
}
func (f *credentialsFile) jwtConfig(scopes []string) *jwt.Config {
cfg := &jwt.Config{
Email: f.ClientEmail,
PrivateKey: []byte(f.PrivateKey),
PrivateKeyID: f.PrivateKeyID,
Scopes: scopes,
TokenURL: f.TokenURL,
}
if cfg.TokenURL == "" {
cfg.TokenURL = JWTTokenURL
}
return cfg
}
func (f *credentialsFile) tokenSource(ctx context.Context, scopes []string) (oauth2.TokenSource, error) {
switch f.Type {
case serviceAccountKey:
cfg := f.jwtConfig(scopes)
return cfg.TokenSource(ctx), nil
case userCredentialsKey:
cfg := &oauth2.Config{
ClientID: f.ClientID,
ClientSecret: f.ClientSecret,
Scopes: scopes,
Endpoint: Endpoint,
}
tok := &oauth2.Token{RefreshToken: f.RefreshToken}
return cfg.TokenSource(ctx, tok), nil
case "":
return nil, errors.New("missing 'type' field in credentials")
default:
return nil, fmt.Errorf("unknown credential type: %q", f.Type)
}
}
// ComputeTokenSource returns a token source that fetches access tokens
// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
// this token source if your program is running on a GCE instance.
// If no account is specified, "default" is used.
// Further information about retrieving access tokens from the GCE metadata
// server can be found at https://cloud.google.com/compute/docs/authentication.
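//
// Usage sketch (assumes the process is actually running on GCE):
//
//	ts := google.ComputeTokenSource("") // "" selects the "default" account
//	tok, err := ts.Token()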
func ComputeTokenSource(account string) oauth2.TokenSource {
return oauth2.ReuseTokenSource(nil, computeSource{account: account})
}
type computeSource struct {
account string
}
func (cs computeSource) Token() (*oauth2.Token, error) {
if !metadata.OnGCE() {
return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
}
acct := cs.account
if acct == "" {
acct = "default"
}
tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
if err != nil {
return nil, err
}
var res struct {
AccessToken string `json:"access_token"`
ExpiresInSec int `json:"expires_in"`
TokenType string `json:"token_type"`
}
err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
if err != nil {
return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
}
if res.ExpiresInSec == 0 || res.AccessToken == "" {
return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
}
return &oauth2.Token{
AccessToken: res.AccessToken,
TokenType: res.TokenType,
Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
}, nil
}
| {
type cred struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
RedirectURIs []string `json:"redirect_uris"`
AuthURI string `json:"auth_uri"`
TokenURI string `json:"token_uri"`
}
var j struct {
Web *cred `json:"web"`
Installed *cred `json:"installed"`
}
if err := json.Unmarshal(jsonKey, &j); err != nil {
return nil, err
}
var c *cred
switch {
case j.Web != nil:
c = j.Web
case j.Installed != nil:
c = j.Installed
default:
return nil, fmt.Errorf("oauth2/google: no credentials found")
}
if len(c.RedirectURIs) < 1 {
return nil, errors.New("oauth2/google: missing redirect URL in the client_credentials.json")
}
return &oauth2.Config{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RedirectURL: c.RedirectURIs[0],
Scopes: scope,
Endpoint: oauth2.Endpoint{
AuthURL: c.AuthURI,
TokenURL: c.TokenURI,
},
}, nil
} |
supervisor.go | package supervisor
import (
"encoding/json"
"fmt"
"net/http"
"strings"
"time"
log "github.com/sirupsen/logrus"
"github.com/LIVEauctioneers/rabbit-amazon-forwarder/mapping"
)
const (
jsonType = "application/json"
success = "success"
notSupported = "not supported response format"
acceptHeader = "Accept"
contentType = "Content-Type"
acceptAll = "*/*"
)
type response struct {
Healthy bool `json:"healthy"`
Message string `json:"message"`
}
type consumerChannel struct {
name string
check chan bool
stop chan bool
}
// Client supervisor client
type Client struct {
mappings []mapping.ConsumerForwarderMapping
consumers map[string]*consumerChannel
}
// New client for supervisor
func New(consumerForwarderMapping []mapping.ConsumerForwarderMapping) Client |
// Start starts supervisor
func (c *Client) Start() error {
c.consumers = make(map[string]*consumerChannel)
for _, mappingEntry := range c.mappings {
channel := makeConsumerChannel(mappingEntry.Forwarder.Name())
c.consumers[mappingEntry.Forwarder.Name()] = channel
go mappingEntry.Consumer.Start(mappingEntry.Forwarder, channel.check, channel.stop)
log.WithFields(log.Fields{
"consumerName": mappingEntry.Consumer.Name(),
"forwarderName": mappingEntry.Forwarder.Name()}).Info("Started consumer with forwarder")
}
return nil
}
// Check checks running consumers
func (c *Client) Check(w http.ResponseWriter, r *http.Request) {
if accept := r.Header.Get(acceptHeader); accept != "" &&
!strings.Contains(accept, jsonType) &&
!strings.Contains(accept, acceptAll) {
log.WithField("acceptHeader", accept).Warn("Wrong Accept header")
notAcceptableResponse(w)
return
}
stopped := 0
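// Probe each consumer: a value is sent on its buffered check channel; a healthy
// consumer drains it within 500ms, while a leftover value marks it as stopped.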
for _, consumer := range c.consumers {
if len(consumer.check) > 0 {
stopped = stopped + 1
continue
}
consumer.check <- true
time.Sleep(500 * time.Millisecond)
if len(consumer.check) > 0 {
stopped = stopped + 1
}
}
if stopped > 0 {
message := fmt.Sprintf("Number of failed consumers: %d", stopped)
errorResponse(w, message)
return
}
successResponse(w)
}
// Restart restarts every consumer
func (c *Client) Restart(w http.ResponseWriter, r *http.Request) {
c.stop()
if err := c.Start(); err != nil {
log.Error(err)
errorResponse(w, "")
return
}
successResponse(w)
}
func (c *Client) stop() {
for _, consumer := range c.consumers {
consumer.stop <- true
}
}
func makeConsumerChannel(name string) *consumerChannel {
// Capacity 1 lets Check send a probe without blocking and then inspect
// len() for an undrained value; unbuffered channels would block forever
// on a dead consumer.
check := make(chan bool, 1)
stop := make(chan bool, 1)
return &consumerChannel{name: name, check: check, stop: stop}
}
func errorResponse(w http.ResponseWriter, message string) {
w.Header().Set(contentType, jsonType)
w.WriteHeader(500)
w.Write([]byte(message))
}
func notAcceptableResponse(w http.ResponseWriter) {
w.Header().Set(contentType, jsonType)
w.WriteHeader(406)
bytes, err := json.Marshal(response{Healthy: false, Message: notSupported})
if err != nil {
log.Error(err)
w.WriteHeader(500)
return
}
w.Write(bytes)
}
func successResponse(w http.ResponseWriter) {
w.Header().Set(contentType, jsonType)
w.WriteHeader(200)
bytes, err := json.Marshal(response{Healthy: true, Message: success})
if err != nil {
log.Error(err)
w.WriteHeader(200)
return
}
w.Write(bytes)
}
| {
return Client{mappings: consumerForwarderMapping}
} |
change-password.component.ts | import { Component, OnInit } from '@angular/core';
import { FormBuilder, FormGroup, Validator, Validators } from '@angular/forms';
import { Router } from '@angular/router';
import { MatDialog, MatSnackBar } from '@angular/material';
import { MyProfileService } from '../my-profile.service';
import { patternValidators } from 'src/app/utils/validators';
import { ConfirmationComponent } from 'src/app/utils/components/confirmation/confirmation.component';
import { SessionStorage } from 'ngx-webstorage';
import { RoleAuth } from 'src/app/utils/models';
import { SharedService } from 'src/app/utils/services';
@Component({
selector: 'bas-change-password',
templateUrl: './change-password.component.html',
styleUrls: ['./change-password.component.scss']
})
export class ChangePasswordComponent implements OnInit {
@SessionStorage('auth') private authToken;
@SessionStorage('uId') private viewableUser: any;
@SessionStorage('pStatus') public isResetPassword: any;
@SessionStorage('mod') public modules: any;
public changePasswordForm: FormGroup;
public permissions = new RoleAuth([]);
public isInitialReset = false;
public showOldPassword = false;
public showNewPassword = false;
public showConfirmPassword = false;
public canNavigate = true;
constructor(
public formBuilder: FormBuilder,
private myProfileService: MyProfileService,
public sharedService: SharedService,
private dialog: MatDialog,
private router: Router,
private snackBar: MatSnackBar
) { }
ngOnInit() {
// initialize the form
this.changePasswordForm = this.formBuilder.group({
email: [((this.authToken && this.authToken.userDetails && this.authToken.userDetails.email) ? this.authToken.userDetails.email : '')],
oldpassword: ['',
[Validators.required]],
newpassword: ['',
[Validators.required, Validators.pattern(patternValidators().passwordRegExp)]],
confirmPassword: ['',
[Validators.required, Validators.pattern(patternValidators().passwordRegExp)]]
}, {
validator: this.validatePassword('oldpassword', 'newpassword', 'confirmPassword')
}
);
if (this.authToken && this.authToken.userDetails && this.authToken.userDetails.hasOwnProperty('passwordreset')) {
this.isInitialReset = this.authToken.userDetails.passwordreset;
}
}
/* ask for confirmation before navigating with unsaved data */
canDeactivate() {
return this.changePasswordForm.dirty && this.canNavigate;
}
/**
* @param unMatchCtrl - field that must NOT match the actual field (here, the old password)
* @param actualCtrl - actual field (here, the new password)
* @param matchCtrl - field that must match the actual field (here, the confirmation)
*/
validatePassword(unMatchCtrl: string, actualCtrl: string, matchCtrl: string) {
return (formGroup: FormGroup) => {
const control = formGroup.controls[actualCtrl];
const unMatchControl = formGroup.controls[unMatchCtrl];
const matchControl = formGroup.controls[matchCtrl];
if (matchControl.errors && !matchControl.errors.mustMatch && control.errors && !control.errors.mustNotMatch) {
// return if another validator has already found an error on the matchingControl
return;
}
// set the `mustNotMatch` error on the actual control if it matches the field it must differ from
if (control.value !== '' && unMatchControl.value !== '' && control.value === unMatchControl.value) {
control.setErrors({ mustNotMatch: true });
} else {
control.setErrors(null); | }
if (control.value !== '' && matchControl.value !== '' && control.value !== matchControl.value) {
matchControl.setErrors({ mustMatch: true });
} else {
matchControl.setErrors(null);
}
}
}
/* toggle the input type from 'text' to 'password' and vice-versa */
toggleInputType(status: string) {
switch (status) {
case 'old':
this.showOldPassword = !this.showOldPassword;
break;
case 'new':
this.showNewPassword = !this.showNewPassword;
break;
case 'confirm':
this.showConfirmPassword = !this.showConfirmPassword;
break;
}
}
/* service call for password update */
upadatePassword() {
// let changePasswordData = this.changePasswordForm.value;
// delete changePasswordData.confirmPassword;
let changePasswordData = {
old_password: this.changePasswordForm.value.oldpassword,
email: this.changePasswordForm.value.email,
password: this.changePasswordForm.value.newpassword
};
this.myProfileService.post('changepassword', changePasswordData).subscribe(response => {
if (response.success === true) {
const dialogRef = this.dialog.open(ConfirmationComponent, {
autoFocus: false,
disableClose: true,
panelClass: 'confirm-delete-dialog',
backdropClass: 'confirm-delete-backdrop',
data: {
title: 'Change Password',
message: `Password has been changed successfully.`,
buttonLableSubmit: 'Okay',
buttonLableCancel: ''
}
});
dialogRef.afterClosed().subscribe((status: Boolean) => {
if (status) {
this.canNavigate = false;
if (this.authToken && this.authToken.userDetails && this.authToken.userDetails.hasOwnProperty('passwordreset') && this.authToken.userDetails.passwordreset === true) {
this.router.navigate([this.sharedService.landingPage]);
if (this.sharedService.dialogRef) {
this.sharedService.dialogRef.close();
}
} else {
this.router.navigate(['./login']);
// window.location.reload();
// this.changePasswordForm.reset();
}
}
});
} else {
this.snackBar.open('Error while updating password.', 'okay', window['snackBarBottom'])
}
}, (error => {
this.snackBar.open(error.error && error.error.hasOwnProperty('message') ? error.error.message :
window['serverError'], '', window['snackBarBottom']);
}));
}
/* confirm reset action */
confirmReset() {
const dialogRef = this.dialog.open(ConfirmationComponent, {
autoFocus: false,
disableClose: true,
panelClass: 'confirm-delete-dialog',
backdropClass: 'confirm-delete-backdrop',
data: {
title: 'Reset Password',
message: `Are you sure to reset ${this.viewableUser.user.name.toUpperCase()}'s password?`,
buttonLableSubmit: 'Reset'
}
});
dialogRef.afterClosed().subscribe((status: Boolean) => {
if (status) {
this.resetPassword();
}
});
}
cancelPassword() {
this.router.navigateByUrl(this.sharedService.landingPage);
}
/* api request to reset password */
resetPassword() {
let resetData = {
userid: this.viewableUser.user.userid,
password: 'Amazon$2019'
};
this.myProfileService.put('user', resetData).subscribe(response => {
if (response.success === true) {
this.snackBar.open(`${this.viewableUser.user.userid}'s password has been reset successfully.`, 'okay', window['snackBarBottom']);
}
}, (error => {
this.showError(error);
}));
}
/* error handler - shows error message on snackbar */
showError(error: any) {
this.snackBar.open(error.error.hasOwnProperty('message') ? error.error.message : window['serverError'], 'okay',
window['snackBarBottom']);
}
} | // control.setErrors({ mustNotMatch: false }); |
p15.rs | use std::collections::BinaryHeap;
type Grid = Vec<Vec<u32>>;
fn parse(input: &'static str) -> Grid {
input
.trim_end()
.split('\n')
.map(|l| {
l.chars()
.map(|c| c.to_digit(10).expect("input is valid u32s"))
.collect()
})
.collect()
}
/* fn expand(g: &Grid, factor: usize) -> Grid {
(0..(factor * g.len()))
.map(|x| {
(0..(factor * g[0].len()))
.map(|y| {
(g[x % g.len()][y % g[0].len()]
+ u32::try_from(x / g.len() + y / g[0].len())
.expect("grid is not 2^32 long"))
% 10
})
.collect()
})
.collect()
} */
fn expand(g: &Grid, factor: usize) -> Grid {
(0..(factor * g.len()))
.map(|x| {
(0..(factor * g[0].len()))
.map(|y| {
let cost = g[x % g.len()][y % g[0].len()]
// apply the increment
+ u32::try_from(x / g.len() + y / g[0].len())
.expect("grid is not 2^32 long");
// apply wrapping: 10 -> 1, hence the extra cost / 10
cost % 10 + cost / 10
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>()
}
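// Worked example: a base risk of 9 shifted by +1 has cost 10, which wraps to
// 10 % 10 + 10 / 10 == 1, matching the puzzle's 9 -> 1 rule.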
#[derive(Default, PartialEq, Eq)]
struct DijkstraState {
position: (usize, usize),
cost: u32,
}
impl Ord for DijkstraState {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// smaller costs compare higher, since BinaryHeap is a max heap
other
.cost
.cmp(&self.cost)
.then_with(|| self.position.cmp(&other.position))
}
}
impl PartialOrd for DijkstraState {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
/// Get coordinates of adjacent elements. Diagonals are not adjacent.
fn neighbors(g: &Grid, x: usize, y: usize) -> Vec<(usize, usize)> {
let mut out = vec![];
if x > 0 {
out.push((x - 1, y));
}
if x < g.len() - 1 {
out.push((x + 1, y));
}
if y > 0 {
out.push((x, y - 1));
}
if y < g[x].len() - 1 {
out.push((x, y + 1));
}
out
}
fn dijkstra(grid: &Grid) -> u32 {
let goal = (grid.len() - 1, grid[0].len() - 1);
let mut dist = vec![vec![u32::MAX; grid[0].len()]; grid.len()];
let mut queue = BinaryHeap::from([DijkstraState::default()]);
while let Some(DijkstraState {
cost,
position: (x, y),
}) = queue.pop()
{
if (x, y) == goal {
return cost;
}
if cost > dist[x][y] {
continue;
}
for (nx, ny) in neighbors(grid, x, y) {
let ncost = cost + grid[nx][ny];
if ncost < dist[nx][ny] {
queue.push(DijkstraState {
position: (nx, ny),
cost: ncost,
});
dist[nx][ny] = ncost;
}
}
}
unreachable!("grid is well-connected")
}
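// A hypothetical end-to-end check (not part of the original solution): the
// start cell's risk is never counted, only the cells that are entered.
#[cfg(test)]
mod dijkstra_tests {
    use super::*;

    #[test]
    fn counts_entered_cells_only() {
        // The cheapest route from (0, 0) to (1, 1) goes via (0, 1): 1 + 1 = 2.
        let g: Grid = vec![vec![9, 1], vec![9, 1]];
        assert_eq!(dijkstra(&g), 2);
    }
}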
fn solve_for(input: &'static str) -> u32 |
super::example!(315, "15");
super::problem!(u32, "15");
| {
dijkstra(&expand(&parse(input), 5))
} |
mod.rs | //! Rustdoc's HTML rendering module.
//!
//! This module contains the bulk of the logic necessary for rendering a
//! rustdoc `clean::Crate` instance to a set of static HTML pages. This
//! rendering process is largely driven by the `format!` syntax extension to
//! perform all I/O into files and streams.
//!
//! The rendering process is largely driven by the `Context` and `Cache`
//! structures. The cache is pre-populated by crawling the crate in question,
//! and then it is shared among the various rendering threads. The cache is meant
//! to be a fairly large structure not implementing `Clone` (because it's shared
//! among threads). The context, however, should be a lightweight structure. This
//! is cloned per-thread and contains information about what is currently being
//! rendered.
//!
//! In order to speed up rendering (mostly because of markdown rendering), the
//! rendering process has been parallelized. This parallelization is only
//! exposed through the `crate` method on the context, and then also from the
//! fact that the shared cache is stored in TLS (and must be accessed as such).
//!
//! In addition to rendering the crate itself, this module is also responsible
//! for creating the corresponding search index and source file renderings.
//! These threads are not parallelized (they haven't been a bottleneck yet), and
//! both occur before the crate is rendered.
crate mod cache;
#[cfg(test)]
mod tests;
mod context;
mod print_item;
mod span_map;
mod write_shared;
crate use context::*;
crate use span_map::{collect_spans_and_sources, LinkFromSrc};
use std::collections::VecDeque;
use std::default::Default;
use std::fmt;
use std::path::PathBuf;
use std::str;
use std::string::ToString;
use rustc_ast_pretty::pprust;
use rustc_attr::{ConstStability, Deprecation, StabilityLevel};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_hir::def::CtorKind;
use rustc_hir::def_id::DefId;
use rustc_hir::Mutability;
use rustc_middle::middle::stability;
use rustc_span::symbol::{kw, sym, Symbol};
use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer};
use crate::clean::{self, GetDefId, ItemId, RenderedLink, SelfTy};
use crate::docfs::PathError;
use crate::error::Error;
use crate::formats::cache::Cache;
use crate::formats::item_type::ItemType;
use crate::formats::{AssocItemRender, Impl, RenderMode};
use crate::html::escape::Escape;
use crate::html::format::{
href, print_abi_with_space, print_constness_with_space, print_default_space,
print_generic_bounds, print_where_clause, Buffer, HrefError, PrintWithSpace,
};
use crate::html::markdown::{Markdown, MarkdownHtml, MarkdownSummaryLine};
/// A pair of name and its optional document.
crate type NameDoc = (String, Option<String>);
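/// Returns a displayable wrapper around `v` that appends a trailing `/`
/// unless `v` is empty or already ends with one (e.g. "std" renders as
/// "std/", while "" and "std/" are passed through unchanged).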
crate fn ensure_trailing_slash(v: &str) -> impl fmt::Display + '_ {
crate::html::format::display_fn(move |f| {
if !v.ends_with('/') && !v.is_empty() { write!(f, "{}/", v) } else { f.write_str(v) }
})
}
// Helper structs for rendering items/sidebars and carrying along contextual
// information
/// Struct representing one entry in the JS search index. These are all emitted
/// by hand to a large JS file at the end of cache-creation.
#[derive(Debug)]
crate struct IndexItem {
crate ty: ItemType,
crate name: String,
crate path: String,
crate desc: String,
crate parent: Option<DefId>,
crate parent_idx: Option<usize>,
crate search_type: Option<IndexItemFunctionType>,
crate aliases: Box<[String]>,
}
/// A type used for the search index.
#[derive(Debug)]
crate struct RenderType {
name: Option<String>,
generics: Option<Vec<String>>,
}
/// Full type of functions/methods in the search index.
#[derive(Debug)]
crate struct IndexItemFunctionType {
inputs: Vec<TypeWithKind>,
output: Option<Vec<TypeWithKind>>,
}
impl Serialize for IndexItemFunctionType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
// If we couldn't figure out a type, just write `null`.
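// Otherwise the function type serializes as a sequence: `[inputs]` when
// there is no output, or `[inputs, output]` with a single output unwrapped
// from its Vec (a sketch of the shape produced by the code below).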
let mut iter = self.inputs.iter();
if match self.output {
Some(ref output) => iter.chain(output.iter()).any(|ref i| i.ty.name.is_none()),
None => iter.any(|ref i| i.ty.name.is_none()),
} {
serializer.serialize_none()
} else {
let mut seq = serializer.serialize_seq(None)?;
seq.serialize_element(&self.inputs)?;
if let Some(output) = &self.output {
if output.len() > 1 {
seq.serialize_element(&output)?;
} else {
seq.serialize_element(&output[0])?;
}
}
seq.end()
}
}
}
#[derive(Debug)]
crate struct TypeWithKind {
ty: RenderType,
kind: ItemType,
}
impl From<(RenderType, ItemType)> for TypeWithKind {
fn from(x: (RenderType, ItemType)) -> TypeWithKind {
TypeWithKind { ty: x.0, kind: x.1 }
}
}
impl Serialize for TypeWithKind {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut seq = serializer.serialize_seq(None)?;
seq.serialize_element(&self.ty.name)?;
seq.serialize_element(&self.kind)?;
if let Some(generics) = &self.ty.generics {
seq.serialize_element(generics)?;
}
seq.end()
}
}
#[derive(Debug, Clone)]
crate struct StylePath {
/// The path to the theme
crate path: PathBuf,
/// What the `disabled` attribute should be set to in the HTML tag
crate disabled: bool,
}
fn write_srclink(cx: &Context<'_>, item: &clean::Item, buf: &mut Buffer) {
if let Some(l) = cx.src_href(item) {
write!(buf, "<a class=\"srclink\" href=\"{}\" title=\"goto source code\">[src]</a>", l)
}
}
#[derive(Debug, Eq, PartialEq, Hash)]
struct ItemEntry {
url: String,
name: String,
}
impl ItemEntry {
fn new(mut url: String, name: String) -> ItemEntry {
while url.starts_with('/') {
url.remove(0);
}
ItemEntry { url, name }
}
}
impl ItemEntry {
crate fn print(&self) -> impl fmt::Display + '_ {
crate::html::format::display_fn(move |f| {
write!(f, "<a href=\"{}\">{}</a>", self.url, Escape(&self.name))
})
}
}
impl PartialOrd for ItemEntry {
fn partial_cmp(&self, other: &ItemEntry) -> Option<::std::cmp::Ordering> {
Some(self.cmp(other))
}
}
impl Ord for ItemEntry {
fn cmp(&self, other: &ItemEntry) -> ::std::cmp::Ordering {
self.name.cmp(&other.name)
}
}
#[derive(Debug)]
struct AllTypes {
structs: FxHashSet<ItemEntry>,
enums: FxHashSet<ItemEntry>,
unions: FxHashSet<ItemEntry>,
primitives: FxHashSet<ItemEntry>,
traits: FxHashSet<ItemEntry>,
macros: FxHashSet<ItemEntry>,
functions: FxHashSet<ItemEntry>,
typedefs: FxHashSet<ItemEntry>,
opaque_tys: FxHashSet<ItemEntry>,
statics: FxHashSet<ItemEntry>,
constants: FxHashSet<ItemEntry>,
attributes: FxHashSet<ItemEntry>,
derives: FxHashSet<ItemEntry>,
trait_aliases: FxHashSet<ItemEntry>,
}
impl AllTypes {
fn new() -> AllTypes {
let new_set = |cap| FxHashSet::with_capacity_and_hasher(cap, Default::default());
AllTypes {
structs: new_set(100),
enums: new_set(100),
unions: new_set(100),
primitives: new_set(26),
traits: new_set(100),
macros: new_set(100),
functions: new_set(100),
typedefs: new_set(100),
opaque_tys: new_set(100),
statics: new_set(100),
constants: new_set(100),
attributes: new_set(100),
derives: new_set(100),
trait_aliases: new_set(100),
}
}
fn append(&mut self, item_name: String, item_type: &ItemType) {
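// E.g. an item "my_crate::module::Foo" of type `Struct` yields the URL
// "module/struct.Foo.html", displayed as "module::Foo" (illustrative example).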
let mut url: Vec<_> = item_name.split("::").skip(1).collect();
if let Some(name) = url.pop() {
let new_url = format!("{}/{}.{}.html", url.join("/"), item_type, name);
url.push(name);
let name = url.join("::");
match *item_type {
ItemType::Struct => self.structs.insert(ItemEntry::new(new_url, name)),
ItemType::Enum => self.enums.insert(ItemEntry::new(new_url, name)),
ItemType::Union => self.unions.insert(ItemEntry::new(new_url, name)),
ItemType::Primitive => self.primitives.insert(ItemEntry::new(new_url, name)),
ItemType::Trait => self.traits.insert(ItemEntry::new(new_url, name)),
ItemType::Macro => self.macros.insert(ItemEntry::new(new_url, name)),
ItemType::Function => self.functions.insert(ItemEntry::new(new_url, name)),
ItemType::Typedef => self.typedefs.insert(ItemEntry::new(new_url, name)),
ItemType::OpaqueTy => self.opaque_tys.insert(ItemEntry::new(new_url, name)),
ItemType::Static => self.statics.insert(ItemEntry::new(new_url, name)),
ItemType::Constant => self.constants.insert(ItemEntry::new(new_url, name)),
ItemType::ProcAttribute => self.attributes.insert(ItemEntry::new(new_url, name)),
ItemType::ProcDerive => self.derives.insert(ItemEntry::new(new_url, name)),
ItemType::TraitAlias => self.trait_aliases.insert(ItemEntry::new(new_url, name)),
_ => true,
};
}
}
}
impl AllTypes {
fn print(self, f: &mut Buffer) {
fn print_entries(f: &mut Buffer, e: &FxHashSet<ItemEntry>, title: &str, class: &str) {
if !e.is_empty() {
let mut e: Vec<&ItemEntry> = e.iter().collect();
e.sort();
write!(
f,
"<h3 id=\"{}\">{}</h3><ul class=\"{} docblock\">",
title.replace(' ', "-"), // IDs cannot contain whitespaces.
title,
class
);
for s in e.iter() {
write!(f, "<li>{}</li>", s.print());
}
f.write_str("</ul>");
}
}
f.write_str(
"<h1 class=\"fqn\">\
<span class=\"in-band\">List of all items</span>\
<span class=\"out-of-band\">\
<span id=\"render-detail\">\
<a id=\"toggle-all-docs\" href=\"javascript:void(0)\" \
title=\"collapse all docs\">\
[<span class=\"inner\">−</span>]\
</a>\
</span>\
</span>\
</h1>",
);
// Note: print_entries does not escape the title, because we know the current set of titles
// doesn't require escaping.
print_entries(f, &self.structs, "Structs", "structs");
print_entries(f, &self.enums, "Enums", "enums");
print_entries(f, &self.unions, "Unions", "unions");
print_entries(f, &self.primitives, "Primitives", "primitives");
print_entries(f, &self.traits, "Traits", "traits");
print_entries(f, &self.macros, "Macros", "macros");
print_entries(f, &self.attributes, "Attribute Macros", "attributes");
print_entries(f, &self.derives, "Derive Macros", "derives");
print_entries(f, &self.functions, "Functions", "functions");
print_entries(f, &self.typedefs, "Typedefs", "typedefs");
print_entries(f, &self.trait_aliases, "Trait Aliases", "trait-aliases");
print_entries(f, &self.opaque_tys, "Opaque Types", "opaque-types");
print_entries(f, &self.statics, "Statics", "statics");
print_entries(f, &self.constants, "Constants", "constants")
}
}
#[derive(Debug)]
enum Setting {
Section {
description: &'static str,
sub_settings: Vec<Setting>,
},
Toggle {
js_data_name: &'static str,
description: &'static str,
default_value: bool,
},
Select {
js_data_name: &'static str,
description: &'static str,
default_value: &'static str,
options: Vec<(String, String)>,
},
}
impl Setting {
fn display(&self, root_path: &str, suffix: &str) -> String {
match *self {
Setting::Section { description, ref sub_settings } => format!(
"<div class=\"setting-line\">\
<div class=\"title\">{}</div>\
<div class=\"sub-settings\">{}</div>
</div>",
description,
sub_settings.iter().map(|s| s.display(root_path, suffix)).collect::<String>()
),
Setting::Toggle { js_data_name, description, default_value } => format!(
"<div class=\"setting-line\">\
<label class=\"toggle\">\
<input type=\"checkbox\" id=\"{}\" {}>\
<span class=\"slider\"></span>\
</label>\
<div>{}</div>\
</div>",
js_data_name,
if default_value { " checked" } else { "" },
description,
),
Setting::Select { js_data_name, description, default_value, ref options } => format!(
"<div class=\"setting-line\">\
<div>{}</div>\
<label class=\"select-wrapper\">\
<select id=\"{}\" autocomplete=\"off\">{}</select>\
<img src=\"{}down-arrow{}.svg\" alt=\"Select item\">\
</label>\
</div>",
description,
js_data_name,
options
.iter()
.map(|opt| format!(
"<option value=\"{}\" {}>{}</option>",
opt.0,
if opt.0 == default_value { "selected" } else { "" },
opt.1,
))
.collect::<String>(),
root_path,
suffix,
),
}
}
}
impl From<(&'static str, &'static str, bool)> for Setting {
fn from(values: (&'static str, &'static str, bool)) -> Setting {
Setting::Toggle { js_data_name: values.0, description: values.1, default_value: values.2 }
}
}
impl<T: Into<Setting>> From<(&'static str, Vec<T>)> for Setting {
fn from(values: (&'static str, Vec<T>)) -> Setting {
Setting::Section {
description: values.0,
sub_settings: values.1.into_iter().map(|v| v.into()).collect::<Vec<_>>(),
}
}
}
fn settings(root_path: &str, suffix: &str, themes: &[StylePath]) -> Result<String, Error> {
let theme_names: Vec<(String, String)> = themes
.iter()
.map(|entry| {
let theme =
try_none!(try_none!(entry.path.file_stem(), &entry.path).to_str(), &entry.path)
.to_string();
Ok((theme.clone(), theme))
})
.collect::<Result<_, Error>>()?;
// (id, explanation, default value)
let settings: &[Setting] = &[
(
"Theme preferences",
vec![
Setting::from(("use-system-theme", "Use system theme", true)),
Setting::Select {
js_data_name: "preferred-dark-theme",
description: "Preferred dark theme",
default_value: "dark",
options: theme_names.clone(),
},
Setting::Select {
js_data_name: "preferred-light-theme",
description: "Preferred light theme",
default_value: "light",
options: theme_names,
},
],
)
.into(),
("auto-hide-large-items", "Auto-hide item contents for large items.", true).into(),
("auto-hide-method-docs", "Auto-hide item methods' documentation", false).into(),
("auto-hide-trait-implementations", "Auto-hide trait implementation documentation", false)
.into(),
("go-to-only-result", "Directly go to item in search if there is only one result", false)
.into(),
("line-numbers", "Show line numbers on code examples", false).into(),
("disable-shortcuts", "Disable keyboard shortcuts", false).into(),
];
Ok(format!(
"<h1 class=\"fqn\">\
<span class=\"in-band\">Rustdoc settings</span>\
</h1>\
<div class=\"settings\">{}</div>\
<script src=\"{}settings{}.js\"></script>",
settings.iter().map(|s| s.display(root_path, suffix)).collect::<String>(),
root_path,
suffix
))
}
fn document(w: &mut Buffer, cx: &Context<'_>, item: &clean::Item, parent: Option<&clean::Item>) {
if let Some(ref name) = item.name {
info!("Documenting {}", name);
}
document_item_info(w, cx, item, parent);
if parent.is_none() {
document_full_collapsible(w, item, cx);
} else {
document_full(w, item, cx);
}
}
/// Render `md_text` as markdown.
fn render_markdown(w: &mut Buffer, cx: &Context<'_>, md_text: &str, links: Vec<RenderedLink>) {
let mut ids = cx.id_map.borrow_mut();
write!(
w,
"<div class=\"docblock\">{}</div>",
Markdown(
md_text,
&links,
&mut ids,
cx.shared.codes,
cx.shared.edition(),
&cx.shared.playground
)
.into_string()
)
}
/// Writes a documentation block containing only the first paragraph of the documentation. If the
/// docs are longer, a "Read more" link is appended to the end.
fn document_short(
w: &mut Buffer,
item: &clean::Item,
cx: &Context<'_>,
link: AssocItemLink<'_>,
parent: &clean::Item,
show_def_docs: bool,
) {
document_item_info(w, cx, item, Some(parent));
if !show_def_docs {
return;
}
if let Some(s) = item.doc_value() {
let mut summary_html = MarkdownSummaryLine(&s, &item.links(cx)).into_string();
if s.contains('\n') {
let link = format!(r#" <a href="{}">Read more</a>"#, naive_assoc_href(item, link, cx));
if let Some(idx) = summary_html.rfind("</p>") {
summary_html.insert_str(idx, &link);
} else {
summary_html.push_str(&link);
}
}
write!(w, "<div class='docblock'>{}</div>", summary_html,);
}
}
fn document_full_collapsible(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>) {
document_full_inner(w, item, cx, true);
}
fn document_full(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>) {
document_full_inner(w, item, cx, false);
}
fn document_full_inner(w: &mut Buffer, item: &clean::Item, cx: &Context<'_>, is_collapsible: bool) {
if let Some(s) = cx.shared.maybe_collapsed_doc_value(item) {
debug!("Doc block: =====\n{}\n=====", s);
if is_collapsible {
w.write_str(
"<details class=\"rustdoc-toggle top-doc\" open>\
<summary class=\"hideme\">\
<span>Expand description</span>\
</summary>",
);
render_markdown(w, cx, &s, item.links(cx));
w.write_str("</details>");
} else {
render_markdown(w, cx, &s, item.links(cx));
}
}
}
/// Add extra information about an item such as:
///
/// * Stability
/// * Deprecated
/// * Required features (through the `doc_cfg` feature)
fn document_item_info(
w: &mut Buffer,
cx: &Context<'_>,
item: &clean::Item,
parent: Option<&clean::Item>,
) {
let item_infos = short_item_info(item, cx, parent);
if !item_infos.is_empty() {
w.write_str("<div class=\"item-info\">");
for info in item_infos {
w.write_str(&info);
}
w.write_str("</div>");
}
}
fn portability(item: &clean::Item, parent: Option<&clean::Item>) -> Option<String> {
let cfg = match (&item.cfg, parent.and_then(|p| p.cfg.as_ref())) {
(Some(cfg), Some(parent_cfg)) => cfg.simplify_with(parent_cfg),
(cfg, _) => cfg.as_deref().cloned(),
};
debug!("Portability {:?} - {:?} = {:?}", item.cfg, parent.and_then(|p| p.cfg.as_ref()), cfg);
Some(format!("<div class=\"stab portability\">{}</div>", cfg?.render_long_html()))
}
/// Render the stability, deprecation and portability information that is displayed at the top of
/// the item's documentation.
fn short_item_info(
item: &clean::Item,
cx: &Context<'_>,
parent: Option<&clean::Item>,
) -> Vec<String> {
let mut extra_info = vec![];
let error_codes = cx.shared.codes;
if let Some(Deprecation { note, since, is_since_rustc_version, suggestion: _ }) =
item.deprecation(cx.tcx())
{
// We display deprecation messages for #[deprecated] and #[rustc_deprecated]
// but only display the future-deprecation messages for #[rustc_deprecated].
let mut message = if let Some(since) = since {
let since = &since.as_str();
if !stability::deprecation_in_effect(is_since_rustc_version, Some(since)) {
if *since == "TBD" {
String::from("Deprecating in a future Rust version")
} else {
format!("Deprecating in {}", Escape(since))
}
} else {
format!("Deprecated since {}", Escape(since))
}
} else {
String::from("Deprecated")
};
if let Some(note) = note {
let note = note.as_str();
let mut ids = cx.id_map.borrow_mut();
let html = MarkdownHtml(
&note,
&mut ids,
error_codes,
cx.shared.edition(),
&cx.shared.playground,
);
message.push_str(&format!(": {}", html.into_string()));
}
extra_info.push(format!(
"<div class=\"stab deprecated\"><span class=\"emoji\">👎</span> {}</div>",
message,
));
}
// Render unstable items. But don't render "rustc_private" crates (internal compiler crates).
// Those crates are permanently unstable so it makes no sense to render "unstable" everywhere.
if let Some((StabilityLevel::Unstable { reason, issue, .. }, feature)) = item
.stability(cx.tcx())
.as_ref()
.filter(|stab| stab.feature != sym::rustc_private)
.map(|stab| (stab.level, stab.feature))
{
let mut message =
"<span class=\"emoji\">🔬</span> This is a nightly-only experimental API.".to_owned();
let mut feature = format!("<code>{}</code>", Escape(&feature.as_str()));
if let (Some(url), Some(issue)) = (&cx.shared.issue_tracker_base_url, issue) {
feature.push_str(&format!(
" <a href=\"{url}{issue}\">#{issue}</a>",
url = url,
issue = issue
));
}
message.push_str(&format!(" ({})", feature));
if let Some(unstable_reason) = reason {
let mut ids = cx.id_map.borrow_mut();
message = format!(
"<details><summary>{}</summary>{}</details>",
message,
MarkdownHtml(
&unstable_reason.as_str(),
&mut ids,
error_codes,
cx.shared.edition(),
&cx.shared.playground,
)
.into_string()
);
}
extra_info.push(format!("<div class=\"stab unstable\">{}</div>", message));
}
if let Some(portability) = portability(item, parent) {
extra_info.push(portability);
}
extra_info
}
// Render the list of items inside one of the sections "Trait Implementations",
// "Auto Trait Implementations," "Blanket Trait Implementations" (on struct/enum pages).
fn render_impls(
cx: &Context<'_>,
w: &mut Buffer,
traits: &[&&Impl],
containing_item: &clean::Item,
) {
let cache = cx.cache();
let tcx = cx.tcx();
let mut impls = traits
.iter()
.map(|i| {
let did = i.trait_did_full(cache).unwrap();
let provided_trait_methods = i.inner_impl().provided_trait_methods(tcx);
let assoc_link = AssocItemLink::GotoSource(did.into(), &provided_trait_methods);
let mut buffer = if w.is_for_html() { Buffer::html() } else { Buffer::new() };
render_impl(
&mut buffer,
cx,
i,
containing_item,
assoc_link,
RenderMode::Normal,
None,
&[],
ImplRenderingParameters {
show_def_docs: true,
is_on_foreign_type: false,
show_default_items: true,
show_non_assoc_items: true,
toggle_open_by_default: true,
},
);
buffer.into_inner()
})
.collect::<Vec<_>>();
impls.sort();
w.write_str(&impls.join(""));
}
fn naive_assoc_href(it: &clean::Item, link: AssocItemLink<'_>, cx: &Context<'_>) -> String {
use crate::formats::item_type::ItemType::*;
let name = it.name.as_ref().unwrap();
let ty = match it.type_() {
Typedef | AssocType => AssocType,
s => s,
};
let anchor = format!("#{}.{}", ty, name);
match link {
AssocItemLink::Anchor(Some(ref id)) => format!("#{}", id),
AssocItemLink::Anchor(None) => anchor,
AssocItemLink::GotoSource(did, _) => {
href(did.expect_def_id(), cx).map(|p| format!("{}{}", p.0, anchor)).unwrap_or(anchor)
}
}
}
fn assoc_const(
w: &mut Buffer,
it: &clean::Item,
ty: &clean::Type,
_default: Option<&String>,
link: AssocItemLink<'_>,
extra: &str,
cx: &Context<'_>,
) {
write!(
w,
"{}{}const <a href=\"{}\" class=\"constant\">{}</a>: {}",
extra,
it.visibility.print_with_space(it.def_id, cx),
naive_assoc_href(it, link, cx),
it.name.as_ref().unwrap(),
ty.print(cx)
);
}
fn assoc_type(
w: &mut Buffer,
it: &clean::Item,
bounds: &[clean::GenericBound],
default: Option<&clean::Type>,
link: AssocItemLink<'_>,
extra: &str,
cx: &Context<'_>,
) {
write!(
w,
"{}type <a href=\"{}\" class=\"type\">{}</a>",
extra,
naive_assoc_href(it, link, cx),
it.name.as_ref().unwrap()
);
if !bounds.is_empty() {
write!(w, ": {}", print_generic_bounds(bounds, cx))
}
if let Some(default) = default {
write!(w, " = {}", default.print(cx))
}
}
fn render_stability_since_raw(
w: &mut Buffer,
ver: Option<&str>,
const_stability: Option<&ConstStability>,
containing_ver: Option<&str>,
containing_const_ver: Option<&str>,
) {
let ver = ver.filter(|inner| !inner.is_empty());
match (ver, const_stability) {
// stable and const stable
(Some(v), Some(ConstStability { level: StabilityLevel::Stable { since }, .. }))
if Some(since.as_str()).as_deref() != containing_const_ver =>
{
write!(
w,
"<span class=\"since\" title=\"Stable since Rust version {0}, const since {1}\">{0} (const: {1})</span>",
v, since
);
}
// stable and const unstable
(
Some(v),
Some(ConstStability { level: StabilityLevel::Unstable { issue, .. }, feature, .. }),
) => {
write!(
w,
"<span class=\"since\" title=\"Stable since Rust version {0}, const unstable\">{0} (const: ",
v
);
if let Some(n) = issue {
write!(
w,
"<a href=\"https://github.com/rust-lang/rust/issues/{}\" title=\"Tracking issue for {}\">unstable</a>",
n, feature
);
} else {
write!(w, "unstable");
}
write!(w, ")</span>");
}
// stable
(Some(v), _) if ver != containing_ver => {
write!(
w,
"<span class=\"since\" title=\"Stable since Rust version {0}\">{0}</span>",
v
);
}
_ => {}
}
}
fn render_assoc_item(
w: &mut Buffer,
item: &clean::Item,
link: AssocItemLink<'_>,
parent: ItemType,
cx: &Context<'_>,
) {
fn method(
w: &mut Buffer,
meth: &clean::Item,
header: hir::FnHeader,
g: &clean::Generics,
d: &clean::FnDecl,
link: AssocItemLink<'_>,
parent: ItemType,
cx: &Context<'_>,
) {
let name = meth.name.as_ref().unwrap();
let href = match link {
AssocItemLink::Anchor(Some(ref id)) => Some(format!("#{}", id)),
AssocItemLink::Anchor(None) => Some(format!("#{}.{}", meth.type_(), name)),
AssocItemLink::GotoSource(did, provided_methods) => {
// We're creating a link from an impl-item to the corresponding
// trait-item and need to map the anchored type accordingly.
let ty = if provided_methods.contains(&name) {
ItemType::Method
} else {
ItemType::TyMethod
};
match (href(did.expect_def_id(), cx), ty) {
(Ok(p), ty) => Some(format!("{}#{}.{}", p.0, ty, name)),
(Err(HrefError::DocumentationNotBuilt), ItemType::TyMethod) => None,
(Err(_), ty) => Some(format!("#{}.{}", ty, name)),
}
}
};
let vis = meth.visibility.print_with_space(meth.def_id, cx).to_string();
let constness =
print_constness_with_space(&header.constness, meth.const_stability(cx.tcx()));
let asyncness = header.asyncness.print_with_space();
let unsafety = header.unsafety.print_with_space();
let defaultness = print_default_space(meth.is_default());
let abi = print_abi_with_space(header.abi).to_string();
// NOTE: `{:#}` does not print HTML formatting, `{}` does. So `g.print` can't be reused between the length calculation and `write!`.
let generics_len = format!("{:#}", g.print(cx)).len();
let mut header_len = "fn ".len()
+ vis.len()
+ constness.len()
+ asyncness.len()
+ unsafety.len()
+ defaultness.len()
+ abi.len()
+ name.as_str().len()
+ generics_len;
let (indent, indent_str, end_newline) = if parent == ItemType::Trait {
header_len += 4;
let indent_str = " ";
render_attributes_in_pre(w, meth, indent_str);
(4, indent_str, false)
} else {
render_attributes_in_code(w, meth);
(0, "", true)
};
w.reserve(header_len + "<a href=\"\" class=\"fnname\">{".len() + "</a>".len());
write!(
w,
"{indent}{vis}{constness}{asyncness}{unsafety}{defaultness}{abi}fn <a {href} class=\"fnname\">{name}</a>\
{generics}{decl}{notable_traits}{where_clause}",
indent = indent_str,
vis = vis,
constness = constness,
asyncness = asyncness,
unsafety = unsafety,
defaultness = defaultness,
abi = abi,
// links without a href are valid - https://www.w3schools.com/tags/att_a_href.asp
href = href.map(|href| format!("href=\"{}\"", href)).unwrap_or_else(|| "".to_string()),
name = name,
generics = g.print(cx),
decl = d.full_print(header_len, indent, header.asyncness, cx),
notable_traits = notable_traits_decl(&d, cx),
where_clause = print_where_clause(g, cx, indent, end_newline),
)
}
match *item.kind {
clean::StrippedItem(..) => {}
clean::TyMethodItem(ref m) => {
method(w, item, m.header, &m.generics, &m.decl, link, parent, cx)
}
clean::MethodItem(ref m, _) => {
method(w, item, m.header, &m.generics, &m.decl, link, parent, cx)
}
clean::AssocConstItem(ref ty, ref default) => assoc_const(
w,
item,
ty,
default.as_ref(),
link,
if parent == ItemType::Trait { "    " } else { "" },
cx,
),
clean::AssocTypeItem(ref bounds, ref default) => assoc_type(
w,
item,
bounds,
default.as_ref(),
link,
if parent == ItemType::Trait { "    " } else { "" },
cx,
),
_ => panic!("render_assoc_item called on non-associated-item"),
}
}
const ALLOWED_ATTRIBUTES: &[Symbol] =
&[sym::export_name, sym::link_section, sym::no_mangle, sym::repr, sym::non_exhaustive];
fn attributes(it: &clean::Item) -> Vec<String> {
it.attrs
.other_attrs
.iter()
.filter_map(|attr| {
if ALLOWED_ATTRIBUTES.contains(&attr.name_or_empty()) {
Some(pprust::attribute_to_string(&attr).replace("\n", "").replace("  ", " "))
} else {
None
}
})
.collect()
}
// When an attribute is rendered inside a `<pre>` tag, it is formatted using
// a whitespace prefix and newline.
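// E.g. with a four-space prefix, `#[repr(C)]` is emitted as the indented
// line "    #[repr(C)]" followed by a newline (illustrative).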
fn render_attributes_in_pre(w: &mut Buffer, it: &clean::Item, prefix: &str) {
for a in attributes(it) {
writeln!(w, "{}{}", prefix, a);
}
}
// When an attribute is rendered inside a `<code>` tag, it is formatted using
// a div to produce a newline after it.
fn render_attributes_in_code(w: &mut Buffer, it: &clean::Item) {
for a in attributes(it) {
write!(w, "<div class=\"code-attribute\">{}</div>", a);
}
}
#[derive(Copy, Clone)]
enum AssocItemLink<'a> {
Anchor(Option<&'a str>),
GotoSource(ItemId, &'a FxHashSet<Symbol>),
}
impl<'a> AssocItemLink<'a> {
fn anchor(&self, id: &'a str) -> Self {
match *self {
AssocItemLink::Anchor(_) => AssocItemLink::Anchor(Some(&id)),
ref other => *other,
}
}
}
fn render_assoc_items(
w: &mut Buffer,
cx: &Context<'_>,
containing_item: &clean::Item,
it: DefId,
what: AssocItemRender<'_>,
) {
info!("Documenting associated items of {:?}", containing_item.name);
let cache = cx.cache();
let v = match cache.impls.get(&it) {
Some(v) => v,
None => return,
};
let (non_trait, traits): (Vec<_>, _) = v.iter().partition(|i| i.inner_impl().trait_.is_none());
if !non_trait.is_empty() {
let render_mode = match what {
AssocItemRender::All => {
w.write_str(
"<h2 id=\"implementations\" class=\"small-section-header\">\
Implementations<a href=\"#implementations\" class=\"anchor\"></a>\
</h2>",
);
RenderMode::Normal
}
AssocItemRender::DerefFor { trait_, type_, deref_mut_ } => {
write!(
w,
"<h2 id=\"deref-methods\" class=\"small-section-header\">\
<span>Methods from {trait_}&lt;Target = {type_}&gt;</span>\
<a href=\"#deref-methods\" class=\"anchor\"></a>\
</h2>",
trait_ = trait_.print(cx),
type_ = type_.print(cx),
);
RenderMode::ForDeref { mut_: deref_mut_ }
}
};
for i in &non_trait {
render_impl(
w,
cx,
i,
containing_item,
AssocItemLink::Anchor(None),
render_mode,
None,
&[],
ImplRenderingParameters {
show_def_docs: true,
is_on_foreign_type: false,
show_default_items: true,
show_non_assoc_items: true,
toggle_open_by_default: true,
},
);
}
}
if let AssocItemRender::DerefFor { .. } = what {
return;
}
if !traits.is_empty() {
let deref_impl = traits
.iter()
.find(|t| t.inner_impl().trait_.def_id_full(cache) == cache.deref_trait_did);
if let Some(impl_) = deref_impl {
let has_deref_mut = traits
.iter()
.any(|t| t.inner_impl().trait_.def_id_full(cache) == cache.deref_mut_trait_did);
render_deref_methods(w, cx, impl_, containing_item, has_deref_mut);
}
let (synthetic, concrete): (Vec<&&Impl>, Vec<&&Impl>) =
traits.iter().partition(|t| t.inner_impl().synthetic);
let (blanket_impl, concrete): (Vec<&&Impl>, _) =
concrete.into_iter().partition(|t| t.inner_impl().blanket_impl.is_some());
let mut impls = Buffer::empty_from(&w);
render_impls(cx, &mut impls, &concrete, containing_item);
let impls = impls.into_inner();
if !impls.is_empty() {
write!(
w,
"<h2 id=\"trait-implementations\" class=\"small-section-header\">\
Trait Implementations<a href=\"#trait-implementations\" class=\"anchor\"></a>\
</h2>\
<div id=\"trait-implementations-list\">{}</div>",
impls
);
}
if !synthetic.is_empty() {
w.write_str(
"<h2 id=\"synthetic-implementations\" class=\"small-section-header\">\
Auto Trait Implementations\
<a href=\"#synthetic-implementations\" class=\"anchor\"></a>\
</h2>\
<div id=\"synthetic-implementations-list\">",
);
render_impls(cx, w, &synthetic, containing_item);
w.write_str("</div>");
}
if !blanket_impl.is_empty() {
w.write_str(
"<h2 id=\"blanket-implementations\" class=\"small-section-header\">\
Blanket Implementations\
<a href=\"#blanket-implementations\" class=\"anchor\"></a>\
</h2>\
<div id=\"blanket-implementations-list\">",
);
render_impls(cx, w, &blanket_impl, containing_item);
w.write_str("</div>");
}
}
}
fn render_deref_methods(
w: &mut Buffer,
cx: &Context<'_>,
impl_: &Impl,
container_item: &clean::Item,
deref_mut: bool,
) {
let cache = cx.cache();
let deref_type = impl_.inner_impl().trait_.as_ref().unwrap();
let (target, real_target) = impl_
.inner_impl()
.items
.iter()
.find_map(|item| match *item.kind {
clean::TypedefItem(ref t, true) => Some(match *t {
clean::Typedef { item_type: Some(ref type_), .. } => (type_, &t.type_),
_ => (&t.type_, &t.type_),
}),
_ => None,
})
.expect("Expected associated type binding");
debug!("Render deref methods for {:#?}, target {:#?}", impl_.inner_impl().for_, target);
let what =
AssocItemRender::DerefFor { trait_: deref_type, type_: real_target, deref_mut_: deref_mut };
if let Some(did) = target.def_id_full(cache) {
if let Some(type_did) = impl_.inner_impl().for_.def_id_full(cache) {
// `impl Deref<Target = S> for S`
if did == type_did {
// Avoid infinite cycles
return;
}
}
render_assoc_items(w, cx, container_item, did, what);
} else {
if let Some(prim) = target.primitive_type() {
if let Some(&did) = cache.primitive_locations.get(&prim) {
render_assoc_items(w, cx, container_item, did, what);
}
}
}
}
fn should_render_item(item: &clean::Item, deref_mut_: bool, cache: &Cache) -> bool {
let self_type_opt = match *item.kind {
clean::MethodItem(ref method, _) => method.decl.self_type(),
clean::TyMethodItem(ref method) => method.decl.self_type(),
_ => None,
};
if let Some(self_ty) = self_type_opt {
let (by_mut_ref, by_box, by_value) = match self_ty {
SelfTy::SelfBorrowed(_, mutability)
| SelfTy::SelfExplicit(clean::BorrowedRef { mutability, .. }) => {
(mutability == Mutability::Mut, false, false)
}
SelfTy::SelfExplicit(clean::ResolvedPath { did, .. }) => {
(false, Some(did) == cache.owned_box_did, false)
}
SelfTy::SelfValue => (false, false, true),
_ => (false, false, false),
};
(deref_mut_ || !by_mut_ref) && !by_box && !by_value
} else {
false
}
}
fn notable_traits_decl(decl: &clean::FnDecl, cx: &Context<'_>) -> String {
let mut out = Buffer::html();
let mut trait_ = String::new();
if let Some(did) = decl.output.def_id_full(cx.cache()) {
if let Some(impls) = cx.cache().impls.get(&did) {
for i in impls {
let impl_ = i.inner_impl();
if impl_.trait_.def_id().map_or(false, |d| {
cx.cache().traits.get(&d).map(|t| t.is_notable).unwrap_or(false)
}) {
if out.is_empty() {
write!(
&mut out,
"<div class=\"notable\">Notable traits for {}</div>\
<code class=\"content\">",
impl_.for_.print(cx)
);
trait_.push_str(&impl_.for_.print(cx).to_string());
}
//use the "where" class here to make it small
write!(
&mut out,
"<span class=\"where fmt-newline\">{}</span>",
impl_.print(false, cx)
);
let t_did = impl_.trait_.def_id_full(cx.cache()).unwrap();
for it in &impl_.items {
if let clean::TypedefItem(ref tydef, _) = *it.kind {
out.push_str("<span class=\"where fmt-newline\"> ");
assoc_type(
&mut out,
it,
&[],
Some(&tydef.type_),
AssocItemLink::GotoSource(t_did.into(), &FxHashSet::default()),
"",
cx,
);
out.push_str(";</span>");
}
}
}
}
}
}
if !out.is_empty() {
out.insert_str(
0,
"<span class=\"notable-traits\"><span class=\"notable-traits-tooltip\">ⓘ\
<div class=\"notable-traits-tooltiptext\"><span class=\"docblock\">",
);
out.push_str("</code></span></div></span></span>");
}
out.into_inner()
}
#[derive(Clone, Copy, Debug)]
struct ImplRenderingParameters {
show_def_docs: bool,
is_on_foreign_type: bool,
show_default_items: bool,
/// Whether or not to show methods.
show_non_assoc_items: bool,
toggle_open_by_default: bool,
}
fn render_impl(
w: &mut Buffer,
cx: &Context<'_>,
i: &Impl,
parent: &clean::Item,
link: AssocItemLink<'_>,
render_mode: RenderMode,
use_absolute: Option<bool>,
aliases: &[String],
rendering_params: ImplRenderingParameters,
) {
let cache = cx.cache();
let traits = &cache.traits;
let trait_ = i.trait_did_full(cache).map(|did| &traits[&did]);
let mut close_tags = String::new();
// For trait implementations, the `interesting` output contains all methods that have doc
// comments, and the `boring` output contains all methods that do not. The distinction is
// used to allow hiding the boring methods.
// `containing_item` is used for rendering stability info. If the parent is a trait impl,
// `containing_item` will be the grandparent, since trait impls can't have stability attached.
fn doc_impl_item(
boring: &mut Buffer,
interesting: &mut Buffer,
cx: &Context<'_>,
item: &clean::Item,
parent: &clean::Item,
containing_item: &clean::Item,
link: AssocItemLink<'_>,
render_mode: RenderMode,
is_default_item: bool,
trait_: Option<&clean::Trait>,
rendering_params: ImplRenderingParameters,
) {
let item_type = item.type_();
let name = item.name.as_ref().unwrap();
let render_method_item = rendering_params.show_non_assoc_items
&& match render_mode {
RenderMode::Normal => true,
RenderMode::ForDeref { mut_: deref_mut_ } => {
should_render_item(&item, deref_mut_, cx.cache())
}
};
let in_trait_class = if trait_.is_some() { " trait-impl" } else { "" };
let mut doc_buffer = Buffer::empty_from(boring);
let mut info_buffer = Buffer::empty_from(boring);
let mut short_documented = true;
if render_method_item {
if !is_default_item {
if let Some(t) = trait_ {
// The trait item may have been stripped so we might not
// find any documentation or stability for it.
if let Some(it) = t.items.iter().find(|i| i.name == item.name) {
// We need the stability of the item from the trait
// because impls can't have a stability.
if item.doc_value().is_some() {
document_item_info(&mut info_buffer, cx, it, Some(parent));
document_full(&mut doc_buffer, item, cx);
short_documented = false;
} else {
// In case the item isn't documented,
// provide short documentation from the trait.
document_short(
&mut doc_buffer,
it,
cx,
link,
parent,
rendering_params.show_def_docs,
);
}
}
} else {
document_item_info(&mut info_buffer, cx, item, Some(parent));
if rendering_params.show_def_docs {
document_full(&mut doc_buffer, item, cx);
short_documented = false;
}
}
} else {
document_short(
&mut doc_buffer,
item,
cx,
link,
parent,
rendering_params.show_def_docs,
);
}
}
let w = if short_documented && trait_.is_some() { interesting } else { boring };
let toggled = !doc_buffer.is_empty();
if toggled {
let method_toggle_class =
if item_type == ItemType::Method { " method-toggle" } else { "" };
write!(w, "<details class=\"rustdoc-toggle{}\" open><summary>", method_toggle_class);
}
match *item.kind {
clean::MethodItem(..) | clean::TyMethodItem(_) => {
// Only render when the method is not static or we allow static methods
if render_method_item {
let id = cx.derive_id(format!("{}.{}", item_type, name));
let source_id = trait_
.and_then(|trait_| {
trait_.items.iter().find(|item| {
item.name.map(|n| n.as_str().eq(&name.as_str())).unwrap_or(false)
})
})
.map(|item| format!("{}.{}", item.type_(), name));
write!(
w,
"<div id=\"{}\" class=\"{}{} has-srclink\">",
id, item_type, in_trait_class,
);
render_rightside(w, cx, item, containing_item);
write!(w, "<a href=\"#{}\" class=\"anchor\"></a>", id);
w.write_str("<h4 class=\"code-header\">");
render_assoc_item(
w,
item,
link.anchor(source_id.as_ref().unwrap_or(&id)),
ItemType::Impl,
cx,
);
w.write_str("</h4>");
w.write_str("</div>");
}
}
clean::TypedefItem(ref tydef, _) => {
let source_id = format!("{}.{}", ItemType::AssocType, name);
let id = cx.derive_id(source_id.clone());
write!(
w,
"<div id=\"{}\" class=\"{}{} has-srclink\">",
id, item_type, in_trait_class
);
write!(w, "<a href=\"#{}\" class=\"anchor\"></a>", id);
w.write_str("<h4 class=\"code-header\">");
assoc_type(
w,
item,
&Vec::new(),
Some(&tydef.type_),
link.anchor(if trait_.is_some() { &source_id } else { &id }),
"",
cx,
);
w.write_str("</h4>");
w.write_str("</div>");
}
clean::AssocConstItem(ref ty, ref default) => {
let source_id = format!("{}.{}", item_type, name);
let id = cx.derive_id(source_id.clone());
write!(
w,
"<div id=\"{}\" class=\"{}{} has-srclink\">",
id, item_type, in_trait_class
);
render_rightside(w, cx, item, containing_item);
write!(w, "<a href=\"#{}\" class=\"anchor\"></a>", id);
w.write_str("<h4 class=\"code-header\">");
assoc_const(
w,
item,
ty,
default.as_ref(),
link.anchor(if trait_.is_some() { &source_id } else { &id }),
"",
cx,
);
w.write_str("</h4>");
w.write_str("</div>");
}
clean::AssocTypeItem(ref bounds, ref default) => {
let source_id = format!("{}.{}", item_type, name);
let id = cx.derive_id(source_id.clone());
write!(w, "<div id=\"{}\" class=\"{}{}\">", id, item_type, in_trait_class,);
write!(w, "<a href=\"#{}\" class=\"anchor\"></a>", id);
w.write_str("<h4 class=\"code-header\">");
assoc_type(
w,
item,
bounds,
default.as_ref(),
link.anchor(if trait_.is_some() { &source_id } else { &id }),
"",
cx,
);
w.write_str("</h4>");
w.write_str("</div>");
}
clean::StrippedItem(..) => return,
_ => panic!("can't make docs for trait item with name {:?}", item.name),
}
w.push_buffer(info_buffer);
if toggled {
w.write_str("</summary>");
w.push_buffer(doc_buffer);
w.push_str("</details>");
}
}
let mut impl_items = Buffer::empty_from(w);
let mut default_impl_items = Buffer::empty_from(w);
for trait_item in &i.inner_impl().items {
doc_impl_item(
&mut default_impl_items,
&mut impl_items,
cx,
trait_item,
if trait_.is_some() { &i.impl_item } else { parent },
parent,
link,
render_mode,
false,
trait_.map(|t| &t.trait_),
rendering_params,
);
}
fn render_default_items(
boring: &mut Buffer,
interesting: &mut Buffer,
cx: &Context<'_>,
t: &clean::Trait,
i: &clean::Impl,
parent: &clean::Item,
containing_item: &clean::Item,
render_mode: RenderMode,
rendering_params: ImplRenderingParameters,
) {
for trait_item in &t.items {
let n = trait_item.name;
if i.items.iter().any(|m| m.name == n) {
continue;
}
let did = i.trait_.as_ref().unwrap().def_id_full(cx.cache()).unwrap();
let provided_methods = i.provided_trait_methods(cx.tcx());
let assoc_link = AssocItemLink::GotoSource(did.into(), &provided_methods);
doc_impl_item(
boring,
interesting,
cx,
trait_item,
parent,
containing_item,
assoc_link,
render_mode,
true,
Some(t),
rendering_params,
);
}
}
// If we've implemented a trait, then also emit documentation for all
// default items which weren't overridden in the implementation block.
// We don't emit documentation for default items if they appear in the
// Implementations on Foreign Types or Implementors sections.
if rendering_params.show_default_items {
if let Some(t) = trait_ {
render_default_items(
&mut default_impl_items,
&mut impl_items,
cx,
&t.trait_,
&i.inner_impl(),
&i.impl_item,
parent,
render_mode,
rendering_params,
);
}
}
if render_mode == RenderMode::Normal {
let toggled = !(impl_items.is_empty() && default_impl_items.is_empty());
if toggled {
close_tags.insert_str(0, "</details>");
write!(
w,
"<details class=\"rustdoc-toggle implementors-toggle\"{}>",
if rendering_params.toggle_open_by_default { " open" } else { "" }
);
write!(w, "<summary>")
}
render_impl_summary(
w,
cx,
i,
parent,
parent,
rendering_params.show_def_docs,
use_absolute,
rendering_params.is_on_foreign_type,
aliases,
);
if toggled {
write!(w, "</summary>")
}
if let Some(ref dox) = cx.shared.maybe_collapsed_doc_value(&i.impl_item) {
let mut ids = cx.id_map.borrow_mut();
write!(
w,
"<div class=\"docblock\">{}</div>",
Markdown(
&*dox,
&i.impl_item.links(cx),
&mut ids,
cx.shared.codes,
cx.shared.edition(),
&cx.shared.playground
)
.into_string()
);
}
}
if !default_impl_items.is_empty() || !impl_items.is_empty() {
w.write_str("<div class=\"impl-items\">");
w.push_buffer(default_impl_items);
w.push_buffer(impl_items);
close_tags.insert_str(0, "</div>");
}
w.write_str(&close_tags);
}
// Render the items that appear on the right side of methods, impls, and
// associated types. For example "1.0.0 (const: 1.39.0) [src]".
fn render_rightside(
w: &mut Buffer,
cx: &Context<'_>,
item: &clean::Item,
containing_item: &clean::Item,
) {
let tcx = cx.tcx();
write!(w, "<div class=\"rightside\">");
render_stability_since_raw(
w,
item.stable_since(tcx).as_deref(),
item.const_stability(tcx),
containing_item.stable_since(tcx).as_deref(),
containing_item.const_stable_since(tcx).as_deref(),
);
write_srclink(cx, item, w);
w.write_str("</div>");
}
pub(crate) fn render_impl_summary(
w: &mut Buffer,
cx: &Context<'_>,
i: &Impl,
parent: &clean::Item,
containing_item: &clean::Item,
show_def_docs: bool,
use_absolute: Option<bool>,
is_on_foreign_type: bool,
// This argument is used to reference the same type via different paths, to avoid
// duplication in documentation pages for traits with automatic implementations like `Send` and `Sync`.
aliases: &[String],
) {
let id = cx.derive_id(match i.inner_impl().trait_ {
Some(ref t) => {
if is_on_foreign_type {
get_id_for_impl_on_foreign_type(&i.inner_impl().for_, t, cx)
} else {
format!("impl-{}", small_url_encode(format!("{:#}", t.print(cx))))
}
}
None => "impl".to_string(),
});
let aliases = if aliases.is_empty() {
String::new()
} else {
format!(" data-aliases=\"{}\"", aliases.join(","))
};
write!(w, "<div id=\"{}\" class=\"impl has-srclink\"{}>", id, aliases);
render_rightside(w, cx, &i.impl_item, containing_item);
write!(w, "<a href=\"#{}\" class=\"anchor\"></a>", id);
write!(w, "<h3 class=\"code-header in-band\">");
if let Some(use_absolute) = use_absolute {
write!(w, "{}", i.inner_impl().print(use_absolute, cx));
if show_def_docs {
for it in &i.inner_impl().items {
if let clean::TypedefItem(ref tydef, _) = *it.kind {
w.write_str("<span class=\"where fmt-newline\"> ");
assoc_type(w, it, &[], Some(&tydef.type_), AssocItemLink::Anchor(None), "", cx);
w.write_str(";</span>");
}
}
}
} else {
write!(w, "{}", i.inner_impl().print(false, cx));
}
write!(w, "</h3>");
let is_trait = i.inner_impl().trait_.is_some();
if is_trait {
if let Some(portability) = portability(&i.impl_item, Some(parent)) {
write!(w, "<div class=\"item-info\">{}</div>", portability);
}
}
w.write_str("</div>");
}
fn print_sidebar(cx: &Context<'_>, it: &clean::Item, buffer: &mut Buffer) {
let parentlen = cx.current.len() - if it.is_mod() { 1 } else { 0 };
if it.is_struct()
|| it.is_trait()
|| it.is_primitive()
|| it.is_union()
|| it.is_enum()
|| it.is_mod()
|| it.is_typedef()
{
write!(
buffer,
"<h2 class=\"location\">{}{}</h2>",
match *it.kind {
clean::StructItem(..) => "Struct ",
clean::TraitItem(..) => "Trait ",
clean::PrimitiveItem(..) => "Primitive Type ",
clean::UnionItem(..) => "Union ",
clean::EnumItem(..) => "Enum ",
clean::TypedefItem(..) => "Type Definition ",
clean::ForeignTypeItem => "Foreign Type ",
clean::ModuleItem(..) =>
if it.is_crate() {
"Crate "
} else {
"Module "
},
_ => "",
},
it.name.as_ref().unwrap()
);
}
if it.is_crate() {
if let Some(ref version) = cx.cache().crate_version {
write!(
buffer,
"<div class=\"block version\">\
<div class=\"narrow-helper\"></div>\
<p>Version {}</p>\
</div>",
Escape(version),
);
}
}
buffer.write_str("<div class=\"sidebar-elems\">");
if it.is_crate() {
write!(
buffer,
"<a id=\"all-types\" href=\"all.html\"><p>See all {}'s items</p></a>",
it.name.as_ref().expect("crates always have a name"),
);
}
match *it.kind {
clean::StructItem(ref s) => sidebar_struct(cx, buffer, it, s),
clean::TraitItem(ref t) => sidebar_trait(cx, buffer, it, t),
clean::PrimitiveItem(_) => sidebar_primitive(cx, buffer, it),
clean::UnionItem(ref u) => sidebar_union(cx, buffer, it, u),
clean::EnumItem(ref e) => sidebar_enum(cx, buffer, it, e),
clean::TypedefItem(_, _) => sidebar_typedef(cx, buffer, it),
clean::ModuleItem(ref m) => sidebar_module(buffer, &m.items),
clean::ForeignTypeItem => sidebar_foreign_type(cx, buffer, it),
_ => {}
}
// The sidebar is designed to display sibling functions, modules and
// other miscellaneous information. Since there are lots of sibling
// items (and that causes quadratic growth in large modules),
// we refactor common parts into a shared JavaScript file per module.
// Still, we don't move everything into JS because we want to preserve
// as much HTML as possible in order to allow non-JS-enabled browsers
// to navigate the documentation (though slightly inefficiently).
if !it.is_mod() {
buffer.write_str("<h2 class=\"location\">Other items in<br>");
for (i, name) in cx.current.iter().take(parentlen).enumerate() {
if i > 0 {
buffer.write_str("::<wbr>");
}
write!(
buffer,
"<a href=\"{}index.html\">{}</a>",
&cx.root_path()[..(cx.current.len() - i - 1) * 3],
*name
);
}
buffer.write_str("</h2>");
}
// Sidebar refers to the enclosing module, not this module.
let relpath = if it.is_mod() && parentlen != 0 { "./" } else { "" };
write!(
buffer,
"<div id=\"sidebar-vars\" data-name=\"{name}\" data-ty=\"{ty}\" data-relpath=\"{path}\">\
</div>",
name = it.name.unwrap_or(kw::Empty),
ty = it.type_(),
path = relpath
);
write!(buffer, "<script defer src=\"{}sidebar-items.js\"></script>", relpath);
// Closes sidebar-elems div.
buffer.write_str("</div>");
}
fn get_next_url(used_links: &mut FxHashSet<String>, url: String) -> String {
if used_links.insert(url.clone()) {
return url;
}
let mut add = 1;
while !used_links.insert(format!("{}-{}", url, add)) {
add += 1;
}
format!("{}-{}", url, add)
}
fn get_methods(
i: &clean::Impl,
for_deref: bool,
used_links: &mut FxHashSet<String>,
deref_mut: bool,
cache: &Cache,
) -> Vec<String> {
i.items
.iter()
.filter_map(|item| match item.name {
Some(ref name) if !name.is_empty() && item.is_method() => {
if !for_deref || should_render_item(item, deref_mut, cache) {
Some(format!(
"<a href=\"#{}\">{}</a>",
get_next_url(used_links, format!("method.{}", name)),
name
))
} else {
None
}
}
_ => None,
})
.collect::<Vec<_>>()
}
// URL-encode any characters that can appear in a generic type, so the result is usable as an HTML id or URL fragment.
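// E.g. "Vec<(usize, usize)>" becomes "Vec%3C(usize%2C%20usize)%3E".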
fn small_url_encode(s: String) -> String {
let mut st = String::new();
let mut last_match = 0;
for (idx, c) in s.char_indices() {
let escaped = match c {
'<' => "%3C",
'>' => "%3E",
' ' => "%20",
'?' => "%3F",
'\'' => "%27",
'&' => "%26",
',' => "%2C",
':' => "%3A",
';' => "%3B",
'[' => "%5B",
']' => "%5D",
'"' => "%22",
_ => continue,
};
st += &s[last_match..idx];
st += escaped;
// NOTE: we only expect single byte characters here - which is fine as long as we
// only match single byte characters
last_match = idx + 1;
}
if last_match != 0 {
st += &s[last_match..];
st
} else {
s
}
}
fn sidebar_assoc_items(cx: &Context<'_>, out: &mut Buffer, it: &clean::Item) {
let did = it.def_id.expect_def_id();
let cache = cx.cache();
if let Some(v) = cache.impls.get(&did) {
let mut used_links = FxHashSet::default();
{
let used_links_bor = &mut used_links;
let mut ret = v
.iter()
.filter(|i| i.inner_impl().trait_.is_none())
.flat_map(move |i| get_methods(i.inner_impl(), false, used_links_bor, false, cache))
.collect::<Vec<_>>();
if !ret.is_empty() {
// We want links' order to be reproducible so we don't use unstable sort.
ret.sort();
out.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#implementations\">Methods</a></h3>\
<div class=\"sidebar-links\">",
);
for line in ret {
out.push_str(&line);
}
out.push_str("</div>");
}
}
if v.iter().any(|i| i.inner_impl().trait_.is_some()) {
if let Some(impl_) = v
.iter()
.filter(|i| i.inner_impl().trait_.is_some())
.find(|i| i.inner_impl().trait_.def_id_full(cache) == cache.deref_trait_did)
{
sidebar_deref_methods(cx, out, impl_, v);
}
let format_impls = |impls: Vec<&Impl>| {
let mut links = FxHashSet::default();
let mut ret = impls
.iter()
.filter_map(|it| {
if let Some(ref i) = it.inner_impl().trait_ {
let i_display = format!("{:#}", i.print(cx));
let out = Escape(&i_display);
let encoded = small_url_encode(format!("{:#}", i.print(cx)));
let generated = format!(
"<a href=\"#impl-{}\">{}{}</a>",
encoded,
if it.inner_impl().negative_polarity { "!" } else { "" },
out
);
if links.insert(generated.clone()) { Some(generated) } else { None }
} else {
None
}
})
.collect::<Vec<String>>();
ret.sort();
ret
};
let write_sidebar_links = |out: &mut Buffer, links: Vec<String>| {
out.push_str("<div class=\"sidebar-links\">");
for link in links {
out.push_str(&link);
}
out.push_str("</div>");
};
let (synthetic, concrete): (Vec<&Impl>, Vec<&Impl>) =
v.iter().partition::<Vec<_>, _>(|i| i.inner_impl().synthetic);
let (blanket_impl, concrete): (Vec<&Impl>, Vec<&Impl>) = concrete
.into_iter()
.partition::<Vec<_>, _>(|i| i.inner_impl().blanket_impl.is_some());
let concrete_format = format_impls(concrete);
let synthetic_format = format_impls(synthetic);
let blanket_format = format_impls(blanket_impl);
if !concrete_format.is_empty() {
out.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#trait-implementations\">\
Trait Implementations</a></h3>",
);
write_sidebar_links(out, concrete_format);
}
if !synthetic_format.is_empty() {
out.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#synthetic-implementations\">\
Auto Trait Implementations</a></h3>",
);
write_sidebar_links(out, synthetic_format);
}
if !blanket_format.is_empty() {
out.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#blanket-implementations\">\
Blanket Implementations</a></h3>",
);
write_sidebar_links(out, blanket_format);
}
}
}
}
fn sidebar_deref_methods(cx: &Context<'_>, out: &mut Buffer, impl_: &Impl, v: &Vec<Impl>) {
let c = cx.cache();
debug!("found Deref: {:?}", impl_);
if let Some((target, real_target)) =
impl_.inner_impl().items.iter().find_map(|item| match *item.kind {
clean::TypedefItem(ref t, true) => Some(match *t {
clean::Typedef { item_type: Some(ref type_), .. } => (type_, &t.type_),
_ => (&t.type_, &t.type_),
}),
_ => None,
})
{
debug!("found target, real_target: {:?} {:?}", target, real_target);
if let Some(did) = target.def_id_full(c) {
if let Some(type_did) = impl_.inner_impl().for_.def_id_full(c) {
// `impl Deref<Target = S> for S`
if did == type_did {
// Avoid infinite cycles
return;
}
}
}
let deref_mut = v
.iter()
.filter(|i| i.inner_impl().trait_.is_some())
.any(|i| i.inner_impl().trait_.def_id_full(c) == c.deref_mut_trait_did);
let inner_impl = target
.def_id_full(c)
.or_else(|| {
target.primitive_type().and_then(|prim| c.primitive_locations.get(&prim).cloned())
})
.and_then(|did| c.impls.get(&did));
if let Some(impls) = inner_impl {
debug!("found inner_impl: {:?}", impls);
let mut used_links = FxHashSet::default();
let mut ret = impls
.iter()
.filter(|i| i.inner_impl().trait_.is_none())
.flat_map(|i| get_methods(i.inner_impl(), true, &mut used_links, deref_mut, c))
.collect::<Vec<_>>();
if !ret.is_empty() {
write!(
out,
"<h3 class=\"sidebar-title\"><a href=\"#deref-methods\">Methods from {}<Target={}></a></h3>",
Escape(&format!("{:#}", impl_.inner_impl().trait_.as_ref().unwrap().print(cx))),
Escape(&format!("{:#}", real_target.print(cx))),
);
// We want links' order to be reproducible so we don't use unstable sort.
ret.sort();
out.push_str("<div class=\"sidebar-links\">");
for link in ret {
out.push_str(&link);
}
out.push_str("</div>");
}
}
}
}
fn sidebar_struct(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item, s: &clean::Struct) {
let mut sidebar = Buffer::new();
let fields = get_struct_fields_name(&s.fields);
if !fields.is_empty() {
if let CtorKind::Fictive = s.struct_type {
sidebar.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#fields\">Fields</a></h3>\
<div class=\"sidebar-links\">",
);
for field in fields {
sidebar.push_str(&field);
}
sidebar.push_str("</div>");
} else if let CtorKind::Fn = s.struct_type {
sidebar
.push_str("<h3 class=\"sidebar-title\"><a href=\"#fields\">Tuple Fields</a></h3>");
}
}
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
fn get_id_for_impl_on_foreign_type(
for_: &clean::Type,
trait_: &clean::Type,
cx: &Context<'_>,
) -> String {
small_url_encode(format!("impl-{:#}-for-{:#}", trait_.print(cx), for_.print(cx),))
}
fn extract_for_impl_name(item: &clean::Item, cx: &Context<'_>) -> Option<(String, String)> {
match *item.kind {
clean::ItemKind::ImplItem(ref i) => {
if let Some(ref trait_) = i.trait_ {
// Alternative format produces no URLs,
// so this parameter does nothing.
Some((
format!("{:#}", i.for_.print(cx)),
get_id_for_impl_on_foreign_type(&i.for_, trait_, cx),
))
} else {
None
}
}
_ => None,
}
}
fn sidebar_trait(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item, t: &clean::Trait) {
buf.write_str("<div class=\"block items\">");
fn print_sidebar_section(
out: &mut Buffer,
items: &[clean::Item],
before: &str,
filter: impl Fn(&clean::Item) -> bool,
write: impl Fn(&mut Buffer, &str),
after: &str,
) {
let mut items = items
.iter()
.filter_map(|m| match m.name {
Some(ref name) if filter(m) => Some(name.as_str()),
_ => None,
})
.collect::<Vec<_>>();
if !items.is_empty() {
items.sort_unstable();
out.push_str(before);
for item in items.into_iter() {
write(out, &item);
}
out.push_str(after);
}
}
print_sidebar_section(
buf,
&t.items,
"<h3 class=\"sidebar-title\"><a href=\"#associated-types\">\
Associated Types</a></h3><div class=\"sidebar-links\">",
|m| m.is_associated_type(),
|out, sym| write!(out, "<a href=\"#associatedtype.{0}\">{0}</a>", sym),
"</div>",
);
print_sidebar_section(
buf,
&t.items,
"<h3 class=\"sidebar-title\"><a href=\"#associated-const\">\
Associated Constants</a></h3><div class=\"sidebar-links\">",
|m| m.is_associated_const(),
|out, sym| write!(out, "<a href=\"#associatedconstant.{0}\">{0}</a>", sym),
"</div>",
);
print_sidebar_section(
buf,
&t.items,
"<h3 class=\"sidebar-title\"><a href=\"#required-methods\">\
Required Methods</a></h3><div class=\"sidebar-links\">",
|m| m.is_ty_method(),
|out, sym| write!(out, "<a href=\"#tymethod.{0}\">{0}</a>", sym),
"</div>",
);
print_sidebar_section(
buf,
&t.items,
"<h3 class=\"sidebar-title\"><a href=\"#provided-methods\">\
Provided Methods</a></h3><div class=\"sidebar-links\">",
|m| m.is_method(),
|out, sym| write!(out, "<a href=\"#method.{0}\">{0}</a>", sym),
"</div>",
);
let cache = cx.cache();
if let Some(implementors) = cache.implementors.get(&it.def_id.expect_def_id()) {
let mut res = implementors
.iter()
.filter(|i| {
i.inner_impl()
.for_
.def_id_full(cache)
.map_or(false, |d| !cache.paths.contains_key(&d))
})
.filter_map(|i| extract_for_impl_name(&i.impl_item, cx))
.collect::<Vec<_>>();
if !res.is_empty() {
res.sort();
buf.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#foreign-impls\">\
Implementations on Foreign Types</a></h3>\
<div class=\"sidebar-links\">",
);
for (name, id) in res.into_iter() {
write!(buf, "<a href=\"#{}\">{}</a>", id, Escape(&name));
}
buf.push_str("</div>");
}
}
sidebar_assoc_items(cx, buf, it);
buf.push_str("<h3 class=\"sidebar-title\"><a href=\"#implementors\">Implementors</a></h3>");
if t.is_auto {
buf.push_str(
"<h3 class=\"sidebar-title\"><a \
href=\"#synthetic-implementors\">Auto Implementors</a></h3>",
);
}
buf.push_str("</div>")
}
fn sidebar_primitive(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item) {
let mut sidebar = Buffer::new();
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
fn sidebar_typedef(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item) {
let mut sidebar = Buffer::new();
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
fn get_struct_fields_name(fields: &[clean::Item]) -> Vec<String> {
let mut fields = fields
.iter()
.filter(|f| matches!(*f.kind, clean::StructFieldItem(..)))
.filter_map(|f| {
f.name.map(|name| format!("<a href=\"#structfield.{name}\">{name}</a>", name = name))
})
.collect::<Vec<_>>();
fields.sort();
fields
}
fn sidebar_union(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item, u: &clean::Union) {
let mut sidebar = Buffer::new();
let fields = get_struct_fields_name(&u.fields);
if !fields.is_empty() {
sidebar.push_str(
"<h3 class=\"sidebar-title\"><a href=\"#fields\">Fields</a></h3>\
<div class=\"sidebar-links\">",
);
for field in fields {
sidebar.push_str(&field);
}
sidebar.push_str("</div>");
}
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
fn sidebar_enum(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item, e: &clean::Enum) {
let mut sidebar = Buffer::new();
let mut variants = e
.variants
.iter()
.filter_map(|v| match v.name {
Some(ref name) => Some(format!("<a href=\"#variant.{name}\">{name}</a>", name = name)),
_ => None,
})
.collect::<Vec<_>>();
if !variants.is_empty() {
variants.sort_unstable();
sidebar.push_str(&format!(
"<h3 class=\"sidebar-title\"><a href=\"#variants\">Variants</a></h3>\
<div class=\"sidebar-links\">{}</div>",
variants.join(""),
));
}
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
fn item_ty_to_strs(ty: ItemType) -> (&'static str, &'static str) {
match ty {
ItemType::ExternCrate | ItemType::Import => ("reexports", "Re-exports"),
ItemType::Module => ("modules", "Modules"),
ItemType::Struct => ("structs", "Structs"),
ItemType::Union => ("unions", "Unions"),
ItemType::Enum => ("enums", "Enums"),
ItemType::Function => ("functions", "Functions"),
ItemType::Typedef => ("types", "Type Definitions"),
ItemType::Static => ("statics", "Statics"),
ItemType::Constant => ("constants", "Constants"),
ItemType::Trait => ("traits", "Traits"),
ItemType::Impl => ("impls", "Implementations"),
ItemType::TyMethod => ("tymethods", "Type Methods"),
ItemType::Method => ("methods", "Methods"),
ItemType::StructField => ("fields", "Struct Fields"),
ItemType::Variant => ("variants", "Variants"),
ItemType::Macro => ("macros", "Macros"),
ItemType::Primitive => ("primitives", "Primitive Types"),
ItemType::AssocType => ("associated-types", "Associated Types"),
ItemType::AssocConst => ("associated-consts", "Associated Constants"),
ItemType::ForeignType => ("foreign-types", "Foreign Types"),
ItemType::Keyword => ("keywords", "Keywords"),
ItemType::OpaqueTy => ("opaque-types", "Opaque Types"),
ItemType::ProcAttribute => ("attributes", "Attribute Macros"),
ItemType::ProcDerive => ("derives", "Derive Macros"),
ItemType::TraitAlias => ("trait-aliases", "Trait aliases"),
}
}
fn sidebar_module(buf: &mut Buffer, items: &[clean::Item]) {
let mut sidebar = String::new();
// Re-exports are handled a bit differently because they can be extern crates or imports.
if items.iter().any(|it| {
it.name.is_some()
&& (it.type_() == ItemType::ExternCrate
|| (it.type_() == ItemType::Import && !it.is_stripped()))
}) {
let (id, name) = item_ty_to_strs(ItemType::Import);
sidebar.push_str(&format!("<li><a href=\"#{}\">{}</a></li>", id, name));
}
// ordering taken from item_module, reorder, where it prioritized elements in a certain order
// to print its headings
for &myty in &[
ItemType::Primitive,
ItemType::Module,
ItemType::Macro,
ItemType::Struct,
ItemType::Enum,
ItemType::Constant,
ItemType::Static,
ItemType::Trait,
ItemType::Function,
ItemType::Typedef,
ItemType::Union,
ItemType::Impl,
ItemType::TyMethod,
ItemType::Method,
ItemType::StructField,
ItemType::Variant,
ItemType::AssocType,
ItemType::AssocConst,
ItemType::ForeignType,
ItemType::Keyword,
] {
if items.iter().any(|it| !it.is_stripped() && it.type_() == myty && it.name.is_some()) {
let (id, name) = item_ty_to_strs(myty);
sidebar.push_str(&format!("<li><a href=\"#{}\">{}</a></li>", id, name));
}
}
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\"><ul>{}</ul></div>", sidebar);
}
}
fn sidebar_foreign_type(cx: &Context<'_>, buf: &mut Buffer, it: &clean::Item) {
let mut sidebar = Buffer::new();
sidebar_assoc_items(cx, &mut sidebar, it);
if !sidebar.is_empty() {
write!(buf, "<div class=\"block items\">{}</div>", sidebar.into_inner());
}
}
crate const BASIC_KEYWORDS: &str = "rust, rustlang, rust-lang";
/// Returns a list of all paths used in the type.
/// This is used to help deduplicate imported impls
/// for reexported types. If any of the contained
/// types are re-exported, we don't use the corresponding
/// entry from the js file, as inlining will have already
/// picked up the impl
fn collect_ | y: clean::Type, cache: &Cache) -> Vec<String> {
let mut out = Vec::new();
let mut visited = FxHashSet::default();
let mut work = VecDeque::new();
work.push_back(first_ty);
while let Some(ty) = work.pop_front() {
if !visited.insert(ty.clone()) {
continue;
}
match ty {
clean::Type::ResolvedPath { did, .. } => {
let get_extern = || cache.external_paths.get(&did).map(|s| s.0.clone());
let fqp = cache.exact_paths.get(&did).cloned().or_else(get_extern);
if let Some(path) = fqp {
out.push(path.join("::"));
}
}
clean::Type::Tuple(tys) => {
work.extend(tys.into_iter());
}
clean::Type::Slice(ty) => {
work.push_back(*ty);
}
clean::Type::Array(ty, _) => {
work.push_back(*ty);
}
clean::Type::RawPointer(_, ty) => {
work.push_back(*ty);
}
clean::Type::BorrowedRef { type_, .. } => {
work.push_back(*type_);
}
clean::Type::QPath { self_type, trait_, .. } => {
work.push_back(*self_type);
work.push_back(*trait_);
}
_ => {}
}
}
out
}
| paths_for_type(first_t |
coops_ndbc_obs_collector.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Read obs from NDBC and CO-OPS
Generate csv data files
functions to write and read the data
Observations will be chosen based on the hurricane track to avoid more downloads than necessary.
"""
__author__ = "Saeed Moghimi"
__copyright__ = "Copyright 2018, UCAR/NOAA"
__license__ = "GPL"
__version__ = "1.0"
__email__ = "[email protected]"
#
import pandas as pd
import numpy as np
#
from bs4 import BeautifulSoup
import requests
import lxml.html
import sys,os
import json
from glob import glob
from urllib.request import urlopen, urlretrieve
#
from pyoos.collectors.ndbc.ndbc_sos import NdbcSos
from pyoos.collectors.coops.coops_sos import CoopsSos
from retrying import retry
import datetime
import dateparser
import cf_units
from io import BytesIO
from ioos_tools.ioos import collector2table
import pickle
import arrow
#
sys.path.append('/disks/NASARCHIVE/saeed_moghimi/opt/python-packages/')
import geopandas as gpd
from shapely.geometry import LineString
#####################################################
if 'base_info' in sys.modules:
del(sys.modules["base_info"])
from base_info import *
#
##### Functions #####
def url_lister(url):
urls = []
connection = urlopen(url)
dom = lxml.html.fromstring(connection.read())
for link in dom.xpath('//a/@href'):
urls.append(link)
return urls
#################
def download(url, path, fname):
sys.stdout.write(fname + '\n')
if not os.path.isfile(path):
urlretrieve(
url,
filename=path,
reporthook=progress_hook(sys.stdout)
)
sys.stdout.write('\n')
sys.stdout.flush()
#################
def progress_hook(out):
"""
Return a progress hook function, suitable for passing to
urllib.retrieve, that writes to the file object *out*.
"""
def it(n, bs, ts):
got = n * bs
if ts < 0:
outof = ''
else:
# On the last block n*bs can exceed ts, so we clamp it
# to avoid awkward questions.
got = min(got, ts)
outof = '/%d [%d%%]' % (ts, 100 * got // ts)
out.write("\r %d%s" % (got, outof))
out.flush()
return it
#################
def get_nhc_storm_info (year,name):
"""
"""
print('Read list of hurricanes from NHC based on year')
if int(year) < 2008:
print (' ERROR: GIS Data is not available for storms before 2008 ')
sys.exit('Exiting .....')
url = 'http://www.nhc.noaa.gov/gis/archive_wsurge.php?year='+year
r = requests.get(url,headers=headers,verify=False)
soup = BeautifulSoup(r.content, 'lxml')
table = soup.find('table')
#table = [row.get_text().strip().split(maxsplit=1) for row in table.find_all('tr')]
tab = []
for row in table.find_all('tr'):
tmp = row.get_text().strip().split()
tab.append([tmp[0],tmp[-1]])
print (tab)
df = pd.DataFrame(
data=tab[:],
columns=['identifier', 'name'],
).set_index('name')
###############################
print(' > based on specific storm go fetch gis files')
hid = df.to_dict()['identifier'][name.upper()]
al_code = ('{}'+year).format(hid)
hurricane_gis_files = '{}_5day'.format(al_code)
return al_code,hurricane_gis_files
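# Usage sketch (illustrative; the NHC identifier for Sandy is believed to be al18):
# al_code, hurricane_gis_files = get_nhc_storm_info('2012', 'sandy')
# -> ('al182012', 'al182012_5day')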
#################
#@retry(stop_max_attempt_number=5, wait_fixed=3000)
def download_nhc_gis_files(hurricane_gis_files):
"""
"""
base = os.path.abspath(
os.path.join(os.path.curdir, 'data', hurricane_gis_files)
)
if len (glob(base+'/*')) < 1:
nhc = 'http://www.nhc.noaa.gov/gis/forecast/archive/'
# We don't need the latest file b/c that is redundant to the latest number.
fnames = [
fname for fname in url_lister(nhc)
if fname.startswith(hurricane_gis_files) and 'latest' not in fname
]
if not os.path.exists(base):
os.makedirs(base)
for fname in fnames:
path1 = os.path.join(base, fname)
if not os.path.exists(path1):
url = '{}/{}'.format(nhc, fname)
download(url, path1,fname)
return base
#################################
def read_advisory_cones_info(hurricane_gis_files,base,year,code):
print(' > Read cones shape file ...')
cones, points = [], []
for fname in sorted(glob(os.path.join(base, '{}_*.zip'.format(hurricane_gis_files)))):
number = os.path.splitext(os.path.split(fname)[-1])[0].split('_')[-1]
# read cone shapefiles
if int(year) < 2014:
#al092008.001_5day_pgn.shp
divd = '.'
else:
divd = '-'
pgn = gpd.read_file(
('/{}'+divd+'{}_5day_pgn.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
cones.append(pgn)
#read points shapefiles
pts = gpd.read_file(
('/{}'+divd+'{}_5day_pts.shp').format(code, number),
vfs='zip://{}'.format(fname)
)
        # Only the first "observation".
points.append(pts.iloc[0])
return cones,points,pts
#################
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_coops(start, end, sos_name, units, bbox,datum='NAVD', verbose=True):
"""
function to read COOPS data
We need to retry in case of failure b/c the server cannot handle
the high traffic during hurricane season.
"""
collector = CoopsSos()
collector.set_bbox(bbox)
collector.end_time = end
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
config = dict(
units=units,
sos_name=sos_name,
datum = datum, ###Saeed added ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
)
data = collector2table(
collector=collector,
config=config,
col='{} ({})'.format(sos_name, units.format(cf_units.UT_ISO_8859_1))
)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
@retry(stop_max_attempt_number=5, wait_fixed=3000)
def get_ndbc(start, end, bbox , sos_name='waves',datum='MSL', verbose=True):
"""
    function to read NDBC data
###################
sos_name = waves
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'sea_surface_wave_significant_height (m)',
'sea_surface_wave_peak_period (s)', 'sea_surface_wave_mean_period (s)',
'sea_surface_swell_wave_significant_height (m)',
'sea_surface_swell_wave_period (s)',
'sea_surface_wind_wave_significant_height (m)',
'sea_surface_wind_wave_period (s)', 'sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)',
'sea_surface_swell_wave_to_direction (degree)',
'sea_surface_wind_wave_to_direction (degree)',
'number_of_frequencies (count)', 'center_frequencies (Hz)',
'bandwidths (Hz)', 'spectral_energy (m**2/Hz)',
'mean_wave_direction (degree)', 'principal_wave_direction (degree)',
'polar_coordinate_r1 (1)', 'polar_coordinate_r2 (1)',
'calculation_method', 'sampling_rate (Hz)', 'name'])
sos_name = winds
all_col = (['station_id', 'sensor_id', 'latitude (degree)', 'longitude (degree)',
'date_time', 'depth (m)', 'wind_from_direction (degree)',
'wind_speed (m/s)', 'wind_speed_of_gust (m/s)',
'upward_air_velocity (m/s)', 'name'])
"""
#add remove from above
if sos_name == 'waves':
col = ['sea_surface_wave_significant_height (m)','sea_surface_wave_peak_period (s)',
'sea_surface_wave_mean_period (s)','sea_water_temperature (c)',
'sea_surface_wave_to_direction (degree)']
elif sos_name == 'winds':
col = ['wind_from_direction (degree)','wind_speed (m/s)',
'wind_speed_of_gust (m/s)','upward_air_velocity (m/s)']
#if sos_name == 'waves':
# col = ['sea_surface_wave_significant_height (m)']
#elif sos_name == 'winds':
# col = ['wind_speed (m/s)']
collector = NdbcSos()
collector.set_bbox(bbox)
collector.start_time = start
collector.variables = [sos_name]
ofrs = collector.server.offerings
title = collector.server.identification.title
collector.features = None
collector.end_time = start + datetime.timedelta(1)
response = collector.raw(responseFormat='text/csv')
df = pd.read_csv(BytesIO(response), parse_dates=True)
g = df.groupby('station_id')
df = dict()
for station in g.groups.keys():
df.update({station: g.get_group(station).iloc[0]})
df = pd.DataFrame.from_dict(df).T
station_dict = {}
for offering in collector.server.offerings:
station_dict.update({offering.name: offering.description})
names = []
for sta in df.index:
names.append(station_dict.get(sta, sta))
df['name'] = names
#override short time
collector.end_time = end
data = []
for k, row in df.iterrows():
station_id = row['station_id'].split(':')[-1]
collector.features = [station_id]
response = collector.raw(responseFormat='text/csv')
kw = dict(parse_dates=True, index_col='date_time')
obs = pd.read_csv(BytesIO(response), **kw).reset_index()
obs = obs.drop_duplicates(subset='date_time').set_index('date_time')
series = obs[col]
series._metadata = dict(
station=row.get('station_id'),
station_name=row.get('name'),
station_code=str(row.get('station_id').split(':')[-1]),
sensor=row.get('sensor_id'),
lon=row.get('longitude (degree)'),
lat=row.get('latitude (degree)'),
depth=row.get('depth (m)'),
)
data.append(series)
# Clean the table.
table = dict(
station_name = [s._metadata.get('station_name') for s in data],
station_code = [s._metadata.get('station_code') for s in data],
sensor = [s._metadata.get('sensor') for s in data],
lon = [s._metadata.get('lon') for s in data],
lat = [s._metadata.get('lat') for s in data],
depth = [s._metadata.get('depth', 'NA') for s in data],
)
table = pd.DataFrame(table).set_index('station_name')
if verbose:
print('Collector offerings')
print('{}: {} offerings'.format(title, len(ofrs)))
return data, table
#################
def write_csv(obs_dir, name, year, table, data, label):
"""
examples
print(' > write csv files')
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
"""
#label = 'coops_ssh'
#table = ssh_table
#data = ssh
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
os.makedirs(outd)
table.to_csv(os.path.join(outt,'table.csv'))
stations = table['station_code']
for ista in range(len(stations)):
sta = str(stations [ista])
fname = os.path.join(outd,sta+'.csv')
df = data[ista]
try:
#in case it is still a series like ssh
df = df.to_frame()
except:
pass
df.to_csv(fname)
fmeta = os.path.join(outd,sta)+'_metadata.csv'
metadata = pd.DataFrame.from_dict( data[ista]._metadata , orient="index")
metadata.to_csv(fmeta)
def read_csv(obs_dir, name, year, label):
"""
examples
    print(' > read csv files')
    ssh_table1 , ssh1        = read_csv (obs_dir, name, year, label='coops_ssh' )
    wnd_obs_table1, wnd_obs1 = read_csv (obs_dir, name, year, label='coops_wind')
    wnd_ocn_table1, wnd_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wind' )
    wav_ocn_table1, wav_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wave' )
"""
outt = os.path.join(obs_dir, name+year,label)
outd = os.path.join(outt,'data')
if not os.path.exists(outd):
        sys.exit('ERROR: check path to: ' + outd)
table = pd.read_csv(os.path.join(outt,'table.csv')).set_index('station_name')
table['station_code'] = table['station_code'].astype('str')
stations = table['station_code']
data = []
metadata = []
for ista in range(len(stations)):
sta = stations [ista]
fname8 = os.path.join(outd,sta)+'.csv'
df = pd.read_csv(fname8,parse_dates = ['date_time']).set_index('date_time')
fmeta = os.path.join(outd,sta) + '_metadata.csv'
meta = pd.read_csv(fmeta, header=0, names = ['names','info']).set_index('names')
meta_dict = meta.to_dict()['info']
meta_dict['lon'] = float(meta_dict['lon'])
meta_dict['lat'] = float(meta_dict['lat'])
df._metadata = meta_dict
data.append(df)
return table,data
#################
def | (obs_dir, name, year):
url = 'https://stn.wim.usgs.gov/STNServices/HWMs/FilteredHWMs.json'
params = {'EventType': 2, # 2 for hurricane
'EventStatus': 0} # 0 for completed
default_filter = {"riverine": True,
"non_still_water": True}
nameyear = (name+year).lower()
out_dir = os.path.join(obs_dir,'hwm')
if not os.path.exists(out_dir):
os.makedirs(out_dir)
fname = os.path.join(out_dir,nameyear+'.csv')
usgs_json_file = os.path.join(out_dir,'usgs_hwm_tmp.json')
if not os.path.exists( usgs_json_file):
response = requests.get(url, params=params, headers=headers,verify=False)
response.raise_for_status()
json_data = json.loads(response.text)
with open(usgs_json_file, 'w') as outfile:
json.dump(json_data, outfile )
else:
with open(usgs_json_file) as json_file:
json_data = json.load(json_file)
hwm_stations = dict()
for data in json_data:
if 'elev_ft' in data.keys() and name.lower() in data['eventName'].lower():
hwm_stations[str(data['hwm_id'])] = data
log = pd.DataFrame.from_dict(hwm_stations)
hwm = []
ii = 0
for key in log.keys():
l0 = []
for key0 in log[key].keys() :
l0.append(log[key][key0])
hwm.append(l0)
#
hwm = np.array(hwm)
df = pd.DataFrame(data=hwm, columns=log[key].keys())
drop_poor = False
if drop_poor:
for i in range(len(df)):
tt = df.hwmQualityName[i]
if 'poor' in tt.lower():
df.hwmQualityName[i] = np.nan
df = df.dropna()
df['elev_m'] = pd.to_numeric(df['elev_ft']) * 0.3048 #in meter
#
df.to_csv(fname)
def get_all_data():
    # Stub kept so the __main__ guard below works; the download steps
    # currently run at module level when this script is executed.
    pass
###############################################
###############################################
############ MAIN code Starts here ############
if False:
# not needed. will take from the storm specific obs list from coops and ndbc
obs_station_list_gen()
#
#######
# out dir
obs_dir = os.path.join(base_dirf,'obs')
if get_usgs_hwm:
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print(' > Get USGS HWM for ', name)
try:
write_high_water_marks(obs_dir, name, year)
except:
print (' > Get USGS HWM for ', name , ' ERROR ...')
for key in storms.keys():
name = storms[key]['name']
year = storms[key]['year']
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print( '******************************************************** \n\n\n\n\n\n')
#if bbox_from_best_track:
try:
#bbox_from_best_track = False
code,hurricane_gis_files = get_nhc_storm_info (year,name)
###############################################################################
#download gis zip files
base = download_nhc_gis_files(hurricane_gis_files)
# get advisory cones and track points
cones,pts_actual,points_actual = read_advisory_cones_info(hurricane_gis_files,base,year,code)
start = pts_actual[0] ['FLDATELBL']
end = pts_actual[-1]['FLDATELBL']
#start_txt_actual = ('20' + start[:-2]).replace('/','')
#end_txt_actual = ('20' + end [:-2]).replace('/','')
#print('\n\n\n\n\n\n ********************************************************')
#for key1 in pts_actual[0].keys():
# print( '***** pts_actual[0] [', key1, ']',pts_actual[0] [key1] , '*********')
#print( '******************************************************** \n\n\n\n\n\n')
start_dt = dateparser.parse(start,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) - obs_xtra_days
end_dt = dateparser.parse(end ,settings={"TO_TIMEZONE": "UTC"}).replace(tzinfo=None) + obs_xtra_days
#try:
# # bbox_from_best_track:
# start_txt = start_txt_best
# end_txt = end_txt_best
# #bbox = bbox_best
#except:
# start_txt = start_txt_actual
# end_txt = end_txt_actual
#
#start_dt = arrow.get(start_txt, 'YYYYMMDDhh').datetime - obs_xtra_days
#end_dt = arrow.get(end_txt , 'YYYYMMDDhh').datetime + obs_xtra_days
#if False:
# get bbox from actual data
last_cone = cones[-1]['geometry'].iloc[0]
track = LineString([point['geometry'] for point in pts_actual])
lons_actual = track.coords.xy[0]
lats_actual = track.coords.xy[1]
bbox_actual = min(lons_actual)-2, min(lats_actual)-2, max(lons_actual)+2, max(lats_actual)+2
################################################################################
# Find the bounding box to search the data.
bbox_from_best_track = False
bbox = bbox_actual
except:
start_dt = storms[key]['start']
end_dt = storms[key]['end' ]
bounds = storms[key]['bbox' ]
if storms[key]['bbox'] is not None:
bbox = storms[key]['bbox']
#print('\n\n\n\n >>>>> Download and read all GIS data for Storm >',name, ' Year > ', year, '\n ** This is an old STORM !!!!!! \n\n\n\n')
#
# Note that the bounding box is derived from the track and the latest prediction cone.
strbbox = ', '.join(format(v, '.2f') for v in bbox)
#
print('\n\n\n\n\n\n********************************************************')
print( '***** Storm name ',name, ' Year ', year, ' *********')
print('bbox: {}\nstart: {}\n end: {}'.format(strbbox, start_dt, end_dt))
print( '******************************************************** \n\n\n\n\n\n')
#
#########
if get_cops_wlev:
try:
print(' > Get water level information CO-OPS ... ')
# ["MLLW","MSL","MHW","STND","IGLD", "NAVD"]
datum = 'NAVD'
datum = 'MSL'
print ('datum=', datum )
ssh, ssh_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='water_surface_height_above_reference_datum',
units=cf_units.Unit('meters'),
datum = datum ,
bbox=bbox,
)
write_csv(obs_dir, name, year, table=ssh_table , data= ssh , label='coops_ssh' )
except:
print(' > Get water level information CO-OPS >>>> ERRORRRRR')
######
if get_cops_wind:
try:
print(' > Get wind information CO-OPS ... ')
wnd_obs, wnd_obs_table = get_coops(
start=start_dt,
end=end_dt,
sos_name='wind_speed',
units=cf_units.Unit('m/s'),
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_obs_table, data= wnd_obs , label='coops_wind')
except:
print(' > Get wind information CO-OPS >>> ERORRRR')
######
if get_ndbc_wind:
try:
print(' > Get wind ocean information (ndbc) ... ')
wnd_ocn, wnd_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='winds',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wnd_ocn_table, data= wnd_ocn , label='ndbc_wind' )
except:
print(' > Get wind ocean information (ndbc) >>> ERRRORRRR')
######
if get_ndbc_wave:
try:
print(' > Get wave ocean information (ndbc) ... ')
wav_ocn, wav_ocn_table = get_ndbc(
start=start_dt,
end=end_dt,
sos_name='waves',
bbox=bbox,
)
write_csv(obs_dir, name, year, table=wav_ocn_table, data= wav_ocn , label='ndbc_wave' )
except:
print(' > Get wave ocean information (ndbc) >>> ERRORRRR ')
######
if False:
# test reading files
ssh_table1 , ssh1 = read_csv (obs_dir, name, year, label='coops_ssh' )
wnd_obs_table1, wnd_obs1 = read_csv (obs_dir, name, year, label='coops_wind')
wnd_ocn_table1, wnd_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wind' )
wav_ocn_table1, wav_ocn1 = read_csv (obs_dir, name, year, label='ndbc_wave' )
#if False:
#
# back up script file
args=sys.argv
scr_name = args[0]
scr_dir = os.path.join(obs_dir, name+year)
os.system('cp -fr ' + scr_name + ' ' + scr_dir)
#
#with open(pick, "rb") as f:
# w = pickle.load(f)
#f = open(pick, "rb")
#w = pickle.load(f)
#if __name__ == "__main__":
# main()
if __name__ == "__main__":
get_all_data()
| write_high_water_marks |
dbg.go | /* A simple Debug/trace facility for Go programs
 * Its lineage can be traced back to a set of C preprocessor macros that we used at Dynabase Systems
* back in the early 90's.
*
* This is better (imo) than debug/log levels because it allows instrumentation to remain in place
* and be selectively turned on or off as needed.
*
* License: MIT
*
* (c) 2015 Vince Hodges <[email protected]>
*/
package dbg
import (
"encoding/hex"
"log"
"os"
)
// Flags is a bit mask representing the set of traces that are enabled.
var Flags int64
// Logger is used to write debug messages
var Logger *log.Logger
// init makes sure there's a default logger to write to
func init() {
// Log instead of fmt so we get datestamped entries.
Logger = log.New(os.Stdout, "[Debug] ", log.LstdFlags)
}
// Printf will check flag against Flags to see if the bit is set and
// will print the trace if so
func Printf(flag int64, format string, args ...interface{} ) {
if (Flags != 0 && Flags & flag == flag) {
Logger.Printf(format, args...)
}
}
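// Usage sketch (the flag constants are illustrative, not part of this package):
//
//	const (
//		DbgNet int64 = 1 << iota
//		DbgIO
//	)
//	dbg.Flags = DbgNet | DbgIO
//	dbg.Printf(DbgNet, "connected to %s", addr)
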
// HexDump formats a HexDump of data and prints it to Logger if Flags has the
// bit set for flag.
func HexDump(flag int64, data []byte) {
if (Flags != 0 && Flags & flag == flag) |
}
// TODO Add helpers for printing/parsing command line string into debug flag
| {
Logger.Printf("Hex:\n%s", hex.Dump(data))
} |
sumrz.go | package main
import (
"encoding/csv"
"flag"
"fmt"
"io"
"os"
//"runtime/pprof"
)
func | () {
//f,_ := os.Create("sumrz.prof")
//pprof.StartCPUProfile(f)
//defer pprof.StopCPUProfile()
var delim = '\t'
flag.Parse()
args := flag.Args()
if len(args) > 1 {
printUsageExit()
}
if len(args) == 1 {
if len(args[0]) != 1 {
printUsageExit()
}
delim = rune(args[0][0])
}
var stats TableStats
err := readCsvAndComputeStats(os.Stdin, &stats, delim)
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
} else {
fmt.Println(stats.String())
}
}
func printUsageExit() {
fmt.Fprint(os.Stderr, "Usage: sumrz < file.csv\n" +
" or: sumrz '\t' < file.txt")
os.Exit(1)
}
func readCsvAndComputeStats(reader io.Reader, stats *TableStats, delim rune) error {
csvReader := csv.NewReader(reader)
csvReader.LazyQuotes = true
csvReader.Comma = delim
var err error
lineNum := 0
for {
var values []string
values, err = csvReader.Read()
if err != nil {
break
}
lineNum++
if lineNum == 1 {
err = stats.init(values)
} else {
err = stats.update(values)
}
if lineNum % 10000 == 0 || err == io.EOF {
fmt.Fprintf(os.Stderr, "Read %d lines\n", lineNum)
}
if err != nil {
break
}
}
if err != io.EOF {
return fmt.Errorf("Failed on line %d: %v\n", lineNum, err)
}
return nil
}
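// Example of programmatic use (TableStats is defined elsewhere in this package;
// the input literal is illustrative):
//
//	var stats TableStats
//	err := readCsvAndComputeStats(strings.NewReader("a,b\n1,2\n"), &stats, ',')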
| main |
aws.go | package registry
import (
"fmt"
"sync"
)
// References:
// - https://github.com/bzon/ecr-k8s-secret-creator
// - https://github.com/kubernetes/kubernetes/blob/master/pkg/credentialprovider/aws/aws_credentials.go
// - https://github.com/fluxcd/flux/pull/1455
import (
"strings"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ecr"
"github.com/go-kit/kit/log"
)
const (
// For recognising ECR hosts
awsPartitionSuffix = ".amazonaws.com"
awsCnPartitionSuffix = ".amazonaws.com.cn"
// How long AWS tokens remain valid, according to AWS docs; this
// is used as an upper bound, overridden by any sooner expiry
// returned in the API response.
defaultTokenValid = 12 * time.Hour
// how long to skip refreshing a region after we've failed
embargoDuration = 10 * time.Minute
EKS_SYSTEM_ACCOUNT = "602401143452"
EKS_SYSTEM_ACCOUNT_CN = "918309763551"
)
// AWSRegistryConfig supplies constraints for scanning AWS (ECR) image
// registries. Fields may be left empty.
type AWSRegistryConfig struct {
Regions []string
AccountIDs []string
BlockIDs []string
}
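// For example, to restrict scanning to a single region and account
// (values are illustrative):
//
//	cfg := AWSRegistryConfig{
//		Regions:    []string{"eu-west-1"},
//		AccountIDs: []string{"123456789012"},
//	}
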
func contains(strs []string, str string) bool {
for _, s := range strs {
if s == str {
return true
}
}
return false
}
func validECRHost(domain string) bool {
switch {
case strings.HasSuffix(domain, awsPartitionSuffix):
return true
case strings.HasSuffix(domain, awsCnPartitionSuffix):
return true
}
return false
}
// ImageCredsWithAWSAuth wraps an image credentials func with another
// that adds two capabilities:
//
// - it will include or exclude images from ECR accounts and regions
// according to the config given; and,
//
// - if it can reach the AWS API, it will obtain credentials for ECR
// accounts from it, automatically refreshing them when necessary.
//
// It also returns a "pre-flight check" that can be used to verify
// that the AWS API is available while starting up.
//
// ECR registry URLs look like this:
//
// <account-id>.dkr.ecr.<region>.amazonaws.com
//
// i.e., they can differ in the account ID and in the region. It's
// possible to refer to any registry from any cluster (although, being
// AWS, there will be a cost incurred). The config supplied can
// restrict based on the region:
//
// - if a region or regions are supplied, exactly those regions shall
// be included;
// - if no region is supplied, but it can be detected, the detected
// region is included
// - if no region is supplied _or_ detected, no region is included
//
// .. and on the account ID:
//
// - if account IDs to include are supplied, only those are included
// - otherwise, all account IDs are included
// - the supplied list may be empty
// with the exception that
// - if account IDs to _exclude_ are supplied, those shall not be
// included
func ImageCredsWithAWSAuth(lookup func() ImageCreds, logger log.Logger, config AWSRegistryConfig) (func() error, func() ImageCreds) {
// only ever do the preflight check once; all subsequent calls
// will succeed trivially, so the first caller should pay
// attention to the return value.
var preflightOnce sync.Once
// it's possible to fail the pre-flight check, but still apply the
// constraints given in the config. `okToUseAWS` is true if using
// the AWS API to get credentials is expected to work.
var okToUseAWS bool
preflight := func() error {
var preflightErr error
preflightOnce.Do(func() {
defer func() {
logger.Log("info", "restricting ECR registry scans",
"regions", fmt.Sprintf("%v", config.Regions),
"include-ids", fmt.Sprintf("%v", config.AccountIDs),
"exclude-ids", fmt.Sprintf("%v", config.BlockIDs))
}()
// This forces the AWS SDK to load config, so we can get
// the default region if it's there.
sess := session.Must(session.NewSessionWithOptions(session.Options{
SharedConfigState: session.SharedConfigEnable,
}))
// Always try to connect to the metadata service, so we
// can fail fast if it's not available.
ec2 := ec2metadata.New(sess)
metadataRegion, err := ec2.Region()
if err != nil {
preflightErr = err
if config.Regions == nil {
config.Regions = []string{}
}
logger.Log("error", "fetching region for AWS", "err", err)
return
}
okToUseAWS = true
if config.Regions == nil {
clusterRegion := *sess.Config.Region
regionSource := "local config"
if clusterRegion == "" {
// no region set in config; in that case, use what we got from the EC2 metadata service
clusterRegion = metadataRegion
regionSource = "EC2 metadata service"
}
logger.Log("info", "detected cluster region", "source", regionSource, "region", clusterRegion)
config.Regions = []string{clusterRegion}
}
})
return preflightErr | }
awsCreds := NoCredentials()
// this has the expiry time from the last request made per region. We request new tokens whenever
// - we don't have credentials for the particular registry URL
// - the credentials have expired
// and when we do, we get new tokens for all account IDs in the
// region that we've seen. This means that credentials are
// fetched, and expire, per region.
regionExpire := map[string]time.Time{}
// we can get an error when refreshing the credentials; to avoid
// spamming the log, keep track of failed refreshes.
regionEmbargo := map[string]time.Time{}
// should this registry be scanned?
var shouldScan func(string, string) bool
if config.AccountIDs == nil {
shouldScan = func(region, accountID string) bool {
return contains(config.Regions, region) && !contains(config.BlockIDs, accountID)
}
} else {
shouldScan = func(region, accountID string) bool {
return contains(config.Regions, region) && contains(config.AccountIDs, accountID) && !contains(config.BlockIDs, accountID)
}
}
ensureCreds := func(domain, region, accountID string, now time.Time) error {
// if we had an error getting a token before, don't try again
// until the embargo has passed
if embargo, ok := regionEmbargo[region]; ok {
if embargo.After(now) {
return nil // i.e., fail silently
}
delete(regionEmbargo, region)
}
// if we don't have the entry at all, we need to get a
// token. NB we can't check the inverse and return early,
// since if the creds do exist, we need to check their expiry.
if c := awsCreds.credsFor(domain); c == (creds{}) {
goto refresh
}
// otherwise, check if the tokens have expired
if expiry, ok := regionExpire[region]; !ok || expiry.Before(now) {
goto refresh
}
// the creds exist and are before the use-by; nothing to be done.
return nil
refresh:
// unconditionally append the sought-after account, and let
// the AWS API figure out if it's a duplicate.
accountIDs := append(allAccountIDsInRegion(awsCreds.Hosts(), region), accountID)
logger.Log("info", "attempting to refresh auth tokens", "region", region, "account-ids", strings.Join(accountIDs, ", "))
regionCreds, expiry, err := fetchAWSCreds(region, accountIDs)
if err != nil {
regionEmbargo[region] = now.Add(embargoDuration)
logger.Log("error", "fetching credentials for AWS region", "region", region, "err", err, "embargo", embargoDuration)
return err
}
regionExpire[region] = expiry
awsCreds.Merge(regionCreds)
return nil
}
lookupECR := func() ImageCreds {
imageCreds := lookup()
for name, creds := range imageCreds {
domain := name.Domain
if !validECRHost(domain) {
continue
}
bits := strings.Split(domain, ".")
if bits[1] != "dkr" || bits[2] != "ecr" {
logger.Log("warning", "AWS registry domain not in expected format <account-id>.dkr.ecr.<region>.amazonaws.<extension>", "domain", domain)
continue
}
accountID := bits[0]
region := bits[3]
// Before deciding whether an image is included, we need to establish the included regions,
// and whether we can use the AWS API to get credentials. But we don't need to log any problem
// that arises _unless_ there's an image that ends up being included in the scanning.
preflightErr := preflight()
if !shouldScan(region, accountID) {
delete(imageCreds, name)
continue
}
if preflightErr != nil {
logger.Log("warning", "AWS auth implied by ECR image, but AWS API is not available. You can ignore this if you are providing credentials some other way (e.g., through imagePullSecrets)", "image", name.String(), "err", preflightErr)
}
if okToUseAWS {
if err := ensureCreds(domain, region, accountID, time.Now()); err != nil {
logger.Log("warning", "unable to ensure credentials for ECR", "domain", domain, "err", err)
}
newCreds := NoCredentials()
newCreds.Merge(awsCreds)
newCreds.Merge(creds)
imageCreds[name] = newCreds
}
}
return imageCreds
}
return preflight, lookupECR
}
func allAccountIDsInRegion(hosts []string, region string) []string {
var ids []string
// this returns a list of unique accountIDs, assuming that the input is unique hostnames
for _, host := range hosts {
bits := strings.Split(host, ".")
if len(bits) != 6 {
continue
}
if bits[3] == region {
ids = append(ids, bits[0])
}
}
return ids
}
func fetchAWSCreds(region string, accountIDs []string) (Credentials, time.Time, error) {
sess := session.Must(session.NewSession(&aws.Config{Region: aws.String(region)}))
svc := ecr.New(sess)
ecrToken, err := svc.GetAuthorizationToken(&ecr.GetAuthorizationTokenInput{
RegistryIds: aws.StringSlice(accountIDs),
})
if err != nil {
return Credentials{}, time.Time{}, err
}
auths := make(map[string]creds)
expiry := time.Now().Add(defaultTokenValid)
for _, v := range ecrToken.AuthorizationData {
// Remove the https prefix
host := strings.TrimPrefix(*v.ProxyEndpoint, "https://")
creds, err := parseAuth(*v.AuthorizationToken)
if err != nil {
return Credentials{}, time.Time{}, err
}
creds.provenance = "AWS API"
creds.registry = host
auths[host] = creds
ex := *v.ExpiresAt
if ex.Before(expiry) {
expiry = ex
}
}
return Credentials{m: auths}, expiry, nil
} | |
t59_if_expression.rs | //! Tests auto-converted from "sass-spec/spec/non_conformant/basic/59_if_expression.hrx"
#[allow(unused)]
fn | () -> crate::TestRunner {
super::runner()
}
#[test]
fn test() {
assert_eq!(
runner().ok(
"$x: 0;\
\n$if-false: whatever;\n\
\ndiv {\
\n foo: if($if-true: hey, $if-false: ho, $condition: true);\
\n foo: if($if-true: hey, $if-false: ho, $condition: false);\
\n foo: if($x != 0, if($x, true, false), unquote(\"x is zero\"));\
\n foo: if(false, 1/0, $if-false: $if-false);\
\n}"
),
"div {\
\n foo: hey;\
\n foo: ho;\
\n foo: x is zero;\
\n foo: whatever;\
\n}\n"
);
}
| runner |
test_extraesia_settings.py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Youssef Restom and Contributors
# See license.txt
from __future__ import unicode_literals
# import frappe
import unittest
class | (unittest.TestCase):
pass
| TestExtraesiaSettings |
contract.rs | use crate::error::CalcError;
use crate::message::CalcMsg;
use kelk_env::{context::Context, Response};
fn add(_ctx: Context, a: i32, b: i32) -> Result<i32, CalcError> {
Ok(a + b)
}
fn sub(_ctx: Context, a: i32, b: i32) -> Result<i32, CalcError> {
Ok(a - b)
}
fn mul(_ctx: Context, a: i32, b: i32) -> Result<i32, CalcError> {
Ok(a * b)
}
fn div(_ctx: Context, a: i32, b: i32) -> Result<i32, CalcError> {
if b == 0 {
return Err(CalcError::DivByZero);
}
Ok(a / b)
}
// pub fn set_memory(ctx: Context, m: i32) -> Result<i32, CalcError> {
// let d = m.to_be_bytes();
// ctx.write_storage(0, d)?;
// Ok(m)
// }
// pub fn memory(ctx: Context) -> Result<i32, CalcError> {
// let d = ctx.read_storage(0, 4)?;
// let m = i32::from_be_bytes(d);
// Ok(m)
// }
/// The "instantiate" will be executed only once on instantiating the contract actor
#[cfg(target_arch = "wasm32")] | extern "C" fn instantiate() -> u32 {
kelk_env::do_instantiate(&super::instantiate)
}
}
#[cfg(target_arch = "wasm32")]
mod __wasm_export_process_msg {
#[no_mangle]
extern "C" fn process_msg(msg_ptr: *const u8, length: u32) -> u64 {
kelk_env::do_process_msg(&super::process_msg, msg_ptr, length)
}
}
// #[kelk_derive(instantiate)]
pub fn instantiate(_ctx: Context) -> Result<Response, CalcError> {
Ok(Response { res: 0 })
}
/// The process_msg function is the main function of the *deployed* contract actor
// #[kelk_derive(process_msg)]
pub fn process_msg(ctx: Context, msg: CalcMsg) -> Result<Response, CalcError> {
let ans = match msg {
CalcMsg::Add { a, b } => add(ctx, a, b),
CalcMsg::Sub { a, b } => sub(ctx, a, b),
CalcMsg::Mul { a, b } => mul(ctx, a, b),
CalcMsg::Div { a, b } => div(ctx, a, b),
}?;
Ok(Response { res: ans })
}
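// Usage sketch (assuming a `Context` like the one built in contract_test.rs):
//
//     let res = process_msg(ctx, CalcMsg::Add { a: 1, b: 2 })?;
//     assert_eq!(res.res, 3);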
#[cfg(test)]
#[path = "./contract_test.rs"]
mod contract_test; | mod __wasm_export_instantiate {
#[no_mangle] |
_domain.py | from plotly.basedatatypes import BaseLayoutHierarchyType
import copy
class Domain(BaseLayoutHierarchyType):
# column
# ------
@property
def column(self):
|
@column.setter
def column(self, val):
self['column'] = val
# row
# ---
@property
def row(self):
"""
If there is a layout grid, use the domain for this row in the
grid for this scene subplot .
The 'row' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['row']
@row.setter
def row(self, val):
self['row'] = val
# x
# -
@property
def x(self):
"""
Sets the horizontal domain of this scene subplot (in plot
fraction).
The 'x' property is an info array that may be specified as a
list or tuple of 2 elements where:
(0) The 'x[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'x[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self['x']
@x.setter
def x(self, val):
self['x'] = val
# y
# -
@property
def y(self):
"""
Sets the vertical domain of this scene subplot (in plot
fraction).
The 'y' property is an info array that may be specified as a
list or tuple of 2 elements where:
(0) The 'y[0]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
(1) The 'y[1]' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
list
"""
return self['y']
@y.setter
def y(self, val):
self['y'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'layout.scene'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot .
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
"""
def __init__(
self, arg=None, column=None, row=None, x=None, y=None, **kwargs
):
"""
Construct a new Domain object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of plotly.graph_objs.layout.scene.Domain
column
If there is a layout grid, use the domain for this
column in the grid for this scene subplot .
row
If there is a layout grid, use the domain for this row
in the grid for this scene subplot .
x
Sets the horizontal domain of this scene subplot (in
plot fraction).
y
Sets the vertical domain of this scene subplot (in plot
fraction).
Returns
-------
Domain
"""
super(Domain, self).__init__('domain')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.layout.scene.Domain
constructor must be a dict or
an instance of plotly.graph_objs.layout.scene.Domain"""
)
# Import validators
# -----------------
from plotly.validators.layout.scene import (domain as v_domain)
# Initialize validators
# ---------------------
self._validators['column'] = v_domain.ColumnValidator()
self._validators['row'] = v_domain.RowValidator()
self._validators['x'] = v_domain.XValidator()
self._validators['y'] = v_domain.YValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop('column', None)
self.column = column if column is not None else _v
_v = arg.pop('row', None)
self.row = row if row is not None else _v
_v = arg.pop('x', None)
self.x = x if x is not None else _v
_v = arg.pop('y', None)
self.y = y if y is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
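# Example (illustrative): constrain a 3-D scene subplot to the left half of the
# figure:
#     fig.layout.scene.domain = dict(x=[0, 0.5], y=[0, 1])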
| """
If there is a layout grid, use the domain for this column in
the grid for this scene subplot .
The 'column' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self['column'] |
scraper.py | import requests
import lxml.html as html
import os
import datetime
# The text-fill element is an h2, but the library doesn't recognize it as an h2; it parses it as text-fill
HOME_URL = 'https://www.larepublica.co/'
XPATH_LINK_TO_ARTICLE = '//text-fill/a/@href'
XPATH_TITLE = '//div[@class="mb-auto"]/text-fill/span//text()'
XPATH_SUMMARY = '//div[@class="lead"]/p//text()'
XPATH_BODY = '//div[@class="html-content"]/p//text()'
def parse_notice(link, today):
try:
response = requests.get(link)
if response.status_code == 200:
notice = response.content.decode('utf-8')
parsed = html.fromstring(notice)
try:
title = parsed.xpath(XPATH_TITLE)[0]
title = title.replace('\"','')
summary = parsed.xpath(XPATH_SUMMARY)[0]
body = parsed.xpath(XPATH_BODY)
except IndexError:
return
with open(f'{today}/{title}.txt', 'w', encoding='utf-8') as f:
f.write(str(title))
f.write('\n\n')
f.write(str(summary))
f.write('\n\n')
for p in body:
f.write(str(p))
f.write('\n')
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def parse_home():
try:
response = requests.get(HOME_URL)
if response.status_code == 200:
home = response.content.decode('utf-8')
parsed = html.fromstring(home)
links_to_notices = parsed.xpath(XPATH_LINK_TO_ARTICLE)
# print(links_to_notices)
today = datetime.date.today().strftime('%d-%m-%Y')
if not os.path.isdir(today):
os.mkdir(today)
for link in links_to_notices:
parse_notice(link, today)
else:
raise ValueError(f'Error: {response.status_code}')
except ValueError as ve:
print(ve)
def run():
parse_home()
if __name__ == '__main__':
| run() |
|
postgres.js | const { Pool } = require('pg');
export default async function | (request, response) {
const pool = new Pool({
host: request.body.db.host,
database: request.body.db.database,
port: request.body.db.port,
user: request.body.db.username,
password: request.body.db.password,
ssl: {
rejectUnauthorized: false,
}
});
const client = await pool.connect();
client.query(request.body.query, [])
.then(res => {
client.release();
return response.status(200).json(res.rows);
})
.catch(err => {
console.log('fetching data failed', err);
client.release();
return response.status(500).json({ err: "failed to fetch data for panel" });
});
} | handler |
dbgmcu_v1.rs | #![allow(non_snake_case, non_upper_case_globals)]
#![allow(non_camel_case_types)]
//! Debug support
//!
//! Used by: stm32f401, stm32f410, stm32f411, stm32f412, stm32f413
use crate::{RORegister, RWRegister};
#[cfg(not(feature = "nosync"))]
use core::marker::PhantomData;
/// IDCODE
pub mod IDCODE {
/// DEV_ID
pub mod DEV_ID {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (12 bits: 0xfff << 0)
pub const mask: u32 = 0xfff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// REV_ID
pub mod REV_ID {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (16 bits: 0xffff << 16)
pub const mask: u32 = 0xffff << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Control Register
pub mod CR {
/// DBG_SLEEP
pub mod DBG_SLEEP {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// DBG_STOP
pub mod DBG_STOP {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// DBG_STANDBY
pub mod DBG_STANDBY {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TRACE_IOEN
pub mod TRACE_IOEN {
/// Offset (5 bits)
pub const offset: u32 = 5;
/// Mask (1 bit: 1 << 5)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TRACE_MODE
pub mod TRACE_MODE {
/// Offset (6 bits)
pub const offset: u32 = 6;
/// Mask (2 bits: 0b11 << 6)
pub const mask: u32 = 0b11 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
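// Usage sketch (read-modify-write of one field; `dbgmcu` being an `Instance`
// in scope is an assumption for illustration):
//
//     let cr = dbgmcu.CR.read();
//     dbgmcu.CR.write((cr & !CR::DBG_STOP::mask) | (1 << CR::DBG_STOP::offset));
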
/// Debug MCU APB1 Freeze register
pub mod APB1_FZ {
/// DBG_TIM2_STOP
pub mod DBG_TIM2_STOP {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
    /// DBG_TIM3_STOP
pub mod DBG_TIM3_STOP {
/// Offset (1 bits)
pub const offset: u32 = 1;
/// Mask (1 bit: 1 << 1)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// DBG_TIM4_STOP
pub mod DBG_TIM4_STOP {
/// Offset (2 bits)
pub const offset: u32 = 2;
/// Mask (1 bit: 1 << 2)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// DBG_TIM5_STOP
pub mod DBG_TIM5_STOP {
/// Offset (3 bits)
pub const offset: u32 = 3;
/// Mask (1 bit: 1 << 3)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// RTC stopped when Core is halted
pub mod DBG_RTC_Stop {
/// Offset (10 bits)
pub const offset: u32 = 10;
/// Mask (1 bit: 1 << 10)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// DBG_WWDG_STOP
pub mod DBG_WWDG_STOP {
/// Offset (11 bits)
pub const offset: u32 = 11;
/// Mask (1 bit: 1 << 11)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
    /// DBG_IWDG_STOP
pub mod DBG_IWDG_STOP {
/// Offset (12 bits)
pub const offset: u32 = 12;
/// Mask (1 bit: 1 << 12)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
    /// DBG_I2C1_SMBUS_TIMEOUT
pub mod DBG_I2C1_SMBUS_TIMEOUT {
/// Offset (21 bits)
pub const offset: u32 = 21;
/// Mask (1 bit: 1 << 21)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
    /// DBG_I2C2_SMBUS_TIMEOUT
pub mod DBG_I2C2_SMBUS_TIMEOUT {
/// Offset (22 bits)
pub const offset: u32 = 22;
/// Mask (1 bit: 1 << 22)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
    /// DBG_I2C3SMBUS_TIMEOUT
pub mod DBG_I2C3SMBUS_TIMEOUT {
/// Offset (23 bits)
pub const offset: u32 = 23;
/// Mask (1 bit: 1 << 23)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
/// Debug MCU APB2 Freeze register
pub mod APB2_FZ {
/// TIM1 counter stopped when core is halted
pub mod DBG_TIM1_STOP {
/// Offset (0 bits)
pub const offset: u32 = 0;
/// Mask (1 bit: 1 << 0)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TIM9 counter stopped when core is halted
pub mod DBG_TIM9_STOP {
/// Offset (16 bits)
pub const offset: u32 = 16;
/// Mask (1 bit: 1 << 16)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TIM10 counter stopped when core is halted
pub mod DBG_TIM10_STOP {
/// Offset (17 bits)
pub const offset: u32 = 17;
/// Mask (1 bit: 1 << 17)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
/// TIM11 counter stopped when core is halted
pub mod DBG_TIM11_STOP {
/// Offset (18 bits)
pub const offset: u32 = 18;
/// Mask (1 bit: 1 << 18)
pub const mask: u32 = 1 << offset;
/// Read-only values (empty)
pub mod R {}
/// Write-only values (empty)
pub mod W {}
/// Read-write values (empty)
pub mod RW {}
}
}
#[repr(C)]
pub struct RegisterBlock {
/// IDCODE
pub IDCODE: RORegister<u32>,
/// Control Register
pub CR: RWRegister<u32>,
    /// Debug MCU APB1 Freeze register
    pub APB1_FZ: RWRegister<u32>,
    /// Debug MCU APB2 Freeze register
pub APB2_FZ: RWRegister<u32>,
}
pub struct ResetValues {
pub IDCODE: u32,
pub CR: u32,
pub APB1_FZ: u32,
pub APB2_FZ: u32,
}
#[cfg(not(feature = "nosync"))]
pub struct Instance {
pub(crate) addr: u32,
pub(crate) _marker: PhantomData<*const RegisterBlock>,
}
#[cfg(not(feature = "nosync"))]
impl ::core::ops::Deref for Instance {
type Target = RegisterBlock;
#[inline(always)]
fn | (&self) -> &RegisterBlock {
unsafe { &*(self.addr as *const _) }
}
}
#[cfg(feature = "rtic")]
unsafe impl Send for Instance {}
| deref |
language.ts | import { languages } from 'countries-list';
import { Language } from './index.d';
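// Usage sketch (the import name `getLanguage` is illustrative):
// import getLanguage from './language';
// const fr = getLanguage('fr'); // fr.code === 'fr', fr.name === 'French'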
export default function(langCode: string): Language {
// @ts-ignore
const { name, native, rtl } = languages[langCode];
return { | native,
rtl
};
} | code: langCode,
name, |
admin.py | from django.contrib import admin
from django.contrib.auth import admin as auth_admin
from django.contrib.auth import get_user_model
from feedbackproj.users.forms import UserChangeForm, UserCreationForm
User = get_user_model()
@admin.register(User)
class UserAdmin(auth_admin.UserAdmin): | add_form = UserCreationForm
fieldsets = (("User", {"fields": ("name",)}),) + auth_admin.UserAdmin.fieldsets
list_display = ["username", "name", "is_superuser"]
search_fields = ["name"] |
form = UserChangeForm |
doghouse.go | package main
import (
"context"
"errors"
"fmt"
"io"
"log"
"net/http"
"os"
"sort"
"strings"
"github.com/reviewdog/reviewdog"
"github.com/reviewdog/reviewdog/cienv"
"github.com/reviewdog/reviewdog/doghouse"
"github.com/reviewdog/reviewdog/doghouse/client"
"github.com/reviewdog/reviewdog/project"
"golang.org/x/oauth2"
"golang.org/x/sync/errgroup"
)
func runDoghouse(ctx context.Context, r io.Reader, w io.Writer, opt *option, isProject bool, allowNonPR bool) error {
ghInfo, isPr, err := cienv.GetBuildInfo()
if err != nil {
return err
}
if !isPr && !allowNonPR {
		fmt.Fprintln(os.Stderr, "reviewdog: this is not a PullRequest build.")
return nil
}
resultSet, err := checkResultSet(ctx, r, opt, isProject)
if err != nil {
return err
}
cli, err := newDoghouseCli(ctx)
if err != nil {
return err
}
filteredResultSet, err := postResultSet(ctx, resultSet, ghInfo, cli)
if err != nil {
return err
}
if foundResultInDiff := reportResults(w, filteredResultSet); foundResultInDiff {
return errors.New("found at least one result in diff")
}
return nil
}
func newDoghouseCli(ctx context.Context) (client.DogHouseClientInterface, error) {
	// If skipDoghouseServer is true, run the doghouse code directly instead of
	// talking to the doghouse server, because the provided GitHub API token has
	// the Check API scope.
skipDoghouseServer := cienv.IsInGitHubAction() && os.Getenv("REVIEWDOG_TOKEN") == ""
if skipDoghouseServer {
token, err := nonEmptyEnv("REVIEWDOG_GITHUB_API_TOKEN")
if err != nil {
return nil, err
}
ghcli, err := githubClient(ctx, token)
if err != nil {
return nil, err
}
return &client.GitHubClient{Client: ghcli}, nil
}
return newDoghouseServerCli(ctx), nil
}
func newDoghouseServerCli(ctx context.Context) *client.DogHouseClient {
httpCli := http.DefaultClient
if token := os.Getenv("REVIEWDOG_TOKEN"); token != "" {
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: token},
)
httpCli = oauth2.NewClient(ctx, ts)
}
return client.New(httpCli)
}
var projectRunAndParse = project.RunAndParse
func checkResultSet(ctx context.Context, r io.Reader, opt *option, isProject bool) (*reviewdog.ResultMap, error) {
resultSet := new(reviewdog.ResultMap)
if isProject {
conf, err := projectConfig(opt.conf)
if err != nil {
return nil, err
}
resultSet, err = projectRunAndParse(ctx, conf, buildRunnersMap(opt.runners), opt.level)
if err != nil {
return nil, err
}
} else {
p, err := newParserFromOpt(opt)
if err != nil {
return nil, err
}
rs, err := p.Parse(r)
if err != nil {
return nil, err
}
resultSet.Store(toolName(opt), &reviewdog.Result{
Level: opt.level,
CheckResults: rs,
})
}
return resultSet, nil
}
func | (ctx context.Context, resultSet *reviewdog.ResultMap, ghInfo *cienv.BuildInfo, cli client.DogHouseClientInterface) (*reviewdog.FilteredCheckMap, error) {
var g errgroup.Group
wd, _ := os.Getwd()
filteredResultSet := new(reviewdog.FilteredCheckMap)
resultSet.Range(func(name string, result *reviewdog.Result) {
checkResults := result.CheckResults
as := make([]*doghouse.Annotation, 0, len(checkResults))
for _, r := range checkResults {
as = append(as, checkResultToAnnotation(r, wd))
}
req := &doghouse.CheckRequest{
Name: name,
Owner: ghInfo.Owner,
Repo: ghInfo.Repo,
PullRequest: ghInfo.PullRequest,
SHA: ghInfo.SHA,
Branch: ghInfo.Branch,
Annotations: as,
Level: result.Level,
}
g.Go(func() error {
res, err := cli.Check(ctx, req)
if err != nil {
return fmt.Errorf("post failed for %s: %v", name, err)
}
if res.ReportURL != "" {
log.Printf("[%s] reported: %s", name, res.ReportURL)
}
if res.CheckedResults != nil {
filteredResultSet.Store(name, res.CheckedResults)
}
if res.ReportURL == "" && res.CheckedResults == nil {
return fmt.Errorf("No result found for %q", name)
}
return nil
})
})
return filteredResultSet, g.Wait()
}
func checkResultToAnnotation(c *reviewdog.CheckResult, wd string) *doghouse.Annotation {
return &doghouse.Annotation{
Path: reviewdog.CleanPath(c.Path, wd),
Line: c.Lnum,
Message: c.Message,
RawMessage: strings.Join(c.Lines, "\n"),
}
}
// reportResults reports results to the given io.Writer and returns true if at
// least one annotation result is in the diff.
func reportResults(w io.Writer, filteredResultSet *reviewdog.FilteredCheckMap) bool {
// Sort names to get deterministic result.
var names []string
filteredResultSet.Range(func(name string, results []*reviewdog.FilteredCheck) {
names = append(names, name)
})
sort.Strings(names)
foundInDiff := false
for _, name := range names {
results, err := filteredResultSet.Load(name)
if err != nil {
// Should not happen.
log.Printf("reviewdog: result not found for %q", name)
continue
}
fmt.Fprintf(w, "reviwedog: Reporting results for %q\n", name)
foundResultPerName := false
filteredNum := 0
for _, result := range results {
if !result.InDiff {
filteredNum++
continue
}
foundInDiff = true
foundResultPerName = true
// Output original lines.
for _, line := range result.Lines {
fmt.Fprintln(w, line)
}
}
if !foundResultPerName {
fmt.Fprintf(w, "reviwedog: No results found for %q. %d results found outside diff.\n", name, filteredNum)
}
}
return foundInDiff
}
| postResultSet |
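Note how `postResultSet` above fans one `cli.Check` call out per tool with an `errgroup` and joins on `g.Wait()`. The same fan-out/join shape, sketched in Python with `concurrent.futures` (`post_check` is a hypothetical stand-in for the real client, not reviewdog's API):

```python
# Sketch of the fan-out/join pattern from postResultSet, using Python's
# concurrent.futures; post_check is a hypothetical stand-in for cli.Check.
from concurrent.futures import ThreadPoolExecutor

def post_check(name: str, annotations: list) -> str:
    # Pretend to post a check run and return its report URL.
    return f"https://example.invalid/report/{name}"

def post_result_set(result_set: dict) -> dict:
    reports = {}
    with ThreadPoolExecutor() as pool:
        futures = {
            pool.submit(post_check, name, anns): name
            for name, anns in result_set.items()
        }
        for future, name in futures.items():
            # result() re-raises the first failure, much like g.Wait()
            # surfacing the first non-nil error.
            reports[name] = future.result()
    return reports

print(post_result_set({"golint": [], "govet": []}))
```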
seo-social-share-data.ts | export interface SeoSocialShareData {
title?: string;
keywords?: string;
description?: string; | url?: string;
type?: string;
author?: string;
section?: string;
published?: string;
modified?: string;
}
export interface ImageAuxData {
width?: number;
height?: number;
secureUrl?: string;
mimeType?: string;
alt?: string;
} | image?: string;
imageAuxData?: ImageAuxData; |
ubuntuazure.go | // Copyright © 2017 The Kubicorn Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package profiles
import (
"fmt"
"github.com/kris-nova/kubicorn/apis/cluster"
"github.com/kris-nova/kubicorn/cutil/kubeadm"
)
// NewUbuntuAzureCluster creates a basic Azure cluster profile to bootstrap Kubernetes.
func NewUbuntuAzureCluster(name string) *cluster.Cluster { |
return &cluster.Cluster{
Name: name,
Cloud: cluster.CloudAzure,
Location: "eastus",
SSH: &cluster.SSH{
PublicKeyPath: "~/.ssh/id_rsa.pub",
User: "root",
},
KubernetesAPI: &cluster.KubernetesAPI{
Port: "443",
},
Values: &cluster.Values{
ItemMap: map[string]string{
"INJECTEDTOKEN": kubeadm.GetRandomToken(),
},
},
ServerPools: []*cluster.ServerPool{
{
Type: cluster.ServerPoolTypeMaster,
Name: fmt.Sprintf("%s-master", name),
MaxCount: 1,
Image: "UbuntuServer",
				Size:     "Standard_DS3_v2",
BootstrapScripts: []string{},
Firewalls: []*cluster.Firewall{
{
Name: fmt.Sprintf("%s-master", name),
IngressRules: []*cluster.IngressRule{
{
IngressToPort: "22",
IngressSource: "0.0.0.0/0",
IngressProtocol: "tcp",
},
{
IngressToPort: "443",
IngressSource: "0.0.0.0/0",
IngressProtocol: "tcp",
},
{
IngressToPort: "1194",
IngressSource: "0.0.0.0/0",
IngressProtocol: "udp",
},
},
EgressRules: []*cluster.EgressRule{
{
EgressToPort: "all", // By default all egress from VM
EgressDestination: "0.0.0.0/0",
EgressProtocol: "tcp",
},
{
EgressToPort: "all", // By default all egress from VM
EgressDestination: "0.0.0.0/0",
EgressProtocol: "udp",
},
},
},
},
},
{
Type: cluster.ServerPoolTypeNode,
Name: fmt.Sprintf("%s-node", name),
MaxCount: 1,
Image: "UbuntuServer",
				Size:     "Standard_DS3_v2",
BootstrapScripts: []string{},
Firewalls: []*cluster.Firewall{
{
Name: fmt.Sprintf("%s-node", name),
IngressRules: []*cluster.IngressRule{
{
IngressToPort: "22",
IngressSource: "0.0.0.0/0",
IngressProtocol: "tcp",
},
{
IngressToPort: "1194",
IngressSource: "0.0.0.0/0",
IngressProtocol: "udp",
},
},
EgressRules: []*cluster.EgressRule{
{
EgressToPort: "all", // By default all egress from VM
EgressDestination: "0.0.0.0/0",
EgressProtocol: "tcp",
},
{
EgressToPort: "all", // By default all egress from VM
EgressDestination: "0.0.0.0/0",
EgressProtocol: "udp",
},
},
},
},
},
},
}
}
|
|
index.js | /**
* WelcomePage
*
 * This is the landing page that welcomes the user and starts the 3-step setup flow
*/
import React from 'react';
import PropTypes from 'prop-types';
import { StepHeader, StepContent, StepFooter, Stepper } from '../components';
import {
StyledRow,
StyledCol,
H1,
H2,
P,
Container,
StyledPrimaryButton,
FlexContainer, | import Oval from '../components/Stepper/Oval';
import { DashedDiv } from './styles';
export default function WelcomePage(props) {
return (
<Container height="100vh">
<StyledRow>
<StyledCol xs={12} md={8}>
<FlexContainer>
<StepHeader>
<H2>TrackBook</H2>
</StepHeader>
<StepContent>
<H1>Welcome to TrackBook</H1>
<P>Get your TrackBook up and running in 3 steps</P>
</StepContent>
<StepFooter>
<FlexEndContainer margin="0 46px 0 0">
<StyledPrimaryButton
appearance="primary"
onClick={() => props.history.push('/paymentconnectors')}
>
Get Started
</StyledPrimaryButton>
</FlexEndContainer>
</StepFooter>
</FlexContainer>
</StyledCol>
<StyledCol xs={12} md={4}>
<FlexContainer padding="15% 66px" backgroundColor="#f4f5f8">
<DashedDiv>
<Stepper number={1} title="Connect TrackBook with AckNow" />
<Oval />
<Stepper number={2} title="Connect TrackBook with Fusion" />
<Stepper number={3} title="Add your preferences" />
</DashedDiv>
</FlexContainer>
</StyledCol>
</StyledRow>
</Container>
);
}
WelcomePage.propTypes = {
history: PropTypes.shape({
push: PropTypes.func,
}),
}; | FlexEndContainer,
} from '../../../components'; |
engine.py | """
This is the Scrapy engine which controls the Scheduler, Downloader and Spiders.
For more information see docs/topics/architecture.rst
"""
import logging
from time import time
from twisted.internet import defer, task
from twisted.python.failure import Failure
from scrapy import signals
from scrapy.core.scraper import Scraper
from scrapy.exceptions import DontCloseSpider
from scrapy.http import Response, Request
from scrapy.utils.misc import load_object
from scrapy.utils.reactor import CallLaterOnce
from scrapy.utils.log import logformatter_adapter, failure_to_exc_info
logger = logging.getLogger(__name__)
class Slot(object):
def __init__(self, start_requests, close_if_idle, nextcall, scheduler):
self.closing = False
self.inprogress = set() # requests in progress
self.start_requests = iter(start_requests)
self.close_if_idle = close_if_idle
self.nextcall = nextcall
self.scheduler = scheduler
self.heartbeat = task.LoopingCall(nextcall.schedule)
def add_request(self, request):
self.inprogress.add(request)
def remove_request(self, request):
self.inprogress.remove(request)
self._maybe_fire_closing()
def close(self):
self.closing = defer.Deferred()
self._maybe_fire_closing()
return self.closing
def _maybe_fire_closing(self):
if self.closing and not self.inprogress:
if self.nextcall:
self.nextcall.cancel()
if self.heartbeat.running:
self.heartbeat.stop()
self.closing.callback(None)
class ExecutionEngine(object):
    # __init__ initializes the crawler's core components.
def __init__(self, crawler, spider_closed_callback):
self.crawler = crawler
self.settings = crawler.settings
self.signals = crawler.signals
self.logformatter = crawler.logformatter
self.slot = None
self.spider = None
self.running = False
self.paused = False
self.scheduler_cls = load_object(self.settings['SCHEDULER'])
downloader_cls = load_object(self.settings['DOWNLOADER'])
self.downloader = downloader_cls(crawler)
self.scraper = Scraper(crawler)
self._spider_closed_callback = spider_closed_callback
@defer.inlineCallbacks
def start(self):
"""Start the execution engine"""
assert not self.running, "Engine already running"
self.start_time = time()
yield self.signals.send_catch_log_deferred(signal=signals.engine_started)
self.running = True
self._closewait = defer.Deferred()
yield self._closewait
def stop(self):
"""Stop the execution engine gracefully"""
assert self.running, "Engine not running"
self.running = False
dfd = self._close_all_spiders()
return dfd.addBoth(lambda _: self._finish_stopping_engine())
def close(self):
"""Close the execution engine gracefully.
If it has already been started, stop it. In all cases, close all spiders
and the downloader.
"""
if self.running:
# Will also close spiders and downloader
return self.stop()
elif self.open_spiders:
# Will also close downloader
return self._close_all_spiders()
else:
return defer.succeed(self.downloader.close())
def pause(self):
"""Pause the execution engine"""
self.paused = True
def unpause(self):
"""Resume the execution engine"""
self.paused = False
def _next_request(self, spider):
slot = self.slot
if not slot:
return
if self.paused:
return
while not self._needs_backout(spider):
if not self._next_request_from_scheduler(spider):
break
if slot.start_requests and not self._needs_backout(spider):
try:
request = next(slot.start_requests)
except StopIteration:
slot.start_requests = None
except Exception:
slot.start_requests = None
logger.error('Error while obtaining start requests',
exc_info=True, extra={'spider': spider})
else:
self.crawl(request, spider)
if self.spider_is_idle(spider) and slot.close_if_idle:
self._spider_idle(spider)
def _needs_backout(self, spider):
slot = self.slot
return not self.running \
or slot.closing \
or self.downloader.needs_backout() \
or self.scraper.slot.needs_backout()
def _next_request_from_scheduler(self, spider):
slot = self.slot
request = slot.scheduler.next_request()
if not request:
return
d = self._download(request, spider)
d.addBoth(self._handle_downloader_output, request, spider)
d.addErrback(lambda f: logger.info('Error while handling downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.remove_request(request))
d.addErrback(lambda f: logger.info('Error while removing request from slot',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
d.addBoth(lambda _: slot.nextcall.schedule())
d.addErrback(lambda f: logger.info('Error while scheduling new request',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def _handle_downloader_output(self, response, request, spider):
assert isinstance(response, (Request, Response, Failure)), response
# downloader middleware can return requests (for example, redirects)
if isinstance(response, Request):
self.crawl(response, spider)
return
# response is a Response or Failure
d = self.scraper.enqueue_scrape(response, request, spider)
d.addErrback(lambda f: logger.error('Error while enqueuing downloader output',
exc_info=failure_to_exc_info(f),
extra={'spider': spider}))
return d
def spider_is_idle(self, spider) | ot self.scraper.slot.is_idle():
# scraper is not idle
return False
if self.downloader.active:
# downloader has pending requests
return False
if self.slot.start_requests is not None:
# not all start requests are handled
return False
if self.slot.scheduler.has_pending_requests():
# scheduler has pending requests
return False
return True
@property
def open_spiders(self):
return [self.spider] if self.spider else []
def has_capacity(self):
"""Does the engine have capacity to handle more spiders"""
return not bool(self.slot)
def crawl(self, request, spider):
assert spider in self.open_spiders, \
"Spider %r not opened when crawling: %s" % (spider.name, request)
self.schedule(request, spider)
self.slot.nextcall.schedule()
def schedule(self, request, spider):
self.signals.send_catch_log(signal=signals.request_scheduled,
request=request, spider=spider)
if not self.slot.scheduler.enqueue_request(request):
self.signals.send_catch_log(signal=signals.request_dropped,
request=request, spider=spider)
def download(self, request, spider):
d = self._download(request, spider)
d.addBoth(self._downloaded, self.slot, request, spider)
return d
def _downloaded(self, response, slot, request, spider):
slot.remove_request(request)
return self.download(response, spider) \
if isinstance(response, Request) else response
def _download(self, request, spider):
slot = self.slot
slot.add_request(request)
def _on_success(response):
assert isinstance(response, (Response, Request))
if isinstance(response, Response):
response.request = request # tie request to response received
logkws = self.logformatter.crawled(request, response, spider)
logger.log(*logformatter_adapter(logkws), extra={'spider': spider})
self.signals.send_catch_log(signal=signals.response_received, \
response=response, request=request, spider=spider)
return response
def _on_complete(_):
slot.nextcall.schedule()
return _
dwld = self.downloader.fetch(request, spider)
dwld.addCallbacks(_on_success)
dwld.addBoth(_on_complete)
return dwld
@defer.inlineCallbacks
def open_spider(self, spider, start_requests=(), close_if_idle=True):
        # Instantiates the scheduler and sets up how requests are crawled and filtered.
assert self.has_capacity(), "No free spider slot when opening %r" % \
spider.name
logger.info("Spider opened", extra={'spider': spider})
        nextcall = CallLaterOnce(self._next_request, spider)  # used by the async loop for repeated scheduled calls
        scheduler = self.scheduler_cls.from_crawler(self.crawler)  # instantiate the scheduler
        start_requests = yield self.scraper.spidermw.process_start_requests(start_requests, spider)  # the spider middleware manager loads the middlewares configured in your settings and runs the start requests through them
slot = Slot(start_requests, close_if_idle, nextcall, scheduler)
self.slot = slot
self.spider = spider
yield scheduler.open(spider)
yield self.scraper.open_spider(spider)
self.crawler.stats.open_spider(spider)
yield self.signals.send_catch_log_deferred(signals.spider_opened, spider=spider)
slot.nextcall.schedule()
slot.heartbeat.start(5)
def _spider_idle(self, spider):
"""Called when a spider gets idle. This function is called when there
are no remaining pages to download or schedule. It can be called
multiple times. If some extension raises a DontCloseSpider exception
(in the spider_idle signal handler) the spider is not closed until the
next loop and this function is guaranteed to be called (at least) once
again for this spider.
"""
res = self.signals.send_catch_log(signal=signals.spider_idle, \
spider=spider, dont_log=DontCloseSpider)
if any(isinstance(x, Failure) and isinstance(x.value, DontCloseSpider) \
for _, x in res):
return
if self.spider_is_idle(spider):
self.close_spider(spider, reason='finished')
def close_spider(self, spider, reason='cancelled'):
"""Close (cancel) spider and clear all its outstanding requests"""
slot = self.slot
if slot.closing:
return slot.closing
logger.info("Closing spider (%(reason)s)",
{'reason': reason},
extra={'spider': spider})
dfd = slot.close()
def log_failure(msg):
def errback(failure):
logger.error(
msg,
exc_info=failure_to_exc_info(failure),
extra={'spider': spider}
)
return errback
dfd.addBoth(lambda _: self.downloader.close())
dfd.addErrback(log_failure('Downloader close failure'))
dfd.addBoth(lambda _: self.scraper.close_spider(spider))
dfd.addErrback(log_failure('Scraper close failure'))
dfd.addBoth(lambda _: slot.scheduler.close(reason))
dfd.addErrback(log_failure('Scheduler close failure'))
dfd.addBoth(lambda _: self.signals.send_catch_log_deferred(
signal=signals.spider_closed, spider=spider, reason=reason))
dfd.addErrback(log_failure('Error while sending spider_close signal'))
dfd.addBoth(lambda _: self.crawler.stats.close_spider(spider, reason=reason))
dfd.addErrback(log_failure('Stats close failure'))
dfd.addBoth(lambda _: logger.info("Spider closed (%(reason)s)",
{'reason': reason},
extra={'spider': spider}))
dfd.addBoth(lambda _: setattr(self, 'slot', None))
dfd.addErrback(log_failure('Error while unassigning slot'))
dfd.addBoth(lambda _: setattr(self, 'spider', None))
dfd.addErrback(log_failure('Error while unassigning spider'))
dfd.addBoth(lambda _: self._spider_closed_callback(spider))
return dfd
def _close_all_spiders(self):
dfds = [self.close_spider(s, reason='shutdown') for s in self.open_spiders]
dlist = defer.DeferredList(dfds)
return dlist
@defer.inlineCallbacks
def _finish_stopping_engine(self):
yield self.signals.send_catch_log_deferred(signal=signals.engine_stopped)
self._closewait.callback(None)
| :
if n |
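One detail worth pulling out of the `Slot` above: `close()` does not tear anything down itself, it only records the intent, and `_maybe_fire_closing()` fires the closing deferred once every in-progress request has been removed. A sketch of the same pattern with a plain callback in place of a Twisted `Deferred`:

```python
# Sketch of Slot's deferred-close pattern, using a plain callback
# instead of a Twisted Deferred.
class ClosableSlot:
    def __init__(self):
        self.inprogress = set()
        self._on_closed = None  # set once close() has been requested

    def add_request(self, request):
        self.inprogress.add(request)

    def remove_request(self, request):
        self.inprogress.discard(request)
        self._maybe_fire_closing()

    def close(self, on_closed):
        self._on_closed = on_closed
        self._maybe_fire_closing()

    def _maybe_fire_closing(self):
        # Fire only when a close was requested AND nothing is in flight.
        if self._on_closed and not self.inprogress:
            self._on_closed()

slot = ClosableSlot()
slot.add_request("req-1")
slot.close(lambda: print("slot closed"))  # nothing printed yet
slot.remove_request("req-1")              # prints "slot closed"
```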
formdata.py | # -*- coding: utf-8 -*-
from typing import Any, Iterable, List, Optional
from aiohttp import FormData as _FormData
import aiohttp.multipart as multipart
class FormData(_FormData):
def __init__(
self,
fields: Iterable[Any] = (),
quote_fields: bool = True,
charset: Optional[str] = None,
boundary: Optional[str] = None
) -> None:
self._writer = multipart.MultipartWriter("form-data", boundary=boundary)
self._fields = [] # type: List[Any]
self._is_multipart = False
self._is_processed = False
self._quote_fields = quote_fields
self._charset = charset
if isinstance(fields, dict):
fields = list(fields.items())
elif not isinstance(fields, (list, tuple)):
|
self.add_fields(*fields)
| fields = (fields,) |
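The recovered middle (`fields = (fields,)`) completes the normalization in `__init__`: a dict becomes a list of pairs, and a bare single field is wrapped into a one-tuple so `add_fields(*fields)` always receives an iterable of fields. A small usage sketch (assuming the `FormData` subclass above is importable):

```python
# Usage sketch: both spellings produce the same set of fields, because
# __init__ converts the dict into a list of (name, value) pairs first.
fd_from_dict = FormData({"user": "alice", "lang": "en"})
fd_from_pairs = FormData([("user", "alice"), ("lang", "en")])

# A bare single field (e.g. a file object) would be wrapped as (field,)
# by the recovered line, so add_fields still sees an iterable.
```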
input_output.rs | //! This module contains code to equate the input/output types appearing
//! in the MIR with the expected input/output types from the function | //! signature. This requires a bit of processing, as the expected types
//! are supplied to us before normalization and may contain opaque
//! `impl Trait` instances. In contrast, the input/output types found in
//! the MIR (specifically, in the special local variables for the
//! `RETURN_PLACE` and the MIR arguments) are always fully normalized (and
//! contain revealed `impl Trait` values).
use rustc::infer::LateBoundRegionConversionTime;
use rustc::mir::*;
use rustc::ty::Ty;
use rustc_index::vec::Idx;
use syntax_pos::Span;
use crate::borrow_check::universal_regions::UniversalRegions;
use super::{Locations, TypeChecker};
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
pub(super) fn equate_inputs_and_outputs(
&mut self,
body: &Body<'tcx>,
universal_regions: &UniversalRegions<'tcx>,
normalized_inputs_and_output: &[Ty<'tcx>],
) {
let (&normalized_output_ty, normalized_input_tys) =
normalized_inputs_and_output.split_last().unwrap();
// If the user explicitly annotated the input types, extract
// those.
//
// e.g., `|x: FxHashMap<_, &'static u32>| ...`
let user_provided_sig;
if !self.tcx().is_closure(self.mir_def_id) {
user_provided_sig = None;
} else {
let typeck_tables = self.tcx().typeck_tables_of(self.mir_def_id);
user_provided_sig = match typeck_tables.user_provided_sigs.get(&self.mir_def_id) {
None => None,
Some(user_provided_poly_sig) => {
// Instantiate the canonicalized variables from
// user-provided signature (e.g., the `_` in the code
// above) with fresh variables.
let (poly_sig, _) = self.infcx.instantiate_canonical_with_fresh_inference_vars(
body.span,
&user_provided_poly_sig,
);
// Replace the bound items in the fn sig with fresh
// variables, so that they represent the view from
// "inside" the closure.
Some(
self.infcx
.replace_bound_vars_with_fresh_vars(
body.span,
LateBoundRegionConversionTime::FnCall,
&poly_sig,
)
.0,
)
}
}
};
// Equate expected input tys with those in the MIR.
for (&normalized_input_ty, argument_index) in normalized_input_tys.iter().zip(0..) {
// In MIR, argument N is stored in local N+1.
let local = Local::new(argument_index + 1);
debug!("equate_inputs_and_outputs: normalized_input_ty = {:?}", normalized_input_ty);
let mir_input_ty = body.local_decls[local].ty;
let mir_input_span = body.local_decls[local].source_info.span;
self.equate_normalized_input_or_output(
normalized_input_ty,
mir_input_ty,
mir_input_span,
);
}
if let Some(user_provided_sig) = user_provided_sig {
for (&user_provided_input_ty, argument_index) in
user_provided_sig.inputs().iter().zip(0..)
{
            // In MIR, closures begin with an implicit `self`, so
// argument N is stored in local N+2.
let local = Local::new(argument_index + 2);
let mir_input_ty = body.local_decls[local].ty;
let mir_input_span = body.local_decls[local].source_info.span;
// If the user explicitly annotated the input types, enforce those.
let user_provided_input_ty =
self.normalize(user_provided_input_ty, Locations::All(mir_input_span));
self.equate_normalized_input_or_output(
user_provided_input_ty,
mir_input_ty,
mir_input_span,
);
}
}
assert!(
body.yield_ty.is_some() && universal_regions.yield_ty.is_some()
|| body.yield_ty.is_none() && universal_regions.yield_ty.is_none()
);
if let Some(mir_yield_ty) = body.yield_ty {
let ur_yield_ty = universal_regions.yield_ty.unwrap();
let yield_span = body.local_decls[RETURN_PLACE].source_info.span;
self.equate_normalized_input_or_output(ur_yield_ty, mir_yield_ty, yield_span);
}
// Return types are a bit more complex. They may contain opaque `impl Trait` types.
let mir_output_ty = body.local_decls[RETURN_PLACE].ty;
let output_span = body.local_decls[RETURN_PLACE].source_info.span;
if let Err(terr) = self.eq_opaque_type_and_type(
mir_output_ty,
normalized_output_ty,
self.mir_def_id,
Locations::All(output_span),
ConstraintCategory::BoringNoLocation,
) {
span_mirbug!(
self,
Location::START,
"equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
normalized_output_ty,
mir_output_ty,
terr
);
};
// If the user explicitly annotated the output types, enforce those.
// Note that this only happens for closures.
if let Some(user_provided_sig) = user_provided_sig {
let user_provided_output_ty = user_provided_sig.output();
let user_provided_output_ty =
self.normalize(user_provided_output_ty, Locations::All(output_span));
if let Err(err) = self.eq_opaque_type_and_type(
mir_output_ty,
user_provided_output_ty,
self.mir_def_id,
Locations::All(output_span),
ConstraintCategory::BoringNoLocation,
) {
span_mirbug!(
self,
Location::START,
"equate_inputs_and_outputs: `{:?}=={:?}` failed with `{:?}`",
mir_output_ty,
user_provided_output_ty,
err
);
}
}
}
fn equate_normalized_input_or_output(&mut self, a: Ty<'tcx>, b: Ty<'tcx>, span: Span) {
debug!("equate_normalized_input_or_output(a={:?}, b={:?})", a, b);
if let Err(terr) =
self.eq_types(a, b, Locations::All(span), ConstraintCategory::BoringNoLocation)
{
span_mirbug!(
self,
Location::START,
"equate_normalized_input_or_output: `{:?}=={:?}` failed with `{:?}`",
a,
b,
terr
);
}
}
} | |
interface.go | /*
Copyright 2021 The OpenEBS Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
internalinterfaces "github.com/openebs/lvm-localpv/pkg/generated/informer/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// LVMSnapshots returns a LVMSnapshotInformer.
LVMSnapshots() LVMSnapshotInformer
// LVMVolumes returns a LVMVolumeInformer.
LVMVolumes() LVMVolumeInformer
}
type version struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface |
// LVMSnapshots returns a LVMSnapshotInformer.
func (v *version) LVMSnapshots() LVMSnapshotInformer {
return &lVMSnapshotInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// LVMVolumes returns a LVMVolumeInformer.
func (v *version) LVMVolumes() LVMVolumeInformer {
return &lVMVolumeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
| {
return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
} |
average_rotated_trials.py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 12 09:12:00 2016
Average the spatial influence of the morphology
@author: young
"""
import os
from os.path import join
import pylab as plt
import numpy as np
def average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection):
yfileName = dirName+'_0/'+dataName+str(z_pos)+'.npy'
y=np.load(yfileName)
y=y[idxSection]
for i in range(1,numRotation,1):
yfileName = dirName+'_'+str(i)+'/'+dataName+str(z_pos)+'.npy'
temp_y = np.load(yfileName)
temp_y = temp_y[idxSection]
y = y + temp_y
y = y/numRotation
return y
| if __name__ == '__main__':
numRotation = 8
dirName = 'sim_results'
xfileName = dirName+'_0/'+'tvec.npy'
x=np.load(xfileName)
fig = plt.figure(figsize=[10, 10])
# Extracellular_potential
ax1 = plt.subplot(311, ylabel='$\mu$V',
title='Extracellular\npotential')
# share x only
ax2 = plt.subplot(312, sharex=ax1, ylabel='mV',
title='Membrane potential')
ax3 = plt.subplot(313, sharex=ax1, xlabel='ms', ylabel='nA',
title='Return currents')
legendList = []
for z_pos in range(20,301,10):
legendList.append('z:'+str(z_pos)+'$\mu$m')
dataName='phi_z'
y1 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=2) # centre electrode
ax1.plot(x, y1)
dataName='phi_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y1)
dataName='vmem_z'
y2 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0) # soma index = 0
ax2.plot(x, y2)
dataName='vmem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y2)
dataName='imem_z'
y3 = average_data_in_dif_folder(dirName,dataName,z_pos,numRotation,idxSection=0)
ax3.plot(x, y3)
dataName='imem_z'+str(z_pos)+'.npy'
np.save(join('averaged_result_for_real_neuron', dataName), y3)
plt.legend(legendList, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.savefig('averaged_z_profile', bbox_inches='tight') | |
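`average_data_in_dif_folder` is just an incremental mean of one selected row over the per-rotation `.npy` files. The same computation on in-memory arrays, as a self-contained sketch:

```python
# Self-contained sketch of what average_data_in_dif_folder computes:
# an element-wise mean of one selected row across N rotated trials.
import numpy as np

def average_rotations(trials, idx_section):
    """trials: list of 2-D arrays, one per rotation; returns the mean
    of row `idx_section` across all trials."""
    acc = trials[0][idx_section].astype(float)
    for trial in trials[1:]:
        acc += trial[idx_section]
    return acc / len(trials)

rng = np.random.default_rng(0)
trials = [rng.normal(size=(4, 100)) for _ in range(8)]  # 8 rotations
avg = average_rotations(trials, idx_section=2)
assert np.allclose(avg, np.mean([t[2] for t in trials], axis=0))
```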
Node.js | export default class Node {
ancestor(level) {
let node = this;
while (level--) {
node = node.parent;
if (!node) return null;
}
return node;
}
contains(node) {
while (node) {
if (node === this) return true;
node = node.parent;
}
return false;
}
findLexicalBoundary() {
return this.parent.findLexicalBoundary();
}
findNearest(type) {
if (typeof type === 'string') type = new RegExp(`^${type}$`);
if (type.test(this.type)) return this;
return this.parent.findNearest(type);
}
unparenthesizedParent() {
let node = this.parent;
while (node && node.type === 'ParenthesizedExpression') {
node = node.parent;
}
return node;
}
unparenthesize() {
let node = this;
while (node.type === 'ParenthesizedExpression') {
node = node.expression;
}
return node;
}
| findScope(functionScope) {
return this.parent.findScope(functionScope);
}
getIndentation() {
return this.parent.getIndentation();
}
initialise(transforms) {
for (const key of this.keys) {
const value = this[key];
if (Array.isArray(value)) {
value.forEach(node => node && node.initialise(transforms));
} else if (value && typeof value === 'object') {
value.initialise(transforms);
}
}
}
toJSON() {
return toJSON(this);
}
toString() {
return this.program.magicString.original.slice(this.start, this.end);
}
transpile(code, transforms) {
for (const key of this.keys) {
const value = this[key];
if (Array.isArray(value)) {
value.forEach(node => node && node.transpile(code, transforms));
} else if (value && typeof value === 'object') {
value.transpile(code, transforms);
}
}
}
} | |
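Several of the methods above (`ancestor`, `contains`, `findNearest`) are the same upward walk: follow `parent` links until a predicate matches or the chain runs out. A minimal sketch of that pattern (Python, with a hypothetical `TreeNode` rather than the class above):

```python
# Sketch of the parent-chain walk shared by ancestor/contains/findNearest:
# follow `parent` links until a predicate matches or the chain ends.
import re

class TreeNode:
    def __init__(self, type_, parent=None):
        self.type = type_
        self.parent = parent

    def find_nearest(self, pattern: str):
        node = self
        while node is not None:
            if re.fullmatch(pattern, node.type):
                return node
            node = node.parent
        return None

root = TreeNode("Program")
fn = TreeNode("FunctionDeclaration", parent=root)
stmt = TreeNode("ReturnStatement", parent=fn)
assert stmt.find_nearest("Function.*") is fn
```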
main.rs | use game::Game;
mod character;
mod game;
mod item;
mod location;
mod log;
mod randomizer;
use crate::location::Location;
use clap::{crate_version, Clap};
/// Your filesystem as a dungeon!
#[derive(Clap)]
#[clap(version = crate_version!(), author = "Facundo Olano <[email protected]>")]
struct Opts {
/// Moves the hero to the supplied destination.
    /// When omitted, just prints the hero's status.
destination: Option<String>,
/// Prints the hero's current location
#[clap(long)]
pwd: bool,
/// Resets the current game.
#[clap(long)]
reset: bool,
/// Attempt to avoid battles by running away.
#[clap(long)]
run: bool,
/// Attempt to avoid battles by bribing the enemy.
#[clap(long)]
bribe: bool,
/// Buys an item from the shop.
/// If name is omitted lists the items available for sale.
#[clap(short, long)]
shop: bool,
/// Uses an item from the inventory.
/// If name is omitted lists the inventory contents.
#[clap(short, long)]
inventory: bool,
}
fn main() {
let opts: Opts = Opts::parse();
let mut game = Game::load().unwrap_or_else(|_| Game::new());
if opts.pwd {
println!("{}", game.location.path_string());
} else if opts.reset {
game.reset()
} else if opts.shop {
// when -s flag is provided, the positional argument is assumed to be an item
shop(&mut game, &opts.destination);
} else if opts.inventory {
// when -i flag is provided, the positional argument is assumed to be an item
inventory(&mut game, &opts.destination);
} else if let Some(dest) = opts.destination {
go_to(&mut game, &dest, opts.run, opts.bribe);
} else {
log::status(&game);
}
game.save().unwrap()
}
/// Main command, attempt to move the hero to the supplied location,
/// possibly engaging in combat along the way.
fn go_to(game: &mut Game, dest: &str, run: bool, bribe: bool) {
if let Ok(dest) = Location::from(&dest) {
if let Err(game::Error::GameOver) = game.go_to(&dest, run, bribe) {
game.reset();
}
} else {
println!("No such file or directory");
std::process::exit(1);
}
}
/// Buy an item from the shop or list the available items if no item name is provided.
/// Shopping is only allowed when the player is at the home directory.
fn shop(game: &mut Game, item_name: &Option<String>) {
if game.location.is_home() {
if let Some(item_name) = item_name {
let item_name = sanitize(item_name);
match item::shop::buy(game, &item_name) {
Err(item::shop::Error::NotEnoughGold) => {
println!("Not enough gold.")
}
Err(item::shop::Error::ItemNotAvailable) => {
println!("Item not available.")
}
Ok(()) => {}
}
} else {
item::shop::list(game);
}
} else {
println!("Shop is only allowed at home.")
}
}
/// Use an item from the inventory or list the inventory contents if no item name is provided.
fn inventory(game: &mut Game, item_name: &Option<String>) {
if let Some(item_name) = item_name {
let item_name = sanitize(item_name);
if let Err(game::Error::ItemNotFound) = game.use_item(&item_name) {
println!("Item not found.");
}
} else {
println!("{}", log::format_inventory(&game));
}
}
/// Return a clean version of an item/equipment name, including aliases
fn | (name: &str) -> String {
let name = name.to_lowercase();
let name = match name.as_str() {
"p" | "potion" => "potion",
"e" | "escape" => "escape",
"sw" | "sword" => "sword",
"sh" | "shield" => "shield",
n => n,
};
name.to_string()
}
| sanitize |
print.go | package util
import (
"fmt"
"io"
"reflect"
"strings"
"text/template"
"github.com/ghodss/yaml"
"github.com/gogo/protobuf/proto"
"github.com/pkg/errors"
"github.com/solo-io/gloo/pkg/protoutil"
)
// Printer represents a function that prints a value to io.Writer, usually using
// a table
type Printer func(interface{}, io.Writer) error
// Print - prints the given proto.Message to io.Writer using the specified output format
func Print(output, template string, m proto.Message, tblPrn Printer, w io.Writer) error {
switch strings.ToLower(output) {
case "yaml":
return PrintYAML(m, w)
case "json":
return PrintJSON(m, w)
case "template":
return PrintTemplate(m, template, w)
default:
return tblPrn(m, w)
}
}
// PrintList - prints the given list of values to io.Writer using the specified output format
func PrintList(output, template string, list interface{}, tblPrn Printer, w io.Writer) error {
switch strings.ToLower(output) {
case "yaml":
return PrintYAMLList(list, w)
case "json":
return PrintJSONList(list, w)
case "template":
return PrintTemplate(list, template, w)
default:
return tblPrn(list, w)
}
}
// PrintJSON - prints the given proto.Message to io.Writer in JSON
func PrintJSON(m proto.Message, w io.Writer) error {
b, err := protoutil.Marshal(m)
if err != nil {
return errors.Wrap(err, "unable to convert to JSON")
}
_, err = fmt.Fprintln(w, string(b))
return err
}
// PrintYAML - prints the given proto.Message to io.Writer in YAML
func PrintYAML(m proto.Message, w io.Writer) error {
jsn, err := protoutil.Marshal(m)
if err != nil {
return errors.Wrap(err, "uanble to marshal")
}
b, err := yaml.JSONToYAML(jsn)
if err != nil {
return errors.Wrap(err, "unable to convert to YAML")
}
_, err = fmt.Fprintln(w, string(b))
return err
}
// PrintJSONList - prints the given list to io.Writer in JSON
func | (data interface{}, w io.Writer) error {
list := reflect.ValueOf(data)
_, err := fmt.Fprintln(w, "[")
if err != nil {
return errors.Wrap(err, "unable to print JSON list")
}
for i := 0; i < list.Len(); i++ {
v, ok := list.Index(i).Interface().(proto.Message)
if !ok {
return errors.New("unable to convert to proto message")
}
if i != 0 {
_, err = fmt.Fprintln(w, ",")
if err != nil {
return errors.Wrap(err, "unable to print JSON list")
}
}
err = PrintJSON(v, w)
if err != nil {
return err
}
}
_, err = fmt.Fprintln(w, "]")
return err
}
// PrintYAMLList - prints the given list to io.Writer in YAML
func PrintYAMLList(data interface{}, w io.Writer) error {
list := reflect.ValueOf(data)
for i := 0; i < list.Len(); i++ {
v, ok := list.Index(i).Interface().(proto.Message)
if !ok {
return errors.New("unable to convert to proto message")
}
if _, err := fmt.Fprintln(w, "---"); err != nil {
return errors.Wrap(err, "unable to print YAML list")
}
if err := PrintYAML(v, w); err != nil {
return err
}
}
return nil
}
// PrintTemplate prints the give value using the provided Go template to io.Writer
func PrintTemplate(data interface{}, tmpl string, w io.Writer) error {
t, err := template.New("output").Parse(tmpl)
if err != nil {
return errors.Wrap(err, "unable to parse template")
}
return t.Execute(w, data)
}
| PrintJSONList |
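`Print` and `PrintList` above are the same dispatch: pick a renderer from the requested output format and fall back to the table printer. A stdlib-only Python sketch of that dispatch (JSON plus a trivial table renderer; all names here are made up):

```python
# Sketch of the output-format dispatch used by Print/PrintList above.
# render() picks a printer by format name and falls back to a table.
import io
import json

def print_json(value, w):
    w.write(json.dumps(value, indent=2) + "\n")

def print_table(value, w):
    for key, val in value.items():
        w.write(f"{key}\t{val}\n")

PRINTERS = {"json": print_json}

def render(output_format, value, w):
    printer = PRINTERS.get(output_format.lower(), print_table)
    printer(value, w)

buf = io.StringIO()
render("json", {"name": "gateway", "port": 8080}, buf)
render("table", {"name": "gateway", "port": 8080}, buf)
print(buf.getvalue())
```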
owl.carousel.min.js | !function(a,b,c,d){function e(b,c){this.settings=null,this.options=a.extend({},e.Defaults,c),this.$element=a(b),this.drag=a.extend({},m),this.state=a.extend({},n),this.e=a.extend({},o),this._plugins={},this._supress={},this._current=null,this._speed=null,this._coordinates=[],this._breakpoint=null,this._width=null,this._items=[],this._clones=[],this._mergers=[],this._invalidated={},this._pipe=[],a.each(e.Plugins,a.proxy(function(a,b){this._plugins[a[0].toLowerCase()+a.slice(1)]=new b(this)},this)),a.each(e.Pipe,a.proxy(function(b,c){this._pipe.push({filter:c.filter,run:a.proxy(c.run,this)})},this)),this.setup(),this.initialize()}function f(a){if(a.touches!==d)return{x:a.touches[0].pageX,y:a.touches[0].pageY};if(a.touches===d){if(a.pageX!==d)return{x:a.pageX,y:a.pageY};if(a.pageX===d)return{x:a.clientX,y:a.clientY}}}function g(a){var b,d,e=c.createElement("div"),f=a;for(b in f)if(d=f[b],"undefined"!=typeof e.style[d])return e=null,[d,b];return[!1]}function h(){return g(["transition","WebkitTransition","MozTransition","OTransition"])[1]}function i(){return g(["transform","WebkitTransform","MozTransform","OTransform","msTransform"])[0]}function j(){return g(["perspective","webkitPerspective","MozPerspective","OPerspective","MsPerspective"])[0]}function k(){return"ontouchstart"in b||!!navigator.msMaxTouchPoints}function l(){return b.navigator.msPointerEnabled}var m,n,o;m={start:0,startX:0,startY:0,current:0,currentX:0,currentY:0,offsetX:0,offsetY:0,distance:null,startTime:0,endTime:0,updatedX:0,targetEl:null},n={isTouch:!1,isScrolling:!1,isSwiping:!1,direction:!1,inMotion:!1},o={_onDragStart:null,_onDragMove:null,_onDragEnd:null,_transitionEnd:null,_resizer:null,_responsiveCall:null,_goToLoop:null,_checkVisibile:null},e.Defaults={items:3,loop:!1,center:!1,mouseDrag:!0,touchDrag:!0,pullDrag:!0,freeDrag:!1,margin:0,stagePadding:0,merge:!1,mergeFit:!0,autoWidth:!1,startPosition:0,rtl:!1,smartSpeed:250,fluidSpeed:!1,dragEndSpeed:!1,responsive:{},responsiveRefreshRate:200,responsiveBaseElement:b,responsiveClass:!1,fallbackEasing:"swing",info:!1,nestedItemSelector:!1,itemElement:"div",stageElement:"div",themeClass:"owl-theme",baseClass:"owl-carousel",itemClass:"owl-item",centerClass:"center",activeClass:"active"},e.Width={Default:"default",Inner:"inner",Outer:"outer"},e.Plugins={},e.Pipe=[{filter:["width","items","settings"],run:function(a){a.current=this._items&&this._items[this.relative(this._current)]}},{filter:["items","settings"],run:function(){var a=this._clones,b=this.$stage.children(".cloned");(b.length!==a.length||!this.settings.loop&&a.length>0)&&(this.$stage.children(".cloned").remove(),this._clones=[])}},{filter:["items","settings"],run:function(){var a,b,c=this._clones,d=this._items,e=this.settings.loop?c.length-Math.max(2*this.settings.items,4):0;for(a=0,b=Math.abs(e/2);b>a;a++)e>0?(this.$stage.children().eq(d.length+c.length-1).remove(),c.pop(),this.$stage.children().eq(0).remove(),c.pop()):(c.push(c.length/2),this.$stage.append(d[c[c.length-1]].clone().addClass("cloned")),c.push(d.length-1-(c.length-1)/2),this.$stage.prepend(d[c[c.length-1]].clone().addClass("cloned")))}},{filter:["width","items","settings"],run:function(){var 
a,b,c,d=this.settings.rtl?1:-1,e=(this.width()/this.settings.items).toFixed(3),f=0;for(this._coordinates=[],b=0,c=this._clones.length+this._items.length;c>b;b++)a=this._mergers[this.relative(b)],a=this.settings.mergeFit&&Math.min(a,this.settings.items)||a,f+=(this.settings.autoWidth?this._items[this.relative(b)].width()+this.settings.margin:e*a)*d,this._coordinates.push(f)}},{filter:["width","items","settings"],run:function(){var b,c,d=(this.width()/this.settings.items).toFixed(3),e={width:Math.abs(this._coordinates[this._coordinates.length-1])+2*this.settings.stagePadding,"padding-left":this.settings.stagePadding||"","padding-right":this.settings.stagePadding||""};if(this.$stage.css(e),e={width:this.settings.autoWidth?"auto":d-this.settings.margin},e[this.settings.rtl?"margin-left":"margin-right"]=this.settings.margin,!this.settings.autoWidth&&a.grep(this._mergers,function(a){return a>1}).length>0)for(b=0,c=this._coordinates.length;c>b;b++)e.width=Math.abs(this._coordinates[b])-Math.abs(this._coordinates[b-1]||0)-this.settings.margin,this.$stage.children().eq(b).css(e);else this.$stage.children().css(e)}},{filter:["width","items","settings"],run:function(a){a.current&&this.reset(this.$stage.children().index(a.current))}},{filter:["position"],run:function(){this.animate(this.coordinates(this._current))}},{filter:["width","position","items","settings"],run:function(){var a,b,c,d,e=this.settings.rtl?1:-1,f=2*this.settings.stagePadding,g=this.coordinates(this.current())+f,h=g+this.width()*e,i=[];for(c=0,d=this._coordinates.length;d>c;c++)a=this._coordinates[c-1]||0,b=Math.abs(this._coordinates[c])+f*e,(this.op(a,"<=",g)&&this.op(a,">",h)||this.op(b,"<",g)&&this.op(b,">",h))&&i.push(c);this.$stage.children("."+this.settings.activeClass).removeClass(this.settings.activeClass),this.$stage.children(":eq("+i.join("), :eq(")+")").addClass(this.settings.activeClass),this.settings.center&&(this.$stage.children("."+this.settings.centerClass).removeClass(this.settings.centerClass),this.$stage.children().eq(this.current()).addClass(this.settings.centerClass))}}],e.prototype.initialize=function(){if(this.trigger("initialize"),this.$element.addClass(this.settings.baseClass).addClass(this.settings.themeClass).toggleClass("owl-rtl",this.settings.rtl),this.browserSupport(),this.settings.autoWidth&&this.state.imagesLoaded!==!0){var b,c,e;if(b=this.$element.find("img"),c=this.settings.nestedItemSelector?"."+this.settings.nestedItemSelector:d,e=this.$element.children(c).width(),b.length&&0>=e)return this.preloadAutoWidthImages(b),!1}this.$element.addClass("owl-loading"),this.$stage=a("<"+this.settings.stageElement+' class="owl-stage"/>').wrap('<div class="owl-stage-outer">'),this.$element.append(this.$stage.parent()),this.replace(this.$element.children().not(this.$stage.parent())),this._width=this.$element.width(),this.refresh(),this.$element.removeClass("owl-loading").addClass("owl-loaded"),this.eventsCall(),this.internalEvents(),this.addTriggerableEvents(),this.trigger("initialized")},e.prototype.setup=function(){var b=this.viewport(),c=this.options.responsive,d=-1,e=null;c?(a.each(c,function(a){b>=a&&a>d&&(d=Number(a))}),e=a.extend({},this.options,c[d]),delete e.responsive,e.responsiveClass&&this.$element.attr("class",function(a,b){return b.replace(/\b 
owl-responsive-\S+/g,"")}).addClass("owl-responsive-"+d)):e=a.extend({},this.options),(null===this.settings||this._breakpoint!==d)&&(this.trigger("change",{property:{name:"settings",value:e}}),this._breakpoint=d,this.settings=e,this.invalidate("settings"),this.trigger("changed",{property:{name:"settings",value:this.settings}}))},e.prototype.optionsLogic=function(){this.$element.toggleClass("owl-center",this.settings.center),this.settings.loop&&this._items.length<this.settings.items&&(this.settings.loop=!1),this.settings.autoWidth&&(this.settings.stagePadding=!1,this.settings.merge=!1)},e.prototype.prepare=function(b){var c=this.trigger("prepare",{content:b});return c.data||(c.data=a("<"+this.settings.itemElement+"/>").addClass(this.settings.itemClass).append(b)),this.trigger("prepared",{content:c.data}),c.data},e.prototype.update=function(){for(var b=0,c=this._pipe.length,d=a.proxy(function(a){return this[a]},this._invalidated),e={};c>b;)(this._invalidated.all||a.grep(this._pipe[b].filter,d).length>0)&&this._pipe[b].run(e),b++;this._invalidated={}},e.prototype.width=function(a){switch(a=a||e.Width.Default){case e.Width.Inner:case e.Width.Outer:return this._width;default:return this._width-2*this.settings.stagePadding+this.settings.margin}},e.prototype.refresh=function(){if(0===this._items.length)return!1;(new Date).getTime();this.trigger("refresh"),this.setup(),this.optionsLogic(),this.$stage.addClass("owl-refresh"),this.update(),this.$stage.removeClass("owl-refresh"),this.state.orientation=b.orientation,this.watchVisibility(),this.trigger("refreshed")},e.prototype.eventsCall=function(){this.e._onDragStart=a.proxy(function(a){this.onDragStart(a)},this),this.e._onDragMove=a.proxy(function(a){this.onDragMove(a)},this),this.e._onDragEnd=a.proxy(function(a){this.onDragEnd(a)},this),this.e._onResize=a.proxy(function(a){this.onResize(a)},this),this.e._transitionEnd=a.proxy(function(a){this.transitionEnd(a)},this),this.e._preventClick=a.proxy(function(a){this.preventClick(a)},this)},e.prototype.onThrottledResize=function(){b.clearTimeout(this.resizeTimer),this.resizeTimer=b.setTimeout(this.e._onResize,this.settings.responsiveRefreshRate)},e.prototype.onResize=function(){return this._items.length?this._width===this.$element.width()?!1:this.trigger("resize").isDefaultPrevented()?!1:(this._width=this.$element.width(),this.invalidate("width"),this.refresh(),void this.trigger("resized")):!1},e.prototype.eventsRouter=function(a){var b=a.type;"mousedown"===b||"touchstart"===b?this.onDragStart(a):"mousemove"===b||"touchmove"===b?this.onDragMove(a):"mouseup"===b||"touchend"===b?this.onDragEnd(a):"touchcancel"===b&&this.onDragEnd(a)},e.prototype.internalEvents=function(){var c=(k(),l());this.settings.mouseDrag?(this.$stage.on("mousedown",a.proxy(function(a){this.eventsRouter(a)},this)),this.$stage.on("dragstart",function(){return!1}),this.$stage.get(0).onselectstart=function(){return!1}):this.$element.addClass("owl-text-select-on"),this.settings.touchDrag&&!c&&this.$stage.on("touchstart touchcancel",a.proxy(function(a){this.eventsRouter(a)},this)),this.transitionEndVendor&&this.on(this.$stage.get(0),this.transitionEndVendor,this.e._transitionEnd,!1),this.settings.responsive!==!1&&this.on(b,"resize",a.proxy(this.onThrottledResize,this))},e.prototype.onDragStart=function(d){var e,g,h,i;if(e=d.originalEvent||d||b.event,3===e.which||this.state.isTouch)return!1;if("mousedown"===e.type&&this.$stage.addClass("owl-grab"),this.trigger("drag"),this.drag.startTime=(new 
Date).getTime(),this.speed(0),this.state.isTouch=!0,this.state.isScrolling=!1,this.state.isSwiping=!1,this.drag.distance=0,g=f(e).x,h=f(e).y,this.drag.offsetX=this.$stage.position().left,this.drag.offsetY=this.$stage.position().top,this.settings.rtl&&(this.drag.offsetX=this.$stage.position().left+this.$stage.width()-this.width()+this.settings.margin),this.state.inMotion&&this.support3d)i=this.getTransformProperty(),this.drag.offsetX=i,this.animate(i),this.state.inMotion=!0;else if(this.state.inMotion&&!this.support3d)return this.state.inMotion=!1,!1;this.drag.startX=g-this.drag.offsetX,this.drag.startY=h-this.drag.offsetY,this.drag.start=g-this.drag.startX,this.drag.targetEl=e.target||e.srcElement,this.drag.updatedX=this.drag.start,("IMG"===this.drag.targetEl.tagName||"A"===this.drag.targetEl.tagName)&&(this.drag.targetEl.draggable=!1),a(c).on("mousemove.owl.dragEvents mouseup.owl.dragEvents touchmove.owl.dragEvents touchend.owl.dragEvents",a.proxy(function(a){this.eventsRouter(a)},this))},e.prototype.onDragMove=function(a){var c,e,g,h,i,j;this.state.isTouch&&(this.state.isScrolling||(c=a.originalEvent||a||b.event,e=f(c).x,g=f(c).y,this.drag.currentX=e-this.drag.startX,this.drag.currentY=g-this.drag.startY,this.drag.distance=this.drag.currentX-this.drag.offsetX,this.drag.distance<0?this.state.direction=this.settings.rtl?"right":"left":this.drag.distance>0&&(this.state.direction=this.settings.rtl?"left":"right"),this.settings.loop?this.op(this.drag.currentX,">",this.coordinates(this.minimum()))&&"right"===this.state.direction?this.drag.currentX-=(this.settings.center&&this.coordinates(0))-this.coordinates(this._items.length):this.op(this.drag.currentX,"<",this.coordinates(this.maximum()))&&"left"===this.state.direction&&(this.drag.currentX+=(this.settings.center&&this.coordinates(0))-this.coordinates(this._items.length)):(h=this.coordinates(this.settings.rtl?this.maximum():this.minimum()),i=this.coordinates(this.settings.rtl?this.minimum():this.maximum()),j=this.settings.pullDrag?this.drag.distance/5:0,this.drag.currentX=Math.max(Math.min(this.drag.currentX,h+j),i+j)),(this.drag.distance>8||this.drag.distance<-8)&&(c.preventDefault!==d?c.preventDefault():c.returnValue=!1,this.state.isSwiping=!0),this.drag.updatedX=this.drag.currentX,(this.drag.currentY>16||this.drag.currentY<-16)&&this.state.isSwiping===!1&&(this.state.isScrolling=!0,this.drag.updatedX=this.drag.start),this.animate(this.drag.updatedX)))},e.prototype.onDragEnd=function(b){var d,e,f;if(this.state.isTouch){if("mouseup"===b.type&&this.$stage.removeClass("owl-grab"),this.trigger("dragged"),this.drag.targetEl.removeAttribute("draggable"),this.state.isTouch=!1,this.state.isScrolling=!1,this.state.isSwiping=!1,0===this.drag.distance&&this.state.inMotion!==!0)return this.state.inMotion=!1,!1;this.drag.endTime=(new 
Date).getTime(),d=this.drag.endTime-this.drag.startTime,e=Math.abs(this.drag.distance),(e>3||d>300)&&this.removeClick(this.drag.targetEl),f=this.closest(this.drag.updatedX),this.speed(this.settings.dragEndSpeed||this.settings.smartSpeed),this.current(f),this.invalidate("position"),this.update(),this.settings.pullDrag||this.drag.updatedX!==this.coordinates(f)||this.transitionEnd(),this.drag.distance=0,a(c).off(".owl.dragEvents")}},e.prototype.removeClick=function(c){this.drag.targetEl=c,a(c).on("click.preventClick",this.e._preventClick),b.setTimeout(function(){a(c).off("click.preventClick")},300)},e.prototype.preventClick=function(b){b.preventDefault?b.preventDefault():b.returnValue=!1,b.stopPropagation&&b.stopPropagation(),a(b.target).off("click.preventClick")},e.prototype.getTransformProperty=function(){var a,c;return a=b.getComputedStyle(this.$stage.get(0),null).getPropertyValue(this.vendorName+"transform"),a=a.replace(/matrix(3d)?\(|\)/g,"").split(","),c=16===a.length,c!==!0?a[4]:a[12]},e.prototype.closest=function(b){var c=-1,d=30,e=this.width(),f=this.coordinates();return this.settings.freeDrag||a.each(f,a.proxy(function(a,g){return b>g-d&&g+d>b?c=a:this.op(b,"<",g)&&this.op(b,">",f[a+1]||g-e)&&(c="left"===this.state.direction?a+1:a),-1===c},this)),this.settings.loop||(this.op(b,">",f[this.minimum()])?c=b=this.minimum():this.op(b,"<",f[this.maximum()])&&(c=b=this.maximum())),c},e.prototype.animate=function(b){this.trigger("translate"),this.state.inMotion=this.speed()>0,this.support3d?this.$stage.css({transform:"translate3d("+b+"px,0px, 0px)",transition:this.speed()/1e3+"s"}):this.state.isTouch?this.$stage.css({left:b+"px"}):this.$stage.animate({left:b},this.speed()/1e3,this.settings.fallbackEasing,a.proxy(function(){this.state.inMotion&&this.transitionEnd()},this))},e.prototype.current=function(a){if(a===d)return this._current;if(0===this._items.length)return d;if(a=this.normalize(a),this._current!==a){var b=this.trigger("change",{property:{name:"position",value:a}});b.data!==d&&(a=this.normalize(b.data)),this._current=a,this.invalidate("position"),this.trigger("changed",{property:{name:"position",value:this._current}})}return this._current},e.prototype.invalidate=function(a){this._invalidated[a]=!0},e.prototype.reset=function(a){a=this.normalize(a),a!==d&&(this._speed=0,this._current=a,this.suppress(["translate","translated"]),this.animate(this.coordinates(a)),this.release(["translate","translated"]))},e.prototype.normalize=function(b,c){var e=c?this._items.length:this._items.length+this._clones.length;return!a.isNumeric(b)||1>e?d:b=this._clones.length?(b%e+e)%e:Math.max(this.minimum(c),Math.min(this.maximum(c),b))},e.prototype.relative=function(a){return a=this.normalize(a),a-=this._clones.length/2,this.normalize(a,!0)},e.prototype.maximum=function(a){var b,c,d,e=0,f=this.settings;if(a)return this._items.length-1;if(!f.loop&&f.center)b=this._items.length-1;else if(f.loop||f.center)if(f.loop||f.center)b=this._items.length+f.items;else{if(!f.autoWidth&&!f.merge)throw"Can not detect maximum absolute position.";for(revert=f.rtl?1:-1,c=this.$stage.width()-this.$element.width();(d=this.coordinates(e))&&!(d*revert>=c);)b=++e}else b=this._items.length-f.items;return b},e.prototype.minimum=function(a){return a?0:this._clones.length/2},e.prototype.items=function(a){return a===d?this._items.slice():(a=this.normalize(a,!0),this._items[a])},e.prototype.mergers=function(a){return a===d?this._mergers.slice():(a=this.normalize(a,!0),this._mergers[a])},e.prototype.clones=function(b){var 
c=this._clones.length/2,e=c+this._items.length,f=function(a){return a%2===0?e+a/2:c-(a+1)/2};return b===d?a.map(this._clones,function(a,b){return f(b)}):a.map(this._clones,function(a,c){return a===b?f(c):null})},e.prototype.speed=function(a){return a!==d&&(this._speed=a),this._speed},e.prototype.coordinates=function(b){var c=null;return b===d?a.map(this._coordinates,a.proxy(function(a,b){return this.coordinates(b)},this)):(this.settings.center?(c=this._coordinates[b],c+=(this.width()-c+(this._coordinates[b-1]||0))/2*(this.settings.rtl?-1:1)):c=this._coordinates[b-1]||0,c)},e.prototype.duration=function(a,b,c){return Math.min(Math.max(Math.abs(b-a),1),6)*Math.abs(c||this.settings.smartSpeed)},e.prototype.to=function(c,d){if(this.settings.loop){var e=c-this.relative(this.current()),f=this.current(),g=this.current(),h=this.current()+e,i=0>g-h?!0:!1,j=this._clones.length+this._items.length;h<this.settings.items&&i===!1?(f=g+this._items.length,this.reset(f)):h>=j-this.settings.items&&i===!0&&(f=g-this._items.length,this.reset(f)),b.clearTimeout(this.e._goToLoop),this.e._goToLoop=b.setTimeout(a.proxy(function(){this.speed(this.duration(this.current(),f+e,d)),this.current(f+e),this.update()},this),30)}else this.speed(this.duration(this.current(),c,d)),this.current(c),this.update()},e.prototype.next=function(a){a=a||!1,this.to(this.relative(this.current())+1,a)},e.prototype.prev=function(a){a=a||!1,this.to(this.relative(this.current())-1,a)},e.prototype.transitionEnd=function(a){return a!==d&&(a.stopPropagation(),(a.target||a.srcElement||a.originalTarget)!==this.$stage.get(0))?!1:(this.state.inMotion=!1,void this.trigger("translated"))},e.prototype.viewport=function(){var d;if(this.options.responsiveBaseElement!==b)d=a(this.options.responsiveBaseElement).width();else if(b.innerWidth)d=b.innerWidth;else{if(!c.documentElement||!c.documentElement.clientWidth)throw"Can not detect viewport width.";d=c.documentElement.clientWidth}return d},e.prototype.replace=function(b){this.$stage.empty(),this._items=[],b&&(b=b instanceof jQuery?b:a(b)),this.settings.nestedItemSelector&&(b=b.find("."+this.settings.nestedItemSelector)),b.filter(function(){return 1===this.nodeType}).each(a.proxy(function(a,b){b=this.prepare(b),this.$stage.append(b),this._items.push(b),this._mergers.push(1*b.find("[data-merge]").andSelf("[data-merge]").attr("data-merge")||1)},this)),this.reset(a.isNumeric(this.settings.startPosition)?this.settings.startPosition:0),this.invalidate("items")},e.prototype.add=function(a,b){b=b===d?this._items.length:this.normalize(b,!0),this.trigger("add",{content:a,position:b}),0===this._items.length||b===this._items.length?(this.$stage.append(a),this._items.push(a),this._mergers.push(1*a.find("[data-merge]").andSelf("[data-merge]").attr("data-merge")||1)):(this._items[b].before(a),this._items.splice(b,0,a),this._mergers.splice(b,0,1*a.find("[data-merge]").andSelf("[data-merge]").attr("data-merge")||1)),this.invalidate("items"),this.trigger("added",{content:a,position:b})},e.prototype.remove=function(a){a=this.normalize(a,!0),a!==d&&(this.trigger("remove",{content:this._items[a],position:a}),this._items[a].remove(),this._items.splice(a,1),this._mergers.splice(a,1),this.invalidate("items"),this.trigger("removed",{content:null,position:a}))},e.prototype.addTriggerableEvents=function(){var b=a.proxy(function(b,c){return 
a.proxy(function(a){a.relatedTarget!==this&&(this.suppress([c]),b.apply(this,[].slice.call(arguments,1)),this.release([c]))},this)},this);a.each({next:this.next,prev:this.prev,to:this.to,destroy:this.destroy,refresh:this.refresh,replace:this.replace,add:this.add,remove:this.remove},a.proxy(function(a,c){this.$element.on(a+".owl.carousel",b(c,a+".owl.carousel"))},this))},e.prototype.watchVisibility=function(){function c(a){return a.offsetWidth>0&&a.offsetHeight>0}function d(){c(this.$element.get(0))&&(this.$element.removeClass("owl-hidden"),this.refresh(),b.clearInterval(this.e._checkVisibile))}c(this.$element.get(0))||(this.$element.addClass("owl-hidden"),b.clearInterval(this.e._checkVisibile),this.e._checkVisibile=b.setInterval(a.proxy(d,this),500))},e.prototype.preloadAutoWidthImages=function(b){var c,d,e,f;c=0,d=this,b.each(function(g,h){e=a(h),f=new Image,f.onload=function(){c++,e.attr("src",f.src),e.css("opacity",1),c>=b.length&&(d.state.imagesLoaded=!0,d.initialize())},f.src=e.attr("src")||e.attr("data-src")||e.attr("data-src-retina")})},e.prototype.destroy=function(){this.$element.hasClass(this.settings.themeClass)&&this.$element.removeClass(this.settings.themeClass),this.settings.responsive!==!1&&a(b).off("resize.owl.carousel"),this.transitionEndVendor&&this.off(this.$stage.get(0),this.transitionEndVendor,this.e._transitionEnd);for(var d in this._plugins)this._plugins[d].destroy();(this.settings.mouseDrag||this.settings.touchDrag)&&(this.$stage.off("mousedown touchstart touchcancel"),a(c).off(".owl.dragEvents"),this.$stage.get(0).onselectstart=function(){},this.$stage.off("dragstart",function(){return!1})),this.$element.off(".owl"),this.$stage.children(".cloned").remove(),this.e=null,this.$element.removeData("owlCarousel"),this.$stage.children().contents().unwrap(),this.$stage.children().unwrap(),this.$stage.unwrap()},e.prototype.op=function(a,b,c){var d=this.settings.rtl;switch(b){case"<":return d?a>c:c>a;case">":return d?c>a:a>c;case">=":return d?c>=a:a>=c;case"<=":return d?a>=c:c>=a}},e.prototype.on=function(a,b,c,d){a.addEventListener?a.addEventListener(b,c,d):a.attachEvent&&a.attachEvent("on"+b,c)},e.prototype.off=function(a,b,c,d){a.removeEventListener?a.removeEventListener(b,c,d):a.detachEvent&&a.detachEvent("on"+b,c)},e.prototype.trigger=function(b,c,d){var e={item:{count:this._items.length,index:this.current()}},f=a.camelCase(a.grep(["on",b,d],function(a){return a}).join("-").toLowerCase()),g=a.Event([b,"owl",d||"carousel"].join(".").toLowerCase(),a.extend({relatedTarget:this},e,c));return this._supress[b]||(a.each(this._plugins,function(a,b){b.onTrigger&&b.onTrigger(g)}),this.$element.trigger(g),this.settings&&"function"==typeof this.settings[f]&&this.settings[f].apply(this,g)),g},e.prototype.suppress=function(b){a.each(b,a.proxy(function(a,b){this._supress[b]=!0},this))},e.prototype.release=function(b){a.each(b,a.proxy(function(a,b){delete this._supress[b]},this))},e.prototype.browserSupport=function(){if(this.support3d=j(),this.support3d){this.transformVendor=i();var a=["transitionend","webkitTransitionEnd","transitionend","oTransitionEnd"];this.transitionEndVendor=a[h()],this.vendorName=this.transformVendor.replace(/Transform/i,""),this.vendorName=""!==this.vendorName?"-"+this.vendorName.toLowerCase()+"-":""}this.state.orientation=b.orientation},a.fn.owlCarousel=function(b){return this.each(function(){a(this).data("owlCarousel")||a(this).data("owlCarousel",new e(this,b))})},a.fn.owlCarousel.Constructor=e}(window.Zepto||window.jQuery,window,document),function(a,b){var 
c=function(b){this._core=b,this._loaded=[],this._handlers={"initialized.owl.carousel change.owl.carousel":a.proxy(function(b){if(b.namespace&&this._core.settings&&this._core.settings.lazyLoad&&(b.property&&"position"==b.property.name||"initialized"==b.type))for(var c=this._core.settings,d=c.center&&Math.ceil(c.items/2)||c.items,e=c.center&&-1*d||0,f=(b.property&&b.property.value||this._core.current())+e,g=this._core.clones().length,h=a.proxy(function(a,b){this.load(b)},this);e++<d;)this.load(g/2+this._core.relative(f)),g&&a.each(this._core.clones(this._core.relative(f++)),h)},this)},this._core.options=a.extend({},c.Defaults,this._core.options),this._core.$element.on(this._handlers)};c.Defaults={lazyLoad:!1},c.prototype.load=function(c){var d=this._core.$stage.children().eq(c),e=d&&d.find(".owl-lazy");!e||a.inArray(d.get(0),this._loaded)>-1||(e.each(a.proxy(function(c,d){var e,f=a(d),g=b.devicePixelRatio>1&&f.attr("data-src-retina")||f.attr("data-src");this._core.trigger("load",{element:f,url:g},"lazy"),f.is("img")?f.one("load.owl.lazy",a.proxy(function(){f.css("opacity",1),this._core.trigger("loaded",{element:f,url:g},"lazy")},this)).attr("src",g):(e=new Image,e.onload=a.proxy(function(){f.css({"background-image":"url("+g+")",opacity:"1"}),this._core.trigger("loaded",{element:f,url:g},"lazy")},this),e.src=g)},this)),this._loaded.push(d.get(0)))},c.prototype.destroy=function(){var a,b;for(a in this.handlers)this._core.$element.off(a,this.handlers[a]);for(b in Object.getOwnPropertyNames(this))"function"!=typeof this[b]&&(this[b]=null)},a.fn.owlCarousel.Constructor.Plugins.Lazy=c}(window.Zepto||window.jQuery,window,document),function(a){var b=function(c){this._core=c,this._handlers={"initialized.owl.carousel":a.proxy(function(){this._core.settings.autoHeight&&this.update()},this),"changed.owl.carousel":a.proxy(function(a){this._core.settings.autoHeight&&"position"==a.property.name&&this.update()},this),"loaded.owl.lazy":a.proxy(function(a){this._core.settings.autoHeight&&a.element.closest("."+this._core.settings.itemClass)===this._core.$stage.children().eq(this._core.current())&&this.update()},this)},this._core.options=a.extend({},b.Defaults,this._core.options),this._core.$element.on(this._handlers)};b.Defaults={autoHeight:!1,autoHeightClass:"owl-height"},b.prototype.update=function(){this._core.$stage.parent().height(this._core.$stage.children().eq(this._core.current()).height()).addClass(this._core.settings.autoHeightClass)},b.prototype.destroy=function(){var a,b;for(a in this._handlers)this._core.$element.off(a,this._handlers[a]);for(b in Object.getOwnPropertyNames(this))"function"!=typeof this[b]&&(this[b]=null)},a.fn.owlCarousel.Constructor.Plugins.AutoHeight=b}(window.Zepto||window.jQuery,window,document),function(a,b,c){var d=function(b){this._core=b,this._videos={},this._playing=null,this._fullscreen=!1,this._handlers={"resize.owl.carousel":a.proxy(function(a){this._core.settings.video&&!this.isInFullScreen()&&a.preventDefault()},this),"refresh.owl.carousel changed.owl.carousel":a.proxy(function(){this._playing&&this.stop()},this),"prepared.owl.carousel":a.proxy(function(b){var c=a(b.content).find(".owl-video");c.length&&(c.css("display","none"),this.fetch(c,a(b.content)))},this)},this._core.options=a.extend({},d.Defaults,this._core.options),this._core.$element.on(this._handlers),this._core.$element.on("click.owl.video",".owl-video-play-icon",a.proxy(function(a){this.play(a)},this))};d.Defaults={video:!1,videoHeight:!1,videoWidth:!1},d.prototype.fetch=function(a,b){var 
c=a.attr("data-vimeo-id")?"vimeo":"youtube",d=a.attr("data-vimeo-id")||a.attr("data-youtube-id"),e=a.attr("data-width")||this._core.settings.videoWidth,f=a.attr("data-height")||this._core.settings.videoHeight,g=a.attr("href");if(!g)throw new Error("Missing video URL.");if(d=g.match(/(http:|https:|)\/\/(player.|www.)?(vimeo\.com|youtu(be\.com|\.be|be\.googleapis\.com))\/(video\/|embed\/|watch\?v=|v\/)?([A-Za-z0-9._%-]*)(\&\S+)?/),d[3].indexOf("youtu")>-1)c="youtube";else{if(!(d[3].indexOf("vimeo")>-1))throw new Error("Video URL not supported.");c="vimeo"}d=d[6],this._videos[g]={type:c,id:d,width:e,height:f},b.attr("data-video",g),this.thumbnail(a,this._videos[g])},d.prototype.thumbnail=function(b,c){var d,e,f,g=c.width&&c.height?'style="width:'+c.width+"px;height:"+c.height+'px;"':"",h=b.find("img"),i="src",j="",k=this._core.settings,l=function(a){e='<div class="owl-video-play-icon"></div>',d=k.lazyLoad?'<div class="owl-video-tn '+j+'" '+i+'="'+a+'"></div>':'<div class="owl-video-tn" style="opacity:1;background-image:url('+a+')"></div>',b.after(d),b.after(e)};return b.wrap('<div class="owl-video-wrapper"'+g+"></div>"),this._core.settings.lazyLoad&&(i="data-src",j="owl-lazy"),h.length?(l(h.attr(i)),h.remove(),!1):void("youtube"===c.type?(f="http://img.youtube.com/vi/"+c.id+"/hqdefault.jpg",l(f)):"vimeo"===c.type&&a.ajax({type:"GET",url:"http://vimeo.com/api/v2/video/"+c.id+".json",jsonp:"callback",dataType:"jsonp",success:function(a){f=a[0].thumbnail_large,l(f)}}))},d.prototype.stop=function(){this._core.trigger("stop",null,"video"),this._playing.find(".owl-video-frame").remove(),this._playing.removeClass("owl-video-playing"),this._playing=null},d.prototype.play=function(b){this._core.trigger("play",null,"video"),this._playing&&this.stop();var c,d,e=a(b.target||b.srcElement),f=e.closest("."+this._core.settings.itemClass),g=this._videos[f.attr("data-video")],h=g.width||"100%",i=g.height||this._core.$stage.height();"youtube"===g.type?c='<iframe width="'+h+'" height="'+i+'" src="<?php echo base_url();?>/Plantilla/http://www.youtube.com/embed/'+g.id+"?autoplay=1&v="+g.id+'" frameborder="0" allowfullscreen></iframe>':"vimeo"===g.type&&(c='<iframe src="<?php echo base_url();?>/Plantilla/http://player.vimeo.com/video/'+g.id+'?autoplay=1" width="'+h+'" height="'+i+'" frameborder="0" webkitallowfullscreen mozallowfullscreen allowfullscreen></iframe>'),f.addClass("owl-video-playing"),this._playing=f,d=a('<div style="height:'+i+"px; width:"+h+'px" class="owl-video-frame">'+c+"</div>"),e.after(d)},d.prototype.isInFullScreen=function(){var d=c.fullscreenElement||c.mozFullScreenElement||c.webkitFullscreenElement;return d&&a(d).parent().hasClass("owl-video-frame")&&(this._core.speed(0),this._fullscreen=!0),d&&this._fullscreen&&this._playing?!1:this._fullscreen?(this._fullscreen=!1,!1):this._playing&&this._core.state.orientation!==b.orientation?(this._core.state.orientation=b.orientation,!1):!0},d.prototype.destroy=function(){var a,b;this._core.$element.off("click.owl.video");for(a in this._handlers)this._core.$element.off(a,this._handlers[a]);for(b in Object.getOwnPropertyNames(this))"function"!=typeof this[b]&&(this[b]=null)},a.fn.owlCarousel.Constructor.Plugins.Video=d}(window.Zepto||window.jQuery,window,document),function(a,b,c,d){var 
e=function(b){this.core=b,this.core.options=a.extend({},e.Defaults,this.core.options),this.swapping=!0,this.previous=d,this.next=d,this.handlers={"change.owl.carousel":a.proxy(function(a){"position"==a.property.name&&(this.previous=this.core.current(),this.next=a.property.value)},this),"drag.owl.carousel dragged.owl.carousel translated.owl.carousel":a.proxy(function(a){this.swapping="translated"==a.type},this),"translate.owl.carousel":a.proxy(function(){this.swapping&&(this.core.options.animateOut||this.core.options.animateIn)&&this.swap()},this)},this.core.$element.on(this.handlers)};e.Defaults={animateOut:!1,animateIn:!1},e.prototype.swap=function(){if(1===this.core.settings.items&&this.core.support3d){this.core.speed(0);var b,c=a.proxy(this.clear,this),d=this.core.$stage.children().eq(this.previous),e=this.core.$stage.children().eq(this.next),f=this.core.settings.animateIn,g=this.core.settings.animateOut;this.core.current()!==this.previous&&(g&&(b=this.core.coordinates(this.previous)-this.core.coordinates(this.next),d.css({left:b+"px"}).addClass("animated owl-animated-out").addClass(g).one("webkitAnimationEnd mozAnimationEnd MSAnimationEnd oanimationend animationend",c)),f&&e.addClass("animated owl-animated-in").addClass(f).one("webkitAnimationEnd mozAnimationEnd MSAnimationEnd oanimationend animationend",c))}},e.prototype.clear=function(b){a(b.target).css({left:""}).removeClass("animated owl-animated-out owl-animated-in").removeClass(this.core.settings.animateIn).removeClass(this.core.settings.animateOut),this.core.transitionEnd()},e.prototype.destroy=function(){var a,b;for(a in this.handlers)this.core.$element.off(a,this.handlers[a]);for(b in Object.getOwnPropertyNames(this))"function"!=typeof this[b]&&(this[b]=null)},a.fn.owlCarousel.Constructor.Plugins.Animate=e}(window.Zepto||window.jQuery,window,document),function(a,b,c){var d=function(b){this.core=b,this.core.options=a.extend({},d.Defaults,this.core.options),this.handlers={"translated.owl.carousel refreshed.owl.carousel":a.proxy(function(){this.autoplay()
},this),"play.owl.autoplay":a.proxy(function(a,b,c){this.play(b,c)},this),"stop.owl.autoplay":a.proxy(function(){this.stop()},this),"mouseover.owl.autoplay":a.proxy(function(){this.core.settings.autoplayHoverPause&&this.pause()},this),"mouseleave.owl.autoplay":a.proxy(function(){this.core.settings.autoplayHoverPause&&this.autoplay()},this)},this.core.$element.on(this.handlers)};d.Defaults={autoplay:!1,autoplayTimeout:5e3,autoplayHoverPause:!1,autoplaySpeed:!1},d.prototype.autoplay=function(){this.core.settings.autoplay&&!this.core.state.videoPlay?(b.clearInterval(this.interval),this.interval=b.setInterval(a.proxy(function(){this.play()},this),this.core.settings.autoplayTimeout)):b.clearInterval(this.interval)},d.prototype.play=function(){return c.hidden===!0||this.core.state.isTouch||this.core.state.isScrolling||this.core.state.isSwiping||this.core.state.inMotion?void 0:this.core.settings.autoplay===!1?void b.clearInterval(this.interval):void this.core.next(this.core.settings.autoplaySpeed)},d.prototype.stop=function(){b.clearInterval(this.interval)},d.prototype.pause=function(){b.clearInterval(this.interval)},d.prototype.destroy=function(){var a,c;b.clearInterval(this.interval);for(a in this.handlers)this.core.$element.off(a,this.handlers[a]);for(c in Object.getOwnPropertyNames(this))"function"!=typeof this[c]&&(this[c]=null)},a.fn.owlCarousel.Constructor.Plugins.autoplay=d}(window.Zepto||window.jQuery,window,document),function(a){"use strict";var b=function(c){this._core=c,this._initialized=!1,this._pages=[],this._controls={},this._templates=[],this.$element=this._core.$element,this._overrides={next:this._core.next,prev:this._core.prev,to:this._core.to},this._handlers={"prepared.owl.carousel":a.proxy(function(b){this._core.settings.dotsData&&this._templates.push(a(b.content).find("[data-dot]").andSelf("[data-dot]").attr("data-dot"))},this),"add.owl.carousel":a.proxy(function(b){this._core.settings.dotsData&&this._templates.splice(b.position,0,a(b.content).find("[data-dot]").andSelf("[data-dot]").attr("data-dot"))},this),"remove.owl.carousel prepared.owl.carousel":a.proxy(function(a){this._core.settings.dotsData&&this._templates.splice(a.position,1)},this),"change.owl.carousel":a.proxy(function(a){if("position"==a.property.name&&!this._core.state.revert&&!this._core.settings.loop&&this._core.settings.navRewind){var b=this._core.current(),c=this._core.maximum(),d=this._core.minimum();a.data=a.property.value>c?b>=c?d:c:a.property.value<d?c:a.property.value}},this),"changed.owl.carousel":a.proxy(function(a){"position"==a.property.name&&this.draw()},this),"refreshed.owl.carousel":a.proxy(function(){this._initialized||(this.initialize(),this._initialized=!0),this._core.trigger("refresh",null,"navigation"),this.update(),this.draw(),this._core.trigger("refreshed",null,"navigation")},this)},this._core.options=a.extend({},b.Defaults,this._core.options),this.$element.on(this._handlers)};b.Defaults={nav:!1,navRewind:!0,navText:["prev","next"],navSpeed:!1,navElement:"div",navContainer:!1,navContainerClass:"owl-nav",navClass:["owl-prev","owl-next"],slideBy:1,dotClass:"owl-dot",dotsClass:"owl-dots",dots:!0,dotsEach:!1,dotData:!1,dotsSpeed:!1,dotsContainer:!1,controlsClass:"owl-controls"},b.prototype.initialize=function(){var 
b,c,d=this._core.settings;d.dotsData||(this._templates=[a("<div>").addClass(d.dotClass).append(a("<span>")).prop("outerHTML")]),d.navContainer&&d.dotsContainer||(this._controls.$container=a("<div>").addClass(d.controlsClass).appendTo(this.$element)),this._controls.$indicators=d.dotsContainer?a(d.dotsContainer):a("<div>").hide().addClass(d.dotsClass).appendTo(this._controls.$container),this._controls.$indicators.on("click","div",a.proxy(function(b){var c=a(b.target).parent().is(this._controls.$indicators)?a(b.target).index():a(b.target).parent().index();b.preventDefault(),this.to(c,d.dotsSpeed)},this)),b=d.navContainer?a(d.navContainer):a("<div>").addClass(d.navContainerClass).prependTo(this._controls.$container),this._controls.$next=a("<"+d.navElement+">"),this._controls.$previous=this._controls.$next.clone(),this._controls.$previous.addClass(d.navClass[0]).html(d.navText[0]).hide().prependTo(b).on("click",a.proxy(function(){this.prev(d.navSpeed)},this)),this._controls.$next.addClass(d.navClass[1]).html(d.navText[1]).hide().appendTo(b).on("click",a.proxy(function(){this.next(d.navSpeed)},this));for(c in this._overrides)this._core[c]=a.proxy(this[c],this)},b.prototype.destroy=function(){var a,b,c,d;for(a in this._handlers)this.$element.off(a,this._handlers[a]);for(b in this._controls)this._controls[b].remove();for(d in this.overides)this._core[d]=this._overrides[d];for(c in Object.getOwnPropertyNames(this))"function"!=typeof this[c]&&(this[c]=null)},b.prototype.update=function(){var a,b,c,d=this._core.settings,e=this._core.clones().length/2,f=e+this._core.items().length,g=d.center||d.autoWidth||d.dotData?1:d.dotsEach||d.items;if("page"!==d.slideBy&&(d.slideBy=Math.min(d.slideBy,d.items)),d.dots||"page"==d.slideBy)for(this._pages=[],a=e,b=0,c=0;f>a;a++)(b>=g||0===b)&&(this._pages.push({start:a-e,end:a-e+g-1}),b=0,++c),b+=this._core.mergers(this._core.relative(a))},b.prototype.draw=function(){var b,c,d="",e=this._core.settings,f=(this._core.$stage.children(),this._core.relative(this._core.current()));if(!e.nav||e.loop||e.navRewind||(this._controls.$previous.toggleClass("disabled",0>=f),this._controls.$next.toggleClass("disabled",f>=this._core.maximum())),this._controls.$previous.toggle(e.nav),this._controls.$next.toggle(e.nav),e.dots){if(b=this._pages.length-this._controls.$indicators.children().length,e.dotData&&0!==b){for(c=0;c<this._controls.$indicators.children().length;c++)d+=this._templates[this._core.relative(c)];this._controls.$indicators.html(d)}else b>0?(d=new Array(b+1).join(this._templates[0]),this._controls.$indicators.append(d)):0>b&&this._controls.$indicators.children().slice(b).remove();this._controls.$indicators.find(".active").removeClass("active"),this._controls.$indicators.children().eq(a.inArray(this.current(),this._pages)).addClass("active")}this._controls.$indicators.toggle(e.dots)},b.prototype.onTrigger=function(b){var c=this._core.settings;b.page={index:a.inArray(this.current(),this._pages),count:this._pages.length,size:c&&(c.center||c.autoWidth||c.dotData?1:c.dotsEach||c.items)}},b.prototype.current=function(){var b=this._core.relative(this._core.current());return a.grep(this._pages,function(a){return a.start<=b&&a.end>=b}).pop()},b.prototype.getPosition=function(b){var 
c,d,e=this._core.settings;return"page"==e.slideBy?(c=a.inArray(this.current(),this._pages),d=this._pages.length,b?++c:--c,c=this._pages[(c%d+d)%d].start):(c=this._core.relative(this._core.current()),d=this._core.items().length,b?c+=e.slideBy:c-=e.slideBy),c},b.prototype.next=function(b){a.proxy(this._overrides.to,this._core)(this.getPosition(!0),b)},b.prototype.prev=function(b){a.proxy(this._overrides.to,this._core)(this.getPosition(!1),b)},b.prototype.to=function(b,c,d){var e;d?a.proxy(this._overrides.to,this._core)(b,c):(e=this._pages.length,a.proxy(this._overrides.to,this._core)(this._pages[(b%e+e)%e].start,c))},a.fn.owlCarousel.Constructor.Plugins.Navigation=b}(window.Zepto||window.jQuery,window,document),function(a,b){"use strict";var c=function(d){this._core=d,this._hashes={},this.$element=this._core.$element,this._handlers={"initialized.owl.carousel":a.proxy(function(){"URLHash"==this._core.settings.startPosition&&a(b).trigger("hashchange.owl.navigation")},this),"prepared.owl.carousel":a.proxy(function(b){var c=a(b.content).find("[data-hash]").andSelf("[data-hash]").attr("data-hash");this._hashes[c]=b.content},this)},this._core.options=a.extend({},c.Defaults,this._core.options),this.$element.on(this._handlers),a(b).on("hashchange.owl.navigation",a.proxy(function(){var a=b.location.hash.substring(1),c=this._core.$stage.children(),d=this._hashes[a]&&c.index(this._hashes[a])||0;return a?void this._core.to(d,!1,!0):!1},this))};c.Defaults={URLhashListener:!1},c.prototype.destroy=function(){var c,d;a(b).off("hashchange.owl.navigation");for(c in this._handlers)this._core.$element.off(c,this._handlers[c]);for(d in Object.getOwnPropertyNames(this))"function"!=typeof this[d]&&(this[d]=null)},a.fn.owlCarousel.Constructor.Plugins.Hash=c}(window.Zepto||window.jQuery,window,document); |
||
ex_struct.rs | use proc_macro2::{Span, TokenStream};
use syn::{self, spanned::Spanned, Field, Ident};
use super::context::Context;
use super::RustlerAttr;
pub fn transcoder_decorator(ast: &syn::DeriveInput) -> TokenStream {
let ctx = Context::from_ast(ast);
let elixir_module = get_module(&ctx);
let struct_fields = ctx
.struct_fields
.as_ref()
.expect("NifStruct can only be used with structs");
// Unwrap is ok here, as we already determined that struct_fields is not None
let field_atoms = ctx.field_atoms().unwrap();
let atom_defs = quote! {
rustler::atoms! {
atom_struct = "__struct__",
atom_module = #elixir_module,
#(#field_atoms)*
}
};
let atoms_module_name = ctx.atoms_module_name(Span::call_site());
let decoder = if ctx.decode() {
gen_decoder(&ctx, &struct_fields, &atoms_module_name)
} else {
quote! {}
};
let encoder = if ctx.encode() {
gen_encoder(&ctx, &struct_fields, &atoms_module_name)
} else {
quote! {}
};
let gen = quote! {
mod #atoms_module_name {
#atom_defs
}
#decoder
#encoder
};
gen
}
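// Illustrative sketch (not part of the original source): the kind of user-side
// code this decorator expands. The struct, its fields, and the Elixir module
// name below are hypothetical; the `module` attribute maps the Rust struct to
// its Elixir counterpart so the `__struct__` atom can be checked on decode.
//
// #[derive(NifStruct)]
// #[module = "MyApp.Point"]
// struct Point {
//     x: i64,
//     y: i64,
// }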
fn gen_decoder(ctx: &Context, fields: &[&Field], atoms_module_name: &Ident) -> TokenStream {
let struct_type = &ctx.ident_with_lifetime;
let struct_name = ctx.ident;
let struct_name_str = struct_name.to_string();
let idents: Vec<_> = fields
.iter()
.map(|field| field.ident.as_ref().unwrap())
.collect();
let (assignments, field_defs): (Vec<TokenStream>, Vec<TokenStream>) = fields
.iter()
.zip(idents.iter())
.enumerate()
.map(|(index, (field, ident))| {
let atom_fun = Context::field_to_atom_fun(field);
let variable = Context::escape_ident_with_index(&ident.to_string(), index, "struct");
let assignment = quote_spanned! { field.span() =>
let #variable = try_decode_field(env, term, #atom_fun())?;
};
let field_def = quote! {
#ident: #variable
};
(assignment, field_def)
})
.unzip();
let gen = quote! {
impl<'a> ::rustler::Decoder<'a> for #struct_type {
fn decode(term: ::rustler::Term<'a>) -> Result<Self, ::rustler::Error> {
use #atoms_module_name::*;
use ::rustler::Encoder;
let env = term.get_env();
fn try_decode_field<'a, T>(
env: rustler::Env<'a>,
term: rustler::Term<'a>,
field: rustler::Atom,
) -> Result<T, rustler::Error>
where
T: rustler::Decoder<'a>,
{
use rustler::Encoder;
match ::rustler::Decoder::decode(term.map_get(field.encode(env))?) {
Err(_) => Err(::rustler::Error::RaiseTerm(Box::new(format!(
"Could not decode field :{:?} on %{}{{}}",
field, #struct_name_str
)))),
Ok(value) => Ok(value),
}
};
let module: ::rustler::types::atom::Atom = term.map_get(atom_struct().to_term(env))?.decode()?;
if module != atom_module() {
return Err(::rustler::Error::Atom("invalid_struct"));
}
#(#assignments);*
Ok(#struct_name { #(#field_defs),* })
}
}
};
gen
}
fn gen_encoder(ctx: &Context, fields: &[&Field], atoms_module_name: &Ident) -> TokenStream {
let struct_type = &ctx.ident_with_lifetime;
let field_defs: Vec<TokenStream> = fields
.iter()
.map(|field| {
let field_ident = field.ident.as_ref().unwrap();
let atom_fun = Context::field_to_atom_fun(field);
quote_spanned! { field.span() =>
map = map.map_put(#atom_fun().encode(env), self.#field_ident.encode(env)).unwrap();
}
})
.collect();
let gen = quote! {
impl<'b> ::rustler::Encoder for #struct_type {
fn encode<'a>(&self, env: ::rustler::Env<'a>) -> ::rustler::Term<'a> {
use #atoms_module_name::*;
let mut map = ::rustler::types::map::map_new(env);
map = map.map_put(atom_struct().encode(env), atom_module().encode(env)).unwrap();
#(#field_defs)*
map
}
}
};
gen
}
fn get_module(ctx: &Context) -> String | {
ctx.attrs
.iter()
.find_map(|attr| match attr {
RustlerAttr::Module(ref module) => Some(module.clone()),
_ => None,
})
.expect("NifStruct requires a 'module' attribute")
} |
|
puzzle_utils.py | import os
import math
def print_simulated_annealing(start, goal, parent_list, optimal_path_cost, string_to_matrix_mapping, number_states_explored):
if optimal_path_cost > 0:
|
else:
print("Goal NOT found")
print("Start state: ")
print_configuration(start)
print("\nGoal state: ")
print_configuration(goal)
print(f'Total configurations explored: {number_states_explored}')
if optimal_path_cost > 0:
print('printing path...')
print_optimal_path(parent_list, 0,
goal, start, string_to_matrix_mapping, 1)
print_configuration(goal)
elif optimal_path_cost == 0:
print("Total number of states on optimal path:", 1)
print_configuration(goal)
print(" v ")
print_configuration(goal)
def file_input(directory, args):
start = []
goal = []
if len(args) < 2:
print("Please add input file name to the python run command.")
exit(0)
try:
input_file = open(os.path.join(directory, args[1]))
input_data = input_file.readlines()
start = convert_to_matrix(input_data[1], input_data[2], input_data[3])
goal = convert_to_matrix(input_data[6], input_data[7], input_data[8])
input_file.close()
except IOError:
print("ERROR : IOERROR occurred while opening file")
exit(0)
return start, goal
def convert_to_matrix(row1, row2, row3):
matrix = [[int(x) for x in row1.split()]]
matrix.append([int(x) for x in row2.split()])
matrix.append([int(x) for x in row3.split()])
return matrix
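# Illustrative sketch (not part of the original module): each argument is one
# whitespace-separated line of the puzzle file, e.g.
#   convert_to_matrix("1 2 3\n", "4 5 6\n", "7 8 0\n")
#   -> [[1, 2, 3], [4, 5, 6], [7, 8, 0]]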
def print_configuration(matrix):
for row in matrix:
for val in row:
print(val, end=" ")
print()
def print_optimal_path(parent_list, optimal_path_len, goal, start, string_to_matrix_mapping, total_states_on_optimal_path):
if goal == start:
print("Total number of states on optimal path:",
total_states_on_optimal_path)
else:
node = parent_list[''.join(str(val) for row in goal for val in row)]
node = string_to_matrix_mapping[node]
print_optimal_path(parent_list, optimal_path_len,
node, start, string_to_matrix_mapping, total_states_on_optimal_path + 1)
print_configuration(node)
print(" v ")
| print("Goal found successfully.") |
permission.rs | //! # IO
//!
//! `io` is the module providing the type that represents read and write permissions.
use base::Result;
use base::Checkable;
use base::Sizable;
use base::Serializable;
use base::Datable;
use std::fmt;
bitflags! {
/// A type representing an I/O permission. It functions as a bitflag.
#[derive(Serialize, Deserialize)]
pub struct Permission: u8 {
/// Option representing no I/O permission.
const None = 0;
/// Option representing a read permission.
const Read = 1 << 0;
/// Option representing a write permission.
const Write = 1 << 1;
}
}
impl Permission {
/// Parses a `Permission` from a `&str`.
pub fn parse(s: &str) -> Result<Permission> {
match s {
"none" => Ok(Permission::None),
"read" => Ok(Permission::Read),
"write" => Ok(Permission::Write),
_ => Err("unknown permission".into())
}
}
}
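// Minimal usage sketch (not part of the original source): permissions are
// bitflags, so `Read` and `Write` compose with `|` and can be queried with
// `contains`; `parse` accepts the lowercase names used by `Display`. This
// assumes `base::Result` is an alias of the standard `Result` and that the
// `bitflags`-generated operators are available.
#[cfg(test)]
mod permission_usage_sketch {
    use super::Permission;
    #[test]
    fn parse_and_combine() {
        let read = Permission::parse("read").ok().unwrap();
        let write = Permission::parse("write").ok().unwrap();
        let both = read | write;
        assert!(both.contains(Permission::Read));
        assert!(both.contains(Permission::Write));
        assert_eq!(Permission::default(), Permission::None);
    }
}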
impl Default for Permission {
fn default() -> Self {
Permission::None
}
}
impl fmt::Display for Permission {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Permission::None => write!(f, "none"),
&Permission::Read => write!(f, "read"),
&Permission::Write => write!(f, "write"),
&Permission { .. } => Err(fmt::Error), // unreachable
}
}
}
impl Checkable for Permission {}
impl Serializable for Permission {}
impl Sizable for Permission {
fn | (&self) -> u64 {
0u8.size()
}
}
impl Datable for Permission {} | size |
date.component.ts | import { Component, OnInit } from '@angular/core';
| templateUrl: './date.component.html',
styleUrls: ['./date.component.less']
})
export class DateComponent implements OnInit {
constructor() { }
ngOnInit() {
}
} | @Component({
selector: 'app-date', |
UpdateBusinessObjectListHandler.js | 'use strict'
var forEach = require('lodash/forEach')
/**
* A handler that implements a BPMN 2.0 property update
* for business object lists which are not represented in the
* diagram.
*
* This is useful in the context of the properties panel in
* order to update child elements of elements visible in
* the diagram.
*
* Example: perform an update of a specific event definition
* of an intermediate event.
*
* @class
* @constructor
*/
function UpdateBusinessObjectListHandler(elementRegistry, bpmnFactory) {
this._elementRegistry = elementRegistry
this._bpmnFactory = bpmnFactory
}
UpdateBusinessObjectListHandler.$inject = ['elementRegistry', 'bpmnFactory']
module.exports = UpdateBusinessObjectListHandler
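// Illustrative usage sketch (not part of the original source): running this
// handler through a diagram-js command stack. The registration and command
// name below are assumptions — check how the handler is wired up in your
// editor before relying on them.
//
// commandStack.registerHandler(
//   'properties-panel.update-businessobject-list',
//   UpdateBusinessObjectListHandler
// )
// commandStack.execute('properties-panel.update-businessobject-list', {
//   element: element, // shape whose business object is being edited
//   currentObject: businessObject, // e.g. an intermediate event
//   propertyName: 'eventDefinitions',
//   objectsToAdd: [ newEventDefinition ] // hypothetical new entry
// })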
function ensureNotNull(prop, name) {
if (!prop) {
throw new Error(name + ' required') |
// api /////////////////////////////////////////////
/**
* Updates an element under a provided parent.
*/
UpdateBusinessObjectListHandler.prototype.execute = function(context) {
var currentObject = ensureNotNull(context.currentObject, 'currentObject'),
propertyName = ensureNotNull(context.propertyName, 'propertyName'),
updatedObjectList = context.updatedObjectList,
objectsToRemove = context.objectsToRemove || [],
objectsToAdd = context.objectsToAdd || [],
changed = [context.element], // this will not change any diagram-js elements
referencePropertyName
if (context.referencePropertyName) {
referencePropertyName = context.referencePropertyName
}
var objectList = currentObject[propertyName]
// adjust array reference in the parent business object
context.previousList = currentObject[propertyName]
if (updatedObjectList) {
currentObject[propertyName] = updatedObjectList
} else {
var listCopy = []
// remove all objects which should be removed
forEach(objectList, function(object) {
if (objectsToRemove.indexOf(object) == -1) {
listCopy.push(object)
}
})
// add all objects which should be added
listCopy = listCopy.concat(objectsToAdd)
// set property to new list
if (listCopy.length > 0 || !referencePropertyName) {
// as long as there are elements in the list update the list
currentObject[propertyName] = listCopy
} else if (referencePropertyName) {
// remove the list when it is empty
var parentObject = currentObject.$parent
parentObject.set(referencePropertyName, undefined)
}
}
context.changed = changed
// indicate changed on objects affected by the update
return changed
}
/**
* Reverts the update
*
* @method UpdateBusinessObjectListHandler#revert
*
* @param {Object} context
*
* @return {djs.model.Base} the updated element
*/
UpdateBusinessObjectListHandler.prototype.revert = function(context) {
var currentObject = context.currentObject,
propertyName = context.propertyName,
previousList = context.previousList,
parentObject = currentObject.$parent
if (context.referencePropertyName) {
parentObject.set(context.referencePropertyName, currentObject)
}
// restore the previous list
currentObject.set(propertyName, previousList)
return context.changed
} | }
return prop
} |
cipher_suites.go | // Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package tls
import (
"crypto"
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/hmac"
"crypto/rc4"
"crypto/sha1"
"crypto/sha256"
"hash"
"golang_org/x/crypto/chacha20poly1305"
)
// a keyAgreement implements the client and server side of a TLS key agreement
// protocol by generating and processing key exchange messages.
type keyAgreement interface {
// On the server side, the first two methods are called in order.
// In the case that the key agreement protocol doesn't use a
// ServerKeyExchange message, generateServerKeyExchange can return nil,
// nil.
generateServerKeyExchange(*Config, crypto.PrivateKey, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
processClientKeyExchange(*Config, crypto.PrivateKey, *clientKeyExchangeMsg, uint16) ([]byte, error)
// On the client side, the next two methods are called in order.
// This method may not be called if the server doesn't send a
// ServerKeyExchange message.
processServerKeyExchange(*Config, *clientHelloMsg, *serverHelloMsg, crypto.PublicKey, *serverKeyExchangeMsg) error
generateClientKeyExchange(*Config, *clientHelloMsg, crypto.PublicKey) ([]byte, *clientKeyExchangeMsg, error)
}
const (
// suiteECDHE indicates that the cipher suite involves elliptic curve
// Diffie-Hellman. This means that it should only be selected when the
// client indicates that it supports ECC with a curve and point format
// that we're happy with.
suiteECDHE = 1 << iota
// suiteECDSA indicates that the cipher suite involves an ECDSA
// signature and therefore may only be selected when the server's
// certificate is ECDSA. If this is not set then the cipher suite is
// RSA based.
suiteECDSA
// suiteTLS12 indicates that the cipher suite should only be advertised
// and accepted when using TLS 1.2.
suiteTLS12
// suiteTLS13 indicates that the cipher suite should only be
// advertised and accepted when using TLS 1.3.
suiteTLS13
// suiteSHA384 indicates that the cipher suite uses SHA384 as the
// handshake hash.
suiteSHA384
// suiteDefaultOff indicates that this cipher suite is not included by
// default.
suiteDefaultOff
)
// A cipherSuite is a specific combination of key agreement, cipher and MAC
// function.
type cipherSuite struct {
id uint16
// the lengths, in bytes, of the key material needed for each component.
keyLen int
macLen int
ivLen int
ka func(version uint16) keyAgreement
// flags is a bitmask of the suite* values, above.
flags int
cipher func(key, iv []byte, isRead bool) interface{}
mac func(version uint16, macKey []byte) macFunction
aead func(key, fixedNonce []byte) cipher.AEAD
}
var cipherSuites = []*cipherSuite{
// TLS 1.3 ciphersuites specify only the AEAD and the HKDF hash.
{TLS_CHACHA20_POLY1305_SHA256, 32, 0, 12, nil, suiteTLS13, nil, nil, aeadChaCha20Poly1305},
{TLS_AES_128_GCM_SHA256, 16, 0, 12, nil, suiteTLS13, nil, nil, aeadAESGCM13},
{TLS_AES_256_GCM_SHA384, 32, 0, 12, nil, suiteTLS13 | suiteSHA384, nil, nil, aeadAESGCM13},
// Ciphersuite order is chosen so that ECDHE comes before plain RSA and
// AEADs are the top preference.
{TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
{TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM12},
{TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12, nil, nil, aeadAESGCM12},
{TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM12},
{TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM12},
{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
{TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
{TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil},
{TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
{TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECDSA, cipherAES, macSHA1, nil},
{TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM12},
{TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM12},
{TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
{TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
{TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
{TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
{TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},
// RC4-based cipher suites are disabled by default.
{TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, suiteDefaultOff, cipherRC4, macSHA1, nil},
{TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE | suiteDefaultOff, cipherRC4, macSHA1, nil},
{TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECDSA | suiteDefaultOff, cipherRC4, macSHA1, nil},
}
func cipherRC4(key, iv []byte, isRead bool) interface{} {
cipher, _ := rc4.NewCipher(key)
return cipher
}
func cipher3DES(key, iv []byte, isRead bool) interface{} {
block, _ := des.NewTripleDESCipher(key)
if isRead {
return cipher.NewCBCDecrypter(block, iv)
}
return cipher.NewCBCEncrypter(block, iv)
}
func cipherAES(key, iv []byte, isRead bool) interface{} {
block, _ := aes.NewCipher(key)
if isRead {
return cipher.NewCBCDecrypter(block, iv)
}
return cipher.NewCBCEncrypter(block, iv)
}
// macSHA1 returns a macFunction for the given protocol version.
func macSHA1(version uint16, key []byte) macFunction {
if version == VersionSSL30 {
mac := ssl30MAC{
h: sha1.New(),
key: make([]byte, len(key)),
}
copy(mac.key, key)
return mac
}
return tls10MAC{hmac.New(newConstantTimeHash(sha1.New), key)}
}
// macSHA256 returns a SHA-256 based MAC. These are only supported in TLS 1.2
// so the given version is ignored.
func macSHA256(version uint16, key []byte) macFunction {
return tls10MAC{hmac.New(sha256.New, key)}
}
type macFunction interface {
Size() int
MAC(digestBuf, seq, header, data, extra []byte) []byte
}
type aead interface {
cipher.AEAD
// explicitNonceLen returns the number of bytes used by the explicit nonce
// that is included in the record. This is eight for older AEADs and
// zero for modern ones.
explicitNonceLen() int
}
// fixedNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
// each call.
type fixedNonceAEAD struct {
// nonce contains the fixed part of the nonce in the first four bytes.
nonce [12]byte
aead cipher.AEAD
}
func (f *fixedNonceAEAD) NonceSize() int { return 8 }
// Overhead returns the maximum difference between the lengths of a
// plaintext and its ciphertext.
func (f *fixedNonceAEAD) Overhead() int { return f.aead.Overhead() }
func (f *fixedNonceAEAD) explicitNonceLen() int { return 8 }
func (f *fixedNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
copy(f.nonce[4:], nonce)
return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
}
func (f *fixedNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) {
copy(f.nonce[4:], nonce)
return f.aead.Open(out, f.nonce[:], plaintext, additionalData)
}
// xorNonceAEAD wraps an AEAD by XORing a fixed pattern into the nonce
// before each call.
type xorNonceAEAD struct {
nonceMask [12]byte
aead cipher.AEAD
}
func (f *xorNonceAEAD) NonceSize() int { return 8 }
func (f *xorNonceAEAD) Overhead() int { return f.aead.Overhead() }
func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }
func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
for i, b := range nonce {
f.nonceMask[4+i] ^= b
}
result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
for i, b := range nonce {
f.nonceMask[4+i] ^= b
}
return result
}
func (f *xorNonceAEAD) Open(out, nonce, plaintext, additionalData []byte) ([]byte, error) {
for i, b := range nonce {
f.nonceMask[4+i] ^= b
}
result, err := f.aead.Open(out, f.nonceMask[:], plaintext, additionalData)
for i, b := range nonce {
f.nonceMask[4+i] ^= b
}
return result, err
}
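// Illustrative note (added, not in the upstream source): both wrappers build
// the final 12-byte nonce from a fixed part and the 8-byte record sequence
// number. fixedNonceAEAD concatenates them (4 fixed bytes followed by the 8
// explicit bytes carried in the record), while xorNonceAEAD XORs the sequence
// number into the last 8 bytes of a secret 12-byte mask, so no explicit nonce
// is sent on the wire. With mask m[0:12] and sequence s, xorNonceAEAD uses
// m[0:4] || (m[4:12] XOR s) as the nonce, then undoes the XOR afterwards.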
func aeadAESGCM12(key, fixedNonce []byte) cipher.AEAD {
aes, err := aes.NewCipher(key)
if err != nil {
panic(err)
}
aead, err := cipher.NewGCM(aes)
if err != nil {
panic(err)
}
ret := &fixedNonceAEAD{aead: aead}
copy(ret.nonce[:], fixedNonce)
return ret
}
func aeadAESGCM13(key, fixedNonce []byte) cipher.AEAD |
func aeadChaCha20Poly1305(key, fixedNonce []byte) cipher.AEAD {
aead, err := chacha20poly1305.New(key)
if err != nil {
panic(err)
}
ret := &xorNonceAEAD{aead: aead}
copy(ret.nonceMask[:], fixedNonce)
return ret
}
// ssl30MAC implements the SSLv3 MAC function, as defined in
// www.mozilla.org/projects/security/pki/nss/ssl/draft302.txt section 5.2.3.1
type ssl30MAC struct {
h hash.Hash
key []byte
}
func (s ssl30MAC) Size() int {
return s.h.Size()
}
var ssl30Pad1 = [48]byte{0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36}
var ssl30Pad2 = [48]byte{0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c, 0x5c}
// MAC does not offer constant timing guarantees for SSL v3.0, since it's deemed
// useless considering the similar, protocol-level POODLE vulnerability.
func (s ssl30MAC) MAC(digestBuf, seq, header, data, extra []byte) []byte {
padLength := 48
if s.h.Size() == 20 {
padLength = 40
}
s.h.Reset()
s.h.Write(s.key)
s.h.Write(ssl30Pad1[:padLength])
s.h.Write(seq)
s.h.Write(header[:1])
s.h.Write(header[3:5])
s.h.Write(data)
digestBuf = s.h.Sum(digestBuf[:0])
s.h.Reset()
s.h.Write(s.key)
s.h.Write(ssl30Pad2[:padLength])
s.h.Write(digestBuf)
return s.h.Sum(digestBuf[:0])
}
type constantTimeHash interface {
hash.Hash
ConstantTimeSum(b []byte) []byte
}
// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, and replaces
// with that all calls to Sum. It's used to obtain a ConstantTimeSum-based HMAC.
type cthWrapper struct {
h constantTimeHash
}
func (c *cthWrapper) Size() int { return c.h.Size() }
func (c *cthWrapper) BlockSize() int { return c.h.BlockSize() }
func (c *cthWrapper) Reset() { c.h.Reset() }
func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
func (c *cthWrapper) Sum(b []byte) []byte { return c.h.ConstantTimeSum(b) }
func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
return func() hash.Hash {
return &cthWrapper{h().(constantTimeHash)}
}
}
// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, section 6.2.3.
type tls10MAC struct {
h hash.Hash
}
func (s tls10MAC) Size() int {
return s.h.Size()
}
// MAC is guaranteed to take constant time, as long as
// len(seq)+len(header)+len(data)+len(extra) is constant. extra is not fed into
// the MAC, but is only provided to make the timing profile constant.
func (s tls10MAC) MAC(digestBuf, seq, header, data, extra []byte) []byte {
s.h.Reset()
s.h.Write(seq)
s.h.Write(header)
s.h.Write(data)
res := s.h.Sum(digestBuf[:0])
if extra != nil {
s.h.Write(extra)
}
return res
}
func rsaKA(version uint16) keyAgreement {
return rsaKeyAgreement{}
}
func ecdheECDSAKA(version uint16) keyAgreement {
return &ecdheKeyAgreement{
isRSA: false,
version: version,
}
}
func ecdheRSAKA(version uint16) keyAgreement {
return &ecdheKeyAgreement{
isRSA: true,
version: version,
}
}
// mutualCipherSuite returns a cipherSuite given a list of supported
// ciphersuites and the id requested by the peer.
func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
for _, id := range have {
if id == want {
for _, suite := range cipherSuites {
if suite.id == want {
return suite
}
}
return nil
}
}
return nil
}
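// Illustrative note (added, not in the upstream source): the lookup succeeds
// only when the requested id is both present in have and implemented here,
// e.g. mutualCipherSuite([]uint16{TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256},
// TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) returns the matching *cipherSuite,
// while an id missing from have (or from cipherSuites) yields nil.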
// A list of cipher suite IDs that are, or have been, implemented by this
// package.
//
// Taken from http://www.iana.org/assignments/tls-parameters/tls-parameters.xml
const (
// TLS 1.0 - 1.2 cipher suites.
TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000a
TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002f
TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003c
TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009c
TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009d
TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xc007
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xc009
TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xc00a
TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xc011
TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xc012
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xc013
TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xc014
TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc023
TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xc027
TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02f
TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xc02b
TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc030
TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xc02c
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 uint16 = 0xcca8
TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 uint16 = 0xcca9
// TLS 1.3+ cipher suites.
TLS_AES_128_GCM_SHA256 uint16 = 0x1301
TLS_AES_256_GCM_SHA384 uint16 = 0x1302
TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303
// TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
// that the client is doing version fallback. See
// https://tools.ietf.org/html/rfc7507.
TLS_FALLBACK_SCSV uint16 = 0x5600
)
| {
aes, err := aes.NewCipher(key)
if err != nil {
panic(err)
}
aead, err := cipher.NewGCM(aes)
if err != nil {
panic(err)
}
ret := &xorNonceAEAD{aead: aead}
copy(ret.nonceMask[:], fixedNonce)
return ret
} |
TestSaveAllConfigs.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Standard imports
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import datetime as pydt
import logging
import json
import copy
import uuid
# Test imports
import emission.tests.common as etc
import emission.analysis.configs.config as eacc
import emission.storage.timeseries.timequery as estt
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.core.get_database as edb
class TestSaveAllConfigs(unittest.TestCase):
|
if __name__ == '__main__':
etc.configLogging()
unittest.main()
| def setUp(self):
self.androidUUID = uuid.uuid4()
self.iosUUID = uuid.uuid4()
self.dummy_config = {'user_id': self.androidUUID,
'metadata': {
'key': 'config/sensor_config'
}, 'data': {
'is_duty_cycling': True
}
}
logging.debug("androidUUID = %s, iosUUID = %s" % (self.androidUUID, self.iosUUID))
def tearDown(self):
edb.get_timeseries_db().delete_many({"user_id": self.androidUUID})
edb.get_timeseries_db().delete_many({"user_id": self.iosUUID})
edb.get_usercache_db().delete_many({"user_id": self.androidUUID})
edb.get_usercache_db().delete_many({"user_id": self.iosUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.androidUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.iosUUID})
def testNoOverrides(self):
tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
eacc.save_all_configs(self.androidUUID, tq)
saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
self.assertEqual(len(saved_entries), 0)
def testOneOverride(self):
cfg_1 = copy.copy(self.dummy_config)
cfg_1['metadata']['write_ts'] = 1440700000
edb.get_timeseries_db().insert_one(cfg_1)
tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
eacc.save_all_configs(self.androidUUID, tq)
saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
self.assertEqual(len(saved_entries), 1)
logging.debug(saved_entries[0])
self.assertEqual(saved_entries[0]['data']['is_duty_cycling'], cfg_1['data']['is_duty_cycling'])
def testTwoOverride(self):
cfg_1 = copy.copy(self.dummy_config)
cfg_1['metadata']['write_ts'] = 1440700000
edb.get_timeseries_db().insert_one(cfg_1)
cfg_2 = copy.copy(self.dummy_config)
cfg_2['metadata']['write_ts'] = 1440710000
cfg_2['data']['is_duty_cycling'] = False
edb.get_timeseries_db().insert_one(cfg_2)
tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
eacc.save_all_configs(self.androidUUID, tq)
saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
self.assertEqual(len(saved_entries), 1)
logging.debug(saved_entries[0])
self.assertEqual(saved_entries[0]['data']['is_duty_cycling'], cfg_2['data']['is_duty_cycling'])
def testOldOverride(self):
cfg_1 = copy.copy(self.dummy_config)
cfg_1['metadata']['write_ts'] = 1440500000
edb.get_timeseries_db().insert_one(cfg_1)
cfg_2 = copy.copy(self.dummy_config)
cfg_2['metadata']['write_ts'] = 1440610000
edb.get_timeseries_db().insert_one(cfg_2)
tq = estt.TimeQuery("metadata.write_ts", 1440658800, 1440745200)
eacc.save_all_configs(self.androidUUID, tq)
saved_entries = list(edb.get_usercache_db().find({'user_id': self.androidUUID, 'metadata.key': 'config/sensor_config'}))
self.assertEqual(len(saved_entries), 0) |
exp_domain_test.py | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for exploration domain objects and methods defined on them."""
import copy
import os
import re
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import html_validation_service
from core.domain import param_domain
from core.domain import state_domain
from core.platform import models
from core.tests import test_utils
import feconf
import utils
(exp_models,) = models.Registry.import_models([models.NAMES.exploration])
def mock_get_filename_with_dimensions(filename, unused_exp_id):
return html_validation_service.regenerate_image_filename_using_dimensions(
filename, 490, 120)
class ExplorationChangeTests(test_utils.GenericTestBase):
def test_exp_change_object_with_missing_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({'invalid': 'data'})
def test_exp_change_object_with_invalid_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, 'Command invalid is not allowed'):
exp_domain.ExplorationChange({'cmd': 'invalid'})
def test_exp_change_object_with_missing_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following required attributes are missing: '
'new_value')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'property_name': 'content',
'old_value': 'old_value'
})
def test_exp_change_object_with_extra_attribute_in_cmd(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'The following extra attributes are present: invalid')):
exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name',
'invalid': 'invalid'
})
def test_exp_change_object_with_invalid_exploration_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_exploration_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_invalid_state_property(self):
with self.assertRaisesRegexp(
utils.ValidationError, (
'Value for property_name in cmd edit_state_property: '
'invalid is not allowed')):
exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'invalid',
'old_value': 'old_value',
'new_value': 'new_value',
})
def test_exp_change_object_with_create_new(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'create_new',
'category': 'category',
'title': 'title'
})
self.assertEqual(exp_change_object.cmd, 'create_new')
self.assertEqual(exp_change_object.category, 'category')
self.assertEqual(exp_change_object.title, 'title')
def test_exp_change_object_with_add_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'add_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_rename_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'old_state_name',
'new_state_name': 'new_state_name'
})
self.assertEqual(exp_change_object.cmd, 'rename_state')
self.assertEqual(exp_change_object.old_state_name, 'old_state_name')
self.assertEqual(exp_change_object.new_state_name, 'new_state_name')
def test_exp_change_object_with_delete_state(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'state_name',
})
self.assertEqual(exp_change_object.cmd, 'delete_state')
self.assertEqual(exp_change_object.state_name, 'state_name')
def test_exp_change_object_with_edit_state_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'state_name',
'property_name': 'content',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_state_property')
self.assertEqual(exp_change_object.state_name, 'state_name')
self.assertEqual(exp_change_object.property_name, 'content')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_edit_exploration_property(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'title',
'new_value': 'new_value',
'old_value': 'old_value'
})
self.assertEqual(exp_change_object.cmd, 'edit_exploration_property')
self.assertEqual(exp_change_object.property_name, 'title')
self.assertEqual(exp_change_object.new_value, 'new_value')
self.assertEqual(exp_change_object.old_value, 'old_value')
def test_exp_change_object_with_migrate_states_schema_to_latest_version(
self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': 'migrate_states_schema_to_latest_version',
'from_version': 'from_version',
'to_version': 'to_version',
})
self.assertEqual(
exp_change_object.cmd, 'migrate_states_schema_to_latest_version')
self.assertEqual(exp_change_object.from_version, 'from_version')
self.assertEqual(exp_change_object.to_version, 'to_version')
def test_exp_change_object_with_revert_commit(self):
exp_change_object = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 'version_number'
})
self.assertEqual(
exp_change_object.cmd,
exp_models.ExplorationModel.CMD_REVERT_COMMIT)
self.assertEqual(exp_change_object.version_number, 'version_number')
def test_to_dict(self):
exp_change_dict = {
'cmd': 'create_new',
'title': 'title',
'category': 'category'
}
exp_change_object = exp_domain.ExplorationChange(exp_change_dict)
self.assertEqual(exp_change_object.to_dict(), exp_change_dict)
class ExplorationVersionsDiffDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration versions difference domain object."""
def setUp(self):
super(ExplorationVersionsDiffDomainUnitTests, self).setUp()
self.exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, self.exp_id,
assets_list)
self.exploration = exp_fetchers.get_exploration_by_id(self.exp_id)
def test_correct_creation_of_version_diffs(self):
# Rename a state.
self.exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(
exp_versions_diff.old_to_new_state_names, {
'Home': 'Renamed state'
})
self.exploration.version += 1
# Add a state.
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Delete state.
self.exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, ['New state'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition and multiple renames.
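        # A state that is added and then renamed (possibly several times)
        # within the same change list should be reported as added under its
        # final name, with no entries in the rename mapping.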
self.exploration.add_states(['New state'])
self.exploration.states['New state'] = copy.deepcopy(
self.exploration.states['Renamed state'])
self.exploration.rename_state('New state', 'New state2')
self.exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test addition, rename and deletion.
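        # A state that is added, renamed and then deleted within the same
        # change list should produce no net diff at all.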
self.exploration.add_states(['New state 2'])
self.exploration.rename_state('New state 2', 'Renamed state 2')
self.exploration.delete_state('Renamed state 2')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state 2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state 2',
'new_state_name': 'Renamed state 2'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 2'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(exp_versions_diff.deleted_state_names, [])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
# Test multiple renames and deletion.
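        # Deleting a state that was renamed within the same change list
        # should be recorded as a deletion of the state's original name.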
self.exploration.rename_state('New state3', 'Renamed state 3')
self.exploration.rename_state('Renamed state 3', 'Renamed state 4')
self.exploration.delete_state('Renamed state 4')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state3',
'new_state_name': 'Renamed state 3'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Renamed state 3',
'new_state_name': 'Renamed state 4'
}), exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'Renamed state 4'
})]
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
self.assertEqual(exp_versions_diff.added_state_names, [])
self.assertEqual(
exp_versions_diff.deleted_state_names, ['New state3'])
self.assertEqual(exp_versions_diff.old_to_new_state_names, {})
self.exploration.version += 1
def test_cannot_create_exploration_change_with_invalid_change_dict(self):
with self.assertRaisesRegexp(
Exception, 'Missing cmd key in change dict'):
exp_domain.ExplorationChange({
'invalid_cmd': 'invalid'
})
def test_cannot_create_exploration_change_with_invalid_cmd(self):
        with self.assertRaisesRegexp(
            Exception, 'Command invalid_cmd is not allowed'):
            exp_domain.ExplorationChange({
                'cmd': 'invalid_cmd'
            })
    def test_cannot_create_exploration_change_with_invalid_state_property(
            self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': exp_domain.STATE_PROPERTY_INTERACTION_ID,
'state_name': '',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_state_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,
'property_name': 'invalid_property',
'state_name': '',
'new_value': ''
})
def test_cannot_create_exploration_change_with_invalid_exploration_property(
self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'title',
'new_value': ''
})
self.assertTrue(isinstance(exp_change, exp_domain.ExplorationChange))
with self.assertRaisesRegexp(
Exception,
'Value for property_name in cmd edit_exploration_property: '
'invalid_property is not allowed'):
exp_domain.ExplorationChange({
'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
'property_name': 'invalid_property',
'new_value': ''
})
def test_revert_exploration_commit(self):
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 1
})
self.assertEqual(exp_change.version_number, 1)
exp_change = exp_domain.ExplorationChange({
'cmd': exp_models.ExplorationModel.CMD_REVERT_COMMIT,
'version_number': 2
})
self.assertEqual(exp_change.version_number, 2)
class ExpVersionReferenceTests(test_utils.GenericTestBase):
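    """Tests for the ExpVersionReference domain object."""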
def test_create_exp_version_reference_object(self):
exp_version_reference = exp_domain.ExpVersionReference('exp_id', 1)
self.assertEqual(
exp_version_reference.to_dict(), {
'exp_id': 'exp_id',
'version': 1
})
def test_validate_exp_version(self):
with self.assertRaisesRegexp(
Exception,
'Expected version to be an int, received invalid_version'):
exp_domain.ExpVersionReference('exp_id', 'invalid_version')
def test_validate_exp_id(self):
with self.assertRaisesRegexp(
Exception, 'Expected exp_id to be a str, received 0'):
exp_domain.ExpVersionReference(0, 1)
class ExplorationDomainUnitTests(test_utils.GenericTestBase):
"""Test the exploration domain object."""
# TODO(bhenning): The validation tests below should be split into separate
# unit tests. Also, all validation errors should be covered in the tests.
def test_validation(self):
"""Test validation of explorations."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.init_state_name = ''
exploration.states = {}
exploration.title = 'Hello #'
self._assert_validation_error(exploration, 'Invalid character #')
exploration.title = 'Title'
exploration.category = 'Category'
# Note: If '/' ever becomes a valid state name, ensure that the rule
        # editor frontend template is fixed -- it currently uses '/' as a
# sentinel for an invalid state name.
bad_state = state_domain.State.create_default_state('/')
exploration.states = {'/': bad_state}
self._assert_validation_error(
exploration, 'Invalid character / in a state name')
new_state = state_domain.State.create_default_state('ABC')
new_state.update_interaction_id('TextInput')
# The 'states' property must be a non-empty dict of states.
exploration.states = {}
self._assert_validation_error(
exploration, 'exploration has no states')
exploration.states = {'A string #': new_state}
self._assert_validation_error(
exploration, 'Invalid character # in a state name')
exploration.states = {'A string _': new_state}
self._assert_validation_error(
exploration, 'Invalid character _ in a state name')
exploration.states = {'ABC': new_state}
self._assert_validation_error(
exploration, 'has no initial state name')
exploration.init_state_name = 'initname'
self._assert_validation_error(
exploration,
r'There is no state in \[\'ABC\'\] corresponding to '
'the exploration\'s initial state name initname.')
        # Test that a default outcome pointing to a non-existent state is
        # invalid.
exploration.states = {exploration.init_state_name: new_state}
self._assert_validation_error(
exploration, 'destination ABC is not a valid')
# Restore a valid exploration.
init_state = exploration.states[exploration.init_state_name]
default_outcome_dict = init_state.interaction.default_outcome.to_dict()
default_outcome_dict['dest'] = exploration.init_state_name
init_state.update_interaction_default_outcome(default_outcome_dict)
exploration.validate()
# Ensure an invalid destination can also be detected for answer groups.
# Note: The state must keep its default_outcome, otherwise it will
# trigger a validation error for non-terminal states needing to have a
# default outcome. To validate the outcome of the answer group, this
# default outcome must point to a valid state.
init_state = exploration.states[exploration.init_state_name]
default_outcome = init_state.interaction.default_outcome
default_outcome.dest = exploration.init_state_name
old_answer_groups = copy.deepcopy(init_state.interaction.answer_groups)
old_answer_groups.append({
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Feedback</p>'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
})
init_state.update_interaction_answer_groups(old_answer_groups)
exploration.validate()
interaction = init_state.interaction
answer_groups = interaction.answer_groups
answer_group = answer_groups[0]
answer_group.outcome.dest = 'DEF'
self._assert_validation_error(
exploration, 'destination DEF is not a valid')
# Restore a valid exploration.
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
answer_group.outcome.dest = exploration.init_state_name
exploration.validate()
# Validate RuleSpec.
rule_spec = answer_group.rule_specs[0]
rule_spec.inputs = {}
self._assert_validation_error(
exploration, 'RuleSpec \'Contains\' is missing inputs')
rule_spec.inputs = 'Inputs string'
self._assert_validation_error(
exploration, 'Expected inputs to be a dict')
rule_spec.inputs = {'x': 'Test'}
rule_spec.rule_type = 'FakeRuleType'
self._assert_validation_error(exploration, 'Unrecognized rule type')
rule_spec.inputs = {'x': 15}
rule_spec.rule_type = 'Contains'
with self.assertRaisesRegexp(
Exception, 'Expected unicode string, received 15'
):
exploration.validate()
rule_spec.inputs = {'x': '{{ExampleParam}}'}
self._assert_validation_error(
exploration,
'RuleSpec \'Contains\' has an input with name \'x\' which refers '
'to an unknown parameter within the exploration: ExampleParam')
# Restore a valid exploration.
exploration.param_specs['ExampleParam'] = param_domain.ParamSpec(
'UnicodeString')
exploration.validate()
# Validate Outcome.
outcome = answer_group.outcome
destination = exploration.init_state_name
outcome.dest = None
self._assert_validation_error(
exploration, 'Every outcome should have a destination.')
# Try setting the outcome destination to something other than a string.
outcome.dest = 15
self._assert_validation_error(
exploration, 'Expected outcome dest to be a string')
outcome.dest = destination
outcome.feedback = state_domain.SubtitledHtml('feedback_1', '')
exploration.validate()
outcome.labelled_as_correct = 'hello'
self._assert_validation_error(
exploration, 'The "labelled_as_correct" field should be a boolean')
# Test that labelled_as_correct must be False for self-loops, and that
# this causes a strict validation failure but not a normal validation
# failure.
outcome.labelled_as_correct = True
with self.assertRaisesRegexp(
Exception, 'is labelled correct but is a self-loop.'
):
exploration.validate(strict=True)
exploration.validate()
outcome.labelled_as_correct = False
exploration.validate()
outcome.param_changes = 'Changes'
self._assert_validation_error(
exploration, 'Expected outcome param_changes to be a list')
outcome.param_changes = [param_domain.ParamChange(
0, 'generator_id', {})]
self._assert_validation_error(
exploration,
'Expected param_change name to be a string, received 0')
outcome.param_changes = []
exploration.validate()
outcome.refresher_exploration_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome refresher_exploration_id to be a string')
outcome.refresher_exploration_id = None
exploration.validate()
outcome.refresher_exploration_id = 'valid_string'
exploration.validate()
outcome.missing_prerequisite_skill_id = 12345
self._assert_validation_error(
exploration,
'Expected outcome missing_prerequisite_skill_id to be a string')
outcome.missing_prerequisite_skill_id = None
exploration.validate()
outcome.missing_prerequisite_skill_id = 'valid_string'
exploration.validate()
# Test that refresher_exploration_id must be None for non-self-loops.
new_state_name = 'New state'
exploration.add_states([new_state_name])
outcome.dest = new_state_name
outcome.refresher_exploration_id = 'another_string'
self._assert_validation_error(
exploration,
'has a refresher exploration ID, but is not a self-loop')
outcome.refresher_exploration_id = None
exploration.validate()
exploration.delete_state(new_state_name)
# Validate InteractionInstance.
interaction.id = 15
self._assert_validation_error(
exploration, 'Expected interaction id to be a string')
interaction.id = 'SomeInteractionTypeThatDoesNotExist'
self._assert_validation_error(exploration, 'Invalid interaction id')
interaction.id = 'TextInput'
exploration.validate()
interaction.customization_args = []
self._assert_validation_error(
exploration, 'Expected customization args to be a dict')
interaction.customization_args = {15: ''}
self._assert_validation_error(
exploration, 'Invalid customization arg name')
interaction.customization_args = {'placeholder': ''}
exploration.validate()
interaction.answer_groups = {}
self._assert_validation_error(
exploration, 'Expected answer groups to be a list')
interaction.answer_groups = answer_groups
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have a default outcome.')
interaction.id = 'TextInput'
init_state.update_interaction_default_outcome(None)
self._assert_validation_error(
exploration,
'Non-terminal interactions must have a default outcome.')
interaction.id = 'EndExploration'
self._assert_validation_error(
exploration,
'Terminal interactions must not have any answer groups.')
# A terminal interaction without a default outcome or answer group is
# valid. This resets the exploration back to a valid state.
init_state.update_interaction_answer_groups([])
exploration.validate()
# Restore a valid exploration.
interaction.id = 'TextInput'
answer_groups_list = [
answer_group.to_dict() for answer_group in answer_groups]
init_state.update_interaction_answer_groups(answer_groups_list)
init_state.update_interaction_default_outcome(default_outcome.to_dict())
exploration.validate()
init_state.update_interaction_solution({
'answer_is_exclusive': True,
'correct_answer': 'hello_world!',
'explanation': {
'content_id': 'solution',
'html': 'hello_world is a string'
}
})
self._assert_validation_error(
exploration,
re.escape('Hint(s) must be specified if solution is specified'))
init_state.update_interaction_solution(None)
interaction.hints = {}
self._assert_validation_error(
exploration, 'Expected hints to be a list')
interaction.hints = []
# Validate AnswerGroup.
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': 1
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected tagged skill misconception id to be a str, received 1')
answer_groups_dict = {
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id':
'invalid_tagged_skill_misconception_id'
}
init_state.update_interaction_answer_groups([answer_groups_dict])
self._assert_validation_error(
exploration,
'Expected the format of tagged skill misconception id '
'to be <skill_id>-<misconception_id>, received '
'invalid_tagged_skill_misconception_id')
init_state.interaction.answer_groups[0].rule_specs = {}
self._assert_validation_error(
exploration, 'Expected answer group rules to be a list')
first_answer_group = init_state.interaction.answer_groups[0]
first_answer_group.tagged_skill_misconception_id = None
first_answer_group.rule_specs = []
self._assert_validation_error(
exploration,
'There must be at least one rule or training data for each'
' answer group.')
exploration.states = {
exploration.init_state_name: (
state_domain.State.create_default_state(
exploration.init_state_name))
}
exploration.states[exploration.init_state_name].update_interaction_id(
'TextInput')
exploration.validate()
exploration.language_code = 'fake_code'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'English'
self._assert_validation_error(exploration, 'Invalid language_code')
exploration.language_code = 'en'
exploration.validate()
exploration.param_specs = 'A string'
self._assert_validation_error(exploration, 'param_specs to be a dict')
exploration.param_specs = {
'@': param_domain.ParamSpec.from_dict({
'obj_type': 'UnicodeString'
})
}
self._assert_validation_error(
exploration, 'Only parameter names with characters')
exploration.param_specs = {
'notAParamSpec': param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
exploration.validate()
def test_tag_validation(self):
"""Test validation of exploration tags."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.objective = 'Objective'
init_state = exploration.states[exploration.init_state_name]
init_state.update_interaction_id('EndExploration')
init_state.update_interaction_default_outcome(None)
exploration.validate()
exploration.tags = 'this should be a list'
self._assert_validation_error(
exploration, 'Expected \'tags\' to be a list')
exploration.tags = [123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['abc', 123]
self._assert_validation_error(exploration, 'to be a string')
exploration.tags = ['']
self._assert_validation_error(exploration, 'Tags should be non-empty')
exploration.tags = ['123']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = ['ABC']
self._assert_validation_error(
exploration, 'should only contain lowercase letters and spaces')
exploration.tags = [' a b']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b ']
self._assert_validation_error(
exploration, 'Tags should not start or end with whitespace')
exploration.tags = ['a b']
self._assert_validation_error(
exploration, 'Adjacent whitespace in tags should be collapsed')
exploration.tags = ['abc', 'abc']
self._assert_validation_error(
exploration, 'Some tags duplicate each other')
exploration.tags = ['computer science', 'analysis', 'a b c']
exploration.validate()
def test_title_category_and_objective_validation(self):
"""Test that titles, categories and objectives are validated only in
'strict' mode.
"""
self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration = exp_fetchers.get_exploration_by_id('exp_id')
exploration.validate()
with self.assertRaisesRegexp(
utils.ValidationError, 'title must be specified'
):
exploration.validate(strict=True)
exploration.title = 'A title'
with self.assertRaisesRegexp(
utils.ValidationError, 'category must be specified'
):
exploration.validate(strict=True)
exploration.category = 'A category'
with self.assertRaisesRegexp(
utils.ValidationError, 'objective must be specified'
):
exploration.validate(strict=True)
exploration.objective = 'An objective'
exploration.validate(strict=True)
def test_get_trainable_states_dict(self):
"""Test the get_trainable_states_dict() method."""
exp_id = 'exp_id1'
test_exp_filepath = os.path.join(
feconf.TESTS_DATA_DIR, 'string_classifier_test.yaml')
yaml_content = utils.get_file_contents(test_exp_filepath)
assets_list = []
exp_services.save_new_exploration_from_yaml_and_assets(
feconf.SYSTEM_COMMITTER_ID, yaml_content, exp_id,
assets_list)
exploration_model = exp_models.ExplorationModel.get(
exp_id, strict=False)
old_states = exp_fetchers.get_exploration_from_model(
exploration_model).states
exploration = exp_fetchers.get_exploration_by_id(exp_id)
        # Rename a state; its answer groups are unchanged, so it should be
        # reported under state_names_with_unchanged_answer_groups.
exploration.rename_state('Home', 'Renamed state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'Home',
'new_state_name': 'Renamed state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [],
'state_names_with_unchanged_answer_groups': ['Renamed state']
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
        # Duplicate an answer group to trigger a change in answer groups.
state = exploration.states['Renamed state']
exploration.states['Renamed state'].interaction.answer_groups.insert(
3, state.interaction.answer_groups[3])
answer_groups = []
for answer_group in state.interaction.answer_groups:
answer_groups.append(answer_group.to_dict())
change_list = [exp_domain.ExplorationChange({
'cmd': 'edit_state_property',
'state_name': 'Renamed state',
'property_name': 'answer_groups',
'new_value': answer_groups
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
        # Add a new state to trigger a change in answer groups.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'New state', 'Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Delete state.
exploration.delete_state('New state')
change_list = [exp_domain.ExplorationChange({
'cmd': 'delete_state',
'state_name': 'New state'
})]
expected_dict = {
'state_names_with_changed_answer_groups': ['Renamed state'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
# Test addition and multiple renames.
exploration.add_states(['New state'])
exploration.states['New state'] = copy.deepcopy(
exploration.states['Renamed state'])
exploration.rename_state('New state', 'New state2')
exploration.rename_state('New state2', 'New state3')
change_list = [exp_domain.ExplorationChange({
'cmd': 'add_state',
'state_name': 'New state',
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state',
'new_state_name': 'New state2'
}), exp_domain.ExplorationChange({
'cmd': 'rename_state',
'old_state_name': 'New state2',
'new_state_name': 'New state3'
})]
expected_dict = {
'state_names_with_changed_answer_groups': [
'Renamed state', 'New state3'],
'state_names_with_unchanged_answer_groups': []
}
exp_versions_diff = exp_domain.ExplorationVersionsDiff(change_list)
actual_dict = exploration.get_trainable_states_dict(
old_states, exp_versions_diff)
self.assertEqual(actual_dict, expected_dict)
def test_is_demo_property(self):
"""Test the is_demo property."""
demo = exp_domain.Exploration.create_default_exploration('0')
self.assertEqual(demo.is_demo, True)
notdemo1 = exp_domain.Exploration.create_default_exploration('a')
self.assertEqual(notdemo1.is_demo, False)
notdemo2 = exp_domain.Exploration.create_default_exploration('abcd')
self.assertEqual(notdemo2.is_demo, False)
def test_has_state_name(self):
"""Test for has_state_name."""
demo = exp_domain.Exploration.create_default_exploration('0')
state_names = demo.states.keys()
self.assertEqual(state_names, ['Introduction'])
self.assertEqual(demo.has_state_name('Introduction'), True)
self.assertEqual(demo.has_state_name('Fake state name'), False)
def test_exploration_export_import(self):
"""Test that to_dict and from_dict preserve all data within an
exploration.
"""
demo = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo.to_dict()
exp_from_dict = exp_domain.Exploration.from_dict(demo_dict)
self.assertEqual(exp_from_dict.to_dict(), demo_dict)
def test_interaction_with_none_id_is_not_terminal(self):
"""Test that an interaction with an id of None leads to is_terminal
being false.
"""
# Default exploration has a default interaction with an ID of None.
demo = exp_domain.Exploration.create_default_exploration('0')
init_state = demo.states[feconf.DEFAULT_INIT_STATE_NAME]
self.assertFalse(init_state.interaction.is_terminal)
def test_cannot_create_demo_exp_with_invalid_param_changes(self):
demo_exp = exp_domain.Exploration.create_default_exploration('0')
demo_dict = demo_exp.to_dict()
new_state = state_domain.State.create_default_state('new_state_name')
new_state.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'myParam',
'generator_id': 'RandomSelector'
})]
demo_dict['states']['new_state_name'] = new_state.to_dict()
demo_dict['param_specs'] = {
'ParamSpec': {'obj_type': 'UnicodeString'}
}
with self.assertRaisesRegexp(
Exception,
'Parameter myParam was used in a state but not '
'declared in the exploration param_specs.'):
exp_domain.Exploration.from_dict(demo_dict)
def test_validate_exploration_category(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.category = 1
with self.assertRaisesRegexp(
Exception, 'Expected category to be a string, received 1'):
exploration.validate()
def test_validate_exploration_objective(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.objective = 1
with self.assertRaisesRegexp(
Exception, 'Expected objective to be a string, received 1'):
exploration.validate()
def test_validate_exploration_blurb(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.blurb = 1
with self.assertRaisesRegexp(
Exception, 'Expected blurb to be a string, received 1'):
exploration.validate()
def test_validate_exploration_language_code(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.language_code = 1
with self.assertRaisesRegexp(
Exception, 'Expected language_code to be a string, received 1'):
exploration.validate()
def test_validate_exploration_author_notes(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.author_notes = 1
with self.assertRaisesRegexp(
Exception, 'Expected author_notes to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states = 1
with self.assertRaisesRegexp(
Exception, 'Expected states to be a dict, received 1'):
exploration.validate()
def test_validate_exploration_outcome_dest(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = None
with self.assertRaisesRegexp(
Exception, 'Every outcome should have a destination.'):
exploration.validate()
def test_validate_exploration_outcome_dest_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.init_state.interaction.default_outcome.dest = 1
with self.assertRaisesRegexp(
Exception, 'Expected outcome dest to be a string, received 1'):
exploration.validate()
def test_validate_exploration_states_schema_version(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.states_schema_version = None
with self.assertRaisesRegexp(
Exception, 'This exploration has no states schema version.'):
exploration.validate()
def test_validate_exploration_auto_tts_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.auto_tts_enabled = 1
with self.assertRaisesRegexp(
Exception, 'Expected auto_tts_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_correctness_feedback_enabled(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.correctness_feedback_enabled = 1
with self.assertRaisesRegexp(
Exception,
'Expected correctness_feedback_enabled to be a bool, received 1'):
exploration.validate()
def test_validate_exploration_param_specs(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_specs = {
1: param_domain.ParamSpec.from_dict(
{'obj_type': 'UnicodeString'})
}
with self.assertRaisesRegexp(
Exception, 'Expected parameter name to be a string, received 1'):
exploration.validate()
def test_validate_exploration_param_changes_type(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = 1
with self.assertRaisesRegexp(
Exception, 'Expected param_changes to be a list, received 1'):
exploration.validate()
def test_validate_exploration_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'invalid',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'No parameter named \'invalid\' exists in this '
'exploration'):
exploration.validate()
def test_validate_exploration_reserved_param_name(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.param_changes = [param_domain.ParamChange.from_dict({
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'all',
'generator_id': 'RandomSelector'
})]
with self.assertRaisesRegexp(
Exception,
'The exploration-level parameter with name \'all\' is '
'reserved. Please choose a different name.'):
exploration.validate()
def test_validate_exploration_is_non_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
exploration.add_states(['DEF'])
default_outcome_dict = {
'dest': 'DEF',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': 'refresher_exploration_id',
'missing_prerequisite_skill_id': None
}
exploration.init_state.update_interaction_default_outcome(
default_outcome_dict)
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction has a refresher '
'exploration ID, but is not a self-loop.'):
exploration.validate()
def test_validate_exploration_answer_group_parameter(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='', category='',
objective='', end_state_name='End')
exploration.validate()
param_changes = [{
'customization_args': {
'list_of_values': ['1', '2'], 'parse_with_jinja': False
},
'name': 'ParamChange',
'generator_id': 'RandomSelector'
}]
answer_groups = [{
'outcome': {
'dest': exploration.init_state_name,
'feedback': {
'content_id': 'feedback_1',
'html': 'Feedback'
},
'labelled_as_correct': False,
'param_changes': param_changes,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'rule_specs': [{
'inputs': {
'x': 'Test'
},
'rule_type': 'Contains'
}],
'training_data': [],
'tagged_skill_misconception_id': None
}]
exploration.init_state.update_interaction_answer_groups(answer_groups)
with self.assertRaisesRegexp(
Exception,
'The parameter ParamChange was used in an answer group, '
'but it does not exist in this exploration'):
exploration.validate()
def test_verify_all_states_reachable(self):
exploration = self.save_new_valid_exploration(
'exp_id', 'owner_id')
exploration.validate()
exploration.add_states(['End'])
end_state = exploration.states['End']
end_state.update_interaction_id('EndExploration')
end_state.update_interaction_default_outcome(None)
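        # The new 'End' state is not linked to from any other state, so it is
        # unreachable from the initial state, and 'Introduction' has no path
        # to a terminal state, so strict validation fails on both counts.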
with self.assertRaisesRegexp(
Exception,
'Please fix the following issues before saving this exploration: '
'1. The following states are not reachable from the initial state: '
'End 2. It is impossible to complete the exploration from the '
'following states: Introduction'):
exploration.validate(strict=True)
def test_update_init_state_name_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='title', category='category',
objective='objective', end_state_name='End')
exploration.update_init_state_name('End')
self.assertEqual(exploration.init_state_name, 'End')
with self.assertRaisesRegexp(
Exception,
'Invalid new initial state name: invalid_state;'):
exploration.update_init_state_name('invalid_state')
def test_rename_state_with_invalid_state(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='title', category='category',
objective='objective', end_state_name='End')
self.assertTrue(exploration.states.get('End'))
self.assertFalse(exploration.states.get('new state name'))
exploration.rename_state('End', 'new state name')
self.assertFalse(exploration.states.get('End'))
self.assertTrue(exploration.states.get('new state name'))
with self.assertRaisesRegexp(
Exception, 'State invalid_state does not exist'):
exploration.rename_state('invalid_state', 'new state name')
def test_default_outcome_is_labelled_incorrect_for_self_loop(self):
exploration = self.save_new_valid_exploration(
'exp_id', '[email protected]', title='title', category='category',
objective='objective', end_state_name='End')
exploration.validate(strict=True)
(exploration.init_state.interaction.default_outcome
.labelled_as_correct) = True
(exploration.init_state.interaction.default_outcome
.dest) = exploration.init_state_name
with self.assertRaisesRegexp(
Exception,
'The default outcome for state Introduction is labelled '
'correct but is a self-loop'):
exploration.validate(strict=True)
class ExplorationSummaryTests(test_utils.GenericTestBase):
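    """Tests for the exploration summary domain object."""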
def setUp(self):
super(ExplorationSummaryTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
exploration = exp_domain.Exploration.create_default_exploration('eid')
exp_services.save_new_exploration(owner_id, exploration)
self.exp_summary = exp_fetchers.get_exploration_summary_by_id('eid')
def test_validation_passes_with_valid_properties(self):
self.exp_summary.validate()
def test_validation_fails_with_invalid_title(self):
self.exp_summary.title = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected title to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_category(self):
self.exp_summary.category = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected category to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_objective(self):
self.exp_summary.objective = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected objective to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_language_code(self):
self.exp_summary.language_code = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected language_code to be a string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_unallowed_language_code(self):
self.exp_summary.language_code = 'invalid'
with self.assertRaisesRegexp(
utils.ValidationError, 'Invalid language_code: invalid'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_tags(self):
self.exp_summary.tags = 'tags'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected \'tags\' to be a list, received tags'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_tag_in_tags(self):
self.exp_summary.tags = ['tag', 2]
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each tag in \'tags\' to be a string, received \'2\''):
self.exp_summary.validate()
def test_validation_fails_with_empty_tag_in_tags(self):
self.exp_summary.tags = ['', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, 'Tags should be non-empty'):
self.exp_summary.validate()
def test_validation_fails_with_unallowed_characters_in_tag(self):
self.exp_summary.tags = ['123', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Tags should only contain lowercase '
'letters and spaces, received \'123\'')):
self.exp_summary.validate()
def test_validation_fails_with_whitespace_in_tag_start(self):
self.exp_summary.tags = [' ab', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \' ab\''):
self.exp_summary.validate()
def test_validation_fails_with_whitespace_in_tag_end(self):
self.exp_summary.tags = ['ab ', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError,
'Tags should not start or end with whitespace, received \'ab \''):
self.exp_summary.validate()
def test_validation_fails_with_adjacent_whitespace_in_tag(self):
self.exp_summary.tags = ['a b', 'abc']
with self.assertRaisesRegexp(
utils.ValidationError, (
'Adjacent whitespace in tags should '
'be collapsed, received \'a b\'')):
self.exp_summary.validate()
def test_validation_fails_with_duplicate_tags(self):
self.exp_summary.tags = ['abc', 'abc', 'ab']
with self.assertRaisesRegexp(
utils.ValidationError, 'Some tags duplicate each other'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_rating_type(self):
self.exp_summary.ratings = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected ratings to be a dict, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_rating_keys(self):
self.exp_summary.ratings = {'1': 0, '10': 1}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected ratings to have keys: 1, 2, 3, 4, 5, received 1, 10'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_value_type_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': 'one', '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected value to be int, received one'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_value_for_ratings(self):
self.exp_summary.ratings = {'1': 0, '2': -1, '3': 0, '4': 0, '5': 0}
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected value to be non-negative, received -1'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_scaled_average_rating(self):
self.exp_summary.scaled_average_rating = 'one'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected scaled_average_rating to be float, received one'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_status(self):
self.exp_summary.status = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected status to be string, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_community_owned(self):
self.exp_summary.community_owned = '1'
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected community_owned to be bool, received 1'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributors_summary(self):
self.exp_summary.contributors_summary = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributors_summary to be dict, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_owner_ids_type(self):
self.exp_summary.owner_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError, 'Expected owner_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_owner_id_in_owner_ids(self):
self.exp_summary.owner_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in owner_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_editor_ids_type(self):
self.exp_summary.editor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected editor_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_editor_id_in_editor_ids(self):
self.exp_summary.editor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in editor_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_voice_artist_ids_type(self):
self.exp_summary.voice_artist_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected voice_artist_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_voice_artist_id_in_voice_artists_ids(
self):
self.exp_summary.voice_artist_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in voice_artist_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_viewer_ids_type(self):
self.exp_summary.viewer_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected viewer_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_viewer_id_in_viewer_ids(self):
self.exp_summary.viewer_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in viewer_ids to be string, received 2'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributor_ids_type(self):
self.exp_summary.contributor_ids = 0
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected contributor_ids to be list, received 0'):
self.exp_summary.validate()
def test_validation_fails_with_invalid_contributor_id_in_contributor_ids(
self):
self.exp_summary.contributor_ids = ['1', 2, '3']
with self.assertRaisesRegexp(
utils.ValidationError,
'Expected each id in contributor_ids to be string, received 2'):
self.exp_summary.validate()
class YamlCreationUnitTests(test_utils.GenericTestBase):
"""Test creation of explorations from YAML files."""
EXP_ID = 'An exploration_id'
def test_yaml_import_and_export(self):
"""Test the from_yaml() and to_yaml() methods."""
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, title='Title', category='Category')
exploration.add_states(['New state'])
self.assertEqual(len(exploration.states), 2)
exploration.validate()
yaml_content = exploration.to_yaml()
self.assertEqual(yaml_content, self.SAMPLE_YAML_CONTENT)
exploration2 = exp_domain.Exploration.from_yaml('exp2', yaml_content)
self.assertEqual(len(exploration2.states), 2)
yaml_content_2 = exploration2.to_yaml()
self.assertEqual(yaml_content_2, yaml_content)
# Verify SAMPLE_UNTITLED_YAML_CONTENT can be converted to an exploration
# without error.
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml('exp3', 'No_initial_state_name')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'Invalid\ninit_state_name:\nMore stuff')
with self.assertRaises(Exception):
exp_domain.Exploration.from_yaml(
'exp4', 'State1:\n(\nInvalid yaml')
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version >= 10, received: 9'
):
exp_domain.Exploration.from_yaml(
'exp4', self.SAMPLE_UNTITLED_YAML_CONTENT)
with self.assertRaisesRegexp(
Exception, 'Expected a YAML version <= 9'
):
exp_domain.Exploration.from_untitled_yaml(
'exp4', 'Title', 'Category', self.SAMPLE_YAML_CONTENT)
class SchemaMigrationMethodsUnitTests(test_utils.GenericTestBase):
"""Tests the presence of appropriate schema migration methods in the
Exploration domain object class.
"""
def test_correct_states_schema_conversion_methods_exist(self):
"""Test that the right states schema conversion methods exist."""
current_states_schema_version = (
feconf.CURRENT_STATE_SCHEMA_VERSION)
for version_num in range(current_states_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_states_v%s_dict_to_v%s_dict' % (
current_states_schema_version,
current_states_schema_version + 1)))
def test_correct_exploration_schema_conversion_methods_exist(self):
"""Test that the right exploration schema conversion methods exist."""
current_exp_schema_version = (
exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION)
for version_num in range(1, current_exp_schema_version):
self.assertTrue(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
version_num, version_num + 1)))
self.assertFalse(hasattr(
exp_domain.Exploration,
'_convert_v%s_dict_to_v%s_dict' % (
current_exp_schema_version, current_exp_schema_version + 1)))
class SchemaMigrationUnitTests(test_utils.GenericTestBase):
"""Test migration methods for yaml content."""
YAML_CONTENT_V1 = ("""default_skin: conversation_v1
param_changes: []
param_specs: {}
schema_version: 1
states:
- content:
- type: text
value: ''
name: (untitled state)
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
- content:
- type: text
value: ''
name: New state
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V2 = ("""default_skin: conversation_v1
init_state_name: (untitled state)
param_changes: []
param_specs: {}
schema_version: 2
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V3 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 3
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
New state:
content:
- type: text
value: ''
param_changes: []
widget:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
""")
YAML_CONTENT_V4 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 4
skill_tags: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
""")
YAML_CONTENT_V5 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 5
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
YAML_CONTENT_V6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
YAML_CONTENT_V7 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 7
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 4
tags: []
""")
YAML_CONTENT_V8 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 8
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 5
tags: []
""")
YAML_CONTENT_V9 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 9
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
language:
value: ''
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: CodeRepl
param_changes: []
states_schema_version: 6
tags: []
""")
YAML_CONTENT_V10 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 10
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 7
tags: []
title: Title
""")
YAML_CONTENT_V11 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 11
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
states_schema_version: 8
tags: []
title: Title
""")
YAML_CONTENT_V12 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 12
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
id: TextInput
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
id: EndExploration
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks:
- outcome:
dest: END
feedback:
- Correct!
id: TextInput
param_changes: []
states_schema_version: 9
tags: []
title: Title
""")
YAML_CONTENT_V13 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 13
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
- type: text
value: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
- type: text
value: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 10
tags: []
title: Title
""")
YAML_CONTENT_V14 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 14
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: []
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: []
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 11
tags: []
title: Title
""")
YAML_CONTENT_V15 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 15
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
fallbacks: []
hints: []
id: EndExploration
solution: {}
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
fallbacks: []
hints: []
id: TextInput
solution: {}
param_changes: []
states_schema_version: 12
tags: []
title: Title
""")
YAML_CONTENT_V16 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 16
skin_customizations:
panels_contents:
bottom: []
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V17 = ("""author_notes: ''
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 17
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V18 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 18
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
- Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback: []
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback: []
param_changes: []
hints:
- hint_text: ''
id: TextInput
solution:
explanation: ''
answer_is_exclusive: False
correct_answer: Answer
param_changes: []
states_schema_version: 13
tags: []
title: Title
""")
YAML_CONTENT_V19 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 19
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 14
tags: []
title: Title
""")
YAML_CONTENT_V20 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 20
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 15
tags: []
title: Title
""")
YAML_CONTENT_V21 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 21
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- labelled_as_correct: false
outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
states_schema_version: 16
tags: []
title: Title
""")
YAML_CONTENT_V22 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 22
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 17
tags: []
title: Title
""")
YAML_CONTENT_V23 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 23
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 18
tags: []
title: Title
""")
YAML_CONTENT_V24 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 24
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 19
tags: []
title: Title
""")
YAML_CONTENT_V25 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 25
states:
(untitled state):
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
audio_translations: {}
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
audio_translations: {}
html: Congratulations, you have finished!
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
audio_translations: {}
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
audio_translations: {}
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 20
tags: []
title: Title
""")
YAML_CONTENT_V26 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: Correct!
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: Congratulations, you have finished!
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: Title
""")
YAML_CONTENT_V27 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V28 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 28
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 23
tags: []
title: Title
""")
YAML_CONTENT_V29 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 29
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
imageAndRegions:
value:
imagePath: s1ImagePath.png
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
states_schema_version: 24
tags: []
title: Title
""")
YAML_CONTENT_V30 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 30
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 25
tags: []
title: Title
""")
YAML_CONTENT_V31 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 31
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
new_content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 26
tags: []
title: Title
""")
YAML_CONTENT_V32 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 32
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 27
tags: []
title: Title
""")
YAML_CONTENT_V33 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 33
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 28
tags: []
title: Title
""")
YAML_CONTENT_V34 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 34
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 29
tags: []
title: Title
""")
YAML_CONTENT_V35 = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
_LATEST_YAML_CONTENT = YAML_CONTENT_V35
def test_load_from_v1(self):
"""Test direct loading from a v1 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V1)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v2(self):
"""Test direct loading from a v2 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V2)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v3(self):
"""Test direct loading from a v3 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V3)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v4(self):
"""Test direct loading from a v4 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V4)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v5(self):
"""Test direct loading from a v5 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V5)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v6(self):
"""Test direct loading from a v6 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V6)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_cannot_load_from_v6_with_invalid_handler_name(self):
"""Test that loading a v6 yaml file fails when a handler name is not 'submit'."""
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: invalid_handler_name
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with a name '
'\'submit\' in states v3 to v4 conversion process. '):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_rule(self):
"""Test that loading a v6 yaml file fails when a rule is neither default nor atomic."""
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: invalid_rule
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert default and atomic '
'rules in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_subject(self):
"""Test that loading a v6 yaml file fails when a rule subject is not 'answer'."""
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
subject: invalid_subject
dest: END
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Error: Can only convert rules with an \'answer\' '
'subject in states v3 to v4 conversion process.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_cannot_load_from_v6_with_invalid_interaction_id(self):
"""Test that loading a v6 yaml file fails for a non-existent interaction ID."""
invalid_yaml_content_v6 = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 6
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
triggers: []
param_changes: []
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: invalid_id
triggers: []
param_changes: []
states_schema_version: 3
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Trying to migrate exploration containing non-existent '
'interaction ID'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', invalid_yaml_content_v6)
def test_load_from_v7(self):
"""Test direct loading from a v7 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V7)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v8(self):
"""Test direct loading from a v8 yaml file."""
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V8)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v9(self):
"""Test direct loading from a v9 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
language:
value: python
placeholder:
value: ''
postCode:
value: ''
preCode:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: CodeRepl
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', self.YAML_CONTENT_V9)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v10(self):
"""Test direct loading from a v10 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V10)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v11(self):
"""Test direct loading from a v11 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V11)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v12(self):
"""Test direct loading from a v12 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Correct!</p>
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V12)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v13(self):
"""Test direct loading from a v13 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V13)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v14(self):
"""Test direct loading from a v14 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V14)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v15(self):
"""Test direct loading from a v15 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V15)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v16(self):
"""Test direct loading from a v16 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V16)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v17(self):
"""Test direct loading from a v17 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V17)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v18(self):
"""Test direct loading from a v18 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: ''
id: TextInput
solution:
answer_is_exclusive: false
correct_answer: Answer
explanation:
content_id: solution
html: ''
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
hint_1: {}
solution: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V18)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v19(self):
"""Test direct loading from a v19 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V19)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v20(self):
"""Test direct loading from a v20 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V20)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v21(self):
"""Test direct loading from a v21 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
allowImproperFraction:
value: true
allowNonzeroIntegerPart:
value: true
customPlaceholder:
value: ''
placeholder:
value: ''
requireSimplestForm:
value: false
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: FractionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V21)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v22(self):
"""Test direct loading from a v22 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V22)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v23(self):
"""Test direct loading from a v23 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V23)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v24(self):
"""Test direct loading from a v24 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V24)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v25(self):
"""Test direct loading from a v25 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V25)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v26(self):
"""Test direct loading from a v26 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v27(self):
"""Test direct loading from a v27 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v28(self):
"""Test direct loading from a v28 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V28)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v29(self):
"""Test direct loading from a v29 yaml file."""
latest_yaml_content = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
highlightRegionsOnHover:
value: false
imageAndRegions:
value:
imagePath: s1ImagePath_height_120_width_120.png
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ImageClickInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V29)
self.assertEqual(exploration.to_yaml(), latest_yaml_content)
def test_load_from_v30(self):
"""Test direct loading from a v30 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V30)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v31(self):
"""Test direct loading from a v31 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V31)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v32(self):
"""Test direct loading from a v32 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V32)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_load_from_v33(self):
"""Test direct loading from a v33 yaml file."""
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V33)
self.assertEqual(exploration.to_yaml(), self._LATEST_YAML_CONTENT)
def test_cannot_load_from_yaml_with_no_schema_version(self):
sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
with self.assertRaisesRegexp(
Exception, 'Invalid YAML file: no schema version specified.'):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', sample_yaml_content)
def test_cannot_load_from_yaml_with_invalid_schema_version(self):
sample_yaml_content = ("""author_notes: ''
blurb: ''
default_skin: conversation_v1
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 0
skin_customizations:
panels_contents: {}
states:
(untitled state):
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
inputs:
x: InputString
name: Equals
rule_type: atomic
dest: END
feedback:
- Correct!
param_changes: []
- definition:
rule_type: default
dest: (untitled state)
feedback: []
param_changes: []
id: TextInput
param_changes: []
New state:
content:
- type: text
value: ''
interaction:
customization_args:
placeholder:
value: ''
rows:
value: 1
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: TextInput
param_changes: []
widget:
customization_args: {}
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
sticky: false
widget_id: TextInput
END:
content:
- type: text
value: Congratulations, you have finished!
interaction:
customization_args:
recommendedExplorationIds:
value: []
handlers:
- name: submit
rule_specs:
- definition:
rule_type: default
dest: END
feedback: []
param_changes: []
id: EndExploration
triggers: []
param_changes: []
tags: []
""")
with self.assertRaisesRegexp(
Exception,
'Sorry, we can only process v1 to v%s exploration YAML files '
'at present.' % exp_domain.Exploration.CURRENT_EXP_SCHEMA_VERSION):
exp_domain.Exploration.from_untitled_yaml(
'eid', 'Title', 'Category', sample_yaml_content)
class HTMLMigrationUnitTests(test_utils.GenericTestBase):
"""Test HTML migration."""
YAML_CONTENT_V26_TEXTANGULAR = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 26
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
solution: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: Default <p>outcome</p> for state1
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: This is <i>solution</i> for state1
param_changes: []
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p>this <i>is </i>state2
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <div>Outcome1 for state2</div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: <pre>Outcome2 <br>for state2</pre>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p>value1 <br>for MultipleChoice
- This is value2<span> for <br>MultipleChoice</span>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is<div> html1<b> for </b></div>state2</p>
- hint_content:
content_id: hint_2
html: Here is link 2 <oppia-noninteractive-link
text-with-value="&quot;discussion forum&quot;"
url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;">
</oppia-noninteractive-link>
id: MultipleChoiceInput
solution: null
param_changes: []
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: Here is the image1 <i><oppia-noninteractive-image
caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></i>Here is the image2
<div><oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue.png&quot;"
alt-with-value="&quot;&quot;">
</oppia-noninteractive-image></div>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- This <span>is value1 for </span>ItemSelectionInput
rule_type: Equals
- inputs:
x:
- This is value3 for ItemSelectionInput
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- This <span>is value1 for </span>ItemSelection
- This <code>is value2</code> for ItemSelection
- This is value3 for ItemSelection
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
states_schema_version: 21
tags: []
title: title
""")
# pylint: disable=line-too-long
YAML_CONTENT_V35_IMAGE_DIMENSIONS = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: category
correctness_feedback_enabled: false
init_state_name: Introduction
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
Introduction:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args: {}
default_outcome:
dest: Introduction
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: null
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
state1:
classifier_model_id: null
content:
content_id: content
html: <blockquote><p>Hello, this is state1</p></blockquote>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: <p>Default </p><p>outcome</p><p> for state1</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution:
answer_is_exclusive: true
correct_answer: Answer1
explanation:
content_id: solution
html: <p>This is <em>solution</em> for state1</p>
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solution: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
solution: {}
state2:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, </p><p>this <em>is </em>state2</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Outcome1 for state2</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
- inputs:
x: 1
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
- outcome:
dest: state3
feedback:
content_id: feedback_2
html: "<pre>Outcome2 \\nfor state2</pre>"
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: 0
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is </p><p>value1 <br>for MultipleChoice</p>
- <p>This is value2 for <br>MultipleChoice</p>
default_outcome:
dest: state2
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints:
- hint_content:
content_id: hint_1
html: <p>Hello, this is</p><p> html1<strong> for </strong></p><p>state2</p>
- hint_content:
content_id: hint_2
html: <p>Here is link 2 <oppia-noninteractive-link text-with-value="&quot;discussion
forum&quot;" url-with-value="&quot;https://groups.google.com/
forum/?fromgroups#!forum/oppia&quot;"> </oppia-noninteractive-link></p>
id: MultipleChoiceInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
feedback_2: {}
hint_1: {}
hint_2: {}
state3:
classifier_model_id: null
content:
content_id: content
html: <p>Hello, this is state3</p>
interaction:
answer_groups:
- outcome:
dest: state1
feedback:
content_id: feedback_1
html: <p>Here is the image1 </p><oppia-noninteractive-image alt-with-value="&quot;&quot;"
caption-with-value="&quot;&quot;" filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image><p>Here is the image2 </p><oppia-noninteractive-image
alt-with-value="&quot;&quot;" caption-with-value="&quot;&quot;"
filepath-with-value="&quot;startBlue_height_490_width_120.png&quot;">
</oppia-noninteractive-image>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x:
- <p>This is value1 for ItemSelectionInput</p>
rule_type: Equals
- inputs:
x:
- <p>This is value3 for ItemSelectionInput</p>
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
choices:
value:
- <p>This is value1 for ItemSelection</p>
- <p>This is value2 for ItemSelection</p>
- <p>This is value3 for ItemSelection</p>
maxAllowableSelectionCount:
value: 1
minAllowableSelectionCount:
value: 1
default_outcome:
dest: state3
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: ItemSelectionInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
states_schema_version: 30
tags: []
title: title
""")
YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 27
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <p><oppia-noninteractive-image filepath-with-value="&quot;random.png&quot;"></oppia-noninteractive-image>Hello this
is test case to check image tag inside p tag</p>
content_ids_to_audio_translations:
content: {}
default_outcome: {}
feedback_1: {}
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
content_ids_to_audio_translations:
content: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
New state:
classifier_model_id: null
content:
content_id: content
html: ''
content_ids_to_audio_translations:
content: {}
default_outcome: {}
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
states_schema_version: 22
tags: []
title: Title
""")
YAML_CONTENT_V35_WITH_IMAGE_CAPTION = ("""author_notes: ''
auto_tts_enabled: true
blurb: ''
category: Category
correctness_feedback_enabled: false
init_state_name: (untitled state)
language_code: en
objective: ''
param_changes: []
param_specs: {}
schema_version: 35
states:
(untitled state):
classifier_model_id: null
content:
content_id: content
html: <oppia-noninteractive-image caption-with-value="&quot;&quot;"
filepath-with-value="&quot;random_height_490_width_120.png&quot;"></oppia-noninteractive-image><p>Hello
this is test case to check image tag inside p tag</p>
interaction:
answer_groups:
- outcome:
dest: END
feedback:
content_id: feedback_1
html: <p>Correct!</p>
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
rule_specs:
- inputs:
x: InputString
rule_type: Equals
tagged_skill_misconception_id: null
training_data: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: (untitled state)
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
feedback_1: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
feedback_1: {}
END:
classifier_model_id: null
content:
content_id: content
html: <p>Congratulations, you have finished!</p>
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
recommendedExplorationIds:
value: []
default_outcome: null
hints: []
id: EndExploration
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
New state:
classifier_model_id: null
content:
content_id: content
html: ''
interaction:
answer_groups: []
confirmed_unclassified_answers: []
customization_args:
placeholder:
value: ''
rows:
value: 1
default_outcome:
dest: END
feedback:
content_id: default_outcome
html: ''
labelled_as_correct: false
missing_prerequisite_skill_id: null
param_changes: []
refresher_exploration_id: null
hints: []
id: TextInput
solution: null
param_changes: []
recorded_voiceovers:
voiceovers_mapping:
content: {}
default_outcome: {}
solicit_answer_details: false
written_translations:
translations_mapping:
content: {}
default_outcome: {}
states_schema_version: 30
tags: []
title: Title
""")
# pylint: enable=line-too-long
def test_load_from_v26_textangular(self):
"""Test direct loading from a v26 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V26_TEXTANGULAR)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_IMAGE_DIMENSIONS)
def test_load_from_v27_without_image_caption(self):
"""Test direct loading from a v27 yaml file."""
mock_get_filename_with_dimensions_context = self.swap(
html_validation_service, 'get_filename_with_dimensions',
mock_get_filename_with_dimensions)
with mock_get_filename_with_dimensions_context:
exploration = exp_domain.Exploration.from_yaml(
'eid', self.YAML_CONTENT_V27_WITHOUT_IMAGE_CAPTION)
self.assertEqual(
exploration.to_yaml(), self.YAML_CONTENT_V35_WITH_IMAGE_CAPTION)
class ConversionUnitTests(test_utils.GenericTestBase):
"""Test conversion methods."""
def test_convert_exploration_to_player_dict(self):
exp_title = 'Title'
second_state_name = 'first state'
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title=exp_title, category='Category')
exploration.add_states([second_state_name])
def _get_default_state_dict(content_str, dest_name):
"""Gets the default state dict of the exploration."""
return {
'classifier_model_id': None,
'content': {
'content_id': 'content',
'html': content_str,
},
'recorded_voiceovers': {
'voiceovers_mapping': {
'content': {},
'default_outcome': {}
}
},
'solicit_answer_details': False,
'written_translations': {
'translations_mapping': {
'content': {},
'default_outcome': {}
}
},
'interaction': {
'answer_groups': [],
'confirmed_unclassified_answers': [],
'customization_args': {},
'default_outcome': {
'dest': dest_name,
'feedback': {
'content_id': feconf.DEFAULT_OUTCOME_CONTENT_ID,
'html': ''
},
'labelled_as_correct': False,
'param_changes': [],
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'hints': [],
'id': None,
'solution': None,
},
'param_changes': [],
}
self.assertEqual(exploration.to_player_dict(), {
'init_state_name': feconf.DEFAULT_INIT_STATE_NAME,
'title': exp_title,
'objective': feconf.DEFAULT_EXPLORATION_OBJECTIVE,
'states': {
feconf.DEFAULT_INIT_STATE_NAME: _get_default_state_dict(
feconf.DEFAULT_INIT_STATE_CONTENT_STR,
feconf.DEFAULT_INIT_STATE_NAME),
second_state_name: _get_default_state_dict(
'', second_state_name),
},
'param_changes': [],
'param_specs': {},
'language_code': 'en',
'correctness_feedback_enabled': False,
})
class StateOperationsUnitTests(test_utils.GenericTestBase):
"""Test methods operating on states."""
def test_delete_state(self):
"""Test deletion of states."""
exploration = exp_domain.Exploration.create_default_exploration('eid')
exploration.add_states(['first state'])
with self.assertRaisesRegexp(
ValueError, 'Cannot delete initial state'
):
exploration.delete_state(exploration.init_state_name)
exploration.add_states(['second state'])
exploration.delete_state('second state')
with self.assertRaisesRegexp(ValueError, 'fake state does not exist'):
exploration.delete_state('fake state')
class HtmlCollectionTests(test_utils.GenericTestBase):
"""Test method to obtain all html strings."""
def test_all_html_strings_are_collected(self):
exploration = exp_domain.Exploration.create_default_exploration(
'eid', title='title', category='category')
exploration.add_states(['state1', 'state2', 'state3', 'state4'])
state1 = exploration.states['state1']
state2 = exploration.states['state2']
state3 = exploration.states['state3']
state4 = exploration.states['state4']
content1_dict = {
'content_id': 'content',
'html': '<blockquote>Hello, this is state1</blockquote>'
}
content2_dict = {
'content_id': 'content',
'html': '<pre>Hello, this is state2</pre>'
}
content3_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state3</p>'
}
content4_dict = {
'content_id': 'content',
'html': '<p>Hello, this is state4</p>'
}
state1.update_content(
state_domain.SubtitledHtml.from_dict(content1_dict))
state2.update_content(
state_domain.SubtitledHtml.from_dict(content2_dict))
state3.update_content(
state_domain.SubtitledHtml.from_dict(content3_dict))
state4.update_content(
state_domain.SubtitledHtml.from_dict(content4_dict))
state1.update_interaction_id('TextInput')
state2.update_interaction_id('MultipleChoiceInput')
state3.update_interaction_id('ItemSelectionInput')
state4.update_interaction_id('DragAndDropSortInput')
customization_args_dict1 = {
'placeholder': {'value': ''},
'rows': {'value': 1}
}
customization_args_dict2 = {
'choices': {'value': [
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>'
]}
}
customization_args_dict3 = {
'choices': {'value': [
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>'
]}
}
customization_args_dict4 = {
'choices': {'value': [
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>',
]}
}
state1.update_interaction_customization_args(customization_args_dict1)
state2.update_interaction_customization_args(customization_args_dict2)
state3.update_interaction_customization_args(customization_args_dict3)
state4.update_interaction_customization_args(customization_args_dict4)
default_outcome_dict1 = {
'dest': 'state2',
'feedback': {
'content_id': 'default_outcome',
'html': '<p>Default outcome for state1</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
}
state1.update_interaction_default_outcome(default_outcome_dict1)
hint_list2 = [{
'hint_content': {
'content_id': 'hint_1',
'html': '<p>Hello, this is html1 for state2</p>'
}
}, {
'hint_content': {
'content_id': 'hint_2',
'html': '<p>Hello, this is html2 for state2</p>'
}
}]
state2.update_interaction_hints(hint_list2)
solution_dict1 = {
'interaction_id': '',
'answer_is_exclusive': True,
'correct_answer': 'Answer1',
'explanation': {
'content_id': 'solution',
'html': '<p>This is solution for state1</p>'
}
}
state1.update_interaction_solution(solution_dict1)
answer_group_list2 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}, {
'rule_type': 'Equals',
'inputs': {'x': 1}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome1 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}, {
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': 0}
}],
'outcome': {
'dest': 'state3',
'feedback': {
'content_id': 'feedback_2',
'html': '<p>Outcome2 for state2</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
answer_group_list3 = [{
'rule_specs': [{
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value1 for ItemSelectionInput</p>'
]}
}, {
'rule_type': 'Equals',
'inputs': {'x': [
'<p>This is value3 for ItemSelectionInput</p>'
]}
}],
'outcome': {
'dest': 'state1',
'feedback': {
'content_id': 'feedback_1',
'html': '<p>Outcome for state3</p>'
},
'param_changes': [],
'labelled_as_correct': False,
'refresher_exploration_id': None,
'missing_prerequisite_skill_id': None
},
'training_data': [],
'tagged_skill_misconception_id': None
}]
state2.update_interaction_answer_groups(answer_group_list2)
state3.update_interaction_answer_groups(answer_group_list3)
expected_html_list = [
'',
'',
'<pre>Hello, this is state2</pre>',
'<p>Outcome1 for state2</p>',
'<p>Outcome2 for state2</p>',
'',
'<p>Hello, this is html1 for state2</p>',
'<p>Hello, this is html2 for state2</p>',
'<p>This is value1 for MultipleChoice</p>',
'<p>This is value2 for MultipleChoice</p>',
'<blockquote>Hello, this is state1</blockquote>',
'<p>Default outcome for state1</p>',
'<p>This is solution for state1</p>',
'<p>Hello, this is state3</p>',
'<p>Outcome for state3</p>',
'<p>This is value1 for ItemSelectionInput</p>',
'<p>This is value3 for ItemSelectionInput</p>',
'',
'<p>This is value1 for ItemSelection</p>',
'<p>This is value2 for ItemSelection</p>',
'<p>This is value3 for ItemSelection</p>',
'<p>Hello, this is state4</p>',
'',
'<p>This is value1 for DragAndDropSortInput</p>',
'<p>This is value2 for DragAndDropSortInput</p>'
]
actual_outcome_list = exploration.get_all_html_content_strings()
self.assertEqual(actual_outcome_list, expected_html_list)
| with self.assertRaisesRegexp(
Exception, 'Command invalid_cmd is not allowed'):
exp_domain.ExplorationChange({
'cmd': 'invalid_cmd'
}) |
client.rs | // Copyright 2017 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use futures::Future;
use async::Executor;
use async::Kicker;
use call::client::{
CallOption, ClientCStreamReceiver, ClientCStreamSender, ClientDuplexReceiver,
ClientDuplexSender, ClientSStreamReceiver, ClientUnaryReceiver,
};
use call::{Call, Method};
use channel::Channel;
use error::Result;
/// A generic client for making RPC calls.
pub struct Client {
channel: Channel,
// Used to kick its completion queue.
kicker: Kicker,
}
impl Client {
/// Initialize a new [`Client`].
pub fn new(channel: Channel) -> Client |
/// Create a synchronous unary RPC call.
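///
/// A minimal usage sketch; the method descriptor and request value here are
/// illustrative placeholders, not items defined in this crate:
///
/// ```ignore
/// let resp = client.unary_call(&GREET_METHOD, &req, CallOption::default())?;
/// ```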
pub fn unary_call<Req, Resp>(
&self,
method: &Method<Req, Resp>,
req: &Req,
opt: CallOption,
) -> Result<Resp> {
let f = self.unary_call_async(method, req, opt)?;
f.wait()
}
/// Create an asynchronous unary RPC call.
pub fn unary_call_async<Req, Resp>(
&self,
method: &Method<Req, Resp>,
req: &Req,
opt: CallOption,
) -> Result<ClientUnaryReceiver<Resp>> {
Call::unary_async(&self.channel, method, req, opt)
}
/// Create an asynchronous client-streaming call.
///
/// The client sends a stream of requests and the server responds with a single response.
pub fn client_streaming<Req, Resp>(
&self,
method: &Method<Req, Resp>,
opt: CallOption,
) -> Result<(ClientCStreamSender<Req>, ClientCStreamReceiver<Resp>)> {
Call::client_streaming(&self.channel, method, opt)
}
/// Create an asynchronous server-streaming call.
///
/// The client sends one request and the server responds with a stream of responses.
pub fn server_streaming<Req, Resp>(
&self,
method: &Method<Req, Resp>,
req: &Req,
opt: CallOption,
) -> Result<ClientSStreamReceiver<Resp>> {
Call::server_streaming(&self.channel, method, req, opt)
}
/// Create an asynchronous duplex-streaming call.
///
/// The client sends a stream of requests and the server responds with a stream of
/// responses. The two streams are completely independent, so both sides can be
/// sending messages at the same time.
pub fn duplex_streaming<Req, Resp>(
&self,
method: &Method<Req, Resp>,
opt: CallOption,
) -> Result<(ClientDuplexSender<Req>, ClientDuplexReceiver<Resp>)> {
Call::duplex_streaming(&self.channel, method, opt)
}
/// Spawn the future into the current gRPC poll thread.
///
/// This can avoid a lot of context switching, but please make
/// sure the future does not perform any heavy work.
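///
/// A minimal sketch, assuming a trivial `futures` 0.1 future that is cheap to poll:
///
/// ```ignore
/// client.spawn(futures::future::ok::<(), ()>(()));
/// ```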
pub fn spawn<F>(&self, f: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
{
let kicker = self.kicker.clone();
Executor::new(self.channel.cq()).spawn(f, kicker)
}
}
| {
let kicker = channel.create_kicker().unwrap();
Client { channel, kicker }
} |
verify.py | import json
from flask import (
abort,
current_app,
flash,
redirect,
render_template, | url_for,
)
from itsdangerous import SignatureExpired
from notifications_utils.url_safe_token import check_token
from app import user_api_client
from app.main import main
from app.main.forms import TwoFactorForm
from app.models.service import Service
from app.models.user import InvitedOrgUser, InvitedUser, User
from app.utils.login import redirect_to_sign_in
@main.route('/verify', methods=['GET', 'POST'])
@redirect_to_sign_in
def verify():
user_id = session['user_details']['id']
def _check_code(code):
return user_api_client.check_verify_code(user_id, code, 'sms')
form = TwoFactorForm(_check_code)
if form.validate_on_submit():
session.pop('user_details', None)
return activate_user(user_id)
return render_template('views/two-factor-sms.html', form=form)
@main.route('/verify-email/<token>')
def verify_email(token):
try:
token_data = check_token(
token,
current_app.config['SECRET_KEY'],
current_app.config['DANGEROUS_SALT'],
current_app.config['EMAIL_EXPIRY_SECONDS']
)
except SignatureExpired:
flash("The link in the email we sent you has expired. We've sent you a new one.")
return redirect(url_for('main.resend_email_verification'))
# The token contains a JSON blob of the format {'user_id': '...', 'secret_code': '...'} (secret_code is unused).
token_data = json.loads(token_data)
user = User.from_id(token_data['user_id'])
if not user:
abort(404)
if user.is_active:
flash("That verification link has expired.")
return redirect(url_for('main.sign_in'))
if user.email_auth:
session.pop('user_details', None)
return activate_user(user.id)
user.send_verify_code()
session['user_details'] = {"email": user.email_address, "id": user.id}
return redirect(url_for('main.verify'))
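# Illustrative only: the token checked above is expected to be produced
# elsewhere (e.g. by the token generator in notifications_utils.url_safe_token)
# from a JSON blob such as '{"user_id": "1234", "secret_code": null}'.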
def activate_user(user_id):
user = User.from_id(user_id)
# the user will have a new current_session_id set by the API - store it in the cookie for future requests
session['current_session_id'] = user.current_session_id
organisation_id = session.get('organisation_id')
activated_user = user.activate()
activated_user.login()
invited_user = InvitedUser.from_session()
if invited_user:
service_id = _add_invited_user_to_service(invited_user)
service = Service.from_id(service_id)
if service.has_permission('broadcast'):
if service.live:
return redirect(url_for('main.broadcast_tour_live', service_id=service.id, step_index=1))
else:
return redirect(url_for('main.broadcast_tour', service_id=service.id, step_index=1))
return redirect(url_for('main.service_dashboard', service_id=service_id))
invited_org_user = InvitedOrgUser.from_session()
if invited_org_user:
user_api_client.add_user_to_organisation(invited_org_user.organisation, user_id)
if organisation_id:
return redirect(url_for('main.organisation_dashboard', org_id=organisation_id))
else:
return redirect(url_for('main.add_service', first='first'))
def _add_invited_user_to_service(invitation):
user = User.from_id(session['user_id'])
service_id = invitation.service
user.add_to_service(
service_id,
invitation.permissions,
invitation.folder_permissions,
invitation.from_user.id,
)
return service_id | session, |
nh_index_price.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Date: 2021/12/20 14:52
Desc: Nanhua Futures - commodity index historical trend - price index - values
http://www.nanhua.net/nhzc/varietytrend.html
The index starts at 1000 points and is accumulated from returns
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
"""
import time
import requests
import pandas as pd
def futures_nh_index_symbol_table() -> pd.DataFrame:
"""
Nanhua Futures - table of all varieties covered by the Nanhua index
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:return: table of all varieties covered by the Nanhua index
:rtype: pandas.DataFrame
"""
url = "http://www.nanhua.net/ianalysis/plate-variety.json"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df['firstday'] = pd.to_datetime(temp_df['firstday']).dt.date
return temp_df
def futures_nh_price_index(symbol: str = "A") -> pd.DataFrame:
"""
Nanhua Futures - single-variety Nanhua index - price - full history
http://www.nanhua.net/ianalysis/varietyindex/price/A.json?t=1574932974280
:param symbol: obtained via ak.futures_nh_index_symbol_table()
:type symbol: str
:return: Nanhua Futures - single-variety Nanhua index - price - full history
:rtype: pandas.DataFrame
""" | symbol_df = futures_nh_index_symbol_table()
if symbol in symbol_df["code"].tolist():
t = time.time()
url = f"http://www.nanhua.net/ianalysis/varietyindex/price/{symbol}.json?t={int(round(t * 1000))}"
r = requests.get(url)
data_json = r.json()
temp_df = pd.DataFrame(data_json)
temp_df.columns = ["date", "value"]
temp_df['date'] = pd.to_datetime(temp_df["date"], unit='ms').dt.date
return temp_df
if __name__ == "__main__":
futures_nh_index_symbol_table_df = futures_nh_index_symbol_table()
print(futures_nh_index_symbol_table_df)
futures_nh_price_index_df = futures_nh_price_index(symbol="NHAI")
print(futures_nh_price_index_df) | |
usuarios.py | import abc
import os
from ferramentas_produtos import *
from ferramentas_usuarios import *
class Usuario(abc.ABC):
licenca = None  # Class attribute.
def __init__(self, login, senha):
|
def cadastra_produto(self, data_entrega, qtd):
if not os.path.exists('produtos'):
os.mkdir('produtos')
cod = gera_codigo()
arq1 = open('produtos/' + cod + '.txt', 'w+')
arq1.writelines('Código: ' + cod + u'\n')
arq1.writelines('Quantidade: ' + qtd + u'\n')
arq1.writelines('Geração: ' + data_ger_formatada() + ' - Prazo: ' + data_entrega_formatada(data_entrega) +
' - (VEND: ' + str(self.login) + ')' u'\n')
arq1.writelines(u'\n')
arq1.close()
arq2 = open('produtos_registrados.txt', 'a')
arq2.writelines(cod + u'\n')
arq2.close()
return f'Produto "{cod}" registrado com sucesso!'
def cadastra_processo(self, codigo, processo):
arq = open('produtos/' + codigo + '.txt')
infile = arq.readlines()
arq.close()
arq = open('produtos/' + codigo + '.txt', 'a')
if 'Processos:\n' not in infile:
arq.writelines('Processos:' + u'\n')
n = 1
else:
n = len(infile) - 4
arq.writelines(str(n) + ') ' + processo + ' - ' + data_ger_formatada() + ' - ' + self.login + u'\n')
arq.close()
return f'Processo "{processo}" para o produto "{codigo}" cadastrado com sucesso.'
def inicia_processo(self, codigo):
arq = open('produtos/' + codigo + '.txt')
arq2 = open('produtos/' + codigo + '.txt')
infile = arq.readlines()
infile2 = arq2.read()
arq.close()
arq = open('produtos/' + codigo + '.txt', 'a')
if ') Controle - ' not in infile2:
return f'Os processos ainda não foram definidos, não é possível iniciar o processo.\n'
if 'Histórico de operações:\n' not in infile:
arq.writelines(u'\n')
arq.writelines('Histórico de operações:' + u'\n')
n = 1
arq.writelines(str(n) + ') ' + 'Início em: ' + data_ger_formatada() + ' (OP: ' + self.login + ')' + u'\n')
arq.close()
return f'Confirmado inicio do processo "{n}" para o produto "{codigo}".'
else:
if ' Finalizado em: ' not in infile[len(infile) - 1]:
return 'Não é possível iniciar o processo pois a etapa anterior ainda nao foi finalizada.\n'
n = int(infile[len(infile) - 1][0]) + 1
if n >= int(infile2[infile2.index('Controle') - 3]):
return f'Os processos para o produto "{codigo}" já foram concluídos.\n'
arq.writelines(str(n) + ') ' + 'Início em: ' + data_ger_formatada() + ' (OP: ' + self.login + ')' + u'\n')
arq.close()
return f'Confirmado inicio do processo "{n}°" para o produto "{codigo}".'
def finaliza_processo(self, codigo):
arq = open('produtos/' + codigo + '.txt')
infile = arq.readlines()
arq.close()
arq = open('produtos/' + codigo + '.txt', 'a')
if 'Histórico de operações:\n' in infile[len(infile) - 2]:
n = 1
arq.writelines(str(n) + ') ' + 'Finalizado em: ' + data_ger_formatada() + ' (OP: ' + self.login + ')' +
u'\n')
arq.close()
return f'Confirmado termino do processo "{n}°" para o produto "{codigo}".'
else:
if ' Finalizado em: ' in infile[len(infile) - 1]:
return 'Não há processo em aberto para ser finalizado.\n'
n = int(infile[len(infile) - 1][0])
arq.writelines(str(n) + ') ' + 'Finalizado em: ' + data_ger_formatada() + ' (OP: ' + self.login + ')'
+ u'\n')
arq.close()
return f'Confirmado termino do processo "{n}°" para o produto "{codigo}".'
def inicia_controle(self, codigo):
arq = open('produtos/' + codigo + '.txt')
infile = arq.readlines()
arq.close()
if self.inicia_processo(codigo) == f'Os processos para o produto "{codigo}" já foram concluídos.\n':
if 'Controle de qualidade:\n' not in infile:
arq = open('produtos/' + codigo + '.txt', 'a')
arq.writelines(u'\n')
arq.writelines('Controle de qualidade:' + u'\n')
arq.writelines('Recebido em: ' + data_ger_formatada() + ' (Téc: ' + self.login + ')' + u'\n')
arq.close()
return f'Registrado recebimento do produto "{codigo}" ao controle de qualidade.'
else:
return f'Já foi registrada a chegada do produto "{codigo}" ao controle de qualidade.'
else:
arq.close()
return f'O produto "{codigo}" ainda não foi concluído ou já foi recebido pelo controle de qualidade.'
def finaliza_controle(self, codigo, decisao):
arq = open('produtos/' + codigo + '.txt')
infile = arq.readlines()
arq.close()
if 'Controle de qualidade:\n' in infile:
arq = open('produtos/' + codigo + '.txt', 'a')
if decisao == '1':
arq.writelines('Aprovado em: ' + data_ger_formatada() + ' (Téc: ' + self.login + ')' + u'\n')
arq.close()
return f'O produto "{codigo}" foi aprovado.'
else:
arq.writelines('Reprovado em: ' + data_ger_formatada() + ' (Téc: ' + self.login + ')' + u'\n')
arq.close()
return f'O produto "{codigo}" foi reprovado.'
else:
return f'O produto "{codigo}" ainda não pode ser finalizado.'
def pesquisar_produto(self, codigo):
arq = open('produtos/' + codigo + '.txt')
infile = arq.read()
arq.close()
return infile
class Admin(Usuario):
def __init__(self, login, senha):
super().__init__(login, senha)
@staticmethod
def cadastra_usuario(username, password, licenca):
arq = open('registrados.txt', 'a')
arq.write(username + ' ' + password + ' ' + licenca + '\n')
arq.close()  # The file is closed from append mode so it can be opened again later.
return f'Cadastro do usuário "{username}" realizado com sucesso!\n'
class Comercial(Usuario):
def __init__(self, login, senha):
super().__init__(login, senha)
def cadastra_processo(self, codigo, processo):
raise NotImplementedError
def inicia_processo(self, codigo):
raise NotImplementedError
def finaliza_processo(self, codigo):
raise NotImplementedError
def inicia_controle(self, codigo):
raise NotImplementedError
def finaliza_controle(self, codigo, decisao):
raise NotImplementedError
class Engenharia(Usuario):
def __init__(self, login, senha):
super().__init__(login, senha)
def cadastra_produto(self, data_entrega, qtd):
raise NotImplementedError
def inicia_processo(self, codigo):
raise NotImplementedError
def finaliza_processo(self, codigo):
raise NotImplementedError
def inicia_controle(self, codigo):
raise NotImplementedError
def finaliza_controle(self, codigo, decisao):
raise NotImplementedError
class Operador(Usuario):
def __init__(self, login, senha):
super().__init__(login, senha)
def cadastra_processo(self, codigo, processo):
raise NotImplementedError
def cadastra_produto(self, data_entrega, qtd):
raise NotImplementedError
def inicia_controle(self, codigo):
raise NotImplementedError
def finaliza_controle(self, codigo, decisao):
raise NotImplementedError
class ContQualidade(Usuario):
def __init__(self, login, senha):
super().__init__(login, senha)
def cadastra_processo(self, codigo, processo):
raise NotImplementedError
def cadastra_produto(self, data_entrega, qtd):
raise NotImplementedError
def inicia_processo(self, codigo):
raise NotImplementedError
def finaliza_processo(self, codigo):
raise NotImplementedError
| self.login = login
self.senha = senha
self.licenca = Usuario.licenca |
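# A minimal usage sketch (assumes 'registrados.txt' is writable; the login,
# password and licence strings below are illustrative):
#   admin = Admin('alice', 'senha123')
#   print(admin.cadastra_usuario('bob', '1234', 'operador'))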
fabcar.go | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* The sample smart contract for documentation topic:
* Writing Your First Blockchain Application
*/
package main
/* Imports | * 4 utility libraries for formatting, handling bytes, reading and writing JSON, and string conversion
 * 2 Hyperledger Fabric-specific libraries for Smart Contracts
*/
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"github.com/hyperledger/fabric/core/chaincode/shim"
sc "github.com/hyperledger/fabric/protos/peer"
)
// Define the Smart Contract structure
type SmartContract struct {
}
// Define the car structure, with 4 properties. Structure tags are used by encoding/json library
type Car struct {
Make string `json:"make"`
Model string `json:"model"`
Colour string `json:"colour"`
Owner string `json:"owner"`
}
type User struct {
TableName string `json:"tableName"`
Key string `json:"key"`
Token string `json:"token"`
Name string `json:"name"`
Email string `json:"email"`
PasswordHash string `json:"passwordHash"`
PublicKey string `json:"publickey"`
NewFilePath string `json:"newFilePath"`
LatestHashFile string `json:"latestHashFile"`
Balance string `json:"balance"`
}
type RAPC struct {
TableName string `json:"tableName"`
Key string `json:"key"`
UserKey string `json:"userKey"`
Name string `json:"name"`
Email string `json:"email"`
PostCnt int `json:"postCnt"`
LoveCnt int `json:"loveCnt"`
}
type IP struct {
TableName string `json:"tableName"`
Key string `json:"key"`
UserKey string `json:"userKey"`
Name string `json:"name"`
Email string `json:"email"`
FileName string `json:"fileName"`
NewFilePath string `json:"newFilePath"`
LatestHashFile string `json:"latestHashFile"`
DateTime string `json:"dateTime"`
IsImageOrPdf string `json:"isImageOrPdf"`
IsImage string `json:"isImage"`
IsPdf string `json:"isPdf"`
Price string `json:"price"`
PrevOwnerKey string `json:"prevOwnerKey"`
}
type Request struct {
TableName string `json:"tableName"`
Key string `json:"key"`
IPKey string `json:"ipKey"`
SenderKey string `json:"senderKey"`
ReceiverKey string `json:"receiverKey"`
SenderName string `json:"senderName"`
ReceiverName string `json:"receiverName"`
SenderEmail string `json:"senderEmail"`
ReceiverEmail string `json:"receiverEmail"`
FileName string `json:"fileName"`
LatestHashFile string `json:"latestHashFile"`
IsValid string `json:"isValid"`
}
/*
* The Init method is called when the Smart Contract "fabcar" is instantiated by the blockchain network
* Best practice is to have any Ledger initialization in separate function -- see initLedger()
*/
func (s *SmartContract) Init(APIstub shim.ChaincodeStubInterface) sc.Response {
return shim.Success(nil)
}
/*
* The Invoke method is called as a result of an application request to run the Smart Contract "fabcar"
* The calling application program has also specified the particular smart contract function to be called, with arguments
*/
func (s *SmartContract) Invoke(APIstub shim.ChaincodeStubInterface) sc.Response {
// Retrieve the requested Smart Contract function and arguments
function, args := APIstub.GetFunctionAndParameters()
// Route to the appropriate handler function to interact with the ledger appropriately
if function == "queryCar" {
return s.queryCar(APIstub, args)
} else if function == "initLedger" {
return s.initLedger(APIstub)
} else if function == "createCar" {
return s.createCar(APIstub, args)
} else if function == "queryAllCars" {
return s.queryAllCars(APIstub)
} else if function == "changeCarOwner" {
return s.changeCarOwner(APIstub, args)
} else if function == "createUser" {
return s.createUser(APIstub, args)
} else if function == "loginUser" {
return s.loginUser(APIstub, args)
} else if function == "profileInformation" {
return s.profileInformation(APIstub, args)
} else if function == "ReactAndPostUser" {
return s.ReactAndPostUser(APIstub, args)
} else if function == "findUserForRAPC" {
return s.findUserForRAPC(APIstub, args)
} else if function == "ValueReactAndPostUser" {
return s.ValueReactAndPostUser(APIstub, args)
} else if function == "sendIP" {
return s.sendIP(APIstub, args)
} else if function == "topPeopleIP" {
return s.topPeopleIP(APIstub, args)
} else if function == "findPostCnt" {
return s.findPostCnt(APIstub, args)
} else if function == "allFriendPost" {
return s.allFriendPost(APIstub, args)
} else if function == "ipVerification" {
return s.ipVerification(APIstub, args)
} else if function == "sendReq" {
return s.sendReq(APIstub, args)
} else if function == "getReq" {
return s.getReq(APIstub, args)
} else if function == "changeReq" {
return s.changeReq(APIstub, args)
} else if function == "changeOwner" {
return s.changeOwner(APIstub, args)
} else if function == "reducePost" {
return s.reducePost(APIstub, args)
} else if function == "subBalance" {
return s.subBalance(APIstub, args)
} else if function == "addBalance" {
return s.addBalance(APIstub, args)
}
return shim.Error("Invalid Smart Contract function name.")
}
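// Illustrative invocation via the Fabric peer CLI (the channel and chaincode
// names here are assumptions, not defined in this file):
//   peer chaincode invoke -C mychannel -n fabcar \
//     -c '{"Args":["createCar","CAR10","Honda","Accord","black","Tom"]}'
// The first element of Args selects a handler above; the rest arrive as args.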
func (s *SmartContract) subBalance(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
var priceS string = args[1]
// convert the price string into an int using strconv.Atoi
price, err := strconv.Atoi(priceS)
if err != nil {
return shim.Error("Price must be an integer: " + err.Error())
}
userAsBytes, _ := APIstub.GetState(args[0])
// car := Car{}
user := User{}
json.Unmarshal(userAsBytes, &user)
userBalance, err := strconv.Atoi(user.Balance)
if err != nil {
return shim.Error("Stored balance is not an integer: " + err.Error())
}
if userBalance - price >= 0 {
userBalance = userBalance - price
} else {
userBalance = 0
}
t := strconv.Itoa(userBalance)
user.Balance = t
userAsBytes, _ = json.Marshal(user)
APIstub.PutState(args[0], userAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) addBalance(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
var priceS string = args[1]
// convert the price string into an int using strconv.Atoi
price, err := strconv.Atoi(priceS)
if err != nil {
return shim.Error("Price must be an integer: " + err.Error())
}
userAsBytes, _ := APIstub.GetState(args[0])
// car := Car{}
user := User{}
json.Unmarshal(userAsBytes, &user)
userBalance, err := strconv.Atoi(user.Balance)
if err != nil {
return shim.Error("Stored balance is not an integer: " + err.Error())
}
userBalance = userBalance + price
t := strconv.Itoa(userBalance)
user.Balance = t
userAsBytes, _ = json.Marshal(user)
APIstub.PutState(args[0], userAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) queryCar(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
carAsBytes, _ := APIstub.GetState(args[0])
return shim.Success(carAsBytes)
}
func (s *SmartContract) sendReq(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 11 {
return shim.Error("Incorrect number of arguments. Expecting 11")
}
var docType string = "Request"
//fmt.Println("User Name : ",name)
var ip = Request{TableName: docType, Key: args[0], IPKey: args[1], SenderKey: args[2], ReceiverKey: args[3], SenderName: args[4], ReceiverName: args[5], SenderEmail: args[6], ReceiverEmail: args[7], FileName: args[8], LatestHashFile: args[9], IsValid: args[10]}
// var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]}
ipAsBytes, _ := json.Marshal(ip)
APIstub.PutState(args[0], ipAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) initLedger(APIstub shim.ChaincodeStubInterface) sc.Response {
cars := []Car{
Car{Make: "Toyota", Model: "Prius", Colour: "blue", Owner: "Tomoko"},
Car{Make: "Ford", Model: "Mustang", Colour: "red", Owner: "Brad"},
Car{Make: "Hyundai", Model: "Tucson", Colour: "green", Owner: "Jin Soo"},
Car{Make: "Volkswagen", Model: "Passat", Colour: "yellow", Owner: "Max"},
Car{Make: "Tesla", Model: "S", Colour: "black", Owner: "Adriana"},
Car{Make: "Peugeot", Model: "205", Colour: "purple", Owner: "Michel"},
Car{Make: "Chery", Model: "S22L", Colour: "white", Owner: "Aarav"},
Car{Make: "Fiat", Model: "Punto", Colour: "violet", Owner: "Pari"},
Car{Make: "Tata", Model: "Nano", Colour: "indigo", Owner: "Valeria"},
Car{Make: "Holden", Model: "Barina", Colour: "brown", Owner: "Shotaro"},
}
i := 0
for i < len(cars) {
fmt.Println("i is ", i)
carAsBytes, _ := json.Marshal(cars[i])
APIstub.PutState("CAR"+strconv.Itoa(i), carAsBytes)
fmt.Println("Added", cars[i])
i = i + 1
}
return shim.Success(nil)
}
func (s *SmartContract) createCar(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 5 {
return shim.Error("Incorrect number of arguments. Expecting 5")
}
var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]}
carAsBytes, _ := json.Marshal(car)
APIstub.PutState(args[0], carAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) queryAllCars(APIstub shim.ChaincodeStubInterface) sc.Response {
startKey := "CAR0"
endKey := "CAR999"
resultsIterator, err := APIstub.GetStateByRange(startKey, endKey)
if err != nil {
return shim.Error(err.Error())
}
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
bArrayMemberAlreadyWritten := false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
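// Illustrative response body (actual values depend on ledger contents):
//   [{"Key":"CAR0","Record":{"make":"Toyota","model":"Prius","colour":"blue","owner":"Tomoko"}}]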
func (s *SmartContract) changeOwner(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 6 {
return shim.Error("Incorrect number of arguments. Expecting 6")
}
ipAsBytes, _ := APIstub.GetState(args[0])
ip := IP{}
//ipKey, userKey, name, email, dateTime
json.Unmarshal(ipAsBytes, &ip)
ip.UserKey = args[1]
ip.Name = args[2]
ip.Email = args[3]
ip.DateTime = args[4]
ip.PrevOwnerKey = args[5]
ipAsBytes, _ = json.Marshal(ip)
APIstub.PutState(args[0], ipAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) changeReq(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
reqAsBytes, _ := APIstub.GetState(args[0])
req := Request{}
json.Unmarshal(reqAsBytes, &req)
req.IsValid = args[1]
reqAsBytes, _ = json.Marshal(req)
APIstub.PutState(args[0], reqAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) changeCarOwner(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
carAsBytes, _ := APIstub.GetState(args[0])
car := Car{}
json.Unmarshal(carAsBytes, &car)
car.Owner = args[1]
carAsBytes, _ = json.Marshal(car)
APIstub.PutState(args[0], carAsBytes)
return shim.Success(nil)
}
// Intellectual Property Project Start
func (s *SmartContract) createUser(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 9 {
return shim.Error("Incorrect number of arguments. Expecting 9")
}
var docType string = "User"
var key string = args[0]
var token string = args[1]
var name string = args[2]
var email string = args[3]
var password string = args[4]
var publicKey string = args[5]
var filePath string = args[6]
var fileHash string = args[7]
var balance string = args[8]
//fmt.Println("User Name : ",name)
var user = User{TableName: docType, Key: key, Token: token, Name: name, Email: email, PasswordHash: password, PublicKey: publicKey, NewFilePath: filePath, LatestHashFile: fileHash, Balance: balance}
// var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]}
userAsBytes, _ := json.Marshal(user)
APIstub.PutState(key, userAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) loginUser(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
var useremail string = args[0]
var password string = args[1]
var queryString = fmt.Sprintf("{\"selector\":{\"tableName\":\"User\",\"email\":\"%s\",\"passwordHash\":\"%s\"}}", useremail, password)
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) profileInformation(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
carAsBytes, _ := APIstub.GetState(args[0])
return shim.Success(carAsBytes)
}
func (s *SmartContract) findUserForRAPC(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
// var useremail string = args[0]
var key string = args[1]
var queryString = fmt.Sprintf("{\"selector\":{\"tableName\":\"RAPC\",\"userKey\":\"%s\"}}", key)
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) ReactAndPostUser(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 4 {
return shim.Error("Incorrect number of arguments. Expecting 4")
}
var docType string = "RAPC"
var key string = args[0]
var userKey string = args[1]
var name string = args[2]
var email string = args[3]
var postCnt int = 0
var loveCnt int = 0
//fmt.Println("User Name : ",name)
var rapc = RAPC{TableName: docType, Key: key, UserKey: userKey, Name: name, Email: email, PostCnt: postCnt, LoveCnt: loveCnt}
// var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]}
rapcAsBytes, _ := json.Marshal(rapc)
APIstub.PutState(key, rapcAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) ValueReactAndPostUser(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
var flag string = args[1]
var post string = "post"
var love string = "love"
rapcAsBytes, _ := APIstub.GetState(args[0])
// car := Car{}
rapc := RAPC{}
json.Unmarshal(rapcAsBytes, &rapc)
if flag == post {
rapc.PostCnt = rapc.PostCnt + 1
} else if flag == love {
rapc.LoveCnt = rapc.LoveCnt + 1
}
rapcAsBytes, _ = json.Marshal(rapc)
APIstub.PutState(args[0], rapcAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) reducePost(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
rapcAsBytes, _ := APIstub.GetState(args[0])
// car := Car{}
rapc := RAPC{}
json.Unmarshal(rapcAsBytes, &rapc)
rapc.PostCnt = rapc.PostCnt - 1
rapcAsBytes, _ = json.Marshal(rapc)
APIstub.PutState(args[0], rapcAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) sendIP(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 13 {
return shim.Error("Incorrect number of arguments. Expecting 13")
}
var docType string = "IP"
var key string = args[0]
var name string = args[1]
var email string = args[2]
var userkey string = args[3]
var filename string = args[4]
var filepath string = args[5]
var filehash string = args[6]
var datetime string = args[7]
var isImageOrPdf string = args[8]
var isImage string = args[9]
var isPdf string = args[10]
var price string = args[11]
var prevOwnerKey string = args[12]
//fmt.Println("User Name : ",name)
var ip = IP{TableName: docType, Key: key, UserKey: userkey, Name: name, Email: email, FileName: filename, NewFilePath: filepath, LatestHashFile: filehash, DateTime: datetime, IsImageOrPdf: isImageOrPdf, IsImage: isImage, IsPdf: isPdf, Price: price, PrevOwnerKey: prevOwnerKey}
// var car = Car{Make: args[1], Model: args[2], Colour: args[3], Owner: args[4]}
ipAsBytes, _ := json.Marshal(ip)
APIstub.PutState(key, ipAsBytes)
return shim.Success(nil)
}
func (s *SmartContract) topPeopleIP(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
// var useremail string = args[0]
var queryString = "{\"selector\":{\"tableName\":\"RAPC\"}}" // the query selects every RAPC record
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) allFriendPost(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
// var useremail string = args[0]
var queryString = "{\"selector\":{\"tableName\":\"IP\"}}" // the query selects every IP record
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) findPostCnt(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
var useremail string = args[0]
var queryString = fmt.Sprintf("{\"selector\":{\"tableName\":\"RAPC\",\"email\":\"%s\"}}", useremail)
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) getReq(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 1 {
return shim.Error("Incorrect number of arguments. Expecting 1")
}
var receiverEmail string = args[0]
var queryString = fmt.Sprintf("{\"selector\":{\"tableName\":\"Request\",\"receiverEmail\":\"%s\"}}", receiverEmail)
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
return shim.Error(err.Error())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
func (s *SmartContract) ipVerification(APIstub shim.ChaincodeStubInterface, args []string) sc.Response {
if len(args) != 2 {
return shim.Error("Incorrect number of arguments. Expecting 2")
}
// var useremail string = args[0]
var hashOfFile string = args[1]
var queryString = fmt.Sprintf("{\"selector\":{\"tableName\":\"IP\",\"latestHashFile\":\"%s\"}}", hashOfFile)
resultsIterator, _ := APIstub.GetQueryResult(queryString) //skip the errors
//skipping error handling here :p
defer resultsIterator.Close()
// buffer is a JSON array containing QueryResults
var buffer bytes.Buffer
buffer.WriteString("[")
var bArrayMemberAlreadyWritten = false
for resultsIterator.HasNext() {
queryResponse, err := resultsIterator.Next()
if err != nil {
// return shim.Error(err.Error())
buffer.WriteString("]")
return shim.Success(buffer.Bytes())
}
// Add a comma before array members, suppress it for the first array member
if bArrayMemberAlreadyWritten == true {
buffer.WriteString(",")
}
buffer.WriteString("{\"Key\":")
buffer.WriteString("\"")
buffer.WriteString(queryResponse.Key)
buffer.WriteString("\"")
buffer.WriteString(", \"Record\":")
// Record is a JSON object, so we write as-is
buffer.WriteString(string(queryResponse.Value))
buffer.WriteString("}")
bArrayMemberAlreadyWritten = true
}
buffer.WriteString("]")
fmt.Printf("- queryAllCars:\n%s\n", buffer.String())
return shim.Success(buffer.Bytes())
}
// The main function is only relevant in unit test mode. Only included here for completeness.
func main() {
// Create a new Smart Contract
err := shim.Start(new(SmartContract))
if err != nil {
fmt.Printf("Error creating new Smart Contract: %s", err)
}
} | |
hosting_service_test.go | package hosting_service
import (
"testing"
"github.com/jesseduffield/lazygit/pkg/i18n"
"github.com/jesseduffield/lazygit/pkg/test"
"github.com/stretchr/testify/assert"
)
func TestGetRepoInfoFromURL(t *testing.T) {
type scenario struct {
serviceDefinition ServiceDefinition
testName string
repoURL string
test func(*RepoInformation)
}
scenarios := []scenario{
{
githubServiceDef,
"Returns repository information for git remote url",
"[email protected]:petersmith/super_calculator",
func(repoInfo *RepoInformation) {
assert.EqualValues(t, repoInfo.Owner, "petersmith")
assert.EqualValues(t, repoInfo.Repository, "super_calculator")
},
},
{
githubServiceDef,
"Returns repository information for git remote url, trimming trailing '.git'",
"[email protected]:petersmith/super_calculator.git",
func(repoInfo *RepoInformation) {
assert.EqualValues(t, repoInfo.Owner, "petersmith")
assert.EqualValues(t, repoInfo.Repository, "super_calculator")
},
},
{
githubServiceDef,
"Returns repository information for ssh remote url",
"ssh://[email protected]/petersmith/super_calculator",
func(repoInfo *RepoInformation) {
assert.EqualValues(t, repoInfo.Owner, "petersmith")
assert.EqualValues(t, repoInfo.Repository, "super_calculator")
},
},
{
githubServiceDef,
"Returns repository information for http remote url",
"https://[email protected]/johndoe/social_network.git",
func(repoInfo *RepoInformation) {
assert.EqualValues(t, repoInfo.Owner, "johndoe")
assert.EqualValues(t, repoInfo.Repository, "social_network")
},
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
result, err := s.serviceDefinition.getRepoInfoFromURL(s.repoURL)
assert.NoError(t, err)
s.test(result)
})
}
}
func TestGetPullRequestURL(t *testing.T) {
type scenario struct {
testName string
from string
to string
remoteUrl string
configServiceDomains map[string]string
test func(url string, err error)
expectedLoggedErrors []string
}
scenarios := []scenario{
{
testName: "Opens a link to new pull request on bitbucket",
from: "feature/profile-page",
remoteUrl: "[email protected]:johndoe/social_network.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/profile-page&t=1", url)
},
},
{
testName: "Opens a link to new pull request on bitbucket with http remote url",
from: "feature/events",
remoteUrl: "https://[email protected]/johndoe/social_network.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/events&t=1", url)
},
},
{
testName: "Opens a link to new pull request on github",
from: "feature/sum-operation",
remoteUrl: "[email protected]:peter/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://github.com/peter/calculator/compare/feature/sum-operation?expand=1", url)
},
},
{
testName: "Opens a link to new pull request on bitbucket with specific target branch",
from: "feature/profile-page/avatar",
to: "feature/profile-page",
remoteUrl: "[email protected]:johndoe/social_network.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/profile-page/avatar&dest=feature/profile-page&t=1", url)
},
},
{
testName: "Opens a link to new pull request on bitbucket with http remote url with specified target branch",
from: "feature/remote-events",
to: "feature/events",
remoteUrl: "https://[email protected]/johndoe/social_network.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/remote-events&dest=feature/events&t=1", url)
},
},
{
testName: "Opens a link to new pull request on github with specific target branch",
from: "feature/sum-operation",
to: "feature/operations",
remoteUrl: "[email protected]:peter/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://github.com/peter/calculator/compare/feature/operations...feature/sum-operation?expand=1", url)
},
},
{
testName: "Opens a link to new pull request on gitlab",
from: "feature/ui",
remoteUrl: "[email protected]:peter/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://gitlab.com/peter/calculator/merge_requests/new?merge_request[source_branch]=feature/ui", url)
},
},
{
testName: "Opens a link to new pull request on gitlab in nested groups",
from: "feature/ui",
remoteUrl: "[email protected]:peter/public/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://gitlab.com/peter/public/calculator/merge_requests/new?merge_request[source_branch]=feature/ui", url)
},
},
{
testName: "Opens a link to new pull request on gitlab with specific target branch",
from: "feature/commit-ui",
to: "epic/ui",
remoteUrl: "[email protected]:peter/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://gitlab.com/peter/calculator/merge_requests/new?merge_request[source_branch]=feature/commit-ui&merge_request[target_branch]=epic/ui", url)
},
},
{
testName: "Opens a link to new pull request on gitlab with specific target branch in nested groups",
from: "feature/commit-ui",
to: "epic/ui",
remoteUrl: "[email protected]:peter/public/calculator.git",
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://gitlab.com/peter/public/calculator/merge_requests/new?merge_request[source_branch]=feature/commit-ui&merge_request[target_branch]=epic/ui", url)
},
},
{
testName: "Throws an error if git service is unsupported",
from: "feature/divide-operation", | remoteUrl: "[email protected]:peter/calculator.git",
test: func(url string, err error) {
assert.EqualError(t, err, "Unsupported git service")
},
},
{
testName: "Does not log error when config service domains are valid",
from: "feature/profile-page",
remoteUrl: "[email protected]:johndoe/social_network.git",
configServiceDomains: map[string]string{
// valid configuration for a custom service URL
"git.work.com": "gitlab:code.work.com",
},
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/profile-page&t=1", url)
},
expectedLoggedErrors: nil,
},
{
testName: "Logs error when config service domain is malformed",
from: "feature/profile-page",
remoteUrl: "[email protected]:johndoe/social_network.git",
configServiceDomains: map[string]string{
"noservice.work.com": "noservice.work.com",
},
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/profile-page&t=1", url)
},
expectedLoggedErrors: []string{"Unexpected format for git service: 'noservice.work.com'. Expected something like 'github.com:github.com'"},
},
{
testName: "Logs error when config service domain uses unknown provider",
from: "feature/profile-page",
remoteUrl: "[email protected]:johndoe/social_network.git",
configServiceDomains: map[string]string{
"invalid.work.com": "noservice:invalid.work.com",
},
test: func(url string, err error) {
assert.NoError(t, err)
assert.Equal(t, "https://bitbucket.org/johndoe/social_network/pull-requests/new?source=feature/profile-page&t=1", url)
},
expectedLoggedErrors: []string{"Unknown git service type: 'noservice'. Expected one of github, bitbucket, gitlab"},
},
}
for _, s := range scenarios {
s := s
t.Run(s.testName, func(t *testing.T) {
tr := i18n.EnglishTranslationSet()
log := &test.FakeFieldLogger{}
hostingServiceMgr := NewHostingServiceMgr(log, &tr, s.remoteUrl, s.configServiceDomains)
s.test(hostingServiceMgr.GetPullRequestURL(s.from, s.to))
log.AssertErrors(t, s.expectedLoggedErrors)
})
}
} | |
group.rs | use crate::egml::{Fill, Stroke, Translate};
#[derive(Debug, Default)]
pub struct Group {
pub id: Option<String>,
pub stroke: Option<Stroke>,
pub fill: Option<Fill>,
pub translate: Option<Translate>,
}
impl Group {
pub fn id(&self) -> Option<&str> {
self.id.as_ref().map(|s| s.as_str())
}
pub fn empty_overrides(&self) -> bool |
} | {
self.stroke.is_none() && self.fill.is_none() && self.translate.is_none()
} |
setupJest.js | 'use strict';
require('core-js/es6/reflect');
require('core-js/es7/reflect');
require('zone.js/dist/zone.js');
require('zone.js/dist/proxy.js'); |
const getTestBed = require('@angular/core/testing').getTestBed;
const BrowserDynamicTestingModule = require('@angular/platform-browser-dynamic/testing').BrowserDynamicTestingModule;
const platformBrowserDynamicTesting = require('@angular/platform-browser-dynamic/testing').platformBrowserDynamicTesting;
getTestBed().initTestEnvironment(
BrowserDynamicTestingModule,
platformBrowserDynamicTesting()
); | require('zone.js/dist/sync-test');
require('zone.js/dist/async-test');
require('zone.js/dist/fake-async-test');
require('./zone-patch'); |
tailwind.config.js | const defaultTheme = require('tailwindcss/defaultTheme');
module.exports = {
mode: 'jit',
purge: [
'./vendor/laravel/framework/src/Illuminate/Pagination/resources/views/*.blade.php',
'./vendor/laravel/jetstream/**/*.blade.php',
'./storage/framework/views/*.php',
'./resources/views/**/*.blade.php',
'./resources/js/**/*.vue',
],
theme: {
extend: {
fontFamily: {
sans: ['Nunito', ...defaultTheme.fontFamily.sans],
}, | variants: {
opacity: ['responsive', 'hover', 'focus', 'overflow'],
cursor: ['hover'],
},
plugins: [require('@tailwindcss/forms'), require('@tailwindcss/typography')],
}; | },
},
|
gp_stan_transfer.py | """
Classes for GP models with Stan that perform transfer optimization.
"""
from argparse import Namespace
import numpy as np
import copy
from .gp_stan import StanGp
from .regression.transfer_regression import TransferRegression
from ..util.misc_util import dict_to_namespace
class StanTransferGp(StanGp):
"""
GP model with transferred prior mean based on a regression model.
"""
def __init__(self, params=None, data=None, verbose=None):
self.set_params(params)
self.set_verbose(verbose)
self.set_model(data)
def set_params(self, params):
"""Set self.params, the parameters for this model."""
super().set_params(params)
params = dict_to_namespace(params)
assert hasattr(params, 'transfer_config')
self.params.transfer_config = params.transfer_config
def set_model(self, data):
"""Set GP Stan model and regression model."""
self.model = self.get_model()
self.regressor = self.get_regressor(data)
#self.regressor = self.get_proxy_regressor(data) # TODO
def get_regressor(self, data):
"""Return transfer (prior mean) regressor."""
# Define regressor
regressor = TransferRegression(self.params.transfer_config)
if len(data.x) < 1:
regressor = None
else:
mean_errors = []
# TODO: remove extra files such as .DS_STORE (or ignore files that break)
for i, reg in enumerate(regressor.model_fnames):
try:
val_acc = regressor.evaluate_model(reg, data.x)
error = np.mean((data.y - val_acc) ** 2)
mean_errors.append((error, i))
except Exception:
print(f'Transfer model file in tarball did not load: {reg}')
mean_errors.sort()
if mean_errors[0][0] > self.params.transfer_config.get('metric_threshold', 0.6):
regressor.set_best_model(-1)
else:
regressor.set_best_model(mean_errors[0][1])
return regressor
def | (self, data):
if not data:
regressor = None
else:
def regressor(x): return np.linalg.norm(x)
return regressor
def transform_data_y(self):
"""Transform data.y using PriorMeanDataTransformer."""
self.dt = PriorMeanDataTransformer(self.data, self.regressor, False)
y_trans = self.dt.transform_y_data()
self.data = Namespace(x=self.data.x, y=y_trans)
def gen_list(self, x_list, z, s, nsamp):
"""
Draw nsamp samples from generative process, given list of inputs
x_list, posterior sample z, and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,)
z : Namespace
Namespace of GP hyperparameters.
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from generative process.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_pred(nsamp, x_list)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def postgen_list(self, x_list, s, nsamp):
"""
Draw nsamp samples from posterior predictive distribution, given list
of inputs x_list and seed s.
Parameters
----------
x_list : list
List of numpy ndarrays each with shape=(self.params.ndimx,).
s : int
The seed, a positive integer.
nsamp : int
The number of samples to draw from the posterior predictive
distribution.
Returns
-------
list
A list with len=len(x_list) of numpy ndarrays, each with
shape=(nsamp,).
"""
x_list = self.transform_xin_list(x_list)
pred_list = self.sample_gp_post_pred(
nsamp, x_list, full_cov=True, nloop=np.min([50, nsamp])
)
pred_list = [
self.dt.inv_transform_y_data(pr, x) for pr, x in zip(pred_list, x_list)
]
return pred_list
def __str__(self):
return f'StanTransferGp with params={self.params}'
class PriorMeanDataTransformer:
"""
A class to transform (and inverse transform) data, based on a prior mean regression.
"""
def __init__(self, data, prior_mean_f, verbose=True):
"""
Parameters
----------
data : Namespace
Namespace containing data.
prior_mean_f : function
Prior mean function.
verbose : bool
If True, print description string.
"""
self._set_data(data)
self._set_prior_mean_f(prior_mean_f)
self._set_verbose(verbose)
def _set_data(self, data):
"""Set self.data"""
self.data = data
def _set_prior_mean_f(self, prior_mean_f):
"""Set self.prior_mean_f."""
if prior_mean_f is None:
# Default prior mean function is constant 0 function
def prior_mean_f(x): return 0.
self.prior_mean_f = prior_mean_f
def _set_verbose(self, verbose):
"""Set verbose options."""
self.verbose = verbose
if self.verbose:
self._print_str()
def transform_y_data(self, y_data=None, x_data=None):
"""Transform and return self.data.y"""
# Transform self.data.y into new list
y_trans = [y - self.prior_mean_f(x) for x, y in zip(self.data.x, self.data.y)]
return y_trans
def inv_transform_y_data(self, y_arr, x_single_arr):
"""Return inverse transform of y_arr."""
# Compute prior mean val for the single input
prior_mean_val = self.prior_mean_f(x_single_arr)
# Inverse transform y_arr into list
y_inv_trans_list = [y + prior_mean_val for y in list(y_arr)]
# Transform back to array and return
y_inv_trans = np.array(y_inv_trans_list).reshape(-1)
return y_inv_trans
def _print_str(self):
"""Print a description string."""
print('*PriorMeanDataTransformer')
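# Minimal usage sketch (illustrative values; relies on Namespace and numpy
# imported above -- the lambda is a stand-in prior mean, not a real model):
#   data = Namespace(x=[np.zeros(2), np.ones(2)], y=[1.0, 2.0])
#   dt = PriorMeanDataTransformer(data, prior_mean_f=lambda x: 0.5, verbose=False)
#   y_trans = dt.transform_y_data()                     # prior mean subtracted
#   y_back = dt.inv_transform_y_data(np.array([0.5]), data.x[0])  # adds it back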
| get_proxy_regressor |
operation.ts | import { Provider } from "ethers/providers";
import { Protocol, ProtocolRunner } from "../../../engine";
import { Store } from "../../../store";
export async function | (
store: Store,
protocolRunner: ProtocolRunner,
provider: Provider,
initiatorXpub: string,
responderXpub: string,
intermediaryXpub: string,
appInstanceId: string
): Promise<void> {
const stateChannel = await store.getChannelFromAppInstanceID(appInstanceId);
const appInstance = stateChannel.getAppInstance(appInstanceId);
await protocolRunner.initiateProtocol(Protocol.UninstallVirtualApp, {
initiatorXpub,
responderXpub,
intermediaryXpub,
targetOutcome: await appInstance.computeOutcome(
appInstance.state,
provider
),
targetAppIdentityHash: appInstance.identityHash
});
}
| uninstallVirtualAppInstanceFromChannel |
drgraph.rs | use criterion::{black_box, criterion_group, criterion_main, Criterion, ParameterizedBenchmark};
use storage_proofs_core::drgraph::*;
use storage_proofs_core::hasher::pedersen::*;
#[allow(clippy::unit_arg)]
fn | (c: &mut Criterion) {
let params = vec![12, 24, 128, 1024];
c.bench(
"sample",
ParameterizedBenchmark::new(
"bucket/m=6",
|b, n| {
let graph =
BucketGraph::<PedersenHasher>::new(*n, BASE_DEGREE, 0, [32; 32]).unwrap();
b.iter(|| {
let mut parents = vec![0; 6];
black_box(graph.parents(2, &mut parents).unwrap());
})
},
params,
),
);
}
criterion_group!(benches, drgraph);
criterion_main!(benches);
| drgraph |
blobstore.rs | use std::fs::{File, OpenOptions};
use std::io::{Seek, SeekFrom};
use serde::{Serialize, Deserialize};
use crate::blob::{read_u64, write_u64, Blob};
use crate::error::BlobError;
const CONT_SIZE: u64 = 32;
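// On-disk layout, as a sketch derived from `new` and `insert_only` below: a
// 32-byte header of four u64 values (hash seed, block size, block count,
// element count) followed by `nblocks` fixed-size blocks. Each record starts
// with two u64s -- key length and value length -- and a key length of 0 marks
// a free span of `value length` bytes.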
pub struct BlobStore{
file: File,
hseed: u64,
block_size: u64,
nblocks: u64,
elems: u64,
}
impl BlobStore {
pub fn new(fname: &str, block_size: u64, nblocks: u64) -> Result<Self, BlobError> |
pub fn open(fname: &str) -> Result<Self, BlobError> {
let mut ff = OpenOptions::new().write(true).read(true).open(fname)?;
let f = &mut ff;
f.seek(SeekFrom::Start(0))?;
let hseed = read_u64(f)?;
let block_size = read_u64(f)?;
let nblocks = read_u64(f)?;
let elems = read_u64(f)?;
Ok(
BlobStore {
hseed,
file: ff,
block_size,
nblocks,
elems,
}
)
}
pub fn new_or_open(fname: &str, bsize: u64, nblocks: u64) -> Result<Self, BlobError> {
Self::new(fname, bsize, nblocks).or_else(|_| Self::open(fname))
}
pub fn inc_elems(&mut self, n: i32) -> Result<(), BlobError> {
if n > 0 {
self.elems += n as u64;
} else {
let n2 = (-n) as u64;
if self.elems > n2 {
self.elems -= n2;
}
}
self.file.seek(SeekFrom::Start(24))?;
write_u64(&mut self.file, self.elems)?;
Ok(())
}
fn insert<K: Serialize, V: Serialize>(&mut self, k: K, v: V) -> Result<(), BlobError> {
self.remove(&k).ok();
self.insert_only(k, v)
}
fn insert_only<K: Serialize, V: Serialize>(&mut self, k: K, v: V) -> Result<(), BlobError> {
let blob = Blob::from(&k, &v).unwrap();
if blob.len() > self.block_size {
return Err(BlobError::TooBig(blob.len()))
}
let bucket = blob.k_hash(self.hseed) % self.nblocks;
let f = &mut self.file;
let mut pos = f.seek(SeekFrom::Start(CONT_SIZE + self.block_size * bucket))?;
loop {
if pos > CONT_SIZE + self.block_size * (bucket + 1) {
return Err(BlobError::NoRoom)
}
let klen = read_u64(f)?;
let vlen = read_u64(f)?;
if klen == 0 &&blob.len() < vlen {
f.seek(SeekFrom::Start(pos))?;
blob.out(f);
// add pointer immediately after data ends
write_u64(f, 0)?;
write_u64(f, (vlen - blob.len()) - 16)?;
self.inc_elems(1)?;
return Ok(());
}
pos = f.seek(SeekFrom::Start(pos + 16 + klen + vlen))?;
}
}
pub fn b_start(&self, b: u64) -> u64 {
CONT_SIZE + self.block_size * b
}
pub fn get<K: Serialize>(&mut self, k: &K) -> Result<Blob, BlobError>{
let s_blob = Blob::from(k, &0)?;
let bucket = s_blob.k_hash(self.hseed) % self.nblocks;
let b_start = self.b_start(bucket);
let b_end = self.b_start(bucket+1);
let f = &mut self.file;
let mut pos = f.seek(SeekFrom::Start(b_start))?;
loop {
if pos >= b_end {
return Err(BlobError::NotFound);
}
let b = Blob::read(f)?;
if b.key_match(&s_blob){
return Ok(b);
}
pos += b.len();
}
}
pub fn remove<K: Serialize>(&mut self, k: &K) -> Result<(), BlobError> {
let s_blob = Blob::from(k, &0)?;
let bucket = s_blob.k_hash(self.hseed) % self.nblocks;
let b_start = self.b_start(bucket);
let b_end = self.b_start(bucket+1);
let f = &mut self.file;
let mut pos = f.seek(SeekFrom::Start(b_start))?;
loop {
if pos >= b_end {
return Ok(());
}
let b = Blob::read(f)?;
if b.key_match(&s_blob){
let l = b.len();
if pos + l < b_end {
if read_u64(f)? == 0 {
let nlen = read_u64(f)?;
f.seek(SeekFrom::Start(pos))?;
write_u64(f, 0)?;
write_u64(f, l + nlen + 16)?;
return Ok(());
}
}
f.seek(SeekFrom::Start(pos))?;
write_u64(f, 0)?;
write_u64(f, l - 16)?;
self.inc_elems(-1)?;
return Ok(());
}
pos = f.seek(SeekFrom::Start(pos + b.len()))?;
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[test]
pub fn test_create_file() {
let fs = "test_data/create_file";
std::fs::remove_file(fs).ok();
let mut bs = BlobStore::new(fs, 1000, 10).unwrap();
let blocksize = bs.block_size;
let mut b2 = BlobStore::open(fs).unwrap();
assert_eq!(b2.block_size, blocksize);
b2.insert_only("fish", "so loing and thanks all the fish").unwrap();
b2.insert_only(23, "equisde xd im a string").unwrap();
b2.insert_only("green", "wats up, im a green").unwrap();
b2.insert_only("happy", "is friend with sleepy").unwrap();
drop(b2);
let mut b3 = BlobStore::open(fs).unwrap();
assert_eq!(b3.get(&"green").unwrap().get_v::<String>().unwrap(), "wats up, im a green".to_string());
b3.remove(&"green").ok();
assert!(b3.get(&"green").is_err());
assert!(b3.get(&"fish").is_ok());
}
} | {
let hseed = rand::random::<u64>();
let mut ff = OpenOptions::new()
.create_new(true)
.write(true)
.read(true)
.open(fname)?;
let f = &mut ff;
f.set_len(CONT_SIZE + block_size * nblocks)?;
f.seek(SeekFrom::Start(0))?;
write_u64(f, hseed)?;
write_u64(f, block_size)?;
write_u64(f, nblocks)?;
write_u64(f, 0)?; // 0 elements on create
// mark beginnings of each block to show empty
for x in 0 .. nblocks {
f.seek(SeekFrom::Start(CONT_SIZE + x * block_size))?;
write_u64(f, 0)?;
write_u64(f, block_size - 16)?;
}
Ok(
BlobStore {
hseed,
file: ff,
block_size,
nblocks,
elems: 0,
}
)
} |
error.rs | use okapi::openapi3::Responses;
use rocket::http::Status;
use rocket::response::Responder;
use rocket::{Request, Response};
use rocket_okapi::gen::OpenApiGenerator;
use rocket_okapi::response::OpenApiResponder;
use rocket_okapi::util::add_schema_response;
use std::io::Cursor;
pub type WalletResult<T> = Result<T, BackendError>;
#[derive(Clone, Debug, JsonSchema)]
pub enum BackendError {
Bson(String),
Database(String),
NotFound,
Yahoo(String),
}
#[macro_export]
macro_rules! dang {
($kind:ident, $original_err:expr) => {
BackendError::$kind(format!("{:?}", $original_err))
};
}
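// Illustrative expansion: dang!(Database, err) becomes
// BackendError::Database(format!("{:?}", err))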
impl From<mongodb::error::Error> for BackendError {
fn from(error: mongodb::error::Error) -> Self {
dang!(Database, error)
}
}
impl From<mongodb::bson::de::Error> for BackendError {
fn from(error: mongodb::bson::de::Error) -> Self {
dang!(Bson, error)
}
}
impl From<mongodb::bson::ser::Error> for BackendError {
fn | (error: mongodb::bson::ser::Error) -> Self {
dang!(Bson, error)
}
}
impl From<std::option::NoneError> for BackendError {
fn from(error: std::option::NoneError) -> Self {
dang!(Bson, error)
}
}
impl Responder<'static> for BackendError {
fn respond_to(self, _: &Request) -> Result<Response<'static>, Status> {
let body;
let status = match self {
BackendError::Bson(msg) => {
body = msg;
Status::new(500, "Bson")
}
BackendError::Database(msg) => {
body = msg;
Status::new(500, "Database")
}
BackendError::NotFound => {
body = String::new();
Status::NotFound
}
BackendError::Yahoo(msg) => {
body = msg;
Status::new(500, "Yahoo")
}
};
Response::build()
.status(status)
.sized_body(Cursor::new(body))
.ok()
}
}
impl OpenApiResponder<'static> for BackendError {
fn responses(gen: &mut OpenApiGenerator) -> rocket_okapi::Result<Responses> {
let mut responses = Responses::default();
let schema = gen.json_schema::<String>();
add_schema_response(&mut responses, 500, "text/plain", schema.clone())?;
add_schema_response(&mut responses, 404, "text/plain", schema)?;
Ok(responses)
}
}
| from |
index.js | const base64Decode = require('fast-base64-decode')
const getRandomBase64 = require('./getRandomBase64')
class TypeMismatchError extends Error {}
class QuotaExceededError extends Error {}
let warned = false
function insecureRandomValues (array) {
if (!warned) { | }
for (let i = 0, r; i < array.length; i++) {
if ((i & 0x03) === 0) r = Math.random() * 0x100000000
array[i] = (r >>> ((i & 0x03) << 3)) & 0xff
}
return array
}
/**
* @param {Int8Array|Uint8Array|Int16Array|Uint16Array|Int32Array|Uint32Array|Uint8ClampedArray} array
*/
function getRandomValues (array) {
if (!(array instanceof Int8Array || array instanceof Uint8Array || array instanceof Int16Array || array instanceof Uint16Array || array instanceof Int32Array || array instanceof Uint32Array || array instanceof Uint8ClampedArray)) {
throw new TypeMismatchError('Expected an integer array')
}
if (array.byteLength > 65536) {
throw new QuotaExceededError('Can only request a maximum of 65536 bytes')
}
// Calling getRandomBase64 in debug mode leads to the error
// "Calling synchronous methods on native modules is not supported in Chrome".
// So in that specific case we fall back to just using Math.random.
if (__DEV__) {
if (typeof global.nativeCallSyncHook === 'undefined') {
return insecureRandomValues(array)
}
}
base64Decode(getRandomBase64(array.byteLength), new Uint8Array(array.buffer, array.byteOffset, array.byteLength))
return array
}
if (typeof global.crypto !== 'object') {
global.crypto = {}
}
if (typeof global.crypto.getRandomValues !== 'function') {
global.crypto.getRandomValues = getRandomValues
} | console.warn('Using an insecure random number generator, this should only happen when running in a debugger without support for crypto.getRandomValues')
warned = true |
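// Usage sketch once this polyfill has been loaded (array size is arbitrary):
//   const bytes = crypto.getRandomValues(new Uint8Array(16))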
dokku_config.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# from ansible.module_utils.basic import *
from ansible.module_utils.basic import AnsibleModule
import json
import pipes
import subprocess
import logging
try:
basestring
except NameError:
basestring = str
DOCUMENTATION = '''
---
module: dokku_config
short_description: Manage environment variables for a given dokku application
options:
app:
description:
- The name of the app... if global envar, use: --global
required: True
default: null
aliases: []
restart:
description:
- Whether to restart the app after applying the environment variables
required: False
default: False
aliases: []
config:
description:
- A map of environment variables where key => value
required: True
default: {}
aliases: []
unset:
description:
- A list of envar KEYS to unset. A KEY present in both CONFIG and UNSET is first applied and then unset, resulting in a "changed" state
required: False
default: []
aliases: []
author: Jose Diaz-Gonzalez
modified by: Elliott Castillo
requirements: [ ]
'''
EXAMPLES = '''
- name: set KEY=VALUE
dokku_config:
app: hello-world
restart: "True"
config:
KEY: VALUE_1
KEY_2: VALUE_2
unset: ["KEY_3", "KEY_4", "KEY_5"]
'''
def force_list(l):
if isinstance(l, list):
return l
return list(l)
def subprocess_check_output(command, split='\n'):
error = None
output = []
try:
output = subprocess.check_output(command, shell=True)
if isinstance(output, bytes):
output = output.decode("utf-8")
output = str(output).rstrip('\n')
if split is None:
return output, error
output = output.split(split)
output = force_list(filter(None, output))
output = [o.strip() for o in output]
except subprocess.CalledProcessError as e:
error = str(e)
return output, error
def dokku_config(app):
|
def dokku_config_set(data):
is_error = True
has_changed = False
meta = {'present': False}
meta['error1'] = None
options = []
values = []
invalid_values = []
existing, error = dokku_config(data['app'])
for key, value in data['config'].items():
# if it's not a valid string or unicode string, add to invalid, skip
if not isinstance(value, basestring):
invalid_values.append(key)
continue
# if the value of the envar is unchanged, skip
if value == existing.get(key, None):
continue
values.append('{0}={1}'.format(key, pipes.quote(value)))
if invalid_values:
template = 'All values must be keys, found invalid types for {0}'
meta['error1'] = template.format(', '.join(invalid_values))
return (is_error, has_changed, meta)
if len(values) == 0:
is_error = False
has_changed = False
return (is_error, has_changed, meta)
if not data['restart']:
options.append('--no-restart')
# you can further modify this to add other params such as "encoded" or other options in future dokku versions
if data['app'] == '--global':
command = 'dokku config:set {1} --global {0}'.format(' '.join(values), ' '.join(options))
else:
command = 'dokku config:set {2} {0} {1}'.format(data['app'], ' '.join(values), ' '.join(options))
# config:set command execution
try:
subprocess.check_call(command, shell=True)
is_error = False
has_changed = True
except subprocess.CalledProcessError as e:
meta['error1'] = str(e)
return (is_error, has_changed, meta)
def dokku_config_unset(data):
unset_values = []
is_error = True
has_changed = False
meta = {'present': False}
meta.update({'error2': None})
options = []
# get existing keys
existing, error = dokku_config(data['app'])
# config:unset command execution
dl = force_list(data['unset'])
meta.update({'unset': data['unset']})
# if the delete list is not empty
if dl:
# construct the list of values that are to be unset
for ki in dl:
if ki in existing.keys():
unset_values.append(ki)
unset_values = force_list(unset_values)
# if there are values to unset
if unset_values:
if data['app'] == '--global':
unset_command = 'dokku config:unset --global {0}'.format(' '.join(unset_values))
else:
unset_command = 'dokku config:unset {2} {0} {1}'.format(data['app'], ' '.join(unset_values), ' '.join(options))
try:
subprocess.check_call(unset_command, shell=True)
is_error = False
has_changed = True
except subprocess.CalledProcessError as e:
meta['error2'] = str(e)
else:
is_error = False
has_changed = False
else:
is_error = False
return (is_error, has_changed, meta)
def main():
fields = {
'app': {
'required': True,
'type': 'str',
},
'restart': {
'required': False,
'type': 'bool',
'default': False
},
'config': {
'required': True,
'type': 'dict',
'no_log': True,
},
'unset': {
'required': False,
'type': 'list',
'default': []
},
}
module = AnsibleModule(
argument_spec=fields,
supports_check_mode=False
)
is_error2 = False
has_changed2 = False
result2 = {}
# Do the config:set operation
is_error1, has_changed1, result1 = dokku_config_set(module.params)
# Do the config:unset operation
if not module.params['unset'] == []:
is_error2, has_changed2, result2 = dokku_config_unset(module.params)
# check the error indicator for each operation
is_error = True if is_error1 or is_error2 else False
has_changed = True if has_changed1 or has_changed2 else False
if is_error:
module.fail_json(msg=result1['error1'], config_set_operation=result1, config_unset_operation=result2)
module.exit_json(changed=has_changed, config_set_operation=result1, config_unset_operation=result2)
if __name__ == '__main__':
main()
| if app == '--global':
command = 'dokku config:export --format json --global'
output, error = subprocess_check_output(command, split=None)
else:
command = 'dokku config:export --format json {0}'.format(app)
output, error = subprocess_check_output(command, split=None)
if error is None:
try:
output = json.loads(output)
except ValueError as e:
error = str(e)
# error = output
return output, error |
imaginary.go | package processor
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"github.com/Allaux/traefik-image-optimize/config"
)
type pipelineOperationParams struct {
Font string `json:"font,omitempty"`
Height int `json:"height,omitempty"`
Opacity float64 `json:"opacity,omitempty"`
Rotate int `json:"rotate,omitempty"`
Text string `json:"text,omitempty"`
Textwidth int `json:"textwidth,omitempty"`
Type string `json:"type,omitempty"`
Width int `json:"width,omitempty"`
StripMeta bool `json:"stripmeta,omitempty"`
}
type pipelineOperation struct {
Operation string `json:"operation"`
Params pipelineOperationParams `json:"params"`
}
type ImaginaryProcessor struct {
Url string
client http.Client
}
func isValidUrl(s string) error {
if s == "" {
return fmt.Errorf("url cannot be empty")
}
u, err := url.ParseRequestURI(s)
if err != nil {
return err
}
if u.Scheme != "http" && u.Scheme != "https" {
return fmt.Errorf("unvalid imaginary scheme")
}
return nil
}
func NewImaginary(conf config.Config) (*ImaginaryProcessor, error) {
err := isValidUrl(conf.Imaginary.Url)
if err != nil {
return nil, err
}
return &ImaginaryProcessor{
client: http.Client{},
Url: conf.Imaginary.Url,
}, nil
}
func (ip *ImaginaryProcessor) Optimize(media []byte, originalFormat string, targetFormat string, quality, width int) ([]byte, string, error) {
ope := []pipelineOperation{
{Operation: "convert", Params: pipelineOperationParams{Type: "webp", StripMeta: true}},
}
if width > 0 {
ope = append(ope, pipelineOperation{Operation: "resize", Params: pipelineOperationParams{Width: width}})
}
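// Illustrative operations payload when width > 0 (the width value is an example):
//   [{"operation":"convert","params":{"type":"webp","stripmeta":true}},
//    {"operation":"resize","params":{"width":320}}]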
opString, err := json.Marshal(ope)
if err != nil {
return nil, "", err
}
u := fmt.Sprintf("%s/pipeline?operations=%s", ip.Url, url.QueryEscape(string(opString)))
method := "POST"
payload := &bytes.Buffer{}
writer := multipart.NewWriter(payload)
fileWriter, err := writer.CreateFormFile("file", "tmp.jpg")
if err != nil |
_, err = fileWriter.Write(media)
if err != nil {
return nil, "", err
}
err = writer.Close()
if err != nil {
return nil, "", err
}
req, err := http.NewRequest(method, u, payload)
if err != nil {
return nil, "", err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
res, err := ip.client.Do(req)
if err != nil {
return nil, "", err
}
body, err := ioutil.ReadAll(res.Body)
if err != nil {
return nil, "", err
}
err = res.Body.Close()
if err != nil {
return nil, "", err
}
return body, "image/webp", nil
}
| {
return nil, "", err
} |
charua.component.ts | import { Component, HostListener} from '@angular/core';
import { Router, ActivatedRoute } from '@angular/router';
import template from './charua.component.html';
import style from './charua.component.scss';
import '../../../../public/scripts/jquery.fullpage.min.js';
declare var $:any;
declare var moment:any;
@Component({
selector: 'charua',
template, | })
export class CharuaComponent {
constructor() {}
} | styles: [ style ] |
maintenance.rs | //! Maintenance example
use etcd_client::*;
#[tokio::main]
async fn | () -> Result<(), Error> {
let mut client = Client::connect(["localhost:2379"], None).await?;
// Get alarm.
let resp = client
.alarm(AlarmAction::Get, AlarmType::None, Some(AlarmOptions::new()))
.await?;
let mems = resp.alarms();
println!("{} members have alarm.", mems.len());
// Get status.
let resp = client.status().await?;
println!("version: {}, db_size: {}", resp.version(), resp.db_size());
// Defragment.
let _resp = client.defragment().await?;
// Get hash value.
let resp = client.hash().await?;
println!("hash: {}", resp.hash());
// Get hash key value.
let resp = client.hash_kv(0).await?;
println!(
"hash: {}, revision: {}",
resp.hash(),
resp.compact_version()
);
// Get snapshot.
let mut msg = client.snapshot().await?;
loop {
let resp = msg.message().await?;
if let Some(r) = resp {
println!("Receive blob len {}", r.blob().len());
if r.remaining_bytes() == 0 {
break;
}
}
}
    // Move leader
let resp = client.member_list().await?;
let member_list = resp.members();
let resp = client.status().await?;
let leader_id = resp.leader();
println!("status {:?}, leader_id {:?}", resp, resp.leader());
let mut member_id = leader_id;
for member in member_list {
if member.id() != leader_id {
member_id = member.id();
println!("member_id {:?}, name is {:?}", member.id(), member.name());
break;
}
}
let resp = client.move_leader(member_id).await?;
let header = resp.header();
if member_id == leader_id {
assert!(header.is_none());
} else {
println!("move_leader header {:?}", header);
}
Ok(())
}
| main |
softFalsy.js | /**
* Checks if a value is falsy, excluding empty string and 0. | * // Returns true
* @example
* softFalsy(0)
* // Returns true
* @example
* softFalsy(null)
* // Returns false
* @param {*} val - value to check
* @return {Boolean} - True if val is truthy, an empty string or 0
*/
export const softFalsy = val => Boolean(val || val === '' || val === 0) | * @function
* @example
* softFalsy('') |
views.py | from django.shortcuts import render
from mcstatus import MinecraftServer
from MinecraftServer.models import MCServer, Admin
# Create your views here.
def | (request):
server = []
status = []
servers = MCServer.objects.all()
for srv in servers:
server.append(srv)
try:
if srv.IPaddress:
mcsrv = MinecraftServer("%s" % srv.IPaddress, int(srv.port))
status.append(mcsrv.status())
elif srv.domain:
mcsrv = MinecraftServer("%s" % srv.domain, int(srv.port))
status.append(mcsrv.status())
else:
status = "Server doesnt contain any addresses. Where am I meant to look? Please contact the admin: " + str(e)
admin = Admin.objects.first()
return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
        except Exception as e:
            status = "Can't reach the server. Please contact the admin: " + str(e)
admin = Admin.objects.first()
return render(request, 'MinecraftServer/index.html', {'status': status, 'admin': admin})
return render(request, 'MinecraftServer/index.html', {'status': status}) | index |
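The view above assumes an MCServer model carrying an IPaddress, a domain, and a port, plus an Admin model; a hypothetical sketch of those models, with field names inferred from the view rather than taken from the project:
from django.db import models
class MCServer(models.Model):
    # Hypothetical fields inferred from the view above
    IPaddress = models.CharField(max_length=255, blank=True)
    domain = models.CharField(max_length=255, blank=True)
    port = models.PositiveIntegerField(default=25565)
class Admin(models.Model):
    # Hypothetical contact fields for the error pages
    name = models.CharField(max_length=255)
    email = models.EmailField()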
index.js | import React from 'react';
import { Link } from 'react-router-dom';
import ReactMarkdown from 'react-markdown';
import { usePosts } from '../../contexts/PostContext';
import '../components.css';
function PostList() {
const postArray = usePosts();
let array = [...postArray];
if (array.length > 3) {
array.splice(3);
}
return (
<div className="postlist">
{array.length &&
array.map((post, i) => {
return (
<div className="post-card" key={post.postId}>
<Link className="post-link" to={`/post/${post.slug}`}>
<h2 className="post-title">{post.title}</h2>
<small>Published on {post.date}</small>
<hr></hr>
<ReactMarkdown className="post-card-summary">
{post.summary}
</ReactMarkdown>
</Link>
<small className="click">Click to read more...</small>
</div>
);
})}
</div> | );
}
export default PostList; |
|
main.go | // Copyright 2016 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"os"
"github.com/palantir/pkg/cobracli"
| func main() {
os.Exit(cobracli.ExecuteWithDefaultParams(cmd.RootCmd))
} | "github.com/palantir/go-nobadfuncs/cmd"
)
|
benchmark.py | from . import util
from .source import util as source_util
import gc
import decimal
import imp
import importlib
import sys
import timeit
def bench_cache(import_, repeat, number):
"""Measure the time it takes to pull from sys.modules."""
name = '<benchmark import>'
with util.uncache(name):
module = imp.new_module(name)
sys.modules[name] = module
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
import_(name)
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def | (import_, repeat, number, loc=100000):
"""Measure importing source from disk.
For worst-case scenario, the line endings are \\r\\n and thus require
universal newline translation.
"""
name = '__benchmark'
with source_util.create_modules(name) as mapping:
with open(mapping[name], 'w') as file:
for x in range(loc):
file.write("{0}\r\n".format(x))
with util.import_state(path=[mapping['.root']]):
runs = []
for x in range(repeat):
start_time = timeit.default_timer()
for y in range(number):
try:
import_(name)
finally:
del sys.modules[name]
end_time = timeit.default_timer()
runs.append(end_time - start_time)
return min(runs)
def main(import_):
args = [('sys.modules', bench_cache, 5, 500000),
('source', bench_importing_source, 5, 10000)]
test_msg = "{test}, {number} times (best of {repeat}):"
result_msg = "{result:.2f} secs"
gc.disable()
try:
for name, meth, repeat, number in args:
result = meth(import_, repeat, number)
print(test_msg.format(test=name, repeat=repeat,
number=number).ljust(40),
result_msg.format(result=result).rjust(10))
finally:
gc.enable()
if __name__ == '__main__':
import optparse
parser = optparse.OptionParser()
parser.add_option('-b', '--builtin', dest='builtin', action='store_true',
default=False, help="use the built-in __import__")
options, args = parser.parse_args()
if args:
raise RuntimeError("unrecognized args: {0}".format(args))
import_ = __import__
if not options.builtin:
import_ = importlib.__import__
main(import_)
| bench_importing_source |
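The best-of-N timing pattern used by both benchmarks above generalizes beyond imports; a standalone sketch of the same idiom (the helper name is made up):
import timeit
def best_of(func, repeat=5, number=1000):
    # Take the minimum total time over `repeat` runs of `number` calls;
    # the minimum filters out scheduler noise better than the mean does.
    runs = []
    for _ in range(repeat):
        start = timeit.default_timer()
        for _ in range(number):
            func()
        runs.append(timeit.default_timer() - start)
    return min(runs)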
flag_float64_slice.go | package cli
import (
"encoding/json"
"flag"
"fmt"
"strconv"
"strings"
)
// Float64Slice wraps []float64 to satisfy flag.Value
type Float64Slice struct {
slice []float64
hasBeenSet bool
}
// NewFloat64Slice makes a *Float64Slice with default values
func | (defaults ...float64) *Float64Slice {
return &Float64Slice{slice: append([]float64{}, defaults...)}
}
// clone allocates a copy of the receiver
func (f *Float64Slice) clone() *Float64Slice {
n := &Float64Slice{
slice: make([]float64, len(f.slice)),
hasBeenSet: f.hasBeenSet,
}
copy(n.slice, f.slice)
return n
}
// Set parses the value into a float64 and appends it to the list of values
func (f *Float64Slice) Set(value string) error {
if !f.hasBeenSet {
f.slice = []float64{}
f.hasBeenSet = true
}
if strings.HasPrefix(value, slPfx) {
// Deserializing assumes overwrite
_ = json.Unmarshal([]byte(strings.Replace(value, slPfx, "", 1)), &f.slice)
f.hasBeenSet = true
return nil
}
tmp, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
f.slice = append(f.slice, tmp)
return nil
}
// String returns a readable representation of this value (for usage defaults)
func (f *Float64Slice) String() string {
return fmt.Sprintf("%#v", f.slice)
}
// Serialize allows Float64Slice to fulfill Serializer
func (f *Float64Slice) Serialize() string {
jsonBytes, _ := json.Marshal(f.slice)
return fmt.Sprintf("%s%s", slPfx, string(jsonBytes))
}
// Value returns the slice of float64s set by this flag
func (f *Float64Slice) Value() []float64 {
return f.slice
}
// Get returns the slice of float64s set by this flag
func (f *Float64Slice) Get() interface{} {
return *f
}
// Float64SliceFlag is a flag with type *Float64Slice
type Float64SliceFlag struct {
Name string
Aliases []string
Usage string
EnvVars []string
FilePath string
Required bool
Hidden bool
Value *Float64Slice
DefaultText string
HasBeenSet bool
}
// IsSet returns whether or not the flag has been set through env or file
func (f *Float64SliceFlag) IsSet() bool {
return f.HasBeenSet
}
// String returns a readable representation of this value
// (for usage defaults)
func (f *Float64SliceFlag) String() string {
return FlagStringer(f)
}
// Names returns the names of the flag
func (f *Float64SliceFlag) Names() []string {
return flagNames(f.Name, f.Aliases)
}
// IsRequired returns whether or not the flag is required
func (f *Float64SliceFlag) IsRequired() bool {
return f.Required
}
// TakesValue returns true if the flag takes a value, otherwise false
func (f *Float64SliceFlag) TakesValue() bool {
return true
}
// GetUsage returns the usage string for the flag
func (f *Float64SliceFlag) GetUsage() string {
return f.Usage
}
// GetValue returns the flags value as string representation and an empty
// string if the flag takes no value at all.
func (f *Float64SliceFlag) GetValue() string {
if f.Value != nil {
return f.Value.String()
}
return ""
}
// IsVisible returns true if the flag is not hidden, otherwise false
func (f *Float64SliceFlag) IsVisible() bool {
return !f.Hidden
}
// Apply populates the flag given the flag set and environment
func (f *Float64SliceFlag) Apply(set *flag.FlagSet) error {
if val, ok := flagFromEnvOrFile(f.EnvVars, f.FilePath); ok {
if val != "" {
f.Value = &Float64Slice{}
for _, s := range strings.Split(val, ",") {
if err := f.Value.Set(strings.TrimSpace(s)); err != nil {
return fmt.Errorf("could not parse %q as float64 slice value for flag %s: %s", f.Value, f.Name, err)
}
}
f.HasBeenSet = true
}
}
if f.Value == nil {
f.Value = &Float64Slice{}
}
copyValue := f.Value.clone()
for _, name := range f.Names() {
set.Var(copyValue, name, f.Usage)
}
return nil
}
// Float64Slice looks up the value of a local Float64SliceFlag, returns
// nil if not found
func (c *Context) Float64Slice(name string) []float64 {
if fs := c.lookupFlagSet(name); fs != nil {
return lookupFloat64Slice(name, fs)
}
return nil
}
func lookupFloat64Slice(name string, set *flag.FlagSet) []float64 {
f := set.Lookup(name)
if f != nil {
if slice, ok := f.Value.(*Float64Slice); ok {
return slice.Value()
}
}
return nil
}
| NewFloat64Slice |
run_local_mertric.py | import os
import sys
import time
import numpy as np
import pandas as pd
import argparse
import math
import config as cfg
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
    description='The Normalized Error Metric Calculation For FashionAI Keypoint Detection Script.')
train_set = parser.add_mutually_exclusive_group()
parser.add_argument('--prediction', default='',
help='The path of file containing the prediction of keypoints.')
parser.add_argument('--cat', type=str2bool, help="whether to print the Normalized Error for each category")
parser.add_argument('--gt', default='./stage1_testb_gt.csv',
help='The path of file containing the ground truth of keypoints.') |
args = parser.parse_args()
def run():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print(sum_dist)
def run_by_cat():
if args.prediction.strip() == '' or args.gt.strip() == '':
parser.error('Must specify the file path of the prediction and ground truth.')
pred_df = pd.read_csv(args.prediction, encoding='utf-8')
gt_df = pd.read_csv(args.gt, encoding='utf-8').set_index('image_id')
for cat_ in cfg.CATEGORIES:
num_v = 0.
sum_dist = 0.
for index, row in pred_df.iterrows():
gt = gt_df.loc[row['image_id']]
img_cat = gt['image_category']
if cat_ not in img_cat:
continue
gt_points = {}
pred_points = {}
for kp in cfg.all_keys:
pred_kp = row[kp].strip().split('_')
gt_kp = gt[kp].strip().split('_')
pred_points[kp] = [int(_) for _ in pred_kp]
gt_points[kp] = [int(_) for _ in gt_kp]
lnorm_name, rnorm_name = cfg.normalize_point_name[img_cat]
lnorm, rnorm = gt_points[lnorm_name][:-1], gt_points[rnorm_name][:-1]
norm_value = math.pow(math.pow(lnorm[0] - rnorm[0], 2.) + math.pow(lnorm[1] - rnorm[1], 2.), 0.5)
for kp in cfg.all_keys:
if gt_points[kp][-1] == -1 or norm_value < 1e-3:
continue
num_v += 1.
dist = math.pow(math.pow(pred_points[kp][0] - gt_points[kp][0], 2.) + math.pow(pred_points[kp][1] - gt_points[kp][1], 2.), 0.5)
sum_dist += dist/norm_value
sum_dist = sum_dist/num_v
print('{}:'.format(cat_), sum_dist)
if __name__ == '__main__':
if not args.cat:
run()
else:
run_by_cat() | |
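Per visible keypoint, the script divides the Euclidean prediction error by a category-specific normalizing distance and averages the results; a compact sketch of that metric on toy inputs (the function and inputs are illustrative, not part of the script):
import math
def normalized_error(preds, gts, lnorm, rnorm):
    # preds/gts map keypoint name -> (x, y, visibility); visibility -1 means unlabeled.
    norm = math.hypot(lnorm[0] - rnorm[0], lnorm[1] - rnorm[1])
    if norm < 1e-3:
        return 0.0
    dists = [math.hypot(preds[k][0] - g[0], preds[k][1] - g[1]) / norm
             for k, g in gts.items() if g[2] != -1]
    return sum(dists) / len(dists) if dists else 0.0
print(normalized_error({'kp': (105, 100, 1)}, {'kp': (100, 100, 1)}, (0, 0), (100, 0)))  # 0.05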
if1.rs | // if1.rs
pub fn bigger(a: i32, b: i32) -> i32 {
// Complete this function to return the bigger number!
    if a > b {
a
} else {
b
}
// Do not use:
// - another function call
// - additional variables
// Execute `rustlings hint if1` for hints
}
// Don't mind this for now :)
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn ten_is_bigger_than_eight() {
assert_eq!(10, bigger(10, 8));
}
#[test]
fn | () {
assert_eq!(42, bigger(32, 42));
}
}
| fortytwo_is_bigger_than_thirtytwo |
mnist.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app as absl_app
from absl import flags
flags.DEFINE_string(name="job-dir", default="/tmp", help="AI Platform Training passes this to the training script.")
import tensorflow as tf # pylint: disable=g-bad-import-order
from official.mnist import dataset
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
LEARNING_RATE = 1e-4
def create_model(data_format):
"""Model to recognize digits in the MNIST dataset.
Network structure is equivalent to:
https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/examples/tutorials/mnist/mnist_deep.py
and
https://github.com/tensorflow/models/blob/master/tutorials/image/mnist/convolutional.py
But uses the tf.keras API.
Args:
data_format: Either 'channels_first' or 'channels_last'. 'channels_first' is
typically faster on GPUs while 'channels_last' is typically faster on
CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
Returns:
A tf.keras.Model.
"""
if data_format == 'channels_first':
input_shape = [1, 28, 28]
else:
assert data_format == 'channels_last'
input_shape = [28, 28, 1]
l = tf.keras.layers
max_pool = l.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
# The model consists of a sequential chain of layers, so tf.keras.Sequential
# (a subclass of tf.keras.Model) makes for a compact description.
return tf.keras.Sequential(
[
l.Reshape(
target_shape=input_shape,
input_shape=(28 * 28,)),
l.Conv2D(
32,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Conv2D(
64,
5,
padding='same',
data_format=data_format,
activation=tf.nn.relu),
max_pool,
l.Flatten(),
l.Dense(1024, activation=tf.nn.relu),
l.Dropout(0.4),
l.Dense(10)
])
def define_mnist_flags():
flags_core.define_base()
flags_core.define_performance(num_parallel_calls=False)
flags_core.define_image()
flags.adopt_module_key_flags(flags_core)
flags_core.set_defaults(data_dir='/tmp/mnist_data',
model_dir='/tmp/mnist_model',
batch_size=100,
train_epochs=40)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = create_model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE)
logits = model(image, training=True)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1))
# Name tensors to be logged with LoggingTensorHook.
tf.identity(LEARNING_RATE, 'learning_rate')
tf.identity(loss, 'cross_entropy')
tf.identity(accuracy[1], name='train_accuracy')
# Save accuracy scalar to Tensorboard output.
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
tf.metrics.accuracy(
labels=labels, predictions=tf.argmax(logits, axis=1)),
})
def run_mnist(flags_obj):
"""Run MNIST training and eval loop.
Args:
flags_obj: An object containing parsed flag values.
"""
model_helpers.apply_clean(flags_obj)
model_function = model_fn
session_config = tf.ConfigProto(
inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
allow_soft_placement=True)
distribution_strategy = distribution_utils.get_distribution_strategy(
flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)
run_config = tf.estimator.RunConfig(
train_distribute=distribution_strategy, session_config=session_config)
data_format = flags_obj.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
mnist_classifier = tf.estimator.Estimator(
model_fn=model_function,
model_dir=flags_obj.model_dir,
config=run_config,
params={
'data_format': data_format,
})
# Set up training and evaluation input functions.
def train_input_fn():
"""Prepare data for training."""
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(flags_obj.data_dir)
ds = ds.cache().shuffle(buffer_size=50000).batch(flags_obj.batch_size)
# Iterate through the dataset a set number (`epochs_between_evals`) of times
# during each training session.
ds = ds.repeat(flags_obj.epochs_between_evals)
return ds
def eval_input_fn():
return dataset.test(flags_obj.data_dir).batch(
flags_obj.batch_size).make_one_shot_iterator().get_next()
# Set up hook that outputs training logs every 100 steps.
train_hooks = hooks_helper.get_train_hooks(
flags_obj.hooks, model_dir=flags_obj.model_dir,
batch_size=flags_obj.batch_size)
# Train and evaluate model.
for _ in range(flags_obj.train_epochs // flags_obj.epochs_between_evals):
mnist_classifier.train(input_fn=train_input_fn, hooks=train_hooks)
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print('\nEvaluation results:\n\t%s\n' % eval_results)
if model_helpers.past_stop_threshold(flags_obj.stop_threshold,
eval_results['accuracy']):
break
# Export the model
if flags_obj.export_dir is not None:
image = tf.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'image': image,
})
mnist_classifier.export_savedmodel(flags_obj.export_dir, input_fn,
strip_default_attrs=True)
def | (_):
run_mnist(flags.FLAGS)
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
define_mnist_flags()
absl_app.run(main)
| main |
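As a quick sanity check of create_model outside the Estimator loop, something like the following should work with the TF 1.x API the script targets (a sketch only, not part of the original training flow):
import numpy as np
import tensorflow as tf
model = create_model('channels_last')  # create_model as defined above
fake_batch = tf.constant(np.zeros((2, 28 * 28), dtype=np.float32))
logits = model(fake_batch, training=False)
print(logits.shape)  # expected static shape: (2, 10)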
image_utils_test.go | package lite
import (
"reflect"
"testing"
)
func | (t *testing.T) {
body := `
image: cn-app-integration:v1.0.0
image: registry.cn-shanghai.aliyuncs.com/cnip/cn-app-integration:v1.0.0
imagePullPolicy: Always
image: cn-app-integration:v1.0.0
# image: cn-app-integration:v1.0.0
name: cn-app-demo`
type args struct {
body string
}
tests := []struct {
name string
args args
want []string
}{
{
"test get iamges form yaml",
args{body},
[]string{"cn-app-integration:v1.0.0", "registry.cn-shanghai.aliyuncs.com/cnip/cn-app-integration:v1.0.0", "cn-app-integration:v1.0.0"},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := DecodeImages(tt.args.body); !reflect.DeepEqual(got, tt.want) {
t.Errorf("decodeImages() = %v, want %v", got, tt.want)
}
})
}
}
| Test_decodeImages |
build.rs | // Copyright (c) 2018-2022 The MobileCoin Foundation
fn main() | {
// sgx_create_report, sgx_verify_report
mc_sgx_build::link_sgx_tservice();
} |
|
transform.interceptor.ts | /**
* See:
* https://gist.github.com/arielweinberger/f5c02406b48bb0e145e8542c7006649f
*/
import {
NestInterceptor,
ExecutionContext,
Injectable,
CallHandler,
} from '@nestjs/common';
import { map } from 'rxjs/operators';
@Injectable()
export class | implements NestInterceptor {
removeKeys(keys: string[], obj: any) {
for (const prop in obj) {
if (keys.includes(prop)) delete obj[prop];
else if (typeof obj[prop] === 'object') this.removeKeys(keys, obj[prop]);
}
return obj;
}
intercept(_context: ExecutionContext, next: CallHandler<any>) {
return next.handle().pipe(
map((data) => {
return this.removeKeys(['password', 'deletedAt'], data);
}),
);
}
}
| TransformInterceptor |
healthchecks.py | """
https://www.ianlewis.org/en/kubernetes-health-checks-django
"""
import logging
import os
import redis
from django.http import HttpResponse, HttpResponseServerError
logger = logging.getLogger("django")
r = redis.Redis(host=os.environ.get("REDIS_SERVICE_HOST"))
class HealthCheckMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
# One-time configuration and initialization.
def __call__(self, request):
if request.method == "GET":
if request.path == "/readiness": | return self.readiness(request)
elif request.path == "/healthz":
return self.healthz(request)
return self.get_response(request)
def healthz(self, request):
"""
Returns that the server is alive.
"""
return HttpResponse("OK")
def readiness(self, request):
# Connect to each database and do a generic standard SQL query
# that doesn't write any data and doesn't depend on any tables
# being present.
try:
from django.db import connections
for name in connections:
cursor = connections[name].cursor()
cursor.execute("SELECT 1;")
row = cursor.fetchone()
if row is None:
return HttpResponseServerError(
"Postgres: invalid response"
)
except Exception as e:
logger.exception(e)
return HttpResponseServerError(
"Postgres: cannot connect to database."
)
        # Ping redis to check that the instance is reachable and
        # responding; Redis.ping() raises an exception on failure.
try:
import redis
r = redis.Redis(host=os.environ.get("REDIS_SERVICE_HOST", "redis"))
r.ping()
except Exception as e:
logger.exception(e)
return HttpResponseServerError("Redis: cannot connect to redis.")
return HttpResponse("OK") | |
snapshotter.go | // +build linux
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package devmapper
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/mount"
"github.com/containerd/containerd/plugin"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/devmapper/dmsetup"
"github.com/containerd/containerd/snapshots/storage"
"github.com/hashicorp/go-multierror"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
func init() {
plugin.Register(&plugin.Registration{
Type: plugin.SnapshotPlugin,
ID: "devmapper",
Config: &Config{},
InitFn: func(ic *plugin.InitContext) (interface{}, error) {
ic.Meta.Platforms = append(ic.Meta.Platforms, ocispec.Platform{
OS: "linux",
Architecture: "amd64",
})
config, ok := ic.Config.(*Config)
if !ok {
return nil, errors.New("invalid devmapper configuration")
}
if config.PoolName == "" {
return nil, errors.New("devmapper not configured")
}
if config.RootPath == "" {
config.RootPath = ic.Root
}
return NewSnapshotter(ic.Context, config)
},
})
}
const (
metadataFileName = "metadata.db"
fsTypeExt4 = "ext4"
)
type closeFunc func() error
// Snapshotter implements containerd's snapshotter (https://godoc.org/github.com/containerd/containerd/snapshots#Snapshotter)
// based on Linux device-mapper targets.
type Snapshotter struct {
store *storage.MetaStore
pool *PoolDevice
config *Config
cleanupFn []closeFunc
closeOnce sync.Once
}
// NewSnapshotter creates new device mapper snapshotter.
// Internally it creates thin-pool device (or reloads if it's already exists) and
// initializes a database file for metadata.
func NewSnapshotter(ctx context.Context, config *Config) (*Snapshotter, error) {
// Make sure snapshotter configuration valid before running
if err := config.parse(); err != nil {
return nil, err
}
if err := config.Validate(); err != nil {
return nil, err
}
var cleanupFn []closeFunc
if err := os.MkdirAll(config.RootPath, 0750); err != nil && !os.IsExist(err) {
return nil, errors.Wrapf(err, "failed to create root directory: %s", config.RootPath)
}
store, err := storage.NewMetaStore(filepath.Join(config.RootPath, metadataFileName))
if err != nil {
return nil, errors.Wrap(err, "failed to create metastore")
}
cleanupFn = append(cleanupFn, store.Close)
poolDevice, err := NewPoolDevice(ctx, config)
if err != nil {
return nil, err
}
cleanupFn = append(cleanupFn, poolDevice.Close)
return &Snapshotter{
store: store,
config: config,
pool: poolDevice,
cleanupFn: cleanupFn,
}, nil
}
// Stat returns the info for an active or committed snapshot from store
func (s *Snapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) {
log.G(ctx).WithField("key", key).Debug("stat")
var (
info snapshots.Info
err error
)
err = s.withTransaction(ctx, false, func(ctx context.Context) error {
_, info, _, err = storage.GetInfo(ctx, key)
return err
})
return info, err
}
// Update updates an existing snapshot info's data
func (s *Snapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) {
log.G(ctx).Debugf("update: %s", strings.Join(fieldpaths, ", "))
var err error
err = s.withTransaction(ctx, true, func(ctx context.Context) error {
info, err = storage.UpdateInfo(ctx, info, fieldpaths...)
return err
})
return info, err
}
// Usage returns the resource usage of an active or committed snapshot excluding the usage of parent snapshots.
func (s *Snapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
log.G(ctx).WithField("key", key).Debug("usage")
var (
id string
err error
info snapshots.Info
usage snapshots.Usage
)
err = s.withTransaction(ctx, false, func(ctx context.Context) error {
id, info, usage, err = storage.GetInfo(ctx, key)
if err != nil |
if info.Kind == snapshots.KindActive {
deviceName := s.getDeviceName(id)
usage.Size, err = s.pool.GetUsage(deviceName)
if err != nil {
return err
}
}
if info.Parent != "" {
// GetInfo returns total number of bytes used by a snapshot (including parent).
// So subtract parent usage in order to get delta consumed by layer itself.
_, _, parentUsage, err := storage.GetInfo(ctx, info.Parent)
if err != nil {
return err
}
usage.Size -= parentUsage.Size
}
return err
})
return usage, err
}
// Mounts return the list of mounts for the active or view snapshot
func (s *Snapshotter) Mounts(ctx context.Context, key string) ([]mount.Mount, error) {
log.G(ctx).WithField("key", key).Debug("mounts")
var (
snap storage.Snapshot
err error
)
err = s.withTransaction(ctx, false, func(ctx context.Context) error {
snap, err = storage.GetSnapshot(ctx, key)
return err
})
	if err != nil {
		return nil, err
	}
	return s.buildMounts(snap), nil
}
// Prepare creates thin device for an active snapshot identified by key
func (s *Snapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
log.G(ctx).WithFields(logrus.Fields{"key": key, "parent": parent}).Debug("prepare")
var (
mounts []mount.Mount
err error
)
err = s.withTransaction(ctx, true, func(ctx context.Context) error {
mounts, err = s.createSnapshot(ctx, snapshots.KindActive, key, parent, opts...)
return err
})
return mounts, err
}
// View creates readonly thin device for the given snapshot key
func (s *Snapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
log.G(ctx).WithFields(logrus.Fields{"key": key, "parent": parent}).Debug("prepare")
var (
mounts []mount.Mount
err error
)
err = s.withTransaction(ctx, true, func(ctx context.Context) error {
mounts, err = s.createSnapshot(ctx, snapshots.KindView, key, parent, opts...)
return err
})
return mounts, err
}
// Commit marks an active snapshot as committed in meta store.
// Block device unmount operation captures snapshot changes by itself, so no
// additional actions needed within Commit operation.
func (s *Snapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
log.G(ctx).WithFields(logrus.Fields{"name": name, "key": key}).Debug("commit")
return s.withTransaction(ctx, true, func(ctx context.Context) error {
id, _, _, err := storage.GetInfo(ctx, key)
if err != nil {
return err
}
deviceName := s.getDeviceName(id)
size, err := s.pool.GetUsage(deviceName)
if err != nil {
return err
}
usage := snapshots.Usage{
Size: size,
}
_, err = storage.CommitActive(ctx, key, name, usage, opts...)
if err != nil {
return err
}
// After committed, the snapshot device will not be directly
		// used anymore. We'd better deactivate it to make it *invisible*
// in userspace, so that tools like LVM2 and fdisk cannot touch it,
// and avoid useless IOs on it.
//
// Before deactivation, we need to flush the outstanding IO by suspend.
// Afterward, we resume it again to prevent a race window which may cause
// a process IO hang. See the issue below for details:
// (https://github.com/containerd/containerd/issues/4234)
err = s.pool.SuspendDevice(ctx, deviceName)
if err != nil {
return err
}
err = s.pool.ResumeDevice(ctx, deviceName)
if err != nil {
return err
}
return s.pool.DeactivateDevice(ctx, deviceName, false, false)
})
}
// Remove removes thin device and snapshot metadata by key
func (s *Snapshotter) Remove(ctx context.Context, key string) error {
log.G(ctx).WithField("key", key).Debug("remove")
return s.withTransaction(ctx, true, func(ctx context.Context) error {
return s.removeDevice(ctx, key)
})
}
func (s *Snapshotter) removeDevice(ctx context.Context, key string) error {
snapID, _, err := storage.Remove(ctx, key)
if err != nil {
return err
}
deviceName := s.getDeviceName(snapID)
if !s.config.AsyncRemove {
if err := s.pool.RemoveDevice(ctx, deviceName); err != nil {
log.G(ctx).WithError(err).Errorf("failed to remove device")
// Tell snapshot GC continue to collect other snapshots.
// Otherwise, one snapshot collection failure will stop
// the GC, and all snapshots won't be collected even though
// having no relationship with the failed one.
return errdefs.ErrFailedPrecondition
}
} else {
// The asynchronous cleanup will do the real device remove work.
log.G(ctx).WithField("device", deviceName).Debug("async remove")
if err := s.pool.MarkDeviceState(ctx, deviceName, Removed); err != nil {
log.G(ctx).WithError(err).Errorf("failed to mark device as removed")
return err
}
}
return nil
}
// Walk iterates through all metadata Info for the stored snapshots and calls the provided function for each.
func (s *Snapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc, fs ...string) error {
log.G(ctx).Debug("walk")
return s.withTransaction(ctx, false, func(ctx context.Context) error {
return storage.WalkInfo(ctx, fn, fs...)
})
}
// ResetPool deactivates and deletes all thin devices in thin-pool.
// Used for cleaning pool after benchmarking.
func (s *Snapshotter) ResetPool(ctx context.Context) error {
names, err := s.pool.metadata.GetDeviceNames(ctx)
if err != nil {
return err
}
var result *multierror.Error
for _, name := range names {
if err := s.pool.RemoveDevice(ctx, name); err != nil {
result = multierror.Append(result, err)
}
}
return result.ErrorOrNil()
}
// Close releases devmapper snapshotter resources.
// All subsequent Close calls will be ignored.
func (s *Snapshotter) Close() error {
log.L.Debug("close")
var result *multierror.Error
s.closeOnce.Do(func() {
for _, fn := range s.cleanupFn {
if err := fn(); err != nil {
result = multierror.Append(result, err)
}
}
})
return result.ErrorOrNil()
}
func (s *Snapshotter) createSnapshot(ctx context.Context, kind snapshots.Kind, key, parent string, opts ...snapshots.Opt) ([]mount.Mount, error) {
snap, err := storage.CreateSnapshot(ctx, kind, key, parent, opts...)
if err != nil {
return nil, err
}
if len(snap.ParentIDs) == 0 {
deviceName := s.getDeviceName(snap.ID)
log.G(ctx).Debugf("creating new thin device '%s'", deviceName)
err := s.pool.CreateThinDevice(ctx, deviceName, s.config.BaseImageSizeBytes)
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to create thin device for snapshot %s", snap.ID)
return nil, err
}
if err := s.mkfs(ctx, deviceName); err != nil {
// Rollback thin device creation if mkfs failed
return nil, multierror.Append(err,
s.pool.RemoveDevice(ctx, deviceName))
}
} else {
parentDeviceName := s.getDeviceName(snap.ParentIDs[0])
snapDeviceName := s.getDeviceName(snap.ID)
log.G(ctx).Debugf("creating snapshot device '%s' from '%s'", snapDeviceName, parentDeviceName)
err := s.pool.CreateSnapshotDevice(ctx, parentDeviceName, snapDeviceName, s.config.BaseImageSizeBytes)
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to create snapshot device from parent %s", parentDeviceName)
return nil, err
}
}
mounts := s.buildMounts(snap)
// Remove default directories not expected by the container image
_ = mount.WithTempMount(ctx, mounts, func(root string) error {
return os.Remove(filepath.Join(root, "lost+found"))
})
return mounts, nil
}
// mkfs creates ext4 filesystem on the given devmapper device
func (s *Snapshotter) mkfs(ctx context.Context, deviceName string) error {
args := []string{
"-E",
// We don't want any zeroing in advance when running mkfs on thin devices (see "man mkfs.ext4")
"nodiscard,lazy_itable_init=0,lazy_journal_init=0",
dmsetup.GetFullDevicePath(deviceName),
}
log.G(ctx).Debugf("mkfs.ext4 %s", strings.Join(args, " "))
output, err := exec.Command("mkfs.ext4", args...).CombinedOutput()
if err != nil {
log.G(ctx).WithError(err).Errorf("failed to write fs:\n%s", string(output))
return err
}
log.G(ctx).Debugf("mkfs:\n%s", string(output))
return nil
}
func (s *Snapshotter) getDeviceName(snapID string) string {
// Add pool name as prefix to avoid collisions with devices from other pools
return fmt.Sprintf("%s-snap-%s", s.config.PoolName, snapID)
}
func (s *Snapshotter) getDevicePath(snap storage.Snapshot) string {
name := s.getDeviceName(snap.ID)
return dmsetup.GetFullDevicePath(name)
}
func (s *Snapshotter) buildMounts(snap storage.Snapshot) []mount.Mount {
var options []string
if snap.Kind != snapshots.KindActive {
options = append(options, "ro")
}
mounts := []mount.Mount{
{
Source: s.getDevicePath(snap),
Type: fsTypeExt4,
Options: options,
},
}
return mounts
}
// withTransaction wraps fn callback with containerd's meta store transaction.
// If callback returns an error or transaction is not writable, database transaction will be discarded.
func (s *Snapshotter) withTransaction(ctx context.Context, writable bool, fn func(ctx context.Context) error) error {
ctx, trans, err := s.store.TransactionContext(ctx, writable)
if err != nil {
return err
}
var result *multierror.Error
err = fn(ctx)
if err != nil {
result = multierror.Append(result, err)
}
// Always rollback if transaction is not writable
if err != nil || !writable {
if terr := trans.Rollback(); terr != nil {
log.G(ctx).WithError(terr).Error("failed to rollback transaction")
result = multierror.Append(result, errors.Wrap(terr, "rollback failed"))
}
} else {
if terr := trans.Commit(); terr != nil {
log.G(ctx).WithError(terr).Error("failed to commit transaction")
result = multierror.Append(result, errors.Wrap(terr, "commit failed"))
}
}
if err := result.ErrorOrNil(); err != nil {
log.G(ctx).WithError(err).Debug("snapshotter error")
// Unwrap if just one error
if len(result.Errors) == 1 {
return result.Errors[0]
}
return err
}
return nil
}
func (s *Snapshotter) Cleanup(ctx context.Context) error {
var removedDevices []*DeviceInfo
if !s.config.AsyncRemove {
return nil
}
if err := s.pool.WalkDevices(ctx, func(info *DeviceInfo) error {
if info.State == Removed {
removedDevices = append(removedDevices, info)
}
return nil
}); err != nil {
log.G(ctx).WithError(err).Errorf("failed to query devices from metastore")
return err
}
var result *multierror.Error
for _, dev := range removedDevices {
log.G(ctx).WithField("device", dev.Name).Debug("cleanup device")
if err := s.pool.RemoveDevice(ctx, dev.Name); err != nil {
log.G(ctx).WithField("device", dev.Name).Error("failed to cleanup device")
result = multierror.Append(result, err)
} else {
			log.G(ctx).WithField("device", dev.Name).Debug("cleaned up device")
}
}
return result.ErrorOrNil()
}
| {
return err
} |
lib.rs | #![doc(html_root_url = "https://docs.rs/tauon/0.0.0")]
//! A micro web framework based on Hyper | //! Not yet ready |
|
getProfile.ts | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import * as inputs from "../types/input";
import * as outputs from "../types/output";
import * as utilities from "../utilities";
/**
* Use this data source to access information about an existing CDN Profile.
*
*
* > This content is derived from https://github.com/terraform-providers/terraform-provider-azurerm/blob/master/website/docs/d/cdn_profile.html.markdown.
*/
export function | (args: GetProfileArgs, opts?: pulumi.InvokeOptions): Promise<GetProfileResult> {
if (!opts) {
opts = {}
}
if (!opts.version) {
opts.version = utilities.getVersion();
}
return pulumi.runtime.invoke("azure:cdn/getProfile:getProfile", {
"name": args.name,
"resourceGroupName": args.resourceGroupName,
}, opts);
}
/**
* A collection of arguments for invoking getProfile.
*/
export interface GetProfileArgs {
/**
* The name of the CDN Profile.
*/
readonly name: string;
/**
* The name of the resource group in which the CDN Profile exists.
*/
readonly resourceGroupName: string;
}
/**
* A collection of values returned by getProfile.
*/
export interface GetProfileResult {
/**
* The Azure Region where the resource exists.
*/
readonly location: string;
readonly name: string;
readonly resourceGroupName: string;
/**
* The pricing related information of current CDN profile.
*/
readonly sku: string;
/**
* A mapping of tags assigned to the resource.
*/
readonly tags: {[key: string]: string};
/**
* id is the provider-assigned unique ID for this managed resource.
*/
readonly id: string;
}
| getProfile |
net.rs | use crate::helper;
use io_uring::opcode::{self, types};
use io_uring::squeue;
use io_uring::IoUring;
use once_cell::sync::OnceCell;
use std::net::{SocketAddr, TcpListener, TcpStream};
use std::os::unix::io::AsRawFd;
use std::{io, mem, thread};
static ECHO_TCP_SERVER: OnceCell<SocketAddr> = OnceCell::new();
pub fn test_tcp_write_read(ring: &mut IoUring) -> anyhow::Result<()> {
println!("test tcp_write_read");
let addr = ECHO_TCP_SERVER.get_or_try_init(init_echo_tcp_server)?;
let stream = TcpStream::connect(addr)?;
let fd = types::Fd(stream.as_raw_fd());
helper::write_read(ring, fd, fd)?;
Ok(())
}
pub fn test_tcp_writev_readv(ring: &mut IoUring) -> anyhow::Result<()> {
println!("test tcp_write_read");
let addr = ECHO_TCP_SERVER.get_or_try_init(init_echo_tcp_server)?;
let stream = TcpStream::connect(addr)?;
let fd = types::Fd(stream.as_raw_fd());
helper::writev_readv(ring, fd, fd)?;
Ok(())
}
pub fn test_tcp_send_recv(ring: &mut IoUring) -> anyhow::Result<()> {
println!("test tcp_send_recv");
let addr = ECHO_TCP_SERVER.get_or_try_init(init_echo_tcp_server)?;
let stream = TcpStream::connect(addr)?;
let fd = types::Fd(stream.as_raw_fd());
let text = b"The quick brown fox jumps over the lazy dog.";
let mut output = vec![0; text.len()];
let send_e = opcode::Send::new(fd, text.as_ptr(), text.len() as _);
let recv_e = opcode::Recv::new(fd, output.as_mut_ptr(), output.len() as _);
unsafe {
let mut queue = ring.submission().available();
let send_e = send_e.build().user_data(0x01).flags(squeue::Flags::IO_LINK);
queue.push(send_e).ok().expect("queue is full");
queue
.push(recv_e.build().user_data(0x02))
.ok()
.expect("queue is full");
}
ring.submit_and_wait(2)?;
let cqes = ring.completion().available().collect::<Vec<_>>();
assert_eq!(cqes.len(), 2);
assert_eq!(cqes[0].user_data(), 0x01);
assert_eq!(cqes[1].user_data(), 0x02);
assert_eq!(cqes[0].result(), text.len() as i32);
assert_eq!(cqes[1].result(), text.len() as i32);
assert_eq!(&output[..cqes[1].result() as usize], text);
Ok(())
}
pub fn test_tcp_sendmsg_recvmsg(ring: &mut IoUring) -> anyhow::Result<()> {
use std::mem::MaybeUninit;
println!("test tcp_sendmsg_recvmsg");
let addr = ECHO_TCP_SERVER.get_or_try_init(init_echo_tcp_server)?;
let sockaddr = socket2::SockAddr::from(*addr);
let stream = TcpStream::connect(addr)?;
let fd = types::Fd(stream.as_raw_fd());
let text = b"The quick brown fox jumps over the lazy dog.";
let mut buf2 = vec![0; text.len()];
let bufs = [io::IoSlice::new(text)];
let mut bufs2 = [io::IoSliceMut::new(&mut buf2)];
// build sendmsg
let mut msg = MaybeUninit::<libc::msghdr>::zeroed();
unsafe {
let p = msg.as_mut_ptr();
(*p).msg_name = sockaddr.as_ptr() as *const _ as *mut _;
(*p).msg_namelen = sockaddr.len();
(*p).msg_iov = bufs.as_ptr() as *const _ as *mut _;
(*p).msg_iovlen = 1;
}
let sendmsg_e = opcode::SendMsg::new(fd, msg.as_ptr());
// build recvmsg
let mut msg = MaybeUninit::<libc::msghdr>::zeroed();
unsafe {
let p = msg.as_mut_ptr();
(*p).msg_name = sockaddr.as_ptr() as *const _ as *mut _;
(*p).msg_namelen = sockaddr.len();
(*p).msg_iov = bufs2.as_mut_ptr() as *mut _;
(*p).msg_iovlen = 1;
}
let recvmsg_e = opcode::RecvMsg::new(fd, msg.as_mut_ptr());
// submit
unsafe {
let mut queue = ring.submission().available();
queue
.push(
sendmsg_e
.build()
.user_data(0x01)
.flags(squeue::Flags::IO_LINK),
)
.ok()
.expect("queue is full");
queue
.push(recvmsg_e.build().user_data(0x02))
.ok()
.expect("queue is full");
}
ring.submit_and_wait(2)?;
// complete
let cqes = ring.completion().available().collect::<Vec<_>>();
assert_eq!(cqes.len(), 2);
assert_eq!(cqes[0].user_data(), 0x01);
assert_eq!(cqes[1].user_data(), 0x02);
assert_eq!(cqes[0].result(), text.len() as _);
assert_eq!(cqes[1].result(), text.len() as _);
assert_eq!(buf2, text);
Ok(())
}
pub fn test_tcp_accept(ring: &mut IoUring) -> anyhow::Result<()> {
println!("test tcp_accept");
let listener = TcpListener::bind("0.0.0.0:0")?;
let addr = listener.local_addr()?;
let handle = thread::spawn(move || {
let stream = TcpStream::connect(addr)?;
let mut stream2 = &stream;
let mut stream3 = &stream;
io::copy(&mut stream2, &mut stream3)
});
let fd = types::Fd(listener.as_raw_fd());
let mut sockaddr: libc::sockaddr = unsafe { mem::zeroed() };
let mut addrlen: libc::socklen_t = mem::size_of::<libc::sockaddr>() as _;
let accept_e = opcode::Accept::new(fd, &mut sockaddr, &mut addrlen);
unsafe {
let mut queue = ring.submission().available();
queue
.push(accept_e.build().user_data(0x0e))
.ok()
.expect("queue is full");
}
ring.submit_and_wait(1)?;
let cqes = ring.completion().available().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x0e);
assert!(cqes[0].result() >= 0);
let fd = cqes[0].result();
helper::write_read(ring, types::Fd(fd), types::Fd(fd))?;
unsafe {
libc::close(fd);
}
handle.join().unwrap()?;
Ok(())
}
pub fn test_tcp_connect(ring: &mut IoUring) -> anyhow::Result<()> {
use socket2::{Domain, Protocol, SockAddr, Socket, Type};
println!("test tcp_connect");
let addr = ECHO_TCP_SERVER.get_or_try_init(init_echo_tcp_server)?;
let sockaddr = SockAddr::from(*addr);
let stream = Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp()))?;
let connect_e = opcode::Connect::new(
types::Fd(stream.as_raw_fd()),
sockaddr.as_ptr() as *const _,
sockaddr.len(),
);
unsafe {
let mut queue = ring.submission().available();
queue
.push(connect_e.build().user_data(0x0f))
.ok()
.expect("queue is full");
}
ring.submit_and_wait(1)?;
let cqes = ring.completion().available().collect::<Vec<_>>();
assert_eq!(cqes.len(), 1);
assert_eq!(cqes[0].user_data(), 0x0f);
assert_eq!(cqes[0].result(), 0);
let stream = stream.into_tcp_stream();
let fd = types::Fd(stream.as_raw_fd());
helper::write_read(ring, fd, fd)?;
Ok(())
}
fn init_echo_tcp_server() -> io::Result<SocketAddr> {
let listener = TcpListener::bind("0.0.0.0:0")?;
let addr = listener.local_addr()?;
thread::spawn(move || {
while let Ok((stream, _)) = listener.accept() {
let mut stream2 = &stream;
let mut stream3 = &stream;
io::copy(&mut stream2, &mut stream3).unwrap(); | Ok(addr)
} | }
});
|
apiCustomElement.ts | import {
ComponentOptionsMixin,
ComponentOptionsWithArrayProps,
ComponentOptionsWithObjectProps,
ComponentOptionsWithoutProps,
ComponentPropsOptions,
ComponentPublicInstance,
ComputedOptions,
EmitsOptions,
MethodOptions,
RenderFunction,
SetupContext,
ComponentInternalInstance,
VNode,
RootHydrateFunction, | defineComponent,
nextTick,
warn,
ConcreteComponent,
ComponentOptions
} from '@vue/runtime-core'
import { camelize, extend, hyphenate, isArray, toNumber } from '@vue/shared'
import { hydrate, render } from '.'
export type VueElementConstructor<P = {}> = {
new (initialProps?: Record<string, any>): VueElement & P
}
// defineCustomElement provides the same type inference as defineComponent
// so most of the following overloads should be kept in sync w/ defineComponent.
// overload 1: direct setup function
export function defineCustomElement<Props, RawBindings = object>(
setup: (
props: Readonly<Props>,
ctx: SetupContext
) => RawBindings | RenderFunction
): VueElementConstructor<Props>
// overload 2: object format with no props
export function defineCustomElement<
Props = {},
RawBindings = {},
D = {},
C extends ComputedOptions = {},
M extends MethodOptions = {},
Mixin extends ComponentOptionsMixin = ComponentOptionsMixin,
Extends extends ComponentOptionsMixin = ComponentOptionsMixin,
E extends EmitsOptions = EmitsOptions,
EE extends string = string
>(
options: ComponentOptionsWithoutProps<
Props,
RawBindings,
D,
C,
M,
Mixin,
Extends,
E,
EE
> & { styles?: string[] }
): VueElementConstructor<Props>
// overload 3: object format with array props declaration
export function defineCustomElement<
PropNames extends string,
RawBindings,
D,
C extends ComputedOptions = {},
M extends MethodOptions = {},
Mixin extends ComponentOptionsMixin = ComponentOptionsMixin,
Extends extends ComponentOptionsMixin = ComponentOptionsMixin,
E extends EmitsOptions = Record<string, any>,
EE extends string = string
>(
options: ComponentOptionsWithArrayProps<
PropNames,
RawBindings,
D,
C,
M,
Mixin,
Extends,
E,
EE
> & { styles?: string[] }
): VueElementConstructor<{ [K in PropNames]: any }>
// overload 4: object format with object props declaration
export function defineCustomElement<
PropsOptions extends Readonly<ComponentPropsOptions>,
RawBindings,
D,
C extends ComputedOptions = {},
M extends MethodOptions = {},
Mixin extends ComponentOptionsMixin = ComponentOptionsMixin,
Extends extends ComponentOptionsMixin = ComponentOptionsMixin,
E extends EmitsOptions = Record<string, any>,
EE extends string = string
>(
options: ComponentOptionsWithObjectProps<
PropsOptions,
RawBindings,
D,
C,
M,
Mixin,
Extends,
E,
EE
> & { styles?: string[] }
): VueElementConstructor<ExtractPropTypes<PropsOptions>>
// overload 5: defining a custom element from the returned value of
// `defineComponent`
export function defineCustomElement(options: {
new (...args: any[]): ComponentPublicInstance
}): VueElementConstructor
export function defineCustomElement(
options: any,
  hydrate?: RootHydrateFunction
): VueElementConstructor {
const Comp = defineComponent(options as any)
class VueCustomElement extends VueElement {
static def = Comp
constructor(initialProps?: Record<string, any>) {
      super(Comp, initialProps, hydrate)
}
}
return VueCustomElement
}
export const defineSSRCustomElement = ((options: any) => {
// @ts-ignore
return defineCustomElement(options, hydrate)
}) as typeof defineCustomElement
const BaseClass = (
typeof HTMLElement !== 'undefined' ? HTMLElement : class {}
) as typeof HTMLElement
type InnerComponentDef = ConcreteComponent & { styles?: string[] }
export class VueElement extends BaseClass {
/**
* @internal
*/
_instance: ComponentInternalInstance | null = null
private _connected = false
private _resolved = false
private _numberProps: Record<string, true> | null = null
private _styles?: HTMLStyleElement[]
constructor(
private _def: InnerComponentDef,
private _props: Record<string, any> = {},
hydrate?: RootHydrateFunction
) {
super()
if (this.shadowRoot && hydrate) {
hydrate(this._createVNode(), this.shadowRoot)
} else {
if (__DEV__ && this.shadowRoot) {
warn(
`Custom element has pre-rendered declarative shadow root but is not ` +
`defined as hydratable. Use \`defineSSRCustomElement\`.`
)
}
this.attachShadow({ mode: 'open' })
}
// set initial attrs
for (let i = 0; i < this.attributes.length; i++) {
this._setAttr(this.attributes[i].name)
}
// watch future attr changes
new MutationObserver(mutations => {
for (const m of mutations) {
this._setAttr(m.attributeName!)
}
}).observe(this, { attributes: true })
}
connectedCallback() {
this._connected = true
if (!this._instance) {
this._resolveDef()
this._update()
}
}
disconnectedCallback() {
this._connected = false
nextTick(() => {
if (!this._connected) {
render(null, this.shadowRoot!)
this._instance = null
}
})
}
/**
* resolve inner component definition (handle possible async component)
*/
private _resolveDef() {
if (this._resolved) {
return
}
const resolve = (def: InnerComponentDef) => {
this._resolved = true
const { props, styles } = def
const hasOptions = !isArray(props)
const rawKeys = props ? (hasOptions ? Object.keys(props) : props) : []
// cast Number-type props set before resolve
let numberProps
if (hasOptions) {
for (const key in this._props) {
const opt = props[key]
if (opt === Number || (opt && opt.type === Number)) {
this._props[key] = toNumber(this._props[key])
;(numberProps || (numberProps = Object.create(null)))[key] = true
}
}
}
if (numberProps) {
this._numberProps = numberProps
this._update()
}
// check if there are props set pre-upgrade or connect
for (const key of Object.keys(this)) {
if (key[0] !== '_') {
this._setProp(key, this[key as keyof this])
}
}
// defining getter/setters on prototype
for (const key of rawKeys.map(camelize)) {
Object.defineProperty(this, key, {
get() {
return this._getProp(key)
},
set(val) {
this._setProp(key, val)
}
})
}
this._applyStyles(styles)
}
const asyncDef = (this._def as ComponentOptions).__asyncLoader
if (asyncDef) {
asyncDef().then(resolve)
} else {
resolve(this._def)
}
}
protected _setAttr(key: string) {
let value = this.getAttribute(key)
if (this._numberProps && this._numberProps[key]) {
value = toNumber(value)
}
this._setProp(camelize(key), value, false)
}
/**
* @internal
*/
protected _getProp(key: string) {
return this._props[key]
}
/**
* @internal
*/
protected _setProp(key: string, val: any, shouldReflect = true) {
if (val !== this._props[key]) {
this._props[key] = val
if (this._instance) {
this._update()
}
// reflect
if (shouldReflect) {
if (val === true) {
this.setAttribute(hyphenate(key), '')
} else if (typeof val === 'string' || typeof val === 'number') {
this.setAttribute(hyphenate(key), val + '')
} else if (!val) {
this.removeAttribute(hyphenate(key))
}
}
}
}
private _update() {
render(this._createVNode(), this.shadowRoot!)
}
private _createVNode(): VNode<any, any> {
const vnode = createVNode(this._def, extend({}, this._props))
if (!this._instance) {
vnode.ce = instance => {
this._instance = instance
instance.isCE = true
// HMR
if (__DEV__) {
instance.ceReload = newStyles => {
            // always reset styles
if (this._styles) {
this._styles.forEach(s => this.shadowRoot!.removeChild(s))
this._styles.length = 0
}
this._applyStyles(newStyles)
// if this is an async component, ceReload is called from the inner
// component so no need to reload the async wrapper
if (!(this._def as ComponentOptions).__asyncLoader) {
// reload
this._instance = null
this._update()
}
}
}
// intercept emit
instance.emit = (event: string, ...args: any[]) => {
this.dispatchEvent(
new CustomEvent(event, {
detail: args
})
)
}
// locate nearest Vue custom element parent for provide/inject
let parent: Node | null = this
while (
(parent =
parent && (parent.parentNode || (parent as ShadowRoot).host))
) {
if (parent instanceof VueElement) {
instance.parent = parent._instance
break
}
}
}
}
return vnode
}
private _applyStyles(styles: string[] | undefined) {
if (styles) {
styles.forEach(css => {
const s = document.createElement('style')
s.textContent = css
this.shadowRoot!.appendChild(s)
// record for HMR
if (__DEV__) {
;(this._styles || (this._styles = [])).push(s)
}
})
}
}
} | ExtractPropTypes,
createVNode, |