file_name (3-137 chars) | prefix (0-918k chars) | suffix (0-962k chars) | middle (0-812k chars)
---|---|---|---|
test_numbits.py | # Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
"""Tests for coverage.numbits"""
import json
import sqlite3
from hypothesis import example, given, settings
from hypothesis.strategies import sets, integers
from coverage import env
from coverage.numbits import (
nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection,
numbits_any_intersection, num_in_numbits, register_sqlite_functions,
)
from tests.coveragetest import CoverageTest
# Hypothesis-generated line number data
line_numbers = integers(min_value=1, max_value=9999)
line_number_sets = sets(line_numbers)
# When coverage-testing ourselves, hypothesis complains about a test being
# flaky because the first run exceeds the deadline (and fails), and the second
# run succeeds. Disable the deadline if we are coverage-testing.
default_settings = settings()
if env.METACOV:
default_settings = settings(default_settings, deadline=None)
def good_numbits(numbits):
"""Assert that numbits is good."""
# It shouldn't end with a zero byte, that should have been trimmed off.
assert (not numbits) or (numbits[-1] != 0)
class NumbitsOpTest(CoverageTest):
"""Tests of the numbits operations in numbits.py."""
run_in_temp_dir = False
@given(line_number_sets)
@settings(default_settings)
def test_conversion(self, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
nums2 = numbits_to_nums(numbits)
assert nums == set(nums2)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_union(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbu = numbits_union(nb1, nb2)
good_numbits(nbu)
union = numbits_to_nums(nbu)
assert nums1 | nums2 == set(union)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbi = numbits_intersection(nb1, nb2)
good_numbits(nbi)
intersection = numbits_to_nums(nbi)
assert nums1 & nums2 == set(intersection)
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_any_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
inter = numbits_any_intersection(nb1, nb2)
expect = bool(nums1 & nums2)
assert expect == bool(inter)
@given(line_numbers, line_number_sets)
@settings(default_settings)
@example(152, {144})
def test_num_in_numbits(self, num, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
is_in = num_in_numbits(num, numbits)
assert (num in nums) == is_in
class NumbitsSqliteFunctionTest(CoverageTest):
"""Tests of the SQLite integration for numbits functions."""
run_in_temp_dir = False
def | (self):
super().setup_test()
conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
self.cursor = conn.cursor()
self.cursor.execute("create table data (id int, numbits blob)")
self.cursor.executemany(
"insert into data (id, numbits) values (?, ?)",
[
(i, nums_to_numbits(range(i, 100, i)))
for i in range(1, 11)
]
)
self.addCleanup(self.cursor.close)
def test_numbits_union(self):
res = self.cursor.execute(
"select numbits_union(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
expected = [
7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49,
54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99,
]
answer = numbits_to_nums(list(res)[0][0])
assert expected == answer
def test_numbits_intersection(self):
res = self.cursor.execute(
"select numbits_intersection(" +
"(select numbits from data where id = 7)," +
"(select numbits from data where id = 9)" +
")"
)
answer = numbits_to_nums(list(res)[0][0])
assert [63] == answer
def test_numbits_any_intersection(self):
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
)
answer = [any_inter for (any_inter,) in res]
assert [1] == answer
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
)
answer = [any_inter for (any_inter,) in res]
assert [0] == answer
def test_num_in_numbits(self):
res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id")
answer = [is_in for (id, is_in) in res]
assert [1, 1, 1, 1, 0, 1, 0, 0, 0, 0] == answer
def test_numbits_to_nums(self):
res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])])
assert [1, 2, 3] == json.loads(res.fetchone()[0])
| setup_test |
file_manager.py | import tempfile
import os
import requests
from tqdm import tqdm
from rich import print as rprint
from felicette.constants import band_tag_map
workdir = os.path.join(os.path.expanduser("~"), "felicette-data")
def check_sat_path(id):
data_path = os.path.join(workdir, id) |
def save_to_file(url, filename, id, info_message):
data_path = os.path.join(workdir, id)
data_id = filename.split("/")[-1].split("-")[1].split(".")[0]
rprint(info_message)
file_path = os.path.join(data_path, filename)
response = requests.get(url, stream=True)
with tqdm.wrapattr(
open(file_path, "wb"),
"write",
miniters=1,
desc=data_id,
total=int(response.headers.get("content-length", 0)),
) as fout:
for chunk in response.iter_content(chunk_size=4096):
fout.write(chunk)
fout.close()
def data_file_exists(filename):
return os.path.exists(filename)
def file_paths_wrt_id(id):
home_path_id = os.path.join(workdir, id)
return {
"base": home_path_id,
"preview": os.path.join(home_path_id, "%s-preview.jpg" % (id)),
"b5": os.path.join(home_path_id, "%s-b5.tiff" % (id)),
"b4": os.path.join(home_path_id, "%s-b4.tiff" % (id)),
"b3": os.path.join(home_path_id, "%s-b3.tiff" % (id)),
"b2": os.path.join(home_path_id, "%s-b2.tiff" % (id)),
"b8": os.path.join(home_path_id, "%s-b8.tiff" % (id)),
"stack": os.path.join(home_path_id, "%s-stack.tiff" % (id)),
"pan_sharpened": os.path.join(home_path_id, "%s-pan.tiff" % (id)),
"output_path": os.path.join(home_path_id, "%s-color-processed.tiff" % (id)),
"output_path_jpeg": os.path.join(
home_path_id, "%s-color-processed.jpeg" % (id)
),
"vegetation_path": os.path.join(home_path_id, "%s-vegetation.tiff" % (id)),
"vegetation_path_jpeg": os.path.join(home_path_id, "%s-vegetation.jpeg" % (id)),
} |
if not os.path.exists(data_path):
os.makedirs(data_path, exist_ok=True) |
binary-search.rs | use binary_search::find;
#[test]
fn finds_a_value_in_an_array_with_one_element() {
assert_eq!(find(&[6], 6), Some(0));
}
#[test]
#[ignore]
fn finds_first_value_in_an_array_with_two_element() {
assert_eq!(find(&[1, 2], 1), Some(0));
}
#[test]
#[ignore]
fn finds_second_value_in_an_array_with_two_element() {
assert_eq!(find(&[1, 2], 2), Some(1));
}
#[test]
#[ignore]
fn | () {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 6), Some(3));
}
#[test]
#[ignore]
fn finds_a_value_at_the_beginning_of_an_array() {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 1), Some(0));
}
#[test]
#[ignore]
fn finds_a_value_at_the_end_of_an_array() {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 11), Some(6));
}
#[test]
#[ignore]
fn finds_a_value_in_an_array_of_odd_length() {
assert_eq!(
find(&[1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 634], 144),
Some(9)
);
}
#[test]
#[ignore]
fn finds_a_value_in_an_array_of_even_length() {
assert_eq!(
find(&[1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377], 21),
Some(5)
);
}
#[test]
#[ignore]
fn identifies_that_a_value_is_not_included_in_the_array() {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 7), None);
}
#[test]
fn a_value_smaller_than_the_arrays_smallest_value_is_not_included() {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 0), None);
}
#[test]
#[ignore]
fn a_value_larger_than_the_arrays_largest_value_is_not_included() {
assert_eq!(find(&[1, 3, 4, 6, 8, 9, 11], 13), None);
}
#[test]
#[ignore]
fn nothing_is_included_in_an_empty_array() {
assert_eq!(find(&[], 1), None);
}
#[test]
#[ignore]
fn nothing_is_found_when_the_left_and_right_bounds_cross() {
assert_eq!(find(&[1, 2], 0), None);
}
#[test]
#[ignore]
#[cfg(feature = "generic")]
fn works_for_arrays() {
assert_eq!(find([6], 6), Some(0));
}
#[test]
#[ignore]
#[cfg(feature = "generic")]
fn works_for_vec() {
let vector = vec![6];
assert_eq!(find(&vector, 6), Some(0));
assert_eq!(find(vector, 6), Some(0));
}
#[test]
#[ignore]
#[cfg(feature = "generic")]
fn works_for_str_elements() {
assert_eq!(find(["a"], "a"), Some(0));
assert_eq!(find(["a", "b"], "b"), Some(1));
}
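// The tests above pin down the contract of `find`: return Some(index) of the
// key in a sorted slice, or None when the key is absent. Below is a minimal
// reference sketch of such a function; it assumes the plain `&[i32]` signature
// and is not the crate's actual implementation (which the `generic` feature
// extends to other element and container types).
#[allow(dead_code)]
fn reference_find(array: &[i32], key: i32) -> Option<usize> {
    let (mut lo, mut hi) = (0usize, array.len());
    while lo < hi {
        // Midpoint computed as lo + (hi - lo) / 2 to avoid overflow.
        let mid = lo + (hi - lo) / 2;
        if array[mid] == key {
            return Some(mid);
        } else if array[mid] < key {
            lo = mid + 1;
        } else {
            hi = mid;
        }
    }
    None
}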
| finds_a_value_in_the_middle_of_an_array |
resource_kafka_topic.go | // Copyright (c) 2017 jelmersnoeck
// Copyright (c) 2018 Aiven, Helsinki, Finland. https://aiven.io/
package aiven
import (
"context"
"fmt"
"log"
"strings"
"time"
"github.com/aiven/aiven-go-client"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/resource"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)
var aivenKafkaTopicSchema = map[string]*schema.Schema{
"project": commonSchemaProjectReference,
"service_name": commonSchemaServiceNameReference,
"topic_name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
Description: complex("The name of the topic.").forceNew().build(),
},
"partitions": {
Type: schema.TypeInt,
Required: true,
Description: "The number of partitions to create in the topic.",
},
"replication": {
Type: schema.TypeInt,
Required: true,
Description: "The replication factor for the topic.",
},
"retention_bytes": {
Type: schema.TypeInt,
Optional: true,
Deprecated: "use config.retention_bytes instead",
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
Description: complex("Retention bytes.").deprecate("use config.retention_bytes instead").build(),
},
"retention_hours": {
Type: schema.TypeInt,
Optional: true,
ValidateFunc: validation.IntAtLeast(-1),
Deprecated: "use config.retention_ms instead",
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
Description: complex("Retention period (hours).").deprecate("use config.retention_ms instead").build(),
},
"minimum_in_sync_replicas": {
Type: schema.TypeInt,
Optional: true,
Deprecated: "use config.min_insync_replicas instead",
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
Description: complex("Minimum required nodes in-sync replicas (ISR) to produce to a partition.").deprecate("use config.min_insync_replicas instead").build(),
},
"cleanup_policy": {
Type: schema.TypeString,
Optional: true,
Deprecated: "use config.cleanup_policy instead",
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
Description: complex("Topic cleanup policy.").deprecate("use config.cleanup_policy instead").possibleValues("delete", "compact").build(),
},
"termination_protection": {
Type: schema.TypeBool,
Optional: true,
Default: false,
Description: "It is a Terraform client-side deletion protection, which prevents a Kafka topic from being deleted. It is recommended to enable this for any production Kafka topic containing critical data.",
},
"tag": {
Type: schema.TypeSet,
Description: "Kafka Topic tag.",
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringLenBetween(1, 64),
Description: complex("Topic tag key.").maxLen(64).build(),
},
"value": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringLenBetween(0, 256),
Description: complex("Topic tag value.").maxLen(256).build(),
},
},
},
},
"config": {
Type: schema.TypeList,
Description: "Kafka topic configuration",
Optional: true,
MaxItems: 1,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cleanup_policy": {
Type: schema.TypeString,
Description: "cleanup.policy value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"compression_type": {
Type: schema.TypeString,
Description: "compression.type value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"delete_retention_ms": {
Type: schema.TypeString,
Description: "delete.retention.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"file_delete_delay_ms": {
Type: schema.TypeString,
Description: "file.delete.delay.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"flush_messages": {
Type: schema.TypeString,
Description: "flush.messages value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"flush_ms": {
Type: schema.TypeString,
Description: "flush.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"index_interval_bytes": {
Type: schema.TypeString,
Description: "index.interval.bytes value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"max_compaction_lag_ms": {
Type: schema.TypeString,
Description: "max.compaction.lag.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"max_message_bytes": {
Type: schema.TypeString,
Description: "max.message.bytes value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"message_downconversion_enable": {
Type: schema.TypeString,
Description: "message.downconversion.enable value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"message_format_version": {
Type: schema.TypeString,
Description: "message.format.version value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"message_timestamp_difference_max_ms": {
Type: schema.TypeString,
Description: "message.timestamp.difference.max.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"message_timestamp_type": {
Type: schema.TypeString,
Description: "message.timestamp.type value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"min_cleanable_dirty_ratio": {
Type: schema.TypeString,
Description: "min.cleanable.dirty.ratio value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"min_compaction_lag_ms": {
Type: schema.TypeString,
Description: "min.compaction.lag.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"min_insync_replicas": {
Type: schema.TypeString,
Description: "min.insync.replicas value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"preallocate": {
Type: schema.TypeString,
Description: "preallocate value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"retention_bytes": {
Type: schema.TypeString,
Description: "retention.bytes value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"retention_ms": {
Type: schema.TypeString,
Description: "retention.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"segment_bytes": {
Type: schema.TypeString,
Description: "segment.bytes value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"segment_index_bytes": {
Type: schema.TypeString,
Description: "segment.index.bytes value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"segment_jitter_ms": {
Type: schema.TypeString,
Description: "segment.jitter.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"segment_ms": {
Type: schema.TypeString,
Description: "segment.ms value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
"unclean_leader_election_enable": {
Type: schema.TypeString,
Description: "unclean.leader.election.enable value",
Optional: true,
DiffSuppressFunc: emptyObjectDiffSuppressFunc,
},
},
},
},
}
func resourceKafkaTopic() *schema.Resource {
return &schema.Resource{
Description: "The Kafka Topic resource allows the creation and management of Aiven Kafka Topics.",
CreateContext: resourceKafkaTopicCreate,
ReadContext: resourceKafkaTopicRead,
UpdateContext: resourceKafkaTopicUpdate,
DeleteContext: resourceKafkaTopicDelete,
Importer: &schema.ResourceImporter{
StateContext: resourceKafkaTopicState,
},
Timeouts: &schema.ResourceTimeout{
Create: schema.DefaultTimeout(5 * time.Minute),
Read: schema.DefaultTimeout(10 * time.Minute),
Delete: schema.DefaultTimeout(2 * time.Minute),
},
Schema: aivenKafkaTopicSchema,
}
}
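// An illustrative Terraform configuration for this resource (the names and
// values below are placeholders chosen for the example, not defaults taken
// from this provider):
//
//	resource "aiven_kafka_topic" "example" {
//	  project      = "my-project"
//	  service_name = "my-kafka"
//	  topic_name   = "example-topic"
//	  partitions   = 3
//	  replication  = 2
//
//	  config {
//	    retention_ms   = "604800000"
//	    cleanup_policy = "delete"
//	  }
//	}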
func resourceKafkaTopicCreate(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
project := d.Get("project").(string)
serviceName := d.Get("service_name").(string)
topicName := d.Get("topic_name").(string)
partitions := d.Get("partitions").(int)
replication := d.Get("replication").(int)
createRequest := aiven.CreateKafkaTopicRequest{
CleanupPolicy: optionalStringPointer(d, "cleanup_policy"),
MinimumInSyncReplicas: optionalIntPointer(d, "minimum_in_sync_replicas"),
Partitions: &partitions,
Replication: &replication,
RetentionBytes: optionalIntPointer(d, "retention_bytes"),
RetentionHours: optionalIntPointer(d, "retention_hours"),
TopicName: topicName,
Config: getKafkaTopicConfig(d),
Tags: getTags(d),
}
w := &KafkaTopicCreateWaiter{
Client: m.(*aiven.Client),
Project: project,
ServiceName: serviceName,
CreateRequest: createRequest,
}
timeout := d.Timeout(schema.TimeoutCreate)
_, err := w.Conf(timeout).WaitForStateContext(ctx)
if err != nil {
return diag.FromErr(err)
}
d.SetId(buildResourceID(project, serviceName, topicName))
_, err = getTopic(ctx, d, m, true)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func getTags(d *schema.ResourceData) []aiven.KafkaTopicTag {
var tags []aiven.KafkaTopicTag
for _, tagD := range d.Get("tag").(*schema.Set).List() {
tagM := tagD.(map[string]interface{})
tag := aiven.KafkaTopicTag{
Key: tagM["key"].(string),
Value: tagM["value"].(string),
}
tags = append(tags, tag)
}
return tags
}
func getKafkaTopicConfig(d *schema.ResourceData) aiven.KafkaTopicConfig {
if len(d.Get("config").([]interface{})) == 0 {
return aiven.KafkaTopicConfig{}
}
if d.Get("config").([]interface{})[0] == nil {
return aiven.KafkaTopicConfig{}
}
configRaw := d.Get("config").([]interface{})[0].(map[string]interface{})
return aiven.KafkaTopicConfig{
CleanupPolicy: configRaw["cleanup_policy"].(string),
CompressionType: configRaw["compression_type"].(string),
DeleteRetentionMs: parseOptionalStringToInt64(configRaw["delete_retention_ms"]),
FileDeleteDelayMs: parseOptionalStringToInt64(configRaw["file_delete_delay_ms"]),
FlushMessages: parseOptionalStringToInt64(configRaw["flush_messages"]),
FlushMs: parseOptionalStringToInt64(configRaw["flush_ms"]),
IndexIntervalBytes: parseOptionalStringToInt64(configRaw["index_interval_bytes"]),
MaxCompactionLagMs: parseOptionalStringToInt64(configRaw["max_compaction_lag_ms"]),
MaxMessageBytes: parseOptionalStringToInt64(configRaw["max_message_bytes"]),
MessageDownconversionEnable: parseOptionalStringToBool(configRaw["message_downconversion_enable"]),
MessageFormatVersion: configRaw["message_format_version"].(string),
MessageTimestampDifferenceMaxMs: parseOptionalStringToInt64(configRaw["message_timestamp_difference_max_ms"]),
MessageTimestampType: configRaw["message_timestamp_type"].(string),
MinCleanableDirtyRatio: parseOptionalStringToFloat64(configRaw["min_cleanable_dirty_ratio"]),
MinCompactionLagMs: parseOptionalStringToInt64(configRaw["min_compaction_lag_ms"]),
MinInsyncReplicas: parseOptionalStringToInt64(configRaw["min_insync_replicas"]),
Preallocate: parseOptionalStringToBool(configRaw["preallocate"]),
RetentionBytes: parseOptionalStringToInt64(configRaw["retention_bytes"]),
RetentionMs: parseOptionalStringToInt64(configRaw["retention_ms"]),
SegmentBytes: parseOptionalStringToInt64(configRaw["segment_bytes"]),
SegmentIndexBytes: parseOptionalStringToInt64(configRaw["segment_index_bytes"]),
SegmentJitterMs: parseOptionalStringToInt64(configRaw["segment_jitter_ms"]),
SegmentMs: parseOptionalStringToInt64(configRaw["segment_ms"]),
UncleanLeaderElectionEnable: parseOptionalStringToBool(configRaw["unclean_leader_election_enable"]),
}
}
func resourceKafkaTopicRead(ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
project, serviceName, topicName := splitResourceID3(d.Id())
topic, err := getTopic(ctx, d, m, false)
if err != nil {
return diag.FromErr(resourceReadHandleNotFound(err, d))
}
if err := d.Set("project", project); err != nil {
return diag.FromErr(err)
}
if err := d.Set("service_name", serviceName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("topic_name", topicName); err != nil {
return diag.FromErr(err)
}
if err := d.Set("partitions", len(topic.Partitions)); err != nil {
return diag.FromErr(err)
}
if err := d.Set("replication", topic.Replication); err != nil {
return diag.FromErr(err)
}
if _, ok := d.GetOk("cleanup_policy"); ok {
if err := d.Set("cleanup_policy", topic.Config.CleanupPolicy.Value); err != nil {
return diag.FromErr(err)
}
}
if _, ok := d.GetOk("minimum_in_sync_replicas"); ok {
if err := d.Set("minimum_in_sync_replicas", topic.Config.MinInsyncReplicas.Value); err != nil {
return diag.FromErr(err)
}
}
if _, ok := d.GetOk("retention_bytes"); ok {
if err := d.Set("retention_bytes", topic.Config.RetentionBytes.Value); err != nil {
return diag.FromErr(err)
}
}
if err := d.Set("config", flattenKafkaTopicConfig(topic)); err != nil {
return diag.FromErr(err)
}
if _, ok := d.GetOk("retention_hours"); ok {
// it could be -1, which means infinite retention
if topic.Config.RetentionMs.Value != -1 {
if err := d.Set("retention_hours", topic.Config.RetentionMs.Value/(1000*60*60)); err != nil {
return diag.FromErr(err)
}
} else {
if err := d.Set("retention_hours", topic.Config.RetentionMs.Value); err != nil {
return diag.FromErr(err)
}
}
}
if err := d.Set("termination_protection", d.Get("termination_protection")); err != nil {
return diag.FromErr(err)
}
if err := d.Set("tag", flattenKafkaTopicTags(topic.Tags)); err != nil {
return diag.Errorf("error setting Kafka Topic Tags for resource %s: %s", d.Id(), err)
}
return nil
}
func flattenKafkaTopicTags(list []aiven.KafkaTopicTag) []map[string]interface{} {
var tags []map[string]interface{}
for _, tagS := range list {
tags = append(tags, map[string]interface{}{
"key": tagS.Key,
"value": tagS.Value,
})
}
return tags
}
func getTopic(ctx context.Context, d *schema.ResourceData, m interface{}, ignore404 bool) (aiven.KafkaTopic, error) {
project, serviceName, topicName := splitResourceID3(d.Id())
w := &KafkaTopicAvailabilityWaiter{
Client: m.(*aiven.Client),
Project: project,
ServiceName: serviceName,
TopicName: topicName,
Ignore404: ignore404,
}
timeout := d.Timeout(schema.TimeoutRead)
topic, err := w.Conf(timeout).WaitForStateContext(ctx)
if err != nil {
return aiven.KafkaTopic{}, fmt.Errorf("error waiting for Aiven Kafka topic to be ACTIVE: %s", err)
}
return topic.(aiven.KafkaTopic), nil
}
func resourceKafkaTopicUpdate(_ context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
client := m.(*aiven.Client)
partitions := d.Get("partitions").(int)
projectName, serviceName, topicName := splitResourceID3(d.Id())
err := client.KafkaTopics.Update(
projectName,
serviceName,
topicName,
aiven.UpdateKafkaTopicRequest{
MinimumInSyncReplicas: optionalIntPointer(d, "minimum_in_sync_replicas"),
Partitions: &partitions,
Replication: optionalIntPointer(d, "replication"),
RetentionBytes: optionalIntPointer(d, "retention_bytes"),
RetentionHours: optionalIntPointer(d, "retention_hours"),
Config: getKafkaTopicConfig(d),
Tags: getTags(d),
},
)
if err != nil {
return diag.FromErr(err)
}
return nil
}
func | (ctx context.Context, d *schema.ResourceData, m interface{}) diag.Diagnostics {
client := m.(*aiven.Client)
projectName, serviceName, topicName := splitResourceID3(d.Id())
if d.Get("termination_protection").(bool) {
return diag.Errorf("cannot delete kafka topic when termination_protection is enabled")
}
waiter := KafkaTopicDeleteWaiter{
Client: client,
ProjectName: projectName,
ServiceName: serviceName,
TopicName: topicName,
}
timeout := d.Timeout(schema.TimeoutDelete)
_, err := waiter.Conf(timeout).WaitForStateContext(ctx)
if err != nil {
return diag.Errorf("error waiting for Aiven Kafka Topic to be DELETED: %s", err)
}
return nil
}
func resourceKafkaTopicState(ctx context.Context, d *schema.ResourceData, m interface{}) ([]*schema.ResourceData, error) {
if len(strings.Split(d.Id(), "/")) != 3 {
return nil, fmt.Errorf("invalid identifier %v, expected <project_name>/<service_name>/<topic_name>", d.Id())
}
di := resourceKafkaTopicRead(ctx, d, m)
if di.HasError() {
return nil, fmt.Errorf("cannot get kafka topic: %v", di)
}
return []*schema.ResourceData{d}, nil
}
func flattenKafkaTopicConfig(t aiven.KafkaTopic) []map[string]interface{} {
return []map[string]interface{}{
{
"cleanup_policy": toOptionalString(t.Config.CleanupPolicy.Value),
"compression_type": toOptionalString(t.Config.CompressionType.Value),
"delete_retention_ms": toOptionalString(t.Config.DeleteRetentionMs.Value),
"file_delete_delay_ms": toOptionalString(t.Config.FileDeleteDelayMs.Value),
"flush_messages": toOptionalString(t.Config.FlushMessages.Value),
"flush_ms": toOptionalString(t.Config.FlushMs.Value),
"index_interval_bytes": toOptionalString(t.Config.IndexIntervalBytes.Value),
"max_compaction_lag_ms": toOptionalString(t.Config.MaxCompactionLagMs.Value),
"max_message_bytes": toOptionalString(t.Config.MaxMessageBytes.Value),
"message_downconversion_enable": toOptionalString(t.Config.MessageDownconversionEnable.Value),
"message_format_version": toOptionalString(t.Config.MessageFormatVersion.Value),
"message_timestamp_difference_max_ms": toOptionalString(t.Config.MessageTimestampDifferenceMaxMs.Value),
"message_timestamp_type": toOptionalString(t.Config.MessageTimestampType.Value),
"min_cleanable_dirty_ratio": toOptionalString(t.Config.MinCleanableDirtyRatio.Value),
"min_compaction_lag_ms": toOptionalString(t.Config.MinCompactionLagMs.Value),
"min_insync_replicas": toOptionalString(t.Config.MinInsyncReplicas.Value),
"preallocate": toOptionalString(t.Config.Preallocate.Value),
"retention_bytes": toOptionalString(t.Config.RetentionBytes.Value),
"retention_ms": toOptionalString(t.Config.RetentionMs.Value),
"segment_bytes": toOptionalString(t.Config.SegmentBytes.Value),
"segment_index_bytes": toOptionalString(t.Config.SegmentIndexBytes.Value),
"segment_jitter_ms": toOptionalString(t.Config.SegmentJitterMs.Value),
"segment_ms": toOptionalString(t.Config.SegmentMs.Value),
"unclean_leader_election_enable": toOptionalString(t.Config.UncleanLeaderElectionEnable.Value),
},
}
}
// KafkaTopicDeleteWaiter is used to wait for Kafka Topic to be deleted.
type KafkaTopicDeleteWaiter struct {
Client *aiven.Client
ProjectName string
ServiceName string
TopicName string
}
// RefreshFunc will call the Aiven client and refresh its state.
func (w *KafkaTopicDeleteWaiter) RefreshFunc() resource.StateRefreshFunc {
return func() (interface{}, string, error) {
err := w.Client.KafkaTopics.Delete(w.ProjectName, w.ServiceName, w.TopicName)
if err != nil {
if !aiven.IsNotFound(err) {
return nil, "REMOVING", nil
}
}
return aiven.KafkaTopic{}, "DELETED", nil
}
}
// Conf sets up the configuration to refresh.
func (w *KafkaTopicDeleteWaiter) Conf(timeout time.Duration) *resource.StateChangeConf {
log.Printf("[DEBUG] Delete waiter timeout %.0f minutes", timeout.Minutes())
return &resource.StateChangeConf{
Pending: []string{"REMOVING"},
Target: []string{"DELETED"},
Refresh: w.RefreshFunc(),
Delay: 1 * time.Second,
Timeout: timeout,
MinTimeout: 1 * time.Second,
}
}
| resourceKafkaTopicDelete |
position_conversions.rs | use bevy::math::Vec3;
use leafwing_2d::continuous::F32;
use leafwing_2d::position::Position;
#[test]
fn position_to_vec3() {
assert_eq!(
Vec3::from(Position::<F32>::new(0., 0.)),
Vec3::new(0., 0., 0.)
);
assert_eq!(
Vec3::from(Position::<F32>::new(1., 0.)),
Vec3::new(1., 0., 0.)
);
assert_eq!(
Vec3::from(Position::<F32>::new(0., 1.)),
Vec3::new(0., 1., 0.)
);
assert_eq!(
Vec3::from(Position::<F32>::new(1., 1.)),
Vec3::new(1., 1., 0.)
);
assert_eq!(
Vec3::from(Position::<F32>::new(-1., -1.)),
Vec3::new(-1., -1., 0.)
);
assert_eq!(
Vec3::from(Position::<F32>::new(-42., 3.)),
Vec3::new(-42., 3., 0.)
);
}
#[test]
fn | () {
assert_eq!(
Ok(Position::<F32>::new(0., 0.)),
Vec3::new(0., 0., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(1., 0.)),
Vec3::new(1., 0., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(0., 1.)),
Vec3::new(0., 1., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(1., 1.)),
Vec3::new(1., 1., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(-1., -1.)),
Vec3::new(-1., -1., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(-42., 3.)),
Vec3::new(-42., 3., 0.).try_into()
);
assert_eq!(
Ok(Position::<F32>::new(-42., 3.)),
Vec3::new(-42., 3., 17.).try_into()
);
}
| vec3_to_position |
sha256.rs | use std::fmt;
use ring::digest::Context;
use ring::digest::SHA256;
use prelude::*;
use util::fmt_slice2hex;
#[derive(Debug, Default, Clone, Copy)]
pub struct Sha256();
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Sha256Value(pub [u8; 32]);
#[derive(Clone)]
pub struct Sha256Context {
context: Context,
}
impl fmt::Debug for Sha256Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "SHA256:")?;
fmt_slice2hex(f, &self.0[..])
}
}
impl Default for Sha256Context {
fn default() -> Self {
Sha256Context {
context: Context::new(&SHA256),
}
}
} | write!(f, "mt::fun::Sha256Hasher{{context: ring::digest::Context}}")
}
}
impl MTAlgorithm for Sha256 {
type Value = Sha256Value;
type Context = Sha256Context;
}
impl MTContext for Sha256Context {
type Out = Sha256Value;
fn new() -> Self {
Sha256Context::default()
}
fn update(&mut self, msg: &[u8]) {
self.context.update(msg)
}
fn finish(self) -> Self::Out {
let mut value: [u8; 32] = Default::default();
let digest = self.context.finish();
value.clone_from_slice(digest.as_ref());
Sha256Value(value)
}
}
impl MTHash for Sha256Value {
fn hash<H: MTContext>(&self, state: &mut H) {
state.update(self.0.as_ref())
}
}
#[cfg(test)]
mod tests {
use abc::MTAlgorithm;
use super::Sha256;
#[test]
fn sha256_works() {
let result = Sha256::eval_hash(&b"123".as_ref());
let as_string = format!("{:?}", result);
let sample = "SHA256:a665a45920422f9d417e4867efdc4fb8a04a1f3fff1fa07e998e86f7f7a27ae3";
assert_eq!(as_string, sample);
let result = Sha256::eval_hash(&result);
let as_string = format!("{:?}", result);
let sample = "SHA256:5a77d1e9612d350b3734f6282259b7ff0a3f87d62cfef5f35e91a5604c0490a3";
assert_eq!(as_string, sample);
}
} |
impl fmt::Debug for Sha256Context {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { |
slack.js | const slack = require("slack");
async function sendMessage(msg) {
return slack.chat.postMessage({
token: process.env.SLACK_TOKEN,
channel: process.env.SLACK_CHANNEL,
text: msg
}).then(response => {
return response;
}).catch(error => {
return error;
})
}
module.exports = { | sendMessage
} | |
schema.rs | use std::env::current_dir;
use std::fs::create_dir_all;
use cosmwasm_schema::{export_schema, remove_schemas, schema_for};
use staking::msg::{
BalanceResponse, ClaimsResponse, HandleMsg, InitMsg, InvestmentResponse, QueryMsg,
TokenInfoResponse,
};
use staking::state::{InvestmentInfo, Supply};
fn | () {
let mut out_dir = current_dir().unwrap();
out_dir.push("schema");
create_dir_all(&out_dir).unwrap();
remove_schemas(&out_dir).unwrap();
export_schema(&schema_for!(InitMsg), &out_dir);
export_schema(&schema_for!(HandleMsg), &out_dir);
export_schema(&schema_for!(QueryMsg), &out_dir);
export_schema(&schema_for!(BalanceResponse), &out_dir);
export_schema(&schema_for!(ClaimsResponse), &out_dir);
export_schema(&schema_for!(InvestmentResponse), &out_dir);
export_schema(&schema_for!(TokenInfoResponse), &out_dir);
export_schema(&schema_for!(InvestmentInfo), &out_dir);
export_schema(&schema_for!(Supply), &out_dir);
}
| main |
bao_signal_handler.py | # vim:fileencoding=utf-8:noet
""" python method """
# Copyright (c) 2010 - 2019, © Badassops LLC / Luc Suryo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. | #
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#*
#* File : bao_signal_handler.py
#* Description : function to handle interrupts
#* Author : Luc Suryo <[email protected]>
#* Version : 0.2
#* Date : Feb 21, 2019
#*
#* History :
#* Date: Author: Info:
#* Jun 1, 2010 LIS First Release
#* Feb 21, 2019 LIS refactored
import signal
import sys
def signal_handler(signum, frame):
""" signal/interrupts handler
@param signum {int} The interrupt ID according to signal.h.
@param frame {string} Memory frame where the interrupted was called.
"""
if signum == signal.SIGHUP:
print('Received -HUP, app does not support reload. {}'.format(frame))
elif signum == signal.SIGINT:
print('Received ctrl-c, aborted on your request. {}'.format(frame))
elif signum == signal.SIGTERM:
print('Received kill -TERM, terminating. {}'.format(frame))
else:
print('Received unknown interrupt: {}'.format(signum))
sys.exit(128 + signum)
def install_int_handler():
""" Install signal/interrupts handler, we capture only SIGHUP, SIGINT and TERM
"""
signal.signal(signal.SIGHUP, signal_handler)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler) | |
db.go | package server
import (
"time"
"github.com/golang/glog"
_ "github.com/jinzhu/gorm/dialects/postgres"
pg "github.com/tradingAI/go/db/postgres"
)
func (s *Servlet) autoReConnectDB() (err error) {
for {
if s.DB.DB().Ping() != nil {
s.DB, err = pg.NewPostgreSQL(s.Conf.DB)
if err != nil {
glog.Error(err) | } | }
}
time.Sleep(time.Second * s.Conf.DB.ReconnectSec)
} |
exception.go | // Copyright (c) 2015 Couchbase, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// package exception implements a Tokenizer which extracts pieces matched by a
// regular expression from the input data, delegates the rest to another
// tokenizer, then insert back extracted parts in the token stream. Use it to
// preserve sequences which a regular tokenizer would alter or remove.
//
// Its constructor takes the following arguments:
//
// "exceptions" ([]string): one or more Go regular expressions matching the
// sequence to preserve. Multiple expressions are combined with "|".
//
// "tokenizer" (string): the name of the tokenizer processing the data not
// matched by "exceptions".
package exception
import (
"fmt"
"regexp"
"strings"
"github.com/blevesearch/bleve/analysis"
"github.com/blevesearch/bleve/registry"
)
const Name = "exception"
type ExceptionsTokenizer struct {
exception *regexp.Regexp
remaining analysis.Tokenizer
}
func NewExceptionsTokenizer(exception *regexp.Regexp, remaining analysis.Tokenizer) *ExceptionsTokenizer {
return &ExceptionsTokenizer{
exception: exception,
remaining: remaining,
}
}
func (t *ExceptionsTokenizer) Tokenize(input []byte) analysis.TokenStream {
rv := make(analysis.TokenStream, 0)
matches := t.exception.FindAllIndex(input, -1)
currInput := 0
lastPos := 0
for _, match := range matches {
start := match[0]
end := match[1]
if start > currInput {
// need to defer to remaining for unprocessed section
intermediate := t.remaining.Tokenize(input[currInput:start])
// add intermediate tokens to our result stream
for _, token := range intermediate {
// adjust token offsets
token.Position += lastPos
token.Start += currInput
token.End += currInput
rv = append(rv, token)
}
lastPos += len(intermediate)
currInput = start
}
// create single token with this regexp match
token := &analysis.Token{
Term: input[start:end],
Start: start,
End: end,
Position: lastPos + 1,
}
rv = append(rv, token)
lastPos++
currInput = end
}
if currInput < len(input) {
// need to defer to remaining for unprocessed section
intermediate := t.remaining.Tokenize(input[currInput:])
// add intermediate tokens to our result stream
for _, token := range intermediate {
// adjust token offsets
token.Position += lastPos
token.Start += currInput
token.End += currInput
rv = append(rv, token)
}
}
return rv
}
func ExceptionsTokenizerConstructor(config map[string]interface{}, cache *registry.Cache) (analysis.Tokenizer, error) {
exceptions := []string{}
iexceptions, ok := config["exceptions"].([]interface{})
if ok {
for _, exception := range iexceptions {
exception, ok := exception.(string)
if ok {
exceptions = append(exceptions, exception)
}
}
}
aexceptions, ok := config["exceptions"].([]string)
if ok {
exceptions = append(exceptions, aexceptions...)
}
if len(exceptions) == 0 {
return nil, fmt.Errorf("no pattern found in 'exception' property")
}
exceptionPattern := strings.Join(exceptions, "|")
r, err := regexp.Compile(exceptionPattern)
if err != nil {
return nil, fmt.Errorf("unable to build regexp tokenizer: %v", err)
}
remainingName, ok := config["tokenizer"].(string)
if !ok {
return nil, fmt.Errorf("must specify tokenizer for remaining input")
}
remaining, err := cache.TokenizerNamed(remainingName)
if err != nil {
return nil, err
}
return NewExceptionsTokenizer(r, remaining), nil
}
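// exampleExceptionConfig is an illustrative sketch (not used by the package)
// of the configuration map ExceptionsTokenizerConstructor expects: one or more
// regexps under "exceptions" and, under "tokenizer", the name of a tokenizer
// registered in the cache. The URL pattern and the "unicode" tokenizer name
// are assumptions made for the example.
var exampleExceptionConfig = map[string]interface{}{
	"exceptions": []interface{}{`[hH][tT][tT][pP][sS]?://(\S)*`},
	"tokenizer":  "unicode",
}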
func init() | {
registry.RegisterTokenizer(Name, ExceptionsTokenizerConstructor)
} |
|
loader.rs | // Copyright 2019 Authors of Red Sift
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use futures::channel::mpsc;
use futures::prelude::*;
use std::convert::AsRef;
use std::fs;
use std::io;
use std::path::Path;
use crate::{Program, cpus};
use crate::load::map_io::PerfMessageStream;
use crate::{Error, KProbe, Map, Module, PerfMap, SocketFilter, UProbe, XDP};
#[derive(Debug)]
pub enum LoaderError {
FileError(io::Error),
ParseError(Error),
LoadError(String, Error),
}
/// High level API to load bpf programs.
pub struct Loader {}
impl Loader {
/// Loads the programs included in `data`.
///
/// This will parse `data` with `Module::parse()` and load all the programs
/// present in the module.
pub fn load(data: &[u8]) -> Result<Loaded, LoaderError> {
let mut module = Module::parse(&data).map_err(|e| LoaderError::ParseError(e))?;
for program in module.programs.iter_mut() {
program
.load(module.version, module.license.clone())
.map_err(|e| LoaderError::LoadError(program.name().to_string(), e))?;
}
let online_cpus = cpus::get_online().unwrap();
let (sender, receiver) = mpsc::unbounded();
for m in module.maps.iter_mut().filter(|m| m.kind == 4) {
for cpuid in online_cpus.iter() {
let name = m.name.clone();
let map = PerfMap::bind(m, -1, *cpuid, 16, -1, 0).unwrap();
let stream = PerfMessageStream::new(name.clone(), map);
let mut s = sender.clone();
let fut = stream.for_each(move |events| {
s.start_send((name.clone(), events)).unwrap();
future::ready(())
});
tokio::spawn(fut);
}
}
Ok(Loaded {
module,
events: receiver,
})
}
/// Loads the BPF programs included in `file`.
///
/// See `load()`.
pub fn load_file<P: AsRef<Path>>(file: P) -> Result<Loaded, LoaderError> {
Loader::load(&fs::read(file).map_err(|e| LoaderError::FileError(e))?)
}
}
/// The `Loaded` object returned by `load()`.
pub struct Loaded {
pub module: Module,
/// The stream of events emitted by the BPF programs.
///
/// # Example
///
/// ```no_run
/// use std::path::Path;
/// use futures::stream::StreamExt;
/// use redbpf::load::Loader;
/// # async {
/// let mut loader = Loader::load_file(&Path::new("probe.elf")).unwrap();
/// while let Some((map_name, events)) = loader.events.next().await {
/// for event in events {
/// // ...
/// }
/// }
/// # };
/// ```
pub events: mpsc::UnboundedReceiver<(String, <PerfMessageStream as Stream>::Item)>,
}
impl Loaded {
pub fn map(&self, name: &str) -> Option<&Map> {
self.module.maps.iter().find(|m| m.name == name)
}
pub fn map_mut(&mut self, name: &str) -> Option<&mut Map> {
self.module.maps.iter_mut().find(|m| m.name == name)
}
pub fn program(&self, name: &str) -> Option<&Program> |
pub fn kprobes_mut(&mut self) -> impl Iterator<Item = &mut KProbe> {
self.module.kprobes_mut()
}
pub fn uprobes_mut(&mut self) -> impl Iterator<Item = &mut UProbe> {
self.module.uprobes_mut()
}
pub fn xdps_mut(&mut self) -> impl Iterator<Item = &mut XDP> {
self.module.xdps_mut()
}
pub fn socket_filters_mut(&mut self) -> impl Iterator<Item = &mut SocketFilter> {
self.module.socket_filters_mut()
}
}
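// Illustrative sketch of driving the loader from an in-memory buffer rather
// than a file; the map name "counts" and the surrounding async context are
// assumptions made for the example, using only `Loader::load`, `Loaded::map`
// and `Loaded::events` defined above.
//
//     async fn run(probe_bytes: &[u8]) -> Result<(), LoaderError> {
//         let mut loaded = Loader::load(probe_bytes)?;
//         if let Some(counts) = loaded.map("counts") {
//             // inspect the map here
//         }
//         while let Some((map_name, events)) = loaded.events.next().await {
//             // handle perf events emitted by the BPF programs
//         }
//         Ok(())
//     }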
| {
self.module.program(name)
} |
alexa keyword crawler.py | import requests
from bs4 import BeautifulSoup as b
from pymongo import MongoClient
import time
from multiprocessing import Pool
url = "http://www.alexa.com/siteinfo/"
file = open("filtered-domains.txt",'r')
client = MongoClient(connect=False)
db = client.alexa
keyword = db.keyword
bcolors={
"HEADER" : '\033[95m',
"INFO" : '\033[94m',
"SUCCESS" : '\033[92m',
"WARNING" : '\033[93m',
"FAIL" : '\033[91m',
"ENDC" : '\033[0m',
"BOLD" : '\033[1m',
"UNDERLINE" : '\033[4m'
}
def put(msg,type):
print bcolors[type.upper()] + ""+"["+time.asctime( time.localtime(time.time()) )+"]\t["+type.strip().capitalize()+"]\t"+str(msg)+"" + bcolors["ENDC"]
def soup(domain,link):
try:
tags = []
table = domain.find("table",{"id":"keywords_top_keywords_table"}).find("tbody").findAll("td") | tags.append(table[i].findAll("span")[1].text.encode('utf-8'))
put("found all tags of "+link,"INFO")
return tags
except Exception as e:
put(e,"WARNING")
def main(line):
try:
tags = soup(b(requests.get((url+line).strip()).content,"lxml"),line)
dic ={}
dic[line.strip()] = tags
put(dic,"SUCCESS")
keyword.insert(dic, check_keys=False)
put(line.strip()+" added to MongoClient","ENDC")
except Exception as e:
put(e,"FAIL")
if __name__ == "__main__":
p = Pool(50)
p.map(main, file) | for i in range(len(table)):
if i%2 == 0: |
json.go | package semantic
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strconv"
"time"
)
func (p *Program) MarshalJSON() ([]byte, error) {
type Alias Program
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: p.NodeType(),
Alias: (*Alias)(p),
}
return json.Marshal(raw)
}
func (p *Program) UnmarshalJSON(data []byte) error {
type Alias Program
raw := struct {
*Alias
Body []json.RawMessage `json:"body"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*p = *(*Program)(raw.Alias)
}
p.Body = make([]Statement, len(raw.Body))
for i, r := range raw.Body {
s, err := unmarshalStatement(r)
if err != nil {
return err
}
p.Body[i] = s
}
return nil
}
func (s *BlockStatement) MarshalJSON() ([]byte, error) {
type Alias BlockStatement
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: s.NodeType(),
Alias: (*Alias)(s),
}
return json.Marshal(raw)
}
func (s *BlockStatement) UnmarshalJSON(data []byte) error {
type Alias BlockStatement
raw := struct {
*Alias
Body []json.RawMessage `json:"body"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*s = *(*BlockStatement)(raw.Alias)
}
s.Body = make([]Statement, len(raw.Body))
for i, r := range raw.Body {
stmt, err := unmarshalStatement(r)
if err != nil {
return err
}
s.Body[i] = stmt
}
return nil
}
func (s *OptionStatement) MarshalJSON() ([]byte, error) {
type Alias OptionStatement
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: s.NodeType(),
Alias: (*Alias)(s),
}
return json.Marshal(raw)
}
func (s *OptionStatement) UnmarshalJSON(data []byte) error {
type Alias OptionStatement
raw := struct {
*Alias
Declaration json.RawMessage `json:"declaration"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*s = *(*OptionStatement)(raw.Alias)
}
e, err := unmarshalVariableDeclaration(raw.Declaration)
if err != nil {
return err
}
s.Declaration = e
return nil
}
func (s *ExpressionStatement) MarshalJSON() ([]byte, error) {
type Alias ExpressionStatement
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: s.NodeType(),
Alias: (*Alias)(s),
}
return json.Marshal(raw)
}
func (s *ExpressionStatement) UnmarshalJSON(data []byte) error {
type Alias ExpressionStatement
raw := struct {
*Alias
Expression json.RawMessage `json:"expression"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*s = *(*ExpressionStatement)(raw.Alias)
}
e, err := unmarshalExpression(raw.Expression)
if err != nil {
return err
}
s.Expression = e
return nil
}
func (s *ReturnStatement) MarshalJSON() ([]byte, error) {
type Alias ReturnStatement
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: s.NodeType(),
Alias: (*Alias)(s),
}
return json.Marshal(raw)
}
func (s *ReturnStatement) UnmarshalJSON(data []byte) error {
type Alias ReturnStatement
raw := struct {
*Alias
Argument json.RawMessage `json:"argument"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*s = *(*ReturnStatement)(raw.Alias)
}
e, err := unmarshalExpression(raw.Argument)
if err != nil {
return err
}
s.Argument = e
return nil
}
func (d *NativeVariableDeclaration) MarshalJSON() ([]byte, error) {
type Alias NativeVariableDeclaration
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: d.NodeType(),
Alias: (*Alias)(d),
}
return json.Marshal(raw)
}
func (d *NativeVariableDeclaration) UnmarshalJSON(data []byte) error {
type Alias NativeVariableDeclaration
raw := struct {
*Alias
Init json.RawMessage `json:"init"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*d = *(*NativeVariableDeclaration)(raw.Alias)
}
e, err := unmarshalExpression(raw.Init)
if err != nil {
return err
}
d.Init = e
return nil
}
func (d *ExternalVariableDeclaration) MarshalJSON() ([]byte, error) {
return nil, errors.New("cannot marshal ExternalVariableDeclaration")
}
func (e *CallExpression) MarshalJSON() ([]byte, error) {
type Alias CallExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *CallExpression) UnmarshalJSON(data []byte) error {
type Alias CallExpression
raw := struct {
*Alias
Callee json.RawMessage `json:"callee"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*CallExpression)(raw.Alias)
}
callee, err := unmarshalExpression(raw.Callee)
if err != nil {
return err
}
e.Callee = callee
return nil
}
func (e *MemberExpression) MarshalJSON() ([]byte, error) {
type Alias MemberExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *MemberExpression) UnmarshalJSON(data []byte) error {
type Alias MemberExpression
raw := struct {
*Alias
Object json.RawMessage `json:"object"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*MemberExpression)(raw.Alias)
}
object, err := unmarshalExpression(raw.Object)
if err != nil {
return err
}
e.Object = object
return nil
}
func (e *FunctionExpression) MarshalJSON() ([]byte, error) {
type Alias FunctionExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *FunctionExpression) UnmarshalJSON(data []byte) error {
type Alias FunctionExpression
raw := struct {
*Alias
Body json.RawMessage `json:"body"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*FunctionExpression)(raw.Alias)
}
body, err := unmarshalNode(raw.Body)
if err != nil {
return err
}
e.Body = body
return nil
}
func (e *FunctionParam) MarshalJSON() ([]byte, error) {
type Alias FunctionParam
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *FunctionParam) UnmarshalJSON(data []byte) error {
type Alias FunctionParam
raw := struct {
*Alias
Default json.RawMessage `json:"default"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*FunctionParam)(raw.Alias)
}
def, err := unmarshalLiteral(raw.Default)
if err != nil {
return err
}
e.Default = def
return nil
}
func (e *BinaryExpression) MarshalJSON() ([]byte, error) {
type Alias BinaryExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *BinaryExpression) UnmarshalJSON(data []byte) error {
type Alias BinaryExpression
raw := struct {
*Alias
Left json.RawMessage `json:"left"`
Right json.RawMessage `json:"right"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*BinaryExpression)(raw.Alias)
}
l, err := unmarshalExpression(raw.Left)
if err != nil {
return err
}
e.Left = l
r, err := unmarshalExpression(raw.Right)
if err != nil {
return err
}
e.Right = r
return nil
}
func (e *UnaryExpression) MarshalJSON() ([]byte, error) {
type Alias UnaryExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *UnaryExpression) UnmarshalJSON(data []byte) error {
type Alias UnaryExpression
raw := struct {
*Alias
Argument json.RawMessage `json:"argument"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*UnaryExpression)(raw.Alias)
}
argument, err := unmarshalExpression(raw.Argument)
if err != nil {
return err
}
e.Argument = argument
return nil
}
func (e *LogicalExpression) MarshalJSON() ([]byte, error) {
type Alias LogicalExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *LogicalExpression) UnmarshalJSON(data []byte) error {
type Alias LogicalExpression
raw := struct {
*Alias
Left json.RawMessage `json:"left"`
Right json.RawMessage `json:"right"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*LogicalExpression)(raw.Alias)
}
l, err := unmarshalExpression(raw.Left)
if err != nil {
return err
}
e.Left = l
r, err := unmarshalExpression(raw.Right)
if err != nil {
return err
}
e.Right = r
return nil
}
func (e *ArrayExpression) MarshalJSON() ([]byte, error) {
type Alias ArrayExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *ArrayExpression) UnmarshalJSON(data []byte) error {
type Alias ArrayExpression
raw := struct {
*Alias
Elements []json.RawMessage `json:"elements"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*ArrayExpression)(raw.Alias)
}
e.Elements = make([]Expression, len(raw.Elements))
for i, r := range raw.Elements {
expr, err := unmarshalExpression(r)
if err != nil {
return err
}
e.Elements[i] = expr
}
return nil
}
func (e *ObjectExpression) MarshalJSON() ([]byte, error) {
type Alias ObjectExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *ConditionalExpression) MarshalJSON() ([]byte, error) {
type Alias ConditionalExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (e *ConditionalExpression) UnmarshalJSON(data []byte) error {
type Alias ConditionalExpression
raw := struct {
*Alias
Test json.RawMessage `json:"test"`
Alternate json.RawMessage `json:"alternate"`
Consequent json.RawMessage `json:"consequent"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*e = *(*ConditionalExpression)(raw.Alias)
}
test, err := unmarshalExpression(raw.Test)
if err != nil {
return err
}
e.Test = test
alternate, err := unmarshalExpression(raw.Alternate)
if err != nil {
return err
}
e.Alternate = alternate
consequent, err := unmarshalExpression(raw.Consequent)
if err != nil {
return err
}
e.Consequent = consequent
return nil
}
func (p *Property) MarshalJSON() ([]byte, error) {
type Alias Property
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: p.NodeType(),
Alias: (*Alias)(p),
}
return json.Marshal(raw)
}
func (p *Property) UnmarshalJSON(data []byte) error {
type Alias Property
raw := struct {
*Alias
Value json.RawMessage `json:"value"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*p = *(*Property)(raw.Alias)
}
if raw.Value != nil {
value, err := unmarshalExpression(raw.Value)
if err != nil {
return err
}
p.Value = value
}
return nil
}
func (e *IdentifierExpression) MarshalJSON() ([]byte, error) {
type Alias IdentifierExpression
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: e.NodeType(),
Alias: (*Alias)(e),
}
return json.Marshal(raw)
}
func (i *Identifier) MarshalJSON() ([]byte, error) {
type Alias Identifier
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: i.NodeType(),
Alias: (*Alias)(i),
}
return json.Marshal(raw)
}
func (l *StringLiteral) MarshalJSON() ([]byte, error) {
type Alias StringLiteral
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
}
return json.Marshal(raw)
}
func (l *BooleanLiteral) MarshalJSON() ([]byte, error) {
type Alias BooleanLiteral
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
}
return json.Marshal(raw)
}
func (l *FloatLiteral) MarshalJSON() ([]byte, error) {
type Alias FloatLiteral
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
}
return json.Marshal(raw)
}
func (l *IntegerLiteral) MarshalJSON() ([]byte, error) {
type Alias IntegerLiteral
raw := struct {
Type string `json:"type"`
*Alias
Value string `json:"value"`
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
Value: strconv.FormatInt(l.Value, 10),
}
return json.Marshal(raw)
}
func (l *IntegerLiteral) UnmarshalJSON(data []byte) error {
type Alias IntegerLiteral
raw := struct {
*Alias
Value string `json:"value"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*l = *(*IntegerLiteral)(raw.Alias)
}
value, err := strconv.ParseInt(raw.Value, 10, 64)
if err != nil {
return err
}
l.Value = value
return nil
}
func (l *UnsignedIntegerLiteral) MarshalJSON() ([]byte, error) {
type Alias UnsignedIntegerLiteral
raw := struct {
Type string `json:"type"`
*Alias
Value string `json:"value"`
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
Value: strconv.FormatUint(l.Value, 10),
}
return json.Marshal(raw)
}
func (l *UnsignedIntegerLiteral) UnmarshalJSON(data []byte) error {
type Alias UnsignedIntegerLiteral
raw := struct {
*Alias
Value string `json:"value"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*l = *(*UnsignedIntegerLiteral)(raw.Alias)
}
value, err := strconv.ParseUint(raw.Value, 10, 64)
if err != nil {
return err
}
l.Value = value
return nil
}
func (l *RegexpLiteral) MarshalJSON() ([]byte, error) {
type Alias RegexpLiteral
raw := struct {
Type string `json:"type"`
*Alias
Value string `json:"value"`
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
Value: l.Value.String(),
}
return json.Marshal(raw)
}
func (l *RegexpLiteral) UnmarshalJSON(data []byte) error {
type Alias RegexpLiteral
raw := struct {
*Alias
Value string `json:"value"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*l = *(*RegexpLiteral)(raw.Alias)
}
value, err := regexp.Compile(raw.Value)
if err != nil {
return err
}
l.Value = value
return nil
}
func (l *DurationLiteral) MarshalJSON() ([]byte, error) {
type Alias DurationLiteral
raw := struct {
Type string `json:"type"`
*Alias
Value string `json:"value"`
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
Value: l.Value.String(),
}
return json.Marshal(raw)
}
func (l *DurationLiteral) UnmarshalJSON(data []byte) error {
type Alias DurationLiteral
raw := struct {
*Alias
Value string `json:"value"`
}{}
if err := json.Unmarshal(data, &raw); err != nil {
return err
}
if raw.Alias != nil {
*l = *(*DurationLiteral)(raw.Alias)
}
value, err := time.ParseDuration(raw.Value)
if err != nil {
return err
}
l.Value = value
return nil
}
func (l *DateTimeLiteral) MarshalJSON() ([]byte, error) {
type Alias DateTimeLiteral
raw := struct {
Type string `json:"type"`
*Alias
}{
Type: l.NodeType(),
Alias: (*Alias)(l),
}
return json.Marshal(raw)
}
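// checkNullMsg reports whether the raw message is empty or the JSON literal "null".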
func checkNullMsg(msg json.RawMessage) bool {
switch len(msg) {
case 0:
return true
case 4:
return string(msg) == "null"
default:
return false
}
}
func unmarshalStatement(msg json.RawMessage) (Statement, error) {
if checkNullMsg(msg) {
return nil, nil
}
n, err := unmarshalNode(msg)
if err != nil {
return nil, err
}
s, ok := n.(Statement)
if !ok {
return nil, fmt.Errorf("node %q is not a statement", n.NodeType())
}
return s, nil
}
func unmarshalExpression(msg json.RawMessage) (Expression, error) {
if checkNullMsg(msg) {
return nil, nil
}
n, err := unmarshalNode(msg)
if err != nil {
return nil, err
}
e, ok := n.(Expression)
if !ok {
return nil, fmt.Errorf("node %q is not an expression", n.NodeType())
}
return e, nil
}
func unmarshalVariableDeclaration(msg json.RawMessage) (VariableDeclaration, error) {
if checkNullMsg(msg) {
return nil, nil
}
n, err := unmarshalNode(msg)
if err != nil {
return nil, err
}
v, ok := n.(VariableDeclaration)
if !ok {
return nil, fmt.Errorf("node %q is not a variable declaration", n.NodeType())
}
return v, nil
}
func | (msg json.RawMessage) (Literal, error) {
if checkNullMsg(msg) {
return nil, nil
}
n, err := unmarshalNode(msg)
if err != nil {
return nil, err
}
e, ok := n.(Literal)
if !ok {
return nil, fmt.Errorf("node %q is not a literal", n.NodeType())
}
return e, nil
}
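// unmarshalNode reads the "type" field of the raw message, allocates the
// matching concrete node type, and unmarshals the message into it.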
func unmarshalNode(msg json.RawMessage) (Node, error) {
if checkNullMsg(msg) {
return nil, nil
}
type typeRawMessage struct {
Type string `json:"type"`
}
typ := typeRawMessage{}
if err := json.Unmarshal(msg, &typ); err != nil {
return nil, err
}
var node Node
switch typ.Type {
case "Program":
node = new(Program)
case "BlockStatement":
node = new(BlockStatement)
case "OptionStatement":
node = new(OptionStatement)
case "ExpressionStatement":
node = new(ExpressionStatement)
case "ReturnStatement":
node = new(ReturnStatement)
case "NativeVariableDeclaration":
node = new(NativeVariableDeclaration)
case "CallExpression":
node = new(CallExpression)
case "MemberExpression":
node = new(MemberExpression)
case "BinaryExpression":
node = new(BinaryExpression)
case "UnaryExpression":
node = new(UnaryExpression)
case "LogicalExpression":
node = new(LogicalExpression)
case "ObjectExpression":
node = new(ObjectExpression)
case "ConditionalExpression":
node = new(ConditionalExpression)
case "ArrayExpression":
node = new(ArrayExpression)
case "Identifier":
node = new(Identifier)
case "IdentifierExpression":
node = new(IdentifierExpression)
case "StringLiteral":
node = new(StringLiteral)
case "BooleanLiteral":
node = new(BooleanLiteral)
case "FloatLiteral":
node = new(FloatLiteral)
case "IntegerLiteral":
node = new(IntegerLiteral)
case "UnsignedIntegerLiteral":
node = new(UnsignedIntegerLiteral)
case "RegexpLiteral":
node = new(RegexpLiteral)
case "DurationLiteral":
node = new(DurationLiteral)
case "DateTimeLiteral":
node = new(DateTimeLiteral)
case "ArrowFunctionExpression":
node = new(FunctionExpression)
case "Property":
node = new(Property)
default:
return nil, fmt.Errorf("unknown type %q", typ.Type)
}
if err := json.Unmarshal(msg, node); err != nil {
return nil, err
}
return node, nil
}
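// UnmarshalNode parses JSON-encoded data and returns the corresponding AST Node.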
func UnmarshalNode(data []byte) (Node, error) {
return unmarshalNode((json.RawMessage)(data))
}
| unmarshalLiteral |
device_test.go | //
// Copyright (C) 2020-2021 IOTech Ltd
//
// SPDX-License-Identifier: Apache-2.0
package http
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/edgexfoundry/edgex-go/internal/core/metadata/container"
dbMock "github.com/edgexfoundry/edgex-go/internal/core/metadata/infrastructure/interfaces/mocks"
"github.com/edgexfoundry/go-mod-bootstrap/v2/di"
"github.com/edgexfoundry/go-mod-core-contracts/v2/common"
"github.com/edgexfoundry/go-mod-core-contracts/v2/dtos"
commonDTO "github.com/edgexfoundry/go-mod-core-contracts/v2/dtos/common"
"github.com/edgexfoundry/go-mod-core-contracts/v2/dtos/requests"
responseDTO "github.com/edgexfoundry/go-mod-core-contracts/v2/dtos/responses"
"github.com/edgexfoundry/go-mod-core-contracts/v2/errors"
"github.com/edgexfoundry/go-mod-core-contracts/v2/models"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
)
var testDeviceLabels = []string{"MODBUS", "TEMP"}
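// buildTestDeviceRequest returns an AddDeviceRequest pre-populated with valid test data.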
func buildTestDeviceRequest() requests.AddDeviceRequest {
var testAutoEvents = []dtos.AutoEvent{
{SourceName: "TestResource", Interval: "300ms", OnChange: true},
}
var testProtocols = map[string]dtos.ProtocolProperties{
"modbus-ip": {
"Address": "localhost",
"Port": "1502",
"UnitID": "1",
},
}
var testAddDeviceReq = requests.AddDeviceRequest{
BaseRequest: commonDTO.BaseRequest{
RequestId: ExampleUUID,
Versionable: commonDTO.NewVersionable(),
},
Device: dtos.Device{
Id: ExampleUUID,
Name: TestDeviceName,
ServiceName: TestDeviceServiceName,
ProfileName: TestDeviceProfileName,
AdminState: models.Locked,
OperatingState: models.Up,
Labels: testDeviceLabels,
Location: "{40lat;45long}",
AutoEvents: testAutoEvents,
Protocols: testProtocols,
},
}
return testAddDeviceReq
}
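// buildTestUpdateDeviceRequest returns an UpdateDeviceRequest pre-populated with valid test data.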
func buildTestUpdateDeviceRequest() requests.UpdateDeviceRequest {
testUUID := ExampleUUID
testName := TestDeviceName
testDescription := TestDescription
testServiceName := TestDeviceServiceName
testProfileName := TestDeviceProfileName
testAdminState := models.Unlocked
testOperatingState := models.Up
testLastReported := int64(123546789)
testLastConnected := int64(123546789)
testNotify := false
var testAutoEvents = []dtos.AutoEvent{
{SourceName: "TestResource", Interval: "300ms", OnChange: true},
}
var testProtocols = map[string]dtos.ProtocolProperties{
"modbus-ip": {
"Address": "localhost",
"Port": "1502",
"UnitID": "1",
},
}
var testUpdateDeviceReq = requests.UpdateDeviceRequest{
BaseRequest: commonDTO.BaseRequest{
RequestId: ExampleUUID,
Versionable: commonDTO.NewVersionable(),
},
Device: dtos.UpdateDevice{ | Description: &testDescription,
ServiceName: &testServiceName,
ProfileName: &testProfileName,
AdminState: &testAdminState,
OperatingState: &testOperatingState,
LastReported: &testLastReported,
LastConnected: &testLastConnected,
Labels: []string{"MODBUS", "TEMP"},
Location: "{40lat;45long}",
AutoEvents: testAutoEvents,
Protocols: testProtocols,
Notify: &testNotify,
},
}
return testUpdateDeviceReq
}
func TestAddDevice(t *testing.T) {
testDevice := buildTestDeviceRequest()
deviceModel := requests.AddDeviceReqToDeviceModels([]requests.AddDeviceRequest{testDevice})[0]
expectedRequestId := ExampleUUID
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
valid := testDevice
dbClientMock.On("DeviceServiceNameExists", deviceModel.ServiceName).Return(true, nil)
dbClientMock.On("DeviceProfileNameExists", deviceModel.ProfileName).Return(true, nil)
dbClientMock.On("AddDevice", deviceModel).Return(deviceModel, nil)
dbClientMock.On("DeviceServiceByName", deviceModel.ServiceName).Return(models.DeviceService{BaseAddress: testBaseAddress}, nil)
notFoundService := testDevice
notFoundService.Device.ServiceName = "notFoundService"
dbClientMock.On("DeviceServiceNameExists", notFoundService.Device.ServiceName).Return(false, nil)
notFoundProfile := testDevice
notFoundProfile.Device.ProfileName = "notFoundProfile"
dbClientMock.On("DeviceProfileNameExists", notFoundProfile.Device.ProfileName).Return(false, nil)
noName := testDevice
noName.Device.Name = ""
noAdminState := testDevice
noAdminState.Device.AdminState = ""
noOperatingState := testDevice
noOperatingState.Device.OperatingState = ""
invalidAdminState := testDevice
invalidAdminState.Device.AdminState = "invalidAdminState"
invalidOperatingState := testDevice
invalidOperatingState.Device.OperatingState = "invalidOperatingState"
noServiceName := testDevice
noServiceName.Device.ServiceName = ""
noProfileName := testDevice
noProfileName.Device.ProfileName = ""
noProtocols := testDevice
noProtocols.Device.Protocols = nil
emptyProtocols := testDevice
emptyProtocols.Device.Protocols = map[string]dtos.ProtocolProperties{}
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
request []requests.AddDeviceRequest
expectedStatusCode int
}{
{"Valid", []requests.AddDeviceRequest{valid}, http.StatusCreated},
{"Invalid - not found service", []requests.AddDeviceRequest{notFoundService}, http.StatusNotFound},
{"Invalid - not found profile", []requests.AddDeviceRequest{notFoundProfile}, http.StatusNotFound},
{"Invalid - no name", []requests.AddDeviceRequest{noName}, http.StatusBadRequest},
{"Invalid - no adminState", []requests.AddDeviceRequest{noAdminState}, http.StatusBadRequest},
{"Invalid - no operatingState", []requests.AddDeviceRequest{noOperatingState}, http.StatusBadRequest},
{"Invalid - invalid adminState", []requests.AddDeviceRequest{invalidAdminState}, http.StatusBadRequest},
{"Invalid - invalid operatingState", []requests.AddDeviceRequest{invalidOperatingState}, http.StatusBadRequest},
{"Invalid - no service name", []requests.AddDeviceRequest{noServiceName}, http.StatusBadRequest},
{"Invalid - no profile name", []requests.AddDeviceRequest{noProfileName}, http.StatusBadRequest},
{"Invalid - no protocols", []requests.AddDeviceRequest{noProtocols}, http.StatusBadRequest},
{"Invalid - empty protocols", []requests.AddDeviceRequest{emptyProtocols}, http.StatusBadRequest},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
jsonData, err := json.Marshal(testCase.request)
require.NoError(t, err)
reader := strings.NewReader(string(jsonData))
req, err := http.NewRequest(http.MethodPost, common.ApiDeviceRoute, reader)
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.AddDevice)
handler.ServeHTTP(recorder, req)
if testCase.expectedStatusCode == http.StatusBadRequest {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, res.StatusCode, "BaseResponse status code not as expected")
assert.NotEmpty(t, res.Message, "Message is empty")
} else {
var res []commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, http.StatusMultiStatus, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res[0].ApiVersion, "API Version not as expected")
if res[0].RequestId != "" {
assert.Equal(t, expectedRequestId, res[0].RequestId, "RequestID not as expected")
}
assert.Equal(t, testCase.expectedStatusCode, res[0].StatusCode, "BaseResponse status code not as expected")
}
})
}
}
func TestDeleteDeviceByName(t *testing.T) {
device := dtos.ToDeviceModel(buildTestDeviceRequest().Device)
noName := ""
notFoundName := "notFoundName"
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeleteDeviceByName", device.Name).Return(nil)
dbClientMock.On("DeleteDeviceByName", notFoundName).Return(errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "device doesn't exist in the database", nil))
dbClientMock.On("DeviceByName", notFoundName).Return(device, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "device doesn't exist in the database", nil))
dbClientMock.On("DeviceByName", device.Name).Return(device, nil)
dbClientMock.On("DeviceServiceByName", device.ServiceName).Return(models.DeviceService{BaseAddress: testBaseAddress}, nil)
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
require.NotNil(t, controller)
tests := []struct {
name string
deviceName string
expectedStatusCode int
}{
{"Valid - delete device by name", device.Name, http.StatusOK},
{"Invalid - name parameter is empty", noName, http.StatusBadRequest},
{"Invalid - device not found by name", notFoundName, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
reqPath := fmt.Sprintf("%s/%s", common.ApiDeviceByNameRoute, testCase.deviceName)
req, err := http.NewRequest(http.MethodGet, reqPath, http.NoBody)
req = mux.SetURLVars(req, map[string]string{common.Name: testCase.deviceName})
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.DeleteDeviceByName)
handler.ServeHTTP(recorder, req)
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
if testCase.expectedStatusCode == http.StatusOK {
assert.Empty(t, res.Message, "Message should be empty when it is successful")
} else {
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
}
})
}
}
func TestAllDeviceByServiceName(t *testing.T) {
device := dtos.ToDeviceModel(buildTestDeviceRequest().Device)
testServiceA := "testServiceA"
testServiceB := "testServiceB"
device1WithServiceA := device
device1WithServiceA.ServiceName = testServiceA
device2WithServiceA := device
device2WithServiceA.ServiceName = testServiceA
device3WithServiceB := device
device3WithServiceB.ServiceName = testServiceB
devices := []models.Device{device1WithServiceA, device2WithServiceA, device3WithServiceB}
expectedTotalCountServiceA := uint32(2)
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeviceCountByServiceName", testServiceA).Return(expectedTotalCountServiceA, nil)
dbClientMock.On("DevicesByServiceName", 0, 5, testServiceA).Return([]models.Device{devices[0], devices[1]}, nil)
dbClientMock.On("DevicesByServiceName", 1, 1, testServiceA).Return([]models.Device{devices[1]}, nil)
dbClientMock.On("DevicesByServiceName", 4, 1, testServiceB).Return([]models.Device{}, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "query objects bounds out of range.", nil))
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
offset string
limit string
serviceName string
errorExpected bool
expectedCount int
expectedTotalCount uint32
expectedStatusCode int
}{
{"Valid - get devices with serviceName", "0", "5", testServiceA, false, 2, expectedTotalCountServiceA, http.StatusOK},
{"Valid - get devices with offset and no labels", "1", "1", testServiceA, false, 1, expectedTotalCountServiceA, http.StatusOK},
{"Invalid - offset out of range", "4", "1", testServiceB, true, 0, 0, http.StatusNotFound},
{"Invalid - get devices without serviceName", "0", "10", "", true, 0, 0, http.StatusBadRequest},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, common.ApiDeviceByServiceNameRoute, http.NoBody)
query := req.URL.Query()
query.Add(common.Offset, testCase.offset)
query.Add(common.Limit, testCase.limit)
req.URL.RawQuery = query.Encode()
req = mux.SetURLVars(req, map[string]string{common.Name: testCase.serviceName})
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.DevicesByServiceName)
handler.ServeHTTP(recorder, req)
// Assert
if testCase.errorExpected {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
} else {
var res responseDTO.MultiDevicesResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.Equal(t, testCase.expectedCount, len(res.Devices), "Device count not as expected")
assert.Equal(t, testCase.expectedTotalCount, res.TotalCount, "Total count not as expected")
assert.Empty(t, res.Message, "Message should be empty when it is successful")
}
})
}
}
func TestDeviceNameExists(t *testing.T) {
testName := TestDeviceName
notFoundName := "notFoundName"
emptyName := ""
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeviceNameExists", testName).Return(true, nil)
dbClientMock.On("DeviceNameExists", notFoundName).Return(false, nil)
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
deviceName string
errorExpected bool
expectedStatusCode int
}{
{"Valid - check device by name", testName, false, http.StatusOK},
{"Invalid - name parameter is empty", emptyName, true, http.StatusBadRequest},
{"Invalid - device not found by name", notFoundName, false, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
reqPath := fmt.Sprintf("%s/%s", common.ApiDeviceNameExistsRoute, testCase.deviceName)
req, err := http.NewRequest(http.MethodGet, reqPath, http.NoBody)
req = mux.SetURLVars(req, map[string]string{common.Name: testCase.deviceName})
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.DeviceNameExists)
handler.ServeHTTP(recorder, req)
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
if testCase.errorExpected {
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
} else {
assert.Empty(t, res.Message, "Message should be empty when it is successful")
}
})
}
}
func TestPatchDevice(t *testing.T) {
expectedRequestId := ExampleUUID
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
testReq := buildTestUpdateDeviceRequest()
dsModels := models.Device{
Id: *testReq.Device.Id,
Name: *testReq.Device.Name,
Description: *testReq.Device.Description,
Labels: testReq.Device.Labels,
AdminState: models.AdminState(*testReq.Device.AdminState),
OperatingState: models.OperatingState(*testReq.Device.OperatingState),
LastConnected: *testReq.Device.LastConnected,
LastReported: *testReq.Device.LastReported,
Location: testReq.Device.Location,
ServiceName: *testReq.Device.ServiceName,
ProfileName: *testReq.Device.ProfileName,
AutoEvents: dtos.ToAutoEventModels(testReq.Device.AutoEvents),
Protocols: dtos.ToProtocolModels(testReq.Device.Protocols),
Notify: *testReq.Device.Notify,
}
valid := testReq
dbClientMock.On("DeviceServiceNameExists", *valid.Device.ServiceName).Return(true, nil)
dbClientMock.On("DeviceProfileNameExists", *valid.Device.ProfileName).Return(true, nil)
dbClientMock.On("DeviceById", *valid.Device.Id).Return(dsModels, nil)
dbClientMock.On("UpdateDevice", mock.Anything).Return(nil)
dbClientMock.On("DeviceServiceByName", *valid.Device.ServiceName).Return(models.DeviceService{BaseAddress: testBaseAddress}, nil)
validWithNoReqID := testReq
validWithNoReqID.RequestId = ""
validWithNoId := testReq
validWithNoId.Device.Id = nil
dbClientMock.On("DeviceByName", *validWithNoId.Device.Name).Return(dsModels, nil)
validWithNoName := testReq
validWithNoName.Device.Name = nil
invalidId := testReq
invalidUUID := "invalidUUID"
invalidId.Device.Id = &invalidUUID
emptyString := ""
emptyId := testReq
emptyId.Device.Id = &emptyString
emptyId.Device.Name = nil
emptyName := testReq
emptyName.Device.Id = nil
emptyName.Device.Name = &emptyString
invalidNoIdAndName := testReq
invalidNoIdAndName.Device.Id = nil
invalidNoIdAndName.Device.Name = nil
invalidNotFoundId := testReq
invalidNotFoundId.Device.Name = nil
notFoundId := "12345678-1111-1234-5678-de9dac3fb9bc"
invalidNotFoundId.Device.Id = &notFoundId
notFoundIdError := errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, fmt.Sprintf("%s doesn't exist in the database", notFoundId), nil)
dbClientMock.On("DeviceById", *invalidNotFoundId.Device.Id).Return(dsModels, notFoundIdError)
invalidNotFoundName := testReq
invalidNotFoundName.Device.Id = nil
notFoundName := "notFoundName"
invalidNotFoundName.Device.Name = &notFoundName
notFoundNameError := errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, fmt.Sprintf("%s doesn't exist in the database", notFoundName), nil)
dbClientMock.On("DeviceByName", *invalidNotFoundName.Device.Name).Return(dsModels, notFoundNameError)
notFountServiceName := "notFoundService"
notFoundService := testReq
notFoundService.Device.ServiceName = &notFountServiceName
dbClientMock.On("DeviceServiceNameExists", *notFoundService.Device.ServiceName).Return(false, nil)
notFountProfileName := "notFoundProfile"
notFoundProfile := testReq
notFoundProfile.Device.ProfileName = &notFountProfileName
dbClientMock.On("DeviceProfileNameExists", *notFoundProfile.Device.ProfileName).Return(false, nil)
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
require.NotNil(t, controller)
tests := []struct {
name string
request []requests.UpdateDeviceRequest
expectedStatusCode int
expectedResponseCode int
}{
{"Valid", []requests.UpdateDeviceRequest{valid}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no requestId", []requests.UpdateDeviceRequest{validWithNoReqID}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no id", []requests.UpdateDeviceRequest{validWithNoId}, http.StatusMultiStatus, http.StatusOK},
{"Valid - no name", []requests.UpdateDeviceRequest{validWithNoName}, http.StatusMultiStatus, http.StatusOK},
{"Invalid - invalid id", []requests.UpdateDeviceRequest{invalidId}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - empty id", []requests.UpdateDeviceRequest{emptyId}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - empty name", []requests.UpdateDeviceRequest{emptyName}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - not found id", []requests.UpdateDeviceRequest{invalidNotFoundId}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - not found name", []requests.UpdateDeviceRequest{invalidNotFoundName}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - no id and name", []requests.UpdateDeviceRequest{invalidNoIdAndName}, http.StatusBadRequest, http.StatusBadRequest},
{"Invalid - not found service", []requests.UpdateDeviceRequest{notFoundService}, http.StatusMultiStatus, http.StatusNotFound},
{"Invalid - not found profile", []requests.UpdateDeviceRequest{notFoundProfile}, http.StatusMultiStatus, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
jsonData, err := json.Marshal(testCase.request)
require.NoError(t, err)
reader := strings.NewReader(string(jsonData))
req, err := http.NewRequest(http.MethodPatch, common.ApiDeviceRoute, reader)
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.PatchDevice)
handler.ServeHTTP(recorder, req)
if testCase.expectedStatusCode == http.StatusMultiStatus {
var res []commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, http.StatusMultiStatus, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res[0].ApiVersion, "API Version not as expected")
if res[0].RequestId != "" {
assert.Equal(t, expectedRequestId, res[0].RequestId, "RequestID not as expected")
}
assert.Equal(t, testCase.expectedResponseCode, res[0].StatusCode, "BaseResponse status code not as expected")
if testCase.expectedResponseCode == http.StatusOK {
assert.Empty(t, res[0].Message, "Message should be empty when it is successful")
} else {
assert.NotEmpty(t, res[0].Message, "Response message doesn't contain the error message")
}
} else {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
// Assert
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedResponseCode, res.StatusCode, "BaseResponse status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
}
})
}
}
func TestAllDevices(t *testing.T) {
device := dtos.ToDeviceModel(buildTestDeviceRequest().Device)
devices := []models.Device{device, device, device}
expectedDeviceTotalCount := uint32(len(devices))
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeviceTotalCount").Return(expectedDeviceTotalCount, nil)
dbClientMock.On("AllDevices", 0, 10, []string(nil)).Return(devices, nil)
dbClientMock.On("AllDevices", 0, 5, testDeviceLabels).Return([]models.Device{devices[0], devices[1]}, nil)
dbClientMock.On("AllDevices", 1, 2, []string(nil)).Return([]models.Device{devices[1], devices[2]}, nil)
dbClientMock.On("AllDevices", 4, 1, testDeviceLabels).Return([]models.Device{}, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "query objects bounds out of range.", nil))
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
offset string
limit string
labels string
errorExpected bool
expectedCount int
expectedTotalCount uint32
expectedStatusCode int
}{
{"Valid - get devices without labels", "0", "10", "", false, 3, expectedDeviceTotalCount, http.StatusOK},
{"Valid - get devices with labels", "0", "5", strings.Join(testDeviceLabels, ","), false, 2, expectedDeviceTotalCount, http.StatusOK},
{"Valid - get devices with offset and no labels", "1", "2", "", false, 2, expectedDeviceTotalCount, http.StatusOK},
{"Invalid - offset out of range", "4", "1", strings.Join(testDeviceLabels, ","), true, 0, expectedDeviceTotalCount, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, common.ApiAllDeviceRoute, http.NoBody)
query := req.URL.Query()
query.Add(common.Offset, testCase.offset)
query.Add(common.Limit, testCase.limit)
if len(testCase.labels) > 0 {
query.Add(common.Labels, testCase.labels)
}
req.URL.RawQuery = query.Encode()
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.AllDevices)
handler.ServeHTTP(recorder, req)
// Assert
if testCase.errorExpected {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
} else {
var res responseDTO.MultiDevicesResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.Equal(t, testCase.expectedCount, len(res.Devices), "Device count not as expected")
assert.Equal(t, testCase.expectedTotalCount, res.TotalCount, "Total count not as expected")
assert.Empty(t, res.Message, "Message should be empty when it is successful")
}
})
}
}
func TestDeviceByName(t *testing.T) {
device := dtos.ToDeviceModel(buildTestDeviceRequest().Device)
emptyName := ""
notFoundName := "notFoundName"
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeviceByName", device.Name).Return(device, nil)
dbClientMock.On("DeviceByName", notFoundName).Return(models.Device{}, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "device doesn't exist in the database", nil))
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
deviceName string
errorExpected bool
expectedStatusCode int
}{
{"Valid - find device by name", device.Name, false, http.StatusOK},
{"Invalid - name parameter is empty", emptyName, true, http.StatusBadRequest},
{"Invalid - device not found by name", notFoundName, true, http.StatusNotFound},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
reqPath := fmt.Sprintf("%s/%s", common.ApiDeviceByNameRoute, testCase.deviceName)
req, err := http.NewRequest(http.MethodGet, reqPath, http.NoBody)
req = mux.SetURLVars(req, map[string]string{common.Name: testCase.deviceName})
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.DeviceByName)
handler.ServeHTTP(recorder, req)
// Assert
if testCase.errorExpected {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
} else {
var res responseDTO.DeviceResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.Equal(t, testCase.deviceName, res.Device.Name, "Name not as expected")
assert.Empty(t, res.Message, "Message should be empty when it is successful")
}
})
}
}
func TestDevicesByProfileName(t *testing.T) {
device := dtos.ToDeviceModel(buildTestDeviceRequest().Device)
testProfileA := "testProfileA"
testProfileB := "testProfileB"
device1WithProfileA := device
device1WithProfileA.ProfileName = testProfileA
device2WithProfileA := device
device2WithProfileA.ProfileName = testProfileA
device3WithProfileB := device
device3WithProfileB.ProfileName = testProfileB
devices := []models.Device{device1WithProfileA, device2WithProfileA, device3WithProfileB}
expectedTotalCountProfileA := uint32(2)
dic := mockDic()
dbClientMock := &dbMock.DBClient{}
dbClientMock.On("DeviceCountByProfileName", testProfileA).Return(expectedTotalCountProfileA, nil)
dbClientMock.On("DevicesByProfileName", 0, 5, testProfileA).Return([]models.Device{devices[0], devices[1]}, nil)
dbClientMock.On("DevicesByProfileName", 1, 1, testProfileA).Return([]models.Device{devices[1]}, nil)
dbClientMock.On("DevicesByProfileName", 4, 1, testProfileB).Return([]models.Device{}, errors.NewCommonEdgeX(errors.KindEntityDoesNotExist, "query objects bounds out of range.", nil))
dic.Update(di.ServiceConstructorMap{
container.DBClientInterfaceName: func(get di.Get) interface{} {
return dbClientMock
},
})
controller := NewDeviceController(dic)
assert.NotNil(t, controller)
tests := []struct {
name string
offset string
limit string
profileName string
errorExpected bool
expectedCount int
expectedTotalCount uint32
expectedStatusCode int
}{
{"Valid - get devices with profileName", "0", "5", testProfileA, false, 2, expectedTotalCountProfileA, http.StatusOK},
{"Valid - get devices with offset and limit", "1", "1", testProfileA, false, 1, expectedTotalCountProfileA, http.StatusOK},
{"Invalid - offset out of range", "4", "1", testProfileB, true, 0, 0, http.StatusNotFound},
{"Invalid - get devices without profileName", "0", "10", "", true, 0, 0, http.StatusBadRequest},
}
for _, testCase := range tests {
t.Run(testCase.name, func(t *testing.T) {
req, err := http.NewRequest(http.MethodGet, common.ApiDeviceByProfileNameRoute, http.NoBody)
query := req.URL.Query()
query.Add(common.Offset, testCase.offset)
query.Add(common.Limit, testCase.limit)
req.URL.RawQuery = query.Encode()
req = mux.SetURLVars(req, map[string]string{common.Name: testCase.profileName})
require.NoError(t, err)
// Act
recorder := httptest.NewRecorder()
handler := http.HandlerFunc(controller.DevicesByProfileName)
handler.ServeHTTP(recorder, req)
// Assert
if testCase.errorExpected {
var res commonDTO.BaseResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.NotEmpty(t, res.Message, "Response message doesn't contain the error message")
} else {
var res responseDTO.MultiDevicesResponse
err = json.Unmarshal(recorder.Body.Bytes(), &res)
require.NoError(t, err)
assert.Equal(t, common.ApiVersion, res.ApiVersion, "API Version not as expected")
assert.Equal(t, testCase.expectedStatusCode, recorder.Result().StatusCode, "HTTP status code not as expected")
assert.Equal(t, testCase.expectedStatusCode, int(res.StatusCode), "Response status code not as expected")
assert.Equal(t, testCase.expectedCount, len(res.Devices), "Device count not as expected")
assert.Equal(t, testCase.expectedTotalCount, res.TotalCount, "Total count not as expected")
assert.Empty(t, res.Message, "Message should be empty when it is successful")
}
})
}
} | Id: &testUUID,
Name: &testName, |
plot_field.py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import scipy.io.netcdf as netcdf
plt.ion()
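# flag_mov: -1 = single snapshot, 0 = 2x2 panel figure saved to eps, 1 = one png per time step
# flag_traj: 1 = track the position of the vorticity minimum and overlay its trajectory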
flag_mov = 0
flag_traj = 0
dir0 = '../run/'
file1 = 'diags.0000000000.t001.nc'
file2 = 'grid.t001.nc'
f1 = netcdf.netcdf_file(dir0 + file1)
f2 = netcdf.netcdf_file(dir0 + file2)
x = f2.variables['X'][:].copy()
y = f2.variables['Y'][:].copy()
xp1 = f2.variables['Xp1'][:].copy() | T = f1.variables['T'][:].copy()
si_x = len(x)
si_y = len(y)
si_t = len(T)
h_mit = f2.variables['Depth'][:,:].copy()
vort = f1.variables['momVort3'][0,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)
xunit = 1000.0 # 1:m -- 1000:km
posxy = np.zeros((2,si_t),dtype='int')
if flag_traj == 1:
for nt in range(0,si_t):
vort = f1.variables['momVort3'][nt,:,:].copy()
posxy[0,nt],posxy[1,nt] = np.unravel_index(np.argmin(vort),vort.shape)
plt.figure()
if flag_mov == -1:
nt = 0
mytime = [49]
vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
plt.contour(xp1[:si_x//2]/xunit,yp1/xunit,vort[:,:si_x//2],vcont,colors='k')
plt.title('Day ' + str(mytime[nt]+1))
plt.xlabel('x (km)')
plt.ylabel('y (km)')
myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
plt.text(x[120]/xunit,y[5]/xunit,myci)
if flag_traj:
plt.plot(xp1[posxy[1,:mytime[nt]]]/xunit,yp1[posxy[0,:mytime[nt]]]/xunit,'b')
plt.plot(xp1[posxy[1,mytime[nt]:]]/xunit,yp1[posxy[0,mytime[nt]:]]/xunit,'b--')
elif flag_mov == 0:
mytime = [0,9,19,29]
for nt in range(0,len(mytime)):
plt.subplot(2,2,nt+1, aspect='equal')
vort = f1.variables['momVort3'][mytime[nt],:,:].copy()
plt.contour(xp1/xunit,yp1/xunit,vort.squeeze(),vcont,colors='k')
plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
plt.title('Day ' + str(mytime[nt]+1))
if nt == 2 or nt == 3:
plt.xlabel('x (km)')
if nt == 0 or nt == 2:
plt.ylabel('y (km)')
myci = "CI: {:.1e}".format(vcont[1]-vcont[0])
plt.text(x[-170]/xunit,y[5]/xunit,myci)
plt.savefig('corner_10mit.eps')
elif flag_mov == 1:
vort = f1.variables['momVort3'][:,:,:].copy()
vmin = np.min(vort)
vmax = -vmin
vcont = np.linspace(vmin,vmax,20)
for nt in range(0,si_t):
vort = f1.variables['momVort3'][nt,:,:].copy()
vort = vort.squeeze()
vort[0,0] = vmin
vort[0,1] = vmax
plt.contourf(xp1/xunit,yp1/xunit,vort,vcont,cmap = plt.cm.bwr)
plt.contourf(x/xunit,y/xunit,h_mit,[-10,0],colors='0.5')
ext = '0'
if nt > 9:
ext = ''
plt.savefig('movie/ewall_'+ ext + str(nt) + 'mit.png')
plt.clf()
f1.close()
f2.close() | yp1 = f2.variables['Yp1'][:].copy() |
conversation-placeholder.js | import React, { Component } from 'react'
import styled from 'styled-components'
export default class | extends Component {
render() {
return <PlaceholderMessage>CLICK A CONVERSATION</PlaceholderMessage>
}
}
const PlaceholderMessage = styled.div`
color: black;
`
| ConversationPlaceholder |
stats_scraper.go | /*
Copyright 2019 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"context"
"errors"
"fmt"
"math/rand"
"net/http"
"strconv"
"sync"
"time"
"go.opencensus.io/stats"
"go.opencensus.io/stats/view"
"go.uber.org/atomic"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
pkgmetrics "knative.dev/pkg/metrics"
autoscalingv1alpha1 "knative.dev/serving/pkg/apis/autoscaling/v1alpha1"
"knative.dev/serving/pkg/apis/serving"
"knative.dev/serving/pkg/metrics"
"knative.dev/serving/pkg/networking"
"knative.dev/serving/pkg/resources"
)
const (
httpClientTimeout = 3 * time.Second
// scraperPodName is the name used in all stats sent from the scraper to
// the autoscaler. The actual customer pods are hidden behind the scraper. The
// autoscaler does not need to know how many customer pods are reporting metrics.
// Instead, the autoscaler knows the stats it receives are either from the
// scraper or the activator.
scraperPodName = "service-scraper"
// scraperMaxRetries are retries to be done to the actual Scrape routine. We want
// to retry if a Scrape returns an error or if the Scrape goes to a pod we already
// scraped.
scraperMaxRetries = 10
)
var (
// ErrFailedGetEndpoints specifies the error returned by scraper when it fails to
// get endpoints.
ErrFailedGetEndpoints = errors.New("failed to get endpoints")
// ErrDidNotReceiveStat specifies the error returned by scraper when it does not receive
// a stat from an unscraped pod.
ErrDidNotReceiveStat = errors.New("did not receive stat from an unscraped pod")
// errDirectScrapingNotAvailable is a sentinel error returned from the pod scraping routine
// when all pods fail with a 503 error code, indicating (most likely) that mesh is enabled.
errDirectScrapingNotAvailable = errors.New("all pod scrapes returned 503 error")
errPodsExhausted = errors.New("pods exhausted")
scrapeTimeM = stats.Float64(
"scrape_time",
"Time to scrape metrics in milliseconds",
stats.UnitMilliseconds)
)
func init() |
// StatsScraper defines the interface for collecting Revision metrics
type StatsScraper interface {
// Scrape scrapes the Revision queue metric endpoint. The duration is used
// to cut off young pods, whose stats might skew lower.
Scrape(time.Duration) (Stat, error)
}
// scrapeClient defines the interface for collecting Revision metrics for a given
// URL. Internal used only.
type scrapeClient interface {
// Do executes the given request.
Do(*http.Request) (Stat, error)
}
// noKeepAliveTransport is a http.Transport with the default settings, but with
// KeepAlive disabled. This is used in the mesh case, where we want to avoid
// getting the same host on each connection.
var noKeepAliveTransport = func() *http.Transport {
t := http.DefaultTransport.(*http.Transport).Clone()
t.DisableKeepAlives = true
return t
}()
// keepAliveTransport is a http.Transport with the default settings, but with
// keepAlive upped to allow 1000 connections.
var keepAliveTransport = func() *http.Transport {
t := http.DefaultTransport.(*http.Transport).Clone()
t.DisableKeepAlives = false // default, but for clarity.
t.MaxIdleConns = 1000
return t
}()
// noKeepaliveClient is a http client with HTTP Keep-Alive disabled.
// This client is used in the mesh case since we want to get a new connection -
// and therefore, hopefully, host - on every scrape of the service.
var noKeepaliveClient = &http.Client{
Transport: noKeepAliveTransport,
Timeout: httpClientTimeout,
}
// client is a normal http client with HTTP Keep-Alive enabled.
// This client is used in the direct pod scraping (no mesh) case where we want
// to take advantage of HTTP Keep-Alive to avoid connection creation overhead
// between scrapes of the same pod.
var client = &http.Client{
Timeout: httpClientTimeout,
Transport: keepAliveTransport,
}
// serviceScraper scrapes Revision metrics via a K8S service by sampling. Which
// pod is picked to serve the request is decided by K8S. Please see
// https://kubernetes.io/docs/concepts/services-networking/network-policies/
// for details.
type serviceScraper struct {
directClient scrapeClient
meshClient scrapeClient
host string
url string
statsCtx context.Context
logger *zap.SugaredLogger
podAccessor resources.PodAccessor
usePassthroughLb bool
podsAddressable bool
}
// NewStatsScraper creates a new StatsScraper for the Revision which
// the given Metric is responsible for.
func NewStatsScraper(metric *autoscalingv1alpha1.Metric, revisionName string, podAccessor resources.PodAccessor,
usePassthroughLb bool, logger *zap.SugaredLogger) StatsScraper {
directClient := newHTTPScrapeClient(client)
meshClient := newHTTPScrapeClient(noKeepaliveClient)
return newServiceScraperWithClient(metric, revisionName, podAccessor, usePassthroughLb, directClient, meshClient, logger)
}
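// newServiceScraperWithClient is like NewStatsScraper, but allows the direct and
// mesh scrape clients to be supplied by the caller.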
func newServiceScraperWithClient(
metric *autoscalingv1alpha1.Metric,
revisionName string,
podAccessor resources.PodAccessor,
usePassthroughLb bool,
directClient, meshClient scrapeClient,
logger *zap.SugaredLogger) *serviceScraper {
svcName := metric.Labels[serving.ServiceLabelKey]
cfgName := metric.Labels[serving.ConfigurationLabelKey]
labels := metric.GetLabels()
annotations := metric.GetAnnotations()
ctx := metrics.RevisionContext(metric.ObjectMeta.Namespace, svcName, cfgName, revisionName, annotations, labels)
return &serviceScraper{
directClient: directClient,
meshClient: meshClient,
host: metric.Spec.ScrapeTarget + "." + metric.ObjectMeta.Namespace,
url: urlFromTarget(metric.Spec.ScrapeTarget, metric.ObjectMeta.Namespace),
podAccessor: podAccessor,
podsAddressable: true,
usePassthroughLb: usePassthroughLb,
statsCtx: ctx,
logger: logger,
}
}
var portAndPath = strconv.Itoa(networking.AutoscalingQueueMetricsPort) + "/metrics"
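// urlFromTarget builds the metrics URL for the given scrape target and namespace.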
func urlFromTarget(t, ns string) string {
return fmt.Sprintf("http://%s.%s:", t, ns) + portAndPath
}
// Scrape scrapes the Revision metrics, either directly from the pods or via the
// service endpoint, and returns the aggregated stat.
func (s *serviceScraper) Scrape(window time.Duration) (stat Stat, err error) {
startTime := time.Now()
defer func() {
// No errors and an empty stat? We didn't scrape at all because
// we're scaled to 0.
if stat == emptyStat && err == nil {
return
}
scrapeTime := time.Since(startTime)
pkgmetrics.RecordBatch(s.statsCtx, scrapeTimeM.M(float64(scrapeTime.Milliseconds())))
}()
if s.podsAddressable || s.usePassthroughLb {
stat, err := s.scrapePods(window)
// Return here if some pods were scraped, but not enough or if we're using a
// passthrough loadbalancer and want no fallback to service-scrape logic.
if !errors.Is(err, errDirectScrapingNotAvailable) || s.usePassthroughLb {
return stat, err
}
// Else fall back to service scrape.
}
readyPodsCount, err := s.podAccessor.ReadyCount()
if err != nil {
return emptyStat, ErrFailedGetEndpoints
}
if readyPodsCount == 0 {
return emptyStat, nil
}
stat, err = s.scrapeService(window, readyPodsCount)
if err == nil && s.podsAddressable {
s.logger.Info("Direct pod scraping off, service scraping, on")
// If err == nil, this means that we failed to scrape all pods, but service worked
// thus it is probably a mesh case.
s.podsAddressable = false
}
return stat, err
}
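// scrapePods scrapes a random sample of pods directly, preferring pods older than
// the given window, and averages the collected stats.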
func (s *serviceScraper) scrapePods(window time.Duration) (Stat, error) {
pods, youngPods, err := s.podAccessor.PodIPsSplitByAge(window, time.Now())
if err != nil {
s.logger.Infow("Error querying pods by age", zap.Error(err))
return emptyStat, err
}
lp := len(pods)
lyp := len(youngPods)
s.logger.Debugf("|OldPods| = %d, |YoungPods| = %d", lp, lyp)
total := lp + lyp
if total == 0 {
return emptyStat, nil
}
frpc := float64(total)
sampleSizeF := populationMeanSampleSize(frpc)
sampleSize := int(sampleSizeF)
results := make(chan Stat, sampleSize)
// 1. If there are not enough old pods: shuffle the young pods and expect to use N-lp of those;
// no need to shuffle the old pods, since all of them are expected to be used.
// 2. If enough old pods: shuffle them and use first N, still append young pods
// as backup in case of errors, but without shuffling.
if lp < sampleSize {
rand.Shuffle(lyp, func(i, j int) {
youngPods[i], youngPods[j] = youngPods[j], youngPods[i]
})
} else {
rand.Shuffle(lp, func(i, j int) {
pods[i], pods[j] = pods[j], pods[i]
})
}
pods = append(pods, youngPods...)
grp, egCtx := errgroup.WithContext(context.Background())
idx := atomic.NewInt32(-1)
var sawNonMeshError atomic.Bool
// Start |sampleSize| threads to scan in parallel.
for i := 0; i < sampleSize; i++ {
grp.Go(func() error {
// If a given pod failed to scrape, we want to continue
// scanning pods down the line.
for {
// Acquire next pod.
myIdx := int(idx.Inc())
// All out?
if myIdx >= len(pods) {
return errPodsExhausted
}
// Scrape!
target := "http://" + pods[myIdx] + ":" + portAndPath
req, err := http.NewRequestWithContext(egCtx, http.MethodGet, target, nil)
if err != nil {
return err
}
if s.usePassthroughLb {
req.Host = s.host
req.Header.Add("Knative-Direct-Lb", "true")
}
stat, err := s.directClient.Do(req)
if err == nil {
results <- stat
return nil
}
if !isPotentialMeshError(err) {
sawNonMeshError.Store(true)
}
s.logger.Infow("Failed scraping pod "+pods[myIdx], zap.Error(err))
}
})
}
err = grp.Wait()
close(results)
// We only get here if one of the scrapers failed to scrape
// at least one pod.
if err != nil {
// Got some (but not enough) successful pods.
if len(results) > 0 {
s.logger.Warn("Too many pods failed scraping for meaningful interpolation")
return emptyStat, errPodsExhausted
}
// We didn't get any pods, but we don't want to fall back to service
// scraping because we saw an error which was not mesh-related.
if sawNonMeshError.Load() {
s.logger.Warn("0 pods scraped, but did not see a mesh-related error")
return emptyStat, errPodsExhausted
}
// No pods, and we only saw mesh-related errors, so infer that mesh must be
// enabled and fall back to service scraping.
s.logger.Warn("0 pods were successfully scraped out of ", strconv.Itoa(len(pods)))
return emptyStat, errDirectScrapingNotAvailable
}
return computeAverages(results, sampleSizeF, frpc), nil
}
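// computeAverages sums the collected stats and averages them using the sample size and total pod count.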
func computeAverages(results <-chan Stat, sample, total float64) Stat {
ret := Stat{
PodName: scraperPodName,
}
// Sum the stats from individual pods.
for stat := range results {
ret.add(stat)
}
ret.average(sample, total)
return ret
}
// scrapeService scrapes the metrics using service endpoint
// as its target, rather than individual pods.
func (s *serviceScraper) scrapeService(window time.Duration, readyPods int) (Stat, error) {
frpc := float64(readyPods)
sampleSizeF := populationMeanSampleSize(frpc)
sampleSize := int(sampleSizeF)
oldStatCh := make(chan Stat, sampleSize)
youngStatCh := make(chan Stat, sampleSize)
scrapedPods := &sync.Map{}
grp, egCtx := errgroup.WithContext(context.Background())
youngPodCutOffSecs := window.Seconds()
for i := 0; i < sampleSize; i++ {
grp.Go(func() error {
for tries := 1; ; tries++ {
stat, err := s.tryScrape(egCtx, scrapedPods)
if err != nil {
// Return the error if we exhausted our retries and
// we had an error returned (we can end up here if
// all the pods were young, which is not an error condition).
if tries >= scraperMaxRetries {
return err
}
continue
}
if stat.ProcessUptime >= youngPodCutOffSecs {
// We run |sampleSize| goroutines and each of them terminates
// as soon as it sees a stat from an `oldPod`.
// The channel is allocated to |sampleSize|, thus this will never
// deadlock.
oldStatCh <- stat
return nil
}
select {
// This in theory might loop over all the possible pods, thus might
// fill up the channel.
case youngStatCh <- stat:
default:
// If so, just return.
return nil
}
}
})
}
// Now at this point we have two possibilities.
// 1. We scraped |sampleSize| distinct pods, with the invariant of
// sampleSize <= len(oldStatCh) + len(youngStatCh) <= sampleSize*2.
// Note that `err` might still be non-nil, especially when the overall
// pod population is small.
// Consider the following case: sampleSize=3; in theory the first goroutine
// might scrape 2 pods, the second 1, and the third won't be able to scrape
// any unseen pod, so it will return `ErrDidNotReceiveStat`.
// 2. We did not: in this case `err` below will be non-nil.
// Return the inner error, if any.
if err := grp.Wait(); err != nil {
// Ignore the error if we have received enough statistics.
if !errors.Is(err, ErrDidNotReceiveStat) || len(oldStatCh)+len(youngStatCh) < sampleSize {
return emptyStat, fmt.Errorf("unsuccessful scrape, sampleSize=%d: %w", sampleSize, err)
}
}
close(oldStatCh)
close(youngStatCh)
ret := Stat{
PodName: scraperPodName,
}
// Sum the stats from individual pods.
oldCnt := len(oldStatCh)
for stat := range oldStatCh {
ret.add(stat)
}
for i := oldCnt; i < sampleSize; i++ {
// This will always succeed, see reasoning above.
ret.add(<-youngStatCh)
}
ret.average(sampleSizeF, frpc)
return ret, nil
}
// tryScrape runs a single scrape and returns stat if this is a pod that has not been
// seen before. An error otherwise or if scraping failed.
func (s *serviceScraper) tryScrape(ctx context.Context, scrapedPods *sync.Map) (Stat, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, s.url, nil)
if err != nil {
return emptyStat, err
}
stat, err := s.meshClient.Do(req)
if err != nil {
return emptyStat, err
}
if _, exists := scrapedPods.LoadOrStore(stat.PodName, struct{}{}); exists {
return emptyStat, ErrDidNotReceiveStat
}
return stat, nil
}
| {
if err := view.Register(
&view.View{
Description: "The time to scrape metrics in milliseconds",
Measure: scrapeTimeM,
Aggregation: view.Distribution(pkgmetrics.Buckets125(1, 100000)...),
},
); err != nil {
panic(err)
}
} |
ej2-react-progressbar.umd.min.js | !function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("@syncfusion/ej2-react-base"),require("react"),require("@syncfusion/ej2-progressbar")):"function"==typeof define&&define.amd?define(["exports","@syncfusion/ej2-react-base","react","@syncfusion/ej2-progressbar"],t):t(e.ej={},e.ej2ReactBase,e.React,e.ej2Progressbar)}(this,function(e,t,n,r){"use strict";var o=function(){var e=function(t,n){return(e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n])})(t,n)};return function(t,n){function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}}(),i=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return o(t,e),t.moduleName="progressBarAnnotation",t}(t.ComplexBase),s=function(e){function t(){return null!==e&&e.apply(this,arguments)||this}return o(t,e),t.propertyName="annotations",t.moduleName="progressBarAnnotations",t}(t.ComplexBase),a=function(){var e=function(t,n){return(e=Object.setPrototypeOf||{__proto__:[]}instanceof Array&&function(e,t){e.__proto__=t}||function(e,t){for(var n in t)t.hasOwnProperty(n)&&(e[n]=t[n])})(t,n)};return function(t,n){function r(){this.constructor=t}e(t,n),t.prototype=null===n?Object.create(n):(r.prototype=n.prototype,new r)}}(),c=function(e){function t(t){var n=e.call(this,t)||this;return n.initRenderCalled=!1,n.checkInjectedModules=!0,n.directivekeys={progressBarAnnotations:"progressBarAnnotation"},n.immediateRender=!1,n}return a(t,e),t.prototype.render=function(){if(!(this.element&&!this.initRenderCalled||this.refreshing))return n.createElement("div",this.getDefaultAttributes(),this.props.children);e.prototype.render.call(this),this.initRenderCalled=!0},t}(r.ProgressBar);t.applyMixins(c,[t.ComponentBase,n.PureComponent]),e.Inject=t.Inject,e.ProgressBarAnnotationDirective=i,e.ProgressBarAnnotationsDirective=s,e.ProgressBarComponent=c,Object.keys(r).forEach(function(t){e[t]=r[t]}),Object.defineProperty(e,"__esModule",{value:!0})}); | //# sourceMappingURL=ej2-react-progressbar.umd.min.js.map |
|
optiontypes.py | # This file is part of the Python aiocoap library project.
#
# Copyright (c) 2012-2014 Maciej Wasilak <http://sixpinetrees.blogspot.com/>,
# 2013-2014 Christian Amsüss <[email protected]>
#
# aiocoap is free software, this file is published under the MIT license as
# described in the accompanying LICENSE file.
import abc
import collections
import struct
class OptionType(metaclass=abc.ABCMeta):
"""Interface for decoding and encoding option values
Instances of :class:`OptionType` are collected in a list in a
:attr:`.Message.opt` :class:`.Options` object, and provide a translation
between the CoAP octet-stream (accessed using the
:meth:`encode()`/:meth:`decode()` method pair) and the interpreted value
(accessed via the :attr:`value` attribute).
Note that OptionType objects usually don't need to be handled by library
users; the recommended way to read and set options is via the Options
object's properties (e.g. ``message.opt.uri_path = ('.well-known',
'core')``)."""
@abc.abstractmethod
def __init__(self, number, value):
"""Set the `self.name` and `self.value` attributes"""
@abc.abstractmethod
def encode(self):
"""Return the option's value in serialzied form"""
@abc.abstractmethod
def decode(self, rawdata):
"""Set the option's value from the bytes in rawdata"""
@property
def length(self):
"""Indicate the length of the encoded value"""
return len(self.encode())
class StringOption(OptionType):
"""String CoAP option - used to represent string options. Always encoded in
UTF8 per CoAP specification."""
def __init__(self, number, value=""):
self.value = value
self.number = number
def encode(self):
# FIXME: actually, this should be utf8 of the net-unicode form (maybe it is)
rawdata = self.value.encode('utf-8')
return rawdata
def decode(self, rawdata):
self.value = rawdata.decode('utf-8')
def _ | self):
return len(self.value.encode('utf-8'))
length = property(_length)
def __str__(self):
return self.value
class OpaqueOption(OptionType):
"""Opaque CoAP option - used to represent options that just have their
uninterpreted bytes as value."""
def __init__(self, number, value=b""):
self.value = value
self.number = number
def encode(self):
rawdata = self.value
return rawdata
def decode(self, rawdata):
self.value = rawdata # if rawdata is not None else ""
def _length(self):
return len(self.value)
length = property(_length)
def __str__(self):
return repr(self.value)
class UintOption(OptionType):
"""Uint CoAP option - used to represent integer options."""
def __init__(self, number, value=0):
self.value = value
self.number = number
def encode(self):
rawdata = struct.pack("!L", self.value) # For Python >3.1 replace with int.to_bytes()
return rawdata.lstrip(bytes([0]))
def decode(self, rawdata): # For Python >3.1 replace with int.from_bytes()
value = 0
for byte in rawdata:
value = (value * 256) + byte
self.value = value
return self
def _length(self):
if self.value > 0:
return (self.value.bit_length() - 1) // 8 + 1
else:
return 0
length = property(_length)
def __str__(self):
return str(self.value)
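# A minimal round-trip sketch (not part of the original module): it only assumes
# UintOption as defined above and an arbitrary option number, and shows that the
# struct.pack/lstrip encoding matches int.to_bytes with big-endian byte order.
def _uint_option_roundtrip_example():
    opt = UintOption(number=7, value=1337)
    encoded = opt.encode()                      # b'\x05\x39' - leading zero bytes stripped
    assert encoded == (1337).to_bytes(2, 'big')
    decoded = UintOption(number=7)
    decoded.decode(encoded)
    assert decoded.value == 1337 and decoded.length == 2
    return decoded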
class BlockOption(OptionType):
"""Block CoAP option - special option used only for Block1 and Block2 options.
Currently it is the only type of CoAP options that has
internal structure."""
class BlockwiseTuple(collections.namedtuple('_BlockwiseTuple', ['block_number', 'more', 'size_exponent'])):
@property
def size(self):
return 2 ** (self.size_exponent + 4)
@property
def start(self):
return self.block_number * self.size
def __init__(self, number, value=None):
if value is not None:
self._value = self.BlockwiseTuple._make(value)
self.number = number
value = property(lambda self: self._value,
lambda self, value: setattr(self, '_value', self.BlockwiseTuple._make(value)))
def encode(self):
as_integer = (self.value.block_number << 4) + (self.value.more * 0x08) + self.value.size_exponent
rawdata = struct.pack("!L", as_integer) # For Python >3.1 replace with int.to_bytes()
return rawdata.lstrip(bytes([0]))
def decode(self, rawdata):
as_integer = 0
for byte in rawdata:
as_integer = (as_integer * 256) + byte
self.value = self.BlockwiseTuple(block_number=(as_integer >> 4), more=bool(as_integer & 0x08),
size_exponent=(as_integer & 0x07))
def _length(self):
return ((self.value[0].bit_length() + 3) // 8 + 1)
length = property(_length)
def __str__(self):
return str(self.value)
| length( |
one_shot.py | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import logging
from schema import And, Optional
from .constants import MASKER_DICT
from ..utils.config_validation import CompressorSchema
from ..compressor import Pruner
__all__ = ['LevelPruner', 'SlimPruner', 'L1FilterPruner', 'L2FilterPruner', 'FPGMPruner', \
'TaylorFOWeightFilterPruner', 'ActivationAPoZRankFilterPruner', 'ActivationMeanRankFilterPruner']
logger = logging.getLogger('torch pruner')
class OneshotPruner(Pruner):
"""
    Prune the model to an exact pruning level in a single shot.
"""
def __init__(self, model, config_list, pruning_algorithm='level', optimizer=None, **algo_kwargs):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
            List of pruning configs
pruning_algorithm: str
            Algorithm used to prune the model
optimizer: torch.optim.Optimizer
Optimizer used to train model
algo_kwargs: dict
Additional parameters passed to pruning algorithm masker class
"""
super().__init__(model, config_list, optimizer)
self.set_wrappers_attribute("if_calculated", False)
self.masker = MASKER_DICT[pruning_algorithm](model, self, **algo_kwargs)
def validate_config(self, model, config_list):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
            List of pruning configs
"""
schema = CompressorSchema([{
'sparsity': And(float, lambda n: 0 < n < 1),
Optional('op_types'): [str],
Optional('op_names'): [str]
}], model, logger)
schema.validate(config_list)
def calc_mask(self, wrapper, wrapper_idx=None):
"""
Calculate the mask of given layer
Parameters
----------
wrapper : Module
the module to instrument the compression operation
wrapper_idx: int
index of this wrapper in pruner's all wrappers
Returns
-------
dict
dictionary for storing masks, keys of the dict:
'weight_mask': weight mask tensor
'bias_mask': bias mask tensor (optional)
"""
if wrapper.if_calculated:
return None
sparsity = wrapper.config['sparsity']
if not wrapper.if_calculated:
masks = self.masker.calc_mask(sparsity=sparsity, wrapper=wrapper, wrapper_idx=wrapper_idx)
            # masker.calc_mask returning None means the mask was not calculated successfully; it can be retried later
if masks is not None:
wrapper.if_calculated = True
return masks
else:
return None
class LevelPruner(OneshotPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The target sparsity to which the specified operations should be compressed.
- op_types : Operation types to prune.
"""
def __init__(self, model, config_list):
super().__init__(model, config_list, pruning_algorithm='level')
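# Illustrative usage sketch (not part of the original module). The config_list
# follows the schema validated above (a sparsity in (0, 1) plus optional
# op_types/op_names); the values shown are hypothetical, and compress() is
# assumed to be provided by the inherited Pruner base class.
def _level_pruner_example(model):
    config_list = [{
        'sparsity': 0.5,          # prune half of the weights in the matched ops
        'op_types': ['default'],  # hypothetical filter - adjust to the target model
    }]
    pruner = LevelPruner(model, config_list)
    return pruner.compress()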
class SlimPruner(OneshotPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The target sparsity to which the specified operations should be compressed.
- op_types : Only BatchNorm2d is supported in Slim Pruner.
"""
def __init__(self, model, config_list):
super().__init__(model, config_list, pruning_algorithm='slim')
def validate_config(self, model, config_list):
schema = CompressorSchema([{
'sparsity': And(float, lambda n: 0 < n < 1),
'op_types': ['BatchNorm2d'],
Optional('op_names'): [str]
}], model, logger)
schema.validate(config_list)
if len(config_list) > 1:
logger.warning('Slim pruner only supports 1 configuration')
class _StructuredFilterPruner(OneshotPruner):
def __init__(self, model, config_list, pruning_algorithm, optimizer=None, **algo_kwargs):
super().__init__(model, config_list, pruning_algorithm=pruning_algorithm, optimizer=optimizer, **algo_kwargs)
def validate_config(self, model, config_list):
schema = CompressorSchema([{
'sparsity': And(float, lambda n: 0 < n < 1),
'op_types': ['Conv2d'],
Optional('op_names'): [str]
}], model, logger)
schema.validate(config_list)
class L1FilterPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The target sparsity to which the specified operations should be compressed.
- op_types : Only Conv2d is supported in L1FilterPruner.
"""
def __init__(self, model, config_list):
super().__init__(model, config_list, pruning_algorithm='l1')
class L2FilterPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The target sparsity to which the specified operations should be compressed.
- op_types : Only Conv2d is supported in L2FilterPruner.
"""
def __init__(self, model, config_list):
super().__init__(model, config_list, pruning_algorithm='l2')
class FPGMPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The target sparsity to which the specified operations should be compressed.
- op_types : Only Conv2d is supported in FPGM Pruner.
"""
def __init__(self, model, config_list):
super().__init__(model, config_list, pruning_algorithm='fpgm')
class TaylorFOWeightFilterPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The percentage of convolutional filters to prune.
- op_types : Currently only Conv2d is supported in TaylorFOWeightFilterPruner.
"""
def __init__(self, model, config_list, optimizer=None, statistics_batch_num=1):
|
class ActivationAPoZRankFilterPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The percentage of convolutional filters to prune.
- op_types : Only Conv2d is supported in ActivationAPoZRankFilterPruner.
"""
def __init__(self, model, config_list, optimizer=None, activation='relu', statistics_batch_num=1):
super().__init__(model, config_list, pruning_algorithm='apoz', optimizer=optimizer, \
activation=activation, statistics_batch_num=statistics_batch_num)
class ActivationMeanRankFilterPruner(_StructuredFilterPruner):
"""
Parameters
----------
model : torch.nn.Module
Model to be pruned
config_list : list
Supported keys:
            - sparsity : The percentage of convolutional filters to prune.
- op_types : Only Conv2d is supported in ActivationMeanRankFilterPruner.
"""
def __init__(self, model, config_list, optimizer=None, activation='relu', statistics_batch_num=1):
super().__init__(model, config_list, pruning_algorithm='mean_activation', optimizer=optimizer, \
activation=activation, statistics_batch_num=statistics_batch_num)
| super().__init__(model, config_list, pruning_algorithm='taylorfo', optimizer=optimizer, statistics_batch_num=statistics_batch_num) |
pack_unique_files_test.py | import json
import os
import click
import pytest
import requests_mock
from click.testing import CliRunner
from git import GitCommandError
from demisto_sdk.__main__ import main
from demisto_sdk.commands.common import tools
from demisto_sdk.commands.common.constants import (PACK_METADATA_DESC,
PACK_METADATA_SUPPORT,
PACK_METADATA_TAGS,
PACK_METADATA_USE_CASES,
PACKS_README_FILE_NAME,
XSOAR_SUPPORT)
from demisto_sdk.commands.common.errors import Errors
from demisto_sdk.commands.common.hook_validations.base_validator import \
BaseValidator
from demisto_sdk.commands.common.hook_validations.pack_unique_files import \
PackUniqueFilesValidator
from demisto_sdk.commands.common.legacy_git_tools import git_path
from TestSuite.test_tools import ChangeCWD
VALIDATE_CMD = "validate"
PACK_METADATA_PARTNER = {
"name": "test",
"description": "test",
"support": "partner",
"currentVersion": "1.0.1",
"author": "bar",
"categories": [
"Data Enrichment & Threat Intelligence"
],
"tags": [],
"useCases": [],
"keywords": [],
"price": 2,
"email": "[email protected]",
"url": "https://www.paloaltonetworks.com/cortex"
}
README_INPUT_RESULTS_LIST = [
('', False),
(' ', False),
('\t\t\n ', False),
('Text', True),
]
class TestPackUniqueFilesValidator:
FILES_PATH = os.path.normpath(os.path.join(__file__, f'{git_path()}/demisto_sdk/tests', 'test_files', 'Packs'))
FAKE_PACK_PATH = os.path.normpath(os.path.join(__file__, f'{git_path()}/demisto_sdk/tests', 'test_files',
'fake_pack'))
FAKE_PATH_NAME = 'fake_pack'
validator = PackUniqueFilesValidator(FAKE_PATH_NAME)
validator.pack_path = FAKE_PACK_PATH
def restart_validator(self):
self.validator.pack_path = ''
self.validator = PackUniqueFilesValidator(self.FAKE_PATH_NAME)
self.validator.pack_path = self.FAKE_PACK_PATH
def test_is_error_added_name_only(self):
self.validator._add_error(('boop', '101'), 'file_name')
assert f'{self.validator.pack_path}/file_name: [101] - boop\n' in self.validator.get_errors(True)
assert f'{self.validator.pack_path}/file_name: [101] - boop\n' in self.validator.get_errors()
self.validator._errors = []
def test_is_error_added_full_path(self):
self.validator._add_error(('boop', '101'), f'{self.validator.pack_path}/file/name')
assert f'{self.validator.pack_path}/file/name: [101] - boop\n' in self.validator.get_errors(True)
assert f'{self.validator.pack_path}/file/name: [101] - boop\n' in self.validator.get_errors()
self.validator._errors = []
def test_is_file_exist(self):
assert self.validator._is_pack_file_exists(PACKS_README_FILE_NAME)
assert not self.validator._is_pack_file_exists('boop')
self.validator._errors = []
def test_parse_file_into_list(self):
assert ['boop', 'sade', ''] == self.validator._parse_file_into_list(PACKS_README_FILE_NAME)
assert not self.validator._parse_file_into_list('boop')
self.validator._errors = []
def test_validate_pack_unique_files(self, mocker):
mocker.patch.object(BaseValidator, 'check_file_flags', return_value='')
mocker.patch.object(PackUniqueFilesValidator, 'validate_pack_readme_and_pack_description', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, 'validate_pack_readme_images', return_value=True)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': []}, 'json'))
assert not self.validator.are_valid_files(id_set_validations=False)
fake_validator = PackUniqueFilesValidator('fake')
mocker.patch.object(fake_validator, '_read_metadata_content', return_value=dict())
assert fake_validator.are_valid_files(id_set_validations=False)
def test_validate_pack_metadata(self, mocker):
mocker.patch.object(BaseValidator, 'check_file_flags', return_value='')
mocker.patch.object(PackUniqueFilesValidator, 'validate_pack_readme_and_pack_description', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, 'validate_pack_readme_images', return_value=True)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': []}, 'json'))
assert not self.validator.are_valid_files(id_set_validations=False)
fake_validator = PackUniqueFilesValidator('fake')
mocker.patch.object(fake_validator, '_read_metadata_content', return_value=dict())
assert fake_validator.are_valid_files(id_set_validations=False)
def test_validate_partner_contribute_pack_metadata_no_mail_and_url(self, mocker, repo):
"""
Given
- Partner contributed pack without email and url.
When
- Running validate on it.
Then
- Ensure validate found errors.
"""
pack_metadata_no_email_and_url = PACK_METADATA_PARTNER.copy()
pack_metadata_no_email_and_url['email'] = ''
pack_metadata_no_email_and_url['url'] = ''
mocker.patch.object(tools, 'is_external_repository', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, '_is_pack_file_exists', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, 'get_master_private_repo_meta_file', return_value=None)
mocker.patch.object(PackUniqueFilesValidator, '_read_file_content',
return_value=json.dumps(pack_metadata_no_email_and_url))
mocker.patch.object(BaseValidator, 'check_file_flags', return_value=None)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': []}, 'json'))
pack = repo.create_pack('PackName')
pack.pack_metadata.write_json(pack_metadata_no_email_and_url)
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [VALIDATE_CMD, '-i', pack.path], catch_exceptions=False)
assert 'Contributed packs must include email or url' in result.stdout
@pytest.mark.parametrize('url, is_valid', [
('https://github.com/pont_to_repo', False),
('some_support_url', True),
('https://github.com/pont_to_repo/issues', True),
])
def test_validate_partner_pack_metadata_url(self, mocker, repo, url, is_valid):
"""
Given
- Partner contributed pack with an is_valid url.
When
- Running validate on it.
Then
- Ensure validate finds errors accordingly.
"""
pack_metadata_changed_url = PACK_METADATA_PARTNER.copy()
pack_metadata_changed_url['url'] = url
mocker.patch.object(tools, 'is_external_repository', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, '_is_pack_file_exists', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, 'get_master_private_repo_meta_file', return_value=None)
mocker.patch.object(PackUniqueFilesValidator, '_read_file_content',
return_value=json.dumps(pack_metadata_changed_url))
mocker.patch.object(BaseValidator, 'check_file_flags', return_value=None)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': []}, 'json'))
pack = repo.create_pack('PackName')
pack.pack_metadata.write_json(pack_metadata_changed_url)
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [VALIDATE_CMD, '-i', pack.path], catch_exceptions=False)
error_text = 'The metadata URL leads to a GitHub repo instead of a support page.'
if is_valid:
assert error_text not in result.stdout
else:
assert error_text in result.stdout
def test_validate_partner_contribute_pack_metadata_price_change(self, mocker, repo):
"""
Given
- Partner contributed pack where price has changed.
When
- Running validate on it.
Then
- Ensure validate found errors.
"""
pack_metadata_price_changed = PACK_METADATA_PARTNER.copy()
pack_metadata_price_changed['price'] = 3
mocker.patch.object(tools, 'is_external_repository', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, '_is_pack_file_exists', return_value=True)
mocker.patch.object(PackUniqueFilesValidator, 'get_master_private_repo_meta_file',
return_value=PACK_METADATA_PARTNER)
mocker.patch.object(PackUniqueFilesValidator, '_read_file_content',
return_value=json.dumps(pack_metadata_price_changed))
mocker.patch.object(BaseValidator, 'check_file_flags', return_value=None)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': []}, 'json'))
pack = repo.create_pack('PackName')
pack.pack_metadata.write_json(pack_metadata_price_changed)
with ChangeCWD(repo.path):
runner = CliRunner(mix_stderr=False)
result = runner.invoke(main, [VALIDATE_CMD, '-i', pack.path], catch_exceptions=False)
assert 'The pack price was changed from 2 to 3 - revert the change' in result.stdout
def test_check_timestamp_format(self):
"""
Given
- timestamps in various formats.
When
- Running check_timestamp_format on them.
Then
- Ensure True for iso format and False for any other format.
"""
fake_validator = PackUniqueFilesValidator('fake')
good_format_timestamp = '2020-04-14T00:00:00Z'
missing_z = '2020-04-14T00:00:00'
missing_t = '2020-04-14 00:00:00Z'
only_date = '2020-04-14'
with_hyphen = '2020-04-14T00-00-00Z'
assert fake_validator.check_timestamp_format(good_format_timestamp) |
def test_validate_pack_dependencies_invalid_id_set(self, mocker, repo):
"""
Given
- An invalid id set error being raised
When
- Running validate_pack_dependencies.
Then
- Ensure that the validation fails and that the invalid id set error is printed.
"""
self.restart_validator()
def error_raising_function(*args, **kwargs):
raise ValueError("Couldn't find any items for pack 'PackID'. make sure your spelling is correct.")
mocker.patch(
'demisto_sdk.commands.common.hook_validations.pack_unique_files.get_core_pack_list',
side_effect=error_raising_function
)
assert not self.validator.validate_pack_dependencies()
assert Errors.invalid_id_set()[0] in self.validator.get_errors()
def test_validate_core_pack_dependencies(self):
"""
Given
- A list of non-core packs
When
- Running validate_core_pack_dependencies.
Then
- Ensure that the validation fails and that the invalid core pack dependencies error is printed.
"""
self.restart_validator()
dependencies_packs = {'dependency_pack_1': {'mandatory': True, 'display_name': 'dependency pack 1'},
'dependency_pack_2': {'mandatory': False, 'display_name': 'dependency pack 2'},
'dependency_pack_3': {'mandatory': True, 'display_name': 'dependency pack 3'}}
assert not self.validator.validate_core_pack_dependencies(dependencies_packs)
assert Errors.invalid_core_pack_dependencies('fake_pack', ['dependency_pack_1', 'dependency_pack_3'])[0] \
in self.validator.get_errors()
def test_validate_pack_dependencies_skip_id_set_creation(self, capsys):
"""
Given
- skip_id_set_creation flag set to true.
- No id_set file exists
When
- Running validate_pack_dependencies.
Then
- Ensure that the validation passes and that the skipping message is printed.
"""
self.restart_validator()
self.validator.skip_id_set_creation = True
res = self.validator.validate_pack_dependencies()
self.validator.skip_id_set_creation = False # reverting to default for next tests
assert res
assert "No first level dependencies found" in capsys.readouterr().out
@pytest.mark.parametrize('usecases, is_valid, branch_usecases', [
([], True, []),
(['Phishing', 'Malware'], True, []),
(['NonApprovedUsecase', 'Case Management'], False, []),
(['NewUseCase'], True, ['NewUseCase']),
(['NewUseCase1, NewUseCase2'], False, ['NewUseCase1'])
])
def test_is_approved_usecases(self, repo, usecases, is_valid, branch_usecases, mocker):
"""
Given:
- Case A: Pack without usecases
- Case B: Pack with approved usecases (Phishing and Malware)
- Case C: Pack with non-approved usecase (NonApprovedUsecase) and approved usecase (Case Management)
- Case D: Pack with approved usecase (NewUseCase) located in my branch only
- Case E: Pack with non-approved usecase (NewUseCase2) and approved usecase (NewUseCase1)
located in my branch only
When:
- Validating approved usecases
Then:
- Case A: Ensure validation passes as there are no usecases to verify
- Case B: Ensure validation passes as both usecases are approved
- Case C: Ensure validation fails as it contains a non-approved usecase (NonApprovedUsecase)
Verify expected error is printed
- Case D: Ensure validation passes as usecase is approved on the same branch
- Case E: Ensure validation fails as it contains a non-approved usecase (NewUseCase2)
Verify expected error is printed
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json({
PACK_METADATA_USE_CASES: usecases,
PACK_METADATA_SUPPORT: XSOAR_SUPPORT,
PACK_METADATA_TAGS: []
})
mocker.patch.object(tools, 'is_external_repository', return_value=False)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': branch_usecases}, 'json'))
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
assert self.validator._is_approved_usecases() == is_valid
if not is_valid:
assert 'The pack metadata contains non approved usecases:' in self.validator.get_errors()
@pytest.mark.parametrize('tags, is_valid, branch_tags', [
([], True, []),
(['Machine Learning', 'Spam'], True, []),
(['NonApprovedTag', 'GDPR'], False, []),
(['NewTag'], True, ['NewTag']),
(['NewTag1, NewTag2'], False, ['NewTag1'])
])
def test_is_approved_tags(self, repo, tags, is_valid, branch_tags, mocker):
"""
Given:
- Case A: Pack without tags
- Case B: Pack with approved tags (Machine Learning and Spam)
- Case C: Pack with non-approved tags (NonApprovedTag) and approved tags (GDPR)
            - Case D: Pack with an approved tag (NewTag) located in my branch only
            - Case E: Pack with a non-approved tag (NewTag2) and an approved tag (NewTag1)
              located in my branch only
When:
- Validating approved tags
Then:
- Case A: Ensure validation passes as there are no tags to verify
- Case B: Ensure validation passes as both tags are approved
- Case C: Ensure validation fails as it contains a non-approved tags (NonApprovedTag)
Verify expected error is printed
- Case D: Ensure validation passes as tags is approved on the same branch
- Case E: Ensure validation fails as it contains a non-approved tag (NewTag2)
Verify expected error is printed
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json({
PACK_METADATA_USE_CASES: [],
PACK_METADATA_SUPPORT: XSOAR_SUPPORT,
PACK_METADATA_TAGS: tags
})
mocker.patch.object(tools, 'is_external_repository', return_value=False)
mocker.patch.object(tools, 'get_dict_from_file', return_value=({'approved_list': branch_tags}, 'json'))
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
assert self.validator._is_approved_tags() == is_valid
if not is_valid:
assert 'The pack metadata contains non approved tags:' in self.validator.get_errors()
@pytest.mark.parametrize('pack_content, tags, is_valid', [
("none", [], True),
("none", ["Use Case"], False),
("playbook", ["Use Case"], True),
("incident", ["Use Case"], True),
("layout", ["Use Case"], True),
("playbook", [], True),
])
def test_is_right_usage_of_usecase_tag(self, repo, pack_content, tags, is_valid):
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json({
PACK_METADATA_USE_CASES: [],
PACK_METADATA_SUPPORT: XSOAR_SUPPORT,
PACK_METADATA_TAGS: tags,
})
if pack_content == "playbook":
pack.create_playbook(name="PlaybookName")
elif pack_content == "incident":
pack.create_incident_type(name="IncidentTypeName")
elif pack_content == "layout":
pack.create_layout(name="Layout")
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
assert self.validator.is_right_usage_of_usecase_tag() == is_valid
@pytest.mark.parametrize('type, is_valid', [
('community', True),
('partner', True),
('xsoar', True),
('someName', False),
('test', False),
('developer', True)
])
def test_is_valid_support_type(self, repo, type, is_valid):
"""
Given:
- Pack with support type in the metadata file.
When:
- Running _is_valid_support_type.
Then:
- Ensure True when the support types are valid, else False with the right error message.
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json({
PACK_METADATA_USE_CASES: [],
PACK_METADATA_SUPPORT: type
})
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
assert self.validator._is_valid_support_type() == is_valid
if not is_valid:
assert 'Support field should be one of the following: xsoar, partner, developer or community.' in \
self.validator.get_errors()
def test_get_master_private_repo_meta_file_running_on_master(self, mocker, repo, capsys):
"""
Given:
- A repo which runs on master branch
When:
- Running get_master_private_repo_meta_file.
Then:
- Ensure result is None and the appropriate skipping message is printed.
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json(PACK_METADATA_PARTNER)
class MyRepo:
active_branch = 'master'
mocker.patch('demisto_sdk.commands.common.hook_validations.pack_unique_files.Repo', return_value=MyRepo)
res = self.validator.get_master_private_repo_meta_file(str(pack.pack_metadata.path))
assert not res
assert "Running on master branch - skipping price change validation" in capsys.readouterr().out
def test_get_master_private_repo_meta_file_getting_git_error(self, repo, capsys, mocker):
"""
Given:
- A repo which runs on non-master branch.
- git.show command raises GitCommandError.
When:
- Running get_master_private_repo_meta_file.
Then:
- Ensure result is None and the appropriate skipping message is printed.
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json(PACK_METADATA_PARTNER)
class MyRepo:
active_branch = 'not-master'
class gitClass:
def show(self, var):
raise GitCommandError("A", "B")
git = gitClass()
mocker.patch('demisto_sdk.commands.common.hook_validations.pack_unique_files.Repo', return_value=MyRepo)
res = self.validator.get_master_private_repo_meta_file(str(pack.pack_metadata.path))
assert not res
assert "Got an error while trying to connect to git" in capsys.readouterr().out
def test_get_master_private_repo_meta_file_file_not_found(self, mocker, repo, capsys):
"""
Given:
- A repo which runs on non-master branch.
- git.show command returns None.
When:
- Running get_master_private_repo_meta_file.
Then:
- Ensure result is None and the appropriate skipping message is printed.
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.pack_metadata.write_json(PACK_METADATA_PARTNER)
class MyRepo:
active_branch = 'not-master'
class gitClass:
def show(self, var):
return None
git = gitClass()
mocker.patch('demisto_sdk.commands.common.hook_validations.pack_unique_files.Repo', return_value=MyRepo)
res = self.validator.get_master_private_repo_meta_file(str(pack.pack_metadata.path))
assert not res
assert "Unable to find previous pack_metadata.json file - skipping price change validation" in \
capsys.readouterr().out
@pytest.mark.parametrize('text, result', README_INPUT_RESULTS_LIST)
def test_validate_pack_readme_file_is_not_empty_partner(self, mocker, text, result):
"""
Given:
- partner pack
When:
- Running test_validate_pack_readme_file_is_not_empty_partner.
Then:
- Ensure result is False for empty README.md file and True otherwise.
"""
self.validator = PackUniqueFilesValidator(self.FAKE_PACK_PATH)
self.validator.support = 'partner'
mocker.patch.object(PackUniqueFilesValidator, '_read_file_content', return_value=text)
assert self.validator.validate_pack_readme_file_is_not_empty() == result
@pytest.mark.parametrize('text, result', README_INPUT_RESULTS_LIST)
def test_validate_pack_readme_file_is_not_empty_use_case(self, mocker, text, result):
"""
Given:
- pack with use case
When:
            - Running test_validate_pack_readme_file_is_not_empty_use_case.
Then:
- Ensure result is False for empty README.md file and True otherwise.
"""
self.validator = PackUniqueFilesValidator(os.path.join(self.FILES_PATH, 'CortexXDR'))
mocker.patch.object(PackUniqueFilesValidator, '_read_file_content', return_value=text)
assert self.validator.validate_pack_readme_file_is_not_empty() == result
def test_validate_pack_readme_file_is_not_empty_missing_file(self):
self.validator = PackUniqueFilesValidator(os.path.join(self.FILES_PATH, 'DummyPack'))
assert self.validator._is_pack_file_exists(self.validator.readme_file) is False
def test_validate_pack_readme_valid_images(self, mocker):
"""
Given
- A pack README file with valid absolute image paths in it.
When
- Run validate on pack README file
Then
- Ensure:
- Validation succeed
- Valid absolute image paths were not caught
"""
from demisto_sdk.commands.common.hook_validations.readme import \
ReadMeValidator
self.validator = PackUniqueFilesValidator(os.path.join(self.FILES_PATH, 'DummyPack2'))
mocker.patch.object(ReadMeValidator, 'check_readme_relative_image_paths', return_value=[]) # Test only absolute paths
with requests_mock.Mocker() as m:
# Mock get requests
m.get('https://github.com/demisto/content/raw/test1.png',
status_code=200, text="Test1")
m.get('https://raw.githubusercontent.com/demisto/content/raw/test1.png',
status_code=200, text="Test1")
m.get('https://raw.githubusercontent.com/demisto/content/raw/test1.jpg',
status_code=200, text="Test1")
result = self.validator.validate_pack_readme_images()
errors = self.validator.get_errors()
assert result
assert 'please repair it:\n' not in errors
assert 'please repair it:\n' not in errors
assert 'please repair it:\n(https://raw.githubusercontent.com/demisto/content/raw/test1.jpg)' not in errors
def test_validate_pack_readme_invalid_images(self):
"""
Given
- A pack README file with invalid absolute and relative image paths in it.
When
- Run validate on pack README file
Then
- Ensure:
- Validation fails
- Invalid relative image paths were caught correctly
- Invalid absolute image paths were caught correctly
"""
self.validator = PackUniqueFilesValidator(os.path.join(self.FILES_PATH, 'DummyPack2'))
with requests_mock.Mocker() as m:
# Mock get requests
m.get('https://github.com/demisto/content/raw/test1.png',
status_code=404, text="Test1")
m.get('https://raw.githubusercontent.com/demisto/content/raw/test1.png',
status_code=404, text="Test1")
m.get('https://raw.githubusercontent.com/demisto/content/raw/test1.jpg',
status_code=404, text="Test1")
result = self.validator.validate_pack_readme_images()
errors = self.validator.get_errors()
assert not result
assert 'Detected the following image relative path: ' in errors
assert 'Detected the following image relative path: ' in errors
assert 'Detected the following image relative path: (../../doc_files/Access_investigation_-_Generic_4_5.png)' in errors
assert 'Image link was not found, either insert it or remove it:\n' in errors
assert 'please repair it:\n' in errors
assert 'please repair it:\n' in errors
assert 'please repair it:\n(https://raw.githubusercontent.com/demisto/content/raw/test1.jpg)' in errors
@pytest.mark.parametrize('readme_content, is_valid', [
('Hey there, just testing', True),
('This is a test. All good!', False),
])
def test_pack_readme_is_different_then_pack_description(self, repo, readme_content, is_valid):
"""
Given:
- Case A: A unique pack readme.
- Case B: Pack readme that is equal to pack description
When:
- Validating pack readme vs pack description
Then:
- Case A: Ensure validation passes as the pack readme and pack description are different.
- Case B: Ensure validation fails as the pack readme is the same as the pack description.
Verify expected error is printed
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
pack.readme.write_text(readme_content)
pack.pack_metadata.write_json({
PACK_METADATA_DESC: 'This is a test. All good!',
})
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
assert self.validator.validate_pack_readme_and_pack_description() == is_valid
if not is_valid:
assert 'README.md content is equal to pack description. ' \
'Please remove the duplicate description from README.md file' in self.validator.get_errors()
def test_validate_pack_readme_and_pack_description_no_readme_file(self, repo):
"""
Given:
- A pack with no readme.
When:
- Validating pack readme vs pack description
Then:
            - Fail on the missing README file and not on the description error.
"""
self.restart_validator()
pack_name = 'PackName'
pack = repo.create_pack(pack_name)
self.validator.pack_path = pack.path
with ChangeCWD(repo.path):
os.remove(pack.readme.path)
assert self.validator.validate_pack_readme_and_pack_description()
assert '"README.md" file does not exist, create one in the root of the pack' in self.validator.get_errors()
assert 'README.md content is equal to pack description. ' \
'Please remove the duplicate description from README.md file' not in self.validator.get_errors()
def test_valid_is_pack_metadata_desc_too_long(self, repo):
"""
Given:
- Valid description length
When:
- Validating pack description length
Then:
- Ensure validation passes as the description field length is valid.
"""
pack_description = 'Hey there, just testing'
assert self.validator.is_pack_metadata_desc_too_long(pack_description) is True
def test_invalid_is_pack_metadata_desc_too_long(self, mocker, repo):
"""
Given:
- Invalid description length - higher than 130
When:
- Validating pack description length
Then:
- Ensure validation passes although description field length is higher than 130
- Ensure warning will be printed.
"""
pack_description = 'This is will fail cause the description here is too long.' \
'test test test test test test test test test test test test test test test test test' \
' test test test test test'
error_desc = 'The description field of the pack_metadata.json file is longer than 130 characters.'
mocker.patch("click.secho")
assert self.validator.is_pack_metadata_desc_too_long(pack_description) is True
assert error_desc in click.secho.call_args_list[0][0][0]
def test_validate_author_image_exists_valid(self, repo):
"""
Given:
- Pack with partner support and author image
When:
- Validating if author image exists
Then:
- Ensure validation passes.
"""
pack = repo.create_pack('MyPack')
self.validator.metadata_content = {'support': 'partner'}
self.validator.pack_path = pack.path
author_image_path = pack.author_image.path
with ChangeCWD(repo.path):
res = self.validator.validate_author_image_exists()
assert res
assert f'Partners must provide a non-empty author image under the path {author_image_path}.' not in \
self.validator.get_errors()
def test_validate_author_image_exists_invalid(self, repo):
"""
Given:
- Pack with partner support and no author image
When:
- Validating if author image exists
Then:
- Ensure validation fails.
"""
pack = repo.create_pack('MyPack')
self.validator.metadata_content = {'support': 'partner'}
self.validator.pack_path = pack.path
author_image_path = pack.author_image.path
with ChangeCWD(repo.path):
os.remove(author_image_path)
res = self.validator.validate_author_image_exists()
assert not res
assert f'Partners must provide a non-empty author image under the path {author_image_path}.' in \
self.validator.get_errors() | assert not fake_validator.check_timestamp_format(missing_t)
assert not fake_validator.check_timestamp_format(missing_z)
assert not fake_validator.check_timestamp_format(only_date)
assert not fake_validator.check_timestamp_format(with_hyphen) |
mod.rs | pub mod internal;
pub mod oid;
use internal::*;
use oid::*;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct T {
pub internal: internal::InternalType,
}
pub enum Types {
Unknown
}
impl Types {
pub fn t(&self) -> T {
match self {
Types::Unknown => |
}
}
} | {
return T{
internal: InternalType {
family: Family::Unknown,
width: 0,
precision: 0,
array_dimensions: vec![],
visible_type: 0,
tuple_contents: vec![],
tuple_labels: vec![],
oid: Oid::Unknown,
array_contents: None
}
}
} |
cocoeval.py | __author__ = 'tsungyi'
import numpy as np
import datetime
import time
from collections import defaultdict
import mask as maskUtils
import copy
class COCOeval:
# Interface for evaluating detection on the Microsoft COCO dataset.
#
# The usage for CocoEval is as follows:
# cocoGt=..., cocoDt=... # load dataset and results
# E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
# E.params.recThrs = ...; # set parameters as desired
# E.evaluate(); # run per image evaluation
# E.accumulate(); # accumulate per image results
# E.summarize(); # display summary metrics of results
# For example usage see evalDemo.m and http://mscoco.org/.
#
# The evaluation parameters are as follows (defaults in brackets):
# imgIds - [all] N img ids to use for evaluation
# catIds - [all] K cat ids to use for evaluation
# iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
# recThrs - [0:.01:1] R=101 recall thresholds for evaluation
# areaRng - [...] A=4 object area ranges for evaluation
# maxDets - [1 10 100] M=3 thresholds on max detections per image
# iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'
# iouType replaced the now DEPRECATED useSegm parameter.
# useCats - [1] if true use category labels for evaluation
# Note: if useCats=0 category labels are ignored as in proposal scoring.
# Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
#
# evaluate(): evaluates detections on every image and every category and
# concats the results into the "evalImgs" with fields:
# dtIds - [1xD] id for each of the D detections (dt)
# gtIds - [1xG] id for each of the G ground truths (gt)
# dtMatches - [TxD] matching gt id at each IoU or 0
# gtMatches - [TxG] matching dt id at each IoU or 0
# dtScores - [1xD] confidence of each dt
# gtIgnore - [1xG] ignore flag for each gt
# dtIgnore - [TxD] ignore flag for each dt at each IoU
#
# accumulate(): accumulates the per-image, per-category evaluation
# results in "evalImgs" into the dictionary "eval" with fields:
# params - parameters used for evaluation
# date - date evaluation was performed
# counts - [T,R,K,A,M] parameter dimensions (see above)
# precision - [TxRxKxAxM] precision for every evaluation setting
# recall - [TxKxAxM] max recall for every evaluation setting
# Note: precision and recall==-1 for settings with no gt objects.
#
# See also coco, mask, pycocoDemo, pycocoEvalDemo
#
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
# Licensed under the Simplified BSD License [see coco/license.txt]
def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):
'''
Initialize CocoEval using coco APIs for gt and dt
:param cocoGt: coco object with ground truth annotations
:param cocoDt: coco object with detection results
:return: None
'''
if not iouType:
print('iouType not specified. use default iouType segm')
self.cocoGt = cocoGt # ground truth COCO API
self.cocoDt = cocoDt # detections COCO API
self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements
self.eval = {} # accumulated evaluation results
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
self.params = Params(iouType=iouType) # parameters
self._paramsEval = {} # parameters for evaluation
self.stats = [] # result summarization
self.ious = {} # ious between all gts and dts
        if cocoGt is not None:
self.params.imgIds = sorted(cocoGt.getImgIds())
self.params.catIds = sorted(cocoGt.getCatIds())
def _prepare(self):
'''
Prepare ._gts and ._dts for evaluation based on params
:return: None
'''
def _toMask(anns, coco):
# modify ann['segmentation'] by reference
for ann in anns:
rle = coco.annToRLE(ann)
ann['segmentation'] = rle
p = self.params
if p.useCats:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
else:
gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
# convert ground truth to mask if iouType == 'segm'
if p.iouType == 'segm':
_toMask(gts, self.cocoGt)
_toMask(dts, self.cocoDt)
# set ignore flag
for gt in gts:
gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0
gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']
if p.iouType == 'keypoints':
gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']
self._gts = defaultdict(list) # gt for evaluation
self._dts = defaultdict(list) # dt for evaluation
for gt in gts:
self._gts[gt['image_id'], gt['category_id']].append(gt)
for dt in dts:
self._dts[dt['image_id'], dt['category_id']].append(dt)
self.evalImgs = defaultdict(list) # per-image per-category evaluation results
self.eval = {} # accumulated evaluation results
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
tic = time.time()
print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
        if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params=p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {(imgId, catId): computeIoU(imgId, catId) \
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
self._paramsEval = copy.deepcopy(self.params)
toc = time.time()
print('DONE (t={:0.2f}s).'.format(toc-tic))
def computeIoU(self, imgId, catId):
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return []
inds = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in inds]
if len(dt) > p.maxDets[-1]:
dt=dt[0:p.maxDets[-1]]
if p.iouType == 'segm':
g = [g['segmentation'] for g in gt]
d = [d['segmentation'] for d in dt]
elif p.iouType == 'bbox':
g = [g['bbox'] for g in gt]
d = [d['bbox'] for d in dt]
else:
raise Exception('unknown iouType for iou computation')
# compute iou between each dt and gt region
iscrowd = [int(o['iscrowd']) for o in gt]
ious = maskUtils.iou(d,g,iscrowd)
return ious
def computeOks(self, imgId, catId):
p = self.params
        # dimension here should be Nxm
gts = self._gts[imgId, catId]
dts = self._dts[imgId, catId]
inds = np.argsort([-d['score'] for d in dts], kind='mergesort')
dts = [dts[i] for i in inds]
if len(dts) > p.maxDets[-1]:
dts = dts[0:p.maxDets[-1]]
# if len(gts) == 0 and len(dts) == 0:
if len(gts) == 0 or len(dts) == 0:
return []
ious = np.zeros((len(dts), len(gts)))
sigmas = p.kpt_oks_sigmas
vars = (sigmas * 2)**2
k = len(sigmas)
# compute oks between each detection and ground truth object
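        # OKS for one (dt, gt) pair, as computed below, is
        #   sum_i exp(-d_i^2 / (2 * s^2 * k_i^2)) / (number of keypoints kept in e),
        # where d_i is the distance for keypoint i, s^2 is the gt area, and
        # k_i = 2 * sigma_i (so vars = k_i^2); only visible keypoints are kept
        # when the gt has any (k1 > 0).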
for j, gt in enumerate(gts):
# create bounds for ignore regions(double the gt bbox)
g = np.array(gt['keypoints'])
xg = g[0::3]; yg = g[1::3]; vg = g[2::3]
k1 = np.count_nonzero(vg > 0)
bb = gt['bbox']
x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2
y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2
for i, dt in enumerate(dts):
d = np.array(dt['keypoints'])
xd = d[0::3]; yd = d[1::3]
if k1>0:
# measure the per-keypoint distance if keypoints visible
dx = xd - xg
dy = yd - yg
else:
# measure minimum distance to keypoints in (x0,y0) & (x1,y1)
z = np.zeros((k))
dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)
dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)
e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2
if k1 > 0:
e=e[vg > 0]
ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
return ious
def evaluateImg(self, imgId, catId, aRng, maxDet):
'''
perform evaluation for single category and image
:return: dict (single image results)
'''
p = self.params
if p.useCats:
gt = self._gts[imgId,catId]
dt = self._dts[imgId,catId]
else:
gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
if len(gt) == 0 and len(dt) ==0:
return None
for g in gt:
if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
g['_ignore'] = 1
else:
g['_ignore'] = 0
# sort dt highest score first, sort gt ignore last
gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
gt = [gt[i] for i in gtind]
dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
dt = [dt[i] for i in dtind[0:maxDet]]
iscrowd = [int(o['iscrowd']) for o in gt]
# load computed ious
ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]
T = len(p.iouThrs)
G = len(gt)
D = len(dt)
gtm = np.zeros((T,G))
dtm = np.zeros((T,D))
gtIg = np.array([g['_ignore'] for g in gt])
dtIg = np.zeros((T,D))
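        # Greedy matching: detections are visited in descending score order and each
        # one is matched to the best remaining gt whose IoU exceeds the threshold t;
        # crowd gts may absorb multiple detections, and a match to an ignored gt is
        # only kept when no regular gt qualifies.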
if not len(ious)==0:
for tind, t in enumerate(p.iouThrs):
for dind, d in enumerate(dt):
# information about best match so far (m=-1 -> unmatched)
iou = min([t,1-1e-10])
m = -1
for gind, g in enumerate(gt):
# if this gt already matched, and not a crowd, continue
if gtm[tind,gind]>0 and not iscrowd[gind]:
continue
# if dt matched to reg gt, and on ignore gt, stop
if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
break
# continue to next gt unless better match made
if ious[dind,gind] < iou:
continue
# if match successful and best so far, store appropriately
iou=ious[dind,gind]
m=gind
# if match made store id of match for both dt and gt
if m ==-1:
continue
dtIg[tind,dind] = gtIg[m]
dtm[tind,dind] = gt[m]['id']
gtm[tind,m] = d['id']
# set unmatched detections outside of area range to ignore
a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
# store results for given image and category
return {
'image_id': imgId,
'category_id': catId,
'aRng': aRng,
'maxDet': maxDet,
'dtIds': [d['id'] for d in dt],
'gtIds': [g['id'] for g in gt],
'dtMatches': dtm,
'gtMatches': gtm,
'dtScores': [d['score'] for d in dt],
'gtIgnore': gtIg,
'dtIgnore': dtIg,
}
def accumulate(self, p = None):
'''
Accumulate per image evaluation results and store the result in self.eval
:param p: input params for evaluation
:return: None
'''
print('Accumulating evaluation results...')
tic = time.time()
if not self.evalImgs:
print('Please run evaluate() first')
# allows input customized parameters
if p is None:
p = self.params
p.catIds = p.catIds if p.useCats == 1 else [-1]
T = len(p.iouThrs)
R = len(p.recThrs)
K = len(p.catIds) if p.useCats else 1
A = len(p.areaRng)
M = len(p.maxDets)
precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories
recall = -np.ones((T,K,A,M))
scores = -np.ones((T,R,K,A,M))
# create dictionary for future indexing
_pe = self._paramsEval
catIds = _pe.catIds if _pe.useCats else [-1]
setK = set(catIds)
setA = set(map(tuple, _pe.areaRng))
setM = set(_pe.maxDets)
setI = set(_pe.imgIds)
# get inds to evaluate
k_list = [n for n, k in enumerate(p.catIds) if k in setK]
m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
I0 = len(_pe.imgIds)
A0 = len(_pe.areaRng)
# retrieve E at each category, area range, and max number of detections
for k, k0 in enumerate(k_list):
Nk = k0*A0*I0
for a, a0 in enumerate(a_list):
Na = a0*I0
for m, maxDet in enumerate(m_list):
E = [self.evalImgs[Nk + Na + i] for i in i_list]
E = [e for e in E if not e is None]
if len(E) == 0:
continue
dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])
# different sorting method generates slightly different results.
# mergesort is used to be consistent as Matlab implementation.
inds = np.argsort(-dtScores, kind='mergesort')
dtScoresSorted = dtScores[inds]
dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]
dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]
gtIg = np.concatenate([e['gtIgnore'] for e in E])
npig = np.count_nonzero(gtIg==0 )
if npig == 0:
continue
tps = np.logical_and( dtm, np.logical_not(dtIg) )
fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )
                    tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float64)
                    fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float64)
for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
tp = np.array(tp)
fp = np.array(fp)
nd = len(tp)
rc = tp / npig
pr = tp / (fp+tp+np.spacing(1))
q = np.zeros((R,))
ss = np.zeros((R,))
if nd:
recall[t,k,a,m] = rc[-1]
else:
recall[t,k,a,m] = 0
# numpy is slow without cython optimization for accessing elements
# use python array gets significant speed improvement
pr = pr.tolist(); q = q.tolist()
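                        # Build the interpolated precision envelope (make precision
                        # monotonically non-increasing as recall grows), then sample it
                        # at the fixed recall thresholds p.recThrs via searchsorted.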
for i in range(nd-1, 0, -1):
if pr[i] > pr[i-1]:
pr[i-1] = pr[i]
inds = np.searchsorted(rc, p.recThrs, side='left')
try:
for ri, pi in enumerate(inds):
q[ri] = pr[pi]
ss[ri] = dtScoresSorted[pi]
except:
pass
precision[t,:,k,a,m] = np.array(q)
scores[t,:,k,a,m] = np.array(ss)
self.eval = {
'params': p,
'counts': [T, R, K, A, M],
'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'precision': precision,
'recall': recall,
'scores': scores,
}
toc = time.time()
print('DONE (t={:0.2f}s).'.format( toc-tic))
def summarize(self):
'''
Compute and display summary metrics for evaluation results.
        Note: this function can *only* be applied on the default parameter setting
'''
def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):
p = self.params
iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap==1 else '(AR)'
iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
if iouThr is None else '{:0.2f}'.format(iouThr)
aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
if ap == 1:
# dimension of precision: [TxRxKxAxM]
s = self.eval['precision']
# IoU
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,:,aind,mind]
else:
# dimension of recall: [TxKxAxM]
s = self.eval['recall']
if iouThr is not None:
t = np.where(iouThr == p.iouThrs)[0]
s = s[t]
s = s[:,:,aind,mind]
if len(s[s>-1])==0:
mean_s = -1
else:
mean_s = np.mean(s[s>-1])
print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
return mean_s
def _summarizeDets():
stats = np.zeros((12,))
stats[0] = _summarize(1)
stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])
stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])
stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])
stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])
stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])
stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])
stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])
stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])
return stats
def _summarizeKps():
stats = np.zeros((10,))
stats[0] = _summarize(1, maxDets=20)
stats[1] = _summarize(1, maxDets=20, iouThr=.5)
stats[2] = _summarize(1, maxDets=20, iouThr=.75)
stats[3] = _summarize(1, maxDets=20, areaRng='medium')
stats[4] = _summarize(1, maxDets=20, areaRng='large')
stats[5] = _summarize(0, maxDets=20)
stats[6] = _summarize(0, maxDets=20, iouThr=.5)
stats[7] = _summarize(0, maxDets=20, iouThr=.75)
stats[8] = _summarize(0, maxDets=20, areaRng='medium')
stats[9] = _summarize(0, maxDets=20, areaRng='large')
return stats
if not self.eval:
raise Exception('Please run accumulate() first')
iouType = self.params.iouType
if iouType == 'segm' or iouType == 'bbox':
summarize = _summarizeDets
elif iouType == 'keypoints':
summarize = _summarizeKps
self.stats = summarize()
    def __str__(self):
        self.summarize()
        return str(self.stats)
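# Illustrative usage sketch (not part of the original module), mirroring the
# workflow in the class comment above; cocoGt and cocoDt are assumed to be
# already-loaded COCO API objects for the ground truth and the detections.
def _coco_eval_example(cocoGt, cocoDt, iouType='bbox'):
    E = COCOeval(cocoGt, cocoDt, iouType)        # initialize CocoEval object
    E.params.imgIds = sorted(cocoGt.getImgIds()) # restrict/override params as desired
    E.evaluate()                                 # run per image evaluation
    E.accumulate()                               # accumulate per image results
    E.summarize()                                # display summary metrics
    return E.stats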
class Params:
'''
Params for coco evaluation api
'''
def setDetParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value
self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [1, 10, 100]
self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]
self.areaRngLbl = ['all', 'small', 'medium', 'large']
self.useCats = 1
def setKpParams(self):
self.imgIds = []
self.catIds = []
# np.arange causes trouble. the data point on arange is slightly larger than the true value | self.areaRngLbl = ['all', 'medium', 'large']
self.useCats = 1
self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62,.62, 1.07, 1.07, .87, .87, .89, .89])/10.0
def __init__(self, iouType='segm'):
if iouType == 'segm' or iouType == 'bbox':
self.setDetParams()
elif iouType == 'keypoints':
self.setKpParams()
else:
raise Exception('iouType not supported')
self.iouType = iouType
# useSegm is deprecated
self.useSegm = None | self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)
self.maxDets = [20]
self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]] |
Transformer.py | import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from utils.utils import clones
class LayerNormGoogle(nn.Module):
def __init__(self, features, epsilon=1e-6):
super(LayerNormGoogle, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.epsilon = epsilon
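    # Normalizes each position over the last (feature) dimension:
    #   y = a_2 * (x - mean) / (std + epsilon) + b_2,
    # with learnable gain a_2 and bias b_2.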
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.epsilon) + self.b_2
class EncoderBlockGoogle(nn.Module):
def __init__(self, layer, num_layers):
super(EncoderBlockGoogle, self).__init__()
self.layers = clones(layer, num_layers)
self.norm = LayerNormGoogle(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class ResidualConnectionGoogle(nn.Module):
def __init__(self, size, keep_prob):
super(ResidualConnectionGoogle, self).__init__()
self.norm = LayerNormGoogle(size)
# TODO: Use dropout interface
self.dropout = nn.Dropout(keep_prob)
def forward(self, input, sublayer):
return input + self.dropout(sublayer(self.norm(input)))
class EncoderLayerGoogle(nn.Module):
def __init__(self, size, attention, feed_forward, keep_prob):
super(EncoderLayerGoogle, self).__init__()
self.size = size
self.attention = attention
self.feed_forward = feed_forward
# Each encoder layer has two sublayers
self.sublayer = clones(ResidualConnectionGoogle(size, keep_prob), 2)
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.attention(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class EncoderClassifier(nn.Module):
def __init__(self, embedding, encoder, classifier, device, is_average=True):
super(EncoderClassifier, self).__init__()
self.embedding = embedding
self.encoder = encoder
self.classifier = classifier
self.device = device
self.is_average = is_average
def forward(self, x, mask=None):
kl_loss = torch.Tensor([0.0])
# Initial x.size() = [length, batch_size]
x = x.permute(1, 0)
# After permute x.size = [batch_size, length]
x = self.embedding(x)
if "cuda" in str(self.device):
x = x.cuda()
kl_loss = kl_loss.cuda()
x = self.encoder(x, mask)
if self.is_average:
# Averaged sentence representation
x = torch.mean(x, dim=1)
x = self.classifier(x)
return x, kl_loss
class Classifier(nn.Module):
def __init__(self, d_model, d_hidden, num_classes, keep_prob):
super(Classifier, self).__init__()
self.linear1 = nn.Linear(d_model, d_hidden)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
self.linear2 = nn.Linear(d_hidden, num_classes)
def forward(self, x):
x = self.dropout(self.relu(self.linear1(x)))
x = self.linear2(x)
return x
class MultiHeadedAttentionGoogle(nn.Module):
def __init__(self, heads=8, d_model=512, keep_prob=0.1):
super(MultiHeadedAttentionGoogle, self).__init__()
assert d_model % heads == 0
self.d_k = d_model // heads
self.heads = heads
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(keep_prob)
def attention(self, query, key, value, mask=None):
  # Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if self.dropout is not None:
p_attn = self.dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
def forward(self, query, key, value, mask=None):
num_batches = query.size(0)
if mask is not None:
mask = mask.unsqueeze(1)
# Apply linear projection on the input sequence and split the heads.
query, key, value = [linear(x).view(num_batches, -1, self.heads, self.d_k).transpose(1, 2)
for linear, x in zip(self.linears, (query, key, value))]
  # Apply attention on the projected and split vectors
x, self.attn = self.attention(query, key, value, mask=mask)
# Concat vectors and apply linear
x = x.transpose(1, 2).contiguous().view(num_batches, -1, self.heads * self.d_k)
return self.linears[-1](x)
class PositionalFeedForwardGoogle(nn.Module):
def __init__(self, d_model, d_ff, keep_prob=0.1):
super(PositionalFeedForwardGoogle, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(keep_prob)
self.relu = nn.ReLU()
def forward(self, input):
return self.w_2(self.dropout(self.relu(self.w_1(input))))
class Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, padding_id, use_pretrained_embed, pretrained_weights,
optional_sqrt_mul=False):
super(Embeddings, self).__init__()
# Initialize embeddings
self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_id).cpu()
if use_pretrained_embed:
   # nn.Embedding.from_pretrained is a classmethod returning a new module; copy the weights in place instead.
   self.embedding.weight.data.copy_(pretrained_weights)
self.embed_dim = embed_dim
self.optional_sqrt_mul = optional_sqrt_mul
def forward(self, input):
if self.optional_sqrt_mul:
return self.embedding(input) * math.sqrt(self.embed_dim)
else:
return self.embedding(input)
class PositionalEncodingGoogle(nn.Module):
def __init__(self, d_model, keep_prob=0.1, max_len=5000):
super(PositionalEncodingGoogle, self).__init__()
self.dropout = nn.Dropout(keep_prob)
positional_encoding = torch.zeros(max_len, d_model)
pos = torch.arange(0., max_len).unsqueeze(1)
  # Inverse frequencies computed in log space: 1 / 10000^(2i / d_model)
div_term = torch.exp(torch.arange(0., d_model, 2) * (-math.log(10000) / d_model))
positional_encoding[:, 0::2] = torch.sin(pos * div_term)
positional_encoding[:, 1::2] = torch.cos(pos * div_term)
positional_encoding = positional_encoding.unsqueeze(0)
self.register_buffer("pe", positional_encoding)
def forward(self, input):
return self.dropout(input + Variable(self.pe[:, :input.size(1)], requires_grad=False))
class TransformerGoogle:
def __init__(self, args):
super(TransformerGoogle, self).__init__()
self.args_common = args["common_model_properties"]
self.args_specific = args["transformer_google"]
# Device
self.device = self.args_common["device"]
# Input/Output dimensions
self.vocab_size = self.args_common["vocab_size"]
self.embed_dim = self.args_common["embed_dim"]
self.num_class = self.args_common["num_class"]
# Embedding parameters
self.padding_id = self.args_common["padding_id"]
# Condition parameters
self.use_pretrained_embed = self.args_common["use_pretrained_embed"]
self.use_embed_sqrt_mul = self.args_specific["use_embed_sqrt_mul"]
# Pretrained embedding weights
self.pretrained_weights = self.args_common["pretrained_weights"]
# Dropout probabilities for each individual part of the full model.
self.keep_prob_encoder = self.args_specific["keep_prob_encoder"]
self.keep_prob_pe = self.args_specific["keep_prob_pe"]
  self.keep_prob_pff = self.args_specific["keep_prob_pff"]
self.keep_prob_attn = self.args_specific["keep_prob_attn"]
self.keep_prob_clf = self.args_specific["keep_prob_clf"]
# Condition parameter for the transformer type (It only supports classification for now)
self.transformer_type = self.args_specific["transformer_type"]
# Number of parallel attention layers for MultiHeadedAttention
self.heads = self.args_specific["heads"]
# Number of encoder layers |
# Number of hidden count units for Position-Wise Feed-Forward Network
self.num_hidden_pos_ff = self.args_specific["num_hidden_pos_ff"]
# Maximum length of an input
self.max_length = self.args_specific["max_length"]
if self.transformer_type == "classifier":
self.model = self.create_classifier_transformer()
else:
raise ValueError("Transformer can be created as classifier for now!")
def create_classifier_transformer(self):
c = copy.deepcopy
# Initialize individual parts of the full model
# attention = torch.nn.MultiheadAttention(num_heads=self.heads, embed_dim=self.embed_dim,
# dropout=self.keep_prob_attn)
attention = MultiHeadedAttentionGoogle(heads=self.heads, d_model=self.embed_dim, keep_prob=self.keep_prob_attn)
ff = PositionalFeedForwardGoogle(d_model=self.embed_dim, d_ff=self.num_hidden_pos_ff,
           keep_prob=self.keep_prob_pff)
embeddings = Embeddings(self.embed_dim, self.vocab_size, self.padding_id, self.use_pretrained_embed,
self.pretrained_weights, optional_sqrt_mul=self.use_embed_sqrt_mul)
positional_embeddings = PositionalEncodingGoogle(d_model=self.embed_dim, keep_prob=self.keep_prob_pe,
max_len=self.max_length)
# Initialize the full model
model = EncoderClassifier(nn.Sequential(embeddings, c(positional_embeddings)),
EncoderBlockGoogle(
EncoderLayerGoogle(self.embed_dim, c(attention), c(ff), self.keep_prob_encoder),
self.num_encoder_layers),
Classifier(self.embed_dim, d_hidden=self.embed_dim // 2, num_classes=self.num_class,
keep_prob=self.keep_prob_clf),
device=self.device)
# Initialize model parameters
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
if __name__ == '__main__':
print("Transformer tests")
plt.figure(figsize=(15, 5))
pe = PositionalEncodingGoogle(20, 0)
y = pe.forward(Variable(torch.zeros(1, 100, 20)))
plt.plot(np.arange(100), y[0, :, 4:8].data.numpy())
plt.legend(["dim %d" % p for p in [4, 5, 6, 7]])
plt.show() | self.num_encoder_layers = self.args_specific["num_encoder_layers"] |
resource.py | import abc
from shlex import quote as shq
from .utils import BatchException
class Resource:
"""
Abstract class for resources.
"""
_uid: str
@abc.abstractmethod
def _get_path(self, directory) -> str:
pass
@abc.abstractmethod
def _add_output_path(self, path):
pass
def _declare(self, directory):
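  # Emit a shell variable assignment binding this resource's uid to its path under the given directory.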
return f"{self._uid}={shq(self._get_path(directory))}" # pylint: disable=no-member
class ResourceFile(Resource, str):
"""
Class representing a single file resource. There exist two subclasses:
:class:`.InputResourceFile` and :class:`.JobResourceFile`.
"""
_counter = 0
_uid_prefix = "__RESOURCE_FILE__"
_regex_pattern = r"(?P<RESOURCE_FILE>{}\d+)".format(_uid_prefix)
@classmethod
def _new_uid(cls):
uid = "{}{}".format(cls._uid_prefix, cls._counter)
cls._counter += 1
return uid
def __new__(cls, value): # pylint: disable=W0613
uid = ResourceFile._new_uid()
r = str.__new__(cls, uid)
r._uid = uid
return r
def __init__(self, value):
super(ResourceFile, self).__init__()
assert value is None or isinstance(value, str)
self._value = value
self._source = None
self._output_paths = set()
self._resource_group = None
self._has_extension = False
def _get_path(self, directory):
raise NotImplementedError
def _add_source(self, source):
from .job import Job # pylint: disable=cyclic-import
assert isinstance(source, Job)
self._source = source
return self
def _add_output_path(self, path):
self._output_paths.add(path)
if self._source is not None:
self._source._external_outputs.add(self)
def _add_resource_group(self, rg):
self._resource_group = rg
def _has_resource_group(self):
return self._resource_group is not None
def _get_resource_group(self):
return self._resource_group
def add_extension(self, extension):
"""
Specify the file extension to use.
Examples
--------
>>> b = Batch()
>>> j = b.new_job()
>>> j.command(f'echo "hello" > {j.ofile}')
>>> j.ofile.add_extension('.txt')
>>> b.run()
Notes
-----
The default file name for a :class:`.ResourceFile` is a unique
identifier with no file extensions.
Parameters
----------
extension: :obj:`str`
File extension to use.
Returns
-------
:class:`.ResourceFile`
Same resource file with the extension specified
"""
if self._has_extension:
raise BatchException("Resource already has a file extension added.")
self._value += extension
self._has_extension = True
return self
def | (self):
return self._uid # pylint: disable=no-member
def __repr__(self):
return self._uid # pylint: disable=no-member
class InputResourceFile(ResourceFile):
"""
Class representing a resource from an input file.
Examples
--------
`input` is an :class:`.InputResourceFile` of the batch `b`
and is used in job `j`:
>>> b = Batch()
>>> input = b.read_input('data/hello.txt')
>>> j = b.new_job(name='hello')
>>> j.command(f'cat {input}')
>>> b.run()
"""
def __init__(self, value):
self._input_path = None
super().__init__(value)
def _add_input_path(self, path):
self._input_path = path
return self
def _get_path(self, directory):
assert self._value is not None
return shq(directory + '/inputs/' + self._value)
class JobResourceFile(ResourceFile):
"""
Class representing an intermediate file from a job.
Examples
--------
 `j.ofile` is a :class:`.JobResourceFile` on the job `j`:
>>> b = Batch()
>>> j = b.new_job(name='hello-tmp')
>>> j.command(f'echo "hello world" > {j.ofile}')
>>> b.run()
Notes
-----
All :class:`.JobResourceFile` are temporary files and must be written
to a permanent location using :meth:`.Batch.write_output` if the output needs
to be saved.
"""
def _get_path(self, directory):
assert self._source is not None
assert self._value is not None
return shq(directory + '/' + self._source._uid + '/' + self._value)
class ResourceGroup(Resource):
"""
Class representing a mapping of identifiers to a resource file.
Examples
--------
Initialize a batch and create a new job:
>>> b = Batch()
>>> j = b.new_job()
Read a set of input files as a resource group:
>>> bfile = b.read_input_group(bed='data/example.bed',
... bim='data/example.bim',
... fam='data/example.fam')
Create a resource group from a job intermediate:
>>> j.declare_resource_group(ofile={'bed': '{root}.bed',
... 'bim': '{root}.bim',
... 'fam': '{root}.fam'})
>>> j.command(f'plink --bfile {bfile} --make-bed --out {j.ofile}')
Reference the entire file group:
>>> j.command(f'plink --bfile {bfile} --geno 0.2 --make-bed --out {j.ofile}')
Reference a single file:
>>> j.command(f'wc -l {bfile.fam}')
Execute the batch:
>>> b.run() # doctest: +SKIP
Notes
-----
All files in the resource group are copied between jobs even if only one
file in the resource group is mentioned. This is to account for files that
are implicitly assumed to always be together such as a FASTA file and its
index.
"""
_counter = 0
_uid_prefix = "__RESOURCE_GROUP__"
_regex_pattern = r"(?P<RESOURCE_GROUP>{}\d+)".format(_uid_prefix)
@classmethod
def _new_uid(cls):
uid = "{}{}".format(cls._uid_prefix, cls._counter)
cls._counter += 1
return uid
def __init__(self, source, root, **values):
self._source = source
self._resources = {} # dict of name to resource uid
self._root = root
self._uid = ResourceGroup._new_uid()
for name, resource_file in values.items():
assert isinstance(resource_file, ResourceFile)
self._resources[name] = resource_file
resource_file._add_resource_group(self)
def _get_path(self, directory):
subdir = self._source._uid if self._source else 'inputs'
return directory + '/' + subdir + '/' + self._root
def _add_output_path(self, path):
for name, rf in self._resources.items():
rf._add_output_path(path + '.' + name)
def _get_resource(self, item):
if item not in self._resources:
raise BatchException(f"'{item}' not found in the resource group.\n"
f"Hint: you must declare each attribute when constructing the resource group.")
return self._resources[item]
def __getitem__(self, item):
return self._get_resource(item)
def __getattr__(self, item):
return self._get_resource(item)
def __add__(self, other):
assert isinstance(other, str)
return str(self._uid) + other
def __radd__(self, other):
assert isinstance(other, str)
return other + str(self._uid)
def __str__(self):
return self._uid
| __str__ |
images.go | package dipod
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"strings"
"time"
"github.com/EricHripko/dipod/iopodman"
"github.com/docker/distribution/reference"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/image"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/pkg/streamformatter"
"github.com/docker/go-connections/nat"
specs "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus"
"github.com/varlink/go/varlink"
)
type imageBackend struct {
}
func (*imageBackend) ImageDelete(imageRef string, force, prune bool) (res []types.ImageDeleteResponseItem, err error) {
item := types.ImageDeleteResponseItem{}
item.Deleted, err = iopodman.RemoveImage().Call(
context.TODO(),
podman,
imageRef,
force,
)
if err != nil {
return
}
res = append(res, item)
return
}
func (*imageBackend) ImageHistory(imageName string) (res []*image.HistoryResponseItem, err error) {
var history []iopodman.ImageHistory
history, err = iopodman.HistoryImage().Call(context.TODO(), podman, imageName)
if err != nil {
return
}
for _, l := range history {
layer := &image.HistoryResponseItem{
ID: l.Id,
CreatedBy: l.CreatedBy,
Tags: l.Tags,
Size: l.Size,
Comment: l.Comment,
}
if created, err := time.Parse(time.RFC3339, l.Created); err == nil {
layer.Created = created.Unix()
} else {
log.
WithError(err).
WithField("created", l.Created).Warn("created parse fail")
}
res = append(res, layer)
}
return
}
func (*imageBackend) Images(imageFilters filters.Args, all bool, withExtraAttrs bool) (images []*types.ImageSummary, err error) {
if all {
err = errors.New("not implemented")
return
}
var srcs []iopodman.Image
srcs, err = iopodman.ListImages().Call(context.TODO(), podman)
if err != nil {
return
}
for _, src := range srcs {
if imageFilters.Contains("label") && !imageFilters.MatchKVList("label", src.Labels) {
continue
}
if imageFilters.Contains("reference") {
matched := false
for _, search := range imageFilters.Get("reference") {
if matched {
break
}
params := strings.Split(search, ":")
var (
id string
tag string
)
if len(params) == 0 {
continue
}
id = params[0]
if len(params) > 1 {
tag = params[1]
}
for _, rt := range src.RepoTags {
if strings.HasPrefix(rt, id+":") {
if tag == "" {
matched = true
} else {
if strings.HasSuffix(rt, ":"+tag) {
matched = true
}
}
}
}
}
if !matched {
continue
}
}
image := &types.ImageSummary{
Containers: src.Containers,
Created: 0,
ID: src.Id,
Labels: src.Labels,
ParentID: src.ParentId,
RepoDigests: src.RepoDigests,
RepoTags: src.RepoTags,
Size: src.Size,
SharedSize: 0,
VirtualSize: src.VirtualSize,
}
if image.RepoTags == nil {
image.RepoTags = []string{"<none>:<none>"}
}
if image.RepoDigests == nil {
image.RepoDigests = []string{"<none>@<none>"}
}
if created, err := time.Parse(time.RFC3339, src.Created); err == nil {
image.Created = created.Unix()
} else {
log.
WithError(err).
WithField("created", src.Created).Warn("created parse fail")
}
images = append(images, image)
}
return
}
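// is2ss converts a slice of interface{} values (assumed to hold strings) into a string slice.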
func is2ss(i []interface{}) (s []string) {
for _, ii := range i {
s = append(s, ii.(string))
}
return
}
func (*imageBackend) LookupImage(name string) (image *types.ImageInspect, err error) {
	// podman for some reason returns this as a JSON string, so we need to decode it
var payload string
payload, err = iopodman.InspectImage().Call(context.TODO(), podman, name)
if err != nil {
return
}
data := make(map[string]interface{})
err = json.Unmarshal([]byte(payload), &data)
if err != nil {
return
}
digest := strings.TrimPrefix(data["Digest"].(string), "sha256:")
image = &types.ImageInspect{
ID: data["Id"].(string),
Container: digest,
Comment: data["Comment"].(string),
Os: data["Os"].(string),
Architecture: data["Architecture"].(string),
Parent: data["Parent"].(string),
Config: &container.Config{
Hostname: "",
Domainname: "",
AttachStdout: false,
AttachStdin: false,
AttachStderr: false,
OpenStdin: false,
StdinOnce: false,
ArgsEscaped: true,
NetworkDisabled: false,
OnBuild: nil, //todo
Image: digest,
User: "",
WorkingDir: "",
MacAddress: "",
Entrypoint: nil,
Labels: nil, //todo
},
DockerVersion: data["Version"].(string),
VirtualSize: int64(data["VirtualSize"].(float64)),
Size: int64(data["Size"].(float64)),
Author: data["Author"].(string),
Created: data["Created"].(string),
RepoDigests: is2ss(data["RepoDigests"].([]interface{})),
RepoTags: is2ss(data["RepoTags"].([]interface{})),
}
// container config
config := data["Config"].(map[string]interface{})
if env, ok := config["Env"]; ok {
image.Config.Env = is2ss(env.([]interface{}))
}
if cmd, ok := config["Cmd"]; ok {
image.Config.Cmd = is2ss(cmd.([]interface{}))
}
if ep, ok := config["Entrypoint"]; ok {
image.Config.Entrypoint = is2ss(ep.([]interface{}))
}
if workdir, ok := config["WorkingDir"]; ok {
image.Config.WorkingDir = workdir.(string)
}
if user, ok := config["User"]; ok {
image.Config.User = user.(string)
}
if stopSignal, ok := config["StopSignal"]; ok {
image.Config.StopSignal = stopSignal.(string)
}
if tmp, ok := config["ExposedPorts"]; ok {
image.Config.ExposedPorts = make(nat.PortSet)
ports := tmp.(map[string]interface{})
for port := range ports {
image.Config.ExposedPorts[nat.Port(port)] = struct{}{}
}
}
if tmp, ok := config["Volumes"]; ok {
image.Config.Volumes = make(map[string]struct{})
vols := tmp.(map[string]interface{})
for vol := range vols {
image.Config.Volumes[vol] = struct{}{}
}
}
if tmp, ok := config["Labels"]; ok {
image.Config.Labels = make(map[string]string)
labels := tmp.(map[string]interface{})
for key, val := range labels {
image.Config.Labels[key] = val.(string)
}
}
image.ContainerConfig = image.Config
// graph driver
gd := data["GraphDriver"].(map[string]interface{})
gdd := gd["Data"].(map[string]interface{})
image.GraphDriver = types.GraphDriverData{
Name: gd["Name"].(string),
Data: make(map[string]string),
}
for key, val := range gdd {
image.GraphDriver.Data[key] = val.(string)
}
// rootfs
rootfs := data["RootFS"].(map[string]interface{})
image.RootFS = types.RootFS{
Type: rootfs["Type"].(string),
Layers: is2ss(rootfs["Layers"].([]interface{})),
}
return
}
func (*imageBackend) TagImage(imageName, repository, tag string) (out string, err error) {
target := repository
if tag != "" {
target += ":" + tag
}
log := log.WithField("source", imageName).WithField("target", target)
if target == "" {
err = errors.New("dipod: empty target")
log.WithError(err).Error("image tag fail")
return
}
log.Debug("image tag")
out, err = iopodman.TagImage().Call(context.TODO(), podman, imageName, target)
return
}
const (
onlyDangling = "dangling"
)
func (*imageBackend) ImagesPrune(ctx context.Context, pruneFilters filters.Args) (report *types.ImagesPruneReport, err error) {
all := false
if pruneFilters.Contains(onlyDangling) {
if pruneFilters.ExactMatch(onlyDangling, valueNo) {
all = true
}
}
var pruned []string
pruned, err = iopodman.ImagesPrune().Call(ctx, podman, all)
if err != nil {
return
}
report = &types.ImagesPruneReport{}
for _, image := range pruned {
report.ImagesDeleted = append(report.ImagesDeleted, types.ImageDeleteResponseItem{
Deleted: image,
})
}
return
}
func (*imageBackend) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error {
outStream = streamformatter.NewStdoutWriter(outStream)
// stash context tarball to a temp file
importContext, err := ioutil.TempFile("", "dipod-import")
if err != nil {
return err
}
io.Copy(importContext, inTar)
defer inTar.Close()
defer os.Remove(importContext.Name())
// import
recv, err := iopodman.LoadImage().Send(context.Background(), podman, varlink.More, "", importContext.Name(), quiet, false)
if err != nil {
return err
}
for {
var status iopodman.MoreResponse
var flags uint64
status, flags, err = recv(context.Background())
if err != nil {
return err
}
for _, log := range status.Logs {
_, err = outStream.Write([]byte(log))
if err != nil {
return err
}
}
if flags&varlink.Continues != varlink.Continues {
fmt.Fprintf(outStream, "Loaded image: %s\n", status.Id)
break
}
}
return nil
}
func (*imageBackend) ImportImage(src string, repository, platform string, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error {
return errors.New("not implemented")
}
func (*imageBackend) ExportImage(names []string, outStream io.Writer) error {
// prepare temp file for the tarball
tmp, err := ioutil.TempFile("", "dipod-export")
if err != nil {
return err
}
dest := "docker-archive://" + tmp.Name()
err = tmp.Close()
if err != nil {
return err
}
defer os.Remove(tmp.Name())
// parse list of image names into name + list of tags
ref, err := reference.Parse(names[0])
if err != nil {
return err
}
named, ok := ref.(reference.Named)
if !ok {
return errors.New("dipod: main name parse fail")
}
var tags []string
for _, name := range names[1:] {
ref, err := reference.Parse(name)
if err != nil |
nt, ok := ref.(reference.NamedTagged)
if !ok {
return errors.New("dipod: secondary name parse fail")
}
if named.Name() != nt.Name() {
return errors.New("dipod: multiple image export not supported")
}
tags = append(tags, nt.Tag())
}
_, err = iopodman.ExportImage().Call(context.TODO(), podman, names[0], dest, false, tags)
if err != nil {
return err
}
tmp, err = os.Open(tmp.Name())
if err != nil {
return err
}
_, err = io.Copy(outStream, tmp)
return err
}
func (*imageBackend) PullImage(ctx context.Context, image, tag string, platform *specs.Platform, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
outStream = streamformatter.NewStdoutWriter(outStream)
name := image + ":" + tag
// no slash => pulling from DockerHub, docker cli shadily strips docker.io/
// prefix even if user explicitly specified it
if !strings.ContainsAny(name, "/") {
name = "docker.io/library/" + name
}
recv, err := iopodman.PullImage().Send(context.TODO(), podman, varlink.More, name)
if err != nil {
return err
}
for {
status, flags, err := recv(context.TODO())
if err != nil {
return err
}
for _, log := range status.Logs {
_, err = outStream.Write([]byte(log))
if err != nil {
return err
}
}
if flags&varlink.Continues != varlink.Continues {
break
}
}
return nil
}
func (*imageBackend) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error {
return errors.New("not implemented")
}
const (
isAutomated = "is-automated"
isOfficial = "is-official"
valueYes = "true"
valueNo = "false"
)
func (*imageBackend) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (res *registry.SearchResults, err error) {
var args filters.Args
args, err = filters.FromJSON(filtersArgs)
if err != nil {
return
}
yes := true
no := false
filter := iopodman.ImageSearchFilter{}
if args.Contains(isAutomated) {
if args.ExactMatch(isAutomated, valueYes) {
filter.Is_automated = &yes
}
if args.ExactMatch(isAutomated, valueNo) {
filter.Is_automated = &no
}
}
if args.Contains(isOfficial) {
if args.ExactMatch(isOfficial, valueYes) {
filter.Is_official = &yes
}
if args.ExactMatch(isOfficial, valueNo) {
filter.Is_official = &no
}
}
stars := args.Get("stars")
if len(stars) > 0 {
var starNo int
starNo, err = strconv.Atoi(stars[0])
if err != nil {
return
}
filter.Star_count = int64(starNo)
}
var images []iopodman.ImageSearchResult
limit64 := int64(limit)
images, err = iopodman.SearchImages().Call(ctx, podman, term, &limit64, filter)
if err != nil {
return
}
res = ®istry.SearchResults{
Query: term,
NumResults: len(images),
}
for _, image := range images {
res.Results = append(res.Results, registry.SearchResult{
Name: image.Name,
Description: image.Description,
IsAutomated: image.Is_automated,
IsOfficial: image.Is_official,
StarCount: int(image.Star_count),
})
}
return
}
| {
return err
} |
client_request_test.go | // Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package runtime
import (
"net/http"
"net/url"
"testing"
"time"
"github.com/go-openapi/strfmt"
"github.com/stretchr/testify/assert"
)
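// trw is a minimal ClientRequest test double that records the header and body values set on it.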
type trw struct {
Headers http.Header
Body interface{}
} | if t.Headers == nil {
t.Headers = make(http.Header)
}
t.Headers.Set(name, values[0])
return nil
}
func (t *trw) SetQueryParam(_ string, _ ...string) error { return nil }
func (t *trw) SetFormParam(_ string, _ ...string) error { return nil }
func (t *trw) SetPathParam(_ string, _ string) error { return nil }
func (t *trw) SetFileParam(_ string, _ ...NamedReadCloser) error { return nil }
func (t *trw) SetBodyParam(body interface{}) error {
t.Body = body
return nil
}
func (t *trw) SetTimeout(timeout time.Duration) error {
return nil
}
func (t *trw) GetQueryParams() url.Values { return nil }
func (t *trw) GetMethod() string { return "" }
func (t *trw) GetPath() string { return "" }
func (t *trw) GetBody() []byte { return nil }
func TestRequestWriterFunc(t *testing.T) {
hand := ClientRequestWriterFunc(func(r ClientRequest, reg strfmt.Registry) error {
_ = r.SetHeaderParam("blah", "blah blah")
_ = r.SetBodyParam(struct{ Name string }{"Adriana"})
return nil
})
tr := new(trw)
_ = hand.WriteToRequest(tr, nil)
assert.Equal(t, "blah blah", tr.Headers.Get("blah"))
assert.Equal(t, "Adriana", tr.Body.(struct{ Name string }).Name)
} |
func (t *trw) SetHeaderParam(name string, values ...string) error { |
actions.test.js | import fetchMock from 'fetch-mock';
import * as actions from '@/store/jobs/actions';
import { addUrlParams } from '@/utils/api';
import { storeWithApi } from './../../utils';
describe('fetchJob', () => {
let args, params, url;
beforeEach(() => {
args = {
jobId: 'job-1',
productSlug: 'my-product',
versionLabel: 'my-version',
planSlug: 'plan-1',
};
params = {
plan__plan_template__planslug__slug: args.planSlug,
plan__version__label: args.versionLabel,
plan__version__product__productslug__slug: args.productSlug,
};
url = addUrlParams(window.api_urls.job_detail('job-1'), params);
});
describe('success', () => {
beforeEach(() => {
window.socket = { subscribe: jest.fn() };
});
afterEach(() => {
Reflect.deleteProperty(window, 'socket');
});
test('GETs job from api and subscribes to ws events', () => {
const store = storeWithApi({});
const job = {
id: 'job-1',
creator: null,
plan: 'plan-1',
status: 'complete',
steps: [],
results: {},
org_name: null,
org_type: null,
};
fetchMock.getOnce(url, job);
const started = {
type: 'FETCH_JOB_STARTED',
payload: 'job-1',
};
const succeeded = {
type: 'FETCH_JOB_SUCCEEDED',
payload: { id: 'job-1', job },
};
const expected = {
model: 'job',
id: 'job-1',
};
expect.assertions(2);
return store.dispatch(actions.fetchJob(args)).then(() => {
expect(store.getActions()).toEqual([started, succeeded]);
expect(window.socket.subscribe).toHaveBeenCalledWith(expected);
});
});
test('handles missing job', () => {
const store = storeWithApi({});
fetchMock.getOnce(url, 404);
const started = {
type: 'FETCH_JOB_STARTED',
payload: 'job-1',
};
const succeeded = {
type: 'FETCH_JOB_SUCCEEDED',
payload: { id: 'job-1', job: null },
};
expect.assertions(2);
return store.dispatch(actions.fetchJob(args)).then(() => {
expect(store.getActions()).toEqual([started, succeeded]);
expect(window.socket.subscribe).not.toHaveBeenCalled();
});
});
});
describe('error', () => {
test('dispatches FETCH_JOB_FAILED action', () => {
const store = storeWithApi({});
fetchMock.getOnce(url, { status: 500, body: {} });
const started = {
type: 'FETCH_JOB_STARTED',
payload: 'job-1',
};
const failed = {
type: 'FETCH_JOB_FAILED',
payload: 'job-1',
};
expect.assertions(5);
return store.dispatch(actions.fetchJob(args)).catch(() => {
const allActions = store.getActions();
expect(allActions[0]).toEqual(started);
expect(allActions[1].type).toEqual('ERROR_ADDED');
expect(allActions[1].payload.message).toEqual('Internal Server Error');
expect(allActions[2]).toEqual(failed);
expect(window.console.error).toHaveBeenCalled();
});
});
});
});
describe('startJob', () => {
describe('success', () => {
beforeEach(() => {
window.socket = { subscribe: jest.fn() };
});
afterEach(() => {
Reflect.deleteProperty(window, 'socket');
});
test('dispatches JOB_STARTED action and subscribes to ws events', () => {
const store = storeWithApi({});
const data = { plan: 'plan-1', steps: ['step-1'] };
const response = {
id: 'job-1',
plan: 'plan-1',
steps: ['step-1'],
};
fetchMock.postOnce(window.api_urls.job_list(), {
status: 201,
body: response,
});
const started = {
type: 'JOB_REQUESTED',
payload: data,
};
const succeeded = {
type: 'JOB_STARTED',
payload: response,
};
const expected = {
model: 'job',
id: 'job-1',
};
expect.assertions(2);
return store.dispatch(actions.startJob(data)).then(() => {
expect(store.getActions()).toEqual([started, succeeded]);
expect(window.socket.subscribe).toHaveBeenCalledWith(expected);
});
});
});
describe('error', () => {
test('dispatches JOB_REJECTED action', () => {
const store = storeWithApi({});
const data = { plan: 'plan-1', steps: ['step-1'] };
fetchMock.postOnce(window.api_urls.job_list(), 404);
const started = {
type: 'JOB_REQUESTED',
payload: data,
};
const failed = {
type: 'JOB_REJECTED',
payload: data,
};
expect.assertions(1);
return store.dispatch(actions.startJob(data)).catch(() => {
expect(store.getActions()).toEqual([started, failed]);
});
});
});
});
[
{ type: 'JOB_STEP_COMPLETED', action: 'completeJobStep' },
{ type: 'JOB_COMPLETED', action: 'completeJob' },
{ type: 'JOB_CANCELED', action: 'cancelJob' },
{ type: 'JOB_FAILED', action: 'failJob' },
].forEach(({ type, action }) => {
test(`${action} returns action object: ${type}`, () => {
const payload = { foo: 'bar' };
const expected = { type, payload };
// eslint-disable-next-line import/namespace
expect(actions[action](payload)).toEqual(expected);
});
});
describe('updateJob', () => {
describe('success', () => {
test('dispatches JOB_UPDATED action', () => {
const store = storeWithApi({});
const data = { id: 'job-1', is_public: 'true' };
const response = {
id: 'job-1',
is_public: true,
};
fetchMock.patchOnce(window.api_urls.job_detail('job-1'), {
status: 200,
body: response,
});
const started = {
type: 'JOB_UPDATE_REQUESTED',
payload: data,
};
const succeeded = {
type: 'JOB_UPDATED',
payload: response,
};
expect.assertions(1);
return store.dispatch(actions.updateJob(data)).then(() => {
expect(store.getActions()).toEqual([started, succeeded]);
});
});
});
describe('error', () => {
test('dispatches JOB_UPDATE_REJECTED action', () => {
const store = storeWithApi({});
const data = { id: 'job-1', is_public: 'true' };
fetchMock.patchOnce(window.api_urls.job_detail('job-1'), {
status: 500,
body: { detail: 'Nope.' },
});
const started = {
type: 'JOB_UPDATE_REQUESTED',
payload: data,
};
const failed = {
type: 'JOB_UPDATE_REJECTED',
payload: data,
};
expect.assertions(5);
return store.dispatch(actions.updateJob(data)).catch(() => {
const allActions = store.getActions();
expect(allActions[0]).toEqual(started);
expect(allActions[1].type).toEqual('ERROR_ADDED');
expect(allActions[1].payload.message).toEqual('Nope.');
expect(allActions[2]).toEqual(failed); | expect(window.console.error).toHaveBeenCalled();
});
});
});
});
describe('requestCancelJob', () => {
describe('success', () => {
test('dispatches JOB_CANCEL_ACCEPTED action', () => {
const store = storeWithApi({});
const id = 'job-1';
fetchMock.deleteOnce(window.api_urls.job_detail(id), 204);
const started = {
type: 'JOB_CANCEL_REQUESTED',
payload: id,
};
const succeeded = {
type: 'JOB_CANCEL_ACCEPTED',
payload: id,
};
expect.assertions(1);
return store.dispatch(actions.requestCancelJob(id)).then(() => {
expect(store.getActions()).toEqual([started, succeeded]);
});
});
});
describe('error', () => {
test('dispatches JOB_CANCEL_REJECTED action', () => {
const store = storeWithApi({});
const id = 'job-1';
fetchMock.deleteOnce(window.api_urls.job_detail(id), {
status: 500,
body: 'Oops.',
});
const started = {
type: 'JOB_CANCEL_REQUESTED',
payload: id,
};
const failed = {
type: 'JOB_CANCEL_REJECTED',
payload: id,
};
expect.assertions(5);
return store.dispatch(actions.requestCancelJob(id)).catch(() => {
const allActions = store.getActions();
expect(allActions[0]).toEqual(started);
expect(allActions[1].type).toEqual('ERROR_ADDED');
expect(allActions[1].payload.message).toEqual('Oops.');
expect(allActions[2]).toEqual(failed);
expect(window.console.error).toHaveBeenCalled();
});
});
});
}); | |
Chapter_5_5.py | # -*- coding: utf-8 -*-
# source from https://github.com/keon/deep-q-learning/blob/master/dqn.py
import random
import gym
import numpy as np
from collections import deque
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
EPISODES = 1000
class DQNAgent:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.memory = deque(maxlen=2000)
self.gamma = 0.95 # discount rate
self.epsilon = 1.0 # exploration rate
self.epsilon_min = 0.01
self.epsilon_decay = 0.995
self.learning_rate = 0.001
self.model = self._build_model()
def _build_model(self):
# Neural Net for Deep-Q learning Model
model = Sequential()
model.add(Dense(24, input_dim=self.state_size, activation='relu'))
model.add(Dense(24, activation='relu'))
model.add(Dense(self.action_size, activation='linear'))
model.compile(loss='mse',
optimizer=Adam(lr=self.learning_rate))
return model
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def act(self, state):
|
def replay(self, batch_size):
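  # Experience replay: sample stored transitions and fit Q(s, a) toward the Bellman target r + gamma * max_a' Q(s', a').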
minibatch = random.sample(self.memory, batch_size)
for state, action, reward, next_state, done in minibatch:
target = reward
if not done:
target = (reward + self.gamma *
np.amax(self.model.predict(next_state)[0]))
target_f = self.model.predict(state)
target_f[0][action] = target
self.model.fit(state, target_f, epochs=1, verbose=0)
if self.epsilon > self.epsilon_min:
self.epsilon *= self.epsilon_decay
def load(self, name):
self.model.load_weights(name)
def save(self, name):
self.model.save_weights(name)
if __name__ == "__main__":
env = gym.make('MountainCar-v0')
state_size = env.observation_space.shape[0]
action_size = env.action_space.n
agent = DQNAgent(state_size, action_size)
# agent.load("./save/cartpole-dqn.h5")
done = False
batch_size = 32
for e in range(EPISODES):
state = env.reset()
state = np.reshape(state, [1, state_size])
for time in range(500):
# env.render()
action = agent.act(state)
env.render()
next_state, reward, done, _ = env.step(action)
reward = reward if not done else -10
next_state = np.reshape(next_state, [1, state_size])
agent.remember(state, action, reward, next_state, done)
state = next_state
if done:
print("episode: {}/{}, score: {}, e: {:.2}"
.format(e, EPISODES, time, agent.epsilon))
break
if len(agent.memory) > batch_size:
agent.replay(batch_size)
# if e % 10 == 0:
# agent.save("./save/cartpole-dqn.h5") | if np.random.rand() <= self.epsilon:
return random.randrange(self.action_size)
act_values = self.model.predict(state)
return np.argmax(act_values[0]) # returns action |
simpler.rs | use crate::numerical::{
eigen,
eigen::{Eigen, EigenMethod::Jacobi},
integral,
integral::Integral::G7K15,
};
use crate::structure::matrix::{self, Matrix};
use crate::structure::polynomial;
use crate::traits::math::{Norm, Normed};
/// Simple Norm
pub trait SimpleNorm: Normed {
fn norm(&self) -> Self::Scalar;
fn normalize(&self) -> Self;
}
/// Simple integrate
pub fn integrate<F: Fn(f64) -> f64 + Copy>(f: F, (a, b): (f64, f64)) -> f64 {
integral::integrate(f, (a, b), G7K15(1e-15))
}
/// Simple Linear algebra
pub trait SimplerLinearAlgebra {
fn back_subs(&self, b: &Vec<f64>) -> Vec<f64>;
fn forward_subs(&self, b: &Vec<f64>) -> Vec<f64>;
fn lu(&self) -> matrix::PQLU;
fn waz_diag(&self) -> Option<matrix::WAZD>;
fn waz(&self) -> Option<matrix::WAZD>;
fn qr(&self) -> matrix::QR;
fn rref(&self) -> Matrix;
fn det(&self) -> f64;
fn block(&self) -> (Matrix, Matrix, Matrix, Matrix);
fn inv(&self) -> Matrix;
fn pseudo_inv(&self) -> Matrix;
fn solve(&self, b: &Vec<f64>) -> Vec<f64>;
fn solve_mat(&self, m: &Matrix) -> Matrix;
}
/// Simple Eigenpair
pub fn eigen(m: &Matrix) -> Eigen {
eigen::eigen(m, Jacobi)
}
/// Simple L2 norm
impl SimpleNorm for Vec<f64> {
fn norm(&self) -> Self::Scalar {
Normed::norm(self, Norm::L2)
}
fn normalize(&self) -> Self {
Normed::normalize(self, Norm::L2)
}
}
/// Simple Frobenius norm
impl SimpleNorm for Matrix {
fn norm(&self) -> Self::Scalar {
Normed::norm(self, Norm::F)
}
fn normalize(&self) -> Self { | unimplemented!()
}
}
impl SimplerLinearAlgebra for Matrix {
fn back_subs(&self, b: &Vec<f64>) -> Vec<f64> {
matrix::LinearAlgebra::back_subs(self, b)
}
fn forward_subs(&self, b: &Vec<f64>) -> Vec<f64> {
matrix::LinearAlgebra::forward_subs(self, b)
}
fn lu(&self) -> matrix::PQLU {
matrix::LinearAlgebra::lu(self)
}
fn waz(&self) -> Option<matrix::WAZD> {
matrix::LinearAlgebra::waz(self, matrix::Form::Identity)
}
fn waz_diag(&self) -> Option<matrix::WAZD> {
matrix::LinearAlgebra::waz(self, matrix::Form::Diagonal)
}
fn qr(&self) -> matrix::QR {
matrix::LinearAlgebra::qr(self)
}
fn rref(&self) -> Matrix {
matrix::LinearAlgebra::rref(self)
}
fn det(&self) -> f64 {
matrix::LinearAlgebra::det(self)
}
fn block(&self) -> (Matrix, Matrix, Matrix, Matrix) {
matrix::LinearAlgebra::block(self)
}
fn inv(&self) -> Matrix {
matrix::LinearAlgebra::inv(self)
}
fn pseudo_inv(&self) -> Matrix {
matrix::LinearAlgebra::pseudo_inv(self)
}
fn solve(&self, b: &Vec<f64>) -> Vec<f64> {
matrix::LinearAlgebra::solve(self, b, matrix::SolveKind::LU)
}
fn solve_mat(&self, m: &Matrix) -> Matrix {
matrix::LinearAlgebra::solve_mat(self, m, matrix::SolveKind::LU)
}
}
/// Simple solve
#[allow(non_snake_case)]
pub fn solve(A: &Matrix, m: &Matrix) -> Matrix {
matrix::solve(A, m, matrix::SolveKind::LU)
}
/// Simple Chebyshev Polynomial (First Kind)
pub fn chebyshev_polynomial(n: usize) -> polynomial::Polynomial {
polynomial::chebyshev_polynomial(n, polynomial::SpecialKind::First)
} | |
nodeMaterialBlockConnectionPoint.ts | import { NodeMaterialBlockConnectionPointTypes } from './Enums/nodeMaterialBlockConnectionPointTypes';
import { NodeMaterialBlockTargets } from './Enums/nodeMaterialBlockTargets';
import { Nullable } from '../../types';
import { InputBlock } from './Blocks/Input/inputBlock';
import { Observable } from '../../Misc/observable';
declare type NodeMaterialBlock = import("./nodeMaterialBlock").NodeMaterialBlock;
/**
* Defines a connection point for a block
*/
export class NodeMaterialConnectionPoint {
/** @hidden */
public _ownerBlock: NodeMaterialBlock;
/** @hidden */
public _connectedPoint: Nullable<NodeMaterialConnectionPoint> = null;
private _endpoints = new Array<NodeMaterialConnectionPoint>();
private _associatedVariableName: string;
/** @hidden */
public _typeConnectionSource: Nullable<NodeMaterialConnectionPoint> = null;
/** @hidden */
public _linkedConnectionSource: Nullable<NodeMaterialConnectionPoint> = null;
private _type = NodeMaterialBlockConnectionPointTypes.Float;
/** @hidden */
public _enforceAssociatedVariableName = false;
/**
* Gets or sets the additional types supported by this connection point
*/
public acceptedConnectionPointTypes = new Array<NodeMaterialBlockConnectionPointTypes>();
/**
* Gets or sets the additional types excluded by this connection point
*/
public excludedConnectionPointTypes = new Array<NodeMaterialBlockConnectionPointTypes>();
/**
* Observable triggered when this point is connected
*/
public onConnectionObservable = new Observable<NodeMaterialConnectionPoint>();
/**
* Gets or sets the associated variable name in the shader
*/
public get associatedVariableName(): string {
if (this._ownerBlock.isInput) {
return (this._ownerBlock as InputBlock).associatedVariableName;
}
if ((!this._enforceAssociatedVariableName || !this._associatedVariableName) && this._connectedPoint) {
return this._connectedPoint.associatedVariableName;
}
return this._associatedVariableName;
}
public set associatedVariableName(value: string) {
this._associatedVariableName = value;
}
/**
* Gets or sets the connection point type (default is float)
*/
public get type(): NodeMaterialBlockConnectionPointTypes {
if (this._type === NodeMaterialBlockConnectionPointTypes.AutoDetect) {
if (this._ownerBlock.isInput) {
return (this._ownerBlock as InputBlock).type;
}
if (this._connectedPoint) {
return this._connectedPoint.type;
}
if (this._linkedConnectionSource && this._linkedConnectionSource.isConnected) {
return this._linkedConnectionSource.type;
}
}
if (this._type === NodeMaterialBlockConnectionPointTypes.BasedOnInput && this._typeConnectionSource) {
return this._typeConnectionSource.type;
}
return this._type;
}
public set type(value: NodeMaterialBlockConnectionPointTypes) {
this._type = value;
}
/**
* Gets or sets the connection point name
*/
public name: string;
/**
* Gets or sets a boolean indicating that this connection point can be omitted
*/
public isOptional: boolean;
/**
* Gets or sets a string indicating that this uniform must be defined under a #ifdef
*/
public define: string;
/** @hidden */
public _prioritizeVertex = false;
private _target: NodeMaterialBlockTargets = NodeMaterialBlockTargets.VertexAndFragment;
/** Gets or sets the target of that connection point */
public get target(): NodeMaterialBlockTargets {
if (!this._prioritizeVertex || !this._ownerBlock) {
return this._target;
}
if (this._target !== NodeMaterialBlockTargets.VertexAndFragment) {
return this._target;
}
if (this._ownerBlock.target === NodeMaterialBlockTargets.Fragment) {
return NodeMaterialBlockTargets.Fragment;
}
return NodeMaterialBlockTargets.Vertex;
}
public set target(value: NodeMaterialBlockTargets) {
this._target = value;
}
/**
* Gets a boolean indicating that the current point is connected
*/
public get isConnected(): boolean {
return this.connectedPoint !== null;
}
/**
* Gets a boolean indicating that the current point is connected to an input block
*/
public get isConnectedToInputBlock(): boolean {
return this.connectedPoint !== null && this.connectedPoint.ownerBlock.isInput;
}
/**
  * Gets the connected input block (if any)
*/
public get connectInputBlock(): Nullable<InputBlock> {
if (!this.isConnectedToInputBlock) {
return null;
}
return this.connectedPoint!.ownerBlock as InputBlock;
}
/** Get the other side of the connection (if any) */
public get connectedPoint(): Nullable<NodeMaterialConnectionPoint> {
return this._connectedPoint;
}
/** Get the block that owns this connection point */
public get ownerBlock(): NodeMaterialBlock {
return this._ownerBlock;
}
/** Get the block connected on the other side of this connection (if any) */
public get sourceBlock(): Nullable<NodeMaterialBlock> {
if (!this._connectedPoint) {
return null;
}
return this._connectedPoint.ownerBlock;
}
/** Get the block connected on the endpoints of this connection (if any) */
public get connectedBlocks(): Array<NodeMaterialBlock> {
if (this._endpoints.length === 0) {
return [];
}
return this._endpoints.map((e) => e.ownerBlock);
}
/** Gets the list of connected endpoints */
public get endpoints() {
return this._endpoints;
}
/** Gets a boolean indicating if that output point is connected to at least one input */
public get hasEndpoints(): boolean {
return this._endpoints && this._endpoints.length > 0;
}
/** Gets a boolean indicating that this connection will be used in the vertex shader */
public get isConnectedInVertexShader(): boolean {
if (this.target === NodeMaterialBlockTargets.Vertex) {
return true;
}
if (!this.hasEndpoints) {
return false;
}
for (var endpoint of this._endpoints) {
if (endpoint.ownerBlock.target === NodeMaterialBlockTargets.Vertex) {
return true;
}
if (endpoint.target === NodeMaterialBlockTargets.Vertex) {
return true;
}
if (endpoint.ownerBlock.target === NodeMaterialBlockTargets.Neutral || endpoint.ownerBlock.target === NodeMaterialBlockTargets.VertexAndFragment) {
if (endpoint.ownerBlock.outputs.some((o) => o.isConnectedInVertexShader)) {
return true;
}
}
}
return false;
}
/** Gets a boolean indicating that this connection will be used in the fragment shader */
public get isConnectedInFragmentShader(): boolean {
if (this.target === NodeMaterialBlockTargets.Fragment) {
return true;
}
if (!this.hasEndpoints) {
return false;
}
for (var endpoint of this._endpoints) {
if (endpoint.ownerBlock.target === NodeMaterialBlockTargets.Fragment) {
|
if (endpoint.ownerBlock.target === NodeMaterialBlockTargets.Neutral || endpoint.ownerBlock.target === NodeMaterialBlockTargets.VertexAndFragment) {
if (endpoint.ownerBlock.outputs.some((o) => o.isConnectedInFragmentShader)) {
return true;
}
}
}
return false;
}
/**
* Creates a new connection point
* @param name defines the connection point name
* @param ownerBlock defines the block hosting this connection point
*/
public constructor(name: string, ownerBlock: NodeMaterialBlock) {
this._ownerBlock = ownerBlock;
this.name = name;
}
/**
* Gets the current class name e.g. "NodeMaterialConnectionPoint"
* @returns the class name
*/
public getClassName(): string {
return "NodeMaterialConnectionPoint";
}
/**
  * Gets a boolean indicating whether the current point can be connected to another point
* @param connectionPoint defines the other connection point
* @returns true if the connection is possible
*/
public canConnectTo(connectionPoint: NodeMaterialConnectionPoint) {
if (this.type !== connectionPoint.type && connectionPoint.type !== NodeMaterialBlockConnectionPointTypes.AutoDetect) {
// Equivalents
switch (this.type) {
case NodeMaterialBlockConnectionPointTypes.Vector3: {
if (connectionPoint.type === NodeMaterialBlockConnectionPointTypes.Color3) {
return true;
}
}
case NodeMaterialBlockConnectionPointTypes.Vector4: {
if (connectionPoint.type === NodeMaterialBlockConnectionPointTypes.Color4) {
return true;
}
}
case NodeMaterialBlockConnectionPointTypes.Color3: {
if (connectionPoint.type === NodeMaterialBlockConnectionPointTypes.Vector3) {
return true;
}
}
case NodeMaterialBlockConnectionPointTypes.Color4: {
if (connectionPoint.type === NodeMaterialBlockConnectionPointTypes.Vector4) {
return true;
}
}
}
// Accepted types
return (connectionPoint.acceptedConnectionPointTypes && connectionPoint.acceptedConnectionPointTypes.indexOf(this.type) !== -1);
}
// Excluded
if ((connectionPoint.excludedConnectionPointTypes && connectionPoint.excludedConnectionPointTypes.indexOf(this.type) !== -1)) {
return false;
}
return true;
}
/**
* Connect this point to another connection point
* @param connectionPoint defines the other connection point
* @param ignoreConstraints defines if the system will ignore connection type constraints (default is false)
* @returns the current connection point
*/
public connectTo(connectionPoint: NodeMaterialConnectionPoint, ignoreConstraints = false): NodeMaterialConnectionPoint {
if (!ignoreConstraints && !this.canConnectTo(connectionPoint)) {
throw "Cannot connect these two connectors.";
}
this._endpoints.push(connectionPoint);
connectionPoint._connectedPoint = this;
this._enforceAssociatedVariableName = false;
this.onConnectionObservable.notifyObservers(connectionPoint);
connectionPoint.onConnectionObservable.notifyObservers(this);
return this;
}
/**
* Disconnect this point from one of his endpoint
* @param endpoint defines the other connection point
* @returns the current connection point
*/
public disconnectFrom(endpoint: NodeMaterialConnectionPoint): NodeMaterialConnectionPoint {
let index = this._endpoints.indexOf(endpoint);
if (index === -1) {
return this;
}
this._endpoints.splice(index, 1);
endpoint._connectedPoint = null;
this._enforceAssociatedVariableName = false;
endpoint._enforceAssociatedVariableName = false;
return this;
}
/**
* Serializes this point in a JSON representation
* @returns the serialized point object
*/
public serialize(): any {
let serializationObject: any = {};
serializationObject.name = this.name;
if (this.connectedPoint) {
serializationObject.inputName = this.name;
serializationObject.targetBlockId = this.connectedPoint.ownerBlock.uniqueId;
serializationObject.targetConnectionName = this.connectedPoint.name;
}
return serializationObject;
}
/**
* Release resources
*/
public dispose() {
this.onConnectionObservable.clear();
}
} | return true;
}
|
divider.js | * @ngdoc module
* @name material.components.divider
* @description Divider module!
*/
angular.module('material.components.divider', [
'material.core'
])
.directive('mdDivider', MdDividerDirective);
/**
* @ngdoc directive
* @name mdDivider
* @module material.components.divider
* @restrict E
*
* @description
* Dividers group and separate content within lists and page layouts using strong visual and spatial distinctions. This divider is a thin rule, lightweight enough to not distract the user from content.
*
* @param {boolean=} md-inset Add this attribute to activate the inset divider style.
* @usage
* <hljs lang="html">
* <md-divider></md-divider>
*
* <md-divider md-inset></md-divider>
* </hljs>
*
*/
function MdDividerDirective($mdTheming) {
return {
restrict: 'E',
link: $mdTheming
};
} | /** |
|
utils.go | package app
import (
"io/ioutil"
"github.com/cosmos/cosmos-sdk/codec"
"github.com/cosmos/cosmos-sdk/types/module"
"github.com/cosmos/cosmos-sdk/x/simulation"
)
// SimulationOperations retrieves the simulation params from the provided file path
// and returns all the modules weighted operations
func SimulationOperations(app *GaiaApp, cdc *codec.Codec, config simulation.Config) []simulation.WeightedOperation {
simState := module.SimulationState{
AppParams: make(simulation.AppParams),
Cdc: cdc,
}
if config.ParamsFile != "" |
simState.ParamChanges = app.sm.GenerateParamChanges(config.Seed)
simState.Contents = app.sm.GetProposalContents(simState)
return app.sm.WeightedOperations(simState)
}
| {
bz, err := ioutil.ReadFile(config.ParamsFile)
if err != nil {
panic(err)
}
app.cdc.MustUnmarshalJSON(bz, &simState.AppParams)
} |
correctLinkHandlers.js | 'use strict';
import visit from 'unist-util-visit';
import {isNative} from '../../helpers';
import is from 'hast-util-is-element';
import has from 'hast-util-has-property';
export {correctLinkHandlers};
function correctLinkHandlers(props) {
const {tools} = props;
return transformer;
function | (tree) {
visit(tree, 'element', visitor);
}
function visitor(node, index, parent) {
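  // Swap default behaviour for the consumer-provided tools: onPress on native, onClick on the web.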
if (is(node, 'button')) {
node.properties[isNative ? 'onPress' : 'onClick'] = function(e) {
e.preventDefault();
if (tools && tools.onClick && typeof tools.onClick === 'function') {
tools.onClick(node, {isNative});
}
};
}
if (is(node, 'a') && !has(node, 'onClick')) {
node.properties[isNative ? 'onPress' : 'onClick'] = e => {
e.preventDefault();
if (tools && tools.navigate && typeof tools.navigate === 'function') {
tools.navigate(node, {isNative});
}
};
}
}
}
function handleOnClick(obj) {
return Function('');
}
| transformer |
find_test.go | // Copyright (c) 2019 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package graphite
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"net/url"
"sort"
"strings"
"testing"
"time"
"github.com/m3db/m3/src/query/api/v1/handler/prometheus/handleroptions"
"github.com/m3db/m3/src/query/api/v1/options"
"github.com/m3db/m3/src/query/block"
"github.com/m3db/m3/src/query/models"
"github.com/m3db/m3/src/query/storage"
"github.com/m3db/m3/src/query/storage/m3/consolidators"
"github.com/m3db/m3/src/x/headers"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// date is a tuple of a time.Time and its valid string representation
type date struct {
t time.Time
s string
}
var (
from = date{
s: "14:38_20150618",
t: time.Date(2015, time.June, 18, 14, 38, 0, 0, time.UTC),
}
until = date{
s: "1432581620",
t: time.Date(2015, time.May, 25, 19, 20, 20, 0, time.UTC),
}
)
type completeTagQueryMatcher struct {
matchers []models.Matcher
}
func (m *completeTagQueryMatcher) String() string { return "complete tag query" }
func (m *completeTagQueryMatcher) Matches(x interface{}) bool {
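	// Match only CompleteTagsQuery values whose time range, name filters, and tag matchers equal the expected ones.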
q, ok := x.(*storage.CompleteTagsQuery)
if !ok {
return false
}
if !q.Start.Equal(from.t) {
return false
}
if !q.End.Equal(until.t) {
return false
}
if q.CompleteNameOnly {
return false
}
if len(q.FilterNameTags) != 1 {
return false
}
// both queries should filter on __g1__
if !bytes.Equal(q.FilterNameTags[0], []byte("__g1__")) {
return false
}
if len(q.TagMatchers) != len(m.matchers) {
return false
}
for i, qMatcher := range q.TagMatchers {
if !bytes.Equal(qMatcher.Name, m.matchers[i].Name) {
return false
}
if !bytes.Equal(qMatcher.Value, m.matchers[i].Value) {
return false
}
if qMatcher.Type != m.matchers[i].Type {
return false
}
}
return true
}
var _ gomock.Matcher = &completeTagQueryMatcher{}
func b(s string) []byte { return []byte(s) }
func bs(ss ...string) [][]byte {
bb := make([][]byte, len(ss))
for i, s := range ss {
bb[i] = b(s)
}
return bb
}
func setupStorage(ctrl *gomock.Controller, ex, ex2 bool) storage.Storage {
store := storage.NewMockStorage(ctrl)
// set up no children case
noChildrenMatcher := &completeTagQueryMatcher{
matchers: []models.Matcher{
{Type: models.MatchEqual, Name: b("__g0__"), Value: b("foo")},
{Type: models.MatchRegexp, Name: b("__g1__"), Value: b(`b[^\.]*`)},
{Type: models.MatchNotField, Name: b("__g2__")},
},
}
noChildrenResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
CompletedTags: []consolidators.CompletedTag{
{Name: b("__g1__"), Values: bs("bug", "bar", "baz")},
},
Metadata: block.ResultMetadata{
LocalOnly: true,
Exhaustive: ex,
},
}
store.EXPECT().CompleteTags(gomock.Any(), noChildrenMatcher, gomock.Any()).
Return(noChildrenResult, nil)
// set up children case
childrenMatcher := &completeTagQueryMatcher{
matchers: []models.Matcher{
{Type: models.MatchEqual, Name: b("__g0__"), Value: b("foo")},
{Type: models.MatchRegexp, Name: b("__g1__"), Value: b(`b[^\.]*`)},
{Type: models.MatchField, Name: b("__g2__")},
},
}
childrenResult := &consolidators.CompleteTagsResult{
CompleteNameOnly: false,
CompletedTags: []consolidators.CompletedTag{
{Name: b("__g1__"), Values: bs("baz", "bix", "bug")},
},
Metadata: block.ResultMetadata{
LocalOnly: false,
Exhaustive: true,
},
}
if !ex2 {
childrenResult.Metadata.AddWarning("foo", "bar")
}
store.EXPECT().CompleteTags(gomock.Any(), childrenMatcher, gomock.Any()).
Return(childrenResult, nil)
return store
}
type writer struct {
results []string
header http.Header
}
var _ http.ResponseWriter = &writer{}
func (w *writer) WriteHeader(_ int) {}
func (w *writer) Header() http.Header {
if w.header == nil {
w.header = make(http.Header)
}
return w.header
}
func (w *writer) Write(b []byte) (int, error) {
if w.results == nil {
w.results = make([]string, 0, 10)
}
w.results = append(w.results, string(b))
return len(b), nil
}
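// result mirrors the JSON shape of a single graphite find response entry.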
type result struct {
ID string `json:"id"`
Text string `json:"text"`
Leaf int `json:"leaf"`
Expandable int `json:"expandable"`
AllowChildren int `json:"allowChildren"`
}
type results []result
func (r results) Len() int { return len(r) }
func (r results) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r results) Less(i, j int) bool {
return strings.Compare(r[i].ID, r[j].ID) == -1
}
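// testFind issues a find request against the mocked storage and verifies both the
// decoded, sorted JSON results and the returned limit header.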
func testFind(t *testing.T, httpMethod string, ex bool, ex2 bool, header string) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
// setup storage and handler
store := setupStorage(ctrl, ex, ex2)
builder := handleroptions.NewFetchOptionsBuilder(
handleroptions.FetchOptionsBuilderOptions{})
opts := options.EmptyHandlerOptions().
SetFetchOptionsBuilder(builder).
SetStorage(store)
h := NewFindHandler(opts)
// execute the query
params := make(url.Values)
params.Set("query", "foo.b*")
params.Set("from", from.s)
params.Set("until", until.s)
w := &writer{}
req := &http.Request{
Method: httpMethod,
}
switch httpMethod {
case http.MethodGet:
req.URL = &url.URL{
RawQuery: params.Encode(),
}
case http.MethodPost:
req.Form = params
}
h.ServeHTTP(w, req)
// convert results to comparable format
require.Equal(t, 1, len(w.results))
r := make(results, 0)
decoder := json.NewDecoder(bytes.NewBufferString((w.results[0])))
require.NoError(t, decoder.Decode(&r))
sort.Sort(r)
makeNoChildrenResult := func(t string) result {
return result{ID: fmt.Sprintf("foo.%s", t), Text: t, Leaf: 1,
Expandable: 0, AllowChildren: 0}
}
makeWithChildrenResult := func(t string) result {
return result{ID: fmt.Sprintf("foo.%s", t), Text: t, Leaf: 0,
Expandable: 1, AllowChildren: 1}
}
expected := results{
makeNoChildrenResult("bar"),
makeNoChildrenResult("baz"),
makeWithChildrenResult("baz"),
makeWithChildrenResult("bix"),
makeNoChildrenResult("bug"),
makeWithChildrenResult("bug"),
}
require.Equal(t, expected, r)
actual := w.Header().Get(headers.LimitHeader)
assert.Equal(t, header, actual)
}
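// limitTests enumerates the exhaustiveness combinations of the two storage results
// and the limit header value each combination should produce.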
var limitTests = []struct {
name string
ex, ex2 bool
header string
}{
{"both incomplete", false, false, fmt.Sprintf(
"%s,%s_%s", headers.LimitHeaderSeriesLimitApplied, "foo", "bar")},
{"with terminator incomplete", true, false, "foo_bar"},
{"with children incomplete", false, true,
headers.LimitHeaderSeriesLimitApplied},
{"both complete", true, true, ""},
}
func TestFind(t *testing.T) {
	for _, tt := range limitTests {
		t.Run(tt.name, func(t *testing.T) {
			for _, httpMethod := range FindHTTPMethods {
				testFind(t, httpMethod, tt.ex, tt.ex2, tt.header)
			}
		})
	}
}
text.rs | use crate::bin::NTValue;
use crate::text::directory::*;
use crate::text::publish::*;
use crate::text::subscription::*;
use serde::{Deserialize, Serialize};
use serde_json::Value;
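// impl_message generates a MessageBody implementation for each listed struct,
// tagging the serialized JSON payload with the matching MessageType variant.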
macro_rules! impl_message {
($($name:ident),+) => {
$(
impl MessageBody for $name {
fn into_message(self) -> $crate::text::NTTextMessage {
$crate::text::NTTextMessage {
_type: $crate::text::MessageType::$name,
data: serde_json::to_value(self).unwrap()
}
}
}
)+
}
}
pub mod directory;
pub mod publish;
pub mod subscription;
pub trait MessageBody {
fn into_message(self) -> NTTextMessage;
}
/// The type of the message that is being sent or received
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum MessageType {
/// Publish Request Message
/// Direction: Client to Server
/// Response: Publish Acknowledge
///
/// Sent from a client to the server to indicate the client wants to start publishing values at the given NetworkTables key.
/// The server will respond with a “puback” message.
/// Once the client receives the “puback” message it can start publishing data value updates via binary CBOR messages.
#[serde(rename = "publish")]
PublishReq,
/// Publish Release Message
/// Direction: Client to Server
///
/// Sent from a client to the server to indicate the client wants to stop publishing values at the given NetworkTables key.
/// The client may also request the key be deleted.
/// The client **must** stop publishing data value updates via binary CBOR messages prior to sending this message.
#[serde(rename = "pubrel")]
PublishRel,
/// Set Flags Message
/// Direction: Client to Server
///
/// Sent from a client to the server to set or clear flags for a given topic.
/// The server will respond with an updated “announce” message.
SetFlags,
/// Key Announcement Message
/// Direction: Server to Client
///
/// Sent from the server to a client with an announcement listener covering the key.
/// The server shall send this message either initially after receiving Start Announcements from a client,
/// or when new keys are created with the prefix specified.
Announce,
/// Key Removed Message
/// Direction: Server to Client
///
/// Sent from the server to a client with an announcement listener covering the key.
/// The server shall send this message when a previously announced (via an “announce” message) key is deleted.
Unannounce,
/// Get Values Message
/// Direction: Client to Server
/// Response: Values over CBOR
///
/// Sent from a client to the server to indicate the client wants to get the current values for the specified keys (identifiers).
/// The server shall send CBOR messages containing the current values immediately upon receipt.
/// While this message could theoretically be used to poll for value updates, it is much better to use the “subscribe” message to request periodic push updates.
GetValues,
/// Subscribe Message
/// Direction: Client to Server
/// Response: Values over CBOR
///
/// Sent from a client to the server to indicate the client wants to subscribe to value changes for the specified keys (identifiers).
/// The server shall send CBOR messages containing the current values upon receipt, and continue sending CBOR messages for future value changes.
/// Subscriptions may overlap; only one CBOR message is sent per value change regardless of the number of subscriptions.
/// Sending a “subscribe” message with the same subscription UID as a previous “subscribe” message results in updating the subscription (replacing the array of identifiers and updating any specified options).
Subscribe,
/// Unsubscribe Message
/// Direction: Client to Server
///
/// Sent from a client to the server to indicate the client wants to stop subscribing to value changes for the given subscription.
Unsubscribe,
}
/// An enum containing the structs representing each text message; the explanation of each message can be found in the documentation for [`MessageType`]
///
/// [`MessageType`]: ./enum.MessageType.html
#[derive(Debug, PartialEq)]
pub enum MessageValue {
PublishReq(PublishReq),
PublishRel(PublishRel),
SetFlags(SetFlags),
Announce(Announce),
Unannounce(Unannounce),
GetValues(GetValues),
Subscribe(Subscribe),
Unsubscribe(Unsubscribe),
}
/// An enum representation of the acceptable data types in NTv4
#[derive(Serialize, Deserialize, Debug, PartialEq, Copy, Clone)]
#[serde(rename_all = "lowercase")]
pub enum DataType {
/// Represents a boolean, true or false
Boolean,
/// Represents a sequence of raw bytes
Raw,
/// Represents a Remote Procedure Call declaration
RPC,
/// Represents a sequence of bytes representing a String
String,
/// Represents a signed 64-bit integer
Int,
/// Represents an IEEE754 single-precision floating-point number
Float,
/// Represents an IEEE754 double-precision floating-point number
Double,
/// Represents an array of Booleans
#[serde(rename = "boolean[]")]
BooleanArray,
/// Represents an array of Strings
#[serde(rename = "string[]")]
StringArray,
/// Represents an array of Integers
#[serde(rename = "int[]")]
IntArray,
/// Represents an array of Floats
#[serde(rename = "float[]")]
FloatArray,
/// Represents an array of Doubles
#[serde(rename = "double[]")]
DoubleArray,
}
impl DataType {
pub fn default_value(&self) -> NTValue {
match self {
DataType::Int => NTValue::Int(0),
DataType::Boolean => NTValue::Boolean(false),
DataType::Raw => NTValue::Raw(vec![]),
DataType::RPC => NTValue::RPC(vec![]),
DataType::String => NTValue::String(String::new()),
DataType::Float => NTValue::Float(0f32),
DataType::Double => NTValue::Double(0.0),
DataType::BooleanArray => NTValue::BooleanArray(vec![]),
DataType::StringArray => NTValue::StringArray(vec![]),
DataType::IntArray => NTValue::IntArray(vec![]),
DataType::FloatArray => NTValue::FloatArray(vec![]),
DataType::DoubleArray => NTValue::DoubleArray(vec![]),
}
}
}
impl From<DataType> for u8 {
    fn from(value: DataType) -> u8 {
        match value {
            DataType::Boolean => 0,
            DataType::Double => 1,
            DataType::Int => 2,
            DataType::Float => 3,
            DataType::String => 4,
            DataType::Raw => 5,
            DataType::RPC => 6,
            DataType::BooleanArray => 16,
            DataType::DoubleArray => 17,
            DataType::IntArray => 18,
            DataType::FloatArray => 19,
            DataType::StringArray => 20,
        }
    }
}
/// The most generic struct representing a textual message transmitted in NT4
///
/// This struct should probably not be used directly; instead it can be constructed from the implementors of [`MessageBody`] found in the submodules.
/// These implementors are strongly typed equivalents to the `data` field on this type, and contain more information about how they should be used.
///
/// [`MessageBody`]: ./trait.MessageBody.html
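///
/// A rough usage sketch, mirroring the test module at the bottom of this file:
///
/// ```ignore
/// use crate::text::publish::PublishReq;
/// use crate::text::{DataType, MessageBody};
///
/// // Build a strongly typed message and wrap it into the generic text envelope.
/// let msg = PublishReq { name: "/foo".to_string(), _type: DataType::Int }.into_message();
/// let json = serde_json::to_string(&msg).unwrap();
/// ```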
#[derive(Serialize, Deserialize, PartialEq, Debug)]
pub struct NTTextMessage {
#[serde(rename = "type")]
_type: MessageType,
data: Value,
}
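// to_data_body dispatches on the stored MessageType and deserializes the JSON
// payload into the matching strongly typed struct, wrapped in MessageValue.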
macro_rules! to_data_body {
($self:ident, $($ty:ident),+) => {
match $self._type {
$(
MessageType::$ty => match serde_json::from_value::<$ty>($self.data) {
Ok(value) => Ok(MessageValue::$ty(value)),
Err(e) => Err(e),
}
)+
}
}
}
impl NTTextMessage {
/// Decodes the `Value` stored in `self` as a strongly typed struct depending on the value of `self._type`
///
/// Returns the value wrapped inside the [`MessageValue`] enum.
///
/// [`MessageValue`]: ./enum.MessageValue.html
    pub fn data(self) -> serde_json::Result<MessageValue> {
use self::directory::*;
use self::publish::*;
use self::subscription::*;
to_data_body!(
self,
PublishReq,
PublishRel,
SetFlags,
Announce,
Unannounce,
GetValues,
Subscribe,
Unsubscribe
)
}
}
#[cfg(test)]
mod tests {
use crate::text::publish::{PublishReq, SetFlags};
use crate::text::{DataType, MessageBody, MessageType, MessageValue, NTTextMessage};
#[test]
fn test_de() {
let msg = r#"{"type":"publish", "data": {"name": "/foo", "type": "integer"}}"#;
let msg = serde_json::from_str::<NTTextMessage>(msg).unwrap();
assert_eq!(msg._type, MessageType::PublishReq);
assert_eq!(
msg.data(),
MessageValue::PublishReq(PublishReq {
name: "/foo".to_string(),
_type: DataType::Int,
})
);
}
#[test]
fn test_ser() {
let msg = SetFlags {
name: "/foo".to_string(),
add: vec!["persistent".to_string()],
remove: vec!["bolb".to_string()],
};
assert_eq!(
serde_json::to_string(&msg.into_message()).unwrap(),
r#"{"type":"setflags","data":{"add":["persistent"],"name":"/foo","remove":["bolb"]}}"#
)
}
}
v1beta1_cron_job_spec.py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.19.15
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes_asyncio.client.configuration import Configuration
class V1beta1CronJobSpec(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'concurrency_policy': 'str',
'failed_jobs_history_limit': 'int',
'job_template': 'V1beta1JobTemplateSpec',
'schedule': 'str',
'starting_deadline_seconds': 'int',
'successful_jobs_history_limit': 'int',
'suspend': 'bool'
}
attribute_map = {
'concurrency_policy': 'concurrencyPolicy',
'failed_jobs_history_limit': 'failedJobsHistoryLimit',
'job_template': 'jobTemplate',
'schedule': 'schedule',
'starting_deadline_seconds': 'startingDeadlineSeconds',
'successful_jobs_history_limit': 'successfulJobsHistoryLimit',
'suspend': 'suspend'
}
def __init__(self, concurrency_policy=None, failed_jobs_history_limit=None, job_template=None, schedule=None, starting_deadline_seconds=None, successful_jobs_history_limit=None, suspend=None, local_vars_configuration=None): # noqa: E501
"""V1beta1CronJobSpec - a model defined in OpenAPI""" # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
self._concurrency_policy = None
self._failed_jobs_history_limit = None
self._job_template = None
self._schedule = None
self._starting_deadline_seconds = None
self._successful_jobs_history_limit = None
self._suspend = None
self.discriminator = None
if concurrency_policy is not None:
self.concurrency_policy = concurrency_policy
if failed_jobs_history_limit is not None:
self.failed_jobs_history_limit = failed_jobs_history_limit
self.job_template = job_template
self.schedule = schedule
if starting_deadline_seconds is not None:
self.starting_deadline_seconds = starting_deadline_seconds
if successful_jobs_history_limit is not None:
self.successful_jobs_history_limit = successful_jobs_history_limit
if suspend is not None:
self.suspend = suspend
@property
def concurrency_policy(self):
"""Gets the concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
:return: The concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
:rtype: str
"""
return self._concurrency_policy
@concurrency_policy.setter
def concurrency_policy(self, concurrency_policy):
"""Sets the concurrency_policy of this V1beta1CronJobSpec.
Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one # noqa: E501
:param concurrency_policy: The concurrency_policy of this V1beta1CronJobSpec. # noqa: E501
:type: str
"""
self._concurrency_policy = concurrency_policy
@property
def failed_jobs_history_limit(self):
"""Gets the failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:return: The failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._failed_jobs_history_limit
@failed_jobs_history_limit.setter
def failed_jobs_history_limit(self, failed_jobs_history_limit):
"""Sets the failed_jobs_history_limit of this V1beta1CronJobSpec.
The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1. # noqa: E501
:param failed_jobs_history_limit: The failed_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._failed_jobs_history_limit = failed_jobs_history_limit
@property
def job_template(self):
"""Gets the job_template of this V1beta1CronJobSpec. # noqa: E501
:return: The job_template of this V1beta1CronJobSpec. # noqa: E501
:rtype: V1beta1JobTemplateSpec
"""
return self._job_template
@job_template.setter
def job_template(self, job_template):
"""Sets the job_template of this V1beta1CronJobSpec.
:param job_template: The job_template of this V1beta1CronJobSpec. # noqa: E501
:type: V1beta1JobTemplateSpec
"""
if self.local_vars_configuration.client_side_validation and job_template is None: # noqa: E501
raise ValueError("Invalid value for `job_template`, must not be `None`") # noqa: E501
self._job_template = job_template
@property
def schedule(self):
"""Gets the schedule of this V1beta1CronJobSpec. # noqa: E501
The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
:return: The schedule of this V1beta1CronJobSpec. # noqa: E501
:rtype: str
"""
return self._schedule
@schedule.setter
def schedule(self, schedule):
"""Sets the schedule of this V1beta1CronJobSpec.
The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. # noqa: E501
:param schedule: The schedule of this V1beta1CronJobSpec. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and schedule is None: # noqa: E501
raise ValueError("Invalid value for `schedule`, must not be `None`") # noqa: E501
self._schedule = schedule
@property
def starting_deadline_seconds(self):
"""Gets the starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. # noqa: E501
:return: The starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._starting_deadline_seconds
@starting_deadline_seconds.setter
def starting_deadline_seconds(self, starting_deadline_seconds):
"""Sets the starting_deadline_seconds of this V1beta1CronJobSpec.
Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones. # noqa: E501
:param starting_deadline_seconds: The starting_deadline_seconds of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._starting_deadline_seconds = starting_deadline_seconds
@property
def successful_jobs_history_limit(self):
"""Gets the successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. # noqa: E501
:return: The successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:rtype: int
"""
return self._successful_jobs_history_limit
@successful_jobs_history_limit.setter
def successful_jobs_history_limit(self, successful_jobs_history_limit):
"""Sets the successful_jobs_history_limit of this V1beta1CronJobSpec.
The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3. # noqa: E501
:param successful_jobs_history_limit: The successful_jobs_history_limit of this V1beta1CronJobSpec. # noqa: E501
:type: int
"""
self._successful_jobs_history_limit = successful_jobs_history_limit
@property
def suspend(self):
"""Gets the suspend of this V1beta1CronJobSpec. # noqa: E501
This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
:return: The suspend of this V1beta1CronJobSpec. # noqa: E501
:rtype: bool
"""
return self._suspend
@suspend.setter
def suspend(self, suspend):
"""Sets the suspend of this V1beta1CronJobSpec.
This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false. # noqa: E501
:param suspend: The suspend of this V1beta1CronJobSpec. # noqa: E501
:type: bool
"""
self._suspend = suspend
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1beta1CronJobSpec):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1beta1CronJobSpec):
return True
        return self.to_dict() != other.to_dict()
pdbqt2pdb_ref.py | #!/usr/bin/env python
import pdbtools.ligand_tools as ligand_tools
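# Converts a ligand pdbqt file to pdb using a reference ligand pdb file
# (see ligand_tools.pdbqt_to_pdb_ref); any error returned by the converter is printed.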
def main():
import argparse
title_line = 'convert pdbqt to pdb using reference pdb file'
parser = argparse.ArgumentParser(description=title_line)
parser.add_argument('-i', '--input_file', required=True,
help='input ligand pdbqt file')
parser.add_argument('-o', '--output_file', required=True,
help='output ligand pdb file')
parser.add_argument('-r', '--ref_file', required=True,
help='reference ligand pdb file')
args = parser.parse_args()
ligand_input_file = args.input_file
ligand_output_file = args.output_file
ref_file = args.ref_file
e = ligand_tools.pdbqt_to_pdb_ref(ligand_input_file, ligand_output_file,
ref_file)
if e is not None:
print(e)
if __name__ == "__main__":
    main()
pytest_i2s.py | # SPDX-FileCopyrightText: 2022 Espressif Systems (Shanghai) CO LTD
# SPDX-License-Identifier: Apache-2.0
import pytest
from pytest_embedded import Dut
@pytest.mark.esp32
@pytest.mark.esp32s2
@pytest.mark.esp32c3
@pytest.mark.esp32s3
@pytest.mark.generic
@pytest.mark.parametrize(
'config',
[
'iram_safe',
'release',
],
indirect=True,
)
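# Run the full on-device I2S test menu ('*' selects every test) and check the unity result summary.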
def test_i2s(dut: Dut) -> None:
dut.expect_exact('Press ENTER to see the list of tests')
dut.write('*')
    dut.expect_unity_test_output()
azure_manager_test.go | /*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"os"
"reflect"
"strings"
"testing"
"time"
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
azclients "k8s.io/legacy-cloud-providers/azure/clients"
"k8s.io/legacy-cloud-providers/azure/clients/vmssclient/mockvmssclient"
"k8s.io/legacy-cloud-providers/azure/clients/vmssvmclient/mockvmssvmclient"
"k8s.io/legacy-cloud-providers/azure/retry"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2017-05-10/resources"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
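// The JSON configs below mirror what CreateAzureManager accepts: a default VMSS-based
// config, standard-VM-type configs with and without deployment parameters, and a
// deliberately malformed config.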
const validAzureCfg = `{
"cloud": "AzurePublicCloud",
"tenantId": "fakeId",
"subscriptionId": "fakeId",
"aadClientId": "fakeId",
"aadClientSecret": "fakeId",
"resourceGroup": "fakeId",
"location": "southeastasia",
"subnetName": "fakeName",
"securityGroupName": "fakeName",
"vnetName": "fakeName",
"routeTableName": "fakeName",
"primaryAvailabilitySetName": "fakeName",
"vmssCacheTTL": 60,
"maxDeploymentsCount": 8,
"cloudProviderRateLimit": false,
"routeRateLimit": {
"cloudProviderRateLimit": true,
"cloudProviderRateLimitQPS": 3
}
}`
const validAzureCfgForStandardVMType = `{
"cloud": "AzurePublicCloud",
"tenantId": "fakeId",
"subscriptionId": "fakeId",
"aadClientId": "fakeId",
"aadClientSecret": "fakeId",
"resourceGroup": "fakeId",
"vmType":"standard",
"location": "southeastasia",
"subnetName": "fakeName",
"securityGroupName": "fakeName",
"vnetName": "fakeName",
"routeTableName": "fakeName",
"primaryAvailabilitySetName": "fakeName",
"vmssCacheTTL": 60,
"maxDeploymentsCount": 8,
"cloudProviderRateLimit": false,
"routeRateLimit": {
"cloudProviderRateLimit": true,
"cloudProviderRateLimitQPS": 3
},
"deployment":"cluster-autoscaler-0001",
"deploymentParameters":{
"Name": "cluster-autoscaler-0001",
"Properties":{
"ProvisioningState": "Succeeded",
"Parameters": {
"PoolName01VMSize":"PoolName01"
},
"Template": {
"resources": [
{
"type":"Microsoft.Compute/virtualMachines/extensions",
"name":"cluster-autoscaler-0001-resourceName",
"properties": {
"hardwareProfile":{
"VMSize":"10G"
}
}
}
]
}
}
}
}`
const validAzureCfgForStandardVMTypeWithoutDeploymentParameters = `{
"cloud": "AzurePublicCloud",
"tenantId": "fakeId",
"subscriptionId": "fakeId",
"aadClientId": "fakeId",
"aadClientSecret": "fakeId",
"resourceGroup": "fakeId",
"vmType":"standard",
"location": "southeastasia",
"subnetName": "fakeName",
"securityGroupName": "fakeName",
"vnetName": "fakeName",
"routeTableName": "fakeName",
"primaryAvailabilitySetName": "fakeName",
"vmssCacheTTL": 60,
"maxDeploymentsCount": 8,
"cloudProviderRateLimit": false,
"routeRateLimit": {
"cloudProviderRateLimit": true,
"cloudProviderRateLimitQPS": 3
},
"deployment":"cluster-autoscaler-0001"
}`
const invalidAzureCfg = `{{}"cloud": "AzurePublicCloud",}`
func TestCreateAzureManagerValidConfig(t *testing.T) {
manager, err := CreateAzureManager(strings.NewReader(validAzureCfg), cloudprovider.NodeGroupDiscoveryOptions{})
expectedConfig := &Config{
Cloud: "AzurePublicCloud",
Location: "southeastasia",
TenantID: "fakeId",
SubscriptionID: "fakeId",
ResourceGroup: "fakeId",
VMType: "vmss",
AADClientID: "fakeId",
AADClientSecret: "fakeId",
VmssCacheTTL: 60,
MaxDeploymentsCount: 8,
CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
InterfaceRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
StorageAccountRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
DiskRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
},
}
assert.NoError(t, err)
assert.Equal(t, true, reflect.DeepEqual(*expectedConfig, *manager.config), "unexpected azure manager configuration")
}
func TestCreateAzureManagerValidConfigForStandardVMType(t *testing.T) {
manager, err := CreateAzureManager(strings.NewReader(validAzureCfgForStandardVMType), cloudprovider.NodeGroupDiscoveryOptions{})
expectedConfig := &Config{
Cloud: "AzurePublicCloud",
Location: "southeastasia",
TenantID: "fakeId",
SubscriptionID: "fakeId",
ResourceGroup: "fakeId",
VMType: "standard",
AADClientID: "fakeId",
AADClientSecret: "fakeId",
VmssCacheTTL: 60,
MaxDeploymentsCount: 8,
CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
InterfaceRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
StorageAccountRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
DiskRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: false,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
},
Deployment: "cluster-autoscaler-0001",
DeploymentParameters: map[string]interface{}{
"Name": "cluster-autoscaler-0001",
"Properties": map[string]interface{}{
"ProvisioningState": "Succeeded",
"Parameters": map[string]interface{}{
"PoolName01VMSize": "PoolName01",
},
"Template": map[string]interface{}{
"resources": []interface{}{
map[string]interface{}{
"type": "Microsoft.Compute/virtualMachines/extensions",
"name": "cluster-autoscaler-0001-resourceName",
"properties": map[string]interface{}{
"hardwareProfile": map[string]interface{}{
"VMSize": "10G",
},
},
},
},
},
},
},
}
assert.NoError(t, err)
assert.Equal(t, *expectedConfig, *manager.config, "unexpected azure manager configuration, expected: %v, actual: %v", *expectedConfig, *manager.config)
discoveryOpts := cloudprovider.NodeGroupDiscoveryOptions{NodeGroupAutoDiscoverySpecs: []string{
"label:cluster-autoscaler-enabled=true,cluster-autoscaler-name=fake-cluster",
"label:test-tag=test-value,another-test-tag=another-test-value",
}}
timeLayout := "2006-01-02 15:04:05"
timeBenchMark, _ := time.Parse(timeLayout, "2000-01-01 00:00:00")
fakeDeployments := map[string]resources.DeploymentExtended{
"cluster-autoscaler-0001": {
Name: to.StringPtr("cluster-autoscaler-0001"),
Properties: &resources.DeploymentPropertiesExtended{
ProvisioningState: to.StringPtr("Succeeded"),
Parameters: map[string]interface{}{
"PoolName01VMSize": to.StringPtr("PoolName01"),
},
Template: map[string]interface{}{
"resources": []interface{}{
map[string]interface{}{
"type": "Microsoft.Compute/virtualMachines/extensions",
"name": "cluster-autoscaler-0001-resourceName",
"properties": map[string]interface{}{
"hardwareProfile": map[string]interface{}{
"VMSize": "10G",
},
},
},
},
},
Timestamp: &date.Time{Time: timeBenchMark},
},
},
}
manager.azClient.deploymentsClient = &DeploymentsClientMock{
FakeStore: fakeDeployments,
}
specs, err2 := parseLabelAutoDiscoverySpecs(discoveryOpts)
assert.NoError(t, err2)
result, err3 := manager.getFilteredAutoscalingGroups(specs)
expectedNodeGroup := []cloudprovider.NodeGroup{(*AgentPool)(nil)}
assert.NoError(t, err3)
assert.Equal(t, expectedNodeGroup, result, "NodeGroup does not match, expected: %v, actual: %v", expectedNodeGroup, result)
// parseLabelAutoDiscoverySpecs with invalid NodeGroupDiscoveryOptions
invalidDiscoveryOpts := cloudprovider.NodeGroupDiscoveryOptions{NodeGroupAutoDiscoverySpecs: []string{"label:keywithoutvalue"}}
specs, err4 := parseLabelAutoDiscoverySpecs(invalidDiscoveryOpts)
expectedCfg := []labelAutoDiscoveryConfig([]labelAutoDiscoveryConfig(nil))
expectedErr := fmt.Errorf("invalid key=value pair [keywithoutvalue]")
assert.Equal(t, expectedCfg, specs, "Return labelAutoDiscoveryConfig does not match, expected: %v, actual: %v", expectedCfg, specs)
assert.Equal(t, expectedErr, err4, "parseLabelAutoDiscoverySpecs return error does not match, expected: %v, actual: %v", expectedErr, err4)
}
func TestCreateAzureManagerValidConfigForStandardVMTypeWithoutDeploymentParameters(t *testing.T) |
func TestCreateAzureManagerWithNilConfig(t *testing.T) {
expectedConfig := &Config{
Cloud: "AzurePublicCloud",
Location: "southeastasia",
TenantID: "tenantId",
SubscriptionID: "subscriptionId",
ResourceGroup: "resourceGroup",
VMType: "vmss",
AADClientID: "aadClientId",
AADClientSecret: "aadClientSecret",
AADClientCertPath: "aadClientCertPath",
AADClientCertPassword: "aadClientCertPassword",
Deployment: "deployment",
ClusterName: "clusterName",
NodeResourceGroup: "resourcegroup",
UseManagedIdentityExtension: true,
UserAssignedIdentityID: "UserAssignedIdentityID",
VmssCacheTTL: 100,
MaxDeploymentsCount: 8,
CloudProviderBackoff: true,
CloudProviderBackoffRetries: 1,
CloudProviderBackoffExponent: 1,
CloudProviderBackoffDuration: 1,
CloudProviderBackoffJitter: 1,
CloudProviderRateLimitConfig: CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
InterfaceRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
StorageAccountRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
DiskRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
VirtualMachineScaleSetRateLimit: &azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 5,
CloudProviderRateLimitBucketWrite: 5,
CloudProviderRateLimitQPS: 1,
CloudProviderRateLimitQPSWrite: 1,
},
},
}
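	// Populate the environment so that CreateAzureManager(nil, ...) builds its
	// configuration purely from environment variables.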
os.Setenv("ARM_CLOUD", "AzurePublicCloud")
os.Setenv("LOCATION", "southeastasia")
os.Setenv("ARM_SUBSCRIPTION_ID", "subscriptionId")
os.Setenv("ARM_RESOURCE_GROUP", "resourceGroup")
os.Setenv("ARM_TENANT_ID", "tenantId")
os.Setenv("ARM_CLIENT_ID", "aadClientId")
os.Setenv("ARM_CLIENT_SECRET", "aadClientSecret")
os.Setenv("ARM_VM_TYPE", "vmss")
os.Setenv("ARM_CLIENT_CERT_PATH", "aadClientCertPath")
os.Setenv("ARM_CLIENT_CERT_PASSWORD", "aadClientCertPassword")
os.Setenv("ARM_DEPLOYMENT", "deployment")
os.Setenv("AZURE_CLUSTER_NAME", "clusterName")
os.Setenv("AZURE_NODE_RESOURCE_GROUP", "resourcegroup")
os.Setenv("ARM_USE_MANAGED_IDENTITY_EXTENSION", "true")
os.Setenv("ARM_USER_ASSIGNED_IDENTITY_ID", "UserAssignedIdentityID")
os.Setenv("AZURE_VMSS_CACHE_TTL", "100")
os.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "8")
os.Setenv("ENABLE_BACKOFF", "true")
os.Setenv("BACKOFF_RETRIES", "1")
os.Setenv("BACKOFF_EXPONENT", "1")
os.Setenv("BACKOFF_DURATION", "1")
os.Setenv("BACKOFF_JITTER", "1")
os.Setenv("CLOUD_PROVIDER_RATE_LIMIT", "true")
manager, err := CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, true, reflect.DeepEqual(*expectedConfig, *manager.config), "unexpected azure manager configuration")
// invalid bool for ARM_USE_MANAGED_IDENTITY_EXTENSION
os.Setenv("ARM_USE_MANAGED_IDENTITY_EXTENSION", "invalidbool")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr0 := "strconv.ParseBool: parsing \"invalidbool\": invalid syntax"
assert.Nil(t, manager)
assert.Equal(t, expectedErr0, err.Error(), "Return err does not match, expected: %v, actual: %v", expectedErr0, err.Error())
// revert back to good ARM_USE_MANAGED_IDENTITY_EXTENSION
os.Setenv("ARM_USE_MANAGED_IDENTITY_EXTENSION", "true")
// invalid int for AZURE_VMSS_CACHE_TTL
os.Setenv("AZURE_VMSS_CACHE_TTL", "invalidint")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr := fmt.Errorf("failed to parse AZURE_VMSS_CACHE_TTL \"invalidint\": strconv.ParseInt: parsing \"invalidint\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good AZURE_VMSS_CACHE_TTL
os.Setenv("AZURE_VMSS_CACHE_TTL", "100")
// invalid int for AZURE_MAX_DEPLOYMENT_COUNT
os.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "invalidint")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse AZURE_MAX_DEPLOYMENT_COUNT \"invalidint\": strconv.ParseInt: parsing \"invalidint\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good AZURE_MAX_DEPLOYMENT_COUNT
os.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "8")
// zero AZURE_MAX_DEPLOYMENT_COUNT will use default value
os.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "0")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, int64(defaultMaxDeploymentsCount), (*manager.config).MaxDeploymentsCount, "MaxDeploymentsCount does not match.")
// revert back to good AZURE_MAX_DEPLOYMENT_COUNT
os.Setenv("AZURE_MAX_DEPLOYMENT_COUNT", "8")
// invalid bool for ENABLE_BACKOFF
os.Setenv("ENABLE_BACKOFF", "invalidbool")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse ENABLE_BACKOFF \"invalidbool\": strconv.ParseBool: parsing \"invalidbool\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good ENABLE_BACKOFF
os.Setenv("ENABLE_BACKOFF", "true")
// invalid int for BACKOFF_RETRIES
os.Setenv("BACKOFF_RETRIES", "invalidint")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse BACKOFF_RETRIES '\\x00': strconv.ParseInt: parsing \"invalidint\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good BACKOFF_RETRIES
os.Setenv("BACKOFF_RETRIES", "1")
// empty BACKOFF_RETRIES will use default value
os.Setenv("BACKOFF_RETRIES", "")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, backoffRetriesDefault, (*manager.config).CloudProviderBackoffRetries, "CloudProviderBackoffRetries does not match.")
// revert back to good BACKOFF_RETRIES
os.Setenv("BACKOFF_RETRIES", "1")
// invalid float for BACKOFF_EXPONENT
os.Setenv("BACKOFF_EXPONENT", "invalidfloat")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse BACKOFF_EXPONENT \"invalidfloat\": strconv.ParseFloat: parsing \"invalidfloat\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good BACKOFF_EXPONENT
os.Setenv("BACKOFF_EXPONENT", "1")
// empty BACKOFF_EXPONENT will use default value
os.Setenv("BACKOFF_EXPONENT", "")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, backoffExponentDefault, (*manager.config).CloudProviderBackoffExponent, "CloudProviderBackoffExponent does not match.")
// revert back to good BACKOFF_EXPONENT
os.Setenv("BACKOFF_EXPONENT", "1")
// invalid int for BACKOFF_DURATION
os.Setenv("BACKOFF_DURATION", "invalidint")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse BACKOFF_DURATION \"invalidint\": strconv.ParseInt: parsing \"invalidint\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good BACKOFF_DURATION
os.Setenv("BACKOFF_DURATION", "1")
// empty BACKOFF_DURATION will use default value
os.Setenv("BACKOFF_DURATION", "")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, backoffDurationDefault, (*manager.config).CloudProviderBackoffDuration, "CloudProviderBackoffDuration does not match.")
// revert back to good BACKOFF_DURATION
os.Setenv("BACKOFF_DURATION", "1")
// invalid float for BACKOFF_JITTER
os.Setenv("BACKOFF_JITTER", "invalidfloat")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse BACKOFF_JITTER \"invalidfloat\": strconv.ParseFloat: parsing \"invalidfloat\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good BACKOFF_JITTER
os.Setenv("BACKOFF_JITTER", "1")
// empty BACKOFF_JITTER will use default value
os.Setenv("BACKOFF_JITTER", "")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
assert.NoError(t, err)
assert.Equal(t, backoffJitterDefault, (*manager.config).CloudProviderBackoffJitter, "CloudProviderBackoffJitter does not match.")
// revert back to good BACKOFF_JITTER
os.Setenv("BACKOFF_JITTER", "1")
// invalid bool for CLOUD_PROVIDER_RATE_LIMIT
os.Setenv("CLOUD_PROVIDER_RATE_LIMIT", "invalidbool")
manager, err = CreateAzureManager(nil, cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr = fmt.Errorf("failed to parse CLOUD_PROVIDER_RATE_LIMIT: \"invalidbool\", strconv.ParseBool: parsing \"invalidbool\": invalid syntax")
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err, "Return err does not match, expected: %v, actual: %v", expectedErr, err)
// revert back to good CLOUD_PROVIDER_RATE_LIMIT
os.Setenv("CLOUD_PROVIDER_RATE_LIMIT", "1")
os.Unsetenv("ARM_CLOUD")
os.Unsetenv("ARM_SUBSCRIPTION_ID")
os.Unsetenv("LOCATION")
os.Unsetenv("ARM_RESOURCE_GROUP")
os.Unsetenv("ARM_TENANT_ID")
os.Unsetenv("ARM_CLIENT_ID")
os.Unsetenv("ARM_CLIENT_SECRET")
os.Unsetenv("ARM_VM_TYPE")
os.Unsetenv("ARM_CLIENT_CERT_PATH")
os.Unsetenv("ARM_CLIENT_CERT_PASSWORD")
os.Unsetenv("ARM_DEPLOYMENT")
os.Unsetenv("AZURE_CLUSTER_NAME")
os.Unsetenv("AZURE_NODE_RESOURCE_GROUP")
os.Unsetenv("ARM_USE_MANAGED_IDENTITY_EXTENSION")
os.Unsetenv("ARM_USER_ASSIGNED_IDENTITY_ID")
os.Unsetenv("AZURE_VMSS_CACHE_TTL")
os.Unsetenv("AZURE_MAX_DEPLOYMENT_COUNT")
os.Unsetenv("ENABLE_BACKOFF")
os.Unsetenv("BACKOFF_RETRIES")
os.Unsetenv("BACKOFF_EXPONENT")
os.Unsetenv("BACKOFF_DURATION")
os.Unsetenv("BACKOFF_JITTER")
os.Unsetenv("CLOUD_PROVIDER_RATE_LIMIT")
}
func TestCreateAzureManagerInvalidConfig(t *testing.T) {
_, err := CreateAzureManager(strings.NewReader(invalidAzureCfg), cloudprovider.NodeGroupDiscoveryOptions{})
assert.Error(t, err, "failed to unmarshal config body")
}
func TestFetchExplicitAsgs(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
min, max, name := 1, 15, "test-asg"
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
NodeGroupSpecs: []string{
fmt.Sprintf("%d:%d:%s", min, max, name),
},
}
manager := newTestAzureManager(t)
expectedVMSSVMs := newTestVMSSVMList()
expectedScaleSets := newTestVMSSList(3, "test-asg", "eastus")
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, "test-asg", gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
manager.fetchExplicitAsgs(ngdo.NodeGroupSpecs)
asgs := manager.asgCache.get()
assert.Equal(t, 1, len(asgs))
assert.Equal(t, name, asgs[0].Id())
assert.Equal(t, min, asgs[0].MinSize())
assert.Equal(t, max, asgs[0].MaxSize())
// test vmTypeStandard
testAS := newTestAgentPool(newTestAzureManager(t), "testAS")
timeLayout := "2006-01-02 15:04:05"
timeBenchMark, _ := time.Parse(timeLayout, "2000-01-01 00:00:00")
testAS.manager.azClient.deploymentsClient = &DeploymentsClientMock{
FakeStore: map[string]resources.DeploymentExtended{
"cluster-autoscaler-0001": {
Name: to.StringPtr("cluster-autoscaler-0001"),
Properties: &resources.DeploymentPropertiesExtended{
ProvisioningState: to.StringPtr("Succeeded"),
Timestamp: &date.Time{Time: timeBenchMark.Add(2 * time.Minute)},
},
},
},
}
testAS.manager.config.VMType = vmTypeStandard
err := testAS.manager.fetchExplicitAsgs([]string{"1:5:testAS"})
expectedErr := fmt.Errorf("failed to parse node group spec: deployment not found")
assert.Equal(t, expectedErr, err, "testAS.manager.fetchExplicitAsgs return error does not match, expected: %v, actual: %v", expectedErr, err)
err = testAS.manager.fetchExplicitAsgs(nil)
assert.NoError(t, err)
// test invalidVMType
manager.config.VMType = "invalidVMType"
err = manager.fetchExplicitAsgs(ngdo.NodeGroupSpecs)
expectedErr = fmt.Errorf("failed to parse node group spec: vmtype invalidVMType not supported")
assert.Equal(t, expectedErr, err, "manager.fetchExplicitAsgs return error does not match, expected: %v, actual: %v", expectedErr, err)
}
func TestParseLabelAutoDiscoverySpecs(t *testing.T) {
testCases := []struct {
name string
specs []string
expected []labelAutoDiscoveryConfig
expectedErr bool
}{
{
name: "ValidSpec",
specs: []string{
"label:cluster-autoscaler-enabled=true,cluster-autoscaler-name=fake-cluster",
"label:test-tag=test-value,another-test-tag=another-test-value",
},
expected: []labelAutoDiscoveryConfig{
{Selector: map[string]string{"cluster-autoscaler-enabled": "true", "cluster-autoscaler-name": "fake-cluster"}},
{Selector: map[string]string{"test-tag": "test-value", "another-test-tag": "another-test-value"}},
},
},
{
name: "MissingAutoDiscoverLabel",
specs: []string{"test-tag=test-value,another-test-tag"},
expectedErr: true,
},
{
			name:        "InvalidAutoDiscoverLabel",
specs: []string{"invalid:test-tag=test-value,another-test-tag"},
expectedErr: true,
},
{
name: "MissingValue",
specs: []string{"label:test-tag="},
expectedErr: true,
},
{
name: "MissingKey",
specs: []string{"label:=test-val"},
expectedErr: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ngdo := cloudprovider.NodeGroupDiscoveryOptions{NodeGroupAutoDiscoverySpecs: tc.specs}
actual, err := parseLabelAutoDiscoverySpecs(ngdo)
if tc.expectedErr {
assert.Error(t, err)
return
}
assert.NoError(t, err)
assert.True(t, assert.ObjectsAreEqualValues(tc.expected, actual), "expected %#v, but found: %#v", tc.expected, actual)
})
}
}
func TestListScalesets(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
manager := newTestAzureManager(t)
vmssTag := "fake-tag"
vmssTagValue := "fake-value"
vmssName := "test-vmss"
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
NodeGroupAutoDiscoverySpecs: []string{fmt.Sprintf("label:%s=%s", vmssTag, vmssTagValue)},
}
specs, err := parseLabelAutoDiscoverySpecs(ngdo)
assert.NoError(t, err)
testCases := []struct {
name string
specs map[string]string
isListVMSSFail bool
expected []cloudprovider.NodeGroup
expectedErrString string
}{
{
name: "ValidMinMax",
specs: map[string]string{"min": "5", "max": "50"},
expected: []cloudprovider.NodeGroup{&ScaleSet{
azureRef: azureRef{
Name: vmssName,
},
minSize: 5,
maxSize: 50,
manager: manager,
curSize: -1,
sizeRefreshPeriod: defaultVmssSizeRefreshPeriod,
}},
},
{
name: "InvalidMin",
specs: map[string]string{"min": "some-invalid-string"},
expectedErrString: "invalid minimum size specified for vmss:",
},
{
name: "NoMin",
specs: map[string]string{"max": "50"},
expectedErrString: fmt.Sprintf("no minimum size specified for vmss: %s", vmssName),
},
{
name: "InvalidMax",
specs: map[string]string{"min": "5", "max": "some-invalid-string"},
expectedErrString: "invalid maximum size specified for vmss:",
},
{
name: "NoMax",
specs: map[string]string{"min": "5"},
expectedErrString: fmt.Sprintf("no maximum size specified for vmss: %s", vmssName),
},
{
name: "MinLessThanZero",
specs: map[string]string{"min": "-4", "max": "20"},
			expectedErrString: "minimum size must be a non-negative number of nodes",
},
{
name: "MinGreaterThanMax",
specs: map[string]string{"min": "50", "max": "5"},
expectedErrString: "maximum size must be greater than minimum size",
},
{
name: "ListVMSSFail",
specs: map[string]string{"min": "5", "max": "50"},
isListVMSSFail: true,
expectedErrString: "List VMSS failed",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
tags := make(map[string]*string)
tags[vmssTag] = &vmssTagValue
if val, ok := tc.specs["min"]; ok {
tags["min"] = &val
}
if val, ok := tc.specs["max"]; ok {
tags["max"] = &val
}
expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, tags)}
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
if tc.isListVMSSFail {
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(nil, &retry.Error{RawError: fmt.Errorf("List VMSS failed")}).AnyTimes()
} else {
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
}
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
asgs, err := manager.listScaleSets(specs)
if tc.expectedErrString != "" {
assert.Error(t, err)
assert.Contains(t, err.Error(), tc.expectedErrString)
return
}
assert.NoError(t, err)
assert.True(t, assert.ObjectsAreEqualValues(tc.expected, asgs), "expected %#v, but found: %#v", tc.expected, asgs)
})
}
}
func TestGetFilteredAutoscalingGroupsVmss(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
vmssName := "test-vmss"
vmssTag := "fake-tag"
vmssTagValue := "fake-value"
min := "1"
minVal := 1
max := "5"
maxVal := 5
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
NodeGroupAutoDiscoverySpecs: []string{fmt.Sprintf("label:%s=%s", vmssTag, vmssTagValue)},
}
manager := newTestAzureManager(t)
expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &min, "max": &max})}
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
specs, err := parseLabelAutoDiscoverySpecs(ngdo)
assert.NoError(t, err)
asgs, err := manager.getFilteredAutoscalingGroups(specs)
assert.NoError(t, err)
expectedAsgs := []cloudprovider.NodeGroup{&ScaleSet{
azureRef: azureRef{
Name: vmssName,
},
minSize: minVal,
maxSize: maxVal,
manager: manager,
curSize: -1,
sizeRefreshPeriod: defaultVmssSizeRefreshPeriod,
}}
assert.True(t, assert.ObjectsAreEqualValues(expectedAsgs, asgs), "expected %#v, but found: %#v", expectedAsgs, asgs)
}
func TestGetFilteredAutoscalingGroupsWithInvalidVMType(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
NodeGroupAutoDiscoverySpecs: []string{"label:fake-tag=fake-value"},
}
manager := newTestAzureManager(t)
expectedScaleSets := []compute.VirtualMachineScaleSet{}
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
manager.config.VMType = vmTypeAKS
specs, err := parseLabelAutoDiscoverySpecs(ngdo)
assert.NoError(t, err)
asgs1, err1 := manager.getFilteredAutoscalingGroups(specs)
assert.Nil(t, asgs1)
assert.Nil(t, err1)
manager.config.VMType = "invalidVMType"
expectedErr := fmt.Errorf("vmType \"invalidVMType\" not supported")
asgs, err2 := manager.getFilteredAutoscalingGroups(specs)
assert.Nil(t, asgs)
assert.Equal(t, expectedErr, err2, "Not match, expected: %v, actual: %v", expectedErr, err2)
}
func TestFetchAutoAsgsVmss(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
vmssName := "test-vmss"
vmssTag := "fake-tag"
vmssTagValue := "fake-value"
minString := "1"
minVal := 1
maxString := "5"
maxVal := 5
ngdo := cloudprovider.NodeGroupDiscoveryOptions{
NodeGroupAutoDiscoverySpecs: []string{fmt.Sprintf("label:%s=%s", vmssTag, vmssTagValue)},
}
expectedScaleSets := []compute.VirtualMachineScaleSet{fakeVMSSWithTags(vmssName, map[string]*string{vmssTag: &vmssTagValue, "min": &minString, "max": &maxString})}
expectedVMSSVMs := newTestVMSSVMList()
manager := newTestAzureManager(t)
mockVMSSClient := mockvmssclient.NewMockInterface(ctrl)
mockVMSSClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup).Return(expectedScaleSets, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetsClient = mockVMSSClient
mockVMSSVMClient := mockvmssvmclient.NewMockInterface(ctrl)
mockVMSSVMClient.EXPECT().List(gomock.Any(), manager.config.ResourceGroup, vmssName, gomock.Any()).Return(expectedVMSSVMs, nil).AnyTimes()
manager.azClient.virtualMachineScaleSetVMsClient = mockVMSSVMClient
specs, err := parseLabelAutoDiscoverySpecs(ngdo)
assert.NoError(t, err)
manager.asgAutoDiscoverySpecs = specs
// assert cache is empty before fetching auto asgs
asgs := manager.asgCache.get()
assert.Equal(t, 0, len(asgs))
manager.fetchAutoAsgs()
asgs = manager.asgCache.get()
assert.Equal(t, 1, len(asgs))
assert.Equal(t, vmssName, asgs[0].Id())
assert.Equal(t, minVal, asgs[0].MinSize())
assert.Equal(t, maxVal, asgs[0].MaxSize())
// test explicitlyConfigured
manager.explicitlyConfigured[vmssName] = true
manager.fetchAutoAsgs()
asgs = manager.asgCache.get()
assert.Equal(t, 1, len(asgs))
}
func TestInitializeCloudProviderRateLimitConfigWithNoConfigReturnsNoError(t *testing.T) {
err := InitializeCloudProviderRateLimitConfig(nil)
assert.Nil(t, err, "err should be nil")
}
func TestInitializeCloudProviderRateLimitConfigWithNoRateLimitSettingsReturnsDefaults(t *testing.T) {
emptyConfig := &CloudProviderRateLimitConfig{}
err := InitializeCloudProviderRateLimitConfig(emptyConfig)
assert.NoError(t, err)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitQPSDefault)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, rateLimitBucketDefault)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitQPSDefault)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitBucketDefault)
}
func TestInitializeCloudProviderRateLimitConfigWithReadRateLimitSettingsFromEnv(t *testing.T) {
emptyConfig := &CloudProviderRateLimitConfig{}
var rateLimitReadQPS float32 = 3.0
rateLimitReadBuckets := 10
os.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS))
os.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets))
err := InitializeCloudProviderRateLimitConfig(emptyConfig)
assert.NoError(t, err)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitReadQPS)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, rateLimitReadBuckets)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitReadQPS)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitReadBuckets)
os.Unsetenv(rateLimitReadBucketsEnvVar)
os.Unsetenv(rateLimitReadQPSEnvVar)
}
func TestInitializeCloudProviderRateLimitConfigWithReadAndWriteRateLimitSettingsFromEnv(t *testing.T) {
emptyConfig := &CloudProviderRateLimitConfig{}
var rateLimitReadQPS float32 = 3.0
rateLimitReadBuckets := 10
var rateLimitWriteQPS float32 = 6.0
rateLimitWriteBuckets := 20
os.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS))
os.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets))
os.Setenv(rateLimitWriteQPSEnvVar, fmt.Sprintf("%.1f", rateLimitWriteQPS))
os.Setenv(rateLimitWriteBucketsEnvVar, fmt.Sprintf("%d", rateLimitWriteBuckets))
err := InitializeCloudProviderRateLimitConfig(emptyConfig)
assert.NoError(t, err)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPS, rateLimitReadQPS)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucket, rateLimitReadBuckets)
assert.Equal(t, emptyConfig.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS)
assert.Equal(t, emptyConfig.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets)
os.Unsetenv(rateLimitReadQPSEnvVar)
os.Unsetenv(rateLimitReadBucketsEnvVar)
os.Unsetenv(rateLimitWriteQPSEnvVar)
os.Unsetenv(rateLimitWriteBucketsEnvVar)
}
func TestInitializeCloudProviderRateLimitConfigWithReadAndWriteRateLimitAlreadySetInConfig(t *testing.T) {
var rateLimitReadQPS float32 = 3.0
rateLimitReadBuckets := 10
var rateLimitWriteQPS float32 = 6.0
rateLimitWriteBuckets := 20
configWithRateLimits := &CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimitBucket: rateLimitReadBuckets,
CloudProviderRateLimitBucketWrite: rateLimitWriteBuckets,
CloudProviderRateLimitQPS: rateLimitReadQPS,
CloudProviderRateLimitQPSWrite: rateLimitWriteQPS,
},
}
os.Setenv(rateLimitReadQPSEnvVar, "99")
os.Setenv(rateLimitReadBucketsEnvVar, "99")
os.Setenv(rateLimitWriteQPSEnvVar, "99")
os.Setenv(rateLimitWriteBucketsEnvVar, "99")
err := InitializeCloudProviderRateLimitConfig(configWithRateLimits)
assert.NoError(t, err)
assert.Equal(t, configWithRateLimits.CloudProviderRateLimitQPS, rateLimitReadQPS)
assert.Equal(t, configWithRateLimits.CloudProviderRateLimitBucket, rateLimitReadBuckets)
assert.Equal(t, configWithRateLimits.CloudProviderRateLimitQPSWrite, rateLimitWriteQPS)
assert.Equal(t, configWithRateLimits.CloudProviderRateLimitBucketWrite, rateLimitWriteBuckets)
os.Unsetenv(rateLimitReadQPSEnvVar)
os.Unsetenv(rateLimitReadBucketsEnvVar)
os.Unsetenv(rateLimitWriteQPSEnvVar)
os.Unsetenv(rateLimitWriteBucketsEnvVar)
}
func TestInitializeCloudProviderRateLimitConfigWithInvalidReadAndWriteRateLimitSettingsFromEnv(t *testing.T) {
emptyConfig := &CloudProviderRateLimitConfig{}
var rateLimitReadQPS float32 = 3.0
rateLimitReadBuckets := 10
var rateLimitWriteQPS float32 = 6.0
rateLimitWriteBuckets := 20
invalidSetting := "invalid"
testCases := []struct {
desc string
isInvalidRateLimitReadQPSEnvVar bool
isInvalidRateLimitReadBucketsEnvVar bool
isInvalidRateLimitWriteQPSEnvVar bool
isInvalidRateLimitWriteBucketsEnvVar bool
expectedErr bool
expectedErrMsg error
}{
{
desc: "an error shall be returned if invalid rateLimitReadQPSEnvVar",
isInvalidRateLimitReadQPSEnvVar: true,
expectedErr: true,
expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseFloat: parsing \"invalid\": invalid syntax", rateLimitReadQPSEnvVar, invalidSetting),
},
{
desc: "an error shall be returned if invalid rateLimitReadBucketsEnvVar",
isInvalidRateLimitReadBucketsEnvVar: true,
expectedErr: true,
expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseInt: parsing \"invalid\": invalid syntax", rateLimitReadBucketsEnvVar, invalidSetting),
},
{
desc: "an error shall be returned if invalid rateLimitWriteQPSEnvVar",
isInvalidRateLimitWriteQPSEnvVar: true,
expectedErr: true,
expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseFloat: parsing \"invalid\": invalid syntax", rateLimitWriteQPSEnvVar, invalidSetting),
},
{
desc: "an error shall be returned if invalid rateLimitWriteBucketsEnvVar",
isInvalidRateLimitWriteBucketsEnvVar: true,
expectedErr: true,
expectedErrMsg: fmt.Errorf("failed to parse %s: %q, strconv.ParseInt: parsing \"invalid\": invalid syntax", rateLimitWriteBucketsEnvVar, invalidSetting),
},
}
for i, test := range testCases {
if test.isInvalidRateLimitReadQPSEnvVar {
os.Setenv(rateLimitReadQPSEnvVar, invalidSetting)
} else {
os.Setenv(rateLimitReadQPSEnvVar, fmt.Sprintf("%.1f", rateLimitReadQPS))
}
if test.isInvalidRateLimitReadBucketsEnvVar {
os.Setenv(rateLimitReadBucketsEnvVar, invalidSetting)
} else {
os.Setenv(rateLimitReadBucketsEnvVar, fmt.Sprintf("%d", rateLimitReadBuckets))
}
if test.isInvalidRateLimitWriteQPSEnvVar {
os.Setenv(rateLimitWriteQPSEnvVar, invalidSetting)
} else {
os.Setenv(rateLimitWriteQPSEnvVar, fmt.Sprintf("%.1f", rateLimitWriteQPS))
}
if test.isInvalidRateLimitWriteBucketsEnvVar {
os.Setenv(rateLimitWriteBucketsEnvVar, invalidSetting)
} else {
os.Setenv(rateLimitWriteBucketsEnvVar, fmt.Sprintf("%d", rateLimitWriteBuckets))
}
err := InitializeCloudProviderRateLimitConfig(emptyConfig)
assert.Equal(t, test.expectedErr, err != nil, "TestCase[%d]: %s, return error: %v", i, test.desc, err)
assert.Equal(t, test.expectedErrMsg, err, "TestCase[%d]: %s, expected: %v, return: %v", i, test.desc, test.expectedErrMsg, err)
os.Unsetenv(rateLimitReadQPSEnvVar)
os.Unsetenv(rateLimitReadBucketsEnvVar)
os.Unsetenv(rateLimitWriteQPSEnvVar)
os.Unsetenv(rateLimitWriteBucketsEnvVar)
}
}
func TestOverrideDefaultRateLimitConfig(t *testing.T) {
var rateLimitReadQPS float32 = 3.0
rateLimitReadBuckets := 10
var rateLimitWriteQPS float32 = 6.0
rateLimitWriteBuckets := 20
defaultConfigWithRateLimits := &CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimitBucket: rateLimitReadBuckets,
CloudProviderRateLimitBucketWrite: rateLimitWriteBuckets,
CloudProviderRateLimitQPS: rateLimitReadQPS,
CloudProviderRateLimitQPSWrite: rateLimitWriteQPS,
},
}
configWithRateLimits := &CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimit: true,
CloudProviderRateLimitBucket: 0,
CloudProviderRateLimitBucketWrite: 0,
CloudProviderRateLimitQPS: 0,
CloudProviderRateLimitQPSWrite: 0,
},
}
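	// With CloudProviderRateLimit enabled and zero-valued QPS/bucket settings, the defaults should win.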
newconfig := overrideDefaultRateLimitConfig(&defaultConfigWithRateLimits.RateLimitConfig, &configWithRateLimits.RateLimitConfig)
assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitQPS, newconfig.CloudProviderRateLimitQPS)
assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitBucket, newconfig.CloudProviderRateLimitBucket)
assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitQPSWrite, newconfig.CloudProviderRateLimitQPSWrite)
assert.Equal(t, defaultConfigWithRateLimits.CloudProviderRateLimitBucketWrite, newconfig.CloudProviderRateLimitBucketWrite)
falseCloudProviderRateLimit := &CloudProviderRateLimitConfig{
RateLimitConfig: azclients.RateLimitConfig{
CloudProviderRateLimit: false,
},
}
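	// With CloudProviderRateLimit disabled, the supplied config should be returned unchanged.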
newconfig = overrideDefaultRateLimitConfig(&defaultConfigWithRateLimits.RateLimitConfig, &falseCloudProviderRateLimit.RateLimitConfig)
assert.Equal(t, &falseCloudProviderRateLimit.RateLimitConfig, newconfig)
}
func TestGetSubscriptionIdFromInstanceMetadata(t *testing.T) {
	// metadataURL in azure_manager.go is not reachable in our tests, so we expect this call to fail.
result, err := getSubscriptionIdFromInstanceMetadata()
expected := ""
assert.NotNil(t, err.Error())
assert.Equal(t, expected, result, "Verify return result failed, expected: %v, actual: %v", expected, result)
}
func TestManagerRefreshAndCleanup(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
manager := newTestAzureManager(t)
err := manager.Refresh()
assert.NoError(t, err)
manager.Cleanup()
}
| {
manager, err := CreateAzureManager(strings.NewReader(validAzureCfgForStandardVMTypeWithoutDeploymentParameters), cloudprovider.NodeGroupDiscoveryOptions{})
expectedErr := "open /var/lib/azure/azuredeploy.parameters.json: no such file or directory"
assert.Nil(t, manager)
assert.Equal(t, expectedErr, err.Error(), "return error does not match, expected: %v, actual: %v", expectedErr, err.Error())
} |
CustomSelectField.js | import React from "react";
import classnames from "classnames";
import withStyles from "@material-ui/core/styles/withStyles";
import FormControl from "@material-ui/core/FormControl";
import TextField from "@material-ui/core/TextField";
import Typography from "@material-ui/core/Typography";
import {COLOR} from "../constants/theme";
const styles = {
formControl: {
textAlign: 'left',
margin: '10px 0'
},
input: {
'& .MuiOutlinedInput-root': {
height: 50,
borderRadius: 15,
fontSize: 14,
fontWeight: 500,
color: COLOR.DARK,
'& fieldset': {
border: 'none',
borderRadius: 15,
},
'& .MuiSelect-outlined.MuiSelect-outlined': {
height: '100%',
paddingTop: 0,
paddingBottom: 0,
display: 'flex',
alignItems: 'center',
borderRadius: 15,
}
},
'& .MuiOutlinedInput-marginDense': {
height: 50,
fontSize: 14
},
},
light: {
'& .MuiOutlinedInput-root': {
background: COLOR.LIGHT_GREY
}
},
white: {
'& .MuiOutlinedInput-root': {
background: COLOR.WHITE
},
'& .MuiSelect-select:focus': {
background: COLOR.WHITE
}
}
}
class | extends React.Component {
render() {
const { classes, input, children, label, helperText,
placeholder, color, onChange, ...props } = this.props
return (
<FormControl fullWidth className={classes.formControl} { ...props }>
{ label && (
<Typography variant="caption">{ label }</Typography>
)}
<TextField
variant="outlined"
placeholder={placeholder}
classes={{
root: classnames(
classes.input,
color ? classes[color] : classes.light
)
}}
select
onChange={onChange}
{ ...input }
{ ...props }
helperText={helperText}
onKeyDown={onChange}
>
{ children }
</TextField>
</FormControl>
);
}
}
export default withStyles(styles)(CustomSelectField)
| CustomSelectField |
builder.go | package namespace
import (
core "github.com/authzed/spicedb/pkg/proto/core/v1"
iv1 "github.com/authzed/spicedb/pkg/proto/impl/v1"
)
// Namespace creates a namespace definition with one or more defined relations.
func Namespace(name string, relations ...*core.Relation) *core.NamespaceDefinition {
return &core.NamespaceDefinition{
Name: name,
Relation: relations,
}
}
// WithComment creates a namespace definition with a comment and one or more defined relations.
func WithComment(name string, comment string, relations ...*core.Relation) *core.NamespaceDefinition {
nd := Namespace(name, relations...)
nd.Metadata, _ = AddComment(nd.Metadata, comment)
return nd
}
// Relation creates a relation definition with an optional rewrite definition.
func Relation(name string, rewrite *core.UsersetRewrite, allowedDirectRelations ...*core.AllowedRelation) *core.Relation {
var typeInfo *core.TypeInformation
if len(allowedDirectRelations) > 0 {
typeInfo = &core.TypeInformation{
AllowedDirectRelations: allowedDirectRelations,
}
}
rel := &core.Relation{
Name: name, | switch {
case rewrite != nil && len(allowedDirectRelations) == 0:
if err := SetRelationKind(rel, iv1.RelationMetadata_PERMISSION); err != nil {
panic("failed to set relation kind: " + err.Error())
}
case rewrite == nil && len(allowedDirectRelations) > 0:
if err := SetRelationKind(rel, iv1.RelationMetadata_RELATION); err != nil {
panic("failed to set relation kind: " + err.Error())
}
default:
// By default we do not set a relation kind on the relation. Relations without any
// information, or relations with both rewrites and types are legacy relations from
// before the DSL schema and, as such, do not have a defined "kind".
}
return rel
}
// RelationWithComment creates a relation definition with a comment and an optional rewrite definition.
func RelationWithComment(name string, comment string, rewrite *core.UsersetRewrite, allowedDirectRelations ...*core.AllowedRelation) *core.Relation {
rel := Relation(name, rewrite, allowedDirectRelations...)
rel.Metadata, _ = AddComment(rel.Metadata, comment)
return rel
}
// AllowedRelation creates a relation reference to an allowed relation.
func AllowedRelation(namespaceName string, relationName string) *core.AllowedRelation {
return &core.AllowedRelation{
Namespace: namespaceName,
RelationOrWildcard: &core.AllowedRelation_Relation{
Relation: relationName,
},
}
}
// AllowedPublicNamespace creates a relation reference to an allowed public namespace.
func AllowedPublicNamespace(namespaceName string) *core.AllowedRelation {
return &core.AllowedRelation{
Namespace: namespaceName,
RelationOrWildcard: &core.AllowedRelation_PublicWildcard_{
PublicWildcard: &core.AllowedRelation_PublicWildcard{},
},
}
}
// RelationReference creates a relation reference.
func RelationReference(namespaceName string, relationName string) *core.RelationReference {
return &core.RelationReference{
Namespace: namespaceName,
Relation: relationName,
}
}
// Union creates a rewrite definition that combines/considers usersets in all children.
func Union(firstChild *core.SetOperation_Child, rest ...*core.SetOperation_Child) *core.UsersetRewrite {
return &core.UsersetRewrite{
RewriteOperation: &core.UsersetRewrite_Union{
Union: setOperation(firstChild, rest),
},
}
}
// Intersection creates a rewrite definition that returns/considers only usersets present in all children.
func Intersection(firstChild *core.SetOperation_Child, rest ...*core.SetOperation_Child) *core.UsersetRewrite {
return &core.UsersetRewrite{
RewriteOperation: &core.UsersetRewrite_Intersection{
Intersection: setOperation(firstChild, rest),
},
}
}
// Exclusion creates a rewrite definition that starts with the usersets of the first child
// and iteratively removes usersets that appear in the remaining children.
func Exclusion(firstChild *core.SetOperation_Child, rest ...*core.SetOperation_Child) *core.UsersetRewrite {
return &core.UsersetRewrite{
RewriteOperation: &core.UsersetRewrite_Exclusion{
Exclusion: setOperation(firstChild, rest),
},
}
}
func setOperation(firstChild *core.SetOperation_Child, rest []*core.SetOperation_Child) *core.SetOperation {
children := append([]*core.SetOperation_Child{firstChild}, rest...)
return &core.SetOperation{
Child: children,
}
}
// This creates a child for a set operation that references only direct usersets with the parent relation.
//
// TODO(jschorr): Remove once v0 is fully removed.
func This() *core.SetOperation_Child {
return &core.SetOperation_Child{
ChildType: &core.SetOperation_Child_XThis{},
}
}
// Nil creates a child for a set operation that references the empty set.
func Nil() *core.SetOperation_Child {
return &core.SetOperation_Child{
ChildType: &core.SetOperation_Child_XNil{},
}
}
// ComputedUserset creates a child for a set operation that follows a relation on the given starting object.
func ComputedUserset(relation string) *core.SetOperation_Child {
return &core.SetOperation_Child{
ChildType: &core.SetOperation_Child_ComputedUserset{
ComputedUserset: &core.ComputedUserset{
Relation: relation,
},
},
}
}
// TupleToUserset creates a child which first loads all tuples with the specific relation,
// and then unions all children on the usersets found by following a relation on those loaded
// tuples.
func TupleToUserset(tuplesetRelation, usersetRelation string) *core.SetOperation_Child {
return &core.SetOperation_Child{
ChildType: &core.SetOperation_Child_TupleToUserset{
TupleToUserset: &core.TupleToUserset{
Tupleset: &core.TupleToUserset_Tupleset{
Relation: tuplesetRelation,
},
ComputedUserset: &core.ComputedUserset{
Relation: usersetRelation,
Object: core.ComputedUserset_TUPLE_USERSET_OBJECT,
},
},
},
}
}
// Rewrite wraps a rewrite as a set operation child of another rewrite.
func Rewrite(rewrite *core.UsersetRewrite) *core.SetOperation_Child {
return &core.SetOperation_Child{
ChildType: &core.SetOperation_Child_UsersetRewrite{
UsersetRewrite: rewrite,
},
}
} | UsersetRewrite: rewrite,
TypeInformation: typeInfo,
}
|
sentinel1.py | """Map Sentinel-1 data products to xarray.
References:
- Sentinel-1 document library: https://sentinels.copernicus.eu/web/sentinel/user-guides/sentinel-1-sar/document-library
- Sentinel-1 Product Specification v3.9 07 May 2021 S1-RS-MDA-52-7441-3-9 documenting IPF 3.40
https://sentinel.esa.int/documents/247904/1877131/S1-RS-MDA-52-7441-3-9-2_Sentinel-1ProductSpecification.pdf
- Sentinel-1 Product Specification v3.7 27 February 2020 S1-RS-MDA-52-7441 documenting IPF 3.30
https://sentinel.esa.int/documents/247904/1877131/Sentinel-1-Product-Specification
"""
import contextlib
import os
import typing as T
import warnings
import fsspec
import numpy as np
import pandas as pd
import xarray as xr
from . import conventions, esa_safe
SPEED_OF_LIGHT = 299_792_458 # m / s
ONE_SECOND = np.timedelta64(1, "s")
def get_fs_path(
urlpath_or_path: esa_safe.PathType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
if fs is not None and storage_options is not None:
raise TypeError("only one of 'fs' and 'storage_options' can be not None")
if fs is None:
fs, _, paths = fsspec.get_fs_token_paths(
urlpath_or_path, storage_options=storage_options
)
if len(paths) == 0:
raise ValueError(f"file or object not found {urlpath_or_path!r}")
elif len(paths) > 1:
raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
path = paths[0]
else:
path = str(urlpath_or_path)
return fs, path
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
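    # Split an optional trailing numeric burst index off the group, e.g. "IW1/VV/3" -> ("IW1/VV", 3).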
if group is None:
group = ""
if group.startswith("/"):
group = group[1:]
burst_index = None
parent_group, _, last_name = group.rpartition("/")
if parent_group.count("/") == 1 and last_name.isnumeric():
burst_index = int(last_name)
group = parent_group
return group, burst_index
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
calibration_vectors = esa_safe.parse_tag_as_list(
calibration, ".//calibrationVector", "calibration"
)
azimuth_time_list = []
pixel_list = []
line_list = []
sigmaNought_list = []
betaNought_list = []
gamma_list = []
dn_list = []
for vector in calibration_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
sigmaNought_list.append(sigmaNought)
betaNought = np.fromstring(vector["betaNought"]["$"], dtype=np.float32, sep=" ") # type: ignore
betaNought_list.append(betaNought)
gamma = np.fromstring(vector["gamma"]["$"], dtype=np.float32, sep=" ") # type: ignore
gamma_list.append(gamma)
dn = np.fromstring(vector["dn"]["$"], dtype=np.float32, sep=" ") # type: ignore
dn_list.append(dn)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise calibration vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"sigmaNought": (("line", "pixel"), sigmaNought_list),
"betaNought": (("line", "pixel"), betaNought_list),
"gamma": (("line", "pixel"), gamma_list),
"dn": (("line", "pixel"), dn_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_range_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseRangeVector", "noise")
azimuth_time_list = []
pixel_list = []
line_list = []
noiseRangeLut_list = []
for vector in noise_vectors:
azimuth_time_list.append(vector["azimuthTime"])
line_list.append(vector["line"])
pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ") # type: ignore
pixel_list.append(pixel)
noiseRangeLut = np.fromstring(vector["noiseRangeLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseRangeLut_list.append(noiseRangeLut)
pixel = np.array(pixel_list)
if not np.allclose(pixel, pixel[0]):
raise ValueError(
"Unable to organise noise vectors in a regular line-pixel grid"
)
data_vars = {
"azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
"noiseRangeLut": (("line", "pixel"), noiseRangeLut_list),
}
coords = {"line": line_list, "pixel": pixel_list[0]}
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_noise_azimuth_dataset(noise: esa_safe.PathType) -> xr.Dataset:
noise_vectors = esa_safe.parse_tag_as_list(noise, ".//noiseAzimuthVector", "noise")
first_range_sample = []
line_list = []
noiseAzimuthLut_list = []
for vector in noise_vectors:
first_range_sample.append(vector["firstRangeSample"])
line = np.fromstring(vector["line"]["$"], dtype=int, sep=" ") # type: ignore
line_list.append(line)
noiseAzimuthLut = np.fromstring(vector["noiseAzimuthLut"]["$"], dtype=np.float32, sep=" ") # type: ignore
noiseAzimuthLut_list.append(noiseAzimuthLut)
    # BROKEN: GRDs have line and noiseAzimuthLut of different sizes, so we take the first one
data_vars = {}
coords = {}
if first_range_sample:
data_vars["noiseAzimuthLut"] = ("line", noiseAzimuthLut_list[0])
coords["line"] = line_list[0]
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinate_conversion_dataset(
annotation_path: esa_safe.PathType,
) -> xr.Dataset:
coordinate_conversion = esa_safe.parse_tag_as_list(
annotation_path, ".//coordinateConversionList/coordinateConversion"
)
gr0 = []
sr0 = []
azimuth_time = []
slant_range_time = []
srgrCoefficients: T.List[T.List[float]] = []
grsrCoefficients: T.List[T.List[float]] = []
for values in coordinate_conversion:
sr0.append(values["sr0"])
gr0.append(values["gr0"])
azimuth_time.append(values["azimuthTime"])
slant_range_time.append(values["slantRangeTime"])
srgrCoefficients.append(
[float(v) for v in values["srgrCoefficients"]["$"].split()]
)
grsrCoefficients.append(
[float(v) for v in values["grsrCoefficients"]["$"].split()]
)
coords: T.Dict[str, T.Any] = {}
data_vars: T.Dict[str, T.Any] = {}
if srgrCoefficients:
coords["azimuth_time"] = [np.datetime64(dt) for dt in azimuth_time]
coords["degree"] = list(range(len(srgrCoefficients[0])))
data_vars["gr0"] = ("azimuth_time", gr0)
data_vars["sr0"] = ("azimuth_time", sr0)
data_vars["slant_range_time"] = ("azimuth_time", slant_range_time)
data_vars["srgrCoefficients"] = (("azimuth_time", "degree"), srgrCoefficients)
data_vars["grsrCoefficients"] = (("azimuth_time", "degree"), grsrCoefficients)
return xr.Dataset(data_vars=data_vars, coords=coords)
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
geolocation_grid_points = esa_safe.parse_tag_as_list(
annotation, ".//geolocationGridPoint"
)
azimuth_time = []
slant_range_time = []
line_set = set()
pixel_set = set()
for ggp in geolocation_grid_points:
if ggp["line"] not in line_set:
azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
line_set.add(ggp["line"])
if ggp["pixel"] not in pixel_set:
slant_range_time.append(ggp["slantRangeTime"])
pixel_set.add(ggp["pixel"])
shape = (len(azimuth_time), len(slant_range_time))
dims = ("azimuth_time", "slant_range_time")
data_vars = {
"latitude": (dims, np.full(shape, np.nan)),
"longitude": (dims, np.full(shape, np.nan)),
"height": (dims, np.full(shape, np.nan)),
"incidenceAngle": (dims, np.full(shape, np.nan)),
"elevationAngle": (dims, np.full(shape, np.nan)),
}
line = sorted(line_set)
pixel = sorted(pixel_set)
for ggp in geolocation_grid_points:
for var in data_vars:
j = line.index(ggp["line"])
i = pixel.index(ggp["pixel"])
data_vars[var][1][j, i] = ggp[var]
ds = xr.Dataset(
data_vars=data_vars,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"slant_range_time": slant_range_time,
"line": ("azimuth_time", line),
"pixel": ("slant_range_time", pixel),
},
)
return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
attitudes = esa_safe.parse_tag_as_list(annotation, ".//attitude")
variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
azimuth_time: T.List[T.Any] = []
data_vars: T.Dict[str, T.Any] = {var: ("azimuth_time", []) for var in variables}
for attitude in attitudes:
azimuth_time.append(attitude["time"])
for var in variables:
data_vars[var][1].append(attitude[var])
ds = xr.Dataset(
data_vars=data_vars,
coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
)
return ds
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
orbits = esa_safe.parse_tag_as_list(annotation, ".//orbit")
reference_system = orbits[0]["frame"]
variables = ["position", "velocity"]
data: T.Dict[str, T.List[T.Any]] = {var: [[], [], []] for var in variables}
azimuth_time: T.List[T.Any] = []
for orbit in orbits:
azimuth_time.append(orbit["time"])
data["position"][0].append(orbit["position"]["x"])
data["position"][1].append(orbit["position"]["y"])
data["position"][2].append(orbit["position"]["z"])
data["velocity"][0].append(orbit["velocity"]["x"])
data["velocity"][1].append(orbit["velocity"]["y"])
data["velocity"][2].append(orbit["velocity"]["z"])
if orbit["frame"] != reference_system:
warnings.warn(
"reference_system is not consistent in all the state vectors. "
)
reference_system = None
position = xr.Variable(data=data["position"], dims=("axis", "azimuth_time")) # type: ignore
velocity = xr.Variable(data=data["velocity"], dims=("axis", "azimuth_time")) # type: ignore
attrs = {}
if reference_system is not None:
attrs.update({"reference_system": reference_system})
ds = xr.Dataset(
data_vars={"position": position, "velocity": velocity},
attrs=attrs,
coords={
"azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
"axis": [0, 1, 2],
},
)
return ds
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
dc_estimates = esa_safe.parse_tag_as_list(annotation, ".//dcEstimate")
azimuth_time = []
t0 = []
data_dc_poly = []
for dc_estimate in dc_estimates:
azimuth_time.append(dc_estimate["azimuthTime"])
t0.append(dc_estimate["t0"])
data_dc_poly.append(
[float(c) for c in dc_estimate["dataDcPolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(data_dc_poly[0]))),
},
)
return ds
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
azimuth_fm_rates = esa_safe.parse_tag_as_list(annotation, ".//azimuthFmRate")
azimuth_time = []
t0 = []
azimuth_fm_rate_poly = []
for azimuth_fm_rate in azimuth_fm_rates:
azimuth_time.append(azimuth_fm_rate["azimuthTime"])
t0.append(azimuth_fm_rate["t0"])
azimuth_fm_rate_poly.append(
[float(c) for c in azimuth_fm_rate["azimuthFmRatePolynomial"]["$"].split()]
)
ds = xr.Dataset(
data_vars={
"t0": ("azimuth_time", t0),
"azimuth_fm_rate_polynomial": (
("azimuth_time", "degree"),
azimuth_fm_rate_poly,
),
},
coords={
"azimuth_time": [np.datetime64(at) for at in azimuth_time],
"degree": list(range(len(azimuth_fm_rate_poly[0]))),
},
)
return ds
def find_available_groups(
product_files: T.Dict[str, T.Tuple[str, str, str, str, str]],
product_path: str,
check_files_exist: bool = False,
fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, T.List[str]]:
groups: T.Dict[str, T.List[str]] = {}
for path, (type, _, swath, polarization, _) in product_files.items():
swath_pol_group = f"{swath}/{polarization}".upper()
abspath = os.path.join(product_path, os.path.normpath(path))
if check_files_exist:
if not fs.exists(abspath):
continue
if type == "s1Level1ProductSchema":
groups[swath.upper()] = [""]
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
for metadata_group in [
"orbit",
"attitude",
"azimuth_fm_rate",
"dc_estimate",
"gcp",
"coordinate_conversion",
]:
groups[f"{swath_pol_group}/{metadata_group}"] = [abspath]
elif type == "s1Level1CalibrationSchema":
groups[f"{swath_pol_group}/calibration"] = [abspath]
elif type == "s1Level1NoiseSchema":
groups[f"{swath_pol_group}/noise_range"] = [abspath]
groups[f"{swath_pol_group}/noise_azimuth"] = [abspath]
elif type == "s1Level1MeasurementSchema":
groups[swath_pol_group] = [abspath] + groups.get(swath_pol_group, [])
return groups
def open_pol_dataset(
measurement: esa_safe.PathOrFileType,
annotation: esa_safe.PathOrFileType,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
product_information = esa_safe.parse_tag(annotation, ".//productInformation")
image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
incidence_angle_mid_swath = image_information["incidenceAngleMidSwath"]
number_of_samples = image_information["numberOfSamples"]
first_slant_range_time = image_information["slantRangeTime"]
slant_range_time_interval = 1 / product_information["rangeSamplingRate"]
number_of_lines = image_information["numberOfLines"]
first_azimuth_time = image_information["productFirstLineUtcTime"]
azimuth_time_interval = image_information["azimuthTimeInterval"]
number_of_bursts = swath_timing["burstList"]["@count"]
    range_pixel_spacing = image_information["rangePixelSpacing"]
anx_datetime = image_information["ascendingNodeTime"]
    attrs = {
        "sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
        "sar:pixel_spacing_azimuth": image_information["azimuthPixelSpacing"],
        "sar:pixel_spacing_range": range_pixel_spacing,
        "azimuth_time_interval": azimuth_time_interval,
        "slant_range_time_interval": slant_range_time_interval,
        "incidence_angle_mid_swath": incidence_angle_mid_swath,
        "sat:anx_datetime": anx_datetime + "Z",
    }
encoding = {}
swap_dims = {}
chunks: T.Union[None, T.Dict[str, int]] = None
azimuth_time = pd.date_range(
start=first_azimuth_time,
periods=number_of_lines,
freq=pd.Timedelta(azimuth_time_interval, "s"),
).values
if number_of_bursts == 0:
swap_dims = {"line": "azimuth_time", "pixel": "slant_range_time"}
else:
lines_per_burst = swath_timing["linesPerBurst"]
attrs.update(
{
"azimuth_steering_rate": product_information["azimuthSteeringRate"],
"number_of_bursts": number_of_bursts,
"lines_per_burst": lines_per_burst,
}
)
for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
first_azimuth_time_burst = burst["azimuthTime"]
azimuth_time_burst = pd.date_range(
start=first_azimuth_time_burst,
periods=lines_per_burst,
freq=pd.Timedelta(azimuth_time_interval, "s"),
)
azimuth_time[
lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
] = azimuth_time_burst
# chunk at burst boundaries if dask is present
try:
import dask # noqa
encoding["preferred_chunks"] = {"line": lines_per_burst}
chunks = {}
except ModuleNotFoundError:
pass
coords = {
"pixel": np.arange(0, number_of_samples, dtype=int),
"line": np.arange(0, number_of_lines, dtype=int),
"azimuth_time": ("line", azimuth_time),
}
if product_information["projection"] == "Slant Range":
slant_range_time = np.linspace(
first_slant_range_time,
first_slant_range_time
+ slant_range_time_interval * (number_of_samples - 1),
number_of_samples,
)
coords["slant_range_time"] = ("pixel", slant_range_time)
elif product_information["projection"] == "Ground Range":
ground_range = np.linspace(
0,
            range_pixel_spacing * (number_of_samples - 1),
number_of_samples,
)
coords["ground_range"] = ("pixel", ground_range)
swap_dims = {"line": "azimuth_time", "pixel": "ground_range"}
else:
raise ValueError(f"unknown projection {product_information['projection']}")
# temporary ugly work-around to get fsspec support with rasterio >= 1.3a3
# the try block uses fsspec if rasterio >= 1.3a3 is installed
# the except block falls back to standard file based rasterio
# the with is needed to avoid polluting stderr when the try block fails
with contextlib.redirect_stderr(open("/dev/null", "w")):
try:
arr = xr.open_dataarray(fs.open(measurement), engine="rasterio", chunks=chunks) # type: ignore
except AttributeError:
arr = xr.open_dataarray(measurement, engine="rasterio") # type: ignore
arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
arr = arr.rename({"y": "line", "x": "pixel"})
arr = arr.assign_coords(coords)
arr = arr.swap_dims(swap_dims)
arr.attrs.update(attrs)
arr.encoding.update(encoding)
return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def find_bursts_index(
pol_dataset: xr.Dataset,
azimuth_anx_time: float,
use_center: bool = False,
) -> int:
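    # Index of the burst whose first line (or centre line, with use_center=True) is closest in
    # azimuth time to azimuth_anx_time, counted from the ascending node crossing.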
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
azimuth_anx_time = pd.Timedelta(azimuth_anx_time, unit="s")
if use_center:
azimuth_anx_time_center = (
pol_dataset.azimuth_time[lines_per_burst // 2 :: lines_per_burst]
- anx_datetime
)
distance = abs(azimuth_anx_time_center - azimuth_anx_time)
else:
azimuth_anx_time_first_line = (
pol_dataset.azimuth_time[::lines_per_burst] - anx_datetime
)
distance = abs(azimuth_anx_time_first_line - azimuth_anx_time)
return distance.argmin().item() # type: ignore
def crop_burst_dataset(
pol_dataset: xr.Dataset,
burst_index: T.Optional[int] = None,
azimuth_anx_time: T.Optional[float] = None,
use_center: bool = False,
) -> xr.Dataset:
if (burst_index is not None) and (azimuth_anx_time is not None):
raise TypeError(
"only one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index is None:
if azimuth_anx_time is not None:
burst_index = find_bursts_index(
pol_dataset, azimuth_anx_time, use_center=use_center
)
else:
raise TypeError(
"one keyword between 'burst_index' and 'azimuth_anx_time' must be defined"
)
if burst_index < 0 or burst_index >= pol_dataset.attrs["number_of_bursts"]:
raise IndexError(f"burst_index={burst_index} out of bounds")
lines_per_burst = pol_dataset.attrs["lines_per_burst"]
ds = pol_dataset.sel(
line=slice(
lines_per_burst * burst_index, lines_per_burst * (burst_index + 1) - 1
)
)
anx_datetime = np.datetime64(pol_dataset.attrs["sat:anx_datetime"].replace("Z", ""))
burst_azimuth_anx_times = ds.azimuth_time - anx_datetime
ds.attrs["azimuth_anx_time"] = burst_azimuth_anx_times.values[0] / ONE_SECOND
ds = ds.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
ds.attrs["burst_index"] = burst_index
return ds
def mosaic_slc_iw(slc_iw_image: xr.Dataset, crop: int = 90) -> xr.Dataset:
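    # Crop 'crop' lines from both azimuth edges of every burst to drop the overlap,
    # then concatenate the bursts along azimuth_time.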
bursts = []
for i in range(slc_iw_image.attrs["number_of_bursts"]):
burst = crop_burst_dataset(slc_iw_image, burst_index=i)
bursts.append(burst.isel(azimuth_time=slice(crop, -crop)))
return xr.concat(bursts, dim="azimuth_time")
def calibrate_amplitude(
digital_number: xr.DataArray, calibration_lut: xr.DataArray
) -> xr.DataArray:
calibration = calibration_lut.interp(
line=digital_number.line,
pixel=digital_number.pixel,
).astype(np.float32)
amplitude = digital_number / calibration
amplitude.attrs.update(digital_number.attrs)
try:
lut_name = calibration_lut.attrs["long_name"].partition("calibration LUT")[0]
amplitude.attrs["long_name"] = f"amplitude for {lut_name}"
amplitude.attrs["units"] = calibration.attrs["units"]
except KeyError:
pass
return amplitude
def calibrate_intensity(
digital_number: xr.DataArray,
calibration_lut: xr.DataArray,
as_db: bool = False,
min_db: T.Optional[float] = -40.0,
) -> xr.DataArray:
amplitude = calibrate_amplitude(digital_number, calibration_lut)
intensity = abs(amplitude) ** 2
if as_db: | intensity.attrs["units"] = "dB"
else:
intensity.attrs.update(amplitude.attrs)
intensity.attrs["units"] = "m2 m-2"
try:
lut_name = amplitude.attrs["long_name"].partition("amplitude for ")[2]
intensity.attrs["long_name"] = lut_name
except KeyError:
pass
return intensity
def slant_range_time_to_ground_range(
azimuth_time: xr.DataArray,
slant_range_time: xr.DataArray,
coordinate_conversion: xr.DataArray,
) -> xr.DataArray:
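    # Convert slant range time to ground range via the slant-range-to-ground-range (srgr)
    # polynomial interpolated at the given azimuth times.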
slant_range = SPEED_OF_LIGHT / 2.0 * slant_range_time
cc = coordinate_conversion.interp(azimuth_time=azimuth_time)
x = slant_range - cc.sr0
ground_range = (cc.srgrCoefficients * x ** cc.degree).sum("degree")
return ground_range # type: ignore
def assign_slant_range_time_coord(
measurement: xr.Dataset, coordinate_conversion: xr.Dataset
) -> xr.Dataset:
x = measurement.ground_range - coordinate_conversion.gr0
slant_range = (
coordinate_conversion.grsrCoefficients * x ** coordinate_conversion.degree
).sum(dim="degree")
slant_range_coord = slant_range.interp(
azimuth_time=measurement.azimuth_time, ground_range=measurement.ground_range
).data
slant_range_time = 2 / SPEED_OF_LIGHT * slant_range_coord
measurement = measurement.assign_coords(
slant_range_time=(("azimuth_time", "ground_range"), slant_range_time)
) # type: ignore
return measurement
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
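    # Burst id of the form R<relative_orbit:03>-<N|S><lat*10:03>-<E|W><lon*10:04>,
    # e.g. R012-N123-E0456 for lat=12.3, lon=45.6, relative_orbit=12.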
lat = int(round(lat * 10))
lon = int(round(lon * 10))
n_or_s = "N" if lat >= 0 else "S"
e_or_w = "E" if lon >= 0 else "W"
burst_id = f"R{relative_orbit:03}" f"-{n_or_s}{lat:03}" f"-{e_or_w}{lon:04}"
return burst_id
def compute_burst_centres(
gcp: xr.Dataset,
) -> T.Tuple[T.List[float], T.List[float]]:
gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
centre = gc_az_win.mean(["az_win", "slant_range_time"])
centre = centre.isel(azimuth_time=slice(1, None))
return centre.latitude.values.tolist(), centre.longitude.values.tolist()
METADATA_OPENERS = {
"orbit": open_orbit_dataset,
"attitude": open_attitude_dataset,
"azimuth_fm_rate": open_azimuth_fm_rate_dataset,
"dc_estimate": open_dc_estimate_dataset,
"gcp": open_gcp_dataset,
"coordinate_conversion": open_coordinate_conversion_dataset,
"calibration": open_calibration_dataset,
"noise_range": open_noise_range_dataset,
"noise_azimuth": open_noise_azimuth_dataset,
}
def do_override_product_files(
template: str, product_files: T.Dict[str, T.Tuple[str, str, str, str, str]]
) -> T.Dict[str, T.Tuple[str, str, str, str, str]]:
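    # Rewrite each product file path using the template; the template may reference local names
    # such as dirname, prefix, swath, polarization, date, type and ext.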
overridden_product_files = {}
for path, description in product_files.items():
type, prefix, swath, polarization, date = description
ext = os.path.splitext(path)[1]
dirname = os.path.dirname(path)
overridden_path = template.format(**locals())
overridden_product_files[overridden_path] = description
return overridden_product_files
def open_sentinel1_dataset(
product_urlpath: esa_safe.PathType,
*,
drop_variables: T.Optional[T.Tuple[str]] = None,
group: T.Optional[str] = None,
fs: T.Optional[fsspec.AbstractFileSystem] = None,
storage_options: T.Optional[T.Dict[str, T.Any]] = None,
check_files_exist: bool = False,
override_product_files: T.Optional[str] = None,
) -> xr.Dataset:
if drop_variables is not None:
warnings.warn("'drop_variables' is currently ignored")
fs, manifest_path = get_fs_path(product_urlpath, fs, storage_options)
if fs.isdir(manifest_path):
manifest_path = os.path.join(manifest_path, "manifest.safe")
product_path = os.path.dirname(manifest_path)
with fs.open(manifest_path) as file:
product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
if override_product_files:
product_files = do_override_product_files(override_product_files, product_files)
groups = find_available_groups(
product_files, product_path, check_files_exist=check_files_exist, fs=fs
)
group, burst_index = normalise_group(group)
absgroup = f"/{group}"
if group != "" and group not in groups:
raise ValueError(
f"Invalid group {group!r}, please select one of the following groups:"
f"\n{list(groups.keys())}"
)
metadata = ""
ds = xr.Dataset()
if group == "":
subgroups = list(groups)
else:
subgroups = [
g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
]
if group.count("/") == 1:
with fs.open(groups[group][1]) as annotation:
ds = open_pol_dataset(groups[group][0], annotation, fs=fs)
elif group.count("/") == 2:
_, _, metadata = group.split("/", 2)
with fs.open(groups[group][0]) as file:
ds = METADATA_OPENERS[metadata](file)
for data_var in ds.data_vars:
ds.data_vars[data_var].attrs.update(product_attrs)
product_attrs["group"] = absgroup
if len(subgroups):
product_attrs["subgroups"] = subgroups
ds.attrs.update(product_attrs) # type: ignore
if group.count("/") == 1 and burst_index is not None:
ds = crop_burst_dataset(ds, burst_index=burst_index)
conventions.update_attributes(ds, group=metadata)
return ds | intensity = 10.0 * np.log10(intensity)
if min_db is not None:
intensity = np.maximum(intensity, min_db)
intensity.attrs.update(amplitude.attrs) |
config.rs | use crate::line_range::{HighlightedLineRanges, LineRanges};
#[cfg(feature = "paging")]
use crate::paging::PagingMode;
use crate::style::StyleComponents;
use crate::syntax_mapping::SyntaxMapping;
use crate::wrapping::WrappingMode;
#[derive(Debug, Clone)]
pub enum VisibleLines {
/// Show all lines which are included in the line ranges
Ranges(LineRanges),
#[cfg(feature = "git")]
/// Only show lines surrounding added/deleted/modified lines
DiffContext(usize),
}
impl VisibleLines {
pub fn diff_mode(&self) -> bool {
match self {
Self::Ranges(_) => false,
#[cfg(feature = "git")]
Self::DiffContext(_) => true,
}
}
}
impl Default for VisibleLines {
fn default() -> Self {
VisibleLines::Ranges(LineRanges::default())
}
}
#[derive(Debug, Clone, Default)]
pub struct | <'a> {
/// The explicitly configured language, if any
pub language: Option<&'a str>,
/// Whether or not to show/replace non-printable characters like space, tab and newline.
pub show_nonprintable: bool,
/// The character width of the terminal
pub term_width: usize,
/// The width of tab characters.
/// Currently, a value of 0 will cause tabs to be passed through without expanding them.
pub tab_width: usize,
/// Whether or not to simply loop through all input (`cat` mode)
pub loop_through: bool,
/// Whether or not the output should be colorized
pub colored_output: bool,
/// Whether or not the output terminal supports true color
pub true_color: bool,
/// Style elements (grid, line numbers, ...)
pub style_components: StyleComponents,
/// If and how text should be wrapped
pub wrapping_mode: WrappingMode,
/// Pager or STDOUT
#[cfg(feature = "paging")]
pub paging_mode: PagingMode,
/// Specifies which lines should be printed
pub visible_lines: VisibleLines,
/// The syntax highlighting theme
pub theme: String,
/// File extension/name mappings
pub syntax_mapping: SyntaxMapping<'a>,
/// Command to start the pager
pub pager: Option<&'a str>,
/// Whether or not to use ANSI italics
pub use_italic_text: bool,
/// Ranges of lines which should be highlighted with a special background color
pub highlighted_lines: HighlightedLineRanges,
/// Whether or not to allow custom assets. If this is false or if custom assets (a.k.a.
/// cached assets) are not available, assets from the binary will be used instead.
pub use_custom_assets: bool,
    /// Whether or not we fail silently with a non-zero exit code if the syntax is unsupported
pub fail_if_unsupported: bool,
}
#[cfg(all(feature = "minimal-application", feature = "paging"))]
pub fn get_pager_executable(config_pager: Option<&str>) -> Option<String> {
crate::pager::get_pager(config_pager)
.ok()
.flatten()
.map(|pager| pager.bin)
}
#[test]
fn default_config_should_include_all_lines() {
use crate::line_range::RangeCheckResult;
assert_eq!(LineRanges::default().check(17), RangeCheckResult::InRange);
}
#[test]
fn default_config_should_highlight_no_lines() {
use crate::line_range::RangeCheckResult;
assert_ne!(
Config::default().highlighted_lines.0.check(17),
RangeCheckResult::InRange
);
}
| Config |
FunctionsRouter.js | const crypto = require('crypto');
const Router = require('express').Router;
const bodyParser = require('body-parser');
const Validator = require('jsonschema').Validator;
const log = require('../../support/log');
const schemas = require('../../domain/schemas');
const Pipeline = require('../../domain/Pipeline');
const ErrorTracker = require('../../domain/ErrorTracker');
const { StdoutLogStorage, DefaultLogStorage } = require('../../domain/LogStorage');
const FunctionsRequest = require('../FunctionsRequest');
const Metric = require('../../domain/Metric');
const SchemaResponse = require('../SchemaResponse');
const router = new Router();
const { bodyParserLimit } = require('../../support/config');
function codeFileName(namespace, codeId) {
return `${namespace}/${codeId}.js`;
}
router.get('/', async (req, res) => {
const memoryStorage = req.app.get('memoryStorage');
const page = parseInt(req.query.page || '1', 10);
const perPage = parseInt(req.query.perPage || '10', 10);
const { namespace, id } = req.query;
const functionsRequest = new FunctionsRequest(req);
try {
let list = {};
if (namespace) {
list = await memoryStorage.search(namespace, id, page, perPage);
} else {
list = await memoryStorage.listNamespaces(page, perPage);
}
new SchemaResponse(functionsRequest, res, 'functions/list').json(list);
} catch (err) {
log.error(`Error listing namespaces and its functions: ${err}`);
res.status(500).json({ error: err.message });
}
});
router.all('/:namespace/:id*', (req, res, next) => {
req.log = new StdoutLogStorage(req.params.namespace, req.params.id).console;
next();
});
router.put('/:namespace/:id', bodyParser.json({ limit: bodyParserLimit }), async (req, res) => {
const validationResult = new Validator().validate(req.body, schemas['functions/item']);
const memoryStorage = req.app.get('memoryStorage');
if (!validationResult.valid) {
const error = 'Invalid instance';
const details = validationResult.errors.map(e => e.toString());
res.status(400).json({ error, details });
return;
}
const {
namespace,
id,
} = req.params;
const {
code,
env,
} = req.body;
const filename = codeFileName(namespace, id);
const sandbox = req.app.get('sandbox');
const invalid = sandbox.testSyntaxError(filename, code, {
console: new StdoutLogStorage(namespace, id).console,
});
if (invalid) {
req.log.error(`Failed to post code: ${invalid.error}`);
res.status(400).json(invalid);
return;
}
const hash = crypto.createHash('sha1').update(code).digest('hex');
const data = { id, code, hash };
if (env) {
data.env = env;
}
try {
await memoryStorage.putCode(namespace, id, data);
res.set({ ETag: data.hash });
const functionsRequest = new FunctionsRequest(req);
new SchemaResponse(functionsRequest, res, 'functions/item').json(data);
} catch (err) {
log.error(`[${namespace}:${id}] ${err}`);
res.status(500).json({ error: err.message });
}
});
router.put('/:namespace/:id/env/:env', bodyParser.json({ strict: false, limit: bodyParserLimit }), async (req, res) => {
const validationResult = new Validator().validate(req.body, schemas['functions/env']);
const memoryStorage = req.app.get('memoryStorage');
if (!validationResult.valid) {
const error = 'Invalid instance';
const details = validationResult.errors.map(e => e.toString());
res.status(400).json({ error, details });
return;
}
const {
namespace,
id,
env,
} = req.params;
try {
await memoryStorage
.putCodeEnviromentVariable(namespace, id, env, req.body);
res.status(204).end();
} catch (err) {
    log.error(`[${namespace}:${id}] Failed to set environment variable ${env}, error: ${err}`);
res.status(err.statusCode || 500).json({ error: err.message });
}
});
router.delete('/:namespace/:id/env/:env', async (req, res) => {
const {
namespace,
id,
env,
} = req.params;
const memoryStorage = req.app.get('memoryStorage');
try {
await memoryStorage
.deleteCodeEnviromentVariable(namespace, id, env);
res.status(204).end();
} catch (err) {
    log.error(`[${namespace}:${id}] Failed to unset environment variable ${env}, error: ${err}`);
res.status(err.statusCode || 500).json({ error: err.message });
}
});
router.get('/:namespace/:id', async (req, res) => {
const {
namespace,
id,
} = req.params;
const memoryStorage = req.app.get('memoryStorage');
try {
const code = await memoryStorage.getCode(namespace, id);
if (!code) {
const error = 'Code not found';
req.log.error(error);
res.status(404).json({ error });
return;
}
res.set({ ETag: code.hash });
const functionsRequest = new FunctionsRequest(req);
new SchemaResponse(functionsRequest, res, 'functions/item').json(code);
} catch (err) {
req.log.error(`${err}`);
req.log.error(`${err.stack}`);
res.status(500).json({ error: err.message });
}
});
router.delete('/:namespace/:id', async (req, res) => {
const namespace = req.params.namespace;
const id = req.params.id;
const memoryStorage = req.app.get('memoryStorage');
try {
await memoryStorage.deleteCode(namespace, id);
res.status(204).end();
} catch (err) {
req.log.error(`Failed to delete code id: ${err}`);
res.status(500).json({ error: err.message });
}
});
async function | (req, res) {
const { namespace, id } = req.params;
const memoryStorage = req.app.get('memoryStorage');
const sandbox = req.app.get('sandbox');
const filename = codeFileName(namespace, id);
const metric = new Metric('function-run');
const logStorage = new DefaultLogStorage(namespace, id, req);
let code;
try {
code = await memoryStorage.getCodeByCache(namespace, id, {
preCache: (preCode) => {
preCode.script = sandbox.compileCode(filename, preCode.code);
return preCode;
},
});
if (!code) {
const error = new Error(`Code '${namespace}/${id}' is not found`);
error.statusCode = 404;
throw error;
}
} catch (err) {
res.status(err.statusCode || 500).json({ error: err.message });
return;
}
try {
const options = {
console: logStorage.console,
env: code.env,
};
const result = await sandbox.runScript(code.script, req, options);
res.set(result.headers);
res.status(result.status);
res.json(result.body);
const spent = metric.finish({
filename,
status: result.status,
});
logStorage.flush({
status: result.status,
requestTime: spent,
});
} catch (err) {
logStorage.console.error(`Failed to run function: ${err}`);
logStorage.console.error(err.stack);
const status = err.statusCode || 500;
res.status(status).json({ error: err.message });
const spent = metric.finish({
filename,
status,
error: err.message,
});
const logResult = logStorage.flush({
status,
requestTime: spent,
});
const { namespaceSettings } = code;
const { sentryDSN } = namespaceSettings || {};
const extra = Object.assign({ body: req.body }, logResult || {});
const errTracker = new ErrorTracker({
sentryDSN,
filename,
extra,
tags: { codeHash: code.hash },
code: code.code,
});
errTracker.notify(err);
}
}
const methodNotAllowed = (req, res) => res.status(405).send();
router.route('/:namespace/:id/run')
.get(bodyParser.json({ limit: bodyParserLimit }), functionsRunHandler)
.put(bodyParser.json({ limit: bodyParserLimit }), functionsRunHandler)
.post(bodyParser.json({ limit: bodyParserLimit }), functionsRunHandler)
.delete(bodyParser.json({ limit: bodyParserLimit }), methodNotAllowed);
router.put('/pipeline', bodyParser.json({ limit: bodyParserLimit }), async (req, res) => {
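  // Runs the functions listed in the "steps" querystring (each "namespace/id") as a pipeline over this request.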
const memoryStorage = req.app.get('memoryStorage');
const sandbox = req.app.get('sandbox');
let { steps } = req.query;
if (!steps) {
    res.status(400).json({ error: 'Passing steps via the querystring is required' });
return;
}
steps = steps.map((step) => {
const [namespace, id] = step.split('/', 2);
return { namespace, id };
});
try {
const codes = await memoryStorage.getCodesByCache(steps, {
preCache: (code) => {
const filename = codeFileName(code.namespace, code.id);
code.script = sandbox.compileCode(filename, code.code);
return code;
},
});
for (let i = 0; i < codes.length; i += 1) {
if (!codes[i]) {
const { namespace, id } = steps[i];
const e = new Error(`Code '${namespace}/${id}' is not found`);
e.statusCode = 404;
throw e;
}
}
const result = await new Pipeline(sandbox, req, codes).run();
res.set(result.headers);
res.status(result.status);
res.json(result.body);
} catch (err) {
const status = err.statusCode || 500;
res.status(status).json({ error: err.message });
}
});
module.exports = router;
| functionsRunHandler |
defaultTheme.js | import { lighten, darken } from 'polished';
class Colors {
primary = '#001529';
secondary = '#1890ff'; | white = '#ffffff';
black30 = lighten(0.3, this.black);
black54 = lighten(0.54, this.black);
black90 = lighten(0.9, this.black);
background = '#fcfdff';
}
const defaultTheme = {
theme: new Colors()
};
export default defaultTheme; | success = '#43A047';
error = '#f44336';
warning = '#ffc400';
black = '#000000'; |
DependencyManager.d.ts | /**
* @author ChenTao
*
* 'graphql-ts-client' is a graphql client for TypeScript, it has two functionalities:
*
* 1. Supports GraphQL queries with strongly typed code
* | private rootTypeResourceMap;
private fieldResourceMap;
private _idGetter;
constructor(idGetter?: (obj: any) => any);
register(resource: string, fetcher: Fetcher<string, object, object>, fieldDependencies?: readonly Fetcher<string, object, object>[]): void;
unregister(resource: string): void;
resources<TObject extends object>(fetcher: Fetcher<string, TObject, object>, oldObject: TObject | null | undefined, newObject: TObject | null | undefined): string[];
allResources(fetcher: Fetcher<string, object, object>): string[];
private registerTypes;
private registerFields;
private collectResources;
private collectResourcesByAssocaiton;
private collectAllResources;
} | * 2. Automatically infers the type of the returned data according to the strongly typed query
*/
import { Fetcher } from "./Fetcher";
export declare class DependencyManager { |
activities.ts | /**
* The math in this file is ported from GWToolboxpp's source (https://github.com/HasKha/GWToolboxpp/blob/master/GWToolboxdll/Windows/DailyQuestsWindow.cpp)
* @todo make it more robust with date-fns
*/
const MILLISECONDS_PER_DAY = 24 * 60 * 60 * 1000;
const MILLISECONDS_PER_WEEK = MILLISECONDS_PER_DAY * 7;
| import vanguard from '../../assets/activities/vanguard.json';
import wanted from '../../assets/activities/wanted.json';
import zaishenBounty from '../../assets/activities/zaishen-bounty.json';
import zaishenCombat from '../../assets/activities/zaishen-combat.json';
import zaishenMission from '../../assets/activities/zaishen-mission.json';
import zaishenVanquish from '../../assets/activities/zaishen-vanquish.json';
import { intervalToDuration } from 'date-fns';
export const ACTIVITIES = {
'nicholas-sandford': {
data: nicholasSandford,
startDate: new Date(1239260400000),
period: MILLISECONDS_PER_DAY,
},
'nicholas-the-traveler': {
data: nicholasTheTraveler,
startDate: new Date(1323097200000),
period: MILLISECONDS_PER_WEEK,
},
'pve-bonus': {
data: pveBonus,
startDate: new Date(1368457200000),
period: MILLISECONDS_PER_WEEK,
},
'pvp-bonus': {
data: pvpBonus,
startDate: new Date(1368457200000),
period: MILLISECONDS_PER_WEEK,
},
'vanguard': {
data: vanguard,
startDate: new Date(1299168000000),
period: MILLISECONDS_PER_DAY,
},
'wanted': {
data: wanted,
startDate: new Date(1276012800000),
period: MILLISECONDS_PER_DAY,
},
'zaishen-bounty': {
data: zaishenBounty,
startDate: new Date(1244736000000),
period: MILLISECONDS_PER_DAY,
},
'zaishen-combat': {
data: zaishenCombat,
startDate: new Date(1256227200000),
period: MILLISECONDS_PER_DAY,
},
'zaishen-mission': {
data: zaishenMission,
startDate: new Date(1299168000000),
period: MILLISECONDS_PER_DAY,
},
'zaishen-vanquish': {
data: zaishenVanquish,
startDate: new Date(1299168000000),
period: MILLISECONDS_PER_DAY,
},
};
export function getActivity<T extends keyof typeof ACTIVITIES>(type: T, date: Date = new Date()): typeof ACTIVITIES[T]['data'][0] {
const { data, startDate, period } = ACTIVITIES[type];
const index = getIndex(data, startDate, period, date);
return data[index];
}
export function getActivityMeta<T extends keyof typeof ACTIVITIES>(type: T, date: Date = new Date()): {
activity: typeof ACTIVITIES[T]['data'][0],
startDate: Date,
endDate: Date,
weeklyCountdown: string,
dailyCountdown: string,
} {
const { data, startDate, period } = ACTIVITIES[type];
const index = getIndex(data, startDate, period, date);
const endDate = getActivityEndDate(type, date);
return {
activity: data[index],
startDate: getActivityStartDate(type, date),
endDate: getActivityEndDate(type, date),
weeklyCountdown: getWeeklyCountdown(endDate),
dailyCountdown: getDailyCountdown(endDate),
};
}
// @fixme using a generic type for data messes with the functions above, will have to look into this and clean up
// eslint-disable-next-line @typescript-eslint/no-explicit-any
function getIndex(data: any[], startDate: Date, period: number, date: Date) {
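    // Number of completed rotations since startDate, wrapped onto the data array to pick the current entry.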
const elapsedTime = date.getTime() - startDate.getTime();
const elapsedRotations = Math.floor(elapsedTime / period);
return elapsedRotations % data.length;
}
function getActivityStartDate<T extends keyof typeof ACTIVITIES>(type: T, date: Date) {
const { startDate, period } = ACTIVITIES[type];
const elapsedTime = date.getTime() - startDate.getTime();
const timestamp = Math.floor(elapsedTime / period) * period + startDate.getTime();
return new Date(timestamp);
}
function getActivityEndDate<T extends keyof typeof ACTIVITIES>(type: T, date: Date) {
const { startDate, period } = ACTIVITIES[type];
const elapsedTime = date.getTime() - startDate.getTime();
const timestamp = Math.floor(elapsedTime / period) * period + startDate.getTime() + period;
return new Date(timestamp);
}
function getWeeklyCountdown(endDate: Date) {
const now = new Date();
const { days } = intervalToDuration({
start: now,
end: endDate,
});
return `${days} days, ${getDailyCountdown(endDate)}`;
}
function getDailyCountdown(endDate: Date) {
const now = new Date();
    const { hours, minutes } = intervalToDuration({
start: now,
end: endDate,
});
return `${hours}h ${minutes}m`;
} | import nicholasSandford from '../../assets/activities/nicholas-sandford.json';
import nicholasTheTraveler from '../../assets/activities/nicholas-the-traveler.json';
import pveBonus from '../../assets/activities/pve-bonus.json';
import pvpBonus from '../../assets/activities/pvp-bonus.json';
|
ip.go | package utils
import (
"errors"
"net"
)
// LocalIp returns the IP address of the local machine.
func LocalIp() (string, error) {
addrs | , err := net.InterfaceAddrs()
if err != nil {
return "", err
}
for _, address := range addrs {
		// Check whether the address is a loopback address.
if ipnet, ok := address.(*net.IPNet); ok && !ipnet.IP.IsLoopback() {
if ipnet.IP.To4() != nil {
return ipnet.IP.String(), nil
}
}
}
	return "", errors.New("cannot find the client ip address")
}
|
|
server.rs | use anyhow::Result;
use tonic::{
transport::{Certificate, Identity, Server, ServerTlsConfig},
Request, Response, Status,
};
use crate::CertConfig;
use super::greeter_server::{Greeter, GreeterServer};
use super::{HelloReply, HelloRequest};
#[derive(Default)]
pub struct MyGreeter {}
#[tonic::async_trait]
impl Greeter for MyGreeter {
async fn say_hello(
&self,
request: Request<HelloRequest>,
) -> Result<Response<HelloReply>, Status> {
println!("Got a request from {:?}", request.remote_addr());
let reply = HelloReply {
message: format!("Hello {}!", request.into_inner().name),
};
Ok(Response::new(reply))
}
}
pub async fn | (addr: &str, cert_config: CertConfig) -> Result<()> {
let addr = addr.parse().unwrap();
println!("GreeterServer listening on {}", &addr);
let svc = MyGreeter::default();
let identity = Identity::from_pem(cert_config.cert, cert_config.sk);
Server::builder()
.tls_config(ServerTlsConfig::new().identity(identity))?
.add_service(GreeterServer::new(svc))
.serve(addr)
.await?;
Ok(())
}
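/// Like the server above, but additionally requires clients to present a certificate signed by the CA loaded from ca.toml (mutual TLS).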
pub async fn start_server_verify_client_cert(addr: &str, cert_config: CertConfig) -> Result<()> {
    let addr = addr.parse()?;
println!("GreeterServer listening on {}", &addr);
let ca_cert: CertConfig = toml::from_str(include_str!("ca.toml")).unwrap();
let client_ca_cert = Certificate::from_pem(ca_cert.cert);
let svc = MyGreeter::default();
let identity = Identity::from_pem(cert_config.cert, cert_config.sk);
let tls = ServerTlsConfig::new()
.identity(identity)
.client_ca_root(client_ca_cert);
Server::builder()
.tls_config(tls)?
.add_service(GreeterServer::new(svc))
.serve(addr)
.await?;
Ok(())
}
| start_server |
MultiFunction.py | # coding: utf-8
import chainer
import chainer.links as L
# Network definition
class A(chainer.Chain):
def __init__(self):
super(A, self).__init__()
with self.init_scope():
self.l0 = L.Linear(7)
self.l1 = L.Linear(5)
def g(self, y):
return self.l1(y)
def | (sl, x):
x1 = sl.l0(x)
x2 = sl.g(x1)
return x2
# ======================================
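# Generate a ch2o test case for model A from a random float32 input of shape (10, 20).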
import ch2o
if __name__ == '__main__':
import numpy as np
np.random.seed(314)
model = A()
v = np.random.rand(10, 20).astype(np.float32)
ch2o.generate_testcase(model, [v])
| forward |
delete.js | let zoau = require('../../lib/zoau.js');
const ID = process.env.USER;
const DSN1 = `${ID}.ZOAU3A`;
const DSN2 = `${ID}.ZOAU3B`;
const DSN3 = `${ID}.ZOAU3C`;
const DSN4 = `${ID}.ZOAU3D`;
const DSN5 = `${ID}.ZOAU3E`;
const DSNPATTERN = `${ID}.ZOAU3*`;
function | (err) {
throw err;
}
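// End-to-end exercise of the zoau dataset APIs: start clean, create five SEQ datasets, then verify listings as they are deleted.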
async function test() {
try {
console.log(DSNPATTERN);
console.log('Test: delete all datasets matching ZOAU3* pattern, if any');
await zoau.datasets.delete(DSNPATTERN, {'force': true}).then(console.log).catch(errfunc);
console.log('Test: create 5 datasets');
let details = { 'primary_space' : 10 };
let res, exp;
await zoau.datasets.create(DSN1, 'SEQ', details).then(console.log).catch(errfunc);
await zoau.datasets.create(DSN2, 'SEQ', details).then(console.log).catch(errfunc);
await zoau.datasets.create(DSN3, 'SEQ', details).then(console.log).catch(errfunc);
await zoau.datasets.create(DSN4, 'SEQ', details).then(console.log).catch(errfunc);
await zoau.datasets.create(DSN5, 'SEQ', details).then(console.log).catch(errfunc);
console.log('Test: list created datasets matching ZOAU3* pattern');
res = await zoau.datasets.listing(DSNPATTERN, {'detailed' : true}).catch(errfunc);
exp = [ DSN1, DSN2, DSN3, DSN4, DSN5 ];
if (!(res.length == exp.length && res.every(function(elem, i) {
console.log(`TODO: ${elem['name']}`); return elem['name'] === exp[i];
}))) {
errfunc(`unexpected dataset in listing: found ${JSON.stringify(res)}, expected ${exp}`);
}
console.log('Test: delete 1st dataset');
await zoau.datasets.delete(DSN1).then(console.log).catch(errfunc);
console.log('Test: list remaining created datasets');
res = await zoau.datasets.listing(DSNPATTERN, {'detailed' : true}).catch(errfunc);
exp = [ DSN2, DSN3, DSN4, DSN5 ];
if (!(res.length == exp.length && res.every(function(elem, i) {
return elem['name'] === exp[i];
}))) {
errfunc(`unexpected dataset in listing: found ${JSON.stringify(res)}, expected ${exp}`);
}
console.log('Test: delete remaining 4 datasets matching ZOAU3* pattern');
await zoau.datasets.delete(DSNPATTERN).then(console.log).catch(errfunc);
console.log('Test: empty list of created datasets');
res = await zoau.datasets.listing(DSNPATTERN, {'detailed' : true}).catch(errfunc);
if (res.length !== 0) {
errfunc(`expected all datasets in DSNPATTERN to be deleted, found ${res.length} still exist`);
}
console.log('All tests passed.');
} catch(err) {
let json = JSON.parse(err.message);
console.error(`Failed: ${json['command']}`);
console.error(`rc = ${json['rc']}`);
console.error(`stderr = ${json['stderr']}`);
console.error(`stdout = ${json['stdout']}`);
process.exit(-1);
}
}
test();
| errfunc |
meta_graph.rs | // This file is generated by rust-protobuf 2.27.1. Do not edit
// @generated
// https://github.com/rust-lang/rust-clippy/issues/702
#![allow(unknown_lints)]
#![allow(clippy::all)]
#![allow(unused_attributes)]
#![cfg_attr(rustfmt, rustfmt::skip)]
#![allow(box_pointers)]
#![allow(dead_code)]
#![allow(missing_docs)]
#![allow(non_camel_case_types)]
#![allow(non_snake_case)]
#![allow(non_upper_case_globals)]
#![allow(trivial_casts)]
#![allow(unused_imports)]
#![allow(unused_results)]
//! Generated file from `tensorflow/core/protobuf/meta_graph.proto`
/// Generated files are compatible only with the same version
/// of protobuf runtime.
// const _PROTOBUF_VERSION_CHECK: () = ::protobuf::VERSION_2_27_1;
#[derive(PartialEq,Clone,Default)]
pub struct MetaGraphDef {
// message fields
pub meta_info_def: ::protobuf::SingularPtrField<MetaGraphDef_MetaInfoDef>,
pub graph_def: ::protobuf::SingularPtrField<super::graph::GraphDef>,
pub saver_def: ::protobuf::SingularPtrField<super::saver::SaverDef>,
pub collection_def: ::std::collections::HashMap<::std::string::String, CollectionDef>,
pub signature_def: ::std::collections::HashMap<::std::string::String, SignatureDef>,
pub asset_file_def: ::protobuf::RepeatedField<AssetFileDef>,
pub object_graph_def: ::protobuf::SingularPtrField<super::saved_object_graph::SavedObjectGraph>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a MetaGraphDef {
fn default() -> &'a MetaGraphDef {
<MetaGraphDef as ::protobuf::Message>::default_instance()
}
}
impl MetaGraphDef {
pub fn new() -> MetaGraphDef {
::std::default::Default::default()
}
// .tensorflow.MetaGraphDef.MetaInfoDef meta_info_def = 1;
pub fn get_meta_info_def(&self) -> &MetaGraphDef_MetaInfoDef {
self.meta_info_def.as_ref().unwrap_or_else(|| <MetaGraphDef_MetaInfoDef as ::protobuf::Message>::default_instance())
}
pub fn clear_meta_info_def(&mut self) {
self.meta_info_def.clear();
}
pub fn has_meta_info_def(&self) -> bool {
self.meta_info_def.is_some()
}
// Param is passed by value, moved
pub fn set_meta_info_def(&mut self, v: MetaGraphDef_MetaInfoDef) {
self.meta_info_def = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_meta_info_def(&mut self) -> &mut MetaGraphDef_MetaInfoDef {
if self.meta_info_def.is_none() {
self.meta_info_def.set_default();
}
self.meta_info_def.as_mut().unwrap()
}
// Take field
pub fn take_meta_info_def(&mut self) -> MetaGraphDef_MetaInfoDef {
self.meta_info_def.take().unwrap_or_else(|| MetaGraphDef_MetaInfoDef::new())
}
// .tensorflow.GraphDef graph_def = 2;
pub fn get_graph_def(&self) -> &super::graph::GraphDef {
self.graph_def.as_ref().unwrap_or_else(|| <super::graph::GraphDef as ::protobuf::Message>::default_instance())
}
pub fn clear_graph_def(&mut self) {
self.graph_def.clear();
}
pub fn has_graph_def(&self) -> bool {
self.graph_def.is_some()
}
// Param is passed by value, moved
pub fn set_graph_def(&mut self, v: super::graph::GraphDef) {
self.graph_def = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_graph_def(&mut self) -> &mut super::graph::GraphDef {
if self.graph_def.is_none() {
self.graph_def.set_default();
}
self.graph_def.as_mut().unwrap()
}
// Take field
pub fn take_graph_def(&mut self) -> super::graph::GraphDef {
self.graph_def.take().unwrap_or_else(|| super::graph::GraphDef::new())
}
// .tensorflow.SaverDef saver_def = 3;
pub fn get_saver_def(&self) -> &super::saver::SaverDef {
self.saver_def.as_ref().unwrap_or_else(|| <super::saver::SaverDef as ::protobuf::Message>::default_instance())
}
pub fn clear_saver_def(&mut self) {
self.saver_def.clear();
}
pub fn has_saver_def(&self) -> bool {
self.saver_def.is_some()
}
// Param is passed by value, moved
pub fn set_saver_def(&mut self, v: super::saver::SaverDef) {
self.saver_def = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_saver_def(&mut self) -> &mut super::saver::SaverDef {
if self.saver_def.is_none() {
self.saver_def.set_default();
}
self.saver_def.as_mut().unwrap()
}
// Take field
pub fn take_saver_def(&mut self) -> super::saver::SaverDef {
self.saver_def.take().unwrap_or_else(|| super::saver::SaverDef::new())
}
// repeated .tensorflow.MetaGraphDef.CollectionDefEntry collection_def = 4;
pub fn get_collection_def(&self) -> &::std::collections::HashMap<::std::string::String, CollectionDef> {
&self.collection_def
}
pub fn clear_collection_def(&mut self) {
self.collection_def.clear();
}
// Param is passed by value, moved
pub fn set_collection_def(&mut self, v: ::std::collections::HashMap<::std::string::String, CollectionDef>) {
self.collection_def = v;
}
// Mutable pointer to the field.
pub fn mut_collection_def(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, CollectionDef> {
&mut self.collection_def
}
// Take field
pub fn take_collection_def(&mut self) -> ::std::collections::HashMap<::std::string::String, CollectionDef> {
::std::mem::replace(&mut self.collection_def, ::std::collections::HashMap::new())
}
// repeated .tensorflow.MetaGraphDef.SignatureDefEntry signature_def = 5;
pub fn get_signature_def(&self) -> &::std::collections::HashMap<::std::string::String, SignatureDef> {
&self.signature_def
}
pub fn clear_signature_def(&mut self) {
self.signature_def.clear();
}
// Param is passed by value, moved
pub fn set_signature_def(&mut self, v: ::std::collections::HashMap<::std::string::String, SignatureDef>) {
self.signature_def = v;
}
// Mutable pointer to the field.
pub fn mut_signature_def(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, SignatureDef> {
&mut self.signature_def
}
// Take field
pub fn take_signature_def(&mut self) -> ::std::collections::HashMap<::std::string::String, SignatureDef> {
::std::mem::replace(&mut self.signature_def, ::std::collections::HashMap::new())
}
// repeated .tensorflow.AssetFileDef asset_file_def = 6;
pub fn get_asset_file_def(&self) -> &[AssetFileDef] {
&self.asset_file_def
}
pub fn clear_asset_file_def(&mut self) {
self.asset_file_def.clear();
}
// Param is passed by value, moved
pub fn set_asset_file_def(&mut self, v: ::protobuf::RepeatedField<AssetFileDef>) {
self.asset_file_def = v;
}
// Mutable pointer to the field.
pub fn mut_asset_file_def(&mut self) -> &mut ::protobuf::RepeatedField<AssetFileDef> {
&mut self.asset_file_def
}
// Take field
pub fn take_asset_file_def(&mut self) -> ::protobuf::RepeatedField<AssetFileDef> {
::std::mem::replace(&mut self.asset_file_def, ::protobuf::RepeatedField::new())
}
// .tensorflow.SavedObjectGraph object_graph_def = 7;
pub fn get_object_graph_def(&self) -> &super::saved_object_graph::SavedObjectGraph {
self.object_graph_def.as_ref().unwrap_or_else(|| <super::saved_object_graph::SavedObjectGraph as ::protobuf::Message>::default_instance())
}
pub fn clear_object_graph_def(&mut self) {
self.object_graph_def.clear();
}
pub fn has_object_graph_def(&self) -> bool {
self.object_graph_def.is_some()
}
// Param is passed by value, moved
pub fn set_object_graph_def(&mut self, v: super::saved_object_graph::SavedObjectGraph) {
self.object_graph_def = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_object_graph_def(&mut self) -> &mut super::saved_object_graph::SavedObjectGraph {
if self.object_graph_def.is_none() {
self.object_graph_def.set_default();
}
self.object_graph_def.as_mut().unwrap()
}
// Take field
pub fn take_object_graph_def(&mut self) -> super::saved_object_graph::SavedObjectGraph {
self.object_graph_def.take().unwrap_or_else(|| super::saved_object_graph::SavedObjectGraph::new())
}
}
impl ::protobuf::Message for MetaGraphDef {
fn is_initialized(&self) -> bool {
for v in &self.meta_info_def {
if !v.is_initialized() {
return false;
}
};
for v in &self.graph_def {
if !v.is_initialized() {
return false;
}
};
for v in &self.saver_def {
if !v.is_initialized() {
return false;
}
};
for v in &self.asset_file_def {
if !v.is_initialized() {
return false;
}
};
for v in &self.object_graph_def {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.meta_info_def)?;
},
2 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.graph_def)?;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.saver_def)?;
},
4 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<CollectionDef>>(wire_type, is, &mut self.collection_def)?;
},
5 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<SignatureDef>>(wire_type, is, &mut self.signature_def)?;
},
6 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.asset_file_def)?;
},
7 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.object_graph_def)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(ref v) = self.meta_info_def.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.graph_def.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.saver_def.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<CollectionDef>>(4, &self.collection_def);
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<SignatureDef>>(5, &self.signature_def);
for value in &self.asset_file_def {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
if let Some(ref v) = self.object_graph_def.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(ref v) = self.meta_info_def.as_ref() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.graph_def.as_ref() {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.saver_def.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<CollectionDef>>(4, &self.collection_def, os)?;
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<SignatureDef>>(5, &self.signature_def, os)?;
for v in &self.asset_file_def {
os.write_tag(6, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
if let Some(ref v) = self.object_graph_def.as_ref() {
os.write_tag(7, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> MetaGraphDef {
MetaGraphDef::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<MetaGraphDef_MetaInfoDef>>(
"meta_info_def",
|m: &MetaGraphDef| { &m.meta_info_def },
|m: &mut MetaGraphDef| { &mut m.meta_info_def },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::graph::GraphDef>>(
"graph_def",
|m: &MetaGraphDef| { &m.graph_def },
|m: &mut MetaGraphDef| { &mut m.graph_def },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::saver::SaverDef>>(
"saver_def",
|m: &MetaGraphDef| { &m.saver_def },
|m: &mut MetaGraphDef| { &mut m.saver_def },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<CollectionDef>>(
"collection_def",
|m: &MetaGraphDef| { &m.collection_def },
|m: &mut MetaGraphDef| { &mut m.collection_def },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<SignatureDef>>(
"signature_def",
|m: &MetaGraphDef| { &m.signature_def },
|m: &mut MetaGraphDef| { &mut m.signature_def },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<AssetFileDef>>(
"asset_file_def",
|m: &MetaGraphDef| { &m.asset_file_def },
|m: &mut MetaGraphDef| { &mut m.asset_file_def },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::saved_object_graph::SavedObjectGraph>>(
"object_graph_def",
|m: &MetaGraphDef| { &m.object_graph_def },
|m: &mut MetaGraphDef| { &mut m.object_graph_def },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<MetaGraphDef>(
"MetaGraphDef",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static MetaGraphDef {
static instance: ::protobuf::rt::LazyV2<MetaGraphDef> = ::protobuf::rt::LazyV2::INIT;
instance.get(MetaGraphDef::new)
}
}
impl ::protobuf::Clear for MetaGraphDef {
fn clear(&mut self) {
self.meta_info_def.clear();
self.graph_def.clear();
self.saver_def.clear();
self.collection_def.clear();
self.signature_def.clear();
self.asset_file_def.clear();
self.object_graph_def.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for MetaGraphDef {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for MetaGraphDef {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct MetaGraphDef_MetaInfoDef {
// message fields
pub meta_graph_version: ::std::string::String,
pub stripped_op_list: ::protobuf::SingularPtrField<super::op_def::OpList>,
pub any_info: ::protobuf::SingularPtrField<::protobuf::well_known_types::Any>,
pub tags: ::protobuf::RepeatedField<::std::string::String>,
pub tensorflow_version: ::std::string::String,
pub tensorflow_git_version: ::std::string::String,
pub stripped_default_attrs: bool,
pub function_aliases: ::std::collections::HashMap<::std::string::String, ::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a MetaGraphDef_MetaInfoDef {
fn default() -> &'a MetaGraphDef_MetaInfoDef {
<MetaGraphDef_MetaInfoDef as ::protobuf::Message>::default_instance()
}
}
impl MetaGraphDef_MetaInfoDef {
pub fn new() -> MetaGraphDef_MetaInfoDef {
::std::default::Default::default()
}
// string meta_graph_version = 1;
pub fn get_meta_graph_version(&self) -> &str {
&self.meta_graph_version
}
pub fn clear_meta_graph_version(&mut self) {
self.meta_graph_version.clear();
}
// Param is passed by value, moved
pub fn set_meta_graph_version(&mut self, v: ::std::string::String) {
self.meta_graph_version = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_meta_graph_version(&mut self) -> &mut ::std::string::String {
&mut self.meta_graph_version
}
// Take field
pub fn take_meta_graph_version(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.meta_graph_version, ::std::string::String::new())
}
// .tensorflow.OpList stripped_op_list = 2;
pub fn get_stripped_op_list(&self) -> &super::op_def::OpList {
self.stripped_op_list.as_ref().unwrap_or_else(|| <super::op_def::OpList as ::protobuf::Message>::default_instance())
}
pub fn clear_stripped_op_list(&mut self) {
self.stripped_op_list.clear();
}
pub fn has_stripped_op_list(&self) -> bool {
self.stripped_op_list.is_some()
}
// Param is passed by value, moved
pub fn set_stripped_op_list(&mut self, v: super::op_def::OpList) {
self.stripped_op_list = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_stripped_op_list(&mut self) -> &mut super::op_def::OpList {
if self.stripped_op_list.is_none() {
self.stripped_op_list.set_default();
}
self.stripped_op_list.as_mut().unwrap()
}
// Take field
pub fn take_stripped_op_list(&mut self) -> super::op_def::OpList {
self.stripped_op_list.take().unwrap_or_else(|| super::op_def::OpList::new())
}
// .google.protobuf.Any any_info = 3;
pub fn get_any_info(&self) -> &::protobuf::well_known_types::Any {
self.any_info.as_ref().unwrap_or_else(|| <::protobuf::well_known_types::Any as ::protobuf::Message>::default_instance())
}
pub fn clear_any_info(&mut self) {
self.any_info.clear();
}
pub fn has_any_info(&self) -> bool {
self.any_info.is_some()
}
// Param is passed by value, moved
pub fn set_any_info(&mut self, v: ::protobuf::well_known_types::Any) {
self.any_info = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_any_info(&mut self) -> &mut ::protobuf::well_known_types::Any {
if self.any_info.is_none() {
self.any_info.set_default();
}
self.any_info.as_mut().unwrap()
}
// Take field
pub fn take_any_info(&mut self) -> ::protobuf::well_known_types::Any {
self.any_info.take().unwrap_or_else(|| ::protobuf::well_known_types::Any::new())
}
// repeated string tags = 4;
pub fn get_tags(&self) -> &[::std::string::String] {
&self.tags
}
pub fn clear_tags(&mut self) {
self.tags.clear();
}
// Param is passed by value, moved
pub fn set_tags(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.tags = v;
}
// Mutable pointer to the field.
pub fn mut_tags(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.tags
}
// Take field
pub fn take_tags(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.tags, ::protobuf::RepeatedField::new())
}
// string tensorflow_version = 5;
pub fn get_tensorflow_version(&self) -> &str {
&self.tensorflow_version
}
pub fn clear_tensorflow_version(&mut self) {
self.tensorflow_version.clear();
}
// Param is passed by value, moved
pub fn set_tensorflow_version(&mut self, v: ::std::string::String) {
self.tensorflow_version = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tensorflow_version(&mut self) -> &mut ::std::string::String {
&mut self.tensorflow_version
}
// Take field
pub fn take_tensorflow_version(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.tensorflow_version, ::std::string::String::new())
}
// string tensorflow_git_version = 6;
pub fn get_tensorflow_git_version(&self) -> &str {
&self.tensorflow_git_version
}
pub fn clear_tensorflow_git_version(&mut self) {
self.tensorflow_git_version.clear();
}
// Param is passed by value, moved
pub fn set_tensorflow_git_version(&mut self, v: ::std::string::String) {
self.tensorflow_git_version = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tensorflow_git_version(&mut self) -> &mut ::std::string::String {
&mut self.tensorflow_git_version
}
// Take field
pub fn take_tensorflow_git_version(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.tensorflow_git_version, ::std::string::String::new())
}
// bool stripped_default_attrs = 7;
pub fn get_stripped_default_attrs(&self) -> bool {
self.stripped_default_attrs
}
pub fn clear_stripped_default_attrs(&mut self) {
self.stripped_default_attrs = false;
}
// Param is passed by value, moved
pub fn set_stripped_default_attrs(&mut self, v: bool) {
self.stripped_default_attrs = v;
}
// repeated .tensorflow.MetaGraphDef.MetaInfoDef.FunctionAliasesEntry function_aliases = 8;
pub fn get_function_aliases(&self) -> &::std::collections::HashMap<::std::string::String, ::std::string::String> {
&self.function_aliases
}
pub fn clear_function_aliases(&mut self) {
self.function_aliases.clear();
}
// Param is passed by value, moved
pub fn set_function_aliases(&mut self, v: ::std::collections::HashMap<::std::string::String, ::std::string::String>) {
self.function_aliases = v;
}
// Mutable pointer to the field.
pub fn mut_function_aliases(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, ::std::string::String> {
&mut self.function_aliases
}
// Take field
pub fn take_function_aliases(&mut self) -> ::std::collections::HashMap<::std::string::String, ::std::string::String> {
::std::mem::replace(&mut self.function_aliases, ::std::collections::HashMap::new())
}
}
impl ::protobuf::Message for MetaGraphDef_MetaInfoDef {
fn is_initialized(&self) -> bool {
for v in &self.stripped_op_list {
if !v.is_initialized() {
return false;
}
};
for v in &self.any_info {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.meta_graph_version)?;
},
2 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.stripped_op_list)?;
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.any_info)?;
},
4 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.tags)?;
},
5 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.tensorflow_version)?;
},
6 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.tensorflow_git_version)?;
},
7 => {
if wire_type != ::protobuf::wire_format::WireTypeVarint {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
let tmp = is.read_bool()?;
self.stripped_default_attrs = tmp;
},
8 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(wire_type, is, &mut self.function_aliases)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.meta_graph_version.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.meta_graph_version);
}
if let Some(ref v) = self.stripped_op_list.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let Some(ref v) = self.any_info.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
for value in &self.tags {
my_size += ::protobuf::rt::string_size(4, &value);
};
if !self.tensorflow_version.is_empty() {
my_size += ::protobuf::rt::string_size(5, &self.tensorflow_version);
}
if !self.tensorflow_git_version.is_empty() {
my_size += ::protobuf::rt::string_size(6, &self.tensorflow_git_version);
}
if self.stripped_default_attrs != false {
my_size += 2;
}
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(8, &self.function_aliases);
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.meta_graph_version.is_empty() {
os.write_string(1, &self.meta_graph_version)?;
}
if let Some(ref v) = self.stripped_op_list.as_ref() {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let Some(ref v) = self.any_info.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
for v in &self.tags {
os.write_string(4, &v)?;
};
if !self.tensorflow_version.is_empty() {
os.write_string(5, &self.tensorflow_version)?;
}
if !self.tensorflow_git_version.is_empty() {
os.write_string(6, &self.tensorflow_git_version)?;
}
if self.stripped_default_attrs != false {
os.write_bool(7, self.stripped_default_attrs)?;
}
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(8, &self.function_aliases, os)?;
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> MetaGraphDef_MetaInfoDef {
MetaGraphDef_MetaInfoDef::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"meta_graph_version",
|m: &MetaGraphDef_MetaInfoDef| { &m.meta_graph_version },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.meta_graph_version },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::op_def::OpList>>(
"stripped_op_list",
|m: &MetaGraphDef_MetaInfoDef| { &m.stripped_op_list },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.stripped_op_list },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<::protobuf::well_known_types::Any>>(
"any_info",
|m: &MetaGraphDef_MetaInfoDef| { &m.any_info },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.any_info },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"tags",
|m: &MetaGraphDef_MetaInfoDef| { &m.tags },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.tags },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"tensorflow_version",
|m: &MetaGraphDef_MetaInfoDef| { &m.tensorflow_version },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.tensorflow_version },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"tensorflow_git_version",
|m: &MetaGraphDef_MetaInfoDef| { &m.tensorflow_git_version },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.tensorflow_git_version },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeBool>(
"stripped_default_attrs",
|m: &MetaGraphDef_MetaInfoDef| { &m.stripped_default_attrs }, | |m: &MetaGraphDef_MetaInfoDef| { &m.function_aliases },
|m: &mut MetaGraphDef_MetaInfoDef| { &mut m.function_aliases },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<MetaGraphDef_MetaInfoDef>(
"MetaGraphDef.MetaInfoDef",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static MetaGraphDef_MetaInfoDef {
static instance: ::protobuf::rt::LazyV2<MetaGraphDef_MetaInfoDef> = ::protobuf::rt::LazyV2::INIT;
instance.get(MetaGraphDef_MetaInfoDef::new)
}
}
impl ::protobuf::Clear for MetaGraphDef_MetaInfoDef {
fn clear(&mut self) {
self.meta_graph_version.clear();
self.stripped_op_list.clear();
self.any_info.clear();
self.tags.clear();
self.tensorflow_version.clear();
self.tensorflow_git_version.clear();
self.stripped_default_attrs = false;
self.function_aliases.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for MetaGraphDef_MetaInfoDef {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for MetaGraphDef_MetaInfoDef {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef {
// message oneof groups
pub kind: ::std::option::Option<CollectionDef_oneof_kind>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef {
fn default() -> &'a CollectionDef {
<CollectionDef as ::protobuf::Message>::default_instance()
}
}
#[derive(Clone,PartialEq,Debug)]
pub enum CollectionDef_oneof_kind {
node_list(CollectionDef_NodeList),
bytes_list(CollectionDef_BytesList),
int64_list(CollectionDef_Int64List),
float_list(CollectionDef_FloatList),
any_list(CollectionDef_AnyList),
}
impl CollectionDef {
pub fn new() -> CollectionDef {
::std::default::Default::default()
}
// .tensorflow.CollectionDef.NodeList node_list = 1;
pub fn get_node_list(&self) -> &CollectionDef_NodeList {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::node_list(ref v)) => v,
_ => <CollectionDef_NodeList as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_node_list(&mut self) {
self.kind = ::std::option::Option::None;
}
pub fn has_node_list(&self) -> bool {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::node_list(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_node_list(&mut self, v: CollectionDef_NodeList) {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::node_list(v))
}
// Mutable pointer to the field.
pub fn mut_node_list(&mut self) -> &mut CollectionDef_NodeList {
if let ::std::option::Option::Some(CollectionDef_oneof_kind::node_list(_)) = self.kind {
} else {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::node_list(CollectionDef_NodeList::new()));
}
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::node_list(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_node_list(&mut self) -> CollectionDef_NodeList {
if self.has_node_list() {
match self.kind.take() {
::std::option::Option::Some(CollectionDef_oneof_kind::node_list(v)) => v,
_ => panic!(),
}
} else {
CollectionDef_NodeList::new()
}
}
// .tensorflow.CollectionDef.BytesList bytes_list = 2;
pub fn get_bytes_list(&self) -> &CollectionDef_BytesList {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(ref v)) => v,
_ => <CollectionDef_BytesList as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_bytes_list(&mut self) {
self.kind = ::std::option::Option::None;
}
pub fn has_bytes_list(&self) -> bool {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_bytes_list(&mut self, v: CollectionDef_BytesList) {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(v))
}
// Mutable pointer to the field.
pub fn mut_bytes_list(&mut self) -> &mut CollectionDef_BytesList {
if let ::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(_)) = self.kind {
} else {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(CollectionDef_BytesList::new()));
}
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_bytes_list(&mut self) -> CollectionDef_BytesList {
if self.has_bytes_list() {
match self.kind.take() {
::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(v)) => v,
_ => panic!(),
}
} else {
CollectionDef_BytesList::new()
}
}
// .tensorflow.CollectionDef.Int64List int64_list = 3;
pub fn get_int64_list(&self) -> &CollectionDef_Int64List {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(ref v)) => v,
_ => <CollectionDef_Int64List as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_int64_list(&mut self) {
self.kind = ::std::option::Option::None;
}
pub fn has_int64_list(&self) -> bool {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_int64_list(&mut self, v: CollectionDef_Int64List) {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(v))
}
// Mutable pointer to the field.
pub fn mut_int64_list(&mut self) -> &mut CollectionDef_Int64List {
if let ::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(_)) = self.kind {
} else {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(CollectionDef_Int64List::new()));
}
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_int64_list(&mut self) -> CollectionDef_Int64List {
if self.has_int64_list() {
match self.kind.take() {
::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(v)) => v,
_ => panic!(),
}
} else {
CollectionDef_Int64List::new()
}
}
// .tensorflow.CollectionDef.FloatList float_list = 4;
pub fn get_float_list(&self) -> &CollectionDef_FloatList {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::float_list(ref v)) => v,
_ => <CollectionDef_FloatList as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_float_list(&mut self) {
self.kind = ::std::option::Option::None;
}
pub fn has_float_list(&self) -> bool {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::float_list(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_float_list(&mut self, v: CollectionDef_FloatList) {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::float_list(v))
}
// Mutable pointer to the field.
pub fn mut_float_list(&mut self) -> &mut CollectionDef_FloatList {
if let ::std::option::Option::Some(CollectionDef_oneof_kind::float_list(_)) = self.kind {
} else {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::float_list(CollectionDef_FloatList::new()));
}
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::float_list(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_float_list(&mut self) -> CollectionDef_FloatList {
if self.has_float_list() {
match self.kind.take() {
::std::option::Option::Some(CollectionDef_oneof_kind::float_list(v)) => v,
_ => panic!(),
}
} else {
CollectionDef_FloatList::new()
}
}
// .tensorflow.CollectionDef.AnyList any_list = 5;
pub fn get_any_list(&self) -> &CollectionDef_AnyList {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::any_list(ref v)) => v,
_ => <CollectionDef_AnyList as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_any_list(&mut self) {
self.kind = ::std::option::Option::None;
}
pub fn has_any_list(&self) -> bool {
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::any_list(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_any_list(&mut self, v: CollectionDef_AnyList) {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::any_list(v))
}
// Mutable pointer to the field.
pub fn mut_any_list(&mut self) -> &mut CollectionDef_AnyList {
if let ::std::option::Option::Some(CollectionDef_oneof_kind::any_list(_)) = self.kind {
} else {
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::any_list(CollectionDef_AnyList::new()));
}
match self.kind {
::std::option::Option::Some(CollectionDef_oneof_kind::any_list(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_any_list(&mut self) -> CollectionDef_AnyList {
if self.has_any_list() {
match self.kind.take() {
::std::option::Option::Some(CollectionDef_oneof_kind::any_list(v)) => v,
_ => panic!(),
}
} else {
CollectionDef_AnyList::new()
}
}
}
impl ::protobuf::Message for CollectionDef {
fn is_initialized(&self) -> bool {
if let Some(CollectionDef_oneof_kind::node_list(ref v)) = self.kind {
if !v.is_initialized() {
return false;
}
}
if let Some(CollectionDef_oneof_kind::bytes_list(ref v)) = self.kind {
if !v.is_initialized() {
return false;
}
}
if let Some(CollectionDef_oneof_kind::int64_list(ref v)) = self.kind {
if !v.is_initialized() {
return false;
}
}
if let Some(CollectionDef_oneof_kind::float_list(ref v)) = self.kind {
if !v.is_initialized() {
return false;
}
}
if let Some(CollectionDef_oneof_kind::any_list(ref v)) = self.kind {
if !v.is_initialized() {
return false;
}
}
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::node_list(is.read_message()?));
},
2 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::bytes_list(is.read_message()?));
},
3 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::int64_list(is.read_message()?));
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::float_list(is.read_message()?));
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.kind = ::std::option::Option::Some(CollectionDef_oneof_kind::any_list(is.read_message()?));
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let ::std::option::Option::Some(ref v) = self.kind {
match v {
&CollectionDef_oneof_kind::node_list(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&CollectionDef_oneof_kind::bytes_list(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&CollectionDef_oneof_kind::int64_list(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&CollectionDef_oneof_kind::float_list(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&CollectionDef_oneof_kind::any_list(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
};
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let ::std::option::Option::Some(ref v) = self.kind {
match v {
&CollectionDef_oneof_kind::node_list(ref v) => {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&CollectionDef_oneof_kind::bytes_list(ref v) => {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&CollectionDef_oneof_kind::int64_list(ref v) => {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&CollectionDef_oneof_kind::float_list(ref v) => {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&CollectionDef_oneof_kind::any_list(ref v) => {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef {
CollectionDef::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CollectionDef_NodeList>(
"node_list",
CollectionDef::has_node_list,
CollectionDef::get_node_list,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CollectionDef_BytesList>(
"bytes_list",
CollectionDef::has_bytes_list,
CollectionDef::get_bytes_list,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CollectionDef_Int64List>(
"int64_list",
CollectionDef::has_int64_list,
CollectionDef::get_int64_list,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CollectionDef_FloatList>(
"float_list",
CollectionDef::has_float_list,
CollectionDef::get_float_list,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, CollectionDef_AnyList>(
"any_list",
CollectionDef::has_any_list,
CollectionDef::get_any_list,
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef>(
"CollectionDef",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef {
static instance: ::protobuf::rt::LazyV2<CollectionDef> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef::new)
}
}
impl ::protobuf::Clear for CollectionDef {
fn clear(&mut self) {
self.kind = ::std::option::Option::None;
self.kind = ::std::option::Option::None;
self.kind = ::std::option::Option::None;
self.kind = ::std::option::Option::None;
self.kind = ::std::option::Option::None;
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef_NodeList {
// message fields
pub value: ::protobuf::RepeatedField<::std::string::String>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef_NodeList {
fn default() -> &'a CollectionDef_NodeList {
<CollectionDef_NodeList as ::protobuf::Message>::default_instance()
}
}
impl CollectionDef_NodeList {
pub fn new() -> CollectionDef_NodeList {
::std::default::Default::default()
}
// repeated string value = 1;
pub fn get_value(&self) -> &[::std::string::String] {
&self.value
}
pub fn clear_value(&mut self) {
self.value.clear();
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: ::protobuf::RepeatedField<::std::string::String>) {
self.value = v;
}
// Mutable pointer to the field.
pub fn mut_value(&mut self) -> &mut ::protobuf::RepeatedField<::std::string::String> {
&mut self.value
}
// Take field
pub fn take_value(&mut self) -> ::protobuf::RepeatedField<::std::string::String> {
::std::mem::replace(&mut self.value, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for CollectionDef_NodeList {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_string_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.value {
my_size += ::protobuf::rt::string_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.value {
os.write_string(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef_NodeList {
CollectionDef_NodeList::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"value",
|m: &CollectionDef_NodeList| { &m.value },
|m: &mut CollectionDef_NodeList| { &mut m.value },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef_NodeList>(
"CollectionDef.NodeList",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef_NodeList {
static instance: ::protobuf::rt::LazyV2<CollectionDef_NodeList> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef_NodeList::new)
}
}
impl ::protobuf::Clear for CollectionDef_NodeList {
fn clear(&mut self) {
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef_NodeList {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef_NodeList {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef_BytesList {
// message fields
pub value: ::protobuf::RepeatedField<::std::vec::Vec<u8>>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef_BytesList {
fn default() -> &'a CollectionDef_BytesList {
<CollectionDef_BytesList as ::protobuf::Message>::default_instance()
}
}
impl CollectionDef_BytesList {
pub fn new() -> CollectionDef_BytesList {
::std::default::Default::default()
}
// repeated bytes value = 1;
pub fn get_value(&self) -> &[::std::vec::Vec<u8>] {
&self.value
}
pub fn clear_value(&mut self) {
self.value.clear();
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: ::protobuf::RepeatedField<::std::vec::Vec<u8>>) {
self.value = v;
}
// Mutable pointer to the field.
pub fn mut_value(&mut self) -> &mut ::protobuf::RepeatedField<::std::vec::Vec<u8>> {
&mut self.value
}
// Take field
pub fn take_value(&mut self) -> ::protobuf::RepeatedField<::std::vec::Vec<u8>> {
::std::mem::replace(&mut self.value, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for CollectionDef_BytesList {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_bytes_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.value {
my_size += ::protobuf::rt::bytes_size(1, &value);
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.value {
os.write_bytes(1, &v)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef_BytesList {
CollectionDef_BytesList::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeBytes>(
"value",
|m: &CollectionDef_BytesList| { &m.value },
|m: &mut CollectionDef_BytesList| { &mut m.value },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef_BytesList>(
"CollectionDef.BytesList",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef_BytesList {
static instance: ::protobuf::rt::LazyV2<CollectionDef_BytesList> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef_BytesList::new)
}
}
impl ::protobuf::Clear for CollectionDef_BytesList {
fn clear(&mut self) {
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef_BytesList {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef_BytesList {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef_Int64List {
// message fields
pub value: ::std::vec::Vec<i64>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef_Int64List {
fn default() -> &'a CollectionDef_Int64List {
<CollectionDef_Int64List as ::protobuf::Message>::default_instance()
}
}
impl CollectionDef_Int64List {
pub fn new() -> CollectionDef_Int64List {
::std::default::Default::default()
}
// repeated int64 value = 1;
pub fn get_value(&self) -> &[i64] {
&self.value
}
pub fn clear_value(&mut self) {
self.value.clear();
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: ::std::vec::Vec<i64>) {
self.value = v;
}
// Mutable pointer to the field.
pub fn mut_value(&mut self) -> &mut ::std::vec::Vec<i64> {
&mut self.value
}
// Take field
pub fn take_value(&mut self) -> ::std::vec::Vec<i64> {
::std::mem::replace(&mut self.value, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for CollectionDef_Int64List {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_int64_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.value.is_empty() {
my_size += ::protobuf::rt::vec_packed_varint_size(1, &self.value);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.value.is_empty() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
// TODO: Data size is computed again, it should be cached
os.write_raw_varint32(::protobuf::rt::vec_packed_varint_data_size(&self.value))?;
for v in &self.value {
os.write_int64_no_tag(*v)?;
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef_Int64List {
CollectionDef_Int64List::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeInt64>(
"value",
|m: &CollectionDef_Int64List| { &m.value },
|m: &mut CollectionDef_Int64List| { &mut m.value },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef_Int64List>(
"CollectionDef.Int64List",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef_Int64List {
static instance: ::protobuf::rt::LazyV2<CollectionDef_Int64List> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef_Int64List::new)
}
}
impl ::protobuf::Clear for CollectionDef_Int64List {
fn clear(&mut self) {
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef_Int64List {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef_Int64List {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef_FloatList {
// message fields
pub value: ::std::vec::Vec<f32>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef_FloatList {
fn default() -> &'a CollectionDef_FloatList {
<CollectionDef_FloatList as ::protobuf::Message>::default_instance()
}
}
impl CollectionDef_FloatList {
pub fn new() -> CollectionDef_FloatList {
::std::default::Default::default()
}
// repeated float value = 1;
pub fn get_value(&self) -> &[f32] {
&self.value
}
pub fn clear_value(&mut self) {
self.value.clear();
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: ::std::vec::Vec<f32>) {
self.value = v;
}
// Mutable pointer to the field.
pub fn mut_value(&mut self) -> &mut ::std::vec::Vec<f32> {
&mut self.value
}
// Take field
pub fn take_value(&mut self) -> ::std::vec::Vec<f32> {
::std::mem::replace(&mut self.value, ::std::vec::Vec::new())
}
}
impl ::protobuf::Message for CollectionDef_FloatList {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_float_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.value.is_empty() {
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size((self.value.len() * 4) as u32) + (self.value.len() * 4) as u32;
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.value.is_empty() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
// TODO: Data size is computed again, it should be cached
os.write_raw_varint32((self.value.len() * 4) as u32)?;
for v in &self.value {
os.write_float_no_tag(*v)?;
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef_FloatList {
CollectionDef_FloatList::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_vec_accessor::<_, ::protobuf::types::ProtobufTypeFloat>(
"value",
|m: &CollectionDef_FloatList| { &m.value },
|m: &mut CollectionDef_FloatList| { &mut m.value },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef_FloatList>(
"CollectionDef.FloatList",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef_FloatList {
static instance: ::protobuf::rt::LazyV2<CollectionDef_FloatList> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef_FloatList::new)
}
}
impl ::protobuf::Clear for CollectionDef_FloatList {
fn clear(&mut self) {
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef_FloatList {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef_FloatList {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
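// --- Editor's note: the example below is not part of the generated bindings. It is a
// --- minimal sketch of how the scalar list messages above are typically used; the
// --- function name and sample values are illustrative assumptions, while the accessor
// --- and ::protobuf::Message calls are the ones generated/implemented in this file.
#[allow(dead_code)]
fn _example_int64_list_roundtrip() -> ::protobuf::ProtobufResult<()> {
let mut list = CollectionDef_Int64List::new();
list.set_value(vec![1, 2, 3]); // repeated int64 value = 1, written packed on the wire
let bytes = ::protobuf::Message::write_to_bytes(&list)?;
let parsed: CollectionDef_Int64List = ::protobuf::Message::parse_from_bytes(&bytes)?;
assert_eq!(parsed.get_value(), &[1, 2, 3]);
::std::result::Result::Ok(())
}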
#[derive(PartialEq,Clone,Default)]
pub struct CollectionDef_AnyList {
// message fields
pub value: ::protobuf::RepeatedField<::protobuf::well_known_types::Any>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a CollectionDef_AnyList {
fn default() -> &'a CollectionDef_AnyList {
<CollectionDef_AnyList as ::protobuf::Message>::default_instance()
}
}
impl CollectionDef_AnyList {
pub fn new() -> CollectionDef_AnyList {
::std::default::Default::default()
}
// repeated .google.protobuf.Any value = 1;
pub fn get_value(&self) -> &[::protobuf::well_known_types::Any] {
&self.value
}
pub fn clear_value(&mut self) {
self.value.clear();
}
// Param is passed by value, moved
pub fn set_value(&mut self, v: ::protobuf::RepeatedField<::protobuf::well_known_types::Any>) {
self.value = v;
}
// Mutable pointer to the field.
pub fn mut_value(&mut self) -> &mut ::protobuf::RepeatedField<::protobuf::well_known_types::Any> {
&mut self.value
}
// Take field
pub fn take_value(&mut self) -> ::protobuf::RepeatedField<::protobuf::well_known_types::Any> {
::std::mem::replace(&mut self.value, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for CollectionDef_AnyList {
fn is_initialized(&self) -> bool {
for v in &self.value {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.value)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
for value in &self.value {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
for v in &self.value {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> CollectionDef_AnyList {
CollectionDef_AnyList::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<::protobuf::well_known_types::Any>>(
"value",
|m: &CollectionDef_AnyList| { &m.value },
|m: &mut CollectionDef_AnyList| { &mut m.value },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<CollectionDef_AnyList>(
"CollectionDef.AnyList",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static CollectionDef_AnyList {
static instance: ::protobuf::rt::LazyV2<CollectionDef_AnyList> = ::protobuf::rt::LazyV2::INIT;
instance.get(CollectionDef_AnyList::new)
}
}
impl ::protobuf::Clear for CollectionDef_AnyList {
fn clear(&mut self) {
self.value.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for CollectionDef_AnyList {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for CollectionDef_AnyList {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct TensorInfo {
// message fields
pub dtype: super::types::DataType,
pub tensor_shape: ::protobuf::SingularPtrField<super::tensor_shape::TensorShapeProto>,
// message oneof groups
pub encoding: ::std::option::Option<TensorInfo_oneof_encoding>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TensorInfo {
fn default() -> &'a TensorInfo {
<TensorInfo as ::protobuf::Message>::default_instance()
}
}
#[derive(Clone,PartialEq,Debug)]
pub enum TensorInfo_oneof_encoding {
name(::std::string::String),
coo_sparse(TensorInfo_CooSparse),
composite_tensor(TensorInfo_CompositeTensor),
}
impl TensorInfo {
pub fn new() -> TensorInfo {
::std::default::Default::default()
}
// string name = 1;
pub fn get_name(&self) -> &str {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::name(ref v)) => v,
_ => "",
}
}
pub fn clear_name(&mut self) {
self.encoding = ::std::option::Option::None;
}
pub fn has_name(&self) -> bool {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::name(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_name(&mut self, v: ::std::string::String) {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::name(v))
}
// Mutable pointer to the field.
pub fn mut_name(&mut self) -> &mut ::std::string::String {
if let ::std::option::Option::Some(TensorInfo_oneof_encoding::name(_)) = self.encoding {
} else {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::name(::std::string::String::new()));
}
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::name(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_name(&mut self) -> ::std::string::String {
if self.has_name() {
match self.encoding.take() {
::std::option::Option::Some(TensorInfo_oneof_encoding::name(v)) => v,
_ => panic!(),
}
} else {
::std::string::String::new()
}
}
// .tensorflow.TensorInfo.CooSparse coo_sparse = 4;
pub fn get_coo_sparse(&self) -> &TensorInfo_CooSparse {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(ref v)) => v,
_ => <TensorInfo_CooSparse as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_coo_sparse(&mut self) {
self.encoding = ::std::option::Option::None;
}
pub fn has_coo_sparse(&self) -> bool {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_coo_sparse(&mut self, v: TensorInfo_CooSparse) {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(v))
}
// Mutable pointer to the field.
pub fn mut_coo_sparse(&mut self) -> &mut TensorInfo_CooSparse {
if let ::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(_)) = self.encoding {
} else {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(TensorInfo_CooSparse::new()));
}
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_coo_sparse(&mut self) -> TensorInfo_CooSparse {
if self.has_coo_sparse() {
match self.encoding.take() {
::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(v)) => v,
_ => panic!(),
}
} else {
TensorInfo_CooSparse::new()
}
}
// .tensorflow.TensorInfo.CompositeTensor composite_tensor = 5;
pub fn get_composite_tensor(&self) -> &TensorInfo_CompositeTensor {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(ref v)) => v,
_ => <TensorInfo_CompositeTensor as ::protobuf::Message>::default_instance(),
}
}
pub fn clear_composite_tensor(&mut self) {
self.encoding = ::std::option::Option::None;
}
pub fn has_composite_tensor(&self) -> bool {
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(..)) => true,
_ => false,
}
}
// Param is passed by value, moved
pub fn set_composite_tensor(&mut self, v: TensorInfo_CompositeTensor) {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(v))
}
// Mutable pointer to the field.
pub fn mut_composite_tensor(&mut self) -> &mut TensorInfo_CompositeTensor {
if let ::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(_)) = self.encoding {
} else {
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(TensorInfo_CompositeTensor::new()));
}
match self.encoding {
::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(ref mut v)) => v,
_ => panic!(),
}
}
// Take field
pub fn take_composite_tensor(&mut self) -> TensorInfo_CompositeTensor {
if self.has_composite_tensor() {
match self.encoding.take() {
::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(v)) => v,
_ => panic!(),
}
} else {
TensorInfo_CompositeTensor::new()
}
}
// .tensorflow.DataType dtype = 2;
pub fn get_dtype(&self) -> super::types::DataType {
self.dtype
}
pub fn clear_dtype(&mut self) {
self.dtype = super::types::DataType::DT_INVALID;
}
// Param is passed by value, moved
pub fn set_dtype(&mut self, v: super::types::DataType) {
self.dtype = v;
}
// .tensorflow.TensorShapeProto tensor_shape = 3;
pub fn get_tensor_shape(&self) -> &super::tensor_shape::TensorShapeProto {
self.tensor_shape.as_ref().unwrap_or_else(|| <super::tensor_shape::TensorShapeProto as ::protobuf::Message>::default_instance())
}
pub fn clear_tensor_shape(&mut self) {
self.tensor_shape.clear();
}
pub fn has_tensor_shape(&self) -> bool {
self.tensor_shape.is_some()
}
// Param is passed by value, moved
pub fn set_tensor_shape(&mut self, v: super::tensor_shape::TensorShapeProto) {
self.tensor_shape = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tensor_shape(&mut self) -> &mut super::tensor_shape::TensorShapeProto {
if self.tensor_shape.is_none() {
self.tensor_shape.set_default();
}
self.tensor_shape.as_mut().unwrap()
}
// Take field
pub fn take_tensor_shape(&mut self) -> super::tensor_shape::TensorShapeProto {
self.tensor_shape.take().unwrap_or_else(|| super::tensor_shape::TensorShapeProto::new())
}
}
impl ::protobuf::Message for TensorInfo {
fn is_initialized(&self) -> bool {
if let Some(TensorInfo_oneof_encoding::coo_sparse(ref v)) = self.encoding {
if !v.is_initialized() {
return false;
}
}
if let Some(TensorInfo_oneof_encoding::composite_tensor(ref v)) = self.encoding {
if !v.is_initialized() {
return false;
}
}
for v in &self.tensor_shape {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::name(is.read_string()?));
},
4 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::coo_sparse(is.read_message()?));
},
5 => {
if wire_type != ::protobuf::wire_format::WireTypeLengthDelimited {
return ::std::result::Result::Err(::protobuf::rt::unexpected_wire_type(wire_type));
}
self.encoding = ::std::option::Option::Some(TensorInfo_oneof_encoding::composite_tensor(is.read_message()?));
},
2 => {
::protobuf::rt::read_proto3_enum_with_unknown_fields_into(wire_type, is, &mut self.dtype, 2, &mut self.unknown_fields)?
},
3 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.tensor_shape)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if self.dtype != super::types::DataType::DT_INVALID {
my_size += ::protobuf::rt::enum_size(2, self.dtype);
}
if let Some(ref v) = self.tensor_shape.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if let ::std::option::Option::Some(ref v) = self.encoding {
match v {
&TensorInfo_oneof_encoding::name(ref v) => {
my_size += ::protobuf::rt::string_size(1, &v);
},
&TensorInfo_oneof_encoding::coo_sparse(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
&TensorInfo_oneof_encoding::composite_tensor(ref v) => {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
},
};
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if self.dtype != super::types::DataType::DT_INVALID {
os.write_enum(2, ::protobuf::ProtobufEnum::value(&self.dtype))?;
}
if let Some(ref v) = self.tensor_shape.as_ref() {
os.write_tag(3, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if let ::std::option::Option::Some(ref v) = self.encoding {
match v {
&TensorInfo_oneof_encoding::name(ref v) => {
os.write_string(1, v)?;
},
&TensorInfo_oneof_encoding::coo_sparse(ref v) => {
os.write_tag(4, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
&TensorInfo_oneof_encoding::composite_tensor(ref v) => {
os.write_tag(5, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
},
};
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TensorInfo {
TensorInfo::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_string_accessor::<_>(
"name",
TensorInfo::has_name,
TensorInfo::get_name,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, TensorInfo_CooSparse>(
"coo_sparse",
TensorInfo::has_coo_sparse,
TensorInfo::get_coo_sparse,
));
fields.push(::protobuf::reflect::accessor::make_singular_message_accessor::<_, TensorInfo_CompositeTensor>(
"composite_tensor",
TensorInfo::has_composite_tensor,
TensorInfo::get_composite_tensor,
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeEnum<super::types::DataType>>(
"dtype",
|m: &TensorInfo| { &m.dtype },
|m: &mut TensorInfo| { &mut m.dtype },
));
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::tensor_shape::TensorShapeProto>>(
"tensor_shape",
|m: &TensorInfo| { &m.tensor_shape },
|m: &mut TensorInfo| { &mut m.tensor_shape },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TensorInfo>(
"TensorInfo",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TensorInfo {
static instance: ::protobuf::rt::LazyV2<TensorInfo> = ::protobuf::rt::LazyV2::INIT;
instance.get(TensorInfo::new)
}
}
impl ::protobuf::Clear for TensorInfo {
fn clear(&mut self) {
self.encoding = ::std::option::Option::None;
self.dtype = super::types::DataType::DT_INVALID;
self.tensor_shape.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TensorInfo {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TensorInfo {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
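// --- Editor's note: the example below is not part of the generated bindings. It is a
// --- minimal sketch of the oneof `encoding` accessors implemented above; setting one
// --- variant replaces the previously stored one. The tensor name "logits:0" is an
// --- illustrative assumption.
#[allow(dead_code)]
fn _example_tensor_info_oneof() {
let mut info = TensorInfo::new();
info.set_name("logits:0".to_string()); // selects the `name` variant of the oneof
assert!(info.has_name());
info.set_coo_sparse(TensorInfo_CooSparse::new()); // switches the oneof to `coo_sparse`
assert!(!info.has_name() && info.has_coo_sparse());
info.set_dtype(super::types::DataType::DT_FLOAT); // plain (non-oneof) field
assert_eq!(info.get_dtype(), super::types::DataType::DT_FLOAT);
}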
#[derive(PartialEq,Clone,Default)]
pub struct TensorInfo_CooSparse {
// message fields
pub values_tensor_name: ::std::string::String,
pub indices_tensor_name: ::std::string::String,
pub dense_shape_tensor_name: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TensorInfo_CooSparse {
fn default() -> &'a TensorInfo_CooSparse {
<TensorInfo_CooSparse as ::protobuf::Message>::default_instance()
}
}
impl TensorInfo_CooSparse {
pub fn new() -> TensorInfo_CooSparse {
::std::default::Default::default()
}
// string values_tensor_name = 1;
pub fn get_values_tensor_name(&self) -> &str {
&self.values_tensor_name
}
pub fn clear_values_tensor_name(&mut self) {
self.values_tensor_name.clear();
}
// Param is passed by value, moved
pub fn set_values_tensor_name(&mut self, v: ::std::string::String) {
self.values_tensor_name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_values_tensor_name(&mut self) -> &mut ::std::string::String {
&mut self.values_tensor_name
}
// Take field
pub fn take_values_tensor_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.values_tensor_name, ::std::string::String::new())
}
// string indices_tensor_name = 2;
pub fn get_indices_tensor_name(&self) -> &str {
&self.indices_tensor_name
}
pub fn clear_indices_tensor_name(&mut self) {
self.indices_tensor_name.clear();
}
// Param is passed by value, moved
pub fn set_indices_tensor_name(&mut self, v: ::std::string::String) {
self.indices_tensor_name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_indices_tensor_name(&mut self) -> &mut ::std::string::String {
&mut self.indices_tensor_name
}
// Take field
pub fn take_indices_tensor_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.indices_tensor_name, ::std::string::String::new())
}
// string dense_shape_tensor_name = 3;
pub fn get_dense_shape_tensor_name(&self) -> &str {
&self.dense_shape_tensor_name
}
pub fn clear_dense_shape_tensor_name(&mut self) {
self.dense_shape_tensor_name.clear();
}
// Param is passed by value, moved
pub fn set_dense_shape_tensor_name(&mut self, v: ::std::string::String) {
self.dense_shape_tensor_name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_dense_shape_tensor_name(&mut self) -> &mut ::std::string::String {
&mut self.dense_shape_tensor_name
}
// Take field
pub fn take_dense_shape_tensor_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.dense_shape_tensor_name, ::std::string::String::new())
}
}
impl ::protobuf::Message for TensorInfo_CooSparse {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.values_tensor_name)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.indices_tensor_name)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.dense_shape_tensor_name)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if !self.values_tensor_name.is_empty() {
my_size += ::protobuf::rt::string_size(1, &self.values_tensor_name);
}
if !self.indices_tensor_name.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.indices_tensor_name);
}
if !self.dense_shape_tensor_name.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.dense_shape_tensor_name);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if !self.values_tensor_name.is_empty() {
os.write_string(1, &self.values_tensor_name)?;
}
if !self.indices_tensor_name.is_empty() {
os.write_string(2, &self.indices_tensor_name)?;
}
if !self.dense_shape_tensor_name.is_empty() {
os.write_string(3, &self.dense_shape_tensor_name)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TensorInfo_CooSparse {
TensorInfo_CooSparse::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"values_tensor_name",
|m: &TensorInfo_CooSparse| { &m.values_tensor_name },
|m: &mut TensorInfo_CooSparse| { &mut m.values_tensor_name },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"indices_tensor_name",
|m: &TensorInfo_CooSparse| { &m.indices_tensor_name },
|m: &mut TensorInfo_CooSparse| { &mut m.indices_tensor_name },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"dense_shape_tensor_name",
|m: &TensorInfo_CooSparse| { &m.dense_shape_tensor_name },
|m: &mut TensorInfo_CooSparse| { &mut m.dense_shape_tensor_name },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TensorInfo_CooSparse>(
"TensorInfo.CooSparse",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TensorInfo_CooSparse {
static instance: ::protobuf::rt::LazyV2<TensorInfo_CooSparse> = ::protobuf::rt::LazyV2::INIT;
instance.get(TensorInfo_CooSparse::new)
}
}
impl ::protobuf::Clear for TensorInfo_CooSparse {
fn clear(&mut self) {
self.values_tensor_name.clear();
self.indices_tensor_name.clear();
self.dense_shape_tensor_name.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TensorInfo_CooSparse {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TensorInfo_CooSparse {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
#[derive(PartialEq,Clone,Default)]
pub struct TensorInfo_CompositeTensor {
// message fields
pub type_spec: ::protobuf::SingularPtrField<super::struct_pb::TypeSpecProto>,
pub components: ::protobuf::RepeatedField<TensorInfo>,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a TensorInfo_CompositeTensor {
fn default() -> &'a TensorInfo_CompositeTensor {
<TensorInfo_CompositeTensor as ::protobuf::Message>::default_instance()
}
}
impl TensorInfo_CompositeTensor {
pub fn new() -> TensorInfo_CompositeTensor {
::std::default::Default::default()
}
// .tensorflow.TypeSpecProto type_spec = 1;
pub fn get_type_spec(&self) -> &super::struct_pb::TypeSpecProto {
self.type_spec.as_ref().unwrap_or_else(|| <super::struct_pb::TypeSpecProto as ::protobuf::Message>::default_instance())
}
pub fn clear_type_spec(&mut self) {
self.type_spec.clear();
}
pub fn has_type_spec(&self) -> bool {
self.type_spec.is_some()
}
// Param is passed by value, moved
pub fn set_type_spec(&mut self, v: super::struct_pb::TypeSpecProto) {
self.type_spec = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_type_spec(&mut self) -> &mut super::struct_pb::TypeSpecProto {
if self.type_spec.is_none() {
self.type_spec.set_default();
}
self.type_spec.as_mut().unwrap()
}
// Take field
pub fn take_type_spec(&mut self) -> super::struct_pb::TypeSpecProto {
self.type_spec.take().unwrap_or_else(|| super::struct_pb::TypeSpecProto::new())
}
// repeated .tensorflow.TensorInfo components = 2;
pub fn get_components(&self) -> &[TensorInfo] {
&self.components
}
pub fn clear_components(&mut self) {
self.components.clear();
}
// Param is passed by value, moved
pub fn set_components(&mut self, v: ::protobuf::RepeatedField<TensorInfo>) {
self.components = v;
}
// Mutable pointer to the field.
pub fn mut_components(&mut self) -> &mut ::protobuf::RepeatedField<TensorInfo> {
&mut self.components
}
// Take field
pub fn take_components(&mut self) -> ::protobuf::RepeatedField<TensorInfo> {
::std::mem::replace(&mut self.components, ::protobuf::RepeatedField::new())
}
}
impl ::protobuf::Message for TensorInfo_CompositeTensor {
fn is_initialized(&self) -> bool {
for v in &self.type_spec {
if !v.is_initialized() {
return false;
}
};
for v in &self.components {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.type_spec)?;
},
2 => {
::protobuf::rt::read_repeated_message_into(wire_type, is, &mut self.components)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(ref v) = self.type_spec.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
for value in &self.components {
let len = value.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
};
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(ref v) = self.type_spec.as_ref() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
for v in &self.components {
os.write_tag(2, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
};
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> TensorInfo_CompositeTensor {
TensorInfo_CompositeTensor::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<super::struct_pb::TypeSpecProto>>(
"type_spec",
|m: &TensorInfo_CompositeTensor| { &m.type_spec },
|m: &mut TensorInfo_CompositeTensor| { &mut m.type_spec },
));
fields.push(::protobuf::reflect::accessor::make_repeated_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(
"components",
|m: &TensorInfo_CompositeTensor| { &m.components },
|m: &mut TensorInfo_CompositeTensor| { &mut m.components },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<TensorInfo_CompositeTensor>(
"TensorInfo.CompositeTensor",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static TensorInfo_CompositeTensor {
static instance: ::protobuf::rt::LazyV2<TensorInfo_CompositeTensor> = ::protobuf::rt::LazyV2::INIT;
instance.get(TensorInfo_CompositeTensor::new)
}
}
impl ::protobuf::Clear for TensorInfo_CompositeTensor {
fn clear(&mut self) {
self.type_spec.clear();
self.components.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for TensorInfo_CompositeTensor {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for TensorInfo_CompositeTensor {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
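// --- Editor's note: the example below is not part of the generated bindings. It is a
// --- minimal sketch of the RepeatedField<TensorInfo> accessors above; the component
// --- name is an illustrative assumption.
#[allow(dead_code)]
fn _example_composite_tensor_components() {
let mut ct = TensorInfo_CompositeTensor::new();
let mut component = TensorInfo::new();
component.set_name("component:0".to_string());
ct.mut_components().push(component); // RepeatedField supports Vec-like push
assert_eq!(ct.get_components().len(), 1);
}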
#[derive(PartialEq,Clone,Default)]
pub struct SignatureDef {
// message fields
pub inputs: ::std::collections::HashMap<::std::string::String, TensorInfo>,
pub outputs: ::std::collections::HashMap<::std::string::String, TensorInfo>,
pub method_name: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a SignatureDef {
fn default() -> &'a SignatureDef {
<SignatureDef as ::protobuf::Message>::default_instance()
}
}
impl SignatureDef {
pub fn new() -> SignatureDef {
::std::default::Default::default()
}
// repeated .tensorflow.SignatureDef.InputsEntry inputs = 1;
pub fn get_inputs(&self) -> &::std::collections::HashMap<::std::string::String, TensorInfo> {
&self.inputs
}
pub fn clear_inputs(&mut self) {
self.inputs.clear();
}
// Param is passed by value, moved
pub fn set_inputs(&mut self, v: ::std::collections::HashMap<::std::string::String, TensorInfo>) {
self.inputs = v;
}
// Mutable pointer to the field.
pub fn mut_inputs(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, TensorInfo> {
&mut self.inputs
}
// Take field
pub fn take_inputs(&mut self) -> ::std::collections::HashMap<::std::string::String, TensorInfo> {
::std::mem::replace(&mut self.inputs, ::std::collections::HashMap::new())
}
// repeated .tensorflow.SignatureDef.OutputsEntry outputs = 2;
pub fn get_outputs(&self) -> &::std::collections::HashMap<::std::string::String, TensorInfo> {
&self.outputs
}
pub fn clear_outputs(&mut self) {
self.outputs.clear();
}
// Param is passed by value, moved
pub fn set_outputs(&mut self, v: ::std::collections::HashMap<::std::string::String, TensorInfo>) {
self.outputs = v;
}
// Mutable pointer to the field.
pub fn mut_outputs(&mut self) -> &mut ::std::collections::HashMap<::std::string::String, TensorInfo> {
&mut self.outputs
}
// Take field
pub fn take_outputs(&mut self) -> ::std::collections::HashMap<::std::string::String, TensorInfo> {
::std::mem::replace(&mut self.outputs, ::std::collections::HashMap::new())
}
// string method_name = 3;
pub fn get_method_name(&self) -> &str {
&self.method_name
}
pub fn clear_method_name(&mut self) {
self.method_name.clear();
}
// Param is passed by value, moved
pub fn set_method_name(&mut self, v: ::std::string::String) {
self.method_name = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_method_name(&mut self) -> &mut ::std::string::String {
&mut self.method_name
}
// Take field
pub fn take_method_name(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.method_name, ::std::string::String::new())
}
}
impl ::protobuf::Message for SignatureDef {
fn is_initialized(&self) -> bool {
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(wire_type, is, &mut self.inputs)?;
},
2 => {
::protobuf::rt::read_map_into::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(wire_type, is, &mut self.outputs)?;
},
3 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.method_name)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(1, &self.inputs);
my_size += ::protobuf::rt::compute_map_size::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(2, &self.outputs);
if !self.method_name.is_empty() {
my_size += ::protobuf::rt::string_size(3, &self.method_name);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(1, &self.inputs, os)?;
::protobuf::rt::write_map_with_cached_sizes::<::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(2, &self.outputs, os)?;
if !self.method_name.is_empty() {
os.write_string(3, &self.method_name)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> SignatureDef {
SignatureDef::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(
"inputs",
|m: &SignatureDef| { &m.inputs },
|m: &mut SignatureDef| { &mut m.inputs },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(
"outputs",
|m: &SignatureDef| { &m.outputs },
|m: &mut SignatureDef| { &mut m.outputs },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"method_name",
|m: &SignatureDef| { &m.method_name },
|m: &mut SignatureDef| { &mut m.method_name },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<SignatureDef>(
"SignatureDef",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static SignatureDef {
static instance: ::protobuf::rt::LazyV2<SignatureDef> = ::protobuf::rt::LazyV2::INIT;
instance.get(SignatureDef::new)
}
}
impl ::protobuf::Clear for SignatureDef {
fn clear(&mut self) {
self.inputs.clear();
self.outputs.clear();
self.method_name.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for SignatureDef {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for SignatureDef {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
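// --- Editor's note: the example below is not part of the generated bindings. It is a
// --- minimal sketch of the HashMap-backed `inputs`/`outputs` fields above; the key
// --- "x", tensor name "x:0", and method name are illustrative assumptions.
#[allow(dead_code)]
fn _example_signature_def_maps() {
let mut sig = SignatureDef::new();
let mut input = TensorInfo::new();
input.set_name("x:0".to_string());
sig.mut_inputs().insert("x".to_string(), input); // map<string, TensorInfo> input binding
sig.set_method_name("tensorflow/serving/predict".to_string());
assert!(sig.get_inputs().contains_key("x"));
}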
#[derive(PartialEq,Clone,Default)]
pub struct AssetFileDef {
// message fields
pub tensor_info: ::protobuf::SingularPtrField<TensorInfo>,
pub filename: ::std::string::String,
// special fields
pub unknown_fields: ::protobuf::UnknownFields,
pub cached_size: ::protobuf::CachedSize,
}
impl<'a> ::std::default::Default for &'a AssetFileDef {
fn default() -> &'a AssetFileDef {
<AssetFileDef as ::protobuf::Message>::default_instance()
}
}
impl AssetFileDef {
pub fn new() -> AssetFileDef {
::std::default::Default::default()
}
// .tensorflow.TensorInfo tensor_info = 1;
pub fn get_tensor_info(&self) -> &TensorInfo {
self.tensor_info.as_ref().unwrap_or_else(|| <TensorInfo as ::protobuf::Message>::default_instance())
}
pub fn clear_tensor_info(&mut self) {
self.tensor_info.clear();
}
pub fn has_tensor_info(&self) -> bool {
self.tensor_info.is_some()
}
// Param is passed by value, moved
pub fn set_tensor_info(&mut self, v: TensorInfo) {
self.tensor_info = ::protobuf::SingularPtrField::some(v);
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_tensor_info(&mut self) -> &mut TensorInfo {
if self.tensor_info.is_none() {
self.tensor_info.set_default();
}
self.tensor_info.as_mut().unwrap()
}
// Take field
pub fn take_tensor_info(&mut self) -> TensorInfo {
self.tensor_info.take().unwrap_or_else(|| TensorInfo::new())
}
// string filename = 2;
pub fn get_filename(&self) -> &str {
&self.filename
}
pub fn clear_filename(&mut self) {
self.filename.clear();
}
// Param is passed by value, moved
pub fn set_filename(&mut self, v: ::std::string::String) {
self.filename = v;
}
// Mutable pointer to the field.
// If field is not initialized, it is initialized with default value first.
pub fn mut_filename(&mut self) -> &mut ::std::string::String {
&mut self.filename
}
// Take field
pub fn take_filename(&mut self) -> ::std::string::String {
::std::mem::replace(&mut self.filename, ::std::string::String::new())
}
}
impl ::protobuf::Message for AssetFileDef {
fn is_initialized(&self) -> bool {
for v in &self.tensor_info {
if !v.is_initialized() {
return false;
}
};
true
}
fn merge_from(&mut self, is: &mut ::protobuf::CodedInputStream<'_>) -> ::protobuf::ProtobufResult<()> {
while !is.eof()? {
let (field_number, wire_type) = is.read_tag_unpack()?;
match field_number {
1 => {
::protobuf::rt::read_singular_message_into(wire_type, is, &mut self.tensor_info)?;
},
2 => {
::protobuf::rt::read_singular_proto3_string_into(wire_type, is, &mut self.filename)?;
},
_ => {
::protobuf::rt::read_unknown_or_skip_group(field_number, wire_type, is, self.mut_unknown_fields())?;
},
};
}
::std::result::Result::Ok(())
}
// Compute sizes of nested messages
#[allow(unused_variables)]
fn compute_size(&self) -> u32 {
let mut my_size = 0;
if let Some(ref v) = self.tensor_info.as_ref() {
let len = v.compute_size();
my_size += 1 + ::protobuf::rt::compute_raw_varint32_size(len) + len;
}
if !self.filename.is_empty() {
my_size += ::protobuf::rt::string_size(2, &self.filename);
}
my_size += ::protobuf::rt::unknown_fields_size(self.get_unknown_fields());
self.cached_size.set(my_size);
my_size
}
fn write_to_with_cached_sizes(&self, os: &mut ::protobuf::CodedOutputStream<'_>) -> ::protobuf::ProtobufResult<()> {
if let Some(ref v) = self.tensor_info.as_ref() {
os.write_tag(1, ::protobuf::wire_format::WireTypeLengthDelimited)?;
os.write_raw_varint32(v.get_cached_size())?;
v.write_to_with_cached_sizes(os)?;
}
if !self.filename.is_empty() {
os.write_string(2, &self.filename)?;
}
os.write_unknown_fields(self.get_unknown_fields())?;
::std::result::Result::Ok(())
}
fn get_cached_size(&self) -> u32 {
self.cached_size.get()
}
fn get_unknown_fields(&self) -> &::protobuf::UnknownFields {
&self.unknown_fields
}
fn mut_unknown_fields(&mut self) -> &mut ::protobuf::UnknownFields {
&mut self.unknown_fields
}
fn as_any(&self) -> &dyn (::std::any::Any) {
self as &dyn (::std::any::Any)
}
fn as_any_mut(&mut self) -> &mut dyn (::std::any::Any) {
self as &mut dyn (::std::any::Any)
}
fn into_any(self: ::std::boxed::Box<Self>) -> ::std::boxed::Box<dyn (::std::any::Any)> {
self
}
fn descriptor(&self) -> &'static ::protobuf::reflect::MessageDescriptor {
Self::descriptor_static()
}
fn new() -> AssetFileDef {
AssetFileDef::new()
}
fn descriptor_static() -> &'static ::protobuf::reflect::MessageDescriptor {
static descriptor: ::protobuf::rt::LazyV2<::protobuf::reflect::MessageDescriptor> = ::protobuf::rt::LazyV2::INIT;
descriptor.get(|| {
let mut fields = ::std::vec::Vec::new();
fields.push(::protobuf::reflect::accessor::make_singular_ptr_field_accessor::<_, ::protobuf::types::ProtobufTypeMessage<TensorInfo>>(
"tensor_info",
|m: &AssetFileDef| { &m.tensor_info },
|m: &mut AssetFileDef| { &mut m.tensor_info },
));
fields.push(::protobuf::reflect::accessor::make_simple_field_accessor::<_, ::protobuf::types::ProtobufTypeString>(
"filename",
|m: &AssetFileDef| { &m.filename },
|m: &mut AssetFileDef| { &mut m.filename },
));
::protobuf::reflect::MessageDescriptor::new_pb_name::<AssetFileDef>(
"AssetFileDef",
fields,
file_descriptor_proto()
)
})
}
fn default_instance() -> &'static AssetFileDef {
static instance: ::protobuf::rt::LazyV2<AssetFileDef> = ::protobuf::rt::LazyV2::INIT;
instance.get(AssetFileDef::new)
}
}
impl ::protobuf::Clear for AssetFileDef {
fn clear(&mut self) {
self.tensor_info.clear();
self.filename.clear();
self.unknown_fields.clear();
}
}
impl ::std::fmt::Debug for AssetFileDef {
fn fmt(&self, f: &mut ::std::fmt::Formatter<'_>) -> ::std::fmt::Result {
::protobuf::text_format::fmt(self, f)
}
}
impl ::protobuf::reflect::ProtobufValue for AssetFileDef {
fn as_ref(&self) -> ::protobuf::reflect::ReflectValueRef {
::protobuf::reflect::ReflectValueRef::Message(self)
}
}
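// --- Editor's note: the example below is not part of the generated bindings. It is a
// --- minimal sketch of the singular message accessors and the Clear impl above; the
// --- filename and tensor name are illustrative assumptions.
#[allow(dead_code)]
fn _example_asset_file_def_clear() {
let mut asset = AssetFileDef::new();
asset.set_filename("vocab.txt".to_string());
asset.mut_tensor_info().set_name("asset_path:0".to_string()); // lazily initializes tensor_info
assert!(asset.has_tensor_info());
::protobuf::Clear::clear(&mut asset); // resets every field to its default
assert!(!asset.has_tensor_info() && asset.get_filename().is_empty());
}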
static file_descriptor_proto_data: &'static [u8] = b"\
\n)tensorflow/core/protobuf/meta_graph.proto\x12\ntensorflow\x1a\x19goog\
le/protobuf/any.proto\x1a%tensorflow/core/framework/graph.proto\x1a&tens\
orflow/core/framework/op_def.proto\x1a,tensorflow/core/framework/tensor_\
shape.proto\x1a%tensorflow/core/framework/types.proto\x1a1tensorflow/cor\
e/protobuf/saved_object_graph.proto\x1a$tensorflow/core/protobuf/saver.p\
roto\x1a%tensorflow/core/protobuf/struct.proto\"\xa9\t\n\x0cMetaGraphDef\
\x12H\n\rmeta_info_def\x18\x01\x20\x01(\x0b2$.tensorflow.MetaGraphDef.Me\
taInfoDefR\x0bmetaInfoDef\x121\n\tgraph_def\x18\x02\x20\x01(\x0b2\x14.te\
nsorflow.GraphDefR\x08graphDef\x121\n\tsaver_def\x18\x03\x20\x01(\x0b2\
\x14.tensorflow.SaverDefR\x08saverDef\x12R\n\x0ecollection_def\x18\x04\
\x20\x03(\x0b2+.tensorflow.MetaGraphDef.CollectionDefEntryR\rcollectionD\
ef\x12O\n\rsignature_def\x18\x05\x20\x03(\x0b2*.tensorflow.MetaGraphDef.\
SignatureDefEntryR\x0csignatureDef\x12>\n\x0easset_file_def\x18\x06\x20\
\x03(\x0b2\x18.tensorflow.AssetFileDefR\x0cassetFileDef\x12F\n\x10object\
_graph_def\x18\x07\x20\x01(\x0b2\x1c.tensorflow.SavedObjectGraphR\x0eobj\
ectGraphDef\x1a\x83\x04\n\x0bMetaInfoDef\x12,\n\x12meta_graph_version\
\x18\x01\x20\x01(\tR\x10metaGraphVersion\x12<\n\x10stripped_op_list\x18\
\x02\x20\x01(\x0b2\x12.tensorflow.OpListR\x0estrippedOpList\x12/\n\x08an\
y_info\x18\x03\x20\x01(\x0b2\x14.google.protobuf.AnyR\x07anyInfo\x12\x12\
\n\x04tags\x18\x04\x20\x03(\tR\x04tags\x12-\n\x12tensorflow_version\x18\
\x05\x20\x01(\tR\x11tensorflowVersion\x124\n\x16tensorflow_git_version\
\x18\x06\x20\x01(\tR\x14tensorflowGitVersion\x124\n\x16stripped_default_\
attrs\x18\x07\x20\x01(\x08R\x14strippedDefaultAttrs\x12d\n\x10function_a\
liases\x18\x08\x20\x03(\x0b29.tensorflow.MetaGraphDef.MetaInfoDef.Functi\
onAliasesEntryR\x0ffunctionAliases\x1aB\n\x14FunctionAliasesEntry\x12\
\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12\x14\n\x05value\x18\x02\x20\
\x01(\tR\x05value:\x028\x01\x1a[\n\x12CollectionDefEntry\x12\x10\n\x03ke\
y\x18\x01\x20\x01(\tR\x03key\x12/\n\x05value\x18\x02\x20\x01(\x0b2\x19.t\
ensorflow.CollectionDefR\x05value:\x028\x01\x1aY\n\x11SignatureDefEntry\
\x12\x10\n\x03key\x18\x01\x20\x01(\tR\x03key\x12.\n\x05value\x18\x02\x20\
\x01(\x0b2\x18.tensorflow.SignatureDefR\x05value:\x028\x01\"\xb6\x04\n\r\
CollectionDef\x12A\n\tnode_list\x18\x01\x20\x01(\x0b2\".tensorflow.Colle\
ctionDef.NodeListH\0R\x08nodeList\x12D\n\nbytes_list\x18\x02\x20\x01(\
\x0b2#.tensorflow.CollectionDef.BytesListH\0R\tbytesList\x12D\n\nint64_l\
ist\x18\x03\x20\x01(\x0b2#.tensorflow.CollectionDef.Int64ListH\0R\tint64\
List\x12D\n\nfloat_list\x18\x04\x20\x01(\x0b2#.tensorflow.CollectionDef.\
FloatListH\0R\tfloatList\x12>\n\x08any_list\x18\x05\x20\x01(\x0b2!.tenso\
rflow.CollectionDef.AnyListH\0R\x07anyList\x1a\x20\n\x08NodeList\x12\x14\
\n\x05value\x18\x01\x20\x03(\tR\x05value\x1a!\n\tBytesList\x12\x14\n\x05\
value\x18\x01\x20\x03(\x0cR\x05value\x1a%\n\tInt64List\x12\x18\n\x05valu\
e\x18\x01\x20\x03(\x03R\x05valueB\x02\x10\x01\x1a%\n\tFloatList\x12\x18\
\n\x05value\x18\x01\x20\x03(\x02R\x05valueB\x02\x10\x01\x1a5\n\x07AnyLis\
t\x12*\n\x05value\x18\x01\x20\x03(\x0b2\x14.google.protobuf.AnyR\x05valu\
eB\x06\n\x04kind\"\xda\x04\n\nTensorInfo\x12\x14\n\x04name\x18\x01\x20\
\x01(\tH\0R\x04name\x12A\n\ncoo_sparse\x18\x04\x20\x01(\x0b2\x20.tensorf\
low.TensorInfo.CooSparseH\0R\tcooSparse\x12S\n\x10composite_tensor\x18\
\x05\x20\x01(\x0b2&.tensorflow.TensorInfo.CompositeTensorH\0R\x0fcomposi\
teTensor\x12*\n\x05dtype\x18\x02\x20\x01(\x0e2\x14.tensorflow.DataTypeR\
\x05dtype\x12?\n\x0ctensor_shape\x18\x03\x20\x01(\x0b2\x1c.tensorflow.Te\
nsorShapeProtoR\x0btensorShape\x1a\xa0\x01\n\tCooSparse\x12,\n\x12values\
_tensor_name\x18\x01\x20\x01(\tR\x10valuesTensorName\x12.\n\x13indices_t\
ensor_name\x18\x02\x20\x01(\tR\x11indicesTensorName\x125\n\x17dense_shap\
e_tensor_name\x18\x03\x20\x01(\tR\x14denseShapeTensorName\x1a\x81\x01\n\
\x0fCompositeTensor\x126\n\ttype_spec\x18\x01\x20\x01(\x0b2\x19.tensorfl\
ow.TypeSpecProtoR\x08typeSpec\x126\n\ncomponents\x18\x02\x20\x03(\x0b2\
\x16.tensorflow.TensorInfoR\ncomponentsB\n\n\x08encoding\"\xd5\x02\n\x0c\
SignatureDef\x12<\n\x06inputs\x18\x01\x20\x03(\x0b2$.tensorflow.Signatur\
eDef.InputsEntryR\x06inputs\x12?\n\x07outputs\x18\x02\x20\x03(\x0b2%.ten\
sorflow.SignatureDef.OutputsEntryR\x07outputs\x12\x1f\n\x0bmethod_name\
\x18\x03\x20\x01(\tR\nmethodName\x1aQ\n\x0bInputsEntry\x12\x10\n\x03key\
\x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\x0b2\x16.te\
nsorflow.TensorInfoR\x05value:\x028\x01\x1aR\n\x0cOutputsEntry\x12\x10\n\
\x03key\x18\x01\x20\x01(\tR\x03key\x12,\n\x05value\x18\x02\x20\x01(\x0b2\
\x16.tensorflow.TensorInfoR\x05value:\x028\x01\"c\n\x0cAssetFileDef\x127\
\n\x0btensor_info\x18\x01\x20\x01(\x0b2\x16.tensorflow.TensorInfoR\ntens\
orInfo\x12\x1a\n\x08filename\x18\x02\x20\x01(\tR\x08filenameB\x87\x01\n\
\x18org.tensorflow.frameworkB\x0fMetaGraphProtosP\x01ZUgithub.com/tensor\
flow/tensorflow/tensorflow/go/core/protobuf/for_core_protos_go_proto\xf8\
\x01\x01b\x06proto3\
";
static file_descriptor_proto_lazy: ::protobuf::rt::LazyV2<::protobuf::descriptor::FileDescriptorProto> = ::protobuf::rt::LazyV2::INIT;
fn parse_descriptor_proto() -> ::protobuf::descriptor::FileDescriptorProto {
::protobuf::Message::parse_from_bytes(file_descriptor_proto_data).unwrap()
}
pub fn file_descriptor_proto() -> &'static ::protobuf::descriptor::FileDescriptorProto {
file_descriptor_proto_lazy.get(|| {
parse_descriptor_proto()
})
} | |m: &mut MetaGraphDef_MetaInfoDef| { &mut m.stripped_default_attrs },
));
fields.push(::protobuf::reflect::accessor::make_map_accessor::<_, ::protobuf::types::ProtobufTypeString, ::protobuf::types::ProtobufTypeString>(
"function_aliases", |
jacobian_jax.py | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import time
import numpy
import tensorflow as tf
import jax.numpy as np
from jax import jit, jacfwd, jacrev
numpy.random.seed(0)
@jit
def model(x, layers):
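    # Plain forward pass: multiply the input through each layer's weight matrix in turn
    # (a linear network with no activation functions).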
for i in range(len(layers)):
x = np.dot(x, layers[i])
return x
@jit
def compute_jacobian(input_vector, layer_weights):
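    # Reverse-mode (jacrev) Jacobian of the model output with respect to every
    # layer's weights, flattened into a single 2-D matrix.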
l1 = lambda weights : model(input_vector, weights)
jacobian = jacrev(l1)(layer_weights)
jacobian = flatten_jacobian(jacobian)
return jacobian
@jit
def flatten_jacobian(jacobian):
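    # Reshape each per-layer Jacobian block to 2-D ([-1, params_in_layer]) and
    # concatenate the blocks along the last axis.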
param_shapes = [ j.shape[1:] for j in jacobian ]
flat_shape = [[-1, numpy.prod(js)] for js in param_shapes]
flattened_jacobian = [np.reshape(j, f) for j, f in zip(jacobian, flat_shape)]
jacobian = np.concatenate(flattened_jacobian, axis=-1)
return jacobian
def main(ninput, n_filters_list, n_jacobian_calculations):
|
if __name__ == '__main__':
ninput=24
network_list = [
[32, 32, 16],
[128, 128],
[512, 512, 512],
[16, 16, 16, 16, 16, 16],
[2048],
]
for network in network_list:
ccp = main(ninput,network, 5)
print(ccp)
| cross_check_parameters = {}
# Create an input vector:
input_vector = numpy.random.random([ninput, 1])
n_filters_list.insert(0,1)
n_filters_list.append(1)
cross_check_parameters['input_sum'] = numpy.sum(input_vector)
cross_check_parameters['input_std'] = numpy.std(input_vector)
layer_weights = [ numpy.random.random([n_filters_list[i],n_filters_list[i+1]]) for i in range(len(n_filters_list)-1)]
# Create the model:
M = jit(lambda x : model(x, layer_weights))
# Forward pass:
output = M(input_vector)
# Capture the number of parameters:
nparameters = numpy.sum([ numpy.prod(p.shape) for p in layer_weights ])
cross_check_parameters['n_params'] = nparameters
# Capture the network output:
cross_check_parameters['output_sum'] = numpy.sum(output)
cross_check_parameters['output_std'] = numpy.std(output)
start = time.time()
cross_check_parameters['jacobian_times'] = []
for i in range(n_jacobian_calculations):
this_start = time.time()
jacobian = compute_jacobian(input_vector, layer_weights)
this_end = time.time()
cross_check_parameters['jacobian_times'].append((this_end - this_start))
end = time.time()
cross_check_parameters['n_filters_list'] = n_filters_list
cross_check_parameters['jacobian_sum'] = numpy.sum(jacobian)
cross_check_parameters['jacobian_std'] = numpy.std(jacobian)
cross_check_parameters['jacobian_prod'] = numpy.prod(jacobian)
    cross_check_parameters['jacobian_time'] = (end - start) / n_jacobian_calculations
cross_check_parameters['jacobian_n_calls'] = n_jacobian_calculations
return cross_check_parameters |
AppLayout.spec.js | import {beforeEach, jest, test} from "@jest/globals";
jest.mock('laravel-jetstream')
import {createLocalVue, mount, shallowMount} from '@vue/test-utils'
import {InertiaApp} from '@inertiajs/inertia-vue'
import {InertiaForm} from 'laravel-jetstream'
import AppLayout from "@src/Layouts/AppLayout";
let localVue
beforeEach(() => {
localVue = createLocalVue()
localVue.use(InertiaApp)
localVue.use(InertiaForm)
}); |
test('should mount without crashing', () => {
const wrapper = shallowMount(AppLayout, {localVue})
}) | |
collect_intra_doc_links.rs | use rustc_ast::ast;
use rustc_errors::{Applicability, DiagnosticBuilder};
use rustc_expand::base::SyntaxExtensionKind;
use rustc_feature::UnstableFeatures;
use rustc_hir as hir;
use rustc_hir::def::{
DefKind,
Namespace::{self, *},
PerNS, Res,
};
use rustc_hir::def_id::DefId;
use rustc_middle::ty;
use rustc_resolve::ParentScope;
use rustc_session::lint;
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::Ident;
use rustc_span::symbol::Symbol;
use rustc_span::DUMMY_SP;
use std::cell::Cell;
use std::ops::Range;
use crate::clean::*;
use crate::core::DocContext;
use crate::fold::DocFolder;
use crate::html::markdown::markdown_links;
use crate::passes::Pass;
use super::span_of_attrs;
pub const COLLECT_INTRA_DOC_LINKS: Pass = Pass {
name: "collect-intra-doc-links",
run: collect_intra_doc_links,
description: "reads a crate's documentation to resolve intra-doc-links",
};
pub fn collect_intra_doc_links(krate: Crate, cx: &DocContext<'_>) -> Crate {
if !UnstableFeatures::from_environment().is_nightly_build() {
krate
} else {
let mut coll = LinkCollector::new(cx);
coll.fold_crate(krate)
}
}
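/// Why resolving an intra-doc link failed: either the path itself could not be
/// resolved, or it resolved but carried an invalid URL anchor.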
enum ErrorKind {
ResolutionFailure,
AnchorFailure(AnchorFailure),
}
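/// The reason a `#anchor` on an intra-doc link was rejected.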
enum AnchorFailure {
MultipleAnchors,
Primitive,
Variant,
AssocConstant,
AssocType,
Field,
Method,
}
struct LinkCollector<'a, 'tcx> {
cx: &'a DocContext<'tcx>,
// NOTE: this may not necessarily be a module in the current crate
mod_ids: Vec<DefId>,
/// This is used to store the kind of associated items,
/// because `clean` and the disambiguator code expect them to be different.
/// See the code for associated items on inherent impls for details.
kind_side_channel: Cell<Option<DefKind>>,
}
impl<'a, 'tcx> LinkCollector<'a, 'tcx> {
fn new(cx: &'a DocContext<'tcx>) -> Self {
LinkCollector { cx, mod_ids: Vec::new(), kind_side_channel: Cell::new(None) }
}
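    /// Resolves a path of the form `Enum::Variant::field`, returning the enum's
    /// `Res` together with a `variant.<variant>.field.<field>` URL fragment.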
fn variant_field(
&self,
path_str: &str,
current_item: &Option<String>,
module_id: DefId,
) -> Result<(Res, Option<String>), ErrorKind> {
let cx = self.cx;
let mut split = path_str.rsplitn(3, "::");
let variant_field_name =
split.next().map(|f| Symbol::intern(f)).ok_or(ErrorKind::ResolutionFailure)?;
let variant_name =
split.next().map(|f| Symbol::intern(f)).ok_or(ErrorKind::ResolutionFailure)?;
let path = split
.next()
.map(|f| {
if f == "self" || f == "Self" {
if let Some(name) = current_item.as_ref() {
return name.clone();
}
}
f.to_owned()
})
.ok_or(ErrorKind::ResolutionFailure)?;
let (_, ty_res) = cx
.enter_resolver(|resolver| {
resolver.resolve_str_path_error(DUMMY_SP, &path, TypeNS, module_id)
})
.map_err(|_| ErrorKind::ResolutionFailure)?;
if let Res::Err = ty_res {
return Err(ErrorKind::ResolutionFailure);
}
let ty_res = ty_res.map_id(|_| panic!("unexpected node_id"));
match ty_res {
Res::Def(DefKind::Enum, did) => {
if cx
.tcx
.inherent_impls(did)
.iter()
.flat_map(|imp| cx.tcx.associated_items(*imp).in_definition_order())
.any(|item| item.ident.name == variant_name)
{
return Err(ErrorKind::ResolutionFailure);
}
match cx.tcx.type_of(did).kind {
ty::Adt(def, _) if def.is_enum() => {
if def.all_fields().any(|item| item.ident.name == variant_field_name) {
Ok((
ty_res,
Some(format!(
"variant.{}.field.{}",
variant_name, variant_field_name
)),
))
} else {
Err(ErrorKind::ResolutionFailure)
}
}
_ => Err(ErrorKind::ResolutionFailure),
}
}
_ => Err(ErrorKind::ResolutionFailure),
}
}
/// Resolves a string as a macro.
fn macro_resolve(&self, path_str: &str, parent_id: Option<DefId>) -> Option<Res> {
let cx = self.cx;
let path = ast::Path::from_ident(Ident::from_str(path_str));
cx.enter_resolver(|resolver| {
if let Ok((Some(ext), res)) = resolver.resolve_macro_path(
&path,
None,
&ParentScope::module(resolver.graph_root()),
false,
false,
) {
if let SyntaxExtensionKind::LegacyBang { .. } = ext.kind {
return Some(res.map_id(|_| panic!("unexpected id")));
}
}
if let Some(res) = resolver.all_macros().get(&Symbol::intern(path_str)) {
return Some(res.map_id(|_| panic!("unexpected id")));
}
if let Some(module_id) = parent_id {
if let Ok((_, res)) =
resolver.resolve_str_path_error(DUMMY_SP, path_str, MacroNS, module_id)
{
// don't resolve builtins like `#[derive]`
if let Res::Def(..) = res {
let res = res.map_id(|_| panic!("unexpected node_id"));
return Some(res);
}
}
} else {
debug!("attempting to resolve item without parent module: {}", path_str);
}
None
})
}
/// Resolves a string as a path within a particular namespace. Also returns an optional
/// URL fragment in the case of variants and methods.
fn resolve(
&self,
path_str: &str,
disambiguator: Option<Disambiguator>,
ns: Namespace,
current_item: &Option<String>,
parent_id: Option<DefId>,
extra_fragment: &Option<String>,
item_opt: Option<&Item>,
) -> Result<(Res, Option<String>), ErrorKind> {
let cx = self.cx;
// In case we're in a module, try to resolve the relative path.
if let Some(module_id) = parent_id {
let result = cx.enter_resolver(|resolver| {
resolver.resolve_str_path_error(DUMMY_SP, &path_str, ns, module_id)
});
debug!("{} resolved to {:?} in namespace {:?}", path_str, result, ns);
let result = match result {
Ok((_, Res::Err)) => Err(ErrorKind::ResolutionFailure),
_ => result.map_err(|_| ErrorKind::ResolutionFailure),
};
if let Ok((_, res)) = result {
let res = res.map_id(|_| panic!("unexpected node_id"));
// In case this is a trait item, skip the
// early return and try looking for the trait.
let value = match res {
Res::Def(DefKind::AssocFn | DefKind::AssocConst, _) => true,
Res::Def(DefKind::AssocTy, _) => false,
Res::Def(DefKind::Variant, _) => {
return handle_variant(cx, res, extra_fragment);
}
// Not a trait item; just return what we found.
Res::PrimTy(..) => {
if extra_fragment.is_some() {
return Err(ErrorKind::AnchorFailure(AnchorFailure::Primitive));
}
return Ok((res, Some(path_str.to_owned())));
}
Res::Def(DefKind::Mod, _) => {
// This resolved to a module, but we want primitive types to take precedence instead.
if matches!(
disambiguator,
None | Some(Disambiguator::Namespace(Namespace::TypeNS))
) {
if let Some(prim) = is_primitive(path_str, ns) {
if extra_fragment.is_some() {
return Err(ErrorKind::AnchorFailure(AnchorFailure::Primitive));
}
return Ok((prim, Some(path_str.to_owned())));
}
}
return Ok((res, extra_fragment.clone()));
}
_ => {
return Ok((res, extra_fragment.clone()));
}
};
if value != (ns == ValueNS) {
return Err(ErrorKind::ResolutionFailure);
}
} else if let Some(prim) = is_primitive(path_str, ns) {
if extra_fragment.is_some() |
return Ok((prim, Some(path_str.to_owned())));
} else {
// If resolution failed, it may still be a method
// because methods are not handled by the resolver
// If so, bail when we're not looking for a value.
if ns != ValueNS {
return Err(ErrorKind::ResolutionFailure);
}
}
// Try looking for methods and associated items.
let mut split = path_str.rsplitn(2, "::");
let item_name =
split.next().map(|f| Symbol::intern(f)).ok_or(ErrorKind::ResolutionFailure)?;
let path = split
.next()
.map(|f| {
if f == "self" || f == "Self" {
if let Some(name) = current_item.as_ref() {
return name.clone();
}
}
f.to_owned()
})
.ok_or(ErrorKind::ResolutionFailure)?;
if let Some(prim) = is_primitive(&path, TypeNS) {
let did = primitive_impl(cx, &path).ok_or(ErrorKind::ResolutionFailure)?;
return cx
.tcx
.associated_items(did)
.filter_by_name_unhygienic(item_name)
.next()
.and_then(|item| match item.kind {
ty::AssocKind::Fn => Some("method"),
_ => None,
})
.map(|out| (prim, Some(format!("{}#{}.{}", path, out, item_name))))
.ok_or(ErrorKind::ResolutionFailure);
}
let (_, ty_res) = cx
.enter_resolver(|resolver| {
resolver.resolve_str_path_error(DUMMY_SP, &path, TypeNS, module_id)
})
.map_err(|_| ErrorKind::ResolutionFailure)?;
if let Res::Err = ty_res {
return self.variant_field(path_str, current_item, module_id);
}
let ty_res = ty_res.map_id(|_| panic!("unexpected node_id"));
match ty_res {
Res::Def(
DefKind::Struct | DefKind::Union | DefKind::Enum | DefKind::TyAlias,
did,
) => {
// Checks if item_name belongs to `impl SomeItem`
let impl_item = cx
.tcx
.inherent_impls(did)
.iter()
.flat_map(|imp| cx.tcx.associated_items(*imp).in_definition_order())
.find(|item| item.ident.name == item_name);
let trait_item = item_opt
.and_then(|item| self.cx.as_local_hir_id(item.def_id))
.and_then(|item_hir| {
// Checks if item_name belongs to `impl SomeTrait for SomeItem`
let parent_hir = self.cx.tcx.hir().get_parent_item(item_hir);
let item_parent = self.cx.tcx.hir().find(parent_hir);
match item_parent {
Some(hir::Node::Item(hir::Item {
kind: hir::ItemKind::Impl { of_trait: Some(_), self_ty, .. },
..
})) => cx
.tcx
.associated_item_def_ids(self_ty.hir_id.owner)
.iter()
.map(|child| {
let associated_item = cx.tcx.associated_item(*child);
associated_item
})
.find(|child| child.ident.name == item_name),
_ => None,
}
});
let item = match (impl_item, trait_item) {
(Some(from_impl), Some(_)) => {
// Although it's ambiguous, return impl version for compat. sake.
// To handle that properly resolve() would have to support
// something like
// [`ambi_fn`](<SomeStruct as SomeTrait>::ambi_fn)
Some(from_impl)
}
(None, Some(from_trait)) => Some(from_trait),
(Some(from_impl), None) => Some(from_impl),
_ => None,
};
if let Some(item) = item {
let out = match item.kind {
ty::AssocKind::Fn if ns == ValueNS => "method",
ty::AssocKind::Const if ns == ValueNS => "associatedconstant",
ty::AssocKind::Type if ns == ValueNS => "associatedtype",
_ => return self.variant_field(path_str, current_item, module_id),
};
if extra_fragment.is_some() {
Err(ErrorKind::AnchorFailure(if item.kind == ty::AssocKind::Fn {
AnchorFailure::Method
} else {
AnchorFailure::AssocConstant
}))
} else {
// HACK(jynelson): `clean` expects the type, not the associated item.
// but the disambiguator logic expects the associated item.
// Store the kind in a side channel so that only the disambiguator logic looks at it.
self.kind_side_channel.replace(Some(item.kind.as_def_kind()));
Ok((ty_res, Some(format!("{}.{}", out, item_name))))
}
} else {
match cx.tcx.type_of(did).kind {
ty::Adt(def, _) => {
if let Some(item) = if def.is_enum() {
def.all_fields().find(|item| item.ident.name == item_name)
} else {
def.non_enum_variant()
.fields
.iter()
.find(|item| item.ident.name == item_name)
} {
if extra_fragment.is_some() {
Err(ErrorKind::AnchorFailure(if def.is_enum() {
AnchorFailure::Variant
} else {
AnchorFailure::Field
}))
} else {
Ok((
ty_res,
Some(format!(
"{}.{}",
if def.is_enum() {
"variant"
} else {
"structfield"
},
item.ident
)),
))
}
} else {
self.variant_field(path_str, current_item, module_id)
}
}
_ => self.variant_field(path_str, current_item, module_id),
}
}
}
Res::Def(DefKind::Trait, did) => {
let item = cx
.tcx
.associated_item_def_ids(did)
.iter()
.map(|item| cx.tcx.associated_item(*item))
.find(|item| item.ident.name == item_name);
if let Some(item) = item {
let kind =
match item.kind {
ty::AssocKind::Const if ns == ValueNS => "associatedconstant",
ty::AssocKind::Type if ns == TypeNS => "associatedtype",
ty::AssocKind::Fn if ns == ValueNS => {
if item.defaultness.has_value() { "method" } else { "tymethod" }
}
_ => return self.variant_field(path_str, current_item, module_id),
};
if extra_fragment.is_some() {
Err(ErrorKind::AnchorFailure(if item.kind == ty::AssocKind::Const {
AnchorFailure::AssocConstant
} else if item.kind == ty::AssocKind::Type {
AnchorFailure::AssocType
} else {
AnchorFailure::Method
}))
} else {
let res = Res::Def(item.kind.as_def_kind(), item.def_id);
Ok((res, Some(format!("{}.{}", kind, item_name))))
}
} else {
self.variant_field(path_str, current_item, module_id)
}
}
_ => self.variant_field(path_str, current_item, module_id),
}
} else {
debug!("attempting to resolve item without parent module: {}", path_str);
Err(ErrorKind::ResolutionFailure)
}
}
}
/// Check for resolve collisions between a trait and its derive
///
/// These are common and we should just resolve to the trait in that case
fn is_derive_trait_collision<T>(ns: &PerNS<Option<(Res, T)>>) -> bool {
if let PerNS {
type_ns: Some((Res::Def(DefKind::Trait, _), _)),
macro_ns: Some((Res::Def(DefKind::Macro(MacroKind::Derive), _), _)),
..
} = *ns
{
true
} else {
false
}
}
impl<'a, 'tcx> DocFolder for LinkCollector<'a, 'tcx> {
fn fold_item(&mut self, mut item: Item) -> Option<Item> {
use rustc_middle::ty::DefIdTree;
let parent_node = if item.is_fake() {
// FIXME: is this correct?
None
} else {
let mut current = item.def_id;
// The immediate parent might not always be a module.
// Find the first parent which is.
loop {
if let Some(parent) = self.cx.tcx.parent(current) {
if self.cx.tcx.def_kind(parent) == DefKind::Mod {
break Some(parent);
}
current = parent;
} else {
break None;
}
}
};
if parent_node.is_some() {
trace!("got parent node for {:?} {:?}, id {:?}", item.type_(), item.name, item.def_id);
}
let current_item = match item.inner {
ModuleItem(..) => {
if item.attrs.inner_docs {
if item.def_id.is_top_level_module() { item.name.clone() } else { None }
} else {
match parent_node.or(self.mod_ids.last().copied()) {
Some(parent) if !parent.is_top_level_module() => {
// FIXME: can we pull the parent module's name from elsewhere?
Some(self.cx.tcx.item_name(parent).to_string())
}
_ => None,
}
}
}
ImplItem(Impl { ref for_, .. }) => {
for_.def_id().map(|did| self.cx.tcx.item_name(did).to_string())
}
// we don't display docs on `extern crate` items anyway, so don't process them.
ExternCrateItem(..) => {
debug!("ignoring extern crate item {:?}", item.def_id);
return self.fold_item_recur(item);
}
ImportItem(Import::Simple(ref name, ..)) => Some(name.clone()),
MacroItem(..) => None,
_ => item.name.clone(),
};
if item.is_mod() && item.attrs.inner_docs {
self.mod_ids.push(item.def_id);
}
let cx = self.cx;
let dox = item.attrs.collapsed_doc_value().unwrap_or_else(String::new);
trace!("got documentation '{}'", dox);
// find item's parent to resolve `Self` in item's docs below
let parent_name = self.cx.as_local_hir_id(item.def_id).and_then(|item_hir| {
let parent_hir = self.cx.tcx.hir().get_parent_item(item_hir);
let item_parent = self.cx.tcx.hir().find(parent_hir);
match item_parent {
Some(hir::Node::Item(hir::Item {
kind:
hir::ItemKind::Impl {
self_ty:
hir::Ty {
kind:
hir::TyKind::Path(hir::QPath::Resolved(
_,
hir::Path { segments, .. },
)),
..
},
..
},
..
})) => segments.first().map(|seg| seg.ident.to_string()),
Some(hir::Node::Item(hir::Item {
ident, kind: hir::ItemKind::Enum(..), ..
}))
| Some(hir::Node::Item(hir::Item {
ident, kind: hir::ItemKind::Struct(..), ..
}))
| Some(hir::Node::Item(hir::Item {
ident, kind: hir::ItemKind::Union(..), ..
}))
| Some(hir::Node::Item(hir::Item {
ident, kind: hir::ItemKind::Trait(..), ..
})) => Some(ident.to_string()),
_ => None,
}
});
for (ori_link, link_range) in markdown_links(&dox) {
trace!("considering link '{}'", ori_link);
// Bail early for real links.
if ori_link.contains('/') {
continue;
}
// [] is mostly likely not supposed to be a link
if ori_link.is_empty() {
continue;
}
let link = ori_link.replace("`", "");
let parts = link.split('#').collect::<Vec<_>>();
let (link, extra_fragment) = if parts.len() > 2 {
anchor_failure(cx, &item, &link, &dox, link_range, AnchorFailure::MultipleAnchors);
continue;
} else if parts.len() == 2 {
if parts[0].trim().is_empty() {
// This is an anchor to an element of the current page, nothing to do in here!
continue;
}
(parts[0].to_owned(), Some(parts[1].to_owned()))
} else {
(parts[0].to_owned(), None)
};
let resolved_self;
let mut path_str;
let disambiguator;
let (res, fragment) = {
path_str = if let Ok((d, path)) = Disambiguator::from_str(&link) {
disambiguator = Some(d);
path
} else {
disambiguator = None;
&link
}
.trim();
if path_str.contains(|ch: char| !(ch.is_alphanumeric() || ch == ':' || ch == '_')) {
continue;
}
// In order to correctly resolve intra-doc-links we need to
// pick a base AST node to work from. If the documentation for
// this module came from an inner comment (//!) then we anchor
// our name resolution *inside* the module. If, on the other
// hand it was an outer comment (///) then we anchor the name
// resolution in the parent module on the basis that the names
// used are more likely to be intended to be parent names. For
// this, we set base_node to None for inner comments since
// we've already pushed this node onto the resolution stack but
// for outer comments we explicitly try and resolve against the
// parent_node first.
let base_node = if item.is_mod() && item.attrs.inner_docs {
self.mod_ids.last().copied()
} else {
parent_node
};
// replace `Self` with suitable item's parent name
if path_str.starts_with("Self::") {
if let Some(ref name) = parent_name {
resolved_self = format!("{}::{}", name, &path_str[6..]);
path_str = &resolved_self;
}
}
match disambiguator.map(Disambiguator::ns) {
Some(ns @ ValueNS) => {
match self.resolve(
path_str,
disambiguator,
ns,
¤t_item,
base_node,
&extra_fragment,
Some(&item),
) {
Ok(res) => res,
Err(ErrorKind::ResolutionFailure) => {
resolution_failure(cx, &item, path_str, &dox, link_range);
// This could just be a normal link or a broken link
// we could potentially check if something is
// "intra-doc-link-like" and warn in that case.
continue;
}
Err(ErrorKind::AnchorFailure(msg)) => {
anchor_failure(cx, &item, &ori_link, &dox, link_range, msg);
continue;
}
}
}
Some(ns @ TypeNS) => {
match self.resolve(
path_str,
disambiguator,
ns,
¤t_item,
base_node,
&extra_fragment,
Some(&item),
) {
Ok(res) => res,
Err(ErrorKind::ResolutionFailure) => {
resolution_failure(cx, &item, path_str, &dox, link_range);
// This could just be a normal link.
continue;
}
Err(ErrorKind::AnchorFailure(msg)) => {
anchor_failure(cx, &item, &ori_link, &dox, link_range, msg);
continue;
}
}
}
None => {
// Try everything!
let mut candidates = PerNS {
macro_ns: self
.macro_resolve(path_str, base_node)
.map(|res| (res, extra_fragment.clone())),
type_ns: match self.resolve(
path_str,
disambiguator,
TypeNS,
¤t_item,
base_node,
&extra_fragment,
Some(&item),
) {
Err(ErrorKind::AnchorFailure(msg)) => {
anchor_failure(cx, &item, &ori_link, &dox, link_range, msg);
continue;
}
x => x.ok(),
},
value_ns: match self.resolve(
path_str,
disambiguator,
ValueNS,
¤t_item,
base_node,
&extra_fragment,
Some(&item),
) {
Err(ErrorKind::AnchorFailure(msg)) => {
anchor_failure(cx, &item, &ori_link, &dox, link_range, msg);
continue;
}
x => x.ok(),
}
.and_then(|(res, fragment)| {
// Constructors are picked up in the type namespace.
match res {
Res::Def(DefKind::Ctor(..), _) | Res::SelfCtor(..) => None,
_ => match (fragment, extra_fragment) {
(Some(fragment), Some(_)) => {
// Shouldn't happen but who knows?
Some((res, Some(fragment)))
}
(fragment, None) | (None, fragment) => {
Some((res, fragment))
}
},
}
}),
};
if candidates.is_empty() {
resolution_failure(cx, &item, path_str, &dox, link_range);
// this could just be a normal link
continue;
}
let len = candidates.clone().present_items().count();
if len == 1 {
candidates.present_items().next().unwrap()
} else if len == 2 && is_derive_trait_collision(&candidates) {
candidates.type_ns.unwrap()
} else {
if is_derive_trait_collision(&candidates) {
candidates.macro_ns = None;
}
ambiguity_error(
cx,
&item,
path_str,
&dox,
link_range,
candidates.map(|candidate| candidate.map(|(res, _)| res)),
);
continue;
}
}
Some(MacroNS) => {
if let Some(res) = self.macro_resolve(path_str, base_node) {
(res, extra_fragment)
} else {
resolution_failure(cx, &item, path_str, &dox, link_range);
continue;
}
}
}
};
if let Res::PrimTy(_) = res {
item.attrs.links.push((ori_link, None, fragment));
} else {
debug!("intra-doc link to {} resolved to {:?}", path_str, res);
// Disallow e.g. linking to enums with `struct@`
if let Res::Def(kind, id) = res {
debug!("saw kind {:?} with disambiguator {:?}", kind, disambiguator);
match (self.kind_side_channel.take().unwrap_or(kind), disambiguator) {
| (DefKind::Const | DefKind::ConstParam | DefKind::AssocConst | DefKind::AnonConst, Some(Disambiguator::Kind(DefKind::Const)))
// NOTE: this allows 'method' to mean both normal functions and associated functions
// This can't cause ambiguity because both are in the same namespace.
| (DefKind::Fn | DefKind::AssocFn, Some(Disambiguator::Kind(DefKind::Fn)))
// These are namespaces; allow anything in the namespace to match
| (_, Some(Disambiguator::Namespace(_)))
// If no disambiguator given, allow anything
| (_, None)
// All of these are valid, so do nothing
=> {}
(actual, Some(Disambiguator::Kind(expected))) if actual == expected => {}
(_, Some(Disambiguator::Kind(expected))) => {
// The resolved item did not match the disambiguator; give a better error than 'not found'
let msg = format!("incompatible link kind for `{}`", path_str);
report_diagnostic(cx, &msg, &item, &dox, link_range, |diag, sp| {
// HACK(jynelson): by looking at the source I saw the DefId we pass
// for `expected.descr()` doesn't matter, since it's not a crate
let note = format!("this link resolved to {} {}, which is not {} {}", kind.article(), kind.descr(id), expected.article(), expected.descr(id));
let suggestion = Disambiguator::display_for(kind, path_str);
let help_msg = format!("to link to the {}, use its disambiguator", kind.descr(id));
diag.note(¬e);
if let Some(sp) = sp {
diag.span_suggestion(sp, &help_msg, suggestion, Applicability::MaybeIncorrect);
} else {
diag.help(&format!("{}: {}", help_msg, suggestion));
}
});
continue;
}
}
}
// item can be non-local e.g. when using #[doc(primitive = "pointer")]
if let Some((src_id, dst_id)) = res
.opt_def_id()
.and_then(|def_id| def_id.as_local())
.and_then(|dst_id| item.def_id.as_local().map(|src_id| (src_id, dst_id)))
{
use rustc_hir::def_id::LOCAL_CRATE;
let hir_src = self.cx.tcx.hir().local_def_id_to_hir_id(src_id);
let hir_dst = self.cx.tcx.hir().local_def_id_to_hir_id(dst_id);
if self.cx.tcx.privacy_access_levels(LOCAL_CRATE).is_exported(hir_src)
&& !self.cx.tcx.privacy_access_levels(LOCAL_CRATE).is_exported(hir_dst)
{
privacy_error(cx, &item, &path_str, &dox, link_range);
continue;
}
}
let id = register_res(cx, res);
item.attrs.links.push((ori_link, Some(id), fragment));
}
}
if item.is_mod() && !item.attrs.inner_docs {
self.mod_ids.push(item.def_id);
}
if item.is_mod() {
let ret = self.fold_item_recur(item);
self.mod_ids.pop();
ret
} else {
self.fold_item_recur(item)
}
}
// FIXME: if we can resolve intra-doc links from other crates, we can use the stock
// `fold_crate`, but until then we should avoid scanning `krate.external_traits` since those
// will never resolve properly
fn fold_crate(&mut self, mut c: Crate) -> Crate {
c.module = c.module.take().and_then(|module| self.fold_item(module));
c
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum Disambiguator {
Kind(DefKind),
Namespace(Namespace),
}
impl Disambiguator {
    /// Parses a disambiguator from a link, returning `(disambiguator, path_str)`,
    /// where `path_str` is the link with any `prefix@` or trailing `!`/`()` stripped.
fn from_str(link: &str) -> Result<(Self, &str), ()> {
use Disambiguator::{Kind, Namespace as NS};
let find_suffix = || {
let suffixes = [
("!()", DefKind::Macro(MacroKind::Bang)),
("()", DefKind::Fn),
("!", DefKind::Macro(MacroKind::Bang)),
];
for &(suffix, kind) in &suffixes {
if link.ends_with(suffix) {
return Ok((Kind(kind), link.trim_end_matches(suffix)));
}
}
Err(())
};
if let Some(idx) = link.find('@') {
let (prefix, rest) = link.split_at(idx);
let d = match prefix {
"struct" => Kind(DefKind::Struct),
"enum" => Kind(DefKind::Enum),
"trait" => Kind(DefKind::Trait),
"union" => Kind(DefKind::Union),
"module" | "mod" => Kind(DefKind::Mod),
"const" | "constant" => Kind(DefKind::Const),
"static" => Kind(DefKind::Static),
"function" | "fn" | "method" => Kind(DefKind::Fn),
"derive" => Kind(DefKind::Macro(MacroKind::Derive)),
"type" => NS(Namespace::TypeNS),
"value" => NS(Namespace::ValueNS),
"macro" => NS(Namespace::MacroNS),
_ => return find_suffix(),
};
Ok((d, &rest[1..]))
} else {
find_suffix()
}
}
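    /// Returns the suggested link syntax for an item of `kind`: `path!` for bang
    /// macros, `path()` for functions and methods, otherwise a `prefix@path` form.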
fn display_for(kind: DefKind, path_str: &str) -> String {
if kind == DefKind::Macro(MacroKind::Bang) {
return format!("{}!", path_str);
} else if kind == DefKind::Fn || kind == DefKind::AssocFn {
return format!("{}()", path_str);
}
let prefix = match kind {
DefKind::Struct => "struct",
DefKind::Enum => "enum",
DefKind::Trait => "trait",
DefKind::Union => "union",
DefKind::Mod => "mod",
DefKind::Const | DefKind::ConstParam | DefKind::AssocConst | DefKind::AnonConst => {
"const"
}
DefKind::Static => "static",
DefKind::Macro(MacroKind::Derive) => "derive",
// Now handle things that don't have a specific disambiguator
_ => match kind
.ns()
.expect("tried to calculate a disambiguator for a def without a namespace?")
{
Namespace::TypeNS => "type",
Namespace::ValueNS => "value",
Namespace::MacroNS => "macro",
},
};
format!("{}@{}", prefix, path_str)
}
fn ns(self) -> Namespace {
match self {
Self::Namespace(n) => n,
Self::Kind(k) => {
k.ns().expect("only DefKinds with a valid namespace can be disambiguators")
}
}
}
}
/// Reports a diagnostic for an intra-doc link.
///
/// If no link range is provided, or the source span of the link cannot be determined, the span of
/// the entire documentation block is used for the lint. If a range is provided but the span
/// calculation fails, a note is added to the diagnostic pointing to the link in the markdown.
///
/// The `decorate` callback is invoked in all cases to allow further customization of the
/// diagnostic before emission. If the span of the link was able to be determined, the second
/// parameter of the callback will contain it, and the primary span of the diagnostic will be set
/// to it.
fn report_diagnostic(
cx: &DocContext<'_>,
msg: &str,
item: &Item,
dox: &str,
link_range: Option<Range<usize>>,
decorate: impl FnOnce(&mut DiagnosticBuilder<'_>, Option<rustc_span::Span>),
) {
let hir_id = match cx.as_local_hir_id(item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
info!("ignoring warning from parent crate: {}", msg);
return;
}
};
let attrs = &item.attrs;
let sp = span_of_attrs(attrs).unwrap_or(item.source.span());
cx.tcx.struct_span_lint_hir(lint::builtin::BROKEN_INTRA_DOC_LINKS, hir_id, sp, |lint| {
let mut diag = lint.build(msg);
let span = link_range
.as_ref()
.and_then(|range| super::source_span_for_markdown_range(cx, dox, range, attrs));
if let Some(link_range) = link_range {
if let Some(sp) = span {
diag.set_span(sp);
} else {
// blah blah blah\nblah\nblah [blah] blah blah\nblah blah
// ^ ~~~~
// | link_range
// last_new_line_offset
let last_new_line_offset = dox[..link_range.start].rfind('\n').map_or(0, |n| n + 1);
let line = dox[last_new_line_offset..].lines().next().unwrap_or("");
// Print the line containing the `link_range` and manually mark it with '^'s.
diag.note(&format!(
"the link appears in this line:\n\n{line}\n\
{indicator: <before$}{indicator:^<found$}",
line = line,
indicator = "",
before = link_range.start - last_new_line_offset,
found = link_range.len(),
));
}
}
decorate(&mut diag, span);
diag.emit();
});
}
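/// Reports an intra-doc link whose path could not be resolved in any namespace.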
fn resolution_failure(
cx: &DocContext<'_>,
item: &Item,
path_str: &str,
dox: &str,
link_range: Option<Range<usize>>,
) {
report_diagnostic(
cx,
&format!("unresolved link to `{}`", path_str),
item,
dox,
link_range,
|diag, sp| {
if let Some(sp) = sp {
diag.span_label(sp, "unresolved link");
}
diag.help(r#"to escape `[` and `]` characters, add '\' before them like `\[` or `\]`"#);
},
);
}
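/// Reports an intra-doc link whose `#anchor` was rejected (see [`AnchorFailure`]).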
fn anchor_failure(
cx: &DocContext<'_>,
item: &Item,
path_str: &str,
dox: &str,
link_range: Option<Range<usize>>,
failure: AnchorFailure,
) {
let msg = match failure {
AnchorFailure::MultipleAnchors => format!("`{}` contains multiple anchors", path_str),
AnchorFailure::Primitive
| AnchorFailure::Variant
| AnchorFailure::AssocConstant
| AnchorFailure::AssocType
| AnchorFailure::Field
| AnchorFailure::Method => {
let kind = match failure {
AnchorFailure::Primitive => "primitive type",
AnchorFailure::Variant => "enum variant",
AnchorFailure::AssocConstant => "associated constant",
AnchorFailure::AssocType => "associated type",
AnchorFailure::Field => "struct field",
AnchorFailure::Method => "method",
AnchorFailure::MultipleAnchors => unreachable!("should be handled already"),
};
format!(
"`{}` contains an anchor, but links to {kind}s are already anchored",
path_str,
kind = kind
)
}
};
report_diagnostic(cx, &msg, item, dox, link_range, |diag, sp| {
if let Some(sp) = sp {
diag.span_label(sp, "contains invalid anchor");
}
});
}
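/// Reports an intra-doc link that resolved to items in more than one namespace,
/// suggesting a disambiguated form for each candidate.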
fn ambiguity_error(
cx: &DocContext<'_>,
item: &Item,
path_str: &str,
dox: &str,
link_range: Option<Range<usize>>,
candidates: PerNS<Option<Res>>,
) {
let mut msg = format!("`{}` is ", path_str);
let candidates = [TypeNS, ValueNS, MacroNS]
.iter()
.filter_map(|&ns| candidates[ns].map(|res| (res, ns)))
.collect::<Vec<_>>();
match candidates.as_slice() {
[(first_def, _), (second_def, _)] => {
msg += &format!(
"both {} {} and {} {}",
first_def.article(),
first_def.descr(),
second_def.article(),
second_def.descr(),
);
}
_ => {
let mut candidates = candidates.iter().peekable();
while let Some((res, _)) = candidates.next() {
if candidates.peek().is_some() {
msg += &format!("{} {}, ", res.article(), res.descr());
} else {
msg += &format!("and {} {}", res.article(), res.descr());
}
}
}
}
report_diagnostic(cx, &msg, item, dox, link_range.clone(), |diag, sp| {
if let Some(sp) = sp {
diag.span_label(sp, "ambiguous link");
let link_range = link_range.expect("must have a link range if we have a span");
for (res, ns) in candidates {
let (action, mut suggestion) = match res {
Res::Def(DefKind::AssocFn | DefKind::Fn, _) => {
("add parentheses", format!("{}()", path_str))
}
Res::Def(DefKind::Macro(MacroKind::Bang), _) => {
("add an exclamation mark", format!("{}!", path_str))
}
_ => {
let type_ = match (res, ns) {
(Res::Def(DefKind::Const, _), _) => "const",
(Res::Def(DefKind::Static, _), _) => "static",
(Res::Def(DefKind::Struct, _), _) => "struct",
(Res::Def(DefKind::Enum, _), _) => "enum",
(Res::Def(DefKind::Union, _), _) => "union",
(Res::Def(DefKind::Trait, _), _) => "trait",
(Res::Def(DefKind::Mod, _), _) => "module",
(_, TypeNS) => "type",
(_, ValueNS) => "value",
(Res::Def(DefKind::Macro(MacroKind::Derive), _), MacroNS) => "derive",
(_, MacroNS) => "macro",
};
// FIXME: if this is an implied shortcut link, it's bad style to suggest `@`
("prefix with the item type", format!("{}@{}", type_, path_str))
}
};
if dox.bytes().nth(link_range.start) == Some(b'`') {
suggestion = format!("`{}`", suggestion);
}
// FIXME: Create a version of this suggestion for when we don't have the span.
diag.span_suggestion(
sp,
&format!("to link to the {}, {}", res.descr(), action),
suggestion,
Applicability::MaybeIncorrect,
);
}
}
});
}
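/// Reports public documentation that links to a private item.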
fn privacy_error(
cx: &DocContext<'_>,
item: &Item,
path_str: &str,
dox: &str,
link_range: Option<Range<usize>>,
) {
let item_name = item.name.as_deref().unwrap_or("<unknown>");
let msg =
format!("public documentation for `{}` links to private item `{}`", item_name, path_str);
report_diagnostic(cx, &msg, item, dox, link_range, |diag, sp| {
if let Some(sp) = sp {
diag.span_label(sp, "this item is private");
}
let note_msg = if cx.render_options.document_private {
"this link resolves only because you passed `--document-private-items`, but will break without"
} else {
"this link will resolve properly if you pass `--document-private-items`"
};
diag.note(note_msg);
});
}
/// Given an enum variant's res, return the res of its enum and the associated fragment.
fn handle_variant(
cx: &DocContext<'_>,
res: Res,
extra_fragment: &Option<String>,
) -> Result<(Res, Option<String>), ErrorKind> {
use rustc_middle::ty::DefIdTree;
if extra_fragment.is_some() {
return Err(ErrorKind::AnchorFailure(AnchorFailure::Variant));
}
let parent = if let Some(parent) = cx.tcx.parent(res.def_id()) {
parent
} else {
return Err(ErrorKind::ResolutionFailure);
};
let parent_def = Res::Def(DefKind::Enum, parent);
let variant = cx.tcx.expect_variant_res(res);
Ok((parent_def, Some(format!("variant.{}", variant.ident.name))))
}
const PRIMITIVES: &[(&str, Res)] = &[
("u8", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::U8))),
("u16", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::U16))),
("u32", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::U32))),
("u64", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::U64))),
("u128", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::U128))),
("usize", Res::PrimTy(hir::PrimTy::Uint(rustc_ast::ast::UintTy::Usize))),
("i8", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::I8))),
("i16", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::I16))),
("i32", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::I32))),
("i64", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::I64))),
("i128", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::I128))),
("isize", Res::PrimTy(hir::PrimTy::Int(rustc_ast::ast::IntTy::Isize))),
("f32", Res::PrimTy(hir::PrimTy::Float(rustc_ast::ast::FloatTy::F32))),
("f64", Res::PrimTy(hir::PrimTy::Float(rustc_ast::ast::FloatTy::F64))),
("str", Res::PrimTy(hir::PrimTy::Str)),
("bool", Res::PrimTy(hir::PrimTy::Bool)),
("char", Res::PrimTy(hir::PrimTy::Char)),
];
fn is_primitive(path_str: &str, ns: Namespace) -> Option<Res> {
if ns == TypeNS { PRIMITIVES.iter().find(|x| x.0 == path_str).map(|x| x.1) } else { None }
}
fn primitive_impl(cx: &DocContext<'_>, path_str: &str) -> Option<DefId> {
let tcx = cx.tcx;
match path_str {
"u8" => tcx.lang_items().u8_impl(),
"u16" => tcx.lang_items().u16_impl(),
"u32" => tcx.lang_items().u32_impl(),
"u64" => tcx.lang_items().u64_impl(),
"u128" => tcx.lang_items().u128_impl(),
"usize" => tcx.lang_items().usize_impl(),
"i8" => tcx.lang_items().i8_impl(),
"i16" => tcx.lang_items().i16_impl(),
"i32" => tcx.lang_items().i32_impl(),
"i64" => tcx.lang_items().i64_impl(),
"i128" => tcx.lang_items().i128_impl(),
"isize" => tcx.lang_items().isize_impl(),
"f32" => tcx.lang_items().f32_impl(),
"f64" => tcx.lang_items().f64_impl(),
"str" => tcx.lang_items().str_impl(),
"bool" => tcx.lang_items().bool_impl(),
"char" => tcx.lang_items().char_impl(),
_ => None,
}
}
| {
return Err(ErrorKind::AnchorFailure(AnchorFailure::Primitive));
} |
storage_test.go | // Copyright (c) 2021 Terminus, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package kuberneteslogs
import (
"context"
"fmt"
"io"
"os"
reflect "reflect"
"strings"
"testing"
"time"
"github.com/recallsong/go-utils/encoding/jsonx"
v1 "k8s.io/api/core/v1"
"github.com/erda-project/erda-infra/base/logs/logrusx"
"github.com/erda-project/erda-proto-go/core/monitor/log/query/pb"
"github.com/erda-project/erda/modules/core/monitor/log/storage"
)
type logTestItem struct {
Timestamp int64
Content string
}
var (
commonLogTestItems = []logTestItem{
{Timestamp: 11, Content: "1111"},
{Timestamp: 12, Content: "2222"},
{Timestamp: 13, Content: "3333"},
{Timestamp: 14, Content: "4444.0"},
{Timestamp: 14, Content: "4444.1"},
{Timestamp: 15, Content: "5555"},
{Timestamp: 16, Content: "6666"},
{Timestamp: 17, Content: "7777"},
{Timestamp: 17, Content: "7777"},
{Timestamp: 16, Content: "6666.1"},
{Timestamp: 19, Content: "9999"},
}
commonTestOptions = map[string]interface{}{
storage.IsLive: true,
}
commonTestFilters = []*storage.Filter{
{
Key: "id",
Op: storage.EQ,
Value: "test_id",
},
{
Key: "pod_namespace",
Op: storage.EQ,
Value: "namespace1",
},
{
Key: "pod_name",
Op: storage.EQ,
Value: "name1",
},
{
Key: "container_name",
Op: storage.EQ,
Value: "container_name1",
},
{
Key: "cluster_name",
Op: storage.EQ,
Value: "cluster_name1",
},
}
)
var stdout = os.Stdout
func printf(tmp string, args ...interface{}) {
fmt.Fprintf(stdout, tmp, args...)
}
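// queryFuncForTest returns a fake pod-log query function that serves the given
// items as "<RFC3339Nano timestamp> <content>" lines, honoring opts.SinceTime and
// failing if the requested namespace/pod/container do not match the expected keys.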
func queryFuncForTest(expectNamespace, expectPod, expectContainer string, items []logTestItem) func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error) {
return func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error) {
if expectNamespace != it.podNamespace || expectPod != it.podName || expectContainer != opts.Container {
return nil, fmt.Errorf("want keys: [%q,%q,%q], got keys: [%q,%q,%q]",
expectNamespace, expectPod, expectContainer, it.podNamespace, it.podName, opts.Container)
}
var lines []string
if opts.SinceTime == nil {
for _, item := range items {
t := time.Unix(item.Timestamp/int64(time.Second), item.Timestamp%int64(time.Second))
line := t.Format(time.RFC3339Nano) + " " + item.Content
lines = append(lines, line)
}
} else {
for i, item := range items {
t := time.Unix(item.Timestamp/int64(time.Second), item.Timestamp%int64(time.Second))
if t.After(opts.SinceTime.Time) || t.Equal(opts.SinceTime.Time) {
for n := len(items); i < n; i++ {
item := items[i]
t := time.Unix(item.Timestamp/int64(time.Second), item.Timestamp%int64(time.Second))
line := t.Format(time.RFC3339Nano) + " " + item.Content
lines = append(lines, line)
}
break
}
}
}
// printf("request since time: %v \n", opts.SinceTime)
// printf("response logs: \n%s\n", strings.Join(lines, "\n"))
return io.NopCloser(strings.NewReader(strings.Join(lines, "\n"))), nil
}
}
const (
kubernetesLogScheme = "container"
)
func Test_cStorage_Iterator_Next(t *testing.T) {
tests := []struct {
name string
source []logTestItem
bufferLines int64
sel *storage.Selector
want []interface{}
}{
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 11,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "11",
UnixNano: 11,
Offset: initialOffset,
Content: "1111",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 12,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 13,
End: 16,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "13",
UnixNano: 13,
Offset: initialOffset,
Content: "3333",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset,
Content: "4444.0",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset + 1,
Content: "4444.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "15",
UnixNano: 15,
Offset: initialOffset,
Content: "5555",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 100,
End: 1000,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 10,
End: 11,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 11,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "11",
UnixNano: 11,
Offset: initialOffset,
Content: "1111",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 17,
End: 18,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset + 1,
Content: "7777",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 16,
End: 20,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset + 1,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "19",
UnixNano: 19,
Offset: initialOffset,
Content: "9999",
Level: "",
RequestId: "",
},
},
},
{
name: "Option_No_Live",
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 11,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: map[string]interface{}{},
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var namespace, name, container string
for _, v := range tt.sel.Filters {
switch v.Key {
case "pod_namespace":
namespace = v.Value.(string)
case "pod_name":
name = v.Value.(string)
case "container_name":
container = v.Value.(string)
}
}
s := &cStorage{
log: logrusx.New(),
getQueryFunc: func(clusterName string) (func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error), error) {
return queryFuncForTest(namespace, name, container, tt.source), nil
},
bufferLines: tt.bufferLines,
}
it, err := s.Iterator(context.TODO(), tt.sel)
if err != nil {
t.Errorf("cStorage.Iterator() got error: %s", it.Error())
return
}
var got []interface{}
for it.Next() {
got = append(got, it.Value())
}
if it.Error() != nil {
t.Errorf("cStorage.Iterator().Next() got error: %s", it.Error())
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("cStorage.Iterator().Next() \ngot %v, \nwant %v", jsonx.MarshalAndIndent(got), jsonx.MarshalAndIndent(tt.want))
}
})
}
}
func Test_cStorage_Iterator_Prev(t *testing.T) {
tests := []struct {
name string
source []logTestItem
bufferLines int64
sel *storage.Selector
want []interface{}
}{
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 11,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "11",
UnixNano: 11,
Offset: initialOffset,
Content: "1111",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 12,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 13,
End: 16,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "15",
UnixNano: 15,
Offset: initialOffset,
Content: "5555",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset + 1,
Content: "4444.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset,
Content: "4444.0",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "13",
UnixNano: 13,
Offset: initialOffset,
Content: "3333",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 100,
End: 1000,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 0,
End: 11,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 10,
End: 13,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "12",
UnixNano: 12,
Offset: initialOffset,
Content: "2222",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "11",
UnixNano: 11,
Offset: initialOffset,
Content: "1111",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 16,
End: 18,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset + 1,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: defaultBufferLines,
sel: &storage.Selector{
Start: 16,
End: 20,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "19",
UnixNano: 19,
Offset: initialOffset,
Content: "9999",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset + 1,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "17",
UnixNano: 17,
Offset: initialOffset,
Content: "7777",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "16",
UnixNano: 16,
Offset: initialOffset,
Content: "6666",
Level: "",
RequestId: "",
},
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var namespace, name, container string
for _, v := range tt.sel.Filters {
switch v.Key {
case "pod_namespace":
namespace = v.Value.(string)
case "pod_name":
name = v.Value.(string)
case "container_name":
container = v.Value.(string)
}
}
s := &cStorage{
log: logrusx.New(),
getQueryFunc: func(clusterName string) (func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error), error) {
return queryFuncForTest(namespace, name, container, tt.source), nil
},
bufferLines: tt.bufferLines,
}
it, err := s.Iterator(context.TODO(), tt.sel)
if err != nil {
t.Errorf("cStorage.Iterator() got error: %s", it.Error())
return
}
var got []interface{}
for it.Prev() {
got = append(got, it.Value())
}
if it.Error() != nil {
t.Errorf("cStorage.Iterator().Prev() got error: %s", it.Error())
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("cStorage.Iterator().Prev() \ngot %v, \nwant %v", jsonx.MarshalAndIndent(got), jsonx.MarshalAndIndent(tt.want))
}
})
}
}
func Test_cStorage_Iterator_FirstNext(t *testing.T) {
tests := []struct {
name string
source []logTestItem
bufferLines int64
sel *storage.Selector
want []interface{}
}{
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 13,
End: 15,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "13",
UnixNano: 13,
Offset: initialOffset,
Content: "3333",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset,
Content: "4444.0",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset + 1,
Content: "4444.1",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 100,
End: 1000,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 10,
End: 11,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var namespace, name, container string
for _, v := range tt.sel.Filters {
switch v.Key {
case "pod_namespace":
namespace = v.Value.(string)
case "pod_name":
name = v.Value.(string)
case "container_name":
container = v.Value.(string)
}
}
s := &cStorage{
log: logrusx.New(),
getQueryFunc: func(clusterName string) (func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error), error) {
return queryFuncForTest(namespace, name, container, tt.source), nil
},
bufferLines: tt.bufferLines,
}
it, err := s.Iterator(context.TODO(), tt.sel)
if err != nil {
t.Errorf("cStorage.Iterator() got error: %s", it.Error())
return
}
var got []interface{}
if it.First() {
got = append(got, it.Value())
}
for it.Next() {
got = append(got, it.Value())
}
if it.Error() != nil {
t.Errorf("cStorage.Iterator() First() and Next() got error: %s", it.Error())
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("cStorage.Iterator() First() and Next() \ngot %v, \nwant %v", jsonx.MarshalAndIndent(got), jsonx.MarshalAndIndent(tt.want))
}
})
}
}
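// Test_cStorage_Iterator_LastPrev checks backward iteration: Last() should position the
// iterator on the latest log line inside the selector window, and Prev() should walk back
// in descending timestamp/offset order until the window is exhausted (empty windows yield nil).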
func Test_cStorage_Iterator_LastPrev(t *testing.T) {
tests := []struct {
name string
source []logTestItem
bufferLines int64
sel *storage.Selector
want []interface{}
}{
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 13,
End: 15,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: []interface{}{
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset + 1,
Content: "4444.1",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "14",
UnixNano: 14,
Offset: initialOffset,
Content: "4444.0",
Level: "",
RequestId: "",
},
&pb.LogItem{
Id: "test_id",
Source: "container",
Stream: "",
Timestamp: "13",
UnixNano: 13,
Offset: initialOffset,
Content: "3333",
Level: "",
RequestId: "",
},
},
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 100,
End: 1000,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
{
source: commonLogTestItems,
bufferLines: 1,
sel: &storage.Selector{
Start: 10,
End: 11,
Scheme: kubernetesLogScheme,
Filters: commonTestFilters,
Options: commonTestOptions,
},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
var namespace, name, container string
for _, v := range tt.sel.Filters {
switch v.Key {
case "pod_namespace":
namespace = v.Value.(string)
case "pod_name":
name = v.Value.(string)
case "container_name":
container = v.Value.(string)
}
}
s := &cStorage{
log: logrusx.New(),
getQueryFunc: func(clusterName string) (func(it *logsIterator, opts *v1.PodLogOptions) (io.ReadCloser, error), error) {
return queryFuncForTest(namespace, name, container, tt.source), nil
},
bufferLines: tt.bufferLines,
}
it, err := s.Iterator(context.TODO(), tt.sel)
if err != nil {
t.Errorf("cStorage.Iterator() got error: %s", it.Error())
return
}
var got []interface{}
if it.Last() |
for it.Prev() {
got = append(got, it.Value())
}
if it.Error() != nil {
t.Errorf("cStorage.Iterator() Last() and Prev() got error: %s", it.Error())
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("cStorage.Iterator() Last() and Prev() \ngot %v, \nwant %v", jsonx.MarshalAndIndent(got), jsonx.MarshalAndIndent(tt.want))
}
})
}
}
| {
got = append(got, it.Value())
} |
authproxy.py | # Copyright (c) 2011 Jeff Garzik
#
# Previous copyright, from python-jsonrpc/jsonrpc/proxy.py:
#
# Copyright (c) 2007 Jan-Klaas Kollhof
#
# This file is part of jsonrpc.
#
# jsonrpc is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""HTTP proxy for opening RPC connection to ogfuncoind.
AuthServiceProxy has the following improvements over python-jsonrpc's
ServiceProxy class:
- HTTP connections persist for the life of the AuthServiceProxy object
(if server supports HTTP/1.1)
- sends protocol 'version', per JSON-RPC 1.1
- sends proper, incrementing 'id'
- sends Basic HTTP authentication headers
- parses all JSON numbers that look like floats as Decimal
- uses standard Python json lib
"""
import base64
import decimal
import http.client
import json
import logging
import socket
import time
import urllib.parse
HTTP_TIMEOUT = 30
USER_AGENT = "AuthServiceProxy/0.1"
log = logging.getLogger("ogfuncoinRPC")
class JSONRPCException(Exception):
def __init__(self, rpc_error):
try:
errmsg = '%(message)s (%(code)i)' % rpc_error
except (KeyError, TypeError):
errmsg = ''
super().__init__(errmsg)
self.error = rpc_error
def EncodeDecimal(o):
if isinstance(o, decimal.Decimal):
return str(o)
raise TypeError(repr(o) + " is not JSON serializable")
class AuthServiceProxy():
__id_count = 0
# ensure_ascii: escape unicode as \uXXXX, passed to json.dumps
def __init__(self, service_url, service_name=None, timeout=HTTP_TIMEOUT, connection=None, ensure_ascii=True):
self.__service_url = service_url
self._service_name = service_name
self.ensure_ascii = ensure_ascii # can be toggled on the fly by tests
self.__url = urllib.parse.urlparse(service_url)
port = 80 if self.__url.port is None else self.__url.port
user = None if self.__url.username is None else self.__url.username.encode('utf8')
passwd = None if self.__url.password is None else self.__url.password.encode('utf8')
authpair = user + b':' + passwd
self.__auth_header = b'Basic ' + base64.b64encode(authpair)
if connection:
# Callables re-use the connection of the original proxy
self.__conn = connection
elif self.__url.scheme == 'https':
self.__conn = http.client.HTTPSConnection(self.__url.hostname, port, timeout=timeout)
else:
self.__conn = http.client.HTTPConnection(self.__url.hostname, port, timeout=timeout)
def __getattr__(self, name):
if name.startswith('__') and name.endswith('__'):
# Python internal stuff
raise AttributeError
if self._service_name is not None:
name = "%s.%s" % (self._service_name, name)
return AuthServiceProxy(self.__service_url, name, connection=self.__conn)
def _request(self, method, path, postdata):
'''
Do an HTTP request, with retry if we get disconnected (e.g. due to a timeout).
This is a workaround for https://bugs.python.org/issue3566 which is fixed in Python 3.5.
'''
headers = {'Host': self.__url.hostname,
'User-Agent': USER_AGENT,
'Authorization': self.__auth_header,
'Content-type': 'application/json'}
try:
self.__conn.request(method, path, postdata, headers)
return self._get_response()
except http.client.BadStatusLine as e:
if e.line == "''": # if connection was closed, try again
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
else:
raise
except (BrokenPipeError, ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()
def get_request(self, *args, **argsn):
AuthServiceProxy.__id_count += 1
log.debug("-%s-> %s %s" % (AuthServiceProxy.__id_count, self._service_name,
json.dumps(args, default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
if args and argsn:
raise ValueError('Cannot handle both named and positional arguments')
return {'version': '1.1',
'method': self._service_name,
'params': args or argsn,
'id': AuthServiceProxy.__id_count}
def __call__(self, *args, **argsn):
postdata = json.dumps(self.get_request(*args, **argsn), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
response = self._request('POST', self.__url.path, postdata.encode('utf-8'))
if response['error'] is not None:
raise JSONRPCException(response['error'])
elif 'result' not in response:
raise JSONRPCException({
'code': -343, 'message': 'missing JSON-RPC result'})
else:
return response['result']
def batch(self, rpc_call_list):
postdata = json.dumps(list(rpc_call_list), default=EncodeDecimal, ensure_ascii=self.ensure_ascii)
log.debug("--> " + postdata)
return self._request('POST', self.__url.path, postdata.encode('utf-8'))
def _get_response(self):
req_start_time = time.time()
try:
http_response = self.__conn.getresponse() | 'using larger timeout for calls that take '
'longer to return.' % (self._service_name,
self.__conn.timeout)})
if http_response is None:
raise JSONRPCException({
'code': -342, 'message': 'missing HTTP response from server'})
content_type = http_response.getheader('Content-Type')
if content_type != 'application/json':
raise JSONRPCException({
'code': -342, 'message': 'non-JSON HTTP response with \'%i %s\' from server' % (http_response.status, http_response.reason)})
responsedata = http_response.read().decode('utf8')
response = json.loads(responsedata, parse_float=decimal.Decimal)
elapsed = time.time() - req_start_time
if "error" in response and response["error"] is None:
log.debug("<-%s- [%.6f] %s" % (response["id"], elapsed, json.dumps(response["result"], default=EncodeDecimal, ensure_ascii=self.ensure_ascii)))
else:
log.debug("<-- [%.6f] %s" % (elapsed, responsedata))
return response
def __truediv__(self, relative_uri):
return AuthServiceProxy("{}/{}".format(self.__service_url, relative_uri), self._service_name, connection=self.__conn) | except socket.timeout:
raise JSONRPCException({
'code': -344,
'message': '%r RPC took longer than %f seconds. Consider ' |
index.js | require('./bind-for');
var diff = require('./lib/diff');
var lib = require('./lib/index');
var wrapArray = require('./lib/array').wrapArray;
// Singleton state definition.
var State = {
initialState: {},
nonBindedStateKeys: [],
handlers: {},
computeState: [function () { /* no-op */ }]
};
var STATE_UPDATE_EVENT = 'stateupdate';
var TYPE_OBJECT = 'object';
var WHITESPACE_REGEX = /\s+/;
AFRAME.registerState = function (definition) {
const computeState = State.computeState;
if (definition.computeState) {
computeState.push(definition.computeState);
}
AFRAME.utils.extendDeep(State, definition);
State.computeState = computeState;
}
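// Illustrative usage (state keys and action names are placeholders):
// AFRAME.registerState({
//   initialState: {score: 0},
//   handlers: {
//     scorePoint: function (state, payload) { state.score += payload.points; }
//   }
// });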
AFRAME.registerSystem('state', {
init: function () {
var key;
this.arrays = [];
this.dirtyArrays = [];
this.diff = {};
this.state = AFRAME.utils.clone(State.initialState);
this.subscriptions = [];
this.initEventHandlers();
// Wrap array to detect dirty.
for (key in this.state) {
if (this.state[key] && this.state[key].constructor === Array) {
this.arrays.push(key);
this.state[key].__dirty = true;
wrapArray(this.state[key]);
}
}
this.lastState = AFRAME.utils.clone(this.state);
this.eventDetail = {
lastState: this.lastState,
state: this.state
};
this.el.addEventListener('loaded', () => {
var i;
// Initial compute.
for (i = 0; i < State.computeState.length; i++) {
State.computeState[i](this.state, '@@INIT');
}
// Initial dispatch.
for (i = 0; i < this.subscriptions.length; i++) {
this.subscriptions[i].onStateUpdate(this.state);
}
});
},
/**
* Dispatch action.
*/
dispatch: (function () {
const toUpdate = [];
return function (actionName, payload) {
var dirtyArrays;
var i;
var key;
var subscription;
// Modify state.
State.handlers[actionName](this.state, payload);
// Post-compute.
for (i = 0; i < State.computeState.length; i++) {
State.computeState[i](this.state, actionName, payload);
}
// Get a diff to optimize bind updates.
for (key in this.diff) { delete this.diff[key]; }
diff(this.lastState, this.state, this.diff, State.nonBindedStateKeys);
this.dirtyArrays.length = 0;
for (i = 0; i < this.arrays.length; i++) {
if (this.state[this.arrays[i]].__dirty) {
this.dirtyArrays.push(this.arrays[i]);
}
}
// Notify subscriptions / binders.
let currentUpdateCount = 0;
for (i = 0; i < this.subscriptions.length; i++) {
if (this.subscriptions[i].name === 'bind-for') {
// For arrays and bind-for, check __dirty flag on array rather than the diff.
if (!this.state[this.subscriptions[i].keysToWatch[0]].__dirty) { continue; }
} else {
if (!this.shouldUpdate(this.subscriptions[i].keysToWatch, this.diff,
this.dirtyArrays)) { continue; }
}
// Keep track to only update subscriptions once.
if (toUpdate.indexOf(this.subscriptions[i]) === -1) {
toUpdate.push(this.subscriptions[i]);
currentUpdateCount++;
}
}
// Unset array dirty.
for (key in this.state) {
if (this.state[key] && this.state[key].constructor === Array) {
this.state[key].__dirty = false;
}
}
// Store last state.
this.copyState(this.lastState, this.state);
// Update subscriptions.
for (i = 0; i < currentUpdateCount; i++) {
let subscriber = toUpdate.pop();
subscriber.onStateUpdate();
}
// Emit.
this.eventDetail.action = actionName;
this.eventDetail.payload = payload;
this.el.emit(STATE_UPDATE_EVENT, this.eventDetail);
};
})(),
/**
* Store last state through a deep extend, but not for arrays.
*/
copyState: function (lastState, state, isRecursive) {
var key;
for (key in state) {
// Don't copy pieces of state keys that are non-binded or untracked.
if (!isRecursive && State.nonBindedStateKeys.indexOf(key) !== -1) { continue; }
// Nested state.
if (state[key] && state[key].constructor === Object) {
if (!(key in lastState)) {
// Clone object if destination does not exist.
lastState[key] = AFRAME.utils.clone(state[key]);
continue;
}
// Recursively copy state.
this.copyState(lastState[key], state[key], true);
continue;
}
// Copy by value.
lastState[key] = state[key];
}
},
subscribe: function (component) {
this.subscriptions.push(component);
},
unsubscribe: function (component) {
var i = this.subscriptions.indexOf(component);
if (i > -1)
this.subscriptions.splice(i, 1);
},
/**
* Check if state changes were relevant to this binding. If not, don't call.
*/
shouldUpdate: function (keysToWatch, diff, dirtyArrays) {
for (let i = 0; i < keysToWatch.length; i++) {
if (keysToWatch[i] in diff || dirtyArrays.indexOf(keysToWatch[i]) !== -1) {
return true;
}
}
return false;
},
/**
* Proxy events to action dispatches so components can just bubble actions up as events.
* Handlers define which actions they handle. Go through all and add event listeners.
*/
initEventHandlers: function () {
var actionName;
var registeredActions = [];
var self = this;
registerListener = registerListener.bind(this);
// Use declared handlers to know what events to listen to.
for (actionName in State.handlers) {
// Only need to register one handler for each event.
if (registeredActions.indexOf(actionName) !== -1) { continue; }
registeredActions.push(actionName);
registerListener(actionName);
}
function registerListener (actionName) {
this.el.addEventListener(actionName, evt => {
this.dispatch(actionName, evt.detail);
});
}
},
/**
* Render template to string with item data.
*/
renderTemplate: (function () {
// Braces, whitespace, optional item name, item key, whitespace, braces.
var interpRegex = /{{\s*(\w*\.)?([\w.]+)\s*}}/g;
return function (template, data, asString) {
var match;
var str;
str = template;
// Data will be null when initializing the pool for bind-for.updateInPlace.
if (data) {
while (match = interpRegex.exec(template)) {
str = str.replace(
match[0],
typeof data === TYPE_OBJECT
? lib.select(data, match[2]) || ''
: data);
}
}
// Return as string.
if (asString) { return str; }
// Return as DOM.
return document.createRange().createContextualFragment(str);
};
})(),
select: lib.select
});
/**
* Bind component property to a value in state.
*
* bind="geometry.width: car.width""
* bind__material="color: enemy.color; opacity: enemy.opacity"
* bind__visible="player.visible"
*/
AFRAME.registerComponent('bind', {
schema: {
default: {},
parse: function (value) {
// Parse style-like object.
var data;
var i;
var properties;
var pair;
// Using setAttribute with object, no need to parse.
if (value.constructor === Object) { return value; }
// Using instanced ID as component namespace for single-property component,
// nothing to separate.
if (value.indexOf(':') === -1) { return value; }
// Parse style-like object as keys to values.
data = {};
properties = lib.split(value, ';');
for (i = 0; i < properties.length; i++) {
pair = lib.split(properties[i].trim(), ':');
data[pair[0]] = pair[1].trim();
}
return data;
}
},
multiple: true,
init: function () {
var componentId;
var data = this.data;
var key;
this.keysToWatch = [];
this.onStateUpdate = this.onStateUpdate.bind(this);
this.system = this.el.sceneEl.systems.state;
// Whether we are binding by namespace (e.g., bind__foo="prop1: true").
if (this.id) {
componentId = lib.split(this.id, '__')[0];
}
this.isNamespacedBind =
this.id &&
(componentId in AFRAME.components && !AFRAME.components[componentId].isSingleProp) ||
componentId in AFRAME.systems;
this.lastData = {};
this.updateObj = {};
// Subscribe to store and register handler to do data-binding to components.
this.system.subscribe(this);
this.onStateUpdate = this.onStateUpdate.bind(this);
},
update: function () {
var data = this.data;
var key;
var property;
// Index `keysToWatch` to only update state on relevant changes.
this.keysToWatch.length = 0;
if (typeof data === 'string') {
lib.parseKeysToWatch(this.keysToWatch, data);
} else {
for (key in data) {
lib.parseKeysToWatch(this.keysToWatch, data[key]);
}
}
this.onStateUpdate();
},
/**
* Handle state update.
*/
onStateUpdate: function () {
// Update component with the state.
var hasKeys = false;
var el = this.el;
var propertyName;
var stateSelector;
var state;
var tempNode;
var value;
if (!el.parentNode) { return; }
if (this.isNamespacedBind) { lib.clearObject(this.updateObj); }
state = this.system.state;
// Single-property bind.
if (typeof this.data !== TYPE_OBJECT) {
try {
value = lib.select(state, this.data);
} catch (e) {
throw new Error(`[aframe-state-component] Key '${this.data}' not found in state.` +
` #${this.el.getAttribute('id')}[${this.attrName}]`);
}
if (typeof value !== TYPE_OBJECT &&
typeof this.lastData !== TYPE_OBJECT &&
this.lastData === value) { return; }
AFRAME.utils.entity.setComponentProperty(el, this.id, value);
this.lastData = value;
return;
}
for (propertyName in this.data) {
// Pointer to a value in the state (e.g., `player.health`).
stateSelector = this.data[propertyName].trim();
try {
value = lib.select(state, stateSelector);
} catch (e) {
console.log(e);
throw new Error(`[aframe-state-component] Key '${stateSelector}' not found in state.` +
` #${this.el.getAttribute('id')}[${this.attrName}]`);
}
if (typeof value !== TYPE_OBJECT &&
typeof this.lastData[propertyName] !== TYPE_OBJECT &&
this.lastData[propertyName] === value) { continue; }
// Remove component if value is `undefined`.
if (propertyName in AFRAME.components && value === undefined) {
el.removeAttribute(propertyName);
return;
}
// Set using dot-delimited property name.
if (this.isNamespacedBind) {
// Batch if doing namespaced bind.
this.updateObj[propertyName] = value;
} else {
AFRAME.utils.entity.setComponentProperty(el, propertyName, value);
}
this.lastData[propertyName] = value;
}
// Batch if doing namespaced bind.
for (hasKeys in this.updateObj) {
// See if object is empty.
}
if (this.isNamespacedBind && hasKeys) {
el.setAttribute(this.id, this.updateObj);
}
},
remove: function () {
this.system.unsubscribe(this);
}
});
/**
* Toggle component attach and detach based on boolean value.
*
* bind-toggle__raycastable="isRaycastable"
*/
AFRAME.registerComponent('bind-toggle', {
schema: {type: 'string'},
multiple: true,
init: function () {
this.system = this.el.sceneEl.systems.state;
this.keysToWatch = [];
this.onStateUpdate = this.onStateUpdate.bind(this);
// Subscribe to store and register handler to do data-binding to components.
this.system.subscribe(this);
this.onStateUpdate();
},
update: function () {
this.keysToWatch.length = 0;
lib.parseKeysToWatch(this.keysToWatch, this.data);
},
/**
* Handle state update.
*/
onStateUpdate: function () {
var el = this.el;
var state;
var value;
state = this.system.state;
try {
value = lib.select(state, this.data);
} catch (e) {
throw new Error(`[aframe-state-component] Key '${this.data}' not found in state.` +
` #${this.el.getAttribute('id')}[${this.attrName}]`);
}
if (value) {
el.setAttribute(this.id, '');
} else {
el.removeAttribute(this.id);
}
},
remove: function () {
this.system.unsubscribe(this);
}
}); | composeHandlers: lib.composeHandlers,
select: lib.select
}; |
module.exports = {
composeFunctions: lib.composeFunctions, |
Camera.ts | function cameraName( label: string ) {
let clean = label.replace( /\s*\([0-9a-f]+(:[0-9a-f]+)?\)\s*$/, '' );
return clean || label || null;
}
class MediaError extends Error {
type: string;
inner: Error;
constructor( type: string, inner?: Error ) {
super( inner |
this.type = type;
this.inner = inner;
}
}
export default class Camera {
id: string;
name: string;
private _stream: MediaStream;
constructor( id: string, name: string ) {
this.id = id;
this.name = name;
this._stream = null;
}
async start(facingMode) {
let constraints: any = {
audio: false,
video: {
mandatory: {
sourceId: this.id,
minWidth: 600,
maxWidth: 800,
minAspectRatio: 1.6
},
optional: []
}
};
if ( facingMode && typeof facingMode === 'string' ) {
constraints.video.facingMode = facingMode;
}
this._stream = await Camera.wrapErrors( async () => {
return await navigator.mediaDevices.getUserMedia( constraints );
} );
return this._stream;
}
stop() {
if ( !this._stream ) {
return;
}
for ( let stream of this._stream.getVideoTracks() ) {
stream.stop();
}
this._stream = null;
}
static async getCameras() {
await Camera.ensureAccess();
let devices = await navigator.mediaDevices.enumerateDevices();
return devices
.filter( d => d.kind === 'videoinput' )
.map( d => new Camera( d.deviceId, cameraName( d.label ) ) );
}
static async ensureAccess() {
return await this.wrapErrors( async () => {
let access = await navigator.mediaDevices.getUserMedia( { video: true } );
for ( let stream of access.getVideoTracks() ) {
stream.stop();
}
} );
}
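// Wraps getUserMedia failures in a MediaError keyed by the DOMException name; in
// development builds the original error is rethrown unwrapped to ease debugging.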
static async wrapErrors<T>( fn: () => Promise<T> ): Promise<T> {
try {
return await fn();
} catch ( e ) {
if ( e.name && process.env.NODE_ENV !== "development" ) {
throw new MediaError( e.name, e );
} else {
throw e;
}
}
}
} | ? `Cannot access video stream (${type}: ${inner.message}).`
: `Cannot access video stream (${type}).` ); |
model.py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# Adam Zsarnóczay
"""
This module has classes and methods that define and access the model used for
loss assessment.
.. rubric:: Contents
.. autosummary::
prep_constant_median_DV
prep_bounded_linear_median_DV
prep_bounded_multilinear_median_DV
FragilityFunction
ConsequenceFunction
DamageState
DamageStateGroup
PerformanceGroup
FragilityGroup
"""
from .base import *
class FragilityFunction(object):
"""
Describes the relationship between asset response and damage.
Asset response is characterized by a Demand value that represents an
engineering demand parameter (EDP). Only a scalar EDP is supported
currently. The damage is characterized by a set of DamageStateGroup (DSG)
objects. For each DSG, the corresponding EDP limit (i.e. the EDP at which
the asset is assumed to experience damage described by the DSG) is
considered uncertain; hence, it is described by a random variable. The
random variables that describe EDP limits for the set of DSGs are not
necessarily independent.
We assume that the EDP limit will be approximated by a probability
distribution for each DSG and these variables together form a multivariate
distribution. Following common practice, the correlation between
variables is assumed perfect by default, but the framework allows the
users to explore other, more realistic options.
Parameters
----------
EDP_limit: list of RandomVariable
A list of correlated random variables where each variable corresponds
to an EDP limit that triggers a damage state. The number of
list elements shall be equal to the number of DSGs handled by the
Fragility Function (FF) and they shall be in ascending order of damage
severity.
"""
def __init__(self, EDP_limit):
self._EDP_limit = EDP_limit
self._EDP_tags = [EDP_lim_i.name for EDP_lim_i in EDP_limit]
def P_exc(self, EDP, DSG_ID):
"""
Return the probability of damage state exceedance.
Calculate the probability of exceeding the damage corresponding to the
DSG identified by the DSG_ID conditioned on a particular EDP value.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the conditioning DSG. The DSG numbering is 1-based,
because zero typically corresponds to the undamaged state.
Returns
-------
P_exc: float scalar or ndarray
DSG exceedance probability at the given EDP point(s).
"""
EDP = np.asarray(EDP, dtype=np.float64)
nvals = EDP.size
# The exceedance probability corresponding to no damage is 1.
# Although this is trivial, returning 1 for DSG_ID=0 makes using P_exc
# more convenient.
if DSG_ID == 0:
P_exc = np.ones(EDP.size)
else:
# prepare the limits for the density calculation
ndims = len(self._EDP_limit)
limit_list = np.full((ndims, nvals), -np.inf, dtype=np.float64)
limit_list[DSG_ID - 1:] = EDP
limit_list[:DSG_ID - 1] = None
P_exc = 1.0 - self._EDP_limit[0].RV_set.orthotope_density(
lower=limit_list, var_subset = self._EDP_tags)
# if EDP was a scalar, make sure that the result is also a scalar
if EDP.size == 1:
return P_exc[0]
else:
return P_exc
def DSG_given_EDP(self, EDP, force_resampling=False):
"""
Given an EDP, get a damage level based on the fragility function.
The damage is evaluated by sampling the joint distribution of
fragilities corresponding to all possible damage levels and checking
which damage level the given EDP falls into. This approach allows for
efficient damage state evaluation for a large number of EDP
realizations.
Parameters
----------
EDP: float scalar or ndarray or Series
Single EDP, or numpy array or pandas Series of EDP values.
force_resampling: bool, optional, default: False
If True, the probability distribution is resampled before
evaluating the damage for each EDP. This is not recommended if the
fragility functions are correlated with other sources of
uncertainty because those variables will also be resampled in this
case. If False, which is the default approach, we assume that
the random variable has already been sampled and that the number of
samples is greater than or equal to the number of EDP values.
Returns
-------
DSG_ID: Series
Identifies the damage that corresponds to the given EDP. A DSG_ID
of 0 means no damage.
"""
# get the number of samples needed
nsamples = EDP.size
# if there are no samples or resampling is forced, then sample the
# distribution first
# TODO: force_resampling is probably not needed
# if force_resampling or (self._EDP_limit.samples is None):
# self._EDP_limit.sample_distribution(sample_size=nsamples)
#
# # if the number of samples is not sufficiently large, raise an error
# if self._EDP_limit.samples.shape[0] < nsamples:
# raise ValueError(
# 'Damage evaluation requires at least as many samples of the '
# 'joint distribution defined by the fragility functions as '
# 'many EDP values are provided to the DSG_given_EDP function. '
# 'You might want to consider setting force_resampling to True '
# 'or sampling the distribution before calling the DSG_given_EDP '
# 'function.')
#samples = pd.DataFrame(self._EDP_limit.samples)
samples = pd.DataFrame(dict([(lim_i.name, lim_i.samples)
for lim_i in self._EDP_limit]))
if type(EDP) not in [pd.Series, pd.DataFrame]:
EDP = pd.Series(EDP, name='EDP')
#nstates = samples.columns.values.size
nstates = samples.shape[1]
samples = samples.loc[EDP.index,:]
# sort columns
sample_cols = samples.columns
samples = samples[sample_cols[np.argsort(sample_cols)]]
# check for EDP exceedance
EXC = samples.sub(EDP, axis=0) < 0.
DSG_ID = pd.Series(np.zeros(len(samples.index)), name='DSG_ID',
index=samples.index, dtype=int)
for s in range(nstates):
DSG_ID[EXC.iloc[:,s]] = s + 1
return DSG_ID
def prep_constant_median_DV(median):
"""
Returns a constant median Decision Variable (DV) function.
Parameters
----------
median: float
The median DV for a consequence function with fixed median.
Returns
-------
f: callable
A function that returns the constant median DV for all component
quantities.
"""
def f(quantity):
return median
return f
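# Example (illustrative value): prep_constant_median_DV(1000.)(quantity=5) returns 1000.,
# regardless of the quantity argument.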
def prep_bounded_linear_median_DV(median_max, median_min, quantity_lower,
quantity_upper):
"""
Returns a bounded linear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by a linear function with a
negative slope between max and min values.
Parameters
----------
median_max: float, optional
median_min: float, optional
Maximum and minimum limits that define the bounded_linear median DV
function.
quantity_lower: float, optional
quantity_upper: float, optional
Lower and upper bounds of component quantity that define the
bounded_linear median DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.
"""
def f(quantity):
if quantity is None:
raise ValueError(
'A bounded linear median Decision Variable function called '
'without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
# calculate the median consequence given the quantity of damaged
# components
output = np.interp(q_array,
[quantity_lower, quantity_upper],
[median_max, median_min])
return output
return f
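# Example (illustrative values): with median_max=300., median_min=100., quantity_lower=10.
# and quantity_upper=50., a damaged quantity of 30 yields a median of 200. by linear
# interpolation; quantities below 10 return 300. and quantities above 50 return 100.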
def prep_bounded_multilinear_median_DV(medians, quantities):
"""
Returns a bounded multilinear median Decision Variable (DV) function.
The median DV equals the min and max values when the quantity is
outside of the prescribed quantity bounds. When the quantity is within the
bounds, the returned median is calculated by linear interpolation.
Parameters
----------
medians: ndarray
Series of values that define the y coordinates of the multilinear DV
function.
quantities: ndarray
Series of values that define the component quantities corresponding to
the series of medians and serving as the x coordinates of the
multilinear DV function.
Returns
-------
f: callable
A function that returns the median DV given the quantity of damaged
components.
"""
def f(quantity):
if quantity is None:
raise ValueError(
'A bounded linear median Decision Variable function called '
'without specifying the quantity of damaged components')
q_array = np.asarray(quantity, dtype=np.float64)
# calculate the median consequence given the quantity of damaged
# components
output = np.interp(q_array, quantities, medians)
return output
return f
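# Example (illustrative values): prep_bounded_multilinear_median_DV([100., 80., 20.],
# [1., 5., 25.])(10.) returns 65. by interpolating between the (5., 80.) and (25., 20.)
# breakpoints; quantities outside [1., 25.] return the nearest boundary median.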
class ConsequenceFunction(object):
"""
Describes the relationship between damage and a decision variable.
Indicates the distribution of a quantified Decision Variable (DV)
conditioned on a component, an element, or the system reaching a given
damage state (DS). DV can be reconstruction cost, repair time, casualties,
injuries, etc. Its distribution might depend on the quantity of damaged
components.
Parameters
----------
DV_median: callable
Describes the median DV as an f(quantity) function of the total
quantity of damaged components. Use the prep_constant_median_DV, and
prep_bounded_linear_median_DV helper functions to conveniently
prescribe the typical FEMA P-58 functions.
DV_distribution: RandomVariable
A random variable that characterizes the uncertainty in the DV. The
distribution shall be normalized by the median DV (i.e. the RV is
expected to have a unit median). Truncation can be used to
prescribe lower and upper limits for the DV, such as the (0,1) domain
needed for red tag evaluation.
"""
def __init__(self, DV_median, DV_distribution):
self._DV_median = DV_median
self._DV_distribution = DV_distribution
def m | self, quantity=None):
"""
Return the value of the median DV.
The median DV corresponds to the component damage state (DS). If the
damage consequence depends on the quantity of damaged components, the
total quantity of damaged components shall be specified through the
quantity parameter.
Parameters
----------
quantity: float scalar or ndarray, optional
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
Returns
-------
median: float scalar or ndarray
A single scalar for fixed median; a scalar or an array depending on
the shape of the quantity parameter for bounded_linear median.
"""
return self._DV_median(quantity)
def sample_unit_DV(self, quantity=None, sample_size=1,
force_resampling=False):
"""
Sample the decision variable quantity per component unit.
The Unit Decision Variable (UDV) corresponds to the component Damage
State (DS). It shall be multiplied by the quantity of damaged
components to get the total DV that corresponds to the quantity of the
damaged components in the asset. If the DV depends on the total
quantity of damaged components, that value shall be specified through
the quantity parameter.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median DV. Not needed for consequence functions with a fixed
median DV.
sample_size: int, optional, default: 1
Number of samples drawn from the DV distribution. The default value
yields one sample. If quantity is an array with more than one
element, the sample_size parameter is ignored.
force_resampling: bool, optional, default: False
If True, the DV distribution (and the corresponding RV if there
are correlations) is resampled even if there are samples already
available. This is not recommended if the DV distribution is
correlated with other sources of uncertainty because those
variables will also be resampled in this case. If False, which is
the default approach, we assume that the random variable has
already been sampled and that the number of samples is greater than or equal
to the number of samples requested.
Returns
-------
unit_DV: float scalar or ndarray
Unit DV samples.
"""
# get the median DV conditioned on the provided quantities
median = self.median(quantity=np.asarray(quantity))
# if the distribution is None, then there is no uncertainty in the DV
# and the median values are the samples
if self._DV_distribution is None:
return median
else:
# if there are more than one quantities defined, infer the number of
# samples from the number of quantities
if quantity is not None:
if type(quantity) not in [pd.Series, pd.DataFrame]:
quantity = pd.Series(quantity, name='QNT')
if quantity.size > 1:
sample_size = quantity.size
elif sample_size > 1:
quantity = pd.Series(np.ones(sample_size) * quantity.values,
name='QNT')
# if there are no samples or resampling is forced, then sample the
# distribution first
# TODO: force_resampling is probably not needed
# if (force_resampling or
# (self._DV_distribution.samples is None)):
# self._DV_distribution.sample_distribution(sample_size=sample_size)
# # if the number of samples is not sufficiently large, raise an error
# if self._DV_distribution.samples.shape[0] < sample_size:
# raise ValueError(
# 'Consequence evaluation requires at least as many samples of '
# 'the Decision Variable distribution as many samples are '
# 'requested or as many quantity values are provided to the '
# 'sample_unit_DV function. You might want to consider setting '
# 'force_resampling to True or sampling the distribution before '
# 'calling the sample_unit_DV function.')
# get the samples
if quantity is not None:
samples = pd.Series(self._DV_distribution.samples).loc[quantity.index]
else:
samples = pd.Series(self._DV_distribution.samples).iloc[:sample_size]
samples = samples * median
return samples
class DamageState(object):
"""
Characterizes one type of damage that corresponds to a particular DSG.
The occurrence of damage is evaluated at the DSG. The DS describes one of
the possibly several types of damages that belong to the same DSG and the
consequences of such damage.
Parameters
----------
ID:int
weight: float, optional, default: 1.0
Describes the probability of DS occurrence, conditioned on the damage
being in the DSG linked to this DS. This information is only used for
DSGs with multiple DS corresponding to them. The weights of the set of
DS shall sum up to 1.0 if they are mutually exclusive. When the set of
DS occur simultaneously, the sum of weights typically exceeds 1.0.
description: str, optional
Provides a short description of the damage state.
affected_area: float, optional, default: 0.
Defines the area over which life safety hazards from this DS exist.
repair_cost_CF: ConsequenceFunction, optional
A consequence function that describes the cost necessary to restore the
component to its pre-disaster condition.
reconstruction_time_CF: ConsequenceFunction, optional
A consequence function that describes the time, necessary to repair the
damaged component to its pre-disaster condition.
injuries_CF_set: ConsequenceFunction array, optional
A set of consequence functions; each describes the number of people
expected to experience injury of a particular severity when the
component is in this DS. Any number of injury-levels can be considered.
red_tag_CF: ConsequenceFunction, optional
A consequence function that describes the proportion of components
(within a Performance Group) that needs to be damaged to trigger an
unsafe placard (i.e. red tag) for the building during post-disaster
inspection.
"""
def __init__(self, ID, weight=1.0, description='',
repair_cost_CF=None, reconstruction_time_CF=None,
injuries_CF_set=None, affected_area=0., red_tag_CF=None):
self._ID = int(ID)
self._weight = float(weight)
self._description = description
self._repair_cost_CF = repair_cost_CF
self._reconstruction_time_CF = reconstruction_time_CF
self._injuries_CF_set = injuries_CF_set
self._affected_area = affected_area
self._red_tag_CF = red_tag_CF
@property
def description(self):
"""
Return the damage description.
"""
return self._description
@property
def weight(self):
"""
Return the weight of DS among the set of damage states in the DSG.
"""
return self._weight
def unit_repair_cost(self, quantity=None, sample_size=1, **kwargs):
"""
Sample the repair cost distribution and return the unit repair costs.
The unit repair costs shall be multiplied by the quantity of damaged
components to get the total repair costs for the components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the median
repair cost. Not used for repair cost models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the repair cost distribution. The
default value yields one sample.
Returns
-------
unit_repair_cost: float scalar or ndarray
Unit repair cost samples.
"""
output = None
if self._repair_cost_CF is not None:
output = self._repair_cost_CF.sample_unit_DV(quantity=quantity,
sample_size=sample_size,
**kwargs)
return output
def unit_reconstruction_time(self, quantity=None, sample_size=1,
**kwargs):
"""
Sample the reconstruction time distribution and return the unit
reconstruction times.
The unit reconstruction times shall be multiplied by the quantity of
damaged components to get the total reconstruction time for the
components in this DS.
Parameters
----------
quantity: float scalar, ndarray or Series, optional, default: None
Total quantity of damaged components that determines the magnitude
of median reconstruction time. Not used for reconstruction time
models with fixed median.
sample_size: int, optional, default: 1
Number of samples drawn from the reconstruction time distribution.
The default value yields one sample.
Returns
-------
unit_reconstruction_time: float scalar or ndarray
Unit reconstruction time samples.
"""
output = None
if self._reconstruction_time_CF is not None:
output = self._reconstruction_time_CF.sample_unit_DV(
quantity=quantity,
sample_size=sample_size, **kwargs)
return output
def red_tag_dmg_limit(self, sample_size=1, **kwargs):
"""
Sample the red tag consequence function and return the proportion of
components that needs to be damaged to trigger a red tag.
The red tag consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components.
Parameters
----------
sample_size: int, optional, default: 1
Number of samples drawn from the red tag consequence distribution.
The default value yields one sample.
Returns
-------
red_tag_trigger: float scalar or ndarray
Samples of damaged component proportions that trigger a red tag.
"""
output = None
if self._red_tag_CF is not None:
output = self._red_tag_CF.sample_unit_DV(sample_size=sample_size,
**kwargs)
return output
def unit_injuries(self, severity_level=0, sample_size=1, **kwargs):
"""
Sample the injury consequence function that corresponds to the
specified level of severity and return the injuries per component unit.
The injury consequence function is assumed to have a fixed median
value that does not depend on the quantity of damaged components (i.e.
the number of injuries per component unit does not change with the
quantity of components.)
Parameters
----------
severity_level: int, optional, default: 0
Identifies which injury consequence to sample. The indexing of
severity levels is zero-based.
sample_size: int, optional, default: 1
Number of samples drawn from the injury consequence distribution.
The default value yields one sample.
Returns
-------
unit_injuries: float scalar or ndarray
Unit injury samples.
"""
output = None
if len(self._injuries_CF_set) > severity_level:
CF = self._injuries_CF_set[severity_level]
if CF is not None:
output = CF.sample_unit_DV(sample_size=sample_size, **kwargs)
return output
class DamageStateGroup(object):
"""
A set of similar component damages that are controlled by the same EDP.
Damages are described in detail by the set of Damage State objects.
Damages in a DSG are assumed to occur at the same EDP magnitude. A Damage
State Group (DSG) might have only a single DS in the simplest case.
Parameters
----------
ID: int
DS_set: DamageState array
DS_set_kind: {'single', 'mutually_exclusive', 'simultaneous'}
Specifies the relationship among the DS in the set. When only one DS is
defined, use the 'single' option to improve calculation efficiency.
When multiple DS are present, the 'mutually_exclusive' option assumes
that the occurrence of one DS precludes the occurrence of another DS.
In such a case, the weights of the DS in the set shall sum up to 1.0.
In a 'simultaneous' case the DS are independent and unrelated. Hence,
they can occur at the same time and at least one of them has to occur.
"""
def __init__(self, ID, DS_set, DS_set_kind):
self._ID = ID
self._DS_set = DS_set
self._DS_set_kind = DS_set_kind
class PerformanceGroup(object):
"""
A group of similar components that experience the same demands.
FEMA P-58: Performance Groups (PGs) are a sub-categorization of fragility
groups. A performance group is a subset of fragility group components that
are subjected to the same demands (e.g. story drift, floor acceleration,
etc.).
In buildings, most performance groups shall be organized by story level.
There is no need to separate performance groups by direction, because the
direction of components within a group can be specified during definition,
and it will be taken into consideration in the analysis.
Parameters
----------
ID: int
location: int
Identifies the location of the components that belong to the PG. In a
building, location shall typically refer to the story of the building.
The location assigned to each PG shall be in agreement with the
locations assigned to the Demand objects.
quantity: RandomVariable
Specifies the quantity of components that belong to this PG.
Uncertainty in component quantities is considered by assigning a
random variable to this property.
fragility_functions: FragilityFunction list
Each fragility function describes the probability that the damage in
a subset of components will meet or exceed the damages described by
each damage state group in the DSG_set. Each is a multi-dimensional
function if there is more than one DSG. The number of functions shall
match the number of subsets defined by the `csg_weights` parameter.
DSG_set: DamageStateGroup array
A set of sequential Damage State Groups that describe the plausible set
of damage states of the components in the FG.
csg_weights: float ndarray, optional, default: [1.0]
Identifies subgroups of components within a PG, each of which have
perfectly correlated behavior. Correlation between the damage and
consequences among subgroups is controlled by the `correlation`
parameter of the FragilityGroup that the PG belongs to. Note that if
the components are assumed to have perfectly correlated behavior at the
PG level, assigning several subgroups to the PG is unnecessary. This
input shall be a list of weights that are applied to the quantity
of components to define the amount of components in each subgroup. The
sum of assigned weights shall be 1.0.
directions: int ndarray, optional, default: [0]
Identifies the direction of each subgroup of components within the PG.
The number of directions shall be identical to the number of
csg_weights assigned. In buildings, directions typically correspond to
the orientation of components in plane. Hence, using 0 or 1 to identify
'X' or 'Y' is recommended. These directions shall be in agreement with
the directions assigned to Demand objects.
"""
def __init__(self, ID, location, quantity, fragility_functions, DSG_set,
csg_weights=[1.0], direction=0):
self._ID = ID
self._location = location
self._quantity = quantity
if type(fragility_functions) == FragilityFunction:
self._FF_set = [fragility_functions,]
else:
self._FF_set = fragility_functions
self._DSG_set = DSG_set
self._csg_weights = csg_weights
self._direction = direction
def P_exc(self, EDP, DSG_ID):
"""
This is a convenience function that provides a shortcut to
fragility_function.P_exc(). It calculates the exceedance probability
of a given DSG conditioned on the provided EDP value(s). The fragility
functions assigned to the first subset are used for this calculation
because P_exc shall be identical among subsets.
Parameters
----------
EDP: float scalar or ndarray
Single EDP or numpy array of EDP values.
DSG_ID: int
Identifies the DSG of interest.
Returns
-------
P_exc: float scalar or ndarray
Exceedance probability of the given DSG at the EDP point(s).
"""
return self._FF_set[0].P_exc(EDP, DSG_ID)
class FragilityGroup(object):
"""
Groups a set of similar components from a loss-assessment perspective.
Characterizes a set of structural or non-structural components that have
similar construction characteristics, similar potential modes of damage,
similar probability of incurring those modes of damage, and similar
potential consequences resulting from their damage.
Parameters
----------
ID: int
demand_type: {'PID', 'PFA', 'PSD', 'PSA', 'ePGA', 'PGD'}
The type of Engineering Demand Parameter (EDP) that controls the damage
of the components in the FG. See Demand for acronym descriptions.
performance_groups: PerformanceGroup array
A list of performance groups that contain the components characterized
by the FG.
directional: bool, optional, default: True
Determines whether the components in the FG are sensitive to the
directionality of the EDP.
correlation: bool, optional, default: True
Determines whether the components within a Performance Group (PG) will
have correlated or uncorrelated damage. Correlated damage means that
all components will have the same damage state. In the uncorrelated
case, each component in the performance group will have its damage
state evaluated independently. Correlated damage reduces the required
computational effort for the calculation. Incorrect correlation
modeling will only slightly affect the mean estimates, but might
significantly change the dispersion of results.
demand_location_offset: int, optional, default: 0
Indicates if the location for the demand shall be different from the
location of the components. Damage to components of the ceiling, for
example, is controlled by demands on the floor above the one that the
components belong to. This can be indicated by setting the
demand_location_offset to 1 for such an FG.
incomplete: bool, optional, default: False
Indicates that the FG information is not complete and corresponding
results shall be treated with caution.
name: str, optional, default: ''
Provides a short description of the fragility group.
description: str, optional, default: ''
Provides a detailed description of the fragility group.
"""
def __init__(self, ID, demand_type, performance_groups,
directional=True, correlation=True, demand_location_offset=0,
incomplete=False, name='', description='', unit="ea"):
self._ID = ID
self._demand_type = demand_type
self._performance_groups = performance_groups
self._directional = directional
self._correlation = correlation
self._demand_location_offset = demand_location_offset
self._incomplete = incomplete
self._name = name
self._description = description
self._unit = unit
@property
def description(self):
"""
Return the fragility group description.
"""
return self._description
@property
def name(self):
"""
Return the name of the fragility group.
"""
return self._name | edian( |
board.component.ts | import { Component, OnInit, OnDestroy } from '@angular/core';
import { ActivatedRoute, Router } from '@angular/router';
import { DragulaService } from 'ng2-dragula';
import { DataService } from '../../shared/data.service';
import { Subscription } from 'rxjs';
import { DynamicDataService } from '../../shared/dynamic-data.service';
@Component({
selector: 'app-board',
templateUrl: './board.component.html',
styleUrls: ['./board.component.scss']
})
export class | implements OnInit, OnDestroy {
id: string;
board: Board;
private subs = new Subscription();
constructor(private route: ActivatedRoute,
private dragulaService: DragulaService,
private dataService: DataService,
private dynamicDataService: DynamicDataService,
private router: Router) {
}
ngOnInit() {
this.dragulaService.createGroup('CARDLISTS', {
direction: 'horizontal',
moves: (el, _source, handle) => {
const handles = el.getElementsByClassName('list-handle');
return handles.length > 0 && handles[0].contains(handle);
}
});
this.subs.add(this.dragulaService.dropModel('CARDLISTS')
.subscribe(({targetModel, targetIndex, item}) => {
item.position = this.calcPosition(targetModel, targetIndex);
this.dataService.patchList(item.id, { position: item.position })
.subscribe({error: e => console.error(e)});
})
);
this.subs.add(this.dragulaService.dropModel('CARDS')
.subscribe(({targetModel, targetIndex, item, target}) => {
const newListId = target.attributes.getNamedItem('data-list-id').value;
item.position = this.calcPosition(targetModel, targetIndex);
item.listId = newListId;
this.dataService.patchCard(item.id, { position: item.position, listId: newListId })
.subscribe({error: e => console.error(e)});
})
);
this.subs.add(this.dynamicDataService.onBoardDeleted.subscribe(({id}) => {
if (id === this.board.id) {
this.router.navigate(['/']);
}
}));
this.subs.add(this.route.params.subscribe(params => {
this.id = params['id'];
this.dynamicDataService.getBoard(this.id)
.subscribe(
result => this.board = result,
error => console.error(error)
);
}));
}
ngOnDestroy() {
this.dragulaService.destroy('CARDLISTS');
this.subs.unsubscribe();
}
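// Returns a fractional position for the dropped item halfway between its new neighbours;
// when dropped at the end of the list, the next "neighbour" is assumed one default spacing step away.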
calcPosition(targetArray, targetIndex): number {
const DEFAULT_POSITION_SPACING = 65535;
const prevNeighborPosition = targetIndex > 0
? targetArray[targetIndex - 1].position
: 0;
const nextNeighborPosition = targetIndex < targetArray.length - 1
? targetArray[targetIndex + 1].position
: prevNeighborPosition + DEFAULT_POSITION_SPACING;
// todo: if difference between neighbors is too tiny, normalize positions
return prevNeighborPosition + (nextNeighborPosition - prevNeighborPosition) / 2;
}
titleChanged(title: string) {
this.dataService.patchBoard(this.id, {
title
}).subscribe({error: e => console.error(e)});
}
newListSubmitted(title: string) {
this.dataService.createList({
boardId: this.id,
title
}).subscribe();
}
}
| BoardComponent |
overArgs.d.ts | import { overArgs } from "lodash";
export default overArgs; |
||
productTag.js | const { Model, DataTypes } = require('sequelize');
const sequelize = require('../config/connection');
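// Join-table model for the many-to-many relation between products and tags.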
class | extends Model {}
ProductTag.init(
{
id: {
type: DataTypes.INTEGER,
allowNull: false,
primaryKey: true,
autoIncrement: true,
},
product_id: {
type: DataTypes.INTEGER,
references: {
model: 'product',
key: 'id',
},
},
tag_id: {
type: DataTypes.INTEGER,
references: {
model: 'tag',
key: 'id',
},
},
},
{
sequelize,
timestamps: false,
freezeTableName: true,
underscored: true,
modelName: 'product_tag',
}
);
module.exports = ProductTag; | ProductTag |
module_instance.go | package addrs
import (
"bytes"
"fmt"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/hclsyntax"
"github.com/zclconf/go-cty/cty"
"github.com/zclconf/go-cty/cty/gocty"
"github.com/hashicorp/terraform-plugin-sdk/tfdiags"
)
// ModuleInstance is an address for a particular module instance within the
// dynamic module tree. This is an extension of the static traversals
// represented by type Module that deals with the possibility of a single
// module call producing multiple instances via the "count" and "for_each"
// arguments.
//
// Although ModuleInstance is a slice, it should be treated as immutable after
// creation.
type ModuleInstance []ModuleInstanceStep
var (
_ Targetable = ModuleInstance(nil)
)
func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagnostics) {
mi, remain, diags := parseModuleInstancePrefix(traversal)
if len(remain) != 0 {
if len(remain) == len(traversal) {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module instance address",
Detail: "A module instance address must begin with \"module.\".",
Subject: remain.SourceRange().Ptr(),
})
} else {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid module instance address",
Detail: "The module instance address is followed by additional invalid content.",
Subject: remain.SourceRange().Ptr(),
})
}
}
return mi, diags
}
// ParseModuleInstanceStr is a helper wrapper around ParseModuleInstance
// that takes a string and parses it with the HCL native syntax traversal parser
// before interpreting it.
//
// This should be used only in specialized situations since it will cause the
// created references to not have any meaningful source location information.
// If a reference string is coming from a source that should be identified in
// error messages then the caller should instead parse it directly using a
// suitable function from the HCL API and pass the traversal itself to
// ParseProviderConfigCompact.
//
// Error diagnostics are returned if either the parsing fails or the analysis
// of the traversal fails. There is no way for the caller to distinguish the
// two kinds of diagnostics programmatically. If error diagnostics are returned
// then the returned address is invalid.
func | (str string) (ModuleInstance, tfdiags.Diagnostics) {
var diags tfdiags.Diagnostics
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
diags = diags.Append(parseDiags)
if parseDiags.HasErrors() {
return nil, diags
}
addr, addrDiags := ParseModuleInstance(traversal)
diags = diags.Append(addrDiags)
return addr, diags
}
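// Illustrative usage (a sketch; the address string below is hypothetical):
//
//	addr, diags := ParseModuleInstanceStr(`module.network.module.subnet[0]`)
//	if diags.HasErrors() {
//		// handle the invalid address
//	}
//	_ = addr // a two-step ModuleInstance: "network", then "subnet" keyed by IntKey(0)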
func parseModuleInstancePrefix(traversal hcl.Traversal) (ModuleInstance, hcl.Traversal, tfdiags.Diagnostics) {
remain := traversal
var mi ModuleInstance
var diags tfdiags.Diagnostics
for len(remain) > 0 {
var next string
switch tt := remain[0].(type) {
case hcl.TraverseRoot:
next = tt.Name
case hcl.TraverseAttr:
next = tt.Name
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address operator",
Detail: "Module address prefix must be followed by dot and then a name.",
Subject: remain[0].SourceRange().Ptr(),
})
break
}
if next != "module" {
break
}
kwRange := remain[0].SourceRange()
remain = remain[1:]
// If we have the prefix "module" then it should be followed by a
// module call name, as an attribute, and then optionally an index step
// giving the instance key.
if len(remain) == 0 {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address operator",
Detail: "Prefix \"module.\" must be followed by a module name.",
Subject: &kwRange,
})
break
}
var moduleName string
switch tt := remain[0].(type) {
case hcl.TraverseAttr:
moduleName = tt.Name
default:
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address operator",
Detail: "Prefix \"module.\" must be followed by a module name.",
Subject: remain[0].SourceRange().Ptr(),
})
break
}
remain = remain[1:]
step := ModuleInstanceStep{
Name: moduleName,
}
if len(remain) > 0 {
if idx, ok := remain[0].(hcl.TraverseIndex); ok {
remain = remain[1:]
switch idx.Key.Type() {
case cty.String:
step.InstanceKey = StringKey(idx.Key.AsString())
case cty.Number:
var idxInt int
err := gocty.FromCtyValue(idx.Key, &idxInt)
if err == nil {
step.InstanceKey = IntKey(idxInt)
} else {
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address operator",
Detail: fmt.Sprintf("Invalid module index: %s.", err),
Subject: idx.SourceRange().Ptr(),
})
}
default:
// Should never happen, because no other types are allowed in traversal indices.
diags = diags.Append(&hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid address operator",
Detail: "Invalid module key: must be either a string or an integer.",
Subject: idx.SourceRange().Ptr(),
})
}
}
}
mi = append(mi, step)
}
var retRemain hcl.Traversal
if len(remain) > 0 {
retRemain = make(hcl.Traversal, len(remain))
copy(retRemain, remain)
// The first element here might be either a TraverseRoot or a
// TraverseAttr, depending on whether we had a module address on the
// front. To make life easier for callers, we'll normalize to always
// start with a TraverseRoot.
if tt, ok := retRemain[0].(hcl.TraverseAttr); ok {
retRemain[0] = hcl.TraverseRoot{
Name: tt.Name,
SrcRange: tt.SrcRange,
}
}
}
return mi, retRemain, diags
}
// UnkeyedInstanceShim is a shim method for converting a Module address to the
// equivalent ModuleInstance address that assumes that no modules have
// keyed instances.
//
// This is a temporary allowance for the fact that Terraform does not presently
// support "count" and "for_each" on modules, and thus graph building code that
// derives graph nodes from configuration must just assume unkeyed modules
// in order to construct the graph. At a later time when "count" and "for_each"
// support is added for modules, all callers of this method will need to be
// reworked to allow for keyed module instances.
func (m Module) UnkeyedInstanceShim() ModuleInstance {
path := make(ModuleInstance, len(m))
for i, name := range m {
path[i] = ModuleInstanceStep{Name: name}
}
return path
}
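// For example (illustrative), the static module address ["a", "b"] shims to the
// instance address "module.a.module.b", with no instance key on either step.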
// ModuleInstanceStep is a single traversal step through the dynamic module
// tree. It is used only as part of ModuleInstance.
type ModuleInstanceStep struct {
Name string
InstanceKey InstanceKey
}
// RootModuleInstance is the module instance address representing the root
// module, which is also the zero value of ModuleInstance.
var RootModuleInstance ModuleInstance
// IsRoot returns true if the receiver is the address of the root module instance,
// or false otherwise.
func (m ModuleInstance) IsRoot() bool {
return len(m) == 0
}
// Child returns the address of a child module instance of the receiver,
// identified by the given name and key.
func (m ModuleInstance) Child(name string, key InstanceKey) ModuleInstance {
ret := make(ModuleInstance, 0, len(m)+1)
ret = append(ret, m...)
return append(ret, ModuleInstanceStep{
Name: name,
InstanceKey: key,
})
}
// Parent returns the address of the parent module instance of the receiver, or
// the receiver itself if there is no parent (if it's the root module address).
func (m ModuleInstance) Parent() ModuleInstance {
if len(m) == 0 {
return m
}
return m[:len(m)-1]
}
// String returns a string representation of the receiver, in the format used
// within e.g. user-provided resource addresses.
//
// The address of the root module has the empty string as its representation.
func (m ModuleInstance) String() string {
var buf bytes.Buffer
sep := ""
for _, step := range m {
buf.WriteString(sep)
buf.WriteString("module.")
buf.WriteString(step.Name)
if step.InstanceKey != NoKey {
buf.WriteString(step.InstanceKey.String())
}
sep = "."
}
return buf.String()
}
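// For example (illustrative, assuming IntKey renders as "[0]"), a ModuleInstance with
// steps {Name: "network"} and {Name: "subnet", InstanceKey: IntKey(0)} renders as
// "module.network.module.subnet[0]", while the root module renders as "".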
// Less returns true if the receiver should sort before the given other value
// in a sorted list of addresses.
func (m ModuleInstance) Less(o ModuleInstance) bool {
if len(m) != len(o) {
// Shorter path sorts first.
return len(m) < len(o)
}
for i := range m {
mS, oS := m[i], o[i]
switch {
case mS.Name != oS.Name:
return mS.Name < oS.Name
case mS.InstanceKey != oS.InstanceKey:
return InstanceKeyLess(mS.InstanceKey, oS.InstanceKey)
}
}
return false
}
// Ancestors returns a slice containing the receiver and all of its ancestor
// module instances, all the way up to (and including) the root module.
// The result is ordered by depth, with the root module always first.
//
// Since the result always includes the root module, a caller may choose to
// ignore it by slicing the result with [1:].
func (m ModuleInstance) Ancestors() []ModuleInstance {
ret := make([]ModuleInstance, 0, len(m)+1)
for i := 0; i <= len(m); i++ {
ret = append(ret, m[:i])
}
return ret
}
// Call returns the module call address that corresponds to the given module
// instance, along with the address of the module instance that contains it.
//
// There is no call for the root module, so this method will panic if called
// on the root module address.
//
// A single module call can produce potentially many module instances, so the
// result discards any instance key that might be present on the last step
// of the instance. To retain this, use CallInstance instead.
//
// In practice, this just turns the last element of the receiver into a
// ModuleCall and then returns a slice of the receiver that excludes that
// last part. This is just a convenience for situations where a call address
// is required, such as when dealing with *Reference and Referencable values.
func (m ModuleInstance) Call() (ModuleInstance, ModuleCall) {
if len(m) == 0 {
panic("cannot produce ModuleCall for root module")
}
inst, lastStep := m[:len(m)-1], m[len(m)-1]
return inst, ModuleCall{
Name: lastStep.Name,
}
}
// CallInstance returns the module call instance address that corresponds to
// the given module instance, along with the address of the module instance
// that contains it.
//
// There is no call for the root module, so this method will panic if called
// on the root module address.
//
// In practice, this just turns the last element of the receiver into a
// ModuleCallInstance and then returns a slice of the receiver that excludes
// that last part. This is just a convenience for situations where a call
// address is required, such as when dealing with *Reference and Referencable
// values.
func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
if len(m) == 0 {
panic("cannot produce ModuleCallInstance for root module")
}
inst, lastStep := m[:len(m)-1], m[len(m)-1]
return inst, ModuleCallInstance{
Call: ModuleCall{
Name: lastStep.Name,
},
Key: lastStep.InstanceKey,
}
}
// TargetContains implements Targetable by returning true if the given other
// address either matches the receiver, is a sub-module-instance of the
// receiver, or is a targetable absolute address within a module that
// is contained within the receiver.
func (m ModuleInstance) TargetContains(other Targetable) bool {
switch to := other.(type) {
case ModuleInstance:
if len(to) < len(m) {
// Can't be contained if the path is shorter
return false
}
// Other is contained if its steps match for the length of our own path.
for i, ourStep := range m {
otherStep := to[i]
if ourStep != otherStep {
return false
}
}
// If we fall out here then the prefix matched, so it's contained.
return true
case AbsResource:
return m.TargetContains(to.Module)
case AbsResourceInstance:
return m.TargetContains(to.Module)
default:
return false
}
}
func (m ModuleInstance) targetableSigil() {
// ModuleInstance is targetable
}
| ParseModuleInstanceStr |
config.rs | // Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#[derive(Debug, PartialEq)]
#[allow(dead_code)]
pub enum | {
// Mac OS parameterized by the major and minor version numbers, such as MacOS(10, 14) for MacOS 10.14.
MacOS(u32, u32),
// Linux.
Linux,
}
#[derive(Debug, PartialEq)]
pub struct PreflightConfig {
// The operating system preflight is running on.
pub system: OperatingSystem,
}
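// Illustrative construction (the version numbers are examples only):
//
// let config = PreflightConfig { system: OperatingSystem::MacOS(10, 15) };
// assert_eq!(config.system, OperatingSystem::MacOS(10, 15));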
| OperatingSystem |
gpu_display_wl.rs | // Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! Crate for displaying simple surfaces and GPU buffers over wayland.
extern crate base;
extern crate data_model;
#[path = "dwl.rs"]
mod dwl;
use dwl::*;
use crate::{
DisplayT, EventDeviceKind, GpuDisplayError, GpuDisplayEvents, GpuDisplayFramebuffer,
GpuDisplayImport, GpuDisplayResult, GpuDisplaySurface, SurfaceType,
};
use linux_input_sys::virtio_input_event;
use std::cell::Cell;
use std::cmp::max;
use std::ffi::{CStr, CString};
use std::mem::zeroed;
use std::path::Path;
use std::ptr::null;
use base::{
error, round_up_to_page_size, AsRawDescriptor, MemoryMapping, MemoryMappingBuilder,
RawDescriptor, SharedMemory,
};
use data_model::VolatileMemory;
const BUFFER_COUNT: usize = 3;
const BYTES_PER_PIXEL: u32 = 4;
struct DwlContext(*mut dwl_context);
impl Drop for DwlContext {
fn drop(&mut self) {
if !self.0.is_null() {
// Safe given that we checked the pointer for non-null and it should always be of the
// correct type.
unsafe {
dwl_context_destroy(&mut self.0);
}
}
}
}
impl AsRawDescriptor for DwlContext {
fn as_raw_descriptor(&self) -> RawDescriptor {
// Safe given that the context pointer is valid.
unsafe { dwl_context_fd(self.0) }
}
}
struct DwlDmabuf(*mut dwl_dmabuf);
impl GpuDisplayImport for DwlDmabuf {}
impl Drop for DwlDmabuf {
fn drop(&mut self) {
if !self.0.is_null() {
// Safe given that we checked the pointer for non-null and it should always be of the
// correct type.
unsafe {
dwl_dmabuf_destroy(&mut self.0);
}
}
}
}
struct DwlSurface(*mut dwl_surface);
impl Drop for DwlSurface {
fn drop(&mut self) {
if !self.0.is_null() {
// Safe given that we checked the pointer for non-null and it should always be of the
// correct type.
unsafe {
dwl_surface_destroy(&mut self.0);
}
}
}
}
struct WaylandSurface {
surface: DwlSurface,
row_size: u32,
buffer_size: usize,
buffer_index: Cell<usize>,
buffer_mem: MemoryMapping,
}
impl WaylandSurface {
fn surface(&self) -> *mut dwl_surface {
self.surface.0
}
}
impl GpuDisplaySurface for WaylandSurface {
fn surface_descriptor(&self) -> u64 {
// Safe if the surface is valid.
let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
pointer as u64
}
fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
let framebuffer = self
.buffer_mem
.get_slice(buffer_index * self.buffer_size, self.buffer_size)
.ok()?;
Some(GpuDisplayFramebuffer::new(
framebuffer,
self.row_size,
BYTES_PER_PIXEL,
))
}
fn next_buffer_in_use(&self) -> bool {
let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
// Safe because only a valid surface and buffer index is used.
unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
}
fn close_requested(&self) -> bool {
// Safe because only a valid surface is used.
unsafe { dwl_surface_close_requested(self.surface()) }
}
fn flip(&mut self) {
self.buffer_index
.set((self.buffer_index.get() + 1) % BUFFER_COUNT);
// Safe because only a valid surface and buffer index is used.
unsafe {
dwl_surface_flip(self.surface(), self.buffer_index.get());
}
}
fn flip_to(&mut self, import_id: u32) {
// Safe because only a valid surface and import_id is used.
unsafe { dwl_surface_flip_to(self.surface(), import_id) }
}
fn commit(&mut self) -> GpuDisplayResult<()> {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_commit(self.surface());
}
Ok(())
}
fn | (&mut self, x: u32, y: u32) {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_set_position(self.surface(), x, y);
}
}
fn set_scanout_id(&mut self, scanout_id: u32) {
// Safe because only a valid surface is used.
unsafe {
dwl_surface_set_scanout_id(self.surface(), scanout_id);
}
}
}
/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
pub struct DisplayWl {
ctx: DwlContext,
current_event: Option<dwl_event>,
}
impl DisplayWl {
/// Opens a fresh connection to the compositor.
pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl> {
// The dwl_context_new call should always be safe to call, and we check its result.
let ctx = DwlContext(unsafe { dwl_context_new() });
if ctx.0.is_null() {
return Err(GpuDisplayError::Allocate);
}
// The dwl_context_setup call is always safe to call given that the supplied context is
// valid, and we check its result.
let cstr_path = match wayland_path.map(|p| p.as_os_str().to_str()) {
Some(Some(s)) => match CString::new(s) {
Ok(cstr) => Some(cstr),
Err(_) => return Err(GpuDisplayError::InvalidPath),
},
Some(None) => return Err(GpuDisplayError::InvalidPath),
None => None,
};
// This grabs a pointer to cstr_path without moving the CString into the .map closure
// accidentally, which triggers a really hard-to-catch use-after-free in
// dwl_context_setup.
let cstr_path_ptr = cstr_path
.as_ref()
.map(|s: &CString| CStr::as_ptr(s))
.unwrap_or(null());
let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) };
if !setup_success {
return Err(GpuDisplayError::Connect);
}
Ok(DisplayWl {
ctx,
current_event: None,
})
}
fn ctx(&self) -> *mut dwl_context {
self.ctx.0
}
fn pop_event(&self) -> dwl_event {
// Safe because dwl_context_next_event just reads the next event from the context's circular buffer.
unsafe {
let mut ev = zeroed();
dwl_context_next_event(self.ctx(), &mut ev);
ev
}
}
}
impl DisplayT for DisplayWl {
fn pending_events(&self) -> bool {
// Safe because the function just queries the values of two variables in a context.
unsafe { dwl_context_pending_events(self.ctx()) }
}
fn next_event(&mut self) -> GpuDisplayResult<u64> {
let ev = self.pop_event();
let descriptor = ev.surface_descriptor as u64;
self.current_event = Some(ev);
Ok(descriptor)
}
fn handle_next_event(
&mut self,
_surface: &mut Box<dyn GpuDisplaySurface>,
) -> Option<GpuDisplayEvents> {
// Should not panic since the common layer only calls this when an event occurs.
let event = self.current_event.take().unwrap();
match event.event_type {
DWL_EVENT_TYPE_KEYBOARD_ENTER => None,
DWL_EVENT_TYPE_KEYBOARD_LEAVE => None,
DWL_EVENT_TYPE_KEYBOARD_KEY => {
let linux_keycode = event.params[0] as u16;
let pressed = event.params[1] == DWL_KEYBOARD_KEY_STATE_PRESSED;
let events = vec![virtio_input_event::key(linux_keycode, pressed)];
Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Keyboard,
})
}
// TODO(tutankhamen): both slot and track_id are always 0, because all the input
// events come from the mouse device, i.e. only one touch is possible at a time.
// Full MT protocol has to be implemented and properly wired later.
DWL_EVENT_TYPE_TOUCH_DOWN | DWL_EVENT_TYPE_TOUCH_MOTION => {
let events = vec![
virtio_input_event::multitouch_slot(0),
virtio_input_event::multitouch_tracking_id(0),
virtio_input_event::multitouch_absolute_x(max(0, event.params[0])),
virtio_input_event::multitouch_absolute_y(max(0, event.params[1])),
];
Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Touchscreen,
})
}
DWL_EVENT_TYPE_TOUCH_UP => {
let events = vec![
virtio_input_event::multitouch_slot(0),
virtio_input_event::multitouch_tracking_id(-1),
];
Some(GpuDisplayEvents {
events,
device_type: EventDeviceKind::Touchscreen,
})
}
_ => {
error!("unknown event type {}", event.event_type);
None
}
}
}
fn flush(&self) {
// Safe given that the context pointer is valid.
unsafe {
dwl_context_dispatch(self.ctx());
}
}
fn create_surface(
&mut self,
parent_surface_id: Option<u32>,
surface_id: u32,
width: u32,
height: u32,
surf_type: SurfaceType,
) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
let parent_id = parent_surface_id.unwrap_or(0);
let row_size = width * BYTES_PER_PIXEL;
let fb_size = row_size * height;
let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
let buffer_shm = SharedMemory::named("GpuDisplaySurface", buffer_size as u64)?;
let buffer_mem = MemoryMappingBuilder::new(buffer_size)
.from_shared_memory(&buffer_shm)
.build()
.unwrap();
let dwl_surf_flags = match surf_type {
SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA,
SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT,
};
// Safe because only a valid context, parent ID (if non-zero), and buffer FD are used.
// The returned surface is checked for validity before being filed away.
let surface = DwlSurface(unsafe {
dwl_context_surface_new(
self.ctx(),
parent_id,
surface_id,
buffer_shm.as_raw_descriptor(),
buffer_size,
fb_size as usize,
width,
height,
row_size,
dwl_surf_flags,
)
});
if surface.0.is_null() {
return Err(GpuDisplayError::CreateSurface);
}
Ok(Box::new(WaylandSurface {
surface,
row_size,
buffer_size: fb_size as usize,
buffer_index: Cell::new(0),
buffer_mem,
}))
}
fn import_memory(
&mut self,
import_id: u32,
descriptor: &dyn AsRawDescriptor,
offset: u32,
stride: u32,
modifiers: u64,
width: u32,
height: u32,
fourcc: u32,
) -> GpuDisplayResult<Box<dyn GpuDisplayImport>> {
// Safe given that the context pointer is valid. Any other invalid parameters would be
// rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
// before filing it away.
let dmabuf = DwlDmabuf(unsafe {
dwl_context_dmabuf_new(
self.ctx(),
import_id,
descriptor.as_raw_descriptor(),
offset,
stride,
modifiers,
width,
height,
fourcc,
)
});
if dmabuf.0.is_null() {
return Err(GpuDisplayError::FailedImport);
}
Ok(Box::new(dmabuf))
}
}
impl AsRawDescriptor for DisplayWl {
fn as_raw_descriptor(&self) -> RawDescriptor {
// Safe given that the context pointer is valid.
unsafe { dwl_context_fd(self.ctx.0) }
}
}
| set_position |
util.js | /* Copyright (c) 2017 Red Hat, Inc. */
Array.prototype.extend = function (other_array) {
/* you should include a test to check whether other_array really is an array */
var i = 0;
for (i = 0; i < other_array.length; i++) {
this.push(other_array[i]);
}
};
var math = require('mathjs');
var yaml = require('js-yaml');
var nunjucks = require('nunjucks');
function | (text) {
var variables = [];
var tokenizer = nunjucks.lexer.lex(text, {});
var token = tokenizer.nextToken();
while (token !== null) {
if (token.type === 'variable-start') {
token = tokenizer.nextToken();
while (token !== null) {
if (token.type === 'symbol') {
variables.push(token.value);
}
if (token.type === 'variable-end') {
break;
}
token = tokenizer.nextToken();
}
}
token = tokenizer.nextToken();
}
return variables;
}
exports.nunjucks_find_variables = nunjucks_find_variables;
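// Illustrative usage (the template text is an example only): for 'Hi {{ name }}!',
// nunjucks_find_variables returns ['name'], since only symbol tokens found inside
// {{ ... }} blocks are collected.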
function parse_variables (variables) {
var parsed_variables = {};
try {
parsed_variables = JSON.parse(variables);
} catch (err) {
try {
parsed_variables = yaml.safeLoad(variables);
} catch (err) {
parsed_variables = {};
}
}
if (parsed_variables === undefined) {
return {};
}
return parsed_variables;
}
exports.parse_variables = parse_variables;
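// Illustrative usage: parse_variables('{"port": 22}') returns {port: 22}. Input that is
// not valid JSON falls back to YAML parsing, and anything unparseable yields {}.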
function noop () {
}
exports.noop = noop;
function natural_numbers (start) {
var counter = start;
return function () {return counter += 1;};
}
exports.natural_numbers = natural_numbers;
function distance (x1, y1, x2, y2) {
return Math.sqrt(Math.pow(x2 - x1, 2) + Math.pow(y2 - y1, 2));
}
exports.distance = distance;
// polarToCartesian
// @wdebeaum, @opsb
// from http://stackoverflow.com/questions/5736398/how-to-calculate-the-svg-path-for-an-arc-of-a-circle
function polarToCartesian(centerX, centerY, radius, angleInDegrees) {
var angleInRadians = (angleInDegrees-90) * Math.PI / 180.0;
return {
x: centerX + (radius * Math.cos(angleInRadians)),
y: centerY + (radius * Math.sin(angleInRadians))
};
}
// describeArc
// @wdebeaum, @opsb
// from http://stackoverflow.com/questions/5736398/how-to-calculate-the-svg-path-for-an-arc-of-a-circle
function describeArc(x, y, radius, startAngle, endAngle){
var start = polarToCartesian(x, y, radius, endAngle);
var end = polarToCartesian(x, y, radius, startAngle);
var largeArcFlag = endAngle - startAngle <= 180 ? "0" : "1";
var d = [
"M", start.x, start.y,
"A", radius, radius, 0, largeArcFlag, 0, end.x, end.y
].join(" ");
return d;
}
exports.describeArc = describeArc;
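// Illustrative usage: describeArc(50, 50, 40, 0, 90) returns an SVG path string of the
// form "M <x> <y> A 40 40 0 0 0 <x> <y>", a quarter arc of radius 40 centered at (50, 50).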
function pDistanceLine(x, y, x1, y1, x2, y2) {
//Code from http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
//Joshua
// Find the dot product of two vectors <A, B>, <C, D>
// Divide by the length squared of <C, D>
// Use scalar project to find param
//
var A = x - x1;
var B = y - y1;
var C = x2 - x1;
var D = y2 - y1;
var dot = A * C + B * D;
var len_sq = C * C + D * D;
var param = -1;
if (len_sq !== 0) {
//in case of 0 length line
param = dot / len_sq;
}
var xx, yy;
//Find a point xx, yy where the projection and the <C, D> vector intersect.
//If less than 0 use x1, y1 as the closest point.
//If greater than 1 use x2, y2 as the closest point.
//If between 0 and 1 use the projection intersection xx, yy
if (param < 0) {
xx = x1;
yy = y1;
}
else if (param > 1) {
xx = x2;
yy = y2;
}
else {
xx = x1 + param * C;
yy = y1 + param * D;
}
return {x1:x, y1:y, x2: xx, y2: yy};
}
exports.pDistanceLine = pDistanceLine;
function pDistance(x, y, x1, y1, x2, y2) {
//Code from http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
//Joshua
// Find the dot product of two vectors <A, B>, <C, D>
// Divide by the length squared of <C, D>
// Use scalar project to find param
//
var A = x - x1;
var B = y - y1;
var C = x2 - x1;
var D = y2 - y1;
var dot = A * C + B * D;
var len_sq = C * C + D * D;
var param = -1;
if (len_sq !== 0) {
//in case of 0 length line
param = dot / len_sq;
}
var xx, yy;
//Find a point xx, yy where the projection and the <C, D> vector intersect.
//If less than 0 use x1, y1 as the closest point.
//If greater than 1 use x2, y2 as the closest point.
//If between 0 and 1 use the projection intersection xx, yy
if (param < 0) {
xx = x1;
yy = y1;
}
else if (param > 1) {
xx = x2;
yy = y2;
}
else {
xx = x1 + param * C;
yy = y1 + param * D;
}
var dx = x - xx;
var dy = y - yy;
return Math.sqrt(dx * dx + dy * dy);
}
exports.pDistance = pDistance;
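// Illustrative usage: pDistance(0, 1, -1, 0, 1, 0) is 1, the perpendicular distance from
// the point (0, 1) to the segment joining (-1, 0) and (1, 0).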
function cross_z_pos(x, y, x1, y1, x2, y2) {
var A = x - x1;
var B = y - y1;
var C = x2 - x1;
var D = y2 - y1;
return math.cross([A, B, 0], [C, D, 0])[2] > 0;
}
exports.cross_z_pos = cross_z_pos;
function intersection (x1, y1, x2, y2, x3, y3, x4, y4) {
//Find the point where lines through x1, y1, x2, y2 and x3, y3, x4, y4 intersect.
//
var Aslope;
var Aintercept;
var Bslope;
var Bintercept;
if ((x2 - x1) !== 0 && (x4 - x3) !== 0) {
Aslope = (y2 - y1)/(x2 - x1);
Aintercept = y1 - Aslope * x1;
Bslope = (y4 - y3)/(x4 - x3);
Bintercept = y3 - Bslope * x3;
var xi = (Bintercept - Aintercept) / (Aslope - Bslope);
var yi = Bslope * xi + Bintercept;
return {x: xi, y: yi};
}
if ((x2 - x1) === 0 && (x4 - x3) === 0) {
return {x: null, y: null};
}
if ((x2 - x1) === 0) {
Bslope = (y4 - y3)/(x4 - x3);
Bintercept = y3 - Bslope * x3;
return {x: x1, y: Bslope * x1 + Bintercept};
}
if ((x4 - x3) === 0) {
Aslope = (y2 - y1)/(x2 - x1);
Aintercept = y1 - Aslope * x1;
return {x: x3, y: Aslope * x3 + Aintercept};
}
}
exports.intersection = intersection;
function pCase(x, y, x1, y1, x2, y2) {
//Code from http://stackoverflow.com/questions/849211/shortest-distance-between-a-point-and-a-line-segment
//Joshua
// Find the dot product of two vectors <A, B>, <C, D>
// Divide by the length squared of <C, D>
// Use scalar project to find param
//
var A = x - x1;
var B = y - y1;
var C = x2 - x1;
var D = y2 - y1;
var dot = A * C + B * D;
var len_sq = C * C + D * D;
var param = -1;
if (len_sq !== 0) {
//in case of 0 length line
param = dot / len_sq;
}
return param;
}
exports.pCase = pCase;
| nunjucks_find_variables |
rabbitmq_config.go | package configuration
import "text/template"
// RabbitmqConfig is the template of the Rabbitmq service configuration.
var RabbitmqConfig = template.Must(template.New("").Parse(`#!/bin/bash
echo $RABBITMQ_ERLANG_COOKIE > /var/lib/rabbitmq/.erlang.cookie
chmod 0600 /var/lib/rabbitmq/.erlang.cookie
export RABBITMQ_NODENAME=rabbit@${POD_IP}
if [[ $(grep $POD_IP /etc/rabbitmq/0) ]] ; then
rabbitmq-server
else
rabbitmqctl --node rabbit@${POD_IP} forget_cluster_node rabbit@${POD_IP}
rabbitmqctl --node rabbit@$(cat /etc/rabbitmq/0) ping
while [[ $? -ne 0 ]]; do
rabbitmqctl --node rabbit@$(cat /etc/rabbitmq/0) ping
done
rabbitmq-server -detached
rabbitmqctl --node rabbit@$(cat /etc/rabbitmq/0) node_health_check
while [[ $? -ne 0 ]]; do
rabbitmqctl --node rabbit@$(cat /etc/rabbitmq/0) node_health_check
done
rabbitmqctl stop_app
sleep 2
rabbitmqctl join_cluster rabbit@$(cat /etc/rabbitmq/0)
rabbitmqctl shutdown
rabbitmq-server
fi
`))
// RabbitmqDefinition is the template for Rabbitmq user/vhost configuration
var RabbitmqDefinition = template.Must(template.New("").Parse(`{ | {
"name": "{{ .RabbitmqUser }}",
"password_hash": "{{ .RabbitmqPassword }}",
"tags": "administrator"
}
],
"vhosts": [
{
"name": "{{ .RabbitmqVhost }}"
}
],
"permissions": [
{
"user": "{{ .RabbitmqUser }}",
"vhost": "{{ .RabbitmqVhost }}",
"configure": ".*",
"write": ".*",
"read": ".*"
}
],
}
`)) | "users": [ |
httpmessage.rs | use bytes::{Bytes, BytesMut};
use encoding::all::UTF_8;
use encoding::label::encoding_from_whatwg_label;
use encoding::types::{DecoderTrap, Encoding};
use encoding::EncodingRef;
use futures::{Async, Future, Poll, Stream};
use http::{header, HeaderMap};
use mime::Mime;
use serde::de::DeserializeOwned;
use serde_urlencoded;
use std::str;
use error::{
ContentTypeError, ParseError, PayloadError, ReadlinesError, UrlencodedError,
};
use header::Header;
use json::JsonBody;
use multipart::Multipart;
/// Trait that implements general purpose operations on http messages
pub trait HttpMessage: Sized {
/// Type of message payload stream
type Stream: Stream<Item = Bytes, Error = PayloadError> + Sized;
/// Read the message headers.
fn headers(&self) -> &HeaderMap;
/// Message payload stream
fn payload(&self) -> Self::Stream;
#[doc(hidden)]
/// Get a header
fn get_header<H: Header>(&self) -> Option<H>
where
Self: Sized,
{
if self.headers().contains_key(H::name()) {
H::parse(self).ok()
} else {
None
}
}
/// Read the request content type. If the request does not contain a
/// *Content-Type* header, an empty string is returned.
fn content_type(&self) -> &str {
if let Some(content_type) = self.headers().get(header::CONTENT_TYPE) {
if let Ok(content_type) = content_type.to_str() {
return content_type.split(';').next().unwrap().trim();
}
}
""
}
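// For example (illustrative): with the header "Content-Type: text/html; charset=utf-8",
// content_type() returns "text/html"; with no Content-Type header it returns "".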
/// Get content type encoding
///
/// UTF-8 is used by default if the request charset is not set.
fn encoding(&self) -> Result<EncodingRef, ContentTypeError> {
if let Some(mime_type) = self.mime_type()? {
if let Some(charset) = mime_type.get_param("charset") {
if let Some(enc) = encoding_from_whatwg_label(charset.as_str()) {
Ok(enc)
} else {
Err(ContentTypeError::UnknownEncoding)
}
} else {
Ok(UTF_8)
}
} else {
Ok(UTF_8)
}
}
/// Convert the request content type to a known mime type.
fn mime_type(&self) -> Result<Option<Mime>, ContentTypeError> {
if let Some(content_type) = self.headers().get(header::CONTENT_TYPE) {
if let Ok(content_type) = content_type.to_str() {
return match content_type.parse() {
Ok(mt) => Ok(Some(mt)),
Err(_) => Err(ContentTypeError::ParseError),
};
} else {
return Err(ContentTypeError::ParseError);
}
}
Ok(None)
}
/// Check if request has chunked transfer encoding
fn chunked(&self) -> Result<bool, ParseError> {
if let Some(encodings) = self.headers().get(header::TRANSFER_ENCODING) {
if let Ok(s) = encodings.to_str() {
Ok(s.to_lowercase().contains("chunked"))
} else {
Err(ParseError::Header)
}
} else {
Ok(false)
}
}
/// Load http message body.
///
/// By default only 256KB of the payload is read into memory; beyond that,
/// `PayloadError::Overflow` is returned. Use the `MessageBody::limit()`
/// method to change the upper limit.
///
/// ## Server example
///
/// ```rust
/// # extern crate bytes;
/// # extern crate actix_web;
/// # extern crate futures;
/// # #[macro_use] extern crate serde_derive;
/// use actix_web::{
/// AsyncResponder, FutureResponse, HttpMessage, HttpRequest, HttpResponse,
/// };
/// use bytes::Bytes;
/// use futures::future::Future;
///
/// fn index(mut req: HttpRequest) -> FutureResponse<HttpResponse> {
/// req.body() // <- get Body future
/// .limit(1024) // <- change max size of the body to a 1kb
/// .from_err()
/// .and_then(|bytes: Bytes| { // <- complete body
/// println!("==== BODY ==== {:?}", bytes);
/// Ok(HttpResponse::Ok().into())
/// }).responder()
/// }
/// # fn main() {}
/// ```
fn body(&self) -> MessageBody<Self> {
MessageBody::new(self)
}
/// Parse `application/x-www-form-urlencoded` encoded request's body.
/// Return `UrlEncoded` future. Form can be deserialized to any type that
/// implements `Deserialize` trait from *serde*.
///
/// Returns error:
///
/// * content type is not `application/x-www-form-urlencoded`
/// * content-length is greater than 256k
///
/// ## Server example
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate futures;
/// # use futures::Future;
/// # use std::collections::HashMap;
/// use actix_web::{FutureResponse, HttpMessage, HttpRequest, HttpResponse};
///
/// fn index(mut req: HttpRequest) -> FutureResponse<HttpResponse> {
/// Box::new(
/// req.urlencoded::<HashMap<String, String>>() // <- get UrlEncoded future
/// .from_err()
/// .and_then(|params| { // <- url encoded parameters
/// println!("==== BODY ==== {:?}", params);
/// Ok(HttpResponse::Ok().into())
/// }),
/// )
/// }
/// # fn main() {}
/// ```
fn urlencoded<T: DeserializeOwned>(&self) -> UrlEncoded<Self, T> {
UrlEncoded::new(self)
}
/// Parse `application/json` encoded body.
/// Return `JsonBody<T>` future. It resolves to a `T` value.
///
/// Returns error:
///
/// * content type is not `application/json`
/// * content length is greater than 256k
///
/// ## Server example
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate futures;
/// # #[macro_use] extern crate serde_derive;
/// use actix_web::*;
/// use futures::future::{ok, Future};
///
/// #[derive(Deserialize, Debug)]
/// struct MyObj {
/// name: String,
/// }
///
/// fn index(mut req: HttpRequest) -> Box<Future<Item = HttpResponse, Error = Error>> {
/// req.json() // <- get JsonBody future
/// .from_err()
/// .and_then(|val: MyObj| { // <- deserialized value
/// println!("==== BODY ==== {:?}", val);
/// Ok(HttpResponse::Ok().into())
/// }).responder()
/// }
/// # fn main() {}
/// ```
fn json<T: DeserializeOwned>(&self) -> JsonBody<Self, T> {
JsonBody::new(self)
}
/// Return a stream that processes the HTTP payload as multipart data.
///
/// Content-type: multipart/form-data;
///
/// ## Server example
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate env_logger;
/// # extern crate futures;
/// # extern crate actix;
/// # use std::str;
/// # use actix_web::*;
/// # use actix::FinishStream;
/// # use futures::{Future, Stream};
/// # use futures::future::{ok, result, Either};
/// fn index(mut req: HttpRequest) -> Box<Future<Item = HttpResponse, Error = Error>> {
/// req.multipart().from_err() // <- get multipart stream for current request
/// .and_then(|item| match item { // <- iterate over multipart items
/// multipart::MultipartItem::Field(field) => {
/// // Field in turn is stream of *Bytes* object
/// Either::A(field.from_err()
/// .map(|c| println!("-- CHUNK: \n{:?}", str::from_utf8(&c)))
/// .finish())
/// },
/// multipart::MultipartItem::Nested(mp) => {
/// // Or item could be nested Multipart stream
/// Either::B(ok(()))
/// }
/// })
/// .finish() // <- Stream::finish() combinator from actix
/// .map(|_| HttpResponse::Ok().into())
/// .responder()
/// }
/// # fn main() {}
/// ```
fn multipart(&self) -> Multipart<Self::Stream> {
let boundary = Multipart::boundary(self.headers());
Multipart::new(boundary, self.payload())
}
/// Return a stream of lines.
fn readlines(&self) -> Readlines<Self> {
Readlines::new(self)
}
}
/// Stream to read request line by line.
pub struct Readlines<T: HttpMessage> {
stream: T::Stream,
buff: BytesMut,
limit: usize,
checked_buff: bool,
encoding: EncodingRef,
err: Option<ReadlinesError>,
}
impl<T: HttpMessage> Readlines<T> {
/// Create a new stream to read request line by line.
fn new(req: &T) -> Self {
let encoding = match req.encoding() {
Ok(enc) => enc,
Err(err) => return Self::err(req, err.into()),
};
Readlines {
stream: req.payload(),
buff: BytesMut::with_capacity(262_144),
limit: 262_144,
checked_buff: true,
err: None,
encoding,
}
}
/// Change max line size. By default max size is 256Kb
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self
}
fn err(req: &T, err: ReadlinesError) -> Self {
Readlines {
stream: req.payload(),
buff: BytesMut::new(),
limit: 262_144,
checked_buff: true,
encoding: UTF_8,
err: Some(err),
}
}
}
impl<T: HttpMessage + 'static> Stream for Readlines<T> {
type Item = String;
type Error = ReadlinesError;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
if let Some(err) = self.err.take() {
return Err(err);
}
// check if there is a newline in the buffer
if !self.checked_buff {
let mut found: Option<usize> = None;
for (ind, b) in self.buff.iter().enumerate() {
if *b == b'\n' {
found = Some(ind);
break;
}
}
if let Some(ind) = found {
// check if line is longer than limit
if ind + 1 > self.limit {
return Err(ReadlinesError::LimitOverflow);
}
let enc: *const Encoding = self.encoding as *const Encoding;
let line = if enc == UTF_8 {
str::from_utf8(&self.buff.split_to(ind + 1))
.map_err(|_| ReadlinesError::EncodingError)?
.to_owned()
} else {
self.encoding
.decode(&self.buff.split_to(ind + 1), DecoderTrap::Strict)
.map_err(|_| ReadlinesError::EncodingError)?
};
return Ok(Async::Ready(Some(line)));
}
self.checked_buff = true;
}
// poll req for more bytes
match self.stream.poll() {
Ok(Async::Ready(Some(mut bytes))) => {
// check if there is a newline in bytes
let mut found: Option<usize> = None;
for (ind, b) in bytes.iter().enumerate() {
if *b == b'\n' {
found = Some(ind);
break;
}
}
if let Some(ind) = found {
// check if line is longer than limit
if ind + 1 > self.limit {
return Err(ReadlinesError::LimitOverflow);
}
let enc: *const Encoding = self.encoding as *const Encoding;
let line = if enc == UTF_8 {
str::from_utf8(&bytes.split_to(ind + 1))
.map_err(|_| ReadlinesError::EncodingError)?
.to_owned()
} else {
self.encoding
.decode(&bytes.split_to(ind + 1), DecoderTrap::Strict)
.map_err(|_| ReadlinesError::EncodingError)?
};
// extend buffer with rest of the bytes;
self.buff.extend_from_slice(&bytes);
self.checked_buff = false;
return Ok(Async::Ready(Some(line)));
}
self.buff.extend_from_slice(&bytes);
Ok(Async::NotReady)
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(None)) => {
if self.buff.is_empty() {
return Ok(Async::Ready(None));
}
if self.buff.len() > self.limit {
return Err(ReadlinesError::LimitOverflow);
}
let enc: *const Encoding = self.encoding as *const Encoding;
let line = if enc == UTF_8 {
str::from_utf8(&self.buff)
.map_err(|_| ReadlinesError::EncodingError)?
.to_owned()
} else {
self.encoding
.decode(&self.buff, DecoderTrap::Strict)
.map_err(|_| ReadlinesError::EncodingError)?
};
self.buff.clear();
Ok(Async::Ready(Some(line)))
}
Err(e) => Err(ReadlinesError::from(e)),
}
}
}
/// Future that resolves to a complete http message body.
pub struct MessageBody<T: HttpMessage> {
limit: usize,
length: Option<usize>,
stream: Option<T::Stream>,
err: Option<PayloadError>,
fut: Option<Box<Future<Item = Bytes, Error = PayloadError>>>,
}
impl<T: HttpMessage> MessageBody<T> {
/// Create `MessageBody` for request.
pub fn new(req: &T) -> MessageBody<T> {
let mut len = None;
if let Some(l) = req.headers().get(header::CONTENT_LENGTH) {
if let Ok(s) = l.to_str() {
if let Ok(l) = s.parse::<usize>() {
len = Some(l)
} else {
return Self::err(PayloadError::UnknownLength);
}
} else {
return Self::err(PayloadError::UnknownLength);
}
}
MessageBody {
limit: 262_144,
length: len,
stream: Some(req.payload()),
fut: None,
err: None,
}
}
/// Change max size of payload. By default max size is 256Kb
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self
}
fn err(e: PayloadError) -> Self {
MessageBody {
stream: None,
limit: 262_144,
fut: None,
err: Some(e),
length: None,
}
}
}
impl<T> Future for MessageBody<T>
where
T: HttpMessage + 'static,
{
type Item = Bytes;
type Error = PayloadError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut {
return fut.poll();
}
if let Some(err) = self.err.take() {
return Err(err);
}
if let Some(len) = self.length.take() {
if len > self.limit {
return Err(PayloadError::Overflow);
}
}
// future
let limit = self.limit;
self.fut = Some(Box::new(
self.stream
.take()
.expect("Can not be used second time")
.from_err()
.fold(BytesMut::with_capacity(8192), move |mut body, chunk| {
if (body.len() + chunk.len()) > limit {
Err(PayloadError::Overflow)
} else {
body.extend_from_slice(&chunk);
Ok(body)
}
})
.map(|body| body.freeze()),
));
self.poll()
}
}
/// Future that resolves to parsed urlencoded values.
pub struct UrlEncoded<T: HttpMessage, U> {
stream: Option<T::Stream>,
limit: usize,
length: Option<usize>,
encoding: EncodingRef,
err: Option<UrlencodedError>,
fut: Option<Box<Future<Item = U, Error = UrlencodedError>>>,
}
impl<T: HttpMessage, U> UrlEncoded<T, U> {
/// Create a new future that parses a URL-encoded request body
pub fn new(req: &T) -> UrlEncoded<T, U> {
// check content type
if req.content_type().to_lowercase() != "application/x-www-form-urlencoded" {
return Self::err(UrlencodedError::ContentType);
}
let encoding = match req.encoding() {
Ok(enc) => enc,
Err(_) => return Self::err(UrlencodedError::ContentType),
};
let mut len = None;
if let Some(l) = req.headers().get(header::CONTENT_LENGTH) {
if let Ok(s) = l.to_str() {
if let Ok(l) = s.parse::<usize>() {
len = Some(l)
} else {
return Self::err(UrlencodedError::UnknownLength);
}
} else {
return Self::err(UrlencodedError::UnknownLength);
}
};
UrlEncoded {
encoding,
stream: Some(req.payload()),
limit: 262_144,
length: len,
fut: None,
err: None,
}
}
fn err(e: UrlencodedError) -> Self {
UrlEncoded {
stream: None,
limit: 262_144,
fut: None,
err: Some(e),
length: None,
encoding: UTF_8,
}
}
/// Change max size of payload. By default max size is 256Kb
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self
}
}
impl<T, U> Future for UrlEncoded<T, U>
where
T: HttpMessage + 'static,
U: DeserializeOwned + 'static,
{
type Item = U;
type Error = UrlencodedError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut {
return fut.poll();
}
if let Some(err) = self.err.take() {
return Err(err);
}
// payload size
let limit = self.limit;
if let Some(len) = self.length.take() |
// future
let encoding = self.encoding;
let fut = self
.stream
.take()
.expect("UrlEncoded could not be used second time")
.from_err()
.fold(BytesMut::with_capacity(8192), move |mut body, chunk| {
if (body.len() + chunk.len()) > limit {
Err(UrlencodedError::Overflow)
} else {
body.extend_from_slice(&chunk);
Ok(body)
}
})
.and_then(move |body| {
if (encoding as *const Encoding) == UTF_8 {
serde_urlencoded::from_bytes::<U>(&body)
.map_err(|_| UrlencodedError::Parse)
} else {
let body = encoding
.decode(&body, DecoderTrap::Strict)
.map_err(|_| UrlencodedError::Parse)?;
serde_urlencoded::from_str::<U>(&body)
.map_err(|_| UrlencodedError::Parse)
}
});
self.fut = Some(Box::new(fut));
self.poll()
}
}
#[cfg(test)]
mod tests {
use super::*;
use encoding::all::ISO_8859_2;
use encoding::Encoding;
use futures::Async;
use mime;
use test::TestRequest;
#[test]
fn test_content_type() {
let req = TestRequest::with_header("content-type", "text/plain").finish();
assert_eq!(req.content_type(), "text/plain");
let req =
TestRequest::with_header("content-type", "application/json; charset=utf=8")
.finish();
assert_eq!(req.content_type(), "application/json");
let req = TestRequest::default().finish();
assert_eq!(req.content_type(), "");
}
#[test]
fn test_mime_type() {
let req = TestRequest::with_header("content-type", "application/json").finish();
assert_eq!(req.mime_type().unwrap(), Some(mime::APPLICATION_JSON));
let req = TestRequest::default().finish();
assert_eq!(req.mime_type().unwrap(), None);
let req =
TestRequest::with_header("content-type", "application/json; charset=utf-8")
.finish();
let mt = req.mime_type().unwrap().unwrap();
assert_eq!(mt.get_param(mime::CHARSET), Some(mime::UTF_8));
assert_eq!(mt.type_(), mime::APPLICATION);
assert_eq!(mt.subtype(), mime::JSON);
}
#[test]
fn test_mime_type_error() {
let req = TestRequest::with_header(
"content-type",
"applicationadfadsfasdflknadsfklnadsfjson",
)
.finish();
assert_eq!(Err(ContentTypeError::ParseError), req.mime_type());
}
#[test]
fn test_encoding() {
let req = TestRequest::default().finish();
assert_eq!(UTF_8.name(), req.encoding().unwrap().name());
let req = TestRequest::with_header("content-type", "application/json").finish();
assert_eq!(UTF_8.name(), req.encoding().unwrap().name());
let req = TestRequest::with_header(
"content-type",
"application/json; charset=ISO-8859-2",
)
.finish();
assert_eq!(ISO_8859_2.name(), req.encoding().unwrap().name());
}
#[test]
fn test_encoding_error() {
let req = TestRequest::with_header("content-type", "applicatjson").finish();
assert_eq!(Some(ContentTypeError::ParseError), req.encoding().err());
let req = TestRequest::with_header(
"content-type",
"application/json; charset=kkkttktk",
)
.finish();
assert_eq!(
Some(ContentTypeError::UnknownEncoding),
req.encoding().err()
);
}
#[test]
fn test_chunked() {
let req = TestRequest::default().finish();
assert!(!req.chunked().unwrap());
let req =
TestRequest::with_header(header::TRANSFER_ENCODING, "chunked").finish();
assert!(req.chunked().unwrap());
let req = TestRequest::default()
.header(
header::TRANSFER_ENCODING,
Bytes::from_static(b"some va\xadscc\xacas0xsdasdlue"),
)
.finish();
assert!(req.chunked().is_err());
}
impl PartialEq for UrlencodedError {
fn eq(&self, other: &UrlencodedError) -> bool {
match *self {
UrlencodedError::Chunked => match *other {
UrlencodedError::Chunked => true,
_ => false,
},
UrlencodedError::Overflow => match *other {
UrlencodedError::Overflow => true,
_ => false,
},
UrlencodedError::UnknownLength => match *other {
UrlencodedError::UnknownLength => true,
_ => false,
},
UrlencodedError::ContentType => match *other {
UrlencodedError::ContentType => true,
_ => false,
},
_ => false,
}
}
}
#[derive(Deserialize, Debug, PartialEq)]
struct Info {
hello: String,
}
#[test]
fn test_urlencoded_error() {
let req = TestRequest::with_header(
header::CONTENT_TYPE,
"application/x-www-form-urlencoded",
)
.header(header::CONTENT_LENGTH, "xxxx")
.finish();
assert_eq!(
req.urlencoded::<Info>().poll().err().unwrap(),
UrlencodedError::UnknownLength
);
let req = TestRequest::with_header(
header::CONTENT_TYPE,
"application/x-www-form-urlencoded",
)
.header(header::CONTENT_LENGTH, "1000000")
.finish();
assert_eq!(
req.urlencoded::<Info>().poll().err().unwrap(),
UrlencodedError::Overflow
);
let req = TestRequest::with_header(header::CONTENT_TYPE, "text/plain")
.header(header::CONTENT_LENGTH, "10")
.finish();
assert_eq!(
req.urlencoded::<Info>().poll().err().unwrap(),
UrlencodedError::ContentType
);
}
#[test]
fn test_urlencoded() {
let req = TestRequest::with_header(
header::CONTENT_TYPE,
"application/x-www-form-urlencoded",
)
.header(header::CONTENT_LENGTH, "11")
.set_payload(Bytes::from_static(b"hello=world"))
.finish();
let result = req.urlencoded::<Info>().poll().ok().unwrap();
assert_eq!(
result,
Async::Ready(Info {
hello: "world".to_owned()
})
);
let req = TestRequest::with_header(
header::CONTENT_TYPE,
"application/x-www-form-urlencoded; charset=utf-8",
)
.header(header::CONTENT_LENGTH, "11")
.set_payload(Bytes::from_static(b"hello=world"))
.finish();
let result = req.urlencoded().poll().ok().unwrap();
assert_eq!(
result,
Async::Ready(Info {
hello: "world".to_owned()
})
);
}
#[test]
fn test_message_body() {
let req = TestRequest::with_header(header::CONTENT_LENGTH, "xxxx").finish();
match req.body().poll().err().unwrap() {
PayloadError::UnknownLength => (),
_ => unreachable!("error"),
}
let req = TestRequest::with_header(header::CONTENT_LENGTH, "1000000").finish();
match req.body().poll().err().unwrap() {
PayloadError::Overflow => (),
_ => unreachable!("error"),
}
let req = TestRequest::default()
.set_payload(Bytes::from_static(b"test"))
.finish();
match req.body().poll().ok().unwrap() {
Async::Ready(bytes) => assert_eq!(bytes, Bytes::from_static(b"test")),
_ => unreachable!("error"),
}
let req = TestRequest::default()
.set_payload(Bytes::from_static(b"11111111111111"))
.finish();
match req.body().limit(5).poll().err().unwrap() {
PayloadError::Overflow => (),
_ => unreachable!("error"),
}
}
#[test]
fn test_readlines() {
let req = TestRequest::default()
.set_payload(Bytes::from_static(
b"Lorem Ipsum is simply dummy text of the printing and typesetting\n\
industry. Lorem Ipsum has been the industry's standard dummy\n\
Contrary to popular belief, Lorem Ipsum is not simply random text.",
))
.finish();
let mut r = Readlines::new(&req);
match r.poll().ok().unwrap() {
Async::Ready(Some(s)) => assert_eq!(
s,
"Lorem Ipsum is simply dummy text of the printing and typesetting\n"
),
_ => unreachable!("error"),
}
match r.poll().ok().unwrap() {
Async::Ready(Some(s)) => assert_eq!(
s,
"industry. Lorem Ipsum has been the industry's standard dummy\n"
),
_ => unreachable!("error"),
}
match r.poll().ok().unwrap() {
Async::Ready(Some(s)) => assert_eq!(
s,
"Contrary to popular belief, Lorem Ipsum is not simply random text."
),
_ => unreachable!("error"),
}
}
}
| {
if len > limit {
return Err(UrlencodedError::Overflow);
}
} |
watcher_test.go | // Copyright 2016-2018, Pulumi Corporation.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package watcher
import (
"context"
"strings"
"testing"
"time"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func testObjWatcher(ctx context.Context, poll pollFunc) *ObjectWatcher {
return &ObjectWatcher{
ctx: ctx,
pollFunc: poll,
}
}
var timeoutErrPrefix = "Timeout occurred polling for"
func Test_WatchUntil_PollFuncTimeout(t *testing.T) {
type timeoutTest struct {
name string
targetPollFuncCalls func(int) bool
targetWatchFuncCalls func(int) bool
pollFunc pollFunc
predicate Predicate
timeout time.Duration
}
timeoutTests := []timeoutTest{
{
name: "PollFuncTimeout",
targetPollFuncCalls: func(i int) bool { return i == 1 },
targetWatchFuncCalls: func(i int) bool { return i == 0 },
pollFunc: func() (*unstructured.Unstructured, error) {
time.Sleep(2 * time.Second)
return nil, nil
},
predicate: func(*unstructured.Unstructured) bool {
return true
},
timeout: 100 * time.Millisecond,
},
{
name: "PredicateFuncTimeout",
targetPollFuncCalls: func(i int) bool { return i > 1 }, | },
predicate: func(*unstructured.Unstructured) bool {
return false // Always false.
},
timeout: 3 * time.Second,
},
}
testCompleted := make(chan struct{})
for _, test := range timeoutTests {
go func(test timeoutTest) {
pollFuncCalls, watchFuncCalls := 0, 0
err := testObjWatcher(
context.Background(),
func() (*unstructured.Unstructured, error) {
pollFuncCalls++
return test.pollFunc()
}).
WatchUntil(
func(obj *unstructured.Unstructured) bool {
watchFuncCalls++
return test.predicate(obj)
},
test.timeout)
if err == nil || !strings.HasPrefix(err.Error(), timeoutErrPrefix) {
t.Errorf("%s: Polling should have timed out", test.name)
}
if !test.targetPollFuncCalls(pollFuncCalls) {
t.Errorf("%s: Got %d poll function calls, which did not satisfy the test predicate", test.name, pollFuncCalls)
}
if !test.targetWatchFuncCalls(watchFuncCalls) {
t.Errorf("%s: Got %d watch function calls, which did not satisfy the test predicate", test.name, watchFuncCalls)
}
testCompleted <- struct{}{}
}(test)
}
testsCompleted := 0
for range testCompleted {
testsCompleted++
if testsCompleted == len(timeoutTests) {
return
}
}
}
func Test_WatchUntil_Success(t *testing.T) {
// Succeeds because the `WatchUntil` predicate always returns true.
err := testObjWatcher(
context.Background(),
func() (*unstructured.Unstructured, error) {
return &unstructured.Unstructured{}, nil
}).
WatchUntil(
func(*unstructured.Unstructured) bool {
return true // Always true.
},
1*time.Second)
if err != nil {
t.Error("Expected watch to terminate without error")
}
}
func Test_RetryUntil_Success(t *testing.T) {
// Succeeds because the `RetryUntil` callback never returns an error.
err := testObjWatcher(
context.Background(),
func() (*unstructured.Unstructured, error) {
return &unstructured.Unstructured{}, nil
}).
RetryUntil(
func(*unstructured.Unstructured, error) error {
return nil // Always succeeds.
},
1*time.Second)
if err != nil {
t.Error("Expected watch to terminate without error")
}
}
func Test_RetryUntil_Cancel(t *testing.T) {
cancelCtx, cancel := context.WithCancel(context.Background())
cancel()
// Fails because the context is canceled before polling begins.
err := testObjWatcher(
cancelCtx,
func() (*unstructured.Unstructured, error) {
return &unstructured.Unstructured{}, nil
}).
RetryUntil(
func(*unstructured.Unstructured, error) error {
return nil // Always succeeds.
},
1*time.Second)
if err == nil {
t.Error("Expected watch to terminate with an initialization error")
}
} | targetWatchFuncCalls: func(i int) bool { return i > 0 },
pollFunc: func() (*unstructured.Unstructured, error) {
return &unstructured.Unstructured{}, nil |
contextutil.py | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
import tarfile
import tempfile
import time
import uuid
import zipfile
from contextlib import closing, contextmanager
from six import string_types
from pants.util.dirutil import safe_delete
@contextmanager
def environment_as(**kwargs):
"""Update the environment to the supplied values, for example:
with environment_as(PYTHONPATH='foo:bar:baz',
PYTHON='/usr/bin/python2.6'):
subprocess.Popen(foo).wait()
"""
new_environment = kwargs
old_environment = {}
def setenv(key, val):
if val is not None:
os.environ[key] = val
else:
if key in os.environ:
del os.environ[key]
for key, val in new_environment.items():
old_environment[key] = os.environ.get(key)
setenv(key, val)
try:
yield
finally:
for key, val in old_environment.items():
setenv(key, val)
@contextmanager
def temporary_dir(root_dir=None, cleanup=True):
"""
A with-context that creates a temporary directory.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary directory.
:param bool cleanup: Whether or not to clean up the temporary directory.
"""
path = tempfile.mkdtemp(dir=root_dir)
try:
yield path
finally:
if cleanup:
shutil.rmtree(path, ignore_errors=True)
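# Illustrative usage (a sketch; the file name is an example only):
#
#   with temporary_dir() as tmp:
#     scratch = os.path.join(tmp, 'scratch.txt')
#     # ... work with files under `tmp`; the directory is removed on exit unless cleanup=False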
@contextmanager
def temporary_file_path(root_dir=None, cleanup=True):
"""
A with-context that creates a temporary file and returns its path.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup) as fd:
fd.close()
yield fd.name
@contextmanager
def temporary_file(root_dir=None, cleanup=True, suffix=''):
"""
A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param str root_dir: The parent directory to create the temporary file.
:param bool cleanup: Whether or not to clean up the temporary file.
:param str suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
"""
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False) as fd:
try:
yield fd
finally:
if cleanup:
safe_delete(fd.name)
@contextmanager
def safe_file(path, suffix=None, cleanup=True):
"""A with-context that copies a file, and copies the copy back to the original file on success.
This is useful for doing work on a file but only changing its state on success.
:param str suffix: Use this suffix to create the copy. Otherwise use a random string.
:param bool cleanup: Whether or not to clean up the copy.
"""
safe_path = '{0}.{1}'.format(path, suffix or uuid.uuid4())
if os.path.exists(path):
shutil.copy(path, safe_path)
try:
yield safe_path
if cleanup:
shutil.move(safe_path, path)
else:
shutil.copy(safe_path, path)
finally:
if cleanup:
safe_delete(safe_path)
@contextmanager
def pushd(directory):
"""
A with-context that encapsulates pushd/popd.
"""
cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwd)
@contextmanager
def open_zip(path_or_file, *args, **kwargs):
"""
A with-context for zip files. Passes through positional and kwargs to zipfile.ZipFile.
"""
try:
allowZip64 = kwargs.pop('allowZip64', True)
zf = zipfile.ZipFile(path_or_file, *args, allowZip64=allowZip64, **kwargs)
except zipfile.BadZipfile as bze:
raise zipfile.BadZipfile("Bad Zipfile {0}: {1}".format(path_or_file, bze))
try:
yield zf
finally:
zf.close()
@contextmanager
def open_tar(path_or_file, *args, **kwargs):
"""
A with-context for tar files. Passes through positional and kwargs to tarfile.open.
If path_or_file is a file, caller must close it separately.
"""
(path, fileobj) = ((path_or_file, None) if isinstance(path_or_file, string_types)
else (None, path_or_file))
with closing(tarfile.open(path, *args, fileobj=fileobj, **kwargs)) as tar:
yield tar
class Timer(object):
"""Very basic with-context to time operations
Example usage:
>>> from pants.util.contextutil import Timer
>>> with Timer() as timer:
... time.sleep(2)
...
>>> timer.elapsed
2.0020849704742432
"""
def __init__(self, clock=time):
self._clock = clock
def __enter__(self):
|
@property
def elapsed(self):
if self.finish:
return self.finish - self.start
else:
return self._clock.time() - self.start
def __exit__(self, typ, val, traceback):
self.finish = self._clock.time()
| self.start = self._clock.time()
self.finish = None
return self |
init_test.go | /*
Copyright 2019 The Tekton Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file contains initialization logic for the tests, such as special magical global state that needs to be initialized.
package test
import (
"flag"
"fmt"
"strings"
"sync"
"testing"
"github.com/tektoncd/pipeline/pkg/names"
"golang.org/x/xerrors"
yaml "gopkg.in/yaml.v2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
knativetest "knative.dev/pkg/test"
"knative.dev/pkg/test/logging"
// Mysteriously required by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242
_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
// Mysteriously required by k8s libs, or they fail to create `KubeClient`s when using oidc authentication. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/345
_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
)
const triggersNamespace = "tekton-pipelines"
var initMetrics sync.Once
func setup(t *testing.T) (*clients, string) {
t.Helper()
namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arrakis")
initializeLogsAndMetrics(t)
c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster)
createNamespace(t, namespace, c.KubeClient)
verifyDefaultServiceAccountExists(t, namespace, c.KubeClient)
return c, namespace
}
func tearDown(t *testing.T, cs *clients, namespace string) {
t.Helper()
if cs.KubeClient == nil {
return
}
if t.Failed() {
header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace))
bs, err := getCRDYaml(cs, namespace)
if err != nil {
t.Error(err)
} else {
t.Log(string(bs))
}
header(t.Logf, fmt.Sprintf("Dumping logs from tekton-triggers-controller in namespace %s", triggersNamespace))
controllerLogs, err := CollectPodLogsWithLabel(cs.KubeClient, triggersNamespace, "app=tekton-triggers-controller")
if err != nil {
t.Logf("Could not get logs for tekton-triggers-controller Pod: %s", err)
} else {
t.Log(controllerLogs)
}
header(t.Logf, fmt.Sprintf("Dumping logs from tekton-triggers-webhook in namespace %s", triggersNamespace))
webhookLogs, err := CollectPodLogsWithLabel(cs.KubeClient, triggersNamespace, "app=tekton-triggers-webhook")
if err != nil {
t.Logf("Could not get logs for tekton-triggers-webhook Pod: %s", err)
} else {
t.Log(webhookLogs)
}
header(t.Logf, fmt.Sprintf("Dumping logs from EventListener sinks in namespace %s", namespace))
elSinkLogs, err := CollectPodLogsWithLabel(cs.KubeClient, namespace, "triggers=eventlistener")
if err != nil {
t.Logf("Could not get logs for EventListener sink Pods: %s", err)
} else {
t.Log(elSinkLogs)
}
}
t.Logf("Deleting namespace %s", namespace)
if err := cs.KubeClient.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil {
t.Errorf("Failed to delete namespace %s: %s", namespace, err)
}
t.Logf("Deleting cluster-scoped resources")
if err := cs.KubeClient.RbacV1().ClusterRoles().Delete("my-role", &metav1.DeleteOptions{}); err != nil {
t.Errorf("Failed to delete clusterrole my-role: %s", err)
}
if err := cs.KubeClient.RbacV1().ClusterRoleBindings().Delete("my-rolebinding", &metav1.DeleteOptions{}); err != nil {
t.Errorf("Failed to delete clusterrolebinding my-rolebinding: %s", err)
}
if err := cs.TriggersClient.TektonV1alpha1().ClusterTriggerBindings().Delete("my-clustertriggerbinding", &metav1.DeleteOptions{}); err != nil {
t.Errorf("Failed to delete clustertriggerbinding my-clustertriggerbinding: %s", err)
}
}
func header(logf logging.FormatLogger, text string) {
left := "### "
right := " ###"
txt := left + text + right
bar := strings.Repeat("#", len(txt))
logf(bar)
logf(txt)
logf(bar)
}
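// Illustration (not part of the original test): header(t.Logf, "Dumping objects")
// would log a three-line banner such as
//
//	#######################
//	### Dumping objects ###
//	#######################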
func initializeLogsAndMetrics(t *testing.T) {
t.Helper()
initMetrics.Do(func() {
flag.Parse()
if err := flag.Set("alsologtostderr", "true"); err != nil {
t.Fatalf("Failed to set 'alsologtostderr' flag to 'true': %s", err)
}
logging.InitializeLogger(knativetest.Flags.LogVerbose)
if knativetest.Flags.EmitMetrics {
logging.InitializeMetricExporter(t.Name())
}
})
}
func createNamespace(t *testing.T, namespace string, kubeClient kubernetes.Interface) |
func verifyDefaultServiceAccountExists(t *testing.T, namespace string, kubeClient kubernetes.Interface) {
t.Helper()
defaultSA := "default"
t.Logf("Verify SA %s is created in namespace %s", defaultSA, namespace)
if err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := kubeClient.CoreV1().ServiceAccounts(namespace).Get(defaultSA, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
return true, err
}); err != nil {
t.Fatalf("Failed to get SA %q in namespace %q for tests: %s", defaultSA, namespace, err)
}
}
func getCRDYaml(cs *clients, ns string) ([]byte, error) {
var output []byte
printOrAdd := func(kind, name string, i interface{}) {
bs, err := yaml.Marshal(i)
if err != nil {
return
}
output = append(output, []byte("\n---\n")...)
output = append(output, bs...)
}
ctbs, err := cs.TriggersClient.TektonV1alpha1().ClusterTriggerBindings().List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get ClusterTriggerBindings: %w", err)
}
for _, i := range ctbs.Items {
printOrAdd("ClusterTriggerBinding", i.Name, i)
}
els, err := cs.TriggersClient.TektonV1alpha1().EventListeners(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get EventListeners: %w", err)
}
for _, i := range els.Items {
printOrAdd("EventListener", i.Name, i)
}
tbs, err := cs.TriggersClient.TektonV1alpha1().TriggerBindings(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get TriggerBindings: %w", err)
}
for _, i := range tbs.Items {
printOrAdd("TriggerBinding", i.Name, i)
}
// TODO: Update TriggerTemplates Marshalling so it isn't a byte array in debug log
tts, err := cs.TriggersClient.TektonV1alpha1().TriggerTemplates(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get TriggerTemplates: %w", err)
}
for _, i := range tts.Items {
printOrAdd("TriggerTemplate", i.Name, i)
}
pods, err := cs.KubeClient.CoreV1().Pods(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get Pods: %w", err)
}
for _, i := range pods.Items {
printOrAdd("Pod", i.Name, i)
}
services, err := cs.KubeClient.CoreV1().Services(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get Services: %w", err)
}
for _, i := range services.Items {
printOrAdd("Service", i.Name, i)
}
roles, err := cs.KubeClient.RbacV1().Roles(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get Roles: %w", err)
}
for _, i := range roles.Items {
printOrAdd("Role", i.Name, i)
}
roleBindings, err := cs.KubeClient.RbacV1().RoleBindings(ns).List(metav1.ListOptions{})
if err != nil {
return nil, xerrors.Errorf("could not get RoleBindings: %w", err)
}
for _, i := range roleBindings.Items {
printOrAdd("RoleBinding", i.Name, i)
}
return output, nil
}
| {
t.Helper()
t.Logf("Create namespace %s to deploy to", namespace)
if _, err := kubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
},
}); err != nil {
t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err)
}
} |
tela2.component.spec.ts | import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { Tela2Component } from './tela2.component';
describe('Tela2Component', () => {
let component: Tela2Component;
let fixture: ComponentFixture<Tela2Component>; | beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ Tela2Component ]
})
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(Tela2Component);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
}); | |
obj.py | # Copyright (c) 2010-2020 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mimetypes
import time
import math
from swift import gettext_ as _
from swift.common.utils import (
clean_content_type, config_true_value, Timestamp, public,
close_if_possible, closing_if_possible)
from swift.common.constraints import check_metadata, check_object_creation
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.middleware.versioned_writes.legacy \
import DELETE_MARKER_CONTENT_TYPE
from swift.common.oio_utils import check_if_none_match, \
handle_not_allowed, handle_oio_timeout, handle_service_busy, \
REQID_HEADER, BUCKET_NAME_PROP, MULTIUPLOAD_SUFFIX, \
obj_version_from_env
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \
HTTPConflict, HTTPPreconditionFailed, HTTPRequestTimeout, \
HTTPUnprocessableEntity, HTTPClientDisconnect, HTTPCreated, \
HTTPNoContent, Response, HTTPInternalServerError, multi_range_iterator, \
HTTPServiceUnavailable, HTTPException
from swift.common.request_helpers import is_sys_or_user_meta, \
is_object_transient_sysmeta, resolve_etag_is_at_header
from swift.common.wsgi import make_subrequest
from swift.proxy.controllers.base import set_object_info_cache, \
delay_denial, cors_validation, get_object_info
from swift.proxy.controllers.obj import check_content_type
from swift.proxy.controllers.obj import BaseObjectController as \
BaseObjectController
from oio.common import exceptions
from oio.common.constants import FORCEVERSIONING_HEADER
from oio.common.http import ranges_from_http_header
from oio.common.storage_method import STORAGE_METHODS
from oio.api.object_storage import _sort_chunks
from oio.common.exceptions import SourceReadTimeout
BUCKET_NAME_HEADER = 'X-Object-Sysmeta-Oio-Bucket-Name'
SLO = 'x-static-large-object'
# FIXME(FVE): we do support versioning now
SUPPORT_VERSIONING = True
class ObjectControllerRouter(object):
def __getitem__(self, policy):
return ObjectController
class StreamRangeIterator(object):
"""
Data stream wrapper that handles range requests and deals with exceptions.
"""
def __init__(self, request, stream):
self.req = request
self._stream = stream
def app_iter_range(self, _start, _stop):
# This will be called when there is only one range,
# no need to check the number of bytes
return self.stream()
def _chunked_app_iter_range(self, start, stop):
# The stream generator gives us one "chunk" per range,
# and as we are called once for each range, we must
# simulate end-of-stream by returning from the generator.
for dat in self.stream():
yield dat
return  # implicit StopIteration; raising it explicitly is a RuntimeError under PEP 479 (Python 3.7+)
def app_iter_ranges(self, ranges, content_type,
boundary, content_size,
*_args, **_kwargs):
for chunk in multi_range_iterator(
ranges, content_type, boundary, content_size,
self._chunked_app_iter_range):
yield chunk
def stream(self, *args, **kwargs):
"""
Get the wrapped data stream.
"""
try:
for dat in self._stream:
yield dat
except (exceptions.ServiceBusy, exceptions.ServiceUnavailable) as err:
# We cannot use the handle_service_busy() decorator
# because it returns the exception object instead of raising it.
headers = dict()
headers['Retry-After'] = '1'
raise HTTPServiceUnavailable(request=self.req, headers=headers,
body=str(err))
def __iter__(self):
return self.stream()
class ExpectedSizeReader(object):
"""Only accept EOF as valid once exactly the expected number of bytes has been received."""
def __init__(self, source, expected):
self.source = source
self.expected = expected
self.consumed = 0
def read(self, *args, **kwargs):
rc = self.source.read(*args, **kwargs)
if len(rc) == 0:
if self.consumed != self.expected:
raise exceptions.SourceReadError("Truncated input")
else:
self.consumed = self.consumed + len(rc)
return rc
def readline(self, *args, **kwargs):
rc = self.source.readline(*args, **kwargs)
if len(rc) == 0:
if self.consumed != self.expected:
raise exceptions.SourceReadError("Truncated input")
else:
self.consumed = self.consumed + len(rc)
return rc
def close(self):
return close_if_possible(self.source)
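# Illustrative sketch (not part of the original module): ExpectedSizeReader
# rejects truncated input once EOF arrives before the announced size.
def _example_expected_size_reader():
    import io
    reader = ExpectedSizeReader(io.BytesIO(b'abc'), 5)
    assert reader.read() == b'abc'          # consumed=3 of the expected 5
    try:
        reader.read()                        # EOF too early
    except exceptions.SourceReadError:
        return True
    return False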
class ObjectController(BaseObjectController):
allowed_headers = {'content-disposition', 'content-encoding',
'x-delete-at', 'x-object-manifest',
'x-static-large-object'}
@public
@cors_validation
@delay_denial
def HEAD(self, req):
"""Handle HEAD requests."""
return self.GETorHEAD(req)
@public
@cors_validation
@delay_denial
def GET(self, req):
"""Handle GET requests."""
return self.GETorHEAD(req)
@handle_oio_timeout
@handle_service_busy
@check_if_none_match
def GETorHEAD(self, req):
"""Handle HTTP GET or HEAD requests."""
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['read_acl']
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
if req.method == 'HEAD':
resp = self.get_object_head_resp(req)
else:
resp = self.get_object_fetch_resp(req)
set_object_info_cache(self.app, req.environ, self.account_name,
self.container_name, self.object_name, resp)
if ';' in resp.headers.get('content-type', ''):
resp.content_type = clean_content_type(
resp.headers['content-type'])
return resp
# TODO(FVE): get rid of this
# This is not needed if we rely on swift's object versioning.
def enforce_versioning(self, req):
|
def get_object_head_resp(self, req):
storage = self.app.storage
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
version = obj_version_from_env(req.environ)
force_master = False
while True:
try:
if self.app.check_state:
metadata, chunks = storage.object_locate(
self.account_name, self.container_name,
self.object_name, version=version,
headers=oio_headers, force_master=force_master,
cache=oio_cache, perfdata=perfdata)
else:
metadata = storage.object_get_properties(
self.account_name, self.container_name,
self.object_name, version=version,
headers=oio_headers, force_master=force_master,
cache=oio_cache, perfdata=perfdata)
break
except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
if force_master or not \
self.container_name.endswith(MULTIUPLOAD_SUFFIX):
# Either the request failed with the master,
# or it is not an MPU
return HTTPNotFound(request=req)
# This part appears in the manifest, so it should be there.
# To be sure, we must go check the master
# in case of desynchronization.
force_master = True
if self.app.check_state:
storage_method = STORAGE_METHODS.load(metadata['chunk_method'])
# TODO(mbo): use new property of STORAGE_METHODS
min_chunks = storage_method.ec_nb_data if storage_method.ec else 1
chunks_by_pos = _sort_chunks(chunks, storage_method.ec)
for idx, entries in enumerate(chunks_by_pos.items()):
if idx != entries[0]:
return HTTPBadRequest(request=req)
nb_chunks_ok = 0
for entry in entries[1]:
try:
storage.blob_client.chunk_head(
entry['url'], headers=oio_headers)
nb_chunks_ok += 1
except exceptions.OioException:
pass
if nb_chunks_ok >= min_chunks:
break
else:
return HTTPBadRequest(request=req)
resp = self.make_object_response(req, metadata)
return resp
def get_object_fetch_resp(self, req):
storage = self.app.storage
if req.headers.get('Range'):
ranges = ranges_from_http_header(req.headers.get('Range'))
else:
ranges = None
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
force_master = False
while True:
try:
metadata, stream = storage.object_fetch(
self.account_name, self.container_name, self.object_name,
ranges=ranges, headers=oio_headers,
version=obj_version_from_env(req.environ),
force_master=force_master, cache=oio_cache,
perfdata=perfdata)
break
except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
if force_master or not \
self.container_name.endswith(MULTIUPLOAD_SUFFIX):
# Either the request failed with the master,
# or it is not an MPU
return HTTPNotFound(request=req)
# This part appears in the manifest, so it should be there.
# To be sure, we must go check the master
# in case of desynchronization.
force_master = True
resp = self.make_object_response(req, metadata, stream)
return resp
def make_object_response(self, req, metadata, stream=None):
conditional_etag = resolve_etag_is_at_header(
req, metadata.get('properties'))
resp = Response(request=req, conditional_response=True,
conditional_etag=conditional_etag)
if config_true_value(metadata['deleted']):
resp.headers['Content-Type'] = DELETE_MARKER_CONTENT_TYPE
else:
resp.headers['Content-Type'] = metadata.get(
'mime_type', 'application/octet-stream')
properties = metadata.get('properties')
if properties:
for k, v in properties.items():
if is_sys_or_user_meta('object', k) or \
is_object_transient_sysmeta(k) or \
k.lower() in self.allowed_headers:
resp.headers[str(k)] = v
hash_ = metadata.get('hash')
if hash_ is not None:
hash_ = hash_.lower()
resp.headers['etag'] = hash_
resp.headers['x-object-sysmeta-version-id'] = metadata['version']
resp.last_modified = int(metadata['mtime'])
if stream:
# Whether we are bothered with ranges or not, we wrap the
# stream in order to handle exceptions.
resp.app_iter = StreamRangeIterator(req, stream)
length_ = metadata.get('length')
if length_ is not None:
length_ = int(length_)
resp.content_length = length_
resp.content_encoding = metadata.get('encoding')
resp.accept_ranges = 'bytes'
return resp
def load_object_metadata(self, headers):
"""
Load object metadata from response headers.
Also load some well-known headers like x-static-large-object.
"""
metadata = {
k.lower(): v for k, v in headers.items()
if is_sys_or_user_meta('object', k) or
is_object_transient_sysmeta(k)
}
for header_key in self.allowed_headers:
if header_key in headers:
headers_lower = header_key.lower()
metadata[headers_lower] = headers[header_key]
return metadata
@public
@cors_validation
@delay_denial
@handle_not_allowed
@handle_oio_timeout
@handle_service_busy
@check_if_none_match
def POST(self, req):
"""HTTP POST request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['write_acl']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
error_response = check_metadata(req, 'object')
if error_response:
return error_response
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
stgpol = self._stgpol_from_policy_index(policy_index)
headers = self._prepare_headers(req)
return self._post_object(req, headers, stgpol)
def _stgpol_from_policy_index(self, policy_index):
# TODO actually convert policy_index to oio stgpol
return 'SINGLE'
def _post_object(self, req, headers, stgpol):
# TODO do something with stgpol
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
try:
# Genuine Swift clears all properties on POST requests.
# But for convenience, keep them when the request originates
# from swift3.
clear = req.environ.get('swift.source') != 'S3'
self.app.storage.object_set_properties(
self.account_name, self.container_name, self.object_name,
metadata, clear=clear, headers=oio_headers,
version=obj_version_from_env(req.environ),
cache=oio_cache, perfdata=perfdata)
except (exceptions.NoSuchObject, exceptions.NoSuchContainer):
return HTTPNotFound(request=req)
resp = HTTPAccepted(request=req)
return resp
def _delete_slo_parts(self, req, manifest):
"""Delete parts of an obsolete SLO."""
# We cannot use bulk-delete here,
# because we are at the end of the pipeline, after 'bulk'.
for part in manifest:
path = '/'.join(('', 'v1', self.account_name)) + part['name']
try:
del_req = make_subrequest(req.environ, 'DELETE', path=path)
del_req.get_response(self.app)
except Exception as exc:
self.app.logger.warn('Failed to delete SLO part %s: %s',
path, exc)
@public
@cors_validation
@delay_denial
@handle_not_allowed
@handle_oio_timeout
@handle_service_busy
@check_if_none_match
def PUT(self, req):
"""HTTP PUT request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
# is request authorized
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
self.enforce_versioning(req)
old_slo_manifest = None
old_slo_manifest_etag = None
# If versioning is disabled, we must check if the object exists.
# If it's a NEW SLO (we must check it is not the same manifest),
# we will have to delete the parts if the current
# operation is a success.
if (self.app.delete_slo_parts and
not config_true_value(container_info.get(
'sysmeta', {}).get('versions-enabled', False))):
try:
dest_info = get_object_info(req.environ, self.app)
if 'slo-size' in dest_info['sysmeta']:
manifest_env = req.environ.copy()
manifest_env['QUERY_STRING'] = 'multipart-manifest=get'
manifest_req = make_subrequest(manifest_env, 'GET')
manifest_resp = manifest_req.get_response(self.app)
old_slo_manifest = json.loads(manifest_resp.body)
old_slo_manifest_etag = dest_info.get('etag')
except Exception as exc:
self.app.logger.warn(('Failed to check existence of %s. If '
'overwriting a SLO, old parts may '
'remain. Error was: %s') %
(req.path, exc))
self._update_content_type(req)
req.ensure_x_timestamp()
# check constraints on object name and request headers
error_response = check_object_creation(req, self.object_name) or \
check_content_type(req)
if error_response:
return error_response
if req.headers.get('Oio-Copy-From'):
return self._link_object(req)
data_source = req.environ['wsgi.input']
if req.content_length:
data_source = ExpectedSizeReader(data_source, req.content_length)
headers = self._prepare_headers(req)
with closing_if_possible(data_source):
resp = self._store_object(req, data_source, headers)
if (resp.is_success and
old_slo_manifest and resp.etag != old_slo_manifest_etag):
self.app.logger.debug(
'Previous object %s was a different SLO, deleting parts',
req.path)
self._delete_slo_parts(req, old_slo_manifest)
return resp
def _prepare_headers(self, req):
req.headers['X-Timestamp'] = Timestamp(time.time()).internal
headers = self.generate_request_headers(req, additional=req.headers)
return headers
def _get_auto_policy_from_size(self, content_length):
# The default storage policy has an offset of -1
# so should always be chosen
policy = None
for (name, offset) in self.app.oio_stgpol:
if offset > content_length:
break
policy = name
return policy
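# Illustration with hypothetical policies (not from the real configuration):
# if self.app.oio_stgpol were [('SINGLE', -1), ('EC', 262144)], a 1 KiB upload
# would select 'SINGLE' (the loop breaks at the 256 KiB offset) and a 1 MiB
# upload would select 'EC'.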
def _link_object(self, req):
_, container, obj = req.headers['Oio-Copy-From'].split('/', 2)
from_account = req.headers.get('X-Copy-From-Account',
self.account_name)
self.app.logger.info("Creating link from %s/%s/%s to %s/%s/%s",
# Existing
from_account, container, obj,
# New
self.account_name, self.container_name,
self.object_name)
storage = self.app.storage
if req.headers.get('Range'):
raise Exception("Fast Copy with Range is unsupported")
ranges = ranges_from_http_header(req.headers.get('Range'))
if len(ranges) != 1:
raise HTTPInternalServerError(
request=req, body="multiple ranges unsupported")
ranges = ranges[0]
else:
ranges = None
headers = self._prepare_headers(req)
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# FIXME(FVE): use object_show, cache in req.environ
version = obj_version_from_env(req.environ)
props = storage.object_get_properties(from_account, container, obj,
headers=oio_headers,
version=version,
cache=oio_cache,
perfdata=perfdata)
if props['properties'].get(SLO, None):
raise Exception("Fast Copy with SLO is unsupported")
else:
if ranges:
raise HTTPInternalServerError(
request=req, body="no range supported with single object")
try:
# TODO check return code (values ?)
link_meta = storage.object_link(
from_account, container, obj,
self.account_name, self.container_name, self.object_name,
headers=oio_headers, properties=metadata,
properties_directive='REPLACE', target_version=version,
cache=oio_cache, perfdata=perfdata)
# TODO(FVE): this exception catching block has to be refactored
# TODO check which ones are OK and which make no sense
except exceptions.Conflict:
raise HTTPConflict(request=req)
except exceptions.PreconditionFailed:
raise HTTPPreconditionFailed(request=req)
except exceptions.SourceReadError:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except exceptions.EtagMismatch:
return HTTPUnprocessableEntity(request=req)
except (exceptions.ServiceBusy, exceptions.OioTimeout,
exceptions.DeadlineReached):
raise
except (exceptions.NoSuchContainer, exceptions.NotFound):
raise HTTPNotFound(request=req)
except exceptions.ClientException as err:
# 481 = CODE_POLICY_NOT_SATISFIABLE
if err.status == 481:
raise exceptions.ServiceBusy()
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
except Exception:
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
resp = HTTPCreated(request=req, etag=link_meta['hash'])
return resp
def _get_footers(self, req):
"""
Get extra metadata that may be generated during upload by some
middlewares (e.g. checksum of cyphered data).
"""
footers = HeaderKeyDict()
footer_callback = req.environ.get(
'swift.callback.update_footers', lambda _footer: None)
footer_callback(footers)
return footers
def _object_create(self, account, container, **kwargs):
storage = self.app.storage
if hasattr(storage, 'object_create_ext'):
return storage.object_create_ext(account, container, **kwargs)
_chunks, _size, checksum = storage.object_create(account, container,
**kwargs)
return _chunks, _size, checksum, {}
def _store_object(self, req, data_source, headers):
kwargs = req.environ.get('oio.query', {})
content_type = req.headers.get('content-type', 'application/octet-stream')
policy = None
container_info = self.container_info(self.account_name,
self.container_name, req)
if 'X-Oio-Storage-Policy' in req.headers:
policy = req.headers.get('X-Oio-Storage-Policy')
if not self.app.POLICIES.get_by_name(policy):
raise HTTPBadRequest(
"invalid policy '%s', must be in %s" %
(policy, self.app.POLICIES.by_name.keys()))
else:
try:
policy_index = int(
req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy']))
except TypeError:
policy_index = 0
if policy_index != 0:
policy = self.app.POLICIES.get_by_index(policy_index).name
else:
content_length = int(req.headers.get('content-length', -1))
policy = self._get_auto_policy_from_size(content_length)
ct_props = {'properties': {}, 'system': {}}
metadata = self.load_object_metadata(headers)
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# only send headers if needed
if SUPPORT_VERSIONING and headers.get(FORCEVERSIONING_HEADER):
oio_headers[FORCEVERSIONING_HEADER] = \
headers.get(FORCEVERSIONING_HEADER)
if req.environ.get('oio.force-version'):
# In the case of an MPU, this holds the version of the UploadId
# so that the version-id of the MPU can be included in the S3 response.
kwargs['version'] = req.environ.get('oio.force-version')
bucket_name = req.environ.get('s3api.bucket')
if bucket_name:
# In case a shard is being created, save the name of the S3 bucket
# in a container property. This will be used when aggregating
# container statistics to make bucket statistics.
ct_props['system'][BUCKET_NAME_PROP] = bucket_name
try:
_chunks, _size, checksum, _meta = self._object_create(
self.account_name, self.container_name,
obj_name=self.object_name, file_or_path=data_source,
mime_type=content_type, policy=policy, headers=oio_headers,
etag=req.headers.get('etag', '').strip('"'),
properties=metadata, container_properties=ct_props,
properties_callback=(
lambda: self.load_object_metadata(self._get_footers(req))),
cache=oio_cache, perfdata=perfdata,
**kwargs)
except exceptions.Conflict:
raise HTTPConflict(request=req)
except exceptions.PreconditionFailed:
raise HTTPPreconditionFailed(request=req)
except SourceReadTimeout as err:
self.app.logger.warning(
_('ERROR Client read timeout (%s)'), err)
self.app.logger.increment('client_timeouts')
raise HTTPRequestTimeout(request=req)
except exceptions.SourceReadError:
req.client_disconnect = True
self.app.logger.warning(
_('Client disconnected without sending last chunk'))
self.app.logger.increment('client_disconnects')
raise HTTPClientDisconnect(request=req)
except exceptions.EtagMismatch:
return HTTPUnprocessableEntity(request=req)
except (exceptions.ServiceBusy, exceptions.OioTimeout,
exceptions.DeadlineReached):
raise
except exceptions.NoSuchContainer:
raise HTTPNotFound(request=req)
except exceptions.ClientException as err:
# 481 = CODE_POLICY_NOT_SATISFIABLE
if err.status == 481:
raise exceptions.ServiceBusy()
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
except HTTPException:
# This can happen when the data source raises an exception
raise
except Exception:
self.app.logger.exception(
_('ERROR Exception transferring data %s'),
{'path': req.path})
raise HTTPInternalServerError(request=req)
last_modified = int(_meta.get('mtime', math.ceil(time.time())))
# FIXME(FVE): if \x10 character in object name, decode version
# number and set it in the response headers, instead of the oio
# version number.
version_id = _meta.get('version', 'null')
resp = HTTPCreated(
request=req, etag=checksum,
last_modified=last_modified,
headers={
'x-object-sysmeta-version-id': version_id
})
return resp
def _update_content_type(self, req):
# Sometimes the 'content-type' header exists, but is set to None.
req.content_type_manually_set = True
detect_content_type = \
config_true_value(req.headers.get('x-detect-content-type'))
if detect_content_type or not req.headers.get('content-type'):
guessed_type, _junk = mimetypes.guess_type(req.path_info)
req.headers['Content-Type'] = guessed_type or \
'application/octet-stream'
if detect_content_type:
req.headers.pop('x-detect-content-type')
else:
req.content_type_manually_set = False
@public
@cors_validation
@delay_denial
@handle_not_allowed
@handle_oio_timeout
@handle_service_busy
def DELETE(self, req):
"""HTTP DELETE request handler."""
container_info = self.container_info(
self.account_name, self.container_name, req)
policy_index = req.headers.get('X-Backend-Storage-Policy-Index',
container_info['storage_policy'])
req.headers['X-Backend-Storage-Policy-Index'] = policy_index
req.acl = container_info['write_acl']
req.environ['swift_sync_key'] = container_info['sync_key']
if 'swift.authorize' in req.environ:
aresp = req.environ['swift.authorize'](req)
if aresp:
return aresp
req.ensure_x_timestamp()
self.enforce_versioning(req)
return self._delete_object(req)
def _delete_object(self, req):
storage = self.app.storage
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
# only send headers if needed
if SUPPORT_VERSIONING and req.headers.get(FORCEVERSIONING_HEADER):
oio_headers[FORCEVERSIONING_HEADER] = \
req.headers.get(FORCEVERSIONING_HEADER)
try:
storage.object_delete(
self.account_name, self.container_name, self.object_name,
version=obj_version_from_env(req.environ),
headers=oio_headers, cache=oio_cache, perfdata=perfdata)
except exceptions.NoSuchContainer:
return HTTPNotFound(request=req)
except exceptions.NoSuchObject:
# Swift doesn't consider this case as an error
pass
resp = HTTPNoContent(request=req)
return resp
| """
Enforce the versioning mode of a container just before executing
an object operation. This is useful when the current object is not
stored in the "main" container but in a shard, where the versioning
mode may not have been set yet.
"""
if not SUPPORT_VERSIONING:
return None
# There is no reason to save several versions of segments:
# a new version of a multipart object manifest will point to a
# completely different set of segments, with another uploadId.
bucket_name = req.environ.get('s3api.bucket')
if not bucket_name \
or self.container_name == bucket_name \
or self.container_name.endswith(MULTIUPLOAD_SUFFIX):
return None
# We can't use _get_info_from_caches as it would use local worker cache
# first and an update of versioning mode may not be detected.
memcache = getattr(self.app, 'memcache', None) or \
req.environ.get('swift.cache')
if memcache is None:
return None
key = "/".join(("versioning", self.account_name, bucket_name))
val = memcache.get(key)
if val is not None:
if val != '':
req.headers[FORCEVERSIONING_HEADER] = val
return
oio_headers = {REQID_HEADER: self.trans_id}
oio_cache = req.environ.get('oio.cache')
perfdata = req.environ.get('swift.perfdata')
try:
meta = self.app.storage.container_get_properties(
self.account_name, bucket_name, headers=oio_headers,
cache=oio_cache, perfdata=perfdata)
except exceptions.NoSuchContainer:
raise HTTPNotFound(request=req)
val = meta['system'].get('sys.m2.policy.version', '')
memcache.set(key, val)
if val:
req.headers[FORCEVERSIONING_HEADER] = val |
run.py | """
Run CGLE example using specified config file.
"""
import int.cgle as cint
import tests
import lpde
import os
import pickle
import shutil
import configparser
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import torch
from torch.utils.tensorboard import SummaryWriter
import utils_cgle
from scipy.spatial.distance import cdist
torch.set_default_dtype(torch.float32)
POINTS_W = 397.48499
plt.set_cmap('plasma')
def | (config, n, path, verbose=False, n_min=0):
"""Integrate complex Ginzburg-Landau equation."""
pars = {}
pars["c1"] = float(config["c1"])
pars["c2"] = float(config["c2"])
pars["c3"] = float(config["c3"])
pars["mu"] = float(config["mu"])
pars["L"] = float(config["L"])
data_dict = cint.integrate(pars=pars,
dt=float(config["dt"]), N=int(config["N_int"]), T=int(config["T"]),
tmin=float(config["tmin"]), tmax=float(config["tmax"]),
append_init=True)
if verbose:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(data_dict["xx"], data_dict["data"][-1].real, label='real')
ax.plot(data_dict["xx"], data_dict["data"][-1].imag, label='imag')
ax.set_xlabel(r'$\omega$')
plt.title('snapshot')
plt.legend()
plt.show()
for i in range(n_min, n):
for p in [0, -1, 1]:
data_perturbed = cint.integrate(pars=pars,
dt=data_dict["dt"], N=data_dict["N"], T=data_dict["T"],
tmin=0, tmax=data_dict["tmax"]-data_dict["tmin"],
ic='manual',
Ainit=data_dict["data"][int(i*int(config["T_off"]))] +
p*float(config["eps"]) *
data_dict["data"][int(i*int(config["T_off"]))],
append_init=True)
data_perturbed["data"] = data_perturbed["data"][:, ::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["xx"] = data_perturbed["xx"][::int(
int(config["N_int"])/int(config["N"]))]
data_perturbed["N"] = int(config["N"])
output = open(path + 'run'+str(i)+'_p_'+str(p)+'.pkl', 'wb')
pickle.dump(data_perturbed, output)
output.close()
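# Note (added for clarity, not in the original script): with the loop above,
# each index i produces three pickle files in `path`, e.g. for i = 0:
# 'run0_p_0.pkl', 'run0_p_-1.pkl' and 'run0_p_1.pkl', holding the unperturbed
# trajectory and the -eps/+eps perturbed trajectories respectively.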
def make_plot_paper(config):
"""Plot CGLE simulation results."""
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_dict = pickle.load(pkl_file)
pkl_file.close()
# t_off = 2000
t_off = 0
idxs = np.arange(data_dict["N"])
np.random.shuffle(idxs)
fig = plt.figure(figsize=(POINTS_W/72, 0.9*POINTS_W/72))
ax1 = fig.add_subplot(321)
pl1 = ax1.pcolor(data_dict["xx"], data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax1.set_xlabel('$x$', labelpad=-2)
ax1.set_ylabel('$t$', labelpad=0)
ax1.set_xlim((0, data_dict["L"]))
ax1.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar1 = plt.colorbar(pl1)
cbar1.set_label('Re $W$', labelpad=-3)
ax2 = fig.add_subplot(322)
pl2 = ax2.pcolor(np.arange(data_dict["N"]), data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10, idxs].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax2.set_xlabel('$i$', labelpad=-2)
ax2.set_ylabel('$t$', labelpad=0)
ax2.set_xlim((0, data_dict["N"]))
ax2.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
cbar2 = plt.colorbar(pl2)
cbar2.set_label('Re $W$', labelpad=-3)
ax3 = fig.add_subplot(323)
v_scaled = np.load(config["GENERAL"]["save_dir"]+'/v_scaled.npy')
pl3 = ax3.scatter(np.arange(data_dict["N"]), v_scaled[idxs], s=2, c=data_dict["xx"][idxs],
cmap='plasma')
ax3.set_xlabel('$i$', labelpad=-2)
ax3.set_xlim((0, data_dict["N"]))
ax3.set_ylabel(r'$\phi_1$', labelpad=-3)
cbar3 = plt.colorbar(pl3)
cbar3.set_label('$x$', labelpad=0)
ax4 = fig.add_subplot(324)
pl4 = ax4.pcolor(v_scaled, data_dict["tt"][::10]+t_off,
data_dict["data_org"][1::10].real, vmin=-1, vmax=1,
rasterized=True, cmap='plasma')
ax4.set_ylim((data_dict["tmin"]+t_off, data_dict["tmax"]+t_off))
ax4.set_xlabel(r'$\phi_1$', labelpad=0)
ax4.set_xlim((-1, 1))
ax4.set_ylabel(r'$t$', labelpad=0)
cbar4 = plt.colorbar(pl4)
cbar4.set_label('Re $W$', labelpad=-3)
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"])
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
num_pars = sum(p.numel() for p in model.net.parameters() if p.requires_grad)
print(num_pars)
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(0)+'.pkl', 'rb')
data_unperturbed = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open(config["GENERAL"]["save_dir"]+'/dat/run' +
config["TRAINING"]["n_train"]+'_p_'+str(-1)+'.pkl', 'rb')
data_perturbed_neg = pickle.load(pkl_file)
pkl_file.close()
prediction = model.integrate_svd(dataset_test, dataset_train.svd, 0, data_unperturbed["T"])
print("Calculating closest distances....")
dists_neg = cdist(np.append(data_perturbed_neg["data"].real, data_perturbed_neg["data"].imag,
axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
dists_learned = cdist(np.append(prediction[:, 0], prediction[:, 1], axis=1), np.append(
data_unperturbed["data"].real, data_unperturbed["data"].imag, axis=1))
phi_arr = np.linspace(-1, 1, data_unperturbed["N"])
t_off = 0
ax5 = fig.add_subplot(325)
pl5 = ax5.pcolor(phi_arr, data_unperturbed["tt"][::10]+t_off,
prediction[1::10, 0], vmin=-1, vmax=1,
rasterized=True)
ax5.axvline(x=(phi_arr[3]+phi_arr[4])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.axvline(x=(phi_arr[-4]+phi_arr[-5])/2, ymin=0, ymax=1, color='white', lw=1)
ax5.set_xlabel(r'$\phi_1$', labelpad=0)
ax5.set_ylabel(r'$t$', labelpad=0)
ax5.set_xlim((-1, 1))
ax5.set_ylim((data_unperturbed["tmin"]+t_off, data_unperturbed["tmax"]+t_off))
cbar5 = plt.colorbar(pl5)
cbar5.set_label('Re $W$', labelpad=-3)
ax6 = fig.add_subplot(326)
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_neg, axis=1)[:-1], label='$d$ true')
ax6.plot(data_unperturbed["tt"]+t_off, np.min(dists_learned, axis=1)
[:-1], '--', label='$d$ learned')
plt.legend()
ax6.set_xlabel('$t$', labelpad=0)
ax6.set_ylabel('$d$', labelpad=0)
# plt.subplots_adjust(top=0.94, wspace=0.35, right=0.98, bottom=0.18, left=0.08)
ax1.text(-0.25, 1., r'$\mathbf{a}$', transform=ax1.transAxes, weight='bold', fontsize=12)
ax2.text(-0.25, 1., r'$\mathbf{b}$', transform=ax2.transAxes, weight='bold', fontsize=12)
ax3.text(-0.25, 1., r'$\mathbf{c}$', transform=ax3.transAxes, weight='bold', fontsize=12)
ax4.text(-0.25, 1., r'$\mathbf{d}$', transform=ax4.transAxes, weight='bold', fontsize=12)
ax5.text(-0.25, 1., r'$\mathbf{e}$', transform=ax5.transAxes, weight='bold', fontsize=12)
ax6.text(-0.25, 1., r'$\mathbf{f}$', transform=ax6.transAxes, weight='bold', fontsize=12)
plt.subplots_adjust(top=0.96, wspace=0.35, right=0.95, bottom=0.09, hspace=0.31, left=0.08)
plt.show()
def main(config):
"""Integrate system and train model."""
verbose = config["GENERAL"].getboolean("verbose")
# Create data folders
if not os.path.exists(config["GENERAL"]["save_dir"]):
os.makedirs(config["GENERAL"]["save_dir"])
if not os.path.exists(config["GENERAL"]["save_dir"]+'/tests'):
os.makedirs(config["GENERAL"]["save_dir"]+'/tests')
# Create training and test data
if not os.path.exists(config["GENERAL"]["save_dir"]+'/dat'):
os.makedirs(config["GENERAL"]["save_dir"]+'/dat')
if config["MODEL"].getboolean("use_param"):
raise NotImplementedError
else:
integrate_system(config["SYSTEM"], int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["GENERAL"]["save_dir"]+'/dat/',
verbose=verbose)
# Create Dataset
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
if config["GENERAL"].getboolean("use_dmaps"):
utils_cgle.dmaps_transform(int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]), dataset_train,
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_train = utils_cgle.Dataset(0, int(config["TRAINING"]["n_train"]), config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
dataset_test = utils_cgle.Dataset(int(config["TRAINING"]["n_train"]),
int(config["TRAINING"]["n_train"]) +
int(config["TRAINING"]["n_test"]),
config["MODEL"],
path=config["GENERAL"]["save_dir"], verbose=verbose)
if verbose:
tests.test_perturbation(path=config["GENERAL"]["save_dir"], idx=0)
tests.test_dt(cint.f, path=config["GENERAL"]["save_dir"], idx=0)
tests.test_dataset(dataset_train, path=config["GENERAL"]["save_dir"])
if dataset_train.svd:
tests.test_svd(dataset_train, dataset_test, path=config["GENERAL"]["save_dir"])
# Create Dataloader
dataloader_train = torch.utils.data.DataLoader(
dataset_train, batch_size=int(config["TRAINING"]['batch_size']), shuffle=True,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
dataloader_test = torch.utils.data.DataLoader(
dataset_test, batch_size=int(config["TRAINING"]['batch_size']), shuffle=False,
num_workers=int(config["TRAINING"]['num_workers']), pin_memory=True)
network = lpde.network.Network(config["MODEL"], n_vars=2)
delta_x = float(config["SYSTEM"]["L"])/int(config["SYSTEM"]["N"]) * \
float(config["MODEL"]["rescale_dx"])
if verbose:
tests.test_fd_coeffs(network, path=config["GENERAL"]["save_dir"])
tests.test_derivs(network, torch.tensor(dataset_train.x_data[:1],
dtype=torch.get_default_dtype()),
torch.tensor([delta_x], dtype=torch.get_default_dtype()),
path=config["GENERAL"]["save_dir"])
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
if not os.path.exists(config["GENERAL"]["save_dir"]+'/log'):
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
else:
shutil.rmtree(config["GENERAL"]["save_dir"]+'/log')
os.makedirs(config["GENERAL"]["save_dir"]+'/log')
logger = SummaryWriter(config["GENERAL"]["save_dir"]+'/log/')
progress_bar = tqdm.tqdm(range(0, int(config["TRAINING"]['epochs'])),
total=int(config["TRAINING"]['epochs']),
leave=True, desc=lpde.utils.progress(0, 0))
if config["GENERAL"].getboolean('proceed_training'):
model.load_network('test.model')
for epoch in progress_bar:
train_loss = model.train()
val_loss = model.validate()
progress_bar.set_description(lpde.utils.progress(train_loss, val_loss))
logger.add_scalar('Loss/train', train_loss, epoch)
logger.add_scalar('Loss/val', val_loss, epoch)
logger.add_scalar('learning rate', model.optimizer.param_groups[-1]["lr"], epoch)
model.save_network('test.model')
if verbose:
model = lpde.model.Model(dataloader_train, dataloader_test, network, config["TRAINING"],
path=config["GENERAL"]["save_dir"]+'/')
model.load_network('test.model')
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=0)
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=2500)
tests.test_learned_dt(model, dataset_test, cint.f,
path=config["GENERAL"]["save_dir"], idx=4500)
_ = tests.test_integration(model, dataset_test, dataset_train.svd, 1000, 4000,
path=config["GENERAL"]["save_dir"])
tests.test_transient_dynamics(model, dataset_test, dataset_train.svd,
idx=int(config["TRAINING"]["n_train"]), t_off=0,
path=config["GENERAL"]["save_dir"])
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read('config/config.cfg')
main(config)
make_plot_paper(config)
| integrate_system |
niatelemetry_syslog_remote_dest_list_all_of.py | """
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document. # noqa: E501
The version of the OpenAPI document: 1.0.9-4950
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from intersight.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
|
class NiatelemetrySyslogRemoteDestListAllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'count': (int,), # noqa: E501
'results': ([NiatelemetrySyslogRemoteDest], none_type,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'count': 'Count', # noqa: E501
'results': 'Results', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""NiatelemetrySyslogRemoteDestListAllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
count (int): The total number of 'niatelemetry.SyslogRemoteDest' resources matching the request, across all pages. The 'Count' attribute is included when the HTTP GET request includes the '$inlinecount' parameter. [optional] # noqa: E501
results ([NiatelemetrySyslogRemoteDest], none_type): The array of 'niatelemetry.SyslogRemoteDest' resources matching the request. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
| from intersight.model.niatelemetry_syslog_remote_dest import NiatelemetrySyslogRemoteDest
globals()['NiatelemetrySyslogRemoteDest'] = NiatelemetrySyslogRemoteDest |
HomePage.tsx | import { FC, memo } from "react";
const HomePage: FC = (): JSX.Element => {
return <div>Example home page</div>;
};
| export default memo(HomePage); |
|
string8.rs | /*
cargo run -p vectors --bin string8
cargo fmt --verbose --package vectors
*/
fn main() {
let s1 = String::from("C");
let s2 = String::from("is"); | print!("{}", s);
} | let s3 = String::from("a");
let s4 = String::from("programming");
let s5 = String::from("language.");
let s = format!("{} {} {} {} {}", s1, s2, s3, s4, s5); |
associate_auditor_with_space_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package spaces
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"net/http"
"github.com/go-openapi/runtime"
"github.com/suse/carrier/shim/models"
)
// AssociateAuditorWithSpaceCreatedCode is the HTTP code returned for type AssociateAuditorWithSpaceCreated
const AssociateAuditorWithSpaceCreatedCode int = 201
/*AssociateAuditorWithSpaceCreated successful response
swagger:response associateAuditorWithSpaceCreated
*/
type AssociateAuditorWithSpaceCreated struct {
/*
In: Body
*/
Payload *models.AssociateAuditorWithSpaceResponseResource `json:"body,omitempty"`
}
// NewAssociateAuditorWithSpaceCreated creates AssociateAuditorWithSpaceCreated with default headers values
func | () *AssociateAuditorWithSpaceCreated {
return &AssociateAuditorWithSpaceCreated{}
}
// WithPayload adds the payload to the associate auditor with space created response
func (o *AssociateAuditorWithSpaceCreated) WithPayload(payload *models.AssociateAuditorWithSpaceResponseResource) *AssociateAuditorWithSpaceCreated {
o.Payload = payload
return o
}
// SetPayload sets the payload to the associate auditor with space created response
func (o *AssociateAuditorWithSpaceCreated) SetPayload(payload *models.AssociateAuditorWithSpaceResponseResource) {
o.Payload = payload
}
// WriteResponse to the client
func (o *AssociateAuditorWithSpaceCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {
rw.WriteHeader(201)
if o.Payload != nil {
payload := o.Payload
if err := producer.Produce(rw, payload); err != nil {
panic(err) // let the recovery middleware deal with this
}
}
}
| NewAssociateAuditorWithSpaceCreated |
utils.py | # -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import re
from typing import Mapping, Union
from six import string_types
from frozendict import frozendict
from twisted.internet import defer
from synapse.api.constants import EventTypes, RelationTypes
from synapse.util.async_helpers import yieldable_gather_results
from . import EventBase
# Split strings on "." but not "\." This uses a negative lookbehind assertion for '\'
# (?<!stuff) matches if the current position in the string is not preceded
# by a match for 'stuff'.
# TODO: This is fast, but fails to handle "foo\\.bar" which should be treated as
# the literal fields "foo\" and "bar" but will instead be treated as "foo\\.bar"
SPLIT_FIELD_REGEX = re.compile(r"(?<!\\)\.")
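# Illustration (not part of the original module): the negative lookbehind means
# only unescaped dots split a field path, e.g.
#   SPLIT_FIELD_REGEX.split("content.body")    -> ['content', 'body']
#   SPLIT_FIELD_REGEX.split(r"m\.relates_to")  -> ['m\\.relates_to']  (no split)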
def prune_event(event):
""" Returns a pruned version of the given event, which removes all keys we
don't know about or think could potentially be dodgy.
This is used when we "redact" an event. We want to remove all fields that
the user has specified, but we do want to keep necessary information like
type, state_key etc.
Args:
event (FrozenEvent)
Returns:
FrozenEvent
"""
pruned_event_dict = prune_event_dict(event.get_dict())
from . import event_type_from_format_version
pruned_event = event_type_from_format_version(event.format_version)(
pruned_event_dict, event.internal_metadata.get_dict()
)
# Mark the event as redacted
pruned_event.internal_metadata.redacted = True
return pruned_event
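# Illustrative example (added for clarity; the event variable is hypothetical):
#   pruned = prune_event(member_event)
#   # for an m.room.member event only "membership" survives in content, so
#   # client-supplied keys such as "displayname" are removed by the redaction.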
def prune_event_dict(event_dict):
"""Redacts the event_dict in the same way as `prune_event`, except it
operates on dicts rather than event objects
Args:
event_dict (dict)
Returns:
dict: A copy of the pruned event dict
"""
allowed_keys = [
"event_id",
"sender",
"room_id",
"hashes",
"signatures",
"content",
"type",
"state_key",
"depth",
"prev_events",
"prev_state",
"auth_events",
"origin",
"origin_server_ts",
"membership",
]
event_type = event_dict["type"]
new_content = {}
def add_fields(*fields):
for field in fields:
if field in event_dict["content"]:
new_content[field] = event_dict["content"][field]
if event_type == EventTypes.Member:
add_fields("membership")
elif event_type == EventTypes.Create:
add_fields("creator")
elif event_type == EventTypes.JoinRules:
add_fields("join_rule")
elif event_type == EventTypes.PowerLevels:
add_fields(
"users",
"users_default",
"events",
"events_default",
"state_default",
"ban",
"kick",
"redact",
)
elif event_type == EventTypes.Aliases:
add_fields("aliases")
elif event_type == EventTypes.RoomHistoryVisibility:
add_fields("history_visibility")
allowed_fields = {k: v for k, v in event_dict.items() if k in allowed_keys}
allowed_fields["content"] = new_content
unsigned = {}
allowed_fields["unsigned"] = unsigned
event_unsigned = event_dict.get("unsigned", {})
if "age_ts" in event_unsigned:
unsigned["age_ts"] = event_unsigned["age_ts"]
if "replaces_state" in event_unsigned:
unsigned["replaces_state"] = event_unsigned["replaces_state"]
return allowed_fields
def _copy_field(src, dst, field):
"""Copy the field in 'src' to 'dst'.
For example, if src={"foo":{"bar":5}} and dst={}, and field=["foo","bar"]
then dst={"foo":{"bar":5}}.
Args:
src(dict): The dict to read from.
dst(dict): The dict to modify.
field(list<str>): List of keys to drill down to in 'src'.
"""
if len(field) == 0: # this should be impossible
return
if len(field) == 1: # common case e.g. 'origin_server_ts'
if field[0] in src:
dst[field[0]] = src[field[0]]
return
# Else is a nested field e.g. 'content.body'
# Pop the last field as that's the key to move across and we need the
# parent dict in order to access the data. Drill down to the right dict.
key_to_move = field.pop(-1)
sub_dict = src
for sub_field in field: # e.g. sub_field => "content"
if sub_field in sub_dict and type(sub_dict[sub_field]) in [dict, frozendict]:
sub_dict = sub_dict[sub_field]
else:
return
if key_to_move not in sub_dict:
return
# Insert the key into the output dictionary, creating nested objects
# as required. We couldn't do this any earlier or else we'd need to delete
# the empty objects if the key didn't exist.
sub_out_dict = dst
for sub_field in field:
sub_out_dict = sub_out_dict.setdefault(sub_field, {})
sub_out_dict[key_to_move] = sub_dict[key_to_move]
def only_fields(dictionary, fields):
"""Return a new dict with only the fields in 'dictionary' which are present
in 'fields'.
If there are no event fields specified then all fields are included.
The entries may include '.' characters to indicate sub-fields.
So ['content.body'] will include the 'body' field of the 'content' object.
A literal '.' character in a field name may be escaped using a '\'.
Args:
dictionary(dict): The dictionary to read from.
fields(list<str>): A list of fields to copy over. Only shallow refs are
taken.
Returns:
dict: A new dictionary with only the given fields. If fields was empty,
the same dictionary is returned.
"""
if len(fields) == 0:
return dictionary
# for each field, convert it:
# ["content.body.thing\.with\.dots"] => [["content", "body", "thing\.with\.dots"]]
split_fields = [SPLIT_FIELD_REGEX.split(f) for f in fields]
# for each element of the output array of arrays:
# remove escaping so we can use the right key names.
split_fields[:] = [
[f.replace(r"\.", r".") for f in field_array] for field_array in split_fields
]
output = {}
for field_array in split_fields:
_copy_field(dictionary, output, field_array)
return output
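# Illustrative example (added for clarity, values are made up):
#   d = {"type": "m.room.message", "content": {"body": "hi", "msgtype": "m.text"}}
#   only_fields(d, ["content.body"]) == {"content": {"body": "hi"}}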
def format_event_raw(d):
return d
def format_event_for_client_v1(d):
d = format_event_for_client_v2(d)
sender = d.get("sender")
if sender is not None:
d["user_id"] = sender
copy_keys = (
"age",
"redacted_because",
"replaces_state",
"prev_content",
"invite_room_state",
)
for key in copy_keys:
if key in d["unsigned"]:
d[key] = d["unsigned"][key]
return d
def format_event_for_client_v2(d):
drop_keys = (
"auth_events",
"prev_events",
"hashes",
"signatures",
"depth",
"origin",
"prev_state",
)
for key in drop_keys:
d.pop(key, None)
return d
def format_event_for_client_v2_without_room_id(d):
d = format_event_for_client_v2(d)
d.pop("room_id", None)
return d
def serialize_event(
e,
time_now_ms,
as_client_event=True,
event_format=format_event_for_client_v1,
token_id=None,
only_event_fields=None,
is_invite=False,
):
"""Serialize event for clients
Args:
e (EventBase)
time_now_ms (int)
as_client_event (bool)
event_format
token_id
only_event_fields
is_invite (bool): Whether this is an invite that is being sent to the
invitee
Returns:
dict
"""
# FIXME(erikj): To handle the case of presence events and the like
if not isinstance(e, EventBase):
return e
time_now_ms = int(time_now_ms)
# Should this strip out None's?
d = {k: v for k, v in e.get_dict().items()}
d["event_id"] = e.event_id
if "age_ts" in d["unsigned"]:
d["unsigned"]["age"] = time_now_ms - d["unsigned"]["age_ts"]
del d["unsigned"]["age_ts"]
if "redacted_because" in e.unsigned:
d["unsigned"]["redacted_because"] = serialize_event(
e.unsigned["redacted_because"], time_now_ms, event_format=event_format
)
if token_id is not None:
if token_id == getattr(e.internal_metadata, "token_id", None):
txn_id = getattr(e.internal_metadata, "txn_id", None)
if txn_id is not None: | # will already have the state since they're in the room.
if not is_invite:
d["unsigned"].pop("invite_room_state", None)
if as_client_event:
d = event_format(d)
if only_event_fields:
if not isinstance(only_event_fields, list) or not all(
isinstance(f, string_types) for f in only_event_fields
):
raise TypeError("only_event_fields must be a list of strings")
d = only_fields(d, only_event_fields)
return d
class EventClientSerializer(object):
"""Serializes events that are to be sent to clients.
This is used for bundling extra information with any events to be sent to
clients.
"""
def __init__(self, hs):
self.store = hs.get_datastore()
self.experimental_msc1849_support_enabled = (
hs.config.experimental_msc1849_support_enabled
)
@defer.inlineCallbacks
def serialize_event(self, event, time_now, bundle_aggregations=True, **kwargs):
"""Serializes a single event.
Args:
event (EventBase)
time_now (int): The current time in milliseconds
bundle_aggregations (bool): Whether to bundle in related events
**kwargs: Arguments to pass to `serialize_event`
Returns:
Deferred[dict]: The serialized event
"""
# To handle the case of presence events and the like
if not isinstance(event, EventBase):
return event
event_id = event.event_id
serialized_event = serialize_event(event, time_now, **kwargs)
# If MSC1849 is enabled then we need to look if there are any relations
# we need to bundle in with the event.
# Do not bundle relations if the event has been redacted
if not event.internal_metadata.is_redacted() and (
self.experimental_msc1849_support_enabled and bundle_aggregations
):
annotations = yield self.store.get_aggregation_groups_for_event(event_id)
references = yield self.store.get_relations_for_event(
event_id, RelationTypes.REFERENCE, direction="f"
)
if annotations.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.ANNOTATION] = annotations.to_dict()
if references.chunk:
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REFERENCE] = references.to_dict()
edit = None
if event.type == EventTypes.Message:
edit = yield self.store.get_applicable_edit(event_id)
if edit:
# If there is an edit replace the content, preserving existing
# relations.
relations = event.content.get("m.relates_to")
serialized_event["content"] = edit.content.get("m.new_content", {})
if relations:
serialized_event["content"]["m.relates_to"] = relations
else:
serialized_event["content"].pop("m.relates_to", None)
r = serialized_event["unsigned"].setdefault("m.relations", {})
r[RelationTypes.REPLACE] = {
"event_id": edit.event_id,
"origin_server_ts": edit.origin_server_ts,
"sender": edit.sender,
}
return serialized_event
def serialize_events(self, events, time_now, **kwargs):
"""Serializes multiple events.
Args:
event (iter[EventBase])
time_now (int): The current time in milliseconds
**kwargs: Arguments to pass to `serialize_event`
Returns:
Deferred[list[dict]]: The list of serialized events
"""
return yieldable_gather_results(
self.serialize_event, events, time_now=time_now, **kwargs
)
def copy_power_levels_contents(
old_power_levels: Mapping[str, Union[int, Mapping[str, int]]]
):
"""Copy the content of a power_levels event, unfreezing frozendicts along the way
Raises:
TypeError if the input does not look like a valid power levels event content
"""
if not isinstance(old_power_levels, collections.Mapping):
raise TypeError("Not a valid power-levels content: %r" % (old_power_levels,))
power_levels = {}
for k, v in old_power_levels.items():
if isinstance(v, int):
power_levels[k] = v
continue
if isinstance(v, collections.Mapping):
power_levels[k] = h = {}
for k1, v1 in v.items():
# we should only have one level of nesting
if not isinstance(v1, int):
raise TypeError(
"Invalid power_levels value for %s.%s: %r" % (k, k1, v1)
)
h[k1] = v1
continue
raise TypeError("Invalid power_levels value for %s: %r" % (k, v))
return power_levels | d["unsigned"]["transaction_id"] = txn_id
# If this is an invite for somebody else, then we don't care about the
# invite_room_state as that's meant solely for the invitee. Other clients |
parcellation.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#autoindent
"""
Generic Parcellation class:
Contains all the items that define a multi-subject parcellation
Author : Bertrand Thirion, 2005-2008
TODO : add a method 'global field', i.e. non-subject-specific info
"""
import numpy as np
class Parcellation(object):
"""
This is the basic Parcellation class:
It is defined discretely, i.e.
the parcellation is an explicit function on the set of voxels
(or equivalently a labelling)
we explicitly handle the case of multiple subjects,
where the labelling varies with the subjects
k is the number of parcels/classes
ijk: array of shape(nbvoxels,anatomical_dimension)
that represents the grid of voxels to be parcelled
(the same for all subjects)
typically anatomical_dimension=3
referential represents the image referential,
resolution, position and size
this is expressed as an affine (4,4) transformation matrix
label (nbvox, subjects) array: nbvox is the number of voxels
within the binary mask
if the voxel is not labelled in a given subject, then the label is -1
thus the label has integer values in [-1,k-1]
group_labels is a labelling of the template
subjects=None is a list of ids of the subjects
by default, it is set as range(self.nb_subj)
"""
def __init__(self, k, ijk, label, group_labels=None,
referential = None, subjects = []):
"""
Constructor
"""
self.k = k
self.ijk = ijk.astype(np.int)
self.nbvox = ijk.shape[0]
if np.size(ijk)==self.nbvox:
ijk = np.reshape(ijk, (self.nbvox, 1))
self.anatdim = ijk.shape[1]
self.label = label.astype(np.int)
if np.size(label)==self.nbvox:
label = np.reshape(label,(self.nbvox,1))
self.nb_subj = label.shape[1]
if group_labels==None:
self.group_labels = np.zeros(self.nbvox).astype(np.int)
else:
self.group_labels = group_labels
if subjects==[]:
self.subjects = range(self.nb_subj)
else:
self.subjects = subjects
self.referential = referential
self.features = []
self.fids = []
self.check()
def | (self):
"""
Some sanity check on the arguments of the class
"""
if self.label.min()<-1:
raise ValueError,"All labels must be >=-1"
if (self.label.max()>self.k-1):
raise ValueError, "all labels must be < %d" %self.k
if self.label.shape[0]!=self.nbvox:
print self.ijk.shape[0], self.nbvox
raise ValueError,"The mask does not coincide with the labelling"
if np.size(self.group_labels) != self.nbvox:
print np.size(self.group_labels), self.nbvox
raise ValueError,"group_label has not the correct size"
else:
self.group_labels = np.reshape(self.group_labels,self.nbvox)
if len(self.subjects)!=self.nb_subj:
print len(self.subjects), self.nb_subj
raise ValueError,"The list of subjects \
does not coincide with the number of subjects"
def copy(self):
"""
Pa = self.copy()
copy method
"""
Pa = Parcellation(self.k, self.ijk, self.label.copy(),\
self.group_labels.copy(), self.referential,
self.subjects)
for fid,f in zip(self.fids,self.features):
Pa.set_feature(f, fid)
return Pa
def empty_parcels(self):
"""
q = self.empty_parcels()
returns the ids of all parcels that are empty
"""
q = [i for i in range(self.k) if i not in self.label]
q = np.array(q)
return q
def population(self):
"""
pop = self.population()
the population of parcellation is the number of voxels
included in each parcel
this function simply returns an array of shape
(number of parcels, number of subjects)
that contains the parcel population
"""
pop = np.zeros((self.k,self.nb_subj))
for s in range(self.nb_subj):
for i in range(self.k):
pop[i,s] = np.size(np.nonzero(self.label[:,s]==i))
return pop
def set_group_labels(self,glabels):
"""
self.set_group_labels(glabels)
reset the group labels
"""
if np.size(glabels)==self.nbvox:
self.group_labels = glabels
self.check()
else:
raise ValueError,"Not the correct shape for glabel"
def set_labels(self,label):
"""
resets the label array of the class
Parameters
----------
label = array of shape(self.k,self.nb_subj)
"""
if (np.shape(label)==(self.nbvox,self.nb_subj)):
self.label = label
self.check()
else:
raise ValueError,"Not the correct shape for label"
def set_subjects(self,subjects):
"""
self.set_subjects(subjects)
reset the list of subjects name
Parameters
----------
subjects = a list of subjects id with length self.nb_subj
"""
if len(subjects)!=self.nb_subj:
print len(subjects), self.nb_subj
raise ValueError,"The list of subjects \
does not coincide with the number of subjects"
else:
self.subjects = subjects
def add_subjects(self,label,nsubj_id):
"""
self.add_subjects(label,subj_id)
Add some subjects to the structure
Not implemented yet.
"""
print "not implemented yet"
pass
#-----------------------------------------------------------
#-- Parcel-level analysis of various information -----------
#-----------------------------------------------------------
def set_info(self,data,fid):
"""
self.set_info(data,fid):
Add some non-subject specific feature information
defined on a voxel-by voxel basis
Parameters
----------
feature: an array of shape(self.nbvox,dim),
where dim is the info dimension
fid : an identifier of the information
"""
pass
def make_feature_from_info(self,fid):
"""
self.make_feature_from_info(fid)
"""
pass
def set_feature(self,feature,fid):
"""
self.set_feature(feature,fid):
Add a feature to the feature list of the structure
Parameters
----------
feature: array of shape(self.nb_subj,self.k,fdim),
where fdim is the feature dimension
fid, string, the feature id
"""
# 1. test that the feature does not exist yet
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if np.size(i)>0:
raise ValueError,"Existing feature id"
# 2. if no, add the new one
if np.size(feature)==self.nb_subj*self.k:
feature = np.reshape(feature,(self.nb_subj,self.k))
if (feature.shape[0])==self.nb_subj:
if (feature.shape[1])==self.k:
self.features.append(feature)
self.fids.append(fid)
else: raise ValueError,"incoherent size"
else: raise ValueError,"incoherent size"
def get_feature(self,fid):
"""
Get feature to the feature list of the structure
Parameters
----------
fid, string, the feature id
Returns
-------
feature: array of shape(self.nb_subj,self.k,fdim),
where fdim is the feature dimension
"""
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if np.size(i)==0:
print "The feature does not exist"
return None
if np.size(i)==1:
return self.features[int(i)]
def isfield(self,fid):
"""
tests whether fid is known as a field
"""
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
return np.size(i)==1
def remove_feature(self,fid):
"""
Remove feature from the feature list of the structure
Parameters
----------
fid, string, the feature id
"""
i = np.array([fid!=f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
Rf = [self.features[j] for j in i]
Rfid =[self.fids[j] for j in i]
self.features = Rf
self.fids= Rfid
def make_feature(self, data, fid, subj=-1, method="average"):
"""
Compute and Add a feature to the feature list of the structure
Parameters
----------
data: a list of arrays of shape(nbvoxels,fdim),
where fdim is the feature dimension
Note: if subj>-1, then data is simply an array
of shape (nbvoxels,fdim)
fid, string, the feature id
subj = -1: subject in which this is performed
if subj==-1, this is in all subjects,
and it is checked that the fid is not defined yet
otherwise, this is in one particular subject,
and the feature may be overriden
method = 'average', the way to compute the feature
"""
# 1. test that the feature does not exist yet
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if subj==-1:
if np.size(i)>0:
print fid,
raise ValueError,"Existing feature id"
#2. If no, compute the new feature and add it to the list
feature = self.average_feature(data)
self.features.append(feature)
self.fids.append(fid)
else:
if subj>self.nb_subj-1:
raise ValueError,"incoherent subject index"
if np.size(i)==0:
# create the feature
i = len(self.fids)
self.fids.append(fid)
self.features.append(np.zeros((self.nb_subj,
self.k, data.shape[1])))
# check that the dimension is OK
if data.shape[1]!=self.features[i].shape[2]:
raise ValueError,"Incompatible feature dimension"
# make the feature
feature = self.average_feature(data,subj)
self.features[i][subj]=feature
def PRFX(self, fid, zstat=1, DMtx = None):
"""
Compute the Random effects of the feature on the
parcels across subjects
Parameters
----------
fid, string, feature identifier;
it is assumed that the feature is 1-dimensional
zstat indicator variable for the output variate
if ztsat==0, the basic student statistic is returned
if zstat==1, the student stat is converted to a normal(z) variate
DMtx = None : design matrix for the model.
So far, it is assumed that DMtx = np.ones(self.nb_subj)
returns
-------
RFX: array with shape (self.k,fdim)
the parcel-based RFX.
"""
if self.nb_subj<2:
print "Sorry, there is only one subject"
return []
#1. find the field id
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if np.size(i)==0:
print "The feature does not exist"
return []
#2. Compute the PRFX
PF = self.features[i]
eps = 1.e-7
SF = np.array(np.std(PF,0))
SF = np.maximum(SF,eps)
MF = np.array(np.mean(PF,0))
RFX = MF/SF*np.sqrt(self.nb_subj)
if zstat==1:
import scipy.stats as ST
pval = ST.t.cdf(RFX,self.nb_subj-1)
RFX = np.minimum(10,np.maximum(-10,ST.norm.ppf(pval)))
return RFX
def average_feature(self,Feature,subj=-1):
"""
compute parcel-based feature by averaging voxel-based quantities
Parameters
----------
Feature is a list of length self.nb_subj,
so that for each s in 0..self.nb_subj-1,
that Feature[s] is an (nvox,fdim)-shaped array
where nvox is the number of voxels in subject s with label >-1
subj = -1: subject in which this is performed
if subj==-1, this is in all subjects,
and it is checked that the fid is not defined yet
if subj>-1, this is in one particular subject,
and Feature merely is an array, not a list
Returns
-------
PF: array of shape (self.nb_subj,self.k,fdim) if subj==-1
or (self.k,fdim)
containing the parcel-based features.
"""
if subj==-1:
# Do the computation in available subjects
PF = []
for s in range (self.nb_subj):
pf = np.zeros((self.k, Feature[s].shape[1])).astype('d')
pop = np.zeros((self.k))
j = 0
for i in range(self.nbvox):
if self.label[i,s]>-1:
pf[self.label[i,s],:] += Feature[s][j,:]
j = j+1
pop[self.label[i,s]] +=1
for i in range(self.k):
if pop[i]>0:
pf[i,:]/=(pop[i])
PF.append(pf)
PF = np.array(PF)
else:
# Do the computation in subject s specifically
if subj>self.nb_subj-1:
raise ValueError,"incoherent subject index"
s = subj
PF = np.zeros((self.k, Feature.shape[1])).astype('d')
pop = np.zeros((self.k))
j = 0
for i in range(self.nbvox):
if self.label[i,s]>-1:
PF[self.label[i,s],:] += Feature[j,:]
j = j+1
pop[self.label[i,s]] +=1
for i in range(self.k):
if pop[i]>0:
PF[i,:]/=(pop[i])
return(PF)
def variance_inter(self,fid):
"""
Compute the variance of the feature at each parcel across subjects
Parameters
----------
fid, string, the feature identifier
Returns
-------
HI, array of shape (self.k) (?)
the inter-subject variance
"""
#.0 check that there is more than 1 subject
if self.nb_subj<2:
print "Sorry, there is only one subject"
return []
#1. find the field id
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if np.size(i)==0:
print "The feature does not exist"
return []
#2. Compute the cross-subject variance
AF = self.features[i]
pop = np.transpose(self.population())
MAF = np.mean(AF,0)
dAF = AF-MAF
for d in range(AF.shape[2]):
dAF[:,:,d] = (pop>0)*dAF[:,:,d]**2
HI = np.sum(dAF)/ np.repeat(np.sum(pop>0)-1,dAF.shape[2],1).T
return HI
def var_feature_intra(self,Feature):
"""
compute the feature variance in each subject and each parcel
"""
VF = []
for s in range (self.nb_subj):
pf = np.zeros((self.k, Feature[s].shape[1])).astype('d')
vf = np.zeros((self.k, Feature[s].shape[1])).astype('d')
pop = np.zeros((self.k))
j = 0
for i in range(self.nbvox):
if self.label[i,s]>-1:
pf[self.label[i,s],:] += Feature[s][j,:]
pop[self.label[i,s]] +=1
j = j+1
for i in range(self.k):
if pop[i]>0:
pf[i,:]/=(pop[i])
j = 0
for i in range(self.nbvox):
if self.label[i,s]>-1:
dfeat = pf[self.label[i,s],:] - Feature[s][j,:]
vf[self.label[i,s],:] += dfeat*dfeat
j = j+1
for i in range(self.k):
if pop[i]>1:
vf[i,:]/=(pop[i]-1)
VF.append(vf)
return(VF)
def variance_intra(self,data,bweight=0):
"""
Vintra = self.variance_intra(fid)
Compute the variance of the feature at each parcel within each subject
Parameters
----------
data is the data on which the variance is estimated:
this is a list of arrays
bweight=0: flag for the relative weighting of the parcels
if bweight = 1, the variance of each parcels
is weighted by its size
else, all parcels are equally weighted
Returns
-------
VA : array of shape (self.k) of the variance
"""
VF = self.var_feature_intra(data)
Vintra = np.zeros((self.nb_subj)).astype('d')
if bweight==1:
pop = self.population()
for s in range (self.nb_subj):
Vintra[s] = np.mean(np.mean(VF[s],1)*(pop[:,s]-1))/np.mean(pop[:,s])
else:
for s in range (self.nb_subj):
Vintra[s] = np.mean(np.mean(VF[s]))
return Vintra
def boxplot_feature(self,pid,fids):
"""
self.show_feature(pid,fids)
This function makes a boxplot of the feature distribution
in a given parcel across subjects
Parameters
----------
pid = parcel identifier an integer within the [0..self.K] range
fids = list of feature ids
"""
#1. test that pid is correct
if pid<0:
raise ValueError,"Negative parcel id"
if pid>self.k:
raise ValueError,"Wrong parcel id"
# 2. test that the feature(s) exist
idx = []
for fid in fids:
i = np.array([fid==f for f in self.fids])
i = np.nonzero(i)
i = np.reshape(i,np.size(i))
if np.size(i)==0:
raise ValueError,"The feature does not exist yet"
idx.append(i)
#3 get the data and make the figure
dataplot = []
for j in idx:
dataplot.append(np.transpose(self.features[j][:,pid]))
dataplot = np.transpose(np.concatenate(dataplot))
print np.shape(dataplot)
import matplotlib.pylab as mp
mp.figure()
mp.boxplot(dataplot)
| check |
mod.rs |
pub mod etcd;
/// EtcdError holds the errors that can occur while trying to dump information
/// from etcd database
#[derive(Debug)]
pub(crate) enum EtcdError {
Etcd(StoreError),
K8sResource(K8sResourceError),
IOError(std::io::Error),
Custom(String),
}
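// Illustrative note (added for clarity; `store.get_kvs` is a hypothetical call):
// the From impls below let dump code use the `?` operator, e.g.
//   let kvs = store.get_kvs(prefix).await?; // StoreError -> EtcdError::Etcd
// inside any function returning Result<_, EtcdError>.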
impl From<StoreError> for EtcdError {
fn from(e: StoreError) -> Self {
EtcdError::Etcd(e)
}
}
impl From<std::io::Error> for EtcdError {
fn from(e: Error) -> Self {
EtcdError::IOError(e)
}
}
impl From<K8sResourceError> for EtcdError {
fn from(e: K8sResourceError) -> Self {
EtcdError::K8sResource(e)
}
} | use crate::collect::k8s_resources::client::K8sResourceError;
use common_lib::types::v0::store::definitions::StoreError;
use std::io::Error; |
|
issue-2955.rs | // rustfmt-condense_wildcard_suffixes: true
fn main() | {
match (1, 2, 3) {
(..) => (),
}
} |
|
41-es2015.js | (window["webpackJsonp"] = window["webpackJsonp"] || []).push([[41],{
/***/ "./node_modules/@ionic/core/dist/esm/ion-nav_5.entry.js":
/*!**************************************************************!*\
!*** ./node_modules/@ionic/core/dist/esm/ion-nav_5.entry.js ***!
\**************************************************************/
/*! exports provided: ion_nav, ion_nav_link, ion_nav_pop, ion_nav_push, ion_nav_set_root */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_nav", function() { return Nav; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_nav_link", function() { return NavLink; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_nav_pop", function() { return NavPop; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_nav_push", function() { return NavPush; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ion_nav_set_root", function() { return NavSetRoot; });
/* harmony import */ var _core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! ./core-feeeff0d.js */ "./node_modules/@ionic/core/dist/esm/core-feeeff0d.js");
/* harmony import */ var _config_3c7f3790_js__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./config-3c7f3790.js */ "./node_modules/@ionic/core/dist/esm/config-3c7f3790.js");
/* harmony import */ var _helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./helpers-46f4a262.js */ "./node_modules/@ionic/core/dist/esm/helpers-46f4a262.js");
/* harmony import */ var _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./constants-3c3e1099.js */ "./node_modules/@ionic/core/dist/esm/constants-3c3e1099.js");
/* harmony import */ var _framework_delegate_c2e2e1f4_js__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./framework-delegate-c2e2e1f4.js */ "./node_modules/@ionic/core/dist/esm/framework-delegate-c2e2e1f4.js");
/* harmony import */ var _index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! ./index-4d91f03a.js */ "./node_modules/@ionic/core/dist/esm/index-4d91f03a.js");
/* harmony import */ var _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(/*! ./cubic-bezier-2812fda3.js */ "./node_modules/@ionic/core/dist/esm/cubic-bezier-2812fda3.js");
const VIEW_STATE_NEW = 1;
const VIEW_STATE_ATTACHED = 2;
const VIEW_STATE_DESTROYED = 3;
class | {
constructor(component, params) {
this.component = component;
this.params = params;
this.state = VIEW_STATE_NEW;
}
async init(container) {
this.state = VIEW_STATE_ATTACHED;
if (!this.element) {
const component = this.component;
this.element = await Object(_framework_delegate_c2e2e1f4_js__WEBPACK_IMPORTED_MODULE_4__["a"])(this.delegate, container, component, ['ion-page', 'ion-page-invisible'], this.params);
}
}
/**
* DOM WRITE
*/
_destroy() {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(this.state !== VIEW_STATE_DESTROYED, 'view state must be ATTACHED');
const element = this.element;
if (element) {
if (this.delegate) {
this.delegate.removeViewFromDom(element.parentElement, element);
}
else {
element.remove();
}
}
this.nav = undefined;
this.state = VIEW_STATE_DESTROYED;
}
}
const matches = (view, id, params) => {
if (!view) {
return false;
}
if (view.component !== id) {
return false;
}
const currentParams = view.params;
if (currentParams === params) {
return true;
}
if (!currentParams && !params) {
return true;
}
if (!currentParams || !params) {
return false;
}
const keysA = Object.keys(currentParams);
const keysB = Object.keys(params);
if (keysA.length !== keysB.length) {
return false;
}
// Test for A's keys different from B.
for (const key of keysA) {
if (currentParams[key] !== params[key]) {
return false;
}
}
return true;
};
const convertToView = (page, params) => {
if (!page) {
return null;
}
if (page instanceof ViewController) {
return page;
}
return new ViewController(page, params);
};
const convertToViews = (pages) => {
return pages.map(page => {
if (page instanceof ViewController) {
return page;
}
if ('page' in page) {
return convertToView(page.page, page.params);
}
return convertToView(page, undefined);
}).filter(v => v !== null);
};
const Nav = class {
constructor(hostRef) {
Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
this.transInstr = [];
this.animationEnabled = true;
this.useRouter = false;
this.isTransitioning = false;
this.destroyed = false;
this.views = [];
/**
* If `true`, the nav should animate the transition of components.
*/
this.animated = true;
this.ionNavWillLoad = Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["d"])(this, "ionNavWillLoad", 7);
this.ionNavWillChange = Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["d"])(this, "ionNavWillChange", 3);
this.ionNavDidChange = Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["d"])(this, "ionNavDidChange", 3);
}
swipeGestureChanged() {
if (this.gesture) {
this.gesture.setDisabled(this.swipeGesture !== true);
}
}
rootChanged() {
if (this.root !== undefined) {
if (!this.useRouter) {
this.setRoot(this.root, this.rootParams);
}
}
}
componentWillLoad() {
this.useRouter =
!!document.querySelector('ion-router') &&
!this.el.closest('[no-router]');
if (this.swipeGesture === undefined) {
const mode = Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["c"])(this);
this.swipeGesture = _config_3c7f3790_js__WEBPACK_IMPORTED_MODULE_1__["b"].getBoolean('swipeBackEnabled', mode === 'ios');
}
this.ionNavWillLoad.emit();
}
async componentDidLoad() {
this.rootChanged();
this.gesture = (await __webpack_require__.e(/*! import() | swipe-back-35ad8e37-js */ "swipe-back-35ad8e37-js").then(__webpack_require__.bind(null, /*! ./swipe-back-35ad8e37.js */ "./node_modules/@ionic/core/dist/esm/swipe-back-35ad8e37.js"))).createSwipeBackGesture(this.el, this.canStart.bind(this), this.onStart.bind(this), this.onMove.bind(this), this.onEnd.bind(this));
this.swipeGestureChanged();
}
componentDidUnload() {
for (const view of this.views) {
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["l"])(view.element, _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__["d"]);
view._destroy();
}
if (this.gesture) {
this.gesture.destroy();
this.gesture = undefined;
}
// release swipe back gesture and transition
this.transInstr.length = this.views.length = 0;
this.destroyed = true;
}
/**
* Push a new component onto the current navigation stack. Pass any additional
* information along as an object. This additional information is accessible
* through NavParams.
*
* @param component The component to push onto the navigation stack.
* @param componentProps Any properties of the component.
* @param opts The navigation options.
* @param done The transition complete function.
*/
push(component, componentProps, opts, done) {
return this.queueTrns({
insertStart: -1,
insertViews: [{ page: component, params: componentProps }],
opts
}, done);
}
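// Illustrative usage (added for clarity; the page name and params are hypothetical):
//   const nav = document.querySelector('ion-nav');
//   nav.push('app-detail-page', { itemId: 42 }, { animated: true });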
/**
* Inserts a component into the navigation stack at the specified index.
* This is useful to add a component at any point in the navigation stack.
*
* @param insertIndex The index to insert the component at in the stack.
* @param component The component to insert into the navigation stack.
* @param componentProps Any properties of the component.
* @param opts The navigation options.
* @param done The transition complete function.
*/
insert(insertIndex, component, componentProps, opts, done) {
return this.queueTrns({
insertStart: insertIndex,
insertViews: [{ page: component, params: componentProps }],
opts
}, done);
}
/**
* Inserts an array of components into the navigation stack at the specified index.
* The last component in the array will become instantiated as a view, and animate
* in to become the active view.
*
* @param insertIndex The index to insert the components at in the stack.
* @param insertComponents The components to insert into the navigation stack.
* @param opts The navigation options.
* @param done The transition complete function.
*/
insertPages(insertIndex, insertComponents, opts, done) {
return this.queueTrns({
insertStart: insertIndex,
insertViews: insertComponents,
opts
}, done);
}
/**
* Pop a component off of the navigation stack. Navigates back from the current
* component.
*
* @param opts The navigation options.
* @param done The transition complete function.
*/
pop(opts, done) {
return this.queueTrns({
removeStart: -1,
removeCount: 1,
opts
}, done);
}
/**
* Pop to a specific index in the navigation stack.
*
* @param indexOrViewCtrl The index or view controller to pop to.
* @param opts The navigation options.
* @param done The transition complete function.
*/
popTo(indexOrViewCtrl, opts, done) {
const tiConfig = {
removeStart: -1,
removeCount: -1,
opts
};
if (typeof indexOrViewCtrl === 'object' && indexOrViewCtrl.component) {
tiConfig.removeView = indexOrViewCtrl;
tiConfig.removeStart = 1;
}
else if (typeof indexOrViewCtrl === 'number') {
tiConfig.removeStart = indexOrViewCtrl + 1;
}
return this.queueTrns(tiConfig, done);
}
/**
* Navigate back to the root of the stack, no matter how far back that is.
*
* @param opts The navigation options.
* @param done The transition complete function.
*/
popToRoot(opts, done) {
return this.queueTrns({
removeStart: 1,
removeCount: -1,
opts
}, done);
}
/**
* Removes a component from the navigation stack at the specified index.
*
* @param startIndex The number to begin removal at.
* @param removeCount The number of components to remove.
* @param opts The navigation options.
* @param done The transition complete function.
*/
removeIndex(startIndex, removeCount = 1, opts, done) {
return this.queueTrns({
removeStart: startIndex,
removeCount,
opts
}, done);
}
/**
* Set the root for the current navigation stack to a component.
*
* @param component The component to set as the root of the navigation stack.
* @param componentProps Any properties of the component.
* @param opts The navigation options.
* @param done The transition complete function.
*/
setRoot(component, componentProps, opts, done) {
return this.setPages([{ page: component, params: componentProps }], opts, done);
}
/**
* Set the views of the current navigation stack and navigate to the last view.
* By default animations are disabled, but they can be enabled by passing options
* to the navigation controller. Navigation parameters can also be passed to the
* individual pages in the array.
*
* @param views The list of views to set as the navigation stack.
* @param opts The navigation options.
* @param done The transition complete function.
*/
setPages(views, opts, done) {
if (opts == null) {
opts = {};
}
// if animation wasn't set to true then default it to NOT animate
if (opts.animated !== true) {
opts.animated = false;
}
return this.queueTrns({
insertStart: 0,
insertViews: views,
removeStart: 0,
removeCount: -1,
opts
}, done);
}
/** @internal */
setRouteId(id, params, direction) {
const active = this.getActiveSync();
if (matches(active, id, params)) {
return Promise.resolve({
changed: false,
element: active.element
});
}
let resolve;
const promise = new Promise(r => (resolve = r));
let finish;
const commonOpts = {
updateURL: false,
viewIsReady: enteringEl => {
let mark;
const p = new Promise(r => (mark = r));
resolve({
changed: true,
element: enteringEl,
markVisible: async () => {
mark();
await finish;
}
});
return p;
}
};
if (direction === 'root') {
finish = this.setRoot(id, params, commonOpts);
}
else {
const viewController = this.views.find(v => matches(v, id, params));
if (viewController) {
finish = this.popTo(viewController, Object.assign(Object.assign({}, commonOpts), { direction: 'back' }));
}
else if (direction === 'forward') {
finish = this.push(id, params, commonOpts);
}
else if (direction === 'back') {
finish = this.setRoot(id, params, Object.assign(Object.assign({}, commonOpts), { direction: 'back', animated: true }));
}
}
return promise;
}
/** @internal */
async getRouteId() {
const active = this.getActiveSync();
return active
? {
id: active.element.tagName,
params: active.params,
element: active.element
}
: undefined;
}
/**
* Get the active view.
*/
getActive() {
return Promise.resolve(this.getActiveSync());
}
/**
* Get the view at the specified index.
*
* @param index The index of the view.
*/
getByIndex(index) {
return Promise.resolve(this.views[index]);
}
/**
* Returns `true` if the current view can go back.
*
* @param view The view to check.
*/
canGoBack(view) {
return Promise.resolve(this.canGoBackSync(view));
}
/**
* Get the previous view.
*
* @param view The view to get.
*/
getPrevious(view) {
return Promise.resolve(this.getPreviousSync(view));
}
getLength() {
return this.views.length;
}
getActiveSync() {
return this.views[this.views.length - 1];
}
canGoBackSync(view = this.getActiveSync()) {
return !!(view && this.getPreviousSync(view));
}
getPreviousSync(view = this.getActiveSync()) {
if (!view) {
return undefined;
}
const views = this.views;
const index = views.indexOf(view);
return index > 0 ? views[index - 1] : undefined;
}
// _queueTrns() adds a navigation stack change to the queue and schedules it to run:
// 1. _nextTrns(): consumes the next transition in the queue
// 2. _viewInit(): initializes enteringView if required
// 3. _viewTest(): ensures canLeave/canEnter Returns `true`, so the operation can continue
// 4. _postViewInit(): add/remove the views from the navigation stack
// 5. _transitionInit(): initializes the visual transition if required and schedules it to run
// 6. _viewAttachToDOM(): attaches the enteringView to the DOM
// 7. _transitionStart(): called once the transition actually starts, it initializes the Animation underneath.
// 8. _transitionFinish(): called once the transition finishes
// 9. _cleanup(): syncs the navigation internal state with the DOM. For example it removes the pages from the DOM or hides/show them.
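// Illustrative shape of a transition instruction as built by push()/pop()
// (added for clarity, field values are hypothetical):
//   { insertStart: -1, insertViews: [{ page: 'app-detail-page', params: {} }], opts: {} }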
queueTrns(ti, done) {
if (this.isTransitioning && ti.opts != null && ti.opts.skipIfBusy) {
return Promise.resolve(false);
}
const promise = new Promise((resolve, reject) => {
ti.resolve = resolve;
ti.reject = reject;
});
ti.done = done;
// Normalize empty
if (ti.insertViews && ti.insertViews.length === 0) {
ti.insertViews = undefined;
}
// Enqueue transition instruction
this.transInstr.push(ti);
// if there isn't a transition already happening
// then this will kick off this transition
this.nextTrns();
return promise;
}
success(result, ti) {
if (this.destroyed) {
this.fireError('nav controller was destroyed', ti);
return;
}
if (ti.done) {
ti.done(result.hasCompleted, result.requiresTransition, result.enteringView, result.leavingView, result.direction);
}
ti.resolve(result.hasCompleted);
if (ti.opts.updateURL !== false && this.useRouter) {
const router = document.querySelector('ion-router');
if (router) {
const direction = result.direction === 'back' ? 'back' : 'forward';
router.navChanged(direction);
}
}
}
failed(rejectReason, ti) {
if (this.destroyed) {
this.fireError('nav controller was destroyed', ti);
return;
}
this.transInstr.length = 0;
this.fireError(rejectReason, ti);
}
fireError(rejectReason, ti) {
if (ti.done) {
ti.done(false, false, rejectReason);
}
if (ti.reject && !this.destroyed) {
ti.reject(rejectReason);
}
else {
ti.resolve(false);
}
}
nextTrns() {
// this is the framework's bread 'n butta function
// only one transition is allowed at any given time
if (this.isTransitioning) {
return false;
}
// there is no transition happening right now
// get the next instruction
const ti = this.transInstr.shift();
if (!ti) {
return false;
}
this.runTransition(ti);
return true;
}
async runTransition(ti) {
try {
// set that this nav is actively transitioning
this.ionNavWillChange.emit();
this.isTransitioning = true;
this.prepareTI(ti);
const leavingView = this.getActiveSync();
const enteringView = this.getEnteringView(ti, leavingView);
if (!leavingView && !enteringView) {
throw new Error('no views in the stack to be removed');
}
if (enteringView && enteringView.state === VIEW_STATE_NEW) {
await enteringView.init(this.el);
}
this.postViewInit(enteringView, leavingView, ti);
// Needs transition?
const requiresTransition = (ti.enteringRequiresTransition || ti.leavingRequiresTransition) &&
enteringView !== leavingView;
const result = requiresTransition
? await this.transition(enteringView, leavingView, ti)
: {
// transition is not required, so we are already done!
// they're inserting/removing the views somewhere in the middle or
// beginning, so visually nothing needs to animate/transition
// resolve immediately because there's no animation that's happening
hasCompleted: true,
requiresTransition: false
};
this.success(result, ti);
this.ionNavDidChange.emit();
}
catch (rejectReason) {
this.failed(rejectReason, ti);
}
this.isTransitioning = false;
this.nextTrns();
}
prepareTI(ti) {
const viewsLength = this.views.length;
ti.opts = ti.opts || {};
if (ti.opts.delegate === undefined) {
ti.opts.delegate = this.delegate;
}
if (ti.removeView !== undefined) {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(ti.removeStart !== undefined, 'removeView needs removeStart');
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(ti.removeCount !== undefined, 'removeView needs removeCount');
const index = this.views.indexOf(ti.removeView);
if (index < 0) {
throw new Error('removeView was not found');
}
ti.removeStart += index;
}
if (ti.removeStart !== undefined) {
if (ti.removeStart < 0) {
ti.removeStart = viewsLength - 1;
}
if (ti.removeCount < 0) {
ti.removeCount = viewsLength - ti.removeStart;
}
ti.leavingRequiresTransition =
ti.removeCount > 0 && ti.removeStart + ti.removeCount === viewsLength;
}
if (ti.insertViews) {
// allow -1 to be passed in to auto push it on the end
// and clean up the index if it's larger then the size of the stack
if (ti.insertStart < 0 || ti.insertStart > viewsLength) {
ti.insertStart = viewsLength;
}
ti.enteringRequiresTransition = ti.insertStart === viewsLength;
}
const insertViews = ti.insertViews;
if (!insertViews) {
return;
}
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(insertViews.length > 0, 'length can not be zero');
const viewControllers = convertToViews(insertViews);
if (viewControllers.length === 0) {
throw new Error('invalid views to insert');
}
// Check all the inserted view are correct
for (const view of viewControllers) {
view.delegate = ti.opts.delegate;
const nav = view.nav;
if (nav && nav !== this) {
throw new Error('inserted view was already inserted');
}
if (view.state === VIEW_STATE_DESTROYED) {
throw new Error('inserted view was already destroyed');
}
}
ti.insertViews = viewControllers;
}
getEnteringView(ti, leavingView) {
const insertViews = ti.insertViews;
if (insertViews !== undefined) {
// grab the very last view of the views to be inserted
// and initialize it as the new entering view
return insertViews[insertViews.length - 1];
}
const removeStart = ti.removeStart;
if (removeStart !== undefined) {
const views = this.views;
const removeEnd = removeStart + ti.removeCount;
for (let i = views.length - 1; i >= 0; i--) {
const view = views[i];
if ((i < removeStart || i >= removeEnd) && view !== leavingView) {
return view;
}
}
}
return undefined;
}
postViewInit(enteringView, leavingView, ti) {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(leavingView || enteringView, 'Both leavingView and enteringView are null');
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(ti.resolve, 'resolve must be valid');
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(ti.reject, 'reject must be valid');
const opts = ti.opts;
const insertViews = ti.insertViews;
const removeStart = ti.removeStart;
const removeCount = ti.removeCount;
let destroyQueue;
// there are views to remove
if (removeStart !== undefined && removeCount !== undefined) {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(removeStart >= 0, 'removeStart can not be negative');
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(removeCount >= 0, 'removeCount can not be negative');
destroyQueue = [];
for (let i = 0; i < removeCount; i++) {
const view = this.views[i + removeStart];
if (view && view !== enteringView && view !== leavingView) {
destroyQueue.push(view);
}
}
// default the direction to "back"
opts.direction = opts.direction || 'back';
}
const finalBalance = this.views.length +
(insertViews !== undefined ? insertViews.length : 0) -
(removeCount !== undefined ? removeCount : 0);
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(finalBalance >= 0, 'final balance can not be negative');
if (finalBalance === 0) {
console.warn(`You can't remove all the pages in the navigation stack. nav.pop() is probably called too many times.`, this, this.el);
throw new Error('navigation stack needs at least one root page');
}
// At this point the transition can not be rejected, any throw should be an error
// there are views to insert
if (insertViews) {
// add the views to the
let insertIndex = ti.insertStart;
for (const view of insertViews) {
this.insertViewAt(view, insertIndex);
insertIndex++;
}
if (ti.enteringRequiresTransition) {
// default to forward if not already set
opts.direction = opts.direction || 'forward';
}
}
// if the views to be removed are in the beginning or middle
// and there is not a view that needs to visually transition out
// then just destroy them and don't transition anything
// batch all of lifecycles together
// let's make sure, callbacks are zoned
if (destroyQueue && destroyQueue.length > 0) {
for (const view of destroyQueue) {
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["l"])(view.element, _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__["b"]);
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["l"])(view.element, _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__["c"]);
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["l"])(view.element, _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__["d"]);
}
// once all lifecycle events have been delivered, we can safely destroy the views
for (const view of destroyQueue) {
this.destroyView(view);
}
}
}
async transition(enteringView, leavingView, ti) {
// we should animate (duration > 0) if the pushed page is not the first one (startup)
// or if it is a portal (modal, actionsheet, etc.)
const opts = ti.opts;
const progressCallback = opts.progressAnimation
? (ani) => this.sbAni = ani
: undefined;
const mode = Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["c"])(this);
const enteringEl = enteringView.element;
const leavingEl = leavingView && leavingView.element;
const animationOpts = Object.assign({ mode, showGoBack: this.canGoBackSync(enteringView), baseEl: this.el, animationBuilder: this.animation || opts.animationBuilder || _config_3c7f3790_js__WEBPACK_IMPORTED_MODULE_1__["b"].get('navAnimation'), progressCallback, animated: this.animated && _config_3c7f3790_js__WEBPACK_IMPORTED_MODULE_1__["b"].getBoolean('animated', true), enteringEl,
leavingEl }, opts);
const { hasCompleted } = await Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["t"])(animationOpts);
return this.transitionFinish(hasCompleted, enteringView, leavingView, opts);
}
transitionFinish(hasCompleted, enteringView, leavingView, opts) {
const cleanupView = hasCompleted ? enteringView : leavingView;
if (cleanupView) {
this.cleanup(cleanupView);
}
return {
hasCompleted,
requiresTransition: true,
enteringView,
leavingView,
direction: opts.direction
};
}
insertViewAt(view, index) {
const views = this.views;
const existingIndex = views.indexOf(view);
if (existingIndex > -1) {
// this view is already in the stack!!
// move it to its new location
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(view.nav === this, 'view is not part of the nav');
views.splice(index, 0, views.splice(existingIndex, 1)[0]);
}
else {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(!view.nav, 'nav is used');
// this is a new view to add to the stack
// create the new entering view
view.nav = this;
// insert the entering view into the correct index in the stack
views.splice(index, 0, view);
}
}
removeView(view) {
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(view.state === VIEW_STATE_ATTACHED || view.state === VIEW_STATE_DESTROYED, 'view state should be loaded or destroyed');
const views = this.views;
const index = views.indexOf(view);
Object(_helpers_46f4a262_js__WEBPACK_IMPORTED_MODULE_2__["b"])(index > -1, 'view must be part of the stack');
if (index >= 0) {
views.splice(index, 1);
}
}
destroyView(view) {
view._destroy();
this.removeView(view);
}
/**
* DOM WRITE
*/
cleanup(activeView) {
// ok, cleanup time!! Destroy all of the views that are
// INACTIVE and come after the active view
// only do this if the views exist, though
if (this.destroyed) {
return;
}
const views = this.views;
const activeViewIndex = views.indexOf(activeView);
for (let i = views.length - 1; i >= 0; i--) {
const view = views[i];
const element = view.element;
if (i > activeViewIndex) {
// this view comes after the active view
// let's unload it
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["l"])(element, _constants_3c3e1099_js__WEBPACK_IMPORTED_MODULE_3__["d"]);
this.destroyView(view);
}
else if (i < activeViewIndex) {
// this view comes before the active view
// and it is not a portal then ensure it is hidden
Object(_index_4d91f03a_js__WEBPACK_IMPORTED_MODULE_5__["s"])(element, true);
}
}
}
canStart() {
return (!!this.swipeGesture &&
!this.isTransitioning &&
this.transInstr.length === 0 &&
this.animationEnabled &&
this.canGoBackSync());
}
onStart() {
this.queueTrns({
removeStart: -1,
removeCount: 1,
opts: {
direction: 'back',
progressAnimation: true
}
}, undefined);
}
onMove(stepValue) {
if (this.sbAni) {
this.sbAni.progressStep(stepValue);
}
}
onEnd(shouldComplete, stepValue, dur) {
if (this.sbAni) {
this.animationEnabled = false;
this.sbAni.onFinish(() => {
this.animationEnabled = true;
}, { oneTimeCallback: true });
// Account for rounding errors in JS
let newStepValue = (shouldComplete) ? -0.001 : 0.001;
/**
* Animation will be reversed here, so need to
* reverse the easing curve as well
*
* Additionally, we need to account for the time relative
* to the new easing curve, as `stepValue` is going to be given
* in terms of a linear curve.
*/
if (!shouldComplete) {
this.sbAni.easing('cubic-bezier(1, 0, 0.68, 0.28)');
newStepValue += Object(_cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["g"])(new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](0, 0), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](1, 0), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](0.68, 0.28), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](1, 1), stepValue);
}
else {
newStepValue += Object(_cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["g"])(new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](0, 0), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](0.32, 0.72), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](0, 1), new _cubic_bezier_2812fda3_js__WEBPACK_IMPORTED_MODULE_6__["P"](1, 1), stepValue);
}
this.sbAni.progressEnd(shouldComplete ? 1 : 0, newStepValue, dur);
}
}
render() {
return (Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["h"])("slot", null));
}
get el() { return Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
static get watchers() { return {
"swipeGesture": ["swipeGestureChanged"],
"root": ["rootChanged"]
}; }
static get style() { return ":host{left:0;right:0;top:0;bottom:0;position:absolute;contain:layout size style;overflow:hidden;z-index:0}"; }
};
const navLink = (el, routerDirection, component, componentProps) => {
const nav = el.closest('ion-nav');
if (nav) {
if (routerDirection === 'forward') {
if (component !== undefined) {
return nav.push(component, componentProps, { skipIfBusy: true });
}
}
else if (routerDirection === 'root') {
if (component !== undefined) {
return nav.setRoot(component, componentProps, { skipIfBusy: true });
}
}
else if (routerDirection === 'back') {
return nav.pop({ skipIfBusy: true });
}
}
return Promise.resolve(false);
};
const NavLink = class {
constructor(hostRef) {
Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
/**
* The transition direction when navigating to another page.
*/
this.routerDirection = 'forward';
this.onClick = () => {
return navLink(this.el, this.routerDirection, this.component, this.componentProps);
};
}
render() {
return (Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["h"])(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["H"], { onClick: this.onClick }));
}
get el() { return Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
};
const NavPop = class {
constructor(hostRef) {
Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
this.pop = () => {
return navLink(this.el, 'back');
};
}
componentDidLoad() {
console.warn('[DEPRECATED][ion-nav-pop] <ion-nav-pop> is deprecated. Use `<ion-nav-link routerDirection="back">` instead.');
}
render() {
return (Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["h"])(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["H"], { onClick: this.pop }));
}
get el() { return Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
};
const NavPush = class {
constructor(hostRef) {
Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
this.push = () => {
return navLink(this.el, 'forward', this.component, this.componentProps);
};
}
componentDidLoad() {
console.warn('[DEPRECATED][ion-nav-push] `<ion-nav-push component="MyComponent">` is deprecated. Use `<ion-nav-link component="MyComponent">` instead.');
}
render() {
return (Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["h"])(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["H"], { onClick: this.push }));
}
get el() { return Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
};
const NavSetRoot = class {
constructor(hostRef) {
Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["r"])(this, hostRef);
this.setRoot = () => {
return navLink(this.el, 'root', this.component, this.componentProps);
};
}
componentDidLoad() {
console.warn('[DEPRECATED][ion-nav-set-root] `<ion-nav-set-root component="MyComponent">` is deprecated. Use `<ion-nav-link component="MyComponent" routerDirection="root">` instead.');
}
render() {
return (Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["h"])(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["H"], { onClick: this.setRoot }));
}
get el() { return Object(_core_feeeff0d_js__WEBPACK_IMPORTED_MODULE_0__["e"])(this); }
};
/***/ })
}]);
//# sourceMappingURL=41-es2015.js.map | ViewController |
readers.go | package exercise
import (
"io"
)
type MyReader struct {
stream string
offset int
}
func NewMyReader(stream string) *MyReader |
func (mr *MyReader) Read(buffer []byte) (int, error) {
if mr.stream == "" {
for i := range buffer {
buffer[i] = 'A'
}
return len(buffer), nil
}
streamLen := len(mr.stream)
if streamLen == mr.offset {
mr.offset = 0
return 0, io.EOF
}
max := cap(buffer)
if len(mr.stream) < (mr.offset + max) {
max = len(mr.stream) - mr.offset
}
idx := 0
for ; idx < max; idx++ {
buffer[idx] = mr.stream[idx + mr.offset]
}
mr.offset += idx
return idx, nil
}
| {
return &MyReader{
stream: stream,
offset: 0,
}
} |
tcp.rs | use std::cmp::min;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::task::{Context, Poll};
use async_trait::async_trait;
use bytes::{Bytes, BytesMut};
use futures::stream::Stream;
use futures::TryFutureExt;
use log::*;
use tokio::io::{AsyncRead, AsyncWrite};
use url::Url;
use crate::{
proxy::{ProxyStream, TcpOutboundHandler, SimpleProxyStream},
session::Session,
};
struct Adapter {
send_stream: h2::SendStream<Bytes>,
recv_stream: h2::RecvStream,
recv_buf: BytesMut,
}
impl AsyncRead for Adapter {
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
if !self.recv_buf.is_empty() {
let to_read = min(buf.len(), self.recv_buf.len());
let for_read = self.recv_buf.split_to(to_read);
            buf[..to_read].copy_from_slice(&for_read[..to_read]);
return Poll::Ready(Ok(to_read));
}
if self.recv_stream.is_end_stream() {
return Poll::Ready(Ok(0));
}
let item = match Pin::new(&mut self.recv_stream).poll_next(cx) {
Poll::Ready(item) => item,
Poll::Pending => return Poll::Pending,
};
match item {
Some(res) => match res {
Ok(data) => {
let to_read = min(buf.len(), data.len());
                    buf[..to_read].copy_from_slice(&data[..to_read]);
if data.len() > to_read {
self.recv_buf.extend_from_slice(&data[to_read..]);
}
Poll::Ready(Ok(to_read))
}
Err(e) => Poll::Ready(Err(io::Error::new(
io::ErrorKind::Other,
format!("receive data failed: {}", e),
))),
},
None => {
panic!("could never happend, we already checked stream end");
}
}
}
}
impl AsyncWrite for Adapter {
fn poll_write(self: Pin<&mut Self>, _cx: &mut Context, buf: &[u8]) -> Poll<io::Result<usize>> {
let me = self.get_mut();
// FIXME reserve capacity before sending to avoid memory issue
let mut buf2 = BytesMut::new();
buf2.extend_from_slice(buf);
match me.send_stream.send_data(buf2.freeze(), false) {
Ok(_) => Poll::Ready(Ok(buf.len())),
Err(e) => Poll::Ready(Err(io::Error::new(
io::ErrorKind::Other,
format!("send data failed: {}", e),
))),
}
}
fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
fn poll_shutdown(self: Pin<&mut Self>, _cx: &mut Context) -> Poll<io::Result<()>> {
Poll::Ready(Ok(()))
}
}
pub struct Handler {
pub path: String,
pub host: String,
}
#[async_trait]
impl TcpOutboundHandler for Handler {
fn name(&self) -> &str {
super::NAME
}
fn tcp_connect_addr(&self) -> Option<(String, u16, SocketAddr)> {
None
}
async fn handle_tcp<'a>(
&'a self,
_sess: &'a Session,
stream: Option<Box<dyn ProxyStream>>,
) -> io::Result<Box<dyn ProxyStream>> {
match stream {
Some(stream) => {
                // stream is assumed to be a connection ready for the h2 handshake,
// e.g. a TLS connection negotiated with alpn h2.
let (client, conn) = h2::client::handshake(stream)
.map_err(|e| {
io::Error::new(io::ErrorKind::Other, format!("handshake failed: {}", e))
})
.await?;
let mut url = Url::parse(&format!("https://{}", self.host)).unwrap();
url = url.join(self.path.as_str()).unwrap();
let req = http::Request::builder()
.method(http::Method::PUT)
.uri(&url.to_string())
.body(())
.unwrap();
let mut client = client
.ready()
.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("h2 error: {}", e)))
.await?;
let (resp, send_stream) = client.send_request(req, false).map_err(|e| {
io::Error::new(io::ErrorKind::Other, format!("h2 error: {}", e))
})?;
tokio::spawn(async move {
if let Err(e) = conn.await {
debug!("connection failed: {}", e);
}
});
let (parts, recv_stream) = resp
.map_err(|e| io::Error::new(io::ErrorKind::Other, format!("h2 error: {}", e)))
.await?
.into_parts();
if parts.status != http::status::StatusCode::OK {
return Err(io::Error::new(
io::ErrorKind::Other,
format!("h2 failed with status code: {}", parts.status),
));
}
let h2_stream = Adapter {
send_stream,
recv_stream,
recv_buf: BytesMut::new(),
};
Ok(Box::new(SimpleProxyStream(h2_stream)))
}
None => Err(io::Error::new(io::ErrorKind::Other, "invalid h2 input")), | }
}
} |
|
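// A sketch of one way to address the FIXME in poll_write above: cooperate
// with h2's flow control via SendStream::reserve_capacity/poll_capacity
// instead of buffering the whole chunk unconditionally. How this slots into
// the surrounding proxy code is an assumption; treat it as illustrative.
fn poll_write_with_flow_control(
    adapter: &mut Adapter,
    cx: &mut Context,
    buf: &[u8],
) -> Poll<io::Result<usize>> {
    // Ask h2 for enough window to send this chunk, then wait for it.
    adapter.send_stream.reserve_capacity(buf.len());
    match adapter.send_stream.poll_capacity(cx) {
        Poll::Pending => Poll::Pending,
        // The capacity stream ended: the peer closed the stream.
        Poll::Ready(None) => Poll::Ready(Ok(0)),
        Poll::Ready(Some(Err(e))) => Poll::Ready(Err(io::Error::new(
            io::ErrorKind::Other,
            format!("h2 capacity error: {}", e),
        ))),
        Poll::Ready(Some(Ok(cap))) => {
            // Send only as much as the current window allows.
            let to_send = min(cap, buf.len());
            let chunk = Bytes::copy_from_slice(&buf[..to_send]);
            match adapter.send_stream.send_data(chunk, false) {
                Ok(()) => Poll::Ready(Ok(to_send)),
                Err(e) => Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::Other,
                    format!("send data failed: {}", e),
                ))),
            }
        }
    }
}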
Checkmark.js | import React from 'react';
import { StyledIcon } from '../StyledIcon';
export const Checkmark = props => (
<StyledIcon viewBox='0 0 24 24' a11yTitle='Checkmark' {...props}> | <path fill='none' stroke='#000' strokeWidth='2' d='m2 14 7 6L22 4' />
</StyledIcon>
); | |
tab.rs | use std::{
collections::HashMap,
path::{Path, PathBuf},
sync::Arc,
thread,
};
use directories::ProjectDirs;
use druid::{
kurbo::Line,
piet::{PietTextLayout, Text, TextLayout, TextLayoutBuilder},
theme, Application, BoxConstraints, Color, Command, Cursor, Data, Env, Event,
EventCtx, FontFamily, Insets, InternalLifeCycle, LayoutCtx, LifeCycle,
LifeCycleCtx, PaintCtx, Point, Rect, RenderContext, Size, Target, Vec2, Widget,
WidgetExt, WidgetId, WidgetPod, WindowConfig,
};
use itertools::Itertools;
use lsp_types::{CallHierarchyOptions, DiagnosticSeverity};
use crate::{
activity::ActivityBar,
buffer::{
BufferContent, BufferId, BufferNew, BufferState, BufferUpdate,
LocalBufferKind, UpdateEvent,
},
code_action::CodeAction,
command::{
LapceCommand, LapceUICommand, LAPCE_COMMAND, LAPCE_NEW_COMMAND,
LAPCE_UI_COMMAND,
},
completion::{CompletionContainer, CompletionNew, CompletionStatus},
config::{Config, LapceTheme},
data::{
DragContent, EditorContent, EditorDiagnostic, EditorTabChild,
LapceMainSplitData, LapceTabData, PanelKind, SplitContent, WorkProgress,
},
editor::{EditorLocationNew, LapceEditorTab, LapceEditorView},
explorer::FileExplorer,
keypress::KeyPressData,
menu::Menu,
movement::{self, CursorMode, Selection},
palette::{NewPalette, PaletteStatus, PaletteViewLens},
panel::{PanelHeaderKind, PanelPosition, PanelResizePosition},
picker::FilePicker,
plugin::Plugin,
scroll::LapceScrollNew,
settings::LapceSettingsPanel,
split::LapceSplitNew,
state::{LapceWorkspace, LapceWorkspaceType},
status::LapceStatusNew,
terminal::TerminalPanel,
};
pub struct LapceIcon {
pub rect: Rect,
pub command: Command,
pub icon: String,
}
pub struct LapceButton {
pub rect: Rect,
pub command: Command,
pub text_layout: PietTextLayout,
}
pub struct LapceTabNew {
id: WidgetId,
activity: WidgetPod<LapceTabData, ActivityBar>,
main_split: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
completion: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
palette: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
code_action: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
status: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
picker: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
settings: WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>,
panels:
HashMap<PanelKind, WidgetPod<LapceTabData, Box<dyn Widget<LapceTabData>>>>,
current_bar_hover: Option<PanelResizePosition>,
height: f64,
main_split_height: f64,
status_height: f64,
mouse_pos: Point,
}
impl LapceTabNew {
pub fn new(data: &LapceTabData) -> Self {
let split_data = data
.main_split
.splits
.get(&*data.main_split.split_id)
.unwrap();
let main_split = split_data.widget(data);
let activity = ActivityBar::new();
let completion = CompletionContainer::new(&data.completion);
let palette = NewPalette::new(
&data.palette,
data.main_split
.editors
.get(&data.palette.preview_editor)
.unwrap(),
);
let status = LapceStatusNew::new();
let code_action = CodeAction::new();
let mut panels = HashMap::new();
let file_explorer = FileExplorer::new(&data.file_explorer);
panels.insert(
PanelKind::FileExplorer,
WidgetPod::new(file_explorer.boxed()),
);
let source_control = data.source_control.new_panel(&data);
panels.insert(
PanelKind::SourceControl,
WidgetPod::new(source_control.boxed()),
);
let plugin = Plugin::new();
panels.insert(PanelKind::Plugin, WidgetPod::new(plugin.boxed()));
let terminal = TerminalPanel::new(&data);
panels.insert(PanelKind::Terminal, WidgetPod::new(terminal.boxed()));
let search = data.search.new_panel(&data);
panels.insert(PanelKind::Search, WidgetPod::new(search.boxed()));
let problem = data.problem.new_panel();
panels.insert(PanelKind::Problem, WidgetPod::new(problem.boxed()));
let picker = FilePicker::new(data);
let settings = LapceSettingsPanel::new(data);
Self {
id: data.id,
activity: WidgetPod::new(activity),
main_split: WidgetPod::new(main_split.boxed()),
completion: WidgetPod::new(completion.boxed()),
code_action: WidgetPod::new(code_action.boxed()),
picker: WidgetPod::new(picker.boxed()),
palette: WidgetPod::new(palette.boxed()),
status: WidgetPod::new(status.boxed()),
settings: WidgetPod::new(settings.boxed()),
panels,
current_bar_hover: None,
height: 0.0,
main_split_height: 0.0,
status_height: 0.0,
mouse_pos: Point::ZERO,
}
}
fn update_split_point(&mut self, data: &mut LapceTabData, mouse_pos: Point) {
if let Some(position) = self.current_bar_hover.as_ref() {
match position {
PanelResizePosition::Left => {
data.panel_size.left = (mouse_pos.x - 50.0).round().max(50.0);
}
PanelResizePosition::LeftSplit => (),
PanelResizePosition::Bottom => {
data.panel_size.bottom =
(self.height - mouse_pos.y.round() - self.status_height)
.max(50.0);
}
}
}
}
fn | (
&self,
data: &LapceTabData,
mouse_pos: Point,
) -> Option<PanelResizePosition> {
let panel_left_top_shown = data
.panels
.get(&PanelPosition::LeftTop)
.map(|p| p.is_shown())
.unwrap_or(false);
let panel_left_bottom_shown = data
.panels
.get(&PanelPosition::LeftBottom)
.map(|p| p.is_shown())
.unwrap_or(false);
let left = if panel_left_bottom_shown || panel_left_top_shown {
let left = data.panel_size.left + 50.0;
if mouse_pos.x >= left - 3.0 && mouse_pos.x <= left + 3.0 {
return Some(PanelResizePosition::Left);
}
left
} else {
0.0
};
let panel_bottom_left_shown = data
.panels
.get(&PanelPosition::BottomLeft)
.map(|p| p.is_shown())
.unwrap_or(false);
let panel_bottom_right_shown = data
.panels
.get(&PanelPosition::BottomRight)
.map(|p| p.is_shown())
.unwrap_or(false);
if panel_bottom_left_shown || panel_bottom_right_shown {
let bottom = data.panel_size.bottom;
let y = self.main_split_height;
if mouse_pos.x > left && mouse_pos.y >= y - 3.0 && mouse_pos.y <= y + 3.0
{
return Some(PanelResizePosition::Bottom);
}
}
None
}
fn paint_drag(&self, ctx: &mut PaintCtx, data: &LapceTabData) {
if let Some((offset, drag_content)) = data.drag.as_ref() {
match drag_content {
DragContent::EditorTab(_, _, _, tab_rect) => {
let rect = tab_rect.rect.with_origin(self.mouse_pos - *offset);
let size = rect.size();
let shadow_width = 5.0;
ctx.blurred_rect(
rect,
shadow_width,
data.config
.get_color_unchecked(LapceTheme::LAPCE_DROPDOWN_SHADOW),
);
ctx.fill(
rect,
&data
.config
.get_color_unchecked(LapceTheme::EDITOR_BACKGROUND)
.clone()
.with_alpha(0.6),
);
let width = 13.0;
let height = 13.0;
let svg_rect =
Size::new(width, height).to_rect().with_origin(Point::new(
rect.x0 + (size.height - width) / 2.0,
rect.y0 + (size.height - height) / 2.0,
));
ctx.draw_svg(&tab_rect.svg, svg_rect, None);
let text_size = tab_rect.text_layout.size();
ctx.draw_text(
&tab_rect.text_layout,
Point::new(
rect.x0 + size.height,
rect.y0 + (size.height - text_size.height) / 2.0,
),
);
}
}
}
}
}
impl Widget<LapceTabData> for LapceTabNew {
fn id(&self) -> Option<WidgetId> {
Some(self.id)
}
fn event(
&mut self,
ctx: &mut EventCtx,
event: &Event,
data: &mut LapceTabData,
env: &Env,
) {
match event {
Event::MouseDown(mouse) => {
if mouse.button.is_left() {
if let Some(position) = self.bar_hit_test(data, mouse.pos) {
self.current_bar_hover = Some(position);
ctx.set_active(true);
ctx.set_handled();
}
}
}
Event::MouseUp(mouse) => {
if mouse.button.is_left() && ctx.is_active() {
ctx.set_active(false);
}
}
Event::MouseMove(mouse) => {
self.mouse_pos = mouse.pos;
if ctx.is_active() {
self.update_split_point(data, mouse.pos);
ctx.request_layout();
ctx.set_handled();
} else {
match self.bar_hit_test(data, mouse.pos) {
Some(PanelResizePosition::Left) => {
ctx.set_cursor(&Cursor::ResizeLeftRight)
}
Some(PanelResizePosition::LeftSplit) => {
ctx.set_cursor(&Cursor::ResizeUpDown)
}
Some(PanelResizePosition::Bottom) => {
ctx.set_cursor(&Cursor::ResizeUpDown)
}
None => ctx.clear_cursor(),
}
}
}
Event::Command(cmd) if cmd.is(LAPCE_NEW_COMMAND) => {
let command = cmd.get_unchecked(LAPCE_NEW_COMMAND);
data.run_command(ctx, command, None, env);
ctx.set_handled();
}
Event::Command(cmd) if cmd.is(LAPCE_UI_COMMAND) => {
let command = cmd.get_unchecked(LAPCE_UI_COMMAND);
match command {
LapceUICommand::RequestPaint => {
ctx.request_paint();
ctx.set_handled();
}
LapceUICommand::UpdateWindowOrigin => {
data.window_origin = ctx.window_origin();
ctx.set_handled();
}
LapceUICommand::LoadBuffer {
path,
content,
locations,
} => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer).load_content(content);
for (view_id, location) in locations {
data.main_split.go_to_location(
ctx,
Some(*view_id),
location.clone(),
&data.config,
);
}
ctx.set_handled();
}
LapceUICommand::UpdateSearch(pattern) => {
if pattern == "" {
Arc::make_mut(&mut data.find).unset();
} else {
Arc::make_mut(&mut data.find)
.set_find(pattern, false, false, false);
}
}
LapceUICommand::GlobalSearchResult(pattern, matches) => {
let buffer = data
.main_split
.local_buffers
.get(&LocalBufferKind::Search)
.unwrap();
if &buffer.rope.to_string() == pattern {
Arc::make_mut(&mut data.search).matches =
matches.clone();
}
}
LapceUICommand::LoadBufferHead { path, id, content } => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
let buffer = Arc::make_mut(buffer);
buffer.load_history(id, content.clone());
ctx.set_handled();
}
LapceUICommand::UpdateTerminalTitle(term_id, title) => {
let terminal_panel = Arc::make_mut(&mut data.terminal);
if let Some(mut terminal) =
terminal_panel.terminals.get_mut(term_id)
{
Arc::make_mut(&mut terminal).title = title.to_string();
}
}
LapceUICommand::CancelFilePicker => {
Arc::make_mut(&mut data.picker).active = false;
ctx.set_handled();
}
LapceUICommand::ProxyUpdateStatus(status) => {
data.proxy_status = Arc::new(*status);
ctx.set_handled();
}
LapceUICommand::HomeDir(path) => {
Arc::make_mut(&mut data.picker).init_home(path);
data.set_picker_pwd(path.clone());
ctx.set_handled();
}
LapceUICommand::CloseTerminal(id) => {
let terminal_panel = Arc::make_mut(&mut data.terminal);
if let Some(terminal) = terminal_panel.terminals.get_mut(id)
{
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::SplitTerminalClose(
terminal.term_id,
terminal.widget_id,
),
Target::Widget(terminal.split_id),
));
data.proxy.terminal_close(terminal.term_id);
}
ctx.set_handled();
}
LapceUICommand::UpdateInstalledPlugins(plugins) => {
data.installed_plugins = Arc::new(plugins.to_owned());
}
LapceUICommand::UpdateDiffInfo(diff) => {
let source_control = Arc::make_mut(&mut data.source_control);
source_control.branch = diff.head.to_string();
source_control.branches = diff.branches.clone();
source_control.file_diffs = diff
.diffs
.iter()
.map(|diff| {
let mut checked = true;
for (p, c) in source_control.file_diffs.iter() {
if p == diff {
checked = *c;
break;
}
}
(diff.clone(), checked)
})
.collect();
for (path, buffer) in data.main_split.open_files.iter() {
buffer.retrieve_file_head(
data.id,
data.proxy.clone(),
ctx.get_external_handle(),
);
}
ctx.set_handled();
}
LapceUICommand::WorkDoneProgress(params) => {
match ¶ms.value {
lsp_types::ProgressParamsValue::WorkDone(progress) => {
match progress {
lsp_types::WorkDoneProgress::Begin(begin) => {
data.progresses.push_back(WorkProgress {
token: params.token.clone(),
title: begin.title.clone(),
message: begin.message.clone(),
percentage: begin.percentage.clone(),
});
}
lsp_types::WorkDoneProgress::Report(report) => {
for p in data.progresses.iter_mut() {
if p.token == params.token {
p.message = report.message.clone();
p.percentage =
report.percentage.clone();
}
}
}
lsp_types::WorkDoneProgress::End(end) => {
for i in data
.progresses
.iter()
.positions(|p| p.token == params.token)
.sorted()
.rev()
{
data.progresses.remove(i);
}
}
}
}
}
}
LapceUICommand::PublishDiagnostics(diagnostics) => {
let path = PathBuf::from(diagnostics.uri.path());
let diagnostics = diagnostics
.diagnostics
.iter()
.map(|d| EditorDiagnostic {
range: None,
diagnositc: d.clone(),
})
.collect();
data.main_split
.diagnostics
.insert(path, Arc::new(diagnostics));
let mut errors = 0;
let mut warnings = 0;
for (_, diagnositics) in data.main_split.diagnostics.iter() {
for diagnositic in diagnositics.iter() {
if let Some(severity) =
diagnositic.diagnositc.severity
{
match severity {
DiagnosticSeverity::Error => errors += 1,
DiagnosticSeverity::Warning => warnings += 1,
_ => (),
}
}
}
}
data.main_split.error_count = errors;
data.main_split.warning_count = warnings;
ctx.set_handled();
}
LapceUICommand::DocumentFormatAndSave(path, rev, result) => {
data.main_split.document_format_and_save(
ctx,
path,
*rev,
result,
&data.config,
);
ctx.set_handled();
}
LapceUICommand::DocumentFormat(path, rev, result) => {
data.main_split.document_format(
ctx,
path,
*rev,
result,
&data.config,
);
ctx.set_handled();
}
LapceUICommand::BufferSave(path, rev) => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
if buffer.rev == *rev {
Arc::make_mut(buffer).dirty = false;
}
ctx.set_handled();
}
LapceUICommand::LoadBufferAndGoToPosition {
path,
content,
editor_view_id,
location,
} => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer).load_content(content);
data.main_split.go_to_location(
ctx,
Some(*editor_view_id),
location.clone(),
&data.config,
);
ctx.set_handled();
}
LapceUICommand::UpdateSettingsFile(key, value) => {
if let Ok(value) =
serde_json::from_value::<toml::Value>(value.clone())
{
Config::update_file(key, value);
}
}
LapceUICommand::OpenFileDiff(path, history) => {
let editor_view_id = data.main_split.active.clone();
let editor_view_id = data.main_split.jump_to_location(
ctx,
*editor_view_id,
EditorLocationNew {
path: path.clone(),
position: None,
scroll_offset: None,
hisotry: Some(history.to_string()),
},
&data.config,
);
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::Focus,
Target::Widget(editor_view_id),
));
ctx.set_handled();
}
LapceUICommand::UpdateKeymapsFilter(pattern) => {
ctx.set_handled();
let keypress = Arc::make_mut(&mut data.keypress);
keypress.filter_commands(pattern);
}
LapceUICommand::FilterKeymaps(
pattern,
filtered_commands_with_keymap,
filtered_commands_without_keymap,
) => {
ctx.set_handled();
let keypress = Arc::make_mut(&mut data.keypress);
if &keypress.filter_pattern == pattern {
keypress.filtered_commands_with_keymap =
filtered_commands_with_keymap.clone();
keypress.filtered_commands_without_keymap =
filtered_commands_without_keymap.clone();
}
}
LapceUICommand::UpdateKeymap(keymap, keys) => {
KeyPressData::update_file(keymap, keys);
}
LapceUICommand::OpenFile(path) => {
data.main_split.jump_to_location(
ctx,
None,
EditorLocationNew {
path: path.clone(),
position: None,
scroll_offset: None,
hisotry: None,
},
&data.config,
);
ctx.set_handled();
}
LapceUICommand::GoToLocationNew(editor_view_id, location) => {
data.main_split.go_to_location(
ctx,
Some(*editor_view_id),
location.clone(),
&data.config,
);
ctx.set_handled();
}
LapceUICommand::JumpToPosition(editor_view_id, position) => {
data.main_split.jump_to_position(
ctx,
*editor_view_id,
*position,
&data.config,
);
ctx.set_handled();
}
LapceUICommand::JumpToLocation(editor_view_id, location) => {
data.main_split.jump_to_location(
ctx,
*editor_view_id,
location.clone(),
&data.config,
);
ctx.set_handled();
}
LapceUICommand::JumpToLine(editor_view_id, line) => {
data.main_split.jump_to_line(
ctx,
*editor_view_id,
*line,
&data.config,
);
ctx.set_handled();
}
LapceUICommand::TerminalJumpToLine(line) => {
if let Some(terminal) = data
.terminal
.terminals
.get(&data.terminal.active_term_id)
{
terminal.raw.lock().term.vi_goto_point(
alacritty_terminal::index::Point::new(
alacritty_terminal::index::Line(*line),
alacritty_terminal::index::Column(0),
),
);
ctx.request_paint();
}
// data.term_tx.send((
// data.terminal.active_term_id,
// TerminalEvent::JumpToLine(*line),
// ));
ctx.set_handled();
}
LapceUICommand::GotoDefinition(
editor_view_id,
offset,
location,
) => {
if let Some(editor) = data.main_split.active_editor() {
if *editor_view_id == editor.view_id
&& *offset == editor.cursor.offset()
{
data.main_split.jump_to_location(
ctx,
None,
location.clone(),
&data.config,
);
}
}
ctx.set_handled();
}
LapceUICommand::GotoReference(
editor_view_id,
offset,
location,
) => {
if let Some(editor) = data.main_split.active_editor() {
if *editor_view_id == editor.view_id
&& *offset == editor.cursor.offset()
{
data.main_split.jump_to_location(
ctx,
Some(*editor_view_id),
location.clone(),
&data.config,
);
}
}
ctx.set_handled();
}
LapceUICommand::UpdateCodeActions(path, rev, offset, resp) => {
if let Some(buffer) =
data.main_split.open_files.get_mut(path)
{
if buffer.rev == *rev {
Arc::make_mut(buffer)
.code_actions
.insert(*offset, resp.clone());
}
}
}
LapceUICommand::PaletteReferences(offset, locations) => {
if let Some(editor) = data.main_split.active_editor() {
if *offset == editor.cursor.offset() {
let locations = locations
.iter()
.map(|l| EditorLocationNew {
path: PathBuf::from(l.uri.path()),
position: Some(l.range.start.clone()),
scroll_offset: None,
hisotry: None,
})
.collect();
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::RunPaletteReferences(locations),
Target::Widget(data.palette.widget_id),
));
}
}
ctx.set_handled();
}
LapceUICommand::ReloadBuffer(id, rev, new_content) => {
for (_, buffer) in data.main_split.open_files.iter_mut() {
if &buffer.id == id {
if buffer.rev + 1 == *rev {
let buffer = Arc::make_mut(buffer);
buffer.load_content(new_content);
buffer.rev = *rev;
for (_, editor) in
data.main_split.editors.iter_mut()
{
if editor.content == buffer.content {
if editor.cursor.offset() >= buffer.len()
{
let editor = Arc::make_mut(editor);
if data.config.lapce.modal {
editor.cursor =
movement::Cursor::new(
CursorMode::Normal(
buffer.len() - 1,
),
None,
);
} else {
editor.cursor =
movement::Cursor::new(
CursorMode::Insert(
Selection::caret(
buffer.len() - 1,
),
),
None,
);
}
}
}
}
}
break;
}
}
ctx.set_handled();
}
LapceUICommand::UpdateSemanticTokens(id, path, rev, tokens) => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
if buffer.rev == *rev {
if let Some(language) = buffer.language.as_ref() {
if let BufferContent::File(path) = &buffer.content {
data.update_sender.send(
UpdateEvent::SemanticTokens(
BufferUpdate {
id: buffer.id,
path: path.clone(),
rope: buffer.rope.clone(),
rev: *rev,
language: *language,
highlights: buffer.styles.clone(),
semantic_tokens: true,
},
tokens.to_owned(),
),
);
}
}
}
ctx.set_handled();
}
LapceUICommand::ShowCodeActions
| LapceUICommand::CancelCodeActions => {
self.code_action.event(ctx, event, data, env);
}
LapceUICommand::Focus => {
let dir = data
.workspace
.path
.as_ref()
.map(|p| {
let dir = p.file_name().unwrap().to_str().unwrap();
let dir = match &data.workspace.kind {
LapceWorkspaceType::Local => dir.to_string(),
LapceWorkspaceType::RemoteSSH(user, host) => {
format!("{} [{}@{}]", dir, user, host)
}
};
dir
})
.unwrap_or("Lapce".to_string());
ctx.configure_window(WindowConfig::default().set_title(dir));
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::Focus,
Target::Widget(data.focus),
));
ctx.set_handled();
}
LapceUICommand::UpdateStyle {
id,
path,
rev,
highlights,
semantic_tokens,
} => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer).update_styles(
*rev,
highlights.to_owned(),
*semantic_tokens,
);
ctx.set_handled();
}
LapceUICommand::FocusSourceControl => {
for (_, panel) in data.panels.iter_mut() {
for kind in panel.widgets.clone() {
if kind == PanelKind::SourceControl {
let panel = Arc::make_mut(panel);
panel.active = PanelKind::SourceControl;
panel.shown = true;
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::Focus,
Target::Widget(data.source_control.active),
));
}
}
}
ctx.set_handled();
}
LapceUICommand::FocusEditor => {
if let Some(active) = *data.main_split.active {
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::Focus,
Target::Widget(active),
));
}
ctx.set_handled();
}
LapceUICommand::UpdateSyntaxTree {
id,
path,
rev,
tree,
} => {
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer)
.update_syntax_tree(*rev, tree.to_owned());
ctx.set_handled();
}
LapceUICommand::UpdateHisotryChanges {
id,
path,
rev,
history,
changes,
} => {
ctx.set_handled();
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer).update_history_changes(
*rev,
history,
changes.clone(),
);
}
LapceUICommand::UpdateHistoryStyle {
id,
path,
history,
highlights,
} => {
ctx.set_handled();
let buffer =
data.main_split.open_files.get_mut(path).unwrap();
Arc::make_mut(buffer).history_styles.insert(
history.to_string(),
Arc::new(highlights.to_owned()),
);
buffer
.history_line_styles
.borrow_mut()
.insert(history.to_string(), HashMap::new());
}
LapceUICommand::UpdatePickerPwd(path) => {
Arc::make_mut(&mut data.picker).pwd = path.clone();
data.read_picker_pwd(ctx);
ctx.set_handled();
}
LapceUICommand::UpdatePickerItems(path, items) => {
Arc::make_mut(&mut data.picker)
.set_item_children(path, items.clone());
ctx.set_handled();
}
LapceUICommand::UpdateExplorerItems(index, path, items) => {
let file_explorer = Arc::make_mut(&mut data.file_explorer);
if let Some(node) = file_explorer.get_node_mut(path) {
node.children = items
.iter()
.map(|item| (item.path_buf.clone(), item.clone()))
.collect();
node.read = true;
node.open = true;
node.children_open_count = node.children.len();
}
if let Some(paths) = file_explorer.node_tree(path) {
for path in paths.iter() {
file_explorer.update_node_count(path);
}
}
ctx.set_handled();
}
_ => (),
}
}
_ => (),
}
self.settings.event(ctx, event, data, env);
self.picker.event(ctx, event, data, env);
self.palette.event(ctx, event, data, env);
self.completion.event(ctx, event, data, env);
self.code_action.event(ctx, event, data, env);
self.main_split.event(ctx, event, data, env);
self.status.event(ctx, event, data, env);
for (_, panel) in data.panels.clone().iter() {
if panel.is_shown() {
self.panels
.get_mut(&panel.active)
.unwrap()
.event(ctx, event, data, env);
}
}
self.activity.event(ctx, event, data, env);
match event {
Event::MouseUp(_) => {
if data.drag.is_some() {
*Arc::make_mut(&mut data.drag) = None;
}
}
_ => (),
}
}
fn lifecycle(
&mut self,
ctx: &mut LifeCycleCtx,
event: &LifeCycle,
data: &LapceTabData,
env: &Env,
) {
match event {
LifeCycle::Internal(InternalLifeCycle::ParentWindowOrigin) => {
if ctx.window_origin() != data.window_origin {
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::UpdateWindowOrigin,
Target::Widget(data.id),
))
}
}
_ => (),
}
self.palette.lifecycle(ctx, event, data, env);
self.activity.lifecycle(ctx, event, data, env);
self.main_split.lifecycle(ctx, event, data, env);
self.code_action.lifecycle(ctx, event, data, env);
self.status.lifecycle(ctx, event, data, env);
self.completion.lifecycle(ctx, event, data, env);
self.picker.lifecycle(ctx, event, data, env);
self.settings.lifecycle(ctx, event, data, env);
for (_, panel) in self.panels.iter_mut() {
panel.lifecycle(ctx, event, data, env);
}
}
fn update(
&mut self,
ctx: &mut druid::UpdateCtx,
old_data: &LapceTabData,
data: &LapceTabData,
env: &Env,
) {
if old_data.focus != data.focus {
ctx.request_paint();
}
if !old_data.drag.same(&data.drag) {
ctx.request_paint();
}
        if !old_data
            .main_split
            .diagnostics
            .same(&data.main_split.diagnostics)
        {
            // Repaint only when the diagnostics actually changed, matching
            // the other same() checks above.
            ctx.request_paint();
        }
if !old_data.panels.same(&data.panels) {
ctx.request_layout();
}
if !old_data.config.same(&data.config) {
ctx.request_layout();
}
if old_data.settings.shown != data.settings.shown {
ctx.request_layout();
}
self.palette.update(ctx, data, env);
self.activity.update(ctx, data, env);
self.main_split.update(ctx, data, env);
self.completion.update(ctx, data, env);
self.code_action.update(ctx, data, env);
self.status.update(ctx, data, env);
self.picker.update(ctx, data, env);
self.settings.update(ctx, data, env);
for (_, panel) in data.panels.iter() {
if panel.is_shown() {
self.panels
.get_mut(&panel.active)
.unwrap()
.update(ctx, data, env);
}
}
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &LapceTabData,
env: &Env,
) -> Size {
// ctx.set_paint_insets((0.0, 10.0, 0.0, 0.0));
let self_size = bc.max();
self.height = self_size.height;
let activity_size = self.activity.layout(ctx, bc, data, env);
self.activity.set_origin(ctx, data, env, Point::ZERO);
let status_size = self.status.layout(ctx, bc, data, env);
self.status.set_origin(
ctx,
data,
env,
Point::new(0.0, self_size.height - status_size.height),
);
self.status_height = status_size.height;
let mut active_panels = Vec::new();
let panel_left_top_shown = data
.panels
.get(&PanelPosition::LeftTop)
.map(|p| p.is_shown())
.unwrap_or(false);
let panel_left_bottom_shown = data
.panels
.get(&PanelPosition::LeftBottom)
.map(|p| p.is_shown())
.unwrap_or(false);
let panel_left_width = if panel_left_top_shown || panel_left_bottom_shown {
let left_width = data.panel_size.left;
if panel_left_top_shown && panel_left_bottom_shown {
let top_height = (self_size.height - status_size.height)
* data.panel_size.left_split;
let bottom_height =
self_size.height - status_size.height - top_height;
let panel_left_top =
data.panels.get(&PanelPosition::LeftTop).unwrap().active;
active_panels.push(panel_left_top);
let panel_left_top = self.panels.get_mut(&panel_left_top).unwrap();
panel_left_top.layout(
ctx,
&BoxConstraints::tight(Size::new(left_width, top_height)),
data,
env,
);
panel_left_top.set_origin(
ctx,
data,
env,
Point::new(activity_size.width, 0.0),
);
let panel_left_bottom =
data.panels.get(&PanelPosition::LeftBottom).unwrap().active;
active_panels.push(panel_left_bottom);
let panel_left_bottom =
self.panels.get_mut(&panel_left_bottom).unwrap();
panel_left_bottom.layout(
ctx,
&BoxConstraints::tight(Size::new(left_width, bottom_height)),
data,
env,
);
panel_left_bottom.set_origin(
ctx,
data,
env,
Point::new(activity_size.width, top_height),
);
} else if panel_left_top_shown {
let top_height = self_size.height - status_size.height;
let panel_left_top =
data.panels.get(&PanelPosition::LeftTop).unwrap().active;
active_panels.push(panel_left_top);
let panel_left_top = self.panels.get_mut(&panel_left_top).unwrap();
panel_left_top.layout(
ctx,
&BoxConstraints::tight(Size::new(left_width, top_height)),
data,
env,
);
panel_left_top.set_origin(
ctx,
data,
env,
Point::new(activity_size.width, 0.0),
);
} else if panel_left_bottom_shown {
let bottom_height = self_size.height - status_size.height;
let panel_left_bottom =
data.panels.get(&PanelPosition::LeftBottom).unwrap().active;
active_panels.push(panel_left_bottom);
let panel_left_bottom =
self.panels.get_mut(&panel_left_bottom).unwrap();
panel_left_bottom.layout(
ctx,
&BoxConstraints::tight(Size::new(left_width, bottom_height)),
data,
env,
);
panel_left_bottom.set_origin(
ctx,
data,
env,
Point::new(activity_size.width, 0.0),
);
}
left_width
} else {
0.0
};
let (panel_bottom_left_shown, panel_bottom_left_maximized) = data
.panels
.get(&PanelPosition::BottomLeft)
.map(|p| (p.is_shown(), p.is_maximized()))
.unwrap_or((false, false));
let (panel_bottom_right_shown, panel_bottom_right_maximized) = data
.panels
.get(&PanelPosition::BottomRight)
.map(|p| (p.is_shown(), p.is_maximized()))
.unwrap_or((false, false));
let panel_bottom_height = if panel_bottom_left_shown
|| panel_bottom_right_shown
{
let maximized =
panel_bottom_left_maximized || panel_bottom_right_maximized;
let bottom_height = if maximized {
self_size.height - status_size.height
} else {
data.panel_size.bottom
};
let panel_x = panel_left_width + activity_size.width;
let panel_y = self_size.height - status_size.height - bottom_height;
let panel_width =
self_size.width - activity_size.width - panel_left_width;
if panel_bottom_left_shown && panel_bottom_right_shown {
let left_width = panel_width * data.panel_size.bottom_split;
let right_width = panel_width - left_width;
let panel_bottom_left =
data.panels.get(&PanelPosition::BottomLeft).unwrap().active;
active_panels.push(panel_bottom_left);
let panel_bottom_left =
self.panels.get_mut(&panel_bottom_left).unwrap();
panel_bottom_left.layout(
ctx,
&BoxConstraints::tight(Size::new(left_width, bottom_height)),
data,
env,
);
panel_bottom_left.set_origin(
ctx,
data,
env,
Point::new(panel_left_width + activity_size.width, panel_y),
);
let panel_bottom_right =
data.panels.get(&PanelPosition::BottomRight).unwrap().active;
active_panels.push(panel_bottom_right);
let panel_bottom_right =
self.panels.get_mut(&panel_bottom_right).unwrap();
panel_bottom_right.layout(
ctx,
&BoxConstraints::tight(Size::new(right_width, bottom_height)),
data,
env,
);
panel_bottom_right.set_origin(
ctx,
data,
env,
Point::new(
panel_left_width + left_width + activity_size.width,
panel_y,
),
);
} else if panel_bottom_left_shown {
let panel_bottom_left =
data.panels.get(&PanelPosition::BottomLeft).unwrap().active;
active_panels.push(panel_bottom_left);
let panel_bottom_left =
self.panels.get_mut(&panel_bottom_left).unwrap();
panel_bottom_left.layout(
ctx,
&BoxConstraints::tight(Size::new(panel_width, bottom_height)),
data,
env,
);
panel_bottom_left.set_origin(
ctx,
data,
env,
Point::new(panel_x, panel_y),
);
} else if panel_bottom_right_shown {
let panel_bottom_right =
data.panels.get(&PanelPosition::BottomRight).unwrap().active;
active_panels.push(panel_bottom_right);
let panel_bottom_right =
self.panels.get_mut(&panel_bottom_right).unwrap();
panel_bottom_right.layout(
ctx,
&BoxConstraints::tight(Size::new(panel_width, bottom_height)),
data,
env,
);
panel_bottom_right.set_origin(
ctx,
data,
env,
Point::new(panel_x, panel_y),
);
}
bottom_height
} else {
0.0
};
for (panel_widget_id, panel) in self.panels.iter_mut() {
if !active_panels.contains(panel_widget_id) {
panel.layout(
ctx,
&BoxConstraints::tight(Size::new(300.0, 300.0)),
data,
env,
);
panel.set_origin(ctx, data, env, Point::ZERO);
}
}
let main_split_size = Size::new(
self_size.width - panel_left_width - activity_size.width,
self_size.height - status_size.height - panel_bottom_height,
);
let main_split_bc = BoxConstraints::tight(main_split_size);
let main_split_origin =
Point::new(panel_left_width + activity_size.width, 0.0);
data.main_split.update_split_layout_rect(
*data.main_split.split_id,
main_split_size.to_rect().with_origin(main_split_origin),
);
self.main_split.layout(ctx, &main_split_bc, data, env);
self.main_split
.set_origin(ctx, data, env, main_split_origin);
self.main_split_height = main_split_size.height;
if data.completion.status != CompletionStatus::Inactive {
let completion_origin =
data.completion_origin(ctx.text(), self_size.clone(), &data.config);
self.completion.layout(ctx, bc, data, env);
self.completion
.set_origin(ctx, data, env, completion_origin);
}
if data.main_split.show_code_actions {
let code_action_origin =
data.code_action_origin(ctx.text(), self_size.clone(), &data.config);
self.code_action.layout(ctx, bc, data, env);
self.code_action
.set_origin(ctx, data, env, code_action_origin);
}
if data.palette.status != PaletteStatus::Inactive {
let palette_size = self.palette.layout(ctx, bc, data, env);
self.palette.set_origin(
ctx,
data,
env,
Point::new((self_size.width - palette_size.width) / 2.0, 0.0),
);
}
if data.picker.active {
let picker_size = self.picker.layout(ctx, bc, data, env);
self.picker.set_origin(
ctx,
data,
env,
Point::new(
(self_size.width - picker_size.width) / 2.0,
(self_size.height - picker_size.height) / 3.0,
),
);
}
if data.settings.shown {
self.settings.layout(ctx, bc, data, env);
self.settings.set_origin(ctx, data, env, Point::ZERO);
}
self_size
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &LapceTabData, env: &Env) {
self.main_split.paint(ctx, data, env);
for pos in &[
PanelPosition::BottomLeft,
PanelPosition::BottomRight,
PanelPosition::LeftTop,
PanelPosition::LeftBottom,
PanelPosition::RightTop,
PanelPosition::RightBottom,
] {
if let Some(panel) = data.panels.get(&pos) {
if panel.shown {
if let Some(panel) = self.panels.get_mut(&panel.active) {
let bg = match pos {
PanelPosition::LeftTop
| PanelPosition::LeftBottom
| PanelPosition::RightTop
| PanelPosition::RightBottom => data
.config
.get_color_unchecked(LapceTheme::PANEL_BACKGROUND),
PanelPosition::BottomLeft
| PanelPosition::BottomRight => data
.config
.get_color_unchecked(LapceTheme::EDITOR_BACKGROUND),
};
let rect = panel.layout_rect();
ctx.blurred_rect(
rect,
5.0,
data.config.get_color_unchecked(
LapceTheme::LAPCE_DROPDOWN_SHADOW,
),
);
ctx.fill(rect, bg);
panel.paint(ctx, data, env);
}
}
}
}
self.activity.paint(ctx, data, env);
// if let Some((active_index, (id, kind))) =
// data.panels.get(&PanelPosition::LeftTop).and_then(|panel| {
// panel
// .widgets
// .iter()
// .enumerate()
// .find(|(i, (id, kind))| id == &panel.active)
// })
// {
// let active_offset = 50.0 * active_index as f64;
// let rect = Size::new(50.0, 50.0)
// .to_rect()
// .with_origin(Point::new(0.0, active_offset));
// ctx.fill(
// rect,
// data.config
// .get_color_unchecked(LapceTheme::PANEL_BACKGROUND),
// );
// // self.activity
// // .widget_mut()
// // .paint_svg(ctx, data, active_index, kind);
// }
self.status.paint(ctx, data, env);
self.completion.paint(ctx, data, env);
self.code_action.paint(ctx, data, env);
self.palette.paint(ctx, data, env);
self.picker.paint(ctx, data, env);
self.settings.paint(ctx, data, env);
self.paint_drag(ctx, data);
}
}
pub struct LapceTabHeader {
pub drag_start: Option<(Point, Point)>,
pub mouse_pos: Point,
cross_rect: Rect,
}
impl LapceTabHeader {
pub fn new() -> Self {
Self {
cross_rect: Rect::ZERO,
drag_start: None,
mouse_pos: Point::ZERO,
}
}
pub fn origin(&self) -> Option<Point> {
self.drag_start
.map(|(drag, origin)| origin + (self.mouse_pos - drag))
}
}
impl Widget<LapceTabData> for LapceTabHeader {
fn event(
&mut self,
ctx: &mut EventCtx,
event: &Event,
data: &mut LapceTabData,
env: &Env,
) {
match event {
Event::MouseMove(mouse_event) => {
if ctx.is_active() {
if let Some(pos) = self.drag_start {
self.mouse_pos = ctx.to_window(mouse_event.pos);
ctx.request_layout();
}
return;
}
if self.cross_rect.contains(mouse_event.pos) {
ctx.set_cursor(&druid::Cursor::Pointer);
} else {
ctx.set_cursor(&druid::Cursor::Arrow);
}
}
Event::MouseDown(mouse_event) => {
if self.cross_rect.contains(mouse_event.pos) {
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::CloseTabId(data.id),
Target::Auto,
));
} else {
self.drag_start =
Some((ctx.to_window(mouse_event.pos), ctx.window_origin()));
self.mouse_pos = ctx.to_window(mouse_event.pos);
ctx.set_active(true);
ctx.submit_command(Command::new(
LAPCE_UI_COMMAND,
LapceUICommand::FocusTabId(data.id),
Target::Auto,
));
}
}
Event::MouseUp(mouse_event) => {
ctx.set_active(false);
self.drag_start = None;
}
_ => {}
}
}
fn lifecycle(
&mut self,
ctx: &mut LifeCycleCtx,
event: &LifeCycle,
data: &LapceTabData,
env: &Env,
) {
match event {
LifeCycle::HotChanged(is_hot) => {
ctx.request_paint();
}
_ => (),
}
}
fn update(
&mut self,
ctx: &mut druid::UpdateCtx,
old_data: &LapceTabData,
data: &LapceTabData,
env: &Env,
) {
}
fn layout(
&mut self,
ctx: &mut LayoutCtx,
bc: &BoxConstraints,
data: &LapceTabData,
env: &Env,
) -> Size {
let size = bc.max();
let cross_size = 8.0;
let padding = (size.height - cross_size) / 2.0;
let origin = Point::new(size.width - padding - cross_size, padding);
self.cross_rect = Size::new(cross_size, cross_size)
.to_rect()
.with_origin(origin);
size
}
fn paint(&mut self, ctx: &mut PaintCtx, data: &LapceTabData, env: &Env) {
let dir = data
.workspace
.path
.as_ref()
.map(|p| {
let dir = p.file_name().unwrap().to_str().unwrap();
let dir = match &data.workspace.kind {
LapceWorkspaceType::Local => dir.to_string(),
LapceWorkspaceType::RemoteSSH(user, host) => {
format!("{} [{}@{}]", dir, user, host)
}
};
dir
})
.unwrap_or("Lapce".to_string());
let text_layout = ctx
.text()
.new_text_layout(dir)
.font(FontFamily::SYSTEM_UI, 13.0)
.text_color(
data.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone(),
)
.build()
.unwrap();
let size = ctx.size();
let text_size = text_layout.size();
let x = (size.width - text_size.width) / 2.0;
let y = (size.height - text_size.height) / 2.0;
ctx.draw_text(&text_layout, Point::new(x, y));
if ctx.is_hot() {
let line = Line::new(
Point::new(self.cross_rect.x0, self.cross_rect.y0),
Point::new(self.cross_rect.x1, self.cross_rect.y1),
);
ctx.stroke(
line,
&data
.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone(),
1.0,
);
let line = Line::new(
Point::new(self.cross_rect.x1, self.cross_rect.y0),
Point::new(self.cross_rect.x0, self.cross_rect.y1),
);
ctx.stroke(
line,
&data
.config
.get_color_unchecked(LapceTheme::EDITOR_FOREGROUND)
.clone(),
1.0,
);
}
}
}
| bar_hit_test |
mock_provider.rs | // Copyright 2019 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use crate::common::ASYMMETRIC_KEY_ALGORITHMS;
use crate::crypto_provider::{
AsymmetricProviderKey, CryptoProvider, CryptoProviderError, ProviderKey, SealingProviderKey,
};
use fidl_fuchsia_kms::AsymmetricKeyAlgorithm;
use std::sync::{Arc, Mutex};
/// A mock crypto provider implementation.
///
/// This mock implementation records the input data it is called with and returns caller-provided
/// output data. Note that the fields are shared references, so a tester can keep a clone of the
/// MockProvider and use it to change the returned results during the test.
#[derive(Debug)]
pub struct MockProvider {
// Use Mutex to achieve interior mutability since this provider is expected to be not
// mutable.
key_name: Arc<Mutex<Option<String>>>,
key_data: Arc<Mutex<Option<Vec<u8>>>>,
result: Arc<Mutex<Result<Vec<u8>, CryptoProviderError>>>,
key_result: Arc<Mutex<Result<Vec<u8>, CryptoProviderError>>>,
}
impl CryptoProvider for MockProvider {
fn supported_asymmetric_algorithms(&self) -> Vec<AsymmetricKeyAlgorithm> {
// The mock provider supported all asymmetric algorithms.
ASYMMETRIC_KEY_ALGORITHMS.to_vec()
}
fn get_name(&self) -> &'static str {
"MockProvider"
}
fn box_clone(&self) -> Box<dyn CryptoProvider> {
Box::new(MockProvider {
key_name: Arc::clone(&self.key_name),
key_data: Arc::clone(&self.key_data),
result: Arc::clone(&self.result),
key_result: Arc::clone(&self.key_result),
})
}
fn generate_asymmetric_key(
&self,
key_algorithm: AsymmetricKeyAlgorithm,
key_name: &str,
) -> Result<Box<AsymmetricProviderKey>, CryptoProviderError> {
*self.key_name.lock().unwrap() = Some(key_name.to_string());
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(key_data) => Ok(Box::new(MockAsymmetricKey {
key_data: key_data.to_vec(),
key_algorithm,
result: Arc::clone(&self.key_result),
})),
}
}
fn import_asymmetric_key(
&self,
key_data: &[u8],
key_algorithm: AsymmetricKeyAlgorithm,
key_name: &str,
) -> Result<Box<AsymmetricProviderKey>, CryptoProviderError> {
*self.key_name.lock().unwrap() = Some(key_name.to_string());
*self.key_data.lock().unwrap() = Some(key_data.to_vec());
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(result_data) => Ok(Box::new(MockAsymmetricKey {
key_data: result_data.clone(),
key_algorithm,
result: Arc::clone(&self.key_result),
})),
}
}
fn parse_asymmetric_key(
&self,
key_data: &[u8],
key_algorithm: AsymmetricKeyAlgorithm,
) -> Result<Box<AsymmetricProviderKey>, CryptoProviderError> {
*self.key_data.lock().unwrap() = Some(key_data.to_vec());
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(_) => Ok(Box::new(MockAsymmetricKey {
key_data: key_data.to_vec(),
key_algorithm,
result: Arc::clone(&self.key_result),
})),
}
}
fn generate_sealing_key(
&self,
key_name: &str,
) -> Result<Box<SealingProviderKey>, CryptoProviderError> |
fn parse_sealing_key(
&self,
key_data: &[u8],
) -> Result<Box<SealingProviderKey>, CryptoProviderError> {
*self.key_data.lock().unwrap() = Some(key_data.to_vec());
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(_) => Ok(Box::new(MockSealingKey {
key_data: key_data.to_vec(),
result: Arc::clone(&self.key_result),
})),
}
}
fn calculate_sealed_data_size(&self, original_data_size: u64) -> u64 {
original_data_size
}
}
/// A mock crypto provider class for unit tests.
#[allow(dead_code)]
impl MockProvider {
pub fn new() -> Self {
MockProvider {
key_name: Arc::new(Mutex::new(None)),
key_data: Arc::new(Mutex::new(None)),
result: Arc::new(Mutex::new(Err(CryptoProviderError::new("")))),
key_result: Arc::new(Mutex::new(Err(CryptoProviderError::new("")))),
}
}
/// Set every operation on this provider to return an error.
pub fn set_error(&self) {
*self.result.lock().unwrap() = Err(CryptoProviderError::new(""));
}
/// Set the return result for every operation on this provider.
pub fn set_result(&self, result: &[u8]) {
*self.result.lock().unwrap() = Ok(result.to_vec());
}
/// Set every operation on the key that is generated by this provider to return error.
pub fn set_key_operation_error(&self) {
*self.key_result.lock().unwrap() = Err(CryptoProviderError::new(""));
}
/// Set the return result for every operation on the key that is generated by this provider.
pub fn set_key_result(&self, key_result: Result<Vec<u8>, CryptoProviderError>) {
*self.key_result.lock().unwrap() = key_result;
}
/// Get the key data argument passed to this provider for generating/parsing/importing key.
pub fn get_called_key_data(&self) -> Vec<u8> {
let key_data: Option<Vec<u8>> = self.key_data.lock().unwrap().clone();
key_data.unwrap()
}
/// Get the key name argument passed to this provider for generating/parsing/importing key.
pub fn get_called_key_name(&self) -> String {
let key_name: Option<String> = self.key_name.lock().unwrap().clone();
key_name.unwrap()
}
}
pub struct MockAsymmetricKey {
key_data: Vec<u8>,
key_algorithm: AsymmetricKeyAlgorithm,
result: Arc<Mutex<Result<Vec<u8>, CryptoProviderError>>>,
}
impl AsymmetricProviderKey for MockAsymmetricKey {
fn sign(&self, _data: &[u8]) -> Result<Vec<u8>, CryptoProviderError> {
self.result.lock().unwrap().clone()
}
fn get_der_public_key(&self) -> Result<Vec<u8>, CryptoProviderError> {
self.result.lock().unwrap().clone()
}
fn get_key_algorithm(&self) -> AsymmetricKeyAlgorithm {
self.key_algorithm
}
}
impl ProviderKey for MockAsymmetricKey {
fn delete(&mut self) -> Result<(), CryptoProviderError> {
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(_) => Ok(()),
}
}
fn get_key_data(&self) -> Vec<u8> {
self.key_data.to_vec()
}
fn get_provider_name(&self) -> &'static str {
MockProvider::new().get_name()
}
}
pub struct MockSealingKey {
key_data: Vec<u8>,
result: Arc<Mutex<Result<Vec<u8>, CryptoProviderError>>>,
}
impl SealingProviderKey for MockSealingKey {
fn encrypt(&self, _data: &[u8]) -> Result<Vec<u8>, CryptoProviderError> {
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
result.clone()
}
fn decrypt(&self, _data: &[u8]) -> Result<Vec<u8>, CryptoProviderError> {
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
result.clone()
}
}
impl ProviderKey for MockSealingKey {
fn delete(&mut self) -> Result<(), CryptoProviderError> {
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(_) => Ok(()),
}
}
fn get_key_data(&self) -> Vec<u8> {
self.key_data.to_vec()
}
fn get_provider_name(&self) -> &'static str {
MockProvider::new().get_name()
}
}
| {
*self.key_name.lock().unwrap() = Some(key_name.to_string());
let result: &Result<Vec<u8>, CryptoProviderError> = &self.result.lock().unwrap();
match result {
Err(err) => Err(err.clone()),
Ok(key_data) => Ok(Box::new(MockSealingKey {
key_data: key_data.to_vec(),
result: Arc::clone(&self.key_result),
})),
}
} |
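// A minimal usage sketch for MockProvider, using only the methods defined
// above; the key name is arbitrary and EcdsaSha256P256 is assumed to be one
// of the AsymmetricKeyAlgorithm variants.
#[cfg(test)]
mod mock_provider_usage {
    use super::*;

    #[test]
    fn generate_key_records_inputs_and_returns_canned_outputs() {
        let provider = MockProvider::new();
        provider.set_result(b"fake-key-data");
        provider.set_key_result(Ok(b"fake-signature".to_vec()));

        let key = provider
            .generate_asymmetric_key(AsymmetricKeyAlgorithm::EcdsaSha256P256, "my-key")
            .expect("mock succeeds once a result is set");

        // The mock records the arguments it was called with...
        assert_eq!(provider.get_called_key_name(), "my-key");
        // ...keeps the requested algorithm on the returned key...
        assert_eq!(key.get_key_algorithm(), AsymmetricKeyAlgorithm::EcdsaSha256P256);
        // ...and every key operation returns the canned key result.
        assert_eq!(key.sign(b"payload").unwrap(), b"fake-signature".to_vec());
    }
}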
genericpath.py | """
Path operations common to more than one OS
Do not use directly. The OS specific modules import the appropriate
functions from this module themselves.
"""
import os
import stat
__all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime',
'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile',
'samestat']
# Does a path exist?
# This is false for dangling symbolic links on systems that support them.
def exists(path):
"""Test whether a path exists. Returns False for broken symbolic links"""
try:
os.stat(path)
except OSError:
return False
return True
# This follows symbolic links, so both islink() and isdir() can be true
# for the same path on systems that support symlinks
def isfile(path):
"""Test whether a path is a regular file"""
try:
st = os.stat(path)
except OSError:
return False
return stat.S_ISREG(st.st_mode)
# Is a path a directory?
# This follows symbolic links, so both islink() and isdir()
# can be true for the same path on systems that support symlinks
def isdir(s):
"""Return true if the pathname refers to an existing directory."""
try:
st = os.stat(s)
except OSError:
return False
return stat.S_ISDIR(st.st_mode)
def getsize(filename):
"""Return the size of a file, reported by os.stat()."""
return os.stat(filename).st_size
def getmtime(filename):
"""Return the last modification time of a file, reported by os.stat()."""
return os.stat(filename).st_mtime
def getatime(filename):
"""Return the last access time of a file, reported by os.stat()."""
return os.stat(filename).st_atime
def getctime(filename):
"""Return the metadata change time of a file, reported by os.stat()."""
return os.stat(filename).st_ctime
# Return the longest prefix of all list elements.
def | (m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
# Some people pass in a list of pathname parts to operate in an OS-agnostic
# fashion; don't try to translate in that case as that's an abuse of the
    # api and they are already doing what they need to be OS-agnostic and so
# they most likely won't be using an os.PathLike object in the sublists.
if not isinstance(m[0], (list, tuple)):
m = tuple(map(os.fspath, m))
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
# Are two stat buffers (obtained from stat, fstat or lstat)
# describing the same file?
def samestat(s1, s2):
"""Test whether two stat buffers reference the same file"""
return (s1.st_ino == s2.st_ino and
s1.st_dev == s2.st_dev)
# Are two filenames really pointing to the same file?
def samefile(f1, f2):
"""Test whether two pathnames reference the same actual file or directory
This is determined by the device number and i-node number and
raises an exception if an os.stat() call on either pathname fails.
"""
s1 = os.stat(f1)
s2 = os.stat(f2)
return samestat(s1, s2)
# Are two open files really referencing the same file?
# (Not necessarily the same file descriptor!)
def sameopenfile(fp1, fp2):
"""Test whether two open file objects reference the same file"""
s1 = os.fstat(fp1)
s2 = os.fstat(fp2)
return samestat(s1, s2)
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
# Generic implementation of splitext, to be parametrized with
# the separators
def _splitext(p, sep, altsep, extsep):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end, ignoring
leading dots. Returns "(root, ext)"; ext may be empty."""
# NOTE: This code must work for text and bytes strings.
sepIndex = p.rfind(sep)
if altsep:
altsepIndex = p.rfind(altsep)
sepIndex = max(sepIndex, altsepIndex)
dotIndex = p.rfind(extsep)
if dotIndex > sepIndex:
# skip all leading dots
filenameIndex = sepIndex + 1
while filenameIndex < dotIndex:
if p[filenameIndex:filenameIndex+1] != extsep:
return p[:dotIndex], p[dotIndex:]
filenameIndex += 1
return p, p[:0]
def _check_arg_types(funcname, *args):
hasstr = hasbytes = False
for s in args:
if isinstance(s, str):
hasstr = True
elif isinstance(s, bytes):
hasbytes = True
else:
raise TypeError('%s() argument must be str or bytes, not %r' %
(funcname, s.__class__.__name__)) from None
if hasstr and hasbytes:
raise TypeError("Can't mix strings and bytes in path components") from None
| commonprefix |
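# A small illustration of commonprefix(): it compares character by character,
# not path component by path component, so the result is not necessarily a
# valid directory. Callers that need a component-wise answer should use
# os.path.commonpath() instead.
if __name__ == "__main__":
    assert commonprefix(['/usr/lib', '/usr/local/lib']) == '/usr/l'
    assert commonprefix([]) == ''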
watchdog.rs | //! # Watchdog Example
//!
//! This application demonstrates how to use the RP2040 Watchdog.
//!
//! It may need to be adapted to your particular board layout and/or pin assignment.
//!
//! See the `Cargo.toml` file for Copyright and licence details.
#![no_std]
#![no_main]
// The macro for our start-up function | // Ensure we halt the program on panic (if we don't mention this crate it won't
// be linked)
use panic_halt as _;
// Alias for our HAL crate
use rp2040_hal as hal;
// A shorter alias for the Peripheral Access Crate, which provides low-level
// register access
use hal::pac;
// Some traits we need
use embedded_hal::digital::v2::OutputPin;
use embedded_hal::watchdog::{Watchdog, WatchdogEnable};
use embedded_time::duration::Extensions;
use embedded_time::fixed_point::FixedPoint;
use rp2040_hal::clocks::Clock;
/// The linker will place this boot block at the start of our program image. We
/// need this to help the ROM bootloader get our code up and running.
#[link_section = ".boot2"]
#[used]
pub static BOOT2: [u8; 256] = rp2040_boot2::BOOT_LOADER_W25Q080;
/// External high-speed crystal on the Raspberry Pi Pico board is 12 MHz. Adjust
/// if your board has a different frequency
const XTAL_FREQ_HZ: u32 = 12_000_000u32;
/// Entry point to our bare-metal application.
///
/// The `#[entry]` macro ensures the Cortex-M start-up code calls this function
/// as soon as all global variables are initialised.
///
/// The function configures the RP2040 peripherals, then toggles a GPIO pin in
/// an infinite loop. After a period of time, the watchdog will kick in to reset
/// the CPU.
#[entry]
fn main() -> ! {
// Grab our singleton objects
let mut pac = pac::Peripherals::take().unwrap();
let core = pac::CorePeripherals::take().unwrap();
// Set up the watchdog driver - needed by the clock setup code
let mut watchdog = hal::Watchdog::new(pac.WATCHDOG);
// Configure the clocks
let clocks = hal::clocks::init_clocks_and_plls(
XTAL_FREQ_HZ,
pac.XOSC,
pac.CLOCKS,
pac.PLL_SYS,
pac.PLL_USB,
&mut pac.RESETS,
&mut watchdog,
)
.ok()
.unwrap();
let mut delay = cortex_m::delay::Delay::new(core.SYST, clocks.system_clock.freq().integer());
// The single-cycle I/O block controls our GPIO pins
let sio = hal::Sio::new(pac.SIO);
// Set the pins to their default state
let pins = hal::gpio::Pins::new(
pac.IO_BANK0,
pac.PADS_BANK0,
sio.gpio_bank0,
&mut pac.RESETS,
);
// Configure an LED so we can show the current state of the watchdog
let mut led_pin = pins.gpio25.into_push_pull_output();
// Set the LED high for 2 seconds so we know when we're about to start the watchdog
led_pin.set_high().unwrap();
delay.delay_ms(2000);
    // Set the watchdog to reset if it's not reloaded within 1.05 seconds, and start it
watchdog.start(1_050_000u32.microseconds());
// Blink once a second for 5 seconds, refreshing the watchdog timer once a second to avoid a reset
for _ in 1..=5 {
led_pin.set_low().unwrap();
delay.delay_ms(500);
led_pin.set_high().unwrap();
delay.delay_ms(500);
watchdog.feed();
}
// Blink 10 times per second, not feeding the watchdog.
// The processor should reset in 1.05 seconds, or 5 blinks time
loop {
led_pin.set_low().unwrap();
delay.delay_ms(100);
led_pin.set_high().unwrap();
delay.delay_ms(100);
}
}
// End of file | use cortex_m_rt::entry;
|
trpo.py | import torch
from tonic import logger # noqa
from tonic.torch import agents, updaters
def default_actor_updater():
|
class TRPO(agents.A2C):
'''Trust Region Policy Optimization.
TRPO: https://arxiv.org/pdf/1502.05477.pdf
'''
def __init__(
self, model=None, replay=None, actor_updater=None, critic_updater=None
):
actor_updater = actor_updater or default_actor_updater()
super().__init__(
model=model, replay=replay, actor_updater=actor_updater,
critic_updater=critic_updater)
def step(self, observations):
# Sample actions and get their log-probabilities for training.
actions, log_probs, locs, scales = self._step(observations)
actions = actions.numpy()
log_probs = log_probs.numpy()
locs = locs.numpy()
scales = scales.numpy()
# Keep some values for the next update.
self.last_observations = observations.copy()
self.last_actions = actions.copy()
self.last_log_probs = log_probs.copy()
self.last_locs = locs.copy()
self.last_scales = scales.copy()
return actions
def update(self, observations, rewards, resets, terminations):
# Store the last transitions in the replay.
self.replay.store(
observations=self.last_observations, actions=self.last_actions,
next_observations=observations, rewards=rewards, resets=resets,
terminations=terminations, log_probs=self.last_log_probs,
locs=self.last_locs, scales=self.last_scales)
# Prepare to update the normalizers.
if self.model.observation_normalizer:
self.model.observation_normalizer.record(self.last_observations)
if self.model.return_normalizer:
self.model.return_normalizer.record(rewards)
# Update the model if the replay is ready.
if self.replay.ready():
self._update()
def _step(self, observations):
observations = torch.as_tensor(observations)
with torch.no_grad():
distributions = self.model.actor(observations)
if hasattr(distributions, 'sample_with_log_prob'):
actions, log_probs = distributions.sample_with_log_prob()
else:
actions = distributions.sample()
log_probs = distributions.log_prob(actions)
log_probs = log_probs.sum(axis=-1)
locs = distributions.loc
scales = distributions.stddev
return actions, log_probs, locs, scales
def _update(self):
# Compute the lambda-returns.
batch = self.replay.get_full('observations', 'next_observations')
values, next_values = self._evaluate(**batch)
values, next_values = values.numpy(), next_values.numpy()
self.replay.compute_returns(values, next_values)
actor_keys = ('observations', 'actions', 'log_probs', 'locs',
'scales', 'advantages')
actor_batch = self.replay.get_full(*actor_keys)
actor_infos = self.actor_updater(**actor_batch)
for k, v in actor_infos.items():
logger.store('actor/' + k, v.numpy())
critic_keys = 'observations', 'returns'
critic_iterations = 0
for critic_batch in self.replay.get(*critic_keys):
critic_infos = self.critic_updater(**critic_batch)
critic_iterations += 1
for k, v in critic_infos.items():
logger.store('critic/' + k, v.numpy())
logger.store('critic/iterations', critic_iterations)
# Update the normalizers.
if self.model.observation_normalizer:
self.model.observation_normalizer.update()
if self.model.return_normalizer:
self.model.return_normalizer.update()
| return updaters.TrustRegionPolicyGradient(
optimizer=updaters.ConjugateGradient(
constraint_threshold=0.01, damping_coefficient=0.1,
conjugate_gradient_steps=10, backtrack_steps=10,
backtrack_coefficient=0.8)) |
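The default actor updater above wires a ConjugateGradient optimizer into TrustRegionPolicyGradient. The core numerical step behind such an optimizer is solving H x = g using only matrix-vector products (Fisher-vector products in TRPO) rather than forming H explicitly. The sketch below is a generic NumPy conjugate-gradient routine with a toy dense matrix standing in for the Fisher-vector product; it illustrates the idea and is not tonic's implementation.

import numpy as np

def conjugate_gradient(matvec, b, steps=10, tol=1e-10):
    # Solve A x = b iteratively, touching A only through matvec(p).
    x = np.zeros_like(b)
    r = b.copy()            # residual, since x starts at zero
    p = r.copy()
    rr = r @ r
    for _ in range(steps):
        Ap = matvec(p)
        alpha = rr / (p @ Ap)
        x += alpha * p
        r -= alpha * Ap
        new_rr = r @ r
        if new_rr < tol:
            break
        p = r + (new_rr / rr) * p
        rr = new_rr
    return x

# Toy check with a small symmetric positive-definite matrix.
A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([1.0, 2.0])
x = conjugate_gradient(lambda v: A @ v, b)
print(np.allclose(A @ x, b))  # True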
make_corrections_to_node_and_its_descendants.ts | import { BlockMap, ContentBlock } from 'draft-js';
function sortByPos(arr: Array<ContentBlock>) {
const newArr = [...arr];
newArr.sort((item1, item2) => {
// @ts-ignore
return item1.getIn(['data', 'pos']) - item2.getIn(['data', 'pos']);
});
return newArr;
}
/**
* Given a blockMap and a block id, this function traverses the tree from the given block id and returns all blocks under that tree
* It makes some changes to the children as it traverses them
* 1. It sorts the children by pos
* 2. It updates the depth information
*/
export function | (
blockMap: BlockMap,
node: ContentBlock
): Array<ContentBlock> {
const nodeDepth = node.getDepth();
const nodeKey = node.getKey();
const children = sortByPos(
blockMap
.filter(b => !!(b && b.getIn(['data', 'parentId']) === nodeKey))
.map((b?: ContentBlock) => {
return (b ? b.set('depth', nodeDepth + 1) : b) as ContentBlock;
})
.toArray()
);
// termination condition. The node is a leaf node.
if (!children.length) {
return [node];
}
return [node].concat(
children
.map((child: ContentBlock) =>
makeCorrectionsToNodeAndItsDescendants(blockMap, child)
)
.flat()
);
}
| makeCorrectionsToNodeAndItsDescendants |
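The helper above walks the block tree, ordering siblings by their `pos` value and rewriting `depth` as parent depth + 1. The same idea in a plain-Python form (dict-based blocks, purely illustrative and unrelated to draft-js):

def make_corrections(blocks, node):
    # Return node followed by all of its descendants, with siblings sorted by
    # 'pos' and 'depth' rewritten to parent depth + 1.
    children = sorted(
        (b for b in blocks if b['parentId'] == node['id']),
        key=lambda b: b['pos'],
    )
    out = [node]
    for child in children:
        child = {**child, 'depth': node['depth'] + 1}
        out += make_corrections(blocks, child)
    return out

blocks = [
    {'id': 'a', 'parentId': None, 'pos': 0, 'depth': 0},
    {'id': 'c', 'parentId': 'a', 'pos': 2, 'depth': 9},
    {'id': 'b', 'parentId': 'a', 'pos': 1, 'depth': 9},
]
print([b['id'] for b in make_corrections(blocks, blocks[0])])  # ['a', 'b', 'c']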
tree_options.rs | use {
super::Sort,
crate::{
cli::clap_args,
conf::Conf,
display::{Cols, DEFAULT_COLS},
errors::ConfError,
pattern::*,
},
clap::ArgMatches,
std::convert::TryFrom,
};
/// Options defining how the tree should be built and/or displayed
#[derive(Debug, Clone)]
pub struct TreeOptions {
pub show_selection_mark: bool, // whether to have a triangle left of selected line
pub show_hidden: bool, // whether files whose name starts with a dot should be shown
pub only_folders: bool, // whether to hide normal files and links
pub show_counts: bool, // whether to show the number of files (> 1 only for dirs)
pub show_dates: bool, // whether to show the last modified date
pub show_sizes: bool, // whether to show sizes of files and dirs
pub show_git_file_info: bool,
pub show_device_id: bool,
pub show_root_fs: bool, // show information relative to the fs of the root
pub trim_root: bool, // whether to cut out direct children of root
pub show_permissions: bool, // show classic rwx unix permissions (only on unix)
pub respect_git_ignore: bool, // hide files as requested by .gitignore ?
pub filter_by_git_status: bool, // only show files whose git status is not null
pub pattern: InputPattern, // an optional filtering/scoring pattern
pub date_time_format: &'static str,
pub sort: Sort,
pub cols_order: Cols, // order of columns
}
impl TreeOptions {
/// clone self but without the pattern (if any)
pub fn without_pattern(&self) -> Self {
TreeOptions {
show_selection_mark: self.show_selection_mark,
show_hidden: self.show_hidden,
only_folders: self.only_folders,
show_counts: self.show_counts,
show_dates: self.show_dates,
show_sizes: self.show_sizes,
show_permissions: self.show_permissions,
respect_git_ignore: self.respect_git_ignore,
filter_by_git_status: self.filter_by_git_status,
show_git_file_info: self.show_git_file_info,
show_device_id: self.show_device_id,
show_root_fs: self.show_root_fs,
trim_root: self.trim_root,
pattern: InputPattern::none(),
date_time_format: self.date_time_format,
sort: self.sort,
cols_order: self.cols_order,
}
}
/// counts must be computed, either for sorting or just for display
pub fn | (&self) -> bool {
self.show_counts || self.sort == Sort::Count
}
/// dates must be computed, either for sorting or just for display
pub fn needs_dates(&self) -> bool {
self.show_dates || self.sort == Sort::Date
}
/// sizes must be computed, either for sorting or just for display
pub fn needs_sizes(&self) -> bool {
self.show_sizes || self.sort == Sort::Size
}
pub fn needs_sum(&self) -> bool {
self.needs_counts() || self.needs_dates() || self.needs_sizes()
}
/// this method does not exist, you saw nothing
/// (at least don't call it other than with the config, once)
pub fn set_date_time_format(&mut self, format: String) {
self.date_time_format = Box::leak(format.into_boxed_str());
}
/// change tree options according to configuration
pub fn apply_config(&mut self, config: &Conf) -> Result<(), ConfError> {
if let Some(default_flags) = &config.default_flags {
let clap_app = clap_args::clap_app().setting(clap::AppSettings::NoBinaryName);
let flags_args = format!("-{}", default_flags);
let conf_matches = clap_app.get_matches_from(vec![&flags_args]);
self.apply_launch_args(&conf_matches);
}
if let Some(b) = &config.show_selection_mark {
self.show_selection_mark = *b;
}
if let Some(format) = &config.date_time_format {
self.set_date_time_format(format.clone());
}
self.cols_order = config
.cols_order
.as_ref()
.map(Cols::try_from)
.transpose()?
.unwrap_or(DEFAULT_COLS);
Ok(())
}
/// change tree options according to broot launch arguments
pub fn apply_launch_args(&mut self, cli_args: &ArgMatches<'_>) {
if cli_args.is_present("sizes") {
self.show_sizes = true;
self.show_root_fs = true;
} else if cli_args.is_present("no-sizes") {
self.show_sizes = false;
}
if cli_args.is_present("whale-spotting") {
self.show_hidden = true;
self.respect_git_ignore = false;
self.sort = Sort::Size;
self.show_sizes = true;
self.show_root_fs = true;
}
if cli_args.is_present("only-folders") {
self.only_folders = true;
} else if cli_args.is_present("no-only-folders") {
self.only_folders = false;
}
if cli_args.is_present("git-status") {
self.filter_by_git_status = true;
self.show_hidden = true;
}
if cli_args.is_present("hidden") {
self.show_hidden = true;
} else if cli_args.is_present("no-hidden") {
self.show_hidden = false;
}
if cli_args.is_present("dates") {
self.show_dates = true;
} else if cli_args.is_present("no-dates") {
self.show_dates = false;
}
if cli_args.is_present("permissions") {
self.show_permissions = true;
} else if cli_args.is_present("no-permissions") {
self.show_permissions = false;
}
if cli_args.is_present("show-root-fs") {
self.show_root_fs = true;
}
if cli_args.is_present("show-gitignored") {
self.respect_git_ignore = false;
} else if cli_args.is_present("no-show-gitignored") {
self.respect_git_ignore = true;
}
if cli_args.is_present("show-git-info") {
self.show_git_file_info = true;
} else if cli_args.is_present("no-show-git-info") {
self.show_git_file_info = false;
}
if cli_args.is_present("sort-by-count") {
self.sort = Sort::Count;
self.show_counts = true;
}
if cli_args.is_present("sort-by-date") {
self.sort = Sort::Date;
self.show_dates = true;
}
if cli_args.is_present("sort-by-size") {
self.sort = Sort::Size;
self.show_sizes = true;
}
if cli_args.is_present("no-sort") {
self.sort = Sort::None;
}
if cli_args.is_present("trim-root") {
self.trim_root = true;
} else if cli_args.is_present("no-trim-root") {
self.trim_root = false;
}
}
}
impl Default for TreeOptions {
fn default() -> Self {
Self {
show_selection_mark: false,
show_hidden: false,
only_folders: false,
show_counts: false,
show_dates: false,
show_sizes: false,
show_git_file_info: false,
show_device_id: false,
show_root_fs: false,
trim_root: false,
show_permissions: false,
respect_git_ignore: true,
filter_by_git_status: false,
pattern: InputPattern::none(),
date_time_format: "%Y/%m/%d %R",
sort: Sort::None,
cols_order: DEFAULT_COLS,
}
}
}
| needs_counts |
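The needs_* helpers above all follow one rule: a per-file statistic must be computed when it is either displayed or used as the sort key. A compact restatement of that rule (illustrative Python, not broot code):

from dataclasses import dataclass

@dataclass
class Needs:
    show_counts: bool = False
    show_dates: bool = False
    show_sizes: bool = False
    sort: str = "none"   # "none" | "count" | "date" | "size"

    def needs(self, stat: str) -> bool:
        # Needed if shown, or if the tree is sorted by this statistic.
        return getattr(self, f"show_{stat}s") or self.sort == stat

    def needs_sum(self) -> bool:
        return any(self.needs(s) for s in ("count", "date", "size"))

print(Needs(sort="size").needs("size"))   # True
print(Needs().needs_sum())                # False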
infohub.model.js | const mongoose = require("mongoose");
const Schema = mongoose.Schema;
/*
* For mediaType:
* 0 = link or article
* 1 = video
* 2 = podcast
*/
/*
* For sectionID:
* 0 = The Basics on Coronavirus
* 1 = Necessities: Food and Jobs
* 2 = Entertaining and Caring for Yourself
* 3 = Communities to Join
*/
let ResourceSchema = new Schema({
url: { type: String, required: true },
name: { type: String, required: true },
description: { type: String, required: true },
mediaType: { type: Number, required: false },
sectionID: { type: Number, required: false },
associationID: { type: String, required: false },
isPublic: { type: Boolean, required: false, default: false },
categories: [{ type: String, required: false }], | module.exports = mongoose.model("Resources", ResourceSchema); | });
|
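The numeric codes documented in the schema comments above, gathered into one lookup table for readability (an illustrative summary in Python; the values come directly from those comments):

MEDIA_TYPES = {0: "link or article", 1: "video", 2: "podcast"}
SECTION_IDS = {
    0: "The Basics on Coronavirus",
    1: "Necessities: Food and Jobs",
    2: "Entertaining and Caring for Yourself",
    3: "Communities to Join",
}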
app.py | from flask import Flask
from flask_graphql import GraphQLView
from models import db_session
from schema import schema, Department
app = Flask(__name__)
app.debug = True
@app.route('/')
def index():
return '<p> Hello World!</p>'
app.add_url_rule(
'/graphql',
view_func=GraphQLView.as_view(
'graphql',
schema=schema,
graphiql=True # for having the GraphiQL interface
)
)
@app.teardown_appcontext
def | (exception=None):
db_session.remove()
if __name__ == '__main__':
app.run() | shutdown_session |
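With the app above running, /graphql accepts standard GraphQL POST requests. A minimal client-side smoke test using an introspection query, which works against any schema, so nothing has to be assumed about the Department type; the URL assumes Flask's default port 5000:

import requests

query = "{ __schema { queryType { name } } }"
resp = requests.post("http://localhost:5000/graphql", json={"query": query})
print(resp.json())  # e.g. {'data': {'__schema': {'queryType': {'name': 'Query'}}}}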
polynomial_fitting.py | from __future__ import annotations
from typing import NoReturn
from . import LinearRegression
from ...base import BaseEstimator
import numpy as np
class PolynomialFitting(BaseEstimator):
"""
Polynomial Fitting using Least Squares estimation
"""
def __init__(self, k: int) -> PolynomialFitting:
"""
Instantiate a polynomial fitting estimator
Parameters
----------
k : int
Degree of polynomial to fit
"""
super().__init__()
self.degree = k
self.linear_regression_model = LinearRegression(
include_intercept=False)
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to polynomial transformed samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
x = self.__transform(X)
self.linear_regression_model.fit(x, y)
def | (self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
x = self.__transform(X)
return self.linear_regression_model.predict(x)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
x = self.__transform(X)
return self.linear_regression_model.loss(x, y)
def __transform(self, X: np.ndarray) -> np.ndarray:
"""
Transform given input according to the univariate polynomial
transformation
Parameters
----------
X: ndarray of shape (n_samples,)
Returns
-------
transformed: ndarray of shape (n_samples, k+1)
Vandermonde matrix of given samples up to degree k
"""
return np.vander(X, N=self.degree+1, increasing=True) | _predict |
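The __transform docstring above describes the expansion as a Vandermonde matrix of the samples up to degree k. A quick look at what np.vander produces for k = 2 (so N = k + 1 = 3):

import numpy as np

X = np.array([1.0, 2.0, 3.0])
print(np.vander(X, N=3, increasing=True))
# [[1. 1. 1.]
#  [1. 2. 4.]
#  [1. 3. 9.]]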
individual.ts | import { Entity } from '../entity';
export class Individual extends Entity {
| email : string
password: string
pict: string[]
getCin(formated: boolean = false): string {
if (formated)
return this.cin.split(' - ').join('')
else
return this.cin
}
setCin(cin: string , formated: boolean = false) {
if (formated)
this.cin = cin.split(' - ').join('')
else
this.cin = cin
}
setPhone(phone: string ,formated: boolean = false) {
if(formated)
this.phone = phone.split(' ').join('')
else
this.phone = phone
}
} | cin: string
url: string
phone: string |