file_name stringlengths 3–137 | prefix stringlengths 0–918k | suffix stringlengths 0–962k | middle stringlengths 0–812k
---|---|---|---|
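The four columns above describe a fill-in-the-middle split of each source file: every row carries the file name plus a prefix, a suffix, and the middle chunk that was cut out between them (in the first row, for example, the middle supplies the body of TestMonthsNarrow that sits between the prefix's final function signature and the suffix's TestMonthsWide). Below is a minimal sketch of putting a row back together under that assumption; the field names come from the header, while the helper name and the toy row are purely illustrative.

```python
from typing import Dict


def reassemble(row: Dict[str, str]) -> str:
    """Rebuild the original file from one fill-in-the-middle row.

    Assumes the row has the columns named in the header (file_name,
    prefix, suffix, middle) and that the original file is simply
    prefix + middle + suffix, with the middle filling the gap that was
    cut out between prefix and suffix.
    """
    return row["prefix"] + row["middle"] + row["suffix"]


# Toy usage with a made-up row (not taken from the rows below):
example_row = {
    "file_name": "hello.go",
    "prefix": "package main\n\nfunc main() ",
    "middle": '{\n\tprintln("hi")\n}',
    "suffix": "\n",
}
assert reassemble(example_row).startswith("package main")
```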
hy_AM_test.go | package hy_AM
import (
"testing"
"time"
"github.com/haiyiyun/validator/locales"
"github.com/haiyiyun/validator/locales/currency"
)
func TestLocale(t *testing.T) {
trans := New()
expected := "hy_AM"
if trans.Locale() != expected {
t.Errorf("Expected '%s' Got '%s'", expected, trans.Locale())
}
}
func TestPluralsRange(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsRange()
// expected := 1
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsOrdinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleTwo,
// },
// {
// expected: locales.PluralRuleFew,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsOrdinal()
// expected := 4
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsCardinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsCardinal()
// expected := 2
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestRangePlurals(t *testing.T) {
trans := New()
tests := []struct {
num1 float64
v1 uint64
num2 float64
v2 uint64
expected locales.PluralRule
}{
// {
// num1: 1,
// v1: 1,
// num2: 2,
// v2: 2,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.RangePluralRule(tt.num1, tt.v1, tt.num2, tt.v2)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestOrdinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 2,
// v: 0,
// expected: locales.PluralRuleTwo,
// },
// {
// num: 3,
// v: 0,
// expected: locales.PluralRuleFew,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.OrdinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestCardinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.CardinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestDaysAbbreviated(t *testing.T) {
trans := New()
days := trans.WeekdaysAbbreviated()
for i, day := range days {
s := trans.WeekdayAbbreviated(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sun",
// },
// {
// idx: 1,
// expected: "Mon",
// },
// {
// idx: 2,
// expected: "Tue",
// },
// {
// idx: 3,
// expected: "Wed",
// },
// {
// idx: 4,
// expected: "Thu",
// },
// {
// idx: 5,
// expected: "Fri",
// },
// {
// idx: 6,
// expected: "Sat",
// },
}
for _, tt := range tests {
s := trans.WeekdayAbbreviated(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysNarrow(t *testing.T) {
trans := New()
days := trans.WeekdaysNarrow()
for i, day := range days {
s := trans.WeekdayNarrow(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", string(day), s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "S",
// },
// {
// idx: 1,
// expected: "M",
// },
// {
// idx: 2,
// expected: "T",
// },
// {
// idx: 3,
// expected: "W",
// },
// {
// idx: 4,
// expected: "T",
// },
// {
// idx: 5,
// expected: "F",
// },
// {
// idx: 6,
// expected: "S",
// },
}
for _, tt := range tests {
s := trans.WeekdayNarrow(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysShort(t *testing.T) {
trans := New()
days := trans.WeekdaysShort()
for i, day := range days {
s := trans.WeekdayShort(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Su",
// },
// {
// idx: 1,
// expected: "Mo",
// },
// {
// idx: 2,
// expected: "Tu",
// },
// {
// idx: 3,
// expected: "We",
// },
// {
// idx: 4,
// expected: "Th",
// },
// {
// idx: 5,
// expected: "Fr",
// },
// {
// idx: 6,
// expected: "Sa",
// },
}
for _, tt := range tests {
s := trans.WeekdayShort(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysWide(t *testing.T) {
trans := New()
days := trans.WeekdaysWide()
for i, day := range days {
s := trans.WeekdayWide(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sunday",
// },
// {
// idx: 1,
// expected: "Monday",
// },
// {
// idx: 2,
// expected: "Tuesday",
// },
// {
// idx: 3,
// expected: "Wednesday",
// },
// {
// idx: 4,
// expected: "Thursday",
// },
// {
// idx: 5,
// expected: "Friday",
// },
// {
// idx: 6,
// expected: "Saturday",
// },
}
for _, tt := range tests {
s := trans.WeekdayWide(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsAbbreviated(t *testing.T) {
trans := New()
months := trans.MonthsAbbreviated()
for i, month := range months {
s := trans.MonthAbbreviated(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "Jan",
// },
// {
// idx: 2,
// expected: "Feb",
// },
// {
// idx: 3,
// expected: "Mar",
// },
// {
// idx: 4,
// expected: "Apr",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "Jun",
// },
// {
// idx: 7,
// expected: "Jul",
// },
// {
// idx: 8,
// expected: "Aug",
// },
// {
// idx: 9,
// expected: "Sep",
// },
// {
// idx: 10,
// expected: "Oct",
// },
// {
// idx: 11,
// expected: "Nov",
// },
// {
// idx: 12,
// expected: "Dec",
// },
}
for _, tt := range tests {
s := trans.MonthAbbreviated(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsNarrow(t *testing.T) |
func TestMonthsWide(t *testing.T) {
trans := New()
months := trans.MonthsWide()
for i, month := range months {
s := trans.MonthWide(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "January",
// },
// {
// idx: 2,
// expected: "February",
// },
// {
// idx: 3,
// expected: "March",
// },
// {
// idx: 4,
// expected: "April",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "June",
// },
// {
// idx: 7,
// expected: "July",
// },
// {
// idx: 8,
// expected: "August",
// },
// {
// idx: 9,
// expected: "September",
// },
// {
// idx: 10,
// expected: "October",
// },
// {
// idx: 11,
// expected: "November",
// },
// {
// idx: 12,
// expected: "December",
// },
}
for _, tt := range tests {
s := string(trans.MonthWide(time.Month(tt.idx)))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeFull(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
// fixed := time.FixedZone("OTHER", -4)
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am Eastern Standard Time",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, fixed),
// expected: "8:05:01 pm OTHER",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeLong(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am EST",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, loc),
// expected: "8:05:01 pm EST",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05:01 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05:01 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateFull(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Wednesday, February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateLong(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Feb 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/16",
// },
// {
// t: time.Date(-500, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/500",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtNumber(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// expected: "1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// expected: "1,123,456.6",
// },
// {
// num: 221123456.5643,
// v: 3,
// expected: "221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtNumber(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtCurrency(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "-$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "-CAD 221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtCurrency(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtAccounting(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "($221,123,456.564)",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "(CAD 221,123,456.564)",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtAccounting(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtPercent(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 15,
// v: 0,
// expected: "15%",
// },
// {
// num: 15,
// v: 2,
// expected: "15.00%",
// },
// {
// num: 434.45,
// v: 0,
// expected: "434%",
// },
// {
// num: 34.4,
// v: 2,
// expected: "34.40%",
// },
// {
// num: -34,
// v: 0,
// expected: "-34%",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtPercent(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
| {
trans := New()
months := trans.MonthsNarrow()
for i, month := range months {
s := trans.MonthNarrow(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "J",
// },
// {
// idx: 2,
// expected: "F",
// },
// {
// idx: 3,
// expected: "M",
// },
// {
// idx: 4,
// expected: "A",
// },
// {
// idx: 5,
// expected: "M",
// },
// {
// idx: 6,
// expected: "J",
// },
// {
// idx: 7,
// expected: "J",
// },
// {
// idx: 8,
// expected: "A",
// },
// {
// idx: 9,
// expected: "S",
// },
// {
// idx: 10,
// expected: "O",
// },
// {
// idx: 11,
// expected: "N",
// },
// {
// idx: 12,
// expected: "D",
// },
}
for _, tt := range tests {
s := trans.MonthNarrow(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
} |
autotag.py | # Copyright 2017-2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import EventAction
from c7n.exceptions import PolicyValidationError
from c7n.manager import resources
from c7n import utils
class AutoTagUser(EventAction):
"""Tag a resource with the user who created/modified it.
.. code-block:: yaml
policies:
- name: ec2-auto-tag-ownercontact | Triggered when a new EC2 Instance is launched. Checks to see if
it's missing the OwnerContact tag. If missing it gets created
with the value of the ID of whomever called the RunInstances API
mode:
type: cloudtrail
role: arn:aws:iam::123456789000:role/custodian-auto-tagger
events:
- RunInstances
filters:
- tag:OwnerContact: absent
actions:
- type: auto-tag-user
tag: OwnerContact
There are a number of caveats to usage. Resources which don't
include tagging as part of their API may see some delay before
automation kicks in to create a tag. Real-world delay may be several
minutes, with the worst case running into hours[0]. This creates a race
condition between auto tagging and automation.
In practice this window is on the order of a fraction of a second, as
we fetch the resource and evaluate the presence of the tag before
attempting to tag it.
References
CloudTrail User
https://docs.aws.amazon.com/awscloudtrail/latest/userguide/cloudtrail-event-reference-user-identity.html
""" # NOQA
schema_alias = True
schema = utils.type_schema(
'auto-tag-user',
required=['tag'],
**{'user-type': {
'type': 'array',
'items': {'type': 'string',
'enum': [
'IAMUser',
'AssumedRole',
'FederatedUser'
]}},
'update': {'type': 'boolean'},
'tag': {'type': 'string'},
'principal_id_tag': {'type': 'string'}
}
)
def get_permissions(self):
return self.manager.action_registry.get(
'tag')({}, self.manager).get_permissions()
def validate(self):
if self.manager.data.get('mode', {}).get('type') != 'cloudtrail':
raise PolicyValidationError(
"Auto tag owner requires an event %s" % (self.manager.data,))
if self.manager.action_registry.get('tag') is None:
raise PolicyValidationError(
"Resource does not support tagging %s" % (self.manager.data,))
if 'tag' not in self.data:
raise PolicyValidationError(
"auto-tag action requires 'tag'")
return self
def process(self, resources, event):
if event is None:
return
event = event['detail']
utype = event['userIdentity']['type']
if utype not in self.data.get('user-type', ['AssumedRole', 'IAMUser', 'FederatedUser']):
return
user = None
if utype == "IAMUser":
user = event['userIdentity']['userName']
principal_id_value = event['userIdentity'].get('principalId', '')
elif utype == "AssumedRole" or utype == "FederatedUser":
user = event['userIdentity']['arn']
prefix, user = user.rsplit('/', 1)
principal_id_value = event['userIdentity'].get('principalId', '').split(':')[0]
# instance role
if user.startswith('i-'):
return
# lambda function (old style)
elif user.startswith('awslambda'):
return
if user is None:
return
# if the auto-tag-user policy sets update to False (or it's unset) then we
# will skip writing the UserName tag and not overwrite pre-existing values
if not self.data.get('update', False):
untagged_resources = []
# iterating over all the resources the user spun up in this event
for resource in resources:
tag_already_set = False
for tag in resource.get('Tags', ()):
if tag['Key'] == self.data['tag']:
tag_already_set = True
break
if not tag_already_set:
untagged_resources.append(resource)
# if update is set to True, we will overwrite the userName tag even if
# the user already set a value
else:
untagged_resources = resources
tag_action = self.manager.action_registry.get('tag')
new_tags = {
self.data['tag']: user
}
# if a principal_id_tag key is configured (and we have a principal id value), we'll set the principalId tag.
principal_id_key = self.data.get('principal_id_tag', None)
if principal_id_key and principal_id_value:
new_tags[principal_id_key] = principal_id_value
for key, value in new_tags.items():
tag_action({'key': key, 'value': value}, self.manager).process(untagged_resources)
return new_tags
@classmethod
def register_resource(cls, registry, resource_class):
if 'auto-tag-user' in resource_class.action_registry:
return
if resource_class.action_registry.get('tag'):
resource_class.action_registry.register('auto-tag-user', AutoTagUser)
resources.subscribe(AutoTagUser.register_resource) | resource: ec2
description: | |
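The process() method in the autotag.py row above derives the tag value from the CloudTrail event's userIdentity block: IAM users are tagged by user name, while assumed roles and federated users are tagged by the last segment of the ARN, skipping EC2 instance roles and old-style lambda sessions. Here is a small illustrative sketch of that derivation; the event content and the helper name are made up for the example, and only the branching mirrors the code above.

```python
def derive_tag_user(event_detail):
    """Mirror of the userIdentity handling in AutoTagUser.process (illustrative only)."""
    identity = event_detail["userIdentity"]
    utype = identity["type"]
    if utype == "IAMUser":
        # IAM users are tagged with their user name.
        return identity["userName"], identity.get("principalId", "")
    if utype in ("AssumedRole", "FederatedUser"):
        user = identity["arn"].rsplit("/", 1)[1]
        # Skip EC2 instance roles and old-style lambda sessions, as the action does.
        if user.startswith("i-") or user.startswith("awslambda"):
            return None, ""
        return user, identity.get("principalId", "").split(":")[0]
    return None, ""


# Hand-made CloudTrail-style detail, not a real record:
detail = {
    "userIdentity": {
        "type": "AssumedRole",
        "arn": "arn:aws:sts::123456789000:assumed-role/dev-role/alice",
        "principalId": "AROAEXAMPLEID:alice",
    }
}
print(derive_tag_user(detail))  # ('alice', 'AROAEXAMPLEID')
```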
test_hardware_tapes_devices.py | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 4
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_0_1
from isi_sdk_8_0_1.models.hardware_tapes_devices import HardwareTapesDevices # noqa: E501
from isi_sdk_8_0_1.rest import ApiException
class TestHardwareTapesDevices(unittest.TestCase):
"""HardwareTapesDevices unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testHardwareTapesDevices(self):
|
if __name__ == '__main__':
unittest.main()
| """Test HardwareTapesDevices"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_0_1.models.hardware_tapes_devices.HardwareTapesDevices() # noqa: E501
pass |
app.module.ts | import {BrowserModule} from '@angular/platform-browser';
import {NgModule} from '@angular/core';
import {AppRoutingModule} from './app-routing.module';
import {AppComponent} from './app.component';
import {IconsProviderModule} from './icons-provider.module';
import {NzLayoutModule} from 'ng-zorro-antd/layout';
import {NzMenuModule} from 'ng-zorro-antd/menu';
import {FormsModule} from '@angular/forms';
import {HttpClientModule} from '@angular/common/http';
import {BrowserAnimationsModule} from '@angular/platform-browser/animations';
import {NZ_I18N} from 'ng-zorro-antd/i18n';
import {nl_BE} from 'ng-zorro-antd/i18n';
import {registerLocaleData} from '@angular/common';
import nl from '@angular/common/locales/nl';
import {SiderComponent} from './pages/components/sider.component';
registerLocaleData(nl);
@NgModule({
declarations: [
AppComponent,
SiderComponent,
],
imports: [
BrowserModule,
AppRoutingModule,
IconsProviderModule,
NzLayoutModule,
NzMenuModule,
FormsModule,
HttpClientModule,
BrowserAnimationsModule
],
providers: [{provide: NZ_I18N, useValue: nl_BE}],
bootstrap: [AppComponent]
})
export class | {
}
| AppModule |
ris.py | ###
# Copyright 2016 Hewlett Packard Enterprise, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
# -*- coding: utf-8 -*-
"""RIS implementation"""
#---------Imports---------
import re
import sys
import logging
import threading
import urlparse2 #pylint warning disable
from Queue import Queue
from collections import (OrderedDict)
import jsonpath_rw
import jsonpointer
from jsonpointer import set_pointer
import redfish.rest.v1
from redfish.ris.sharedtypes import Dictable
#---------End of imports---------
#---------Debug logger---------
LOGGER = logging.getLogger(__name__)
#---------End of debug logger---------
class BiosUnregisteredError(Exception):
"""Raised when BIOS has not been registered correctly in iLO"""
pass
class SessionExpiredRis(Exception):
"""Raised when session has expired"""
pass
class RisMonolithMemberBase(Dictable):
"""RIS monolith member base class"""
pass
class RisMonolithMemberv100(RisMonolithMemberBase):
"""Wrapper around RestResponse that adds the monolith data"""
def __init__(self, restresp, isredfish):
self._resp = restresp
self._patches = list()
self._type = None
if isredfish:
self._typestring = u'@odata.type'
else:
self._typestring = u'Type'
def _get_type(self):
"""Return type from monolith"""
if self._typestring in self._resp.dict:
return self._resp.dict[self._typestring]
elif u'type' in self._resp.dict:
return self._resp.dict[u'type']
return None
type = property(_get_type, None)
def _get_maj_type(self):
"""Return maj type from monolith"""
if self.type:
return self.type[:-4]
return None
maj_type = property(_get_maj_type, None)
def _get_resp(self):
"""Return resp from monolith"""
return self._resp
resp = property(_get_resp, None)
def _get_patches(self):
"""Return patches from monolith"""
return self._patches
patches = property(_get_patches, None)
def to_dict(self):
"""Convert monolith to dict"""
result = OrderedDict()
if self.type:
result[u'Type'] = self.type
if self.maj_type == u'Collection.1' and \
u'MemberType' in self._resp.dict:
result[u'MemberType'] = self._resp.dict[u'MemberType']
result[u'links'] = OrderedDict()
result[u'links'][u'href'] = ''
headers = dict()
for header in self._resp.getheaders():
headers[header[0]] = header[1]
result[u'Headers'] = headers
if 'etag' in headers:
result[u'ETag'] = headers['etag']
result[u'OriginalUri'] = self._resp.request.path
result[u'Content'] = self._resp.dict
result[u'Patches'] = self._patches
return result
def load_from_dict(self, src):
"""Load variables from dict monolith
:param src: source to load from
:type src: dict
"""
if u'Type' in src:
self._type = src[u'Type']
restreq = redfish.rest.v1.RestRequest(method='GET', \
path=src[u'OriginalUri'])
src['restreq'] = restreq
self._resp = redfish.rest.v1.StaticRestResponse(**src)
self._patches = src[u'Patches']
def _reducer(self, indict, breadcrumbs=None, outdict=OrderedDict()):
"""Monolith reducer
:param indict: input dictionary.
:type indict: dict.
:param breadcrumbs: breadcrumbs from previous operations.
:type breadcrumbs: dict.
:param outdict: expected output format.
:type outdict: dictionary type.
:returns: returns outdict
"""
if breadcrumbs is None:
breadcrumbs = []
if isinstance(indict, dict):
for key, val in indict.items():
breadcrumbs.append(key) # push
if isinstance(val, dict):
self._reducer(val, breadcrumbs, outdict)
elif isinstance(val, list) or isinstance(val, tuple):
for i in range(0, len(val)):
breadcrumbs.append(u'%s' % i) # push
self._reducer(val[i], breadcrumbs, outdict)
del breadcrumbs[-1] # pop
elif isinstance(val, tuple):
self._reducer(val, breadcrumbs, outdict)
else:
self._reducer(val, breadcrumbs, outdict)
del breadcrumbs[-1] # pop
else:
outkey = '/'.join(breadcrumbs)
outdict[outkey] = indict
return outdict
def _jsonpath_reducer(self, indict, breadcrumbs=None, \
outdict=OrderedDict()):
"""JSON Path Reducer
:param indict: input dictionary.
:type indict: dict.
:param breadcrumbs: breadcrumbs from previous operations.
:type breadcrumbs: dict.
:param outdict: expected output format.
:type outdict: dictionary type.
:returns: returns outdict
"""
if breadcrumbs is None:
breadcrumbs = []
if isinstance(indict, dict):
for key, val in indict.items():
breadcrumbs.append(key) # push
if isinstance(val, dict):
self._reducer(val, breadcrumbs, outdict)
elif isinstance(val, list) or isinstance(val, tuple):
for i in range(0, len(val)):
breadcrumbs.append(u'[%s]' % i) # push
self._reducer(val[i], breadcrumbs, outdict)
del breadcrumbs[-1] # pop
elif isinstance(val, tuple):
self._reducer(val, breadcrumbs, outdict)
else:
self._reducer(val, breadcrumbs, outdict)
del breadcrumbs[-1] # pop
else:
outkey = '.'.join(breadcrumbs)
outkey = outkey.replace(u'.[', u'[')
outdict[outkey] = indict
return outdict
def reduce(self):
"""Returns a "flatten" dict with nested data represented in
JSONpath notation"""
result = OrderedDict()
if self.type:
result[u'Type'] = self.type
if self.maj_type == u'Collection.1' and \
u'MemberType' in self._resp.dict:
result[u'MemberType'] = self._resp.dict[u'MemberType']
self._reducer(self._resp.dict)
result[u'OriginalUri'] = self._resp.request.path
result[u'Content'] = self._reducer(self._resp.dict)
return result
class RisMonolithv100(Dictable):
"""Monolithic cache of RIS data"""
def __init__(self, client):
"""Initialize RisMonolith
:param client: client to utilize
:type client: RmcClient object
"""
self._client = client
self.name = u"Monolithic output of RIS Service"
self.types = OrderedDict()
self._visited_urls = list()
self._current_location = '/' # "root"
self.queue = Queue()
self._type = None
self._name = None
self.progress = 0
self.reload = False
self.is_redfish = client._rest_client.is_redfish
if self.is_redfish:
self._resourcedir = '/redfish/v1/ResourceDirectory/'
self._typestring = u'@odata.type'
self._hrefstring = u'@odata.id'
else:
self._resourcedir = '/rest/v1/ResourceDirectory'
self._typestring = u'Type'
self._hrefstring = u'href'
def _get_type(self):
"""Return monolith version type"""
return u"Monolith.1.0.0"
type = property(_get_type, None)
def update_progress(self):
"""Simple function to increment the dot progress"""
if self.progress % 6 == 0:
sys.stdout.write('.')
def get_visited_urls(self):
"""Return the visited URLS"""
return self._visited_urls
def set_visited_urls(self, visited_urls):
"""Set visited URLS to given list."""
self._visited_urls = visited_urls
def load(self, path=None, includelogs=False, skipinit=False, \
skipcrawl=False, loadtype='href', loadcomplete=False):
"""Walk entire RIS model and cache all responses in self.
:param path: path to start load from.
:type path: str.
:param includelogs: flag to determine if logs should be downloaded also.
:type includelogs: boolean.
:param skipinit: flag to determine if first run of load.
:type skipinit: boolean.
:param skipcrawl: flag to determine if load should traverse found links.
:type skipcrawl: boolean.
:param loadtype: flag to determine if load is meant for only href items.
:type loadtype: str.
:param loadcomplete: flag to download the entire monolith
:type loadcomplete: boolean
"""
if not skipinit:
if LOGGER.getEffectiveLevel() == 40:
sys.stdout.write("Discovering data...")
else:
LOGGER.warning("Discovering data...")
self.name = self.name + u' at %s' % self._client.base_url
if not self.types:
self.types = OrderedDict()
if not threading.active_count() >= 6:
for _ in range(5):
workhand = SuperDuperWorker(self.queue)
workhand.setDaemon(True)
workhand.start()
selectivepath = path
if not selectivepath:
selectivepath = self._client._rest_client.default_prefix
self._load(selectivepath, skipcrawl=skipcrawl, includelogs=includelogs,\
skipinit=skipinit, loadtype=loadtype, loadcomplete=loadcomplete)
self.queue.join()
if not skipinit:
if LOGGER.getEffectiveLevel() == 40:
sys.stdout.write("Done\n")
else:
LOGGER.warning("Done\n")
def _load(self, path, skipcrawl=False, originaluri=None, includelogs=False,\
skipinit=False, loadtype='href', loadcomplete=False):
"""Helper function to main load function.
:param path: path to start load from.
:type path: str.
:param skipcrawl: flag to determine if load should traverse found links.
:type skipcrawl: boolean.
:param originaluri: variable to assist in determining originating path.
:type originaluri: str.
:param includelogs: flag to determine if logs should be downloaded also.
:type includelogs: boolean.
:param skipinit: flag to determine if first run of load.
:type skipinit: boolean.
:param loadtype: flag to determine if load is meant for only href items.
:type loadtype: str.
:param loadcomplete: flag to download the entire monolith
:type loadcomplete: boolean
"""
if path.endswith("?page=1"):
return
elif not includelogs:
if "/Logs/" in path:
return
#TODO: need to find a better way to support non ascii characters
path = path.replace("|", "%7C")
#remove fragments
newpath = urlparse2.urlparse(path)
newpath.fragment = ''
path = urlparse2.urlunparse(newpath)
LOGGER.debug(u'_loading %s', path)
if not self.reload:
if path.lower() in self._visited_urls:
return
resp = self._client.get(path)
if resp.status != 200 and path.lower() == self._client.typepath.defs.\
biospath:
raise BiosUnregisteredError()
elif resp.status != 200:
path = path + '/'
resp = self._client.get(path)
if resp.status == 401:
raise SessionExpiredRis("Invalid session. Please logout and "\
"log back in or include credentials.")
elif resp.status != 200:
return
if loadtype == "ref":
self.parse_schema(resp)
self.queue.put((resp, path, skipinit, self))
if loadtype == 'href':
#follow all the href attributes
if self.is_redfish:
jsonpath_expr = jsonpath_rw.parse(u"$..'@odata.id'")
else:
jsonpath_expr = jsonpath_rw.parse(u'$..href')
matches = jsonpath_expr.find(resp.dict)
if 'links' in resp.dict and 'NextPage' in resp.dict['links']:
if originaluri:
next_link_uri = originaluri + '?page=' + \
str(resp.dict['links']['NextPage']['page'])
href = u'%s' % next_link_uri
self._load(href, originaluri=originaluri, \
includelogs=includelogs, skipcrawl=skipcrawl, \
skipinit=skipinit)
else:
next_link_uri = path + '?page=' + \
str(resp.dict['links']['NextPage']['page'])
href = u'%s' % next_link_uri
self._load(href, originaluri=path, includelogs=includelogs,\
skipcrawl=skipcrawl, skipinit=skipinit)
(newversion, dirmatch) = self.check_for_directory(matches)
if not newversion and not skipcrawl:
for match in matches:
if path == "/rest/v1":
if str(match.full_path) == "links.Schemas.href" or \
str(match.full_path) == "links.Registries.href":
continue
else:
if str(match.full_path) == "[email protected]" or \
str(match.full_path) == "[email protected]":
continue
if match.value == path:
continue
href = u'%s' % match.value
self._load(href, skipcrawl=skipcrawl, \
originaluri=originaluri, includelogs=includelogs, \
skipinit=skipinit)
elif not skipcrawl:
href = u'%s' % dirmatch.value
self._load(href, skipcrawl=skipcrawl, originaluri=originaluri, \
includelogs=includelogs, skipinit=skipinit)
if loadcomplete:
for match in matches:
self._load(match.value, skipcrawl=skipcrawl, originaluri=\
originaluri, includelogs=includelogs, skipinit=skipinit)
def parse_schema(self, resp):
"""Function to get and replace schema $ref with data
:param resp: response data containing ref items.
:type resp: str.
"""
#pylint: disable=maybe-no-member
jsonpath_expr = jsonpath_rw.parse(u'$.."$ref"')
matches = jsonpath_expr.find(resp.dict)
respcopy = resp.dict
listmatch = None
if matches:
for match in matches:
fullpath = str(match.full_path)
jsonfile = match.value.split('#')[0]
jsonpath = match.value.split('#')[1]
if '@odata' in fullpath:
schemapath = '/' + fullpath.replace('@odata.', '~').\
replace('.', '/').replace('~', '@odata.')
else:
schemapath = '/' + fullpath.replace('.', '/')
if '.json' in jsonfile:
itempath = schemapath
if self.is_redfish:
if resp.request.path[-1] == '/':
newpath = '/'.join(resp.request.path.split('/')\
[:-2]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')\
[:-1]) + '/' + jsonfile + '/'
else:
newpath = '/'.join(resp.request.path.split('/')[:-1]) \
+ '/' + jsonfile
if 'href.json' in newpath:
continue
if not newpath.lower() in self._visited_urls:
self.load(newpath, skipcrawl=True, includelogs=False, \
skipinit=True, loadtype='ref')
instance = list()
if u'st' in self.types:
for stitem in self.types[u'st'][u'Instances']:
instance.append(stitem)
if u'ob' in self.types:
for obitem in self.types[u'ob'][u'Instances']:
instance.append(obitem)
for item in instance:
if jsonfile in item.resp._rest_request._path:
if 'anyOf' in fullpath:
break
dictcopy = item.resp.dict
listmatch = re.search('[[][0-9]+[]]', itempath)
if listmatch:
start = listmatch.regs[0][0]
end = listmatch.regs[0][1]
newitempath = [itempath[:start], itempath[end:]]
start = jsonpointer.JsonPointer(newitempath[0])
end = jsonpointer.JsonPointer(newitempath[1])
del start.parts[-1], end.parts[-1]
vals = start.resolve(respcopy)
count = 0
for val in vals:
try:
if '$ref' in end.resolve(val).iterkeys():
end.resolve(val).pop('$ref')
end.resolve(val).update(dictcopy)
replace_pointer = jsonpointer.\
JsonPointer(end.path + jsonpath)
data = replace_pointer.resolve(val)
set_pointer(val, end.path, data)
start.resolve(respcopy)[count].\
update(val)
break
except:
count += 1
else:
itempath = jsonpointer.JsonPointer(itempath)
del itempath.parts[-1]
if '$ref' in itempath.resolve(respcopy).\
iterkeys():
itempath.resolve(respcopy).pop('$ref')
itempath.resolve(respcopy).update(dictcopy)
if jsonpath:
if 'anyOf' in fullpath:
continue
if not jsonfile:
replacepath = jsonpointer.JsonPointer(jsonpath)
schemapath = schemapath.replace('/$ref', '')
schemapath = jsonpointer.JsonPointer(schemapath)
data = replacepath.resolve(respcopy)
if '$ref' in schemapath.resolve(respcopy):
schemapath.resolve(respcopy).pop('$ref')
schemapath.resolve(respcopy).update(data)
else:
if not listmatch:
schemapath = schemapath.replace('/$ref', '')
replacepath = schemapath + jsonpath
replace_pointer = jsonpointer.\
JsonPointer(replacepath)
data = replace_pointer.resolve(respcopy)
set_pointer(respcopy, schemapath, data)
resp.json(respcopy)
else:
resp.json(respcopy)
def check_for_directory(self, matches):
"""Function to allow checking for new directory
:param matches: current found matches.
:type matches: dict.
"""
for match in matches:
if match.value == self._resourcedir:
return (True, match)
return (False, None)
def branch_worker(self, resp, path, skipinit):
"""Helper for load function, creates threaded worker
:param resp: response received.
:type resp: str.
:param path: path correlating to the response.
:type path: str.
:param skipinit: flag to determine if progress bar should be updated.
:type skipinit: boolean.
"""
self._visited_urls.append(path.lower())
member = RisMonolithMemberv100(resp, self.is_redfish)
if not member.type:
return
self.update_member(member)
if not skipinit:
self.progress += 1
if LOGGER.getEffectiveLevel() == 40:
self.update_progress()
def update_member(self, member):
"""Adds member to this monolith. If the member already exists the
data is updated in place.
:param member: Ris monolith member object made by branch worker.
:type member: RisMonolithMemberv100.
"""
if member.maj_type not in self.types:
self.types[member.maj_type] = OrderedDict()
self.types[member.maj_type][u'Instances'] = list()
found = False
for indices in xrange(len(self.types[member.maj_type][u'Instances'])):
inst = self.types[member.maj_type][u'Instances'][indices]
if inst.resp.request.path == member.resp.request.path:
self.types[member.maj_type][u'Instances'][indices] = member
self.types[member.maj_type][u'Instances'][indices].patches.\
extend([patch for patch in inst.patches])
found = True
break
if not found:
self.types[member.maj_type][u'Instances'].append(member)
def load_from_dict(self, src):
"""Load data to monolith from dict
:param src: data receive from rest operation.
:type src: str.
"""
self._type = src[u'Type']
self._name = src[u'Name']
self.types = OrderedDict()
for typ in src[u'Types']:
for inst in typ[u'Instances']:
member = RisMonolithMemberv100(None, self.is_redfish)
member.load_from_dict(inst)
self.update_member(member)
return
def to_dict(self):
"""Convert data to monolith from dict"""
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
type_entry[u'Instances'] = list()
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'].append(inst.to_dict())
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def reduce(self):
"""Reduce monolith data"""
result = OrderedDict()
result[u'Type'] = self.type
result[u'Name'] = self.name
types_list = list()
for typ in self.types.keys():
type_entry = OrderedDict()
type_entry[u'Type'] = typ
for inst in self.types[typ][u'Instances']:
type_entry[u'Instances'] = inst.reduce()
types_list.append(type_entry)
result[u'Types'] = types_list
return result
def _jsonpath2jsonpointer(self, instr):
"""Convert json path to json pointer
:param instr: input path to be converted to pointer.
:type instr: str.
"""
outstr = instr.replace('.[', '[')
outstr = outstr.replace('[', '/')
outstr = outstr.replace(']', '/')
if outstr.endswith('/'):
outstr = outstr[:-1]
return outstr
def _get_current_location(self):
"""Return current location"""
return self._current_location
def _set_current_location(self, newval):
"""Set current location"""
self._current_location = newval
location = property(_get_current_location, _set_current_location)
def list(self, lspath=None):
"""Function for list command
:param lspath: path list.
:type lspath: list.
"""
results = list()
path_parts = [u'Types'] # Types is always assumed
if isinstance(lspath, list) and len(lspath) > 0:
lspath = lspath[0]
path_parts.extend(lspath.split(u'/'))
elif not lspath:
lspath = u'/'
else:
path_parts.extend(lspath.split(u'/'))
| for path_part in path_parts:
if not path_part:
continue
if isinstance(currpos, RisMonolithMemberv100):
break
elif isinstance(currpos, dict) and path_part in currpos:
currpos = currpos[path_part]
elif isinstance(currpos, list):
for positem in currpos:
if u'Type' in positem and path_part == positem[u'Type']:
currpos = positem
break
results.append(currpos)
return results
def killthreads(self):
"""Function to kill threads on logout"""
#TODO: revisit to make sure this is correct
threads = []
for thread in threading.enumerate():
if isinstance(thread, SuperDuperWorker):
self.queue.put(('KILL', 'KILL', 'KILL', 'KILL'))
threads.append(thread)
for thread in threads:
thread.join()
class RisMonolith(RisMonolithv100):
"""Latest implementation of RisMonolith"""
def __init__(self, client):
"""Initialize Latest RisMonolith
:param client: client to utilize
:type client: RmcClient object
"""
super(RisMonolith, self).__init__(client)
class SuperDuperWorker(threading.Thread):
"""Recursive worker implementation"""
def __init__(self, queue):
"""Initialize SuperDuperWorker
:param queue: queue for worker
:type queue: Queue object
"""
threading.Thread.__init__(self)
self.queue = queue
def run(self):
"""Thread creator"""
while True:
(resp, path, skipinit, thobj) = self.queue.get()
if resp == 'KILL' and path == 'KILL' and skipinit == 'KILL' and\
thobj == 'KILL':
break
thobj.branch_worker(resp, path, skipinit)
self.queue.task_done() |
currpos = self.to_dict()
|
hash.rs | use anyhow::{Context, Error};
use crc::{crc64, crc64::Digest, Hasher64};
use std::io;
use swc_common::{sync::Lrc, SourceMap, Span};
use swc_ecma_ast::Module;
use swc_ecma_codegen::{text_writer::WriteJs, Emitter};
pub(crate) fn calc_hash(cm: Lrc<SourceMap>, m: &Module) -> Result<String, Error> {
let digest = crc64::Digest::new(crc64::ECMA);
let mut buf = Hasher { digest };
{
let mut emitter = Emitter {
cfg: Default::default(),
cm,
comments: None,
wr: Box::new(&mut buf) as Box<dyn WriteJs>,
};
emitter
.emit_module(&m)
.context("failed to emit module to calculate hash")?;
}
//
let result = buf.digest.sum64();
Ok(radix_fmt::radix(result, 36).to_string())
}
struct Hasher {
digest: Digest,
}
impl Hasher {
fn w(&mut self, s: &str) {
self.digest.write(s.as_bytes());
}
}
impl WriteJs for &mut Hasher {
fn increase_indent(&mut self) -> io::Result<()> {
Ok(())
}
fn decrease_indent(&mut self) -> io::Result<()> {
Ok(())
}
fn write_semi(&mut self, _: Option<Span>) -> io::Result<()> {
self.w(";");
Ok(())
}
fn | (&mut self) -> io::Result<()> {
self.w(" ");
Ok(())
}
fn write_keyword(&mut self, _: Option<Span>, s: &'static str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_operator(&mut self, _: Option<Span>, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_param(&mut self, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_property(&mut self, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_line(&mut self) -> io::Result<()> {
self.w("\n");
Ok(())
}
fn write_lit(&mut self, _: Span, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_comment(&mut self, _: Span, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_str_lit(&mut self, _: Span, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_str(&mut self, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_symbol(&mut self, _: Span, s: &str) -> io::Result<()> {
self.w(s);
Ok(())
}
fn write_punct(&mut self, _: Option<Span>, s: &'static str) -> io::Result<()> {
self.w(s);
Ok(())
}
}
| write_space |
book-store.service.ts | import { Injectable } from '@angular/core';
import { HttpClient } from '@angular/common/http';
import { Observable } from 'rxjs/Observable';
import { retry, map, catchError } from 'rxjs/operators';
import 'rxjs/add/observable/throw';
import { Book } from './book';
import { BookRaw } from './book-raw';
import { BookFactory } from './book-factory';
@Injectable()
export class BookStoreService {
private api = 'https://book-monkey2-api.angular-buch.com';
constructor(private http: HttpClient) {}
getAll(): Observable<Array<Book>> {
return this.http
.get<BookRaw[]>(`${this.api}/books`)
.pipe(
retry(3),
map(rawBooks => rawBooks
.map(rawBook => BookFactory.fromObject(rawBook)),
),
catchError(this.errorHandler)
);
}
getSingle(isbn: string): Observable<Book> {
return this.http
.get<BookRaw>(`${this.api}/book/${isbn}`)
.pipe(
retry(3),
map(rawBook => BookFactory.fromObject(rawBook)),
catchError(this.errorHandler)
);
}
create(book: Book): Observable<any> {
return this.http
.post(`${this.api}/book`, book, { responseType: 'text' })
.pipe(
catchError(this.errorHandler)
);
}
update(book: Book): Observable<any> {
return this.http
.put(`${this.api}/book/${book.isbn}`, book, { responseType: 'text' })
.pipe(
catchError(this.errorHandler)
);
}
remove(isbn: string): Observable<any> {
return this.http
.delete(`${this.api}/book/${isbn}`, { responseType: 'text' })
.pipe(
catchError(this.errorHandler)
);
}
private errorHandler(error: Error | any): Observable<any> {
return Observable.throw(error);
} | getAllSearch(searchTerm: string): Observable<Array<Book>> {
return this.http
.get<BookRaw[]>(`${this.api}/books/search/${searchTerm}`)
.pipe(
retry(3),
map(rawBooks => rawBooks
.map(rawBook => BookFactory.fromObject(rawBook)),
),
catchError(this.errorHandler)
);
}
} | |
system_test.go | package api4
import (
"fmt"
"net/http"
"os"
"strings"
"testing"
l4g "github.com/alecthomas/log4go"
"github.com/mattermost/mattermost-server/model"
"github.com/stretchr/testify/assert"
)
func TestGetPing(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
goRoutineHealthThreshold := *th.App.Config().ServiceSettings.GoroutineHealthThreshold
defer func() {
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = goRoutineHealthThreshold })
}()
status, resp := Client.GetPing()
CheckNoError(t, resp)
if status != "OK" {
t.Fatal("should return OK")
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.ServiceSettings.GoroutineHealthThreshold = 10 })
status, resp = th.SystemAdminClient.GetPing()
CheckInternalErrorStatus(t, resp)
if status != "unhealthy" {
t.Fatal("should return unhealthy")
}
}
func TestGetConfig(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
_, resp := Client.GetConfig()
CheckForbiddenStatus(t, resp)
cfg, resp := th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
if len(cfg.TeamSettings.SiteName) == 0 {
t.Fatal()
}
if *cfg.LdapSettings.BindPassword != model.FAKE_SETTING && len(*cfg.LdapSettings.BindPassword) != 0 {
t.Fatal("did not sanitize properly")
}
if *cfg.FileSettings.PublicLinkSalt != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.FileSettings.AmazonS3SecretAccessKey != model.FAKE_SETTING && len(cfg.FileSettings.AmazonS3SecretAccessKey) != 0 {
t.Fatal("did not sanitize properly")
}
if cfg.EmailSettings.InviteSalt != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.EmailSettings.SMTPPassword != model.FAKE_SETTING && len(cfg.EmailSettings.SMTPPassword) != 0 {
t.Fatal("did not sanitize properly")
}
if cfg.GitLabSettings.Secret != model.FAKE_SETTING && len(cfg.GitLabSettings.Secret) != 0 {
t.Fatal("did not sanitize properly")
}
if *cfg.SqlSettings.DataSource != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if cfg.SqlSettings.AtRestEncryptKey != model.FAKE_SETTING {
t.Fatal("did not sanitize properly")
}
if !strings.Contains(strings.Join(cfg.SqlSettings.DataSourceReplicas, " "), model.FAKE_SETTING) && len(cfg.SqlSettings.DataSourceReplicas) != 0 {
t.Fatal("did not sanitize properly")
}
if !strings.Contains(strings.Join(cfg.SqlSettings.DataSourceSearchReplicas, " "), model.FAKE_SETTING) && len(cfg.SqlSettings.DataSourceSearchReplicas) != 0 {
t.Fatal("did not sanitize properly")
}
}
func TestReloadConfig(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
flag, resp := Client.ReloadConfig()
CheckForbiddenStatus(t, resp)
if flag {
t.Fatal("should not Reload the config due no permission.")
}
flag, resp = th.SystemAdminClient.ReloadConfig()
CheckNoError(t, resp)
if !flag {
t.Fatal("should Reload the config")
}
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.MaxUsersPerTeam = 50 })
th.App.UpdateConfig(func(cfg *model.Config) { *cfg.TeamSettings.EnableOpenServer = true })
}
func TestUpdateConfig(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
cfg, resp := th.SystemAdminClient.GetConfig()
CheckNoError(t, resp)
_, resp = Client.UpdateConfig(cfg)
CheckForbiddenStatus(t, resp)
SiteName := th.App.Config().TeamSettings.SiteName
cfg.TeamSettings.SiteName = "MyFancyName"
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
if len(cfg.TeamSettings.SiteName) == 0 {
t.Fatal()
} else {
if cfg.TeamSettings.SiteName != "MyFancyName" {
t.Log("It should update the SiteName")
t.Fatal()
}
}
//Revert the change
cfg.TeamSettings.SiteName = SiteName
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
if len(cfg.TeamSettings.SiteName) == 0 {
t.Fatal()
} else {
if cfg.TeamSettings.SiteName != SiteName {
t.Log("It should update the SiteName")
t.Fatal()
}
}
t.Run("Should not be able to modify PluginSettings.EnableUploads", func(t *testing.T) {
oldEnableUploads := *th.App.GetConfig().PluginSettings.EnableUploads
*cfg.PluginSettings.EnableUploads = !oldEnableUploads
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.Equal(t, oldEnableUploads, *cfg.PluginSettings.EnableUploads)
assert.Equal(t, oldEnableUploads, *th.App.GetConfig().PluginSettings.EnableUploads)
cfg.PluginSettings.EnableUploads = nil
cfg, resp = th.SystemAdminClient.UpdateConfig(cfg)
CheckNoError(t, resp)
assert.Equal(t, oldEnableUploads, *cfg.PluginSettings.EnableUploads)
assert.Equal(t, oldEnableUploads, *th.App.GetConfig().PluginSettings.EnableUploads)
})
}
func TestGetEnvironmentConfig(t *testing.T) {
os.Setenv("MM_SERVICESETTINGS_SITEURL", "http://example.mattermost.com")
os.Setenv("MM_SERVICESETTINGS_ENABLECUSTOMEMOJI", "true")
defer os.Unsetenv("MM_SERVICESETTINGS_SITEURL")
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
t.Run("as system admin", func(t *testing.T) {
SystemAdminClient := th.SystemAdminClient
envConfig, resp := SystemAdminClient.GetEnvironmentConfig()
CheckNoError(t, resp)
if serviceSettings, ok := envConfig["ServiceSettings"]; !ok {
t.Fatal("should've returned ServiceSettings")
} else if serviceSettingsAsMap, ok := serviceSettings.(map[string]interface{}); !ok {
t.Fatal("should've returned ServiceSettings as a map")
} else {
if siteURL, ok := serviceSettingsAsMap["SiteURL"]; !ok {
t.Fatal("should've returned ServiceSettings.SiteURL")
} else if siteURLAsBool, ok := siteURL.(bool); !ok {
t.Fatal("should've returned ServiceSettings.SiteURL as a boolean")
} else if !siteURLAsBool {
t.Fatal("should've returned ServiceSettings.SiteURL as true")
}
if enableCustomEmoji, ok := serviceSettingsAsMap["EnableCustomEmoji"]; !ok {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji")
} else if enableCustomEmojiAsBool, ok := enableCustomEmoji.(bool); !ok {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji as a boolean")
} else if !enableCustomEmojiAsBool {
t.Fatal("should've returned ServiceSettings.EnableCustomEmoji as true")
}
}
if _, ok := envConfig["TeamSettings"]; ok {
t.Fatal("should not have returned TeamSettings")
}
})
t.Run("as team admin", func(t *testing.T) {
TeamAdminClient := th.CreateClient()
th.LoginTeamAdminWithClient(TeamAdminClient)
_, resp := TeamAdminClient.GetEnvironmentConfig()
CheckForbiddenStatus(t, resp)
})
t.Run("as regular user", func(t *testing.T) {
Client := th.Client
_, resp := Client.GetEnvironmentConfig()
CheckForbiddenStatus(t, resp)
})
t.Run("as not-regular user", func(t *testing.T) {
Client := th.CreateClient()
_, resp := Client.GetEnvironmentConfig()
CheckUnauthorizedStatus(t, resp)
})
}
func TestGetOldClientConfig(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
config, resp := Client.GetOldClientConfig("")
CheckNoError(t, resp)
if len(config["Version"]) == 0 {
t.Fatal("config not returned correctly")
}
Client.Logout()
_, resp = Client.GetOldClientConfig("")
CheckNoError(t, resp)
if _, err := Client.DoApiGet("/config/client", ""); err == nil || err.StatusCode != http.StatusNotImplemented {
t.Fatal("should have errored with 501")
}
if _, err := Client.DoApiGet("/config/client?format=junk", ""); err == nil || err.StatusCode != http.StatusBadRequest {
t.Fatal("should have errored with 400")
}
}
func TestGetOldClientLicense(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
license, resp := Client.GetOldClientLicense("")
CheckNoError(t, resp)
if len(license["IsLicensed"]) == 0 {
t.Fatal("license not returned correctly")
}
Client.Logout()
_, resp = Client.GetOldClientLicense("")
CheckNoError(t, resp)
if _, err := Client.DoApiGet("/license/client", ""); err == nil || err.StatusCode != http.StatusNotImplemented {
t.Fatal("should have errored with 501")
}
if _, err := Client.DoApiGet("/license/client?format=junk", ""); err == nil || err.StatusCode != http.StatusBadRequest {
t.Fatal("should have errored with 400")
}
license, resp = th.SystemAdminClient.GetOldClientLicense("")
CheckNoError(t, resp)
if len(license["IsLicensed"]) == 0 {
t.Fatal("license not returned correctly")
}
}
func TestGetAudits(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
audits, resp := th.SystemAdminClient.GetAudits(0, 100, "")
CheckNoError(t, resp)
if len(audits) == 0 {
t.Fatal("should not be empty")
}
audits, resp = th.SystemAdminClient.GetAudits(0, 1, "")
CheckNoError(t, resp)
if len(audits) != 1 {
t.Fatal("should only be 1")
}
audits, resp = th.SystemAdminClient.GetAudits(1, 1, "")
CheckNoError(t, resp)
if len(audits) != 1 {
t.Fatal("should only be 1")
}
_, resp = th.SystemAdminClient.GetAudits(-1, -1, "")
CheckNoError(t, resp)
_, resp = Client.GetAudits(0, 100, "")
CheckForbiddenStatus(t, resp)
Client.Logout()
_, resp = Client.GetAudits(0, 100, "")
CheckUnauthorizedStatus(t, resp)
}
func TestEmailTest(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
config := model.Config{
EmailSettings: model.EmailSettings{
SMTPServer: "",
SMTPPort: "",
},
}
_, resp := Client.TestEmail(&config)
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.TestEmail(&config)
CheckErrorMessage(t, resp, "api.admin.test_email.missing_server")
CheckBadRequestStatus(t, resp)
inbucket_host := os.Getenv("CI_HOST")
if inbucket_host == "" {
inbucket_host = "dockerhost"
}
inbucket_port := os.Getenv("CI_INBUCKET_PORT")
if inbucket_port == "" {
inbucket_port = "9000"
}
config.EmailSettings.SMTPServer = inbucket_host
config.EmailSettings.SMTPPort = inbucket_port
_, resp = th.SystemAdminClient.TestEmail(&config)
CheckOKStatus(t, resp)
}
func TestDatabaseRecycle(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
_, resp := Client.DatabaseRecycle()
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.DatabaseRecycle()
CheckNoError(t, resp)
}
func TestInvalidateCaches(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
flag, resp := Client.InvalidateCaches()
CheckForbiddenStatus(t, resp)
if flag {
t.Fatal("should not clean the cache due to no permission.")
}
flag, resp = th.SystemAdminClient.InvalidateCaches()
CheckNoError(t, resp)
if !flag {
t.Fatal("should clean the cache")
}
}
func TestGetLogs(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
for i := 0; i < 20; i++ {
l4g.Info(i)
}
logs, resp := th.SystemAdminClient.GetLogs(0, 10)
CheckNoError(t, resp)
if len(logs) != 10 {
t.Log(len(logs))
t.Fatal("wrong length")
}
logs, resp = th.SystemAdminClient.GetLogs(1, 10)
CheckNoError(t, resp)
if len(logs) != 10 {
t.Log(len(logs))
t.Fatal("wrong length")
}
logs, resp = th.SystemAdminClient.GetLogs(-1, -1)
CheckNoError(t, resp)
if len(logs) == 0 {
t.Fatal("should not be empty")
}
_, resp = Client.GetLogs(0, 10)
CheckForbiddenStatus(t, resp)
Client.Logout()
_, resp = Client.GetLogs(0, 10)
CheckUnauthorizedStatus(t, resp)
}
func TestPostLog(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
enableDev := *th.App.Config().ServiceSettings.EnableDeveloper
defer func() {
*th.App.Config().ServiceSettings.EnableDeveloper = enableDev
}()
*th.App.Config().ServiceSettings.EnableDeveloper = true
message := make(map[string]string)
message["level"] = "ERROR"
message["message"] = "this is a test"
_, resp := Client.PostLog(message)
CheckNoError(t, resp)
Client.Logout()
_, resp = Client.PostLog(message)
CheckNoError(t, resp)
*th.App.Config().ServiceSettings.EnableDeveloper = false
_, resp = Client.PostLog(message)
CheckForbiddenStatus(t, resp)
logMessage, resp := th.SystemAdminClient.PostLog(message)
CheckNoError(t, resp)
if len(logMessage) == 0 {
t.Fatal("should return the log message")
}
}
func TestUploadLicenseFile(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
ok, resp := Client.UploadLicenseFile([]byte{})
CheckForbiddenStatus(t, resp)
if ok {
t.Fatal("should fail")
}
ok, resp = th.SystemAdminClient.UploadLicenseFile([]byte{})
CheckBadRequestStatus(t, resp)
if ok {
t.Fatal("should fail")
}
}
func TestRemoveLicenseFile(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
ok, resp := Client.RemoveLicenseFile()
CheckForbiddenStatus(t, resp)
if ok {
t.Fatal("should fail")
} |
ok, resp = th.SystemAdminClient.RemoveLicenseFile()
CheckNoError(t, resp)
if !ok {
t.Fatal("should pass")
}
}
func TestGetAnalyticsOld(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
rows, resp := Client.GetAnalyticsOld("", "")
CheckForbiddenStatus(t, resp)
if rows != nil {
t.Fatal("should be nil")
}
rows, resp = th.SystemAdminClient.GetAnalyticsOld("", "")
CheckNoError(t, resp)
found := false
for _, row := range rows {
if row.Name == "unique_user_count" {
found = true
}
}
if !found {
t.Fatal("should return unique user count")
}
_, resp = th.SystemAdminClient.GetAnalyticsOld("post_counts_day", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetAnalyticsOld("user_counts_with_posts_day", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetAnalyticsOld("extra_counts", "")
CheckNoError(t, resp)
_, resp = th.SystemAdminClient.GetAnalyticsOld("", th.BasicTeam.Id)
CheckNoError(t, resp)
Client.Logout()
_, resp = Client.GetAnalyticsOld("", th.BasicTeam.Id)
CheckUnauthorizedStatus(t, resp)
}
func TestS3TestConnection(t *testing.T) {
th := Setup().InitBasic().InitSystemAdmin()
defer th.TearDown()
Client := th.Client
s3Host := os.Getenv("CI_HOST")
if s3Host == "" {
s3Host = "dockerhost"
}
s3Port := os.Getenv("CI_MINIO_PORT")
if s3Port == "" {
s3Port = "9001"
}
s3Endpoint := fmt.Sprintf("%s:%s", s3Host, s3Port)
config := model.Config{
FileSettings: model.FileSettings{
DriverName: model.NewString(model.IMAGE_DRIVER_S3),
AmazonS3AccessKeyId: model.MINIO_ACCESS_KEY,
AmazonS3SecretAccessKey: model.MINIO_SECRET_KEY,
AmazonS3Bucket: "",
AmazonS3Endpoint: s3Endpoint,
AmazonS3SSL: model.NewBool(false),
},
}
_, resp := Client.TestS3Connection(&config)
CheckForbiddenStatus(t, resp)
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckBadRequestStatus(t, resp)
if resp.Error.Message != "S3 Bucket is required" {
t.Fatal("should return error - missing s3 bucket")
}
config.FileSettings.AmazonS3Bucket = model.MINIO_BUCKET
config.FileSettings.AmazonS3Region = "us-east-1"
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckOKStatus(t, resp)
config.FileSettings.AmazonS3Region = ""
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckOKStatus(t, resp)
config.FileSettings.AmazonS3Bucket = "Wrong_bucket"
_, resp = th.SystemAdminClient.TestS3Connection(&config)
CheckInternalErrorStatus(t, resp)
if resp.Error.Message != "Error checking if bucket exists." {
t.Fatal("should return error")
}
}
func TestSupportedTimezones(t *testing.T) {
th := Setup().InitBasic()
defer th.TearDown()
Client := th.Client
supportedTimezonesFromConfig := th.App.Timezones()
supportedTimezones, resp := Client.GetSupportedTimezone()
CheckNoError(t, resp)
assert.Equal(t, supportedTimezonesFromConfig, supportedTimezones)
} | |
gulpfile.js | "use strict";
var fs = require("fs");
var path = require("path");
var gulp = require("gulp"),
runSequence = require("run-sequence"),
del = require("del"),
mocha = require("gulp-mocha"),
tslint = require("gulp-tslint"),
tsc = require("gulp-typescript"),
sourcemaps = require("gulp-sourcemaps"),
merge = require("merge2");
gulp.task("lint", function () {
return gulp.src([
"**",
"!**/*.d.ts",
"!**/typings/**"
])
.pipe(tslint({}))
.pipe(tslint.report("verbose"));
});
var sourceMapsConfig = {
includeContent: false,
mapSources: function (sourcePath) {
// HACK: The sourcemaps do not reference source files correctly!
// The received sourcePath always starts with '../../source/lib/',
// resulting from the current folder structure.
// This does not work for files nested at different levels,
// therefore we need to count the folder depth of the current file.
// This is done by counting the overall slashes within the path.
// To get the additional ones, we subtract the initial path count.
// For this project setup it is the magic number of 4. Please adjust
// this to your project's needs.
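// Hypothetical example: a sourcePath like '../../source/lib/sub/file.ts' contains
// 5 slashes, so with initialPathCount = 4 one extra '../' is prepended, yielding
// '../../../source/lib/sub/file.ts'; a path with only 4 slashes is returned unchanged.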
var initialPathCount = 4;
var depthCount = (sourcePath.match(/\//g) || []).length;
var pathUps = "../".repeat(Math.max(depthCount, initialPathCount) - initialPathCount);
return pathUps + sourcePath;
}
};
var tsProject = tsc.createProject("tsconfig.json");
function | (sourcePath, base, targetPath) {
var tsResult = gulp.src(sourcePath, { base: base })
.pipe(sourcemaps.init())
.pipe(tsProject(tsc.reporter.longReporter()));
return merge([
tsResult.dts
.pipe(gulp.dest("build/")),
tsResult.js
.pipe(sourcemaps.write(".", sourceMapsConfig))
.pipe(gulp.dest("build/"))
]);
}
gulp.task("build-spec", function () {
return build(["src/**/*.ts", "typings/**.d.ts", "!./node_modules/**"], "./src", "");
});
gulp.task("build-lib", function () {
return build(["src/lib/**/*.ts", "typings/**.d.ts", "!./node_modules/**"], "./src", "lib");
});
gulp.task("build-package.json", function () {
var appPackageJson = JSON.parse(fs.readFileSync(__dirname + "/package.json", "utf8"));
var npmPackageJson = {
"name": appPackageJson.name,
"description": appPackageJson.description,
"version": appPackageJson.version,
"author": appPackageJson.author,
"repository": appPackageJson.repository,
"main": "index.js", // TODO: generate this from app package.json
"typings": "index.d.ts", // TODO: generate this from app package.json
"dependencies": appPackageJson.dependencies,
"keywords": appPackageJson.keywords,
"license": appPackageJson.license,
"bugs": appPackageJson.bugs
}
// Is this necessary in any case? fs.mkdirSync(path.join(__dirname, "build"));
fs.mkdirSync(path.join(__dirname, "build"));
fs.mkdirSync(path.join(__dirname, "build", "lib"));
fs.writeFileSync(path.join(__dirname, "build", "lib", "package.json"), JSON.stringify(npmPackageJson, null, 2));
});
function copyStaticSrc() {
return gulp.src([
"./src/lib/**/odata4-mod.abnf"
]);
}
gulp.task("copy-static-lib", ["copy-license"], function () {
return copyStaticSrc().pipe(gulp.dest("build/lib"));
});
gulp.task("copy-static-spec", function () {
return copyStaticSrc().pipe(gulp.dest("build/spec"));
});
gulp.task("copy-license", function () {
return gulp.src([
"README.md",
"LICENSE"
]).pipe(gulp.dest("build/lib"));
});
// TODO: deprecated - will be removed soon!
gulp.task("clean-all-old", function () {
return del(["./maps", "./lib"]);
});
gulp.task("clean-all", ["clean-all-old"], function () {
return del(["./build"]);
});
gulp.task("build", function (cb) {
return runSequence(
"clean-all",
["build-lib", "copy-static-lib", "build-package.json"],
cb
);
});
/*gulp.task("build-tests", function (cb) {
return runSequence(
"build-spec",
["copy-static-spec"],
cb
);
});*/
gulp.task("build-all", ["build"], function (cb) {
return runSequence(
"build-spec",
["copy-static-spec"],
cb
);
});
gulp.task("run-tests", function () {
return gulp.src("build/spec/*.js")
.pipe(mocha());
});
gulp.task("tests", ["build-all"], function (cb) {
return runSequence(
"run-tests",
cb
);
});
// Alternative name for the "tests" task.
gulp.task("specs", ["tests"]);
| build |
register.spec.ts | /*
Copyright 2022 The Matrix.org Foundation C.I.C.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/// <reference types="cypress" />
import { SynapseInstance } from "../../plugins/synapsedocker/index";
describe("Registration", () => {
let synapseId;
let synapsePort;
beforeEach(() => {
cy.task<SynapseInstance>("synapseStart", "consent").then(result => {
synapseId = result.synapseId;
synapsePort = result.port;
});
cy.visit("/#/register");
});
afterEach(() => {
cy.task("synapseStop", synapseId);
});
it("registers an account and lands on the home screen", () => {
cy.get(".mx_ServerPicker_change", { timeout: 15000 }).click();
cy.get(".mx_ServerPickerDialog_otherHomeserver").type(`http://localhost:${synapsePort}`);
cy.get(".mx_ServerPickerDialog_continue").click();
// wait for the dialog to go away
cy.get('.mx_ServerPickerDialog').should('not.exist');
cy.get("#mx_RegistrationForm_username").type("alice");
cy.get("#mx_RegistrationForm_password").type("totally a great password");
cy.get("#mx_RegistrationForm_passwordConfirm").type("totally a great password");
cy.get(".mx_Login_submit").click();
cy.get(".mx_RegistrationEmailPromptDialog button.mx_Dialog_primary").click();
cy.get(".mx_InteractiveAuthEntryComponents_termsPolicy input").click();
cy.get(".mx_InteractiveAuthEntryComponents_termsSubmit").click(); | cy.url().should('contain', '/#/home');
});
}); |
|
peer_storage.rs | // Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.
use std::cell::{Cell, RefCell};
use std::collections::VecDeque;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::mpsc::{self, Receiver, TryRecvError};
use std::sync::Arc;
use std::time::Instant;
use std::{cmp, error, u64};
use engine::rocks::DB;
use engine::Engines;
use engine::{Iterable, Mutable, Peekable};
use engine_rocks::{RocksSnapshot, RocksWriteBatch};
use engine_traits::CF_RAFT;
use engine_traits::{KvEngine, Mutable as MutableTrait, Peekable as PeekableTrait};
use keys::{self, enc_end_key, enc_start_key};
use kvproto::metapb::{self, Region};
use kvproto::raft_serverpb::{
MergeState, PeerState, RaftApplyState, RaftLocalState, RaftSnapshotData, RegionLocalState,
};
use protobuf::Message;
use raft::eraftpb::{ConfState, Entry, HardState, Snapshot};
use raft::{self, Error as RaftError, RaftState, Ready, Storage, StorageError};
use crate::store::fsm::GenSnapTask;
use crate::store::util::conf_state_from_region;
use crate::store::ProposalContext;
use crate::{Error, Result};
use into_other::into_other;
use tikv_util::worker::Scheduler;
use super::metrics::*;
use super::worker::RegionTask;
use super::{SnapEntry, SnapKey, SnapManager, SnapshotStatistics};
// When we create a region peer, we should initialize its log term/index > 0,
// so that we can force the follower peer to sync the snapshot first.
pub const RAFT_INIT_LOG_TERM: u64 = 5;
pub const RAFT_INIT_LOG_INDEX: u64 = 5;
const MAX_SNAP_TRY_CNT: usize = 5;
const RAFT_LOG_MULTI_GET_CNT: u64 = 8;
/// The initial region epoch version.
pub const INIT_EPOCH_VER: u64 = 1;
/// The initial region epoch conf_version.
pub const INIT_EPOCH_CONF_VER: u64 = 1;
// One extra slot for VecDeque internal usage.
const MAX_CACHE_CAPACITY: usize = 1024 - 1;
const SHRINK_CACHE_CAPACITY: usize = 64;
pub const JOB_STATUS_PENDING: usize = 0;
pub const JOB_STATUS_RUNNING: usize = 1;
pub const JOB_STATUS_CANCELLING: usize = 2;
pub const JOB_STATUS_CANCELLED: usize = 3;
pub const JOB_STATUS_FINISHED: usize = 4;
pub const JOB_STATUS_FAILED: usize = 5;
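// Expected transitions (inferred from check_applying_snap/cancel_applying_snap below):
// PENDING -> RUNNING -> FINISHED or FAILED, and PENDING/RUNNING -> CANCELLING -> CANCELLED.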
#[derive(Debug)]
pub enum SnapState {
Relax,
Generating(Receiver<Snapshot>),
Applying(Arc<AtomicUsize>),
ApplyAborted,
}
impl PartialEq for SnapState {
fn eq(&self, other: &SnapState) -> bool {
match (self, other) {
(&SnapState::Relax, &SnapState::Relax) | | (&SnapState::ApplyAborted, &SnapState::ApplyAborted)
| (&SnapState::Generating(_), &SnapState::Generating(_)) => true,
(&SnapState::Applying(ref b1), &SnapState::Applying(ref b2)) => {
b1.load(Ordering::Relaxed) == b2.load(Ordering::Relaxed)
}
_ => false,
}
}
}
#[inline]
pub fn first_index(state: &RaftApplyState) -> u64 {
state.get_truncated_state().get_index() + 1
}
#[inline]
pub fn last_index(state: &RaftLocalState) -> u64 {
state.get_last_index()
}
#[derive(Default)]
struct EntryCache {
cache: VecDeque<Entry>,
}
impl EntryCache {
fn first_index(&self) -> Option<u64> {
self.cache.front().map(|e| e.get_index())
}
fn fetch_entries_to(
&self,
begin: u64,
end: u64,
mut fetched_size: u64,
max_size: u64,
ents: &mut Vec<Entry>,
) {
if begin >= end {
return;
}
assert!(!self.cache.is_empty());
let cache_low = self.cache.front().unwrap().get_index();
let start_idx = begin.checked_sub(cache_low).unwrap() as usize;
let limit_idx = end.checked_sub(cache_low).unwrap() as usize;
let mut end_idx = start_idx;
self.cache
.iter()
.skip(start_idx)
.take_while(|e| {
let cur_idx = end_idx as u64 + cache_low;
assert_eq!(e.get_index(), cur_idx);
let m = u64::from(e.compute_size());
fetched_size += m;
if fetched_size == m {
end_idx += 1;
fetched_size <= max_size && end_idx < limit_idx
} else if fetched_size <= max_size {
end_idx += 1;
end_idx < limit_idx
} else {
false
}
})
.count();
// The cache is either empty or contains the latest log, hence we don't need to
// fetch logs from rocksdb anymore.
assert!(end_idx == limit_idx || fetched_size > max_size);
let (first, second) = tikv_util::slices_in_range(&self.cache, start_idx, end_idx);
ents.extend_from_slice(first);
ents.extend_from_slice(second);
}
fn append(&mut self, tag: &str, entries: &[Entry]) {
if entries.is_empty() {
return;
}
if let Some(cache_last_index) = self.cache.back().map(|e| e.get_index()) {
let first_index = entries[0].get_index();
if cache_last_index >= first_index {
if self.cache.front().unwrap().get_index() >= first_index {
self.cache.clear();
} else {
let left = self.cache.len() - (cache_last_index - first_index + 1) as usize;
self.cache.truncate(left);
}
if self.cache.len() + entries.len() < SHRINK_CACHE_CAPACITY
&& self.cache.capacity() > SHRINK_CACHE_CAPACITY
{
self.cache.shrink_to_fit();
}
} else if cache_last_index + 1 < first_index {
panic!(
"{} unexpected hole: {} < {}",
tag, cache_last_index, first_index
);
}
}
let mut start_idx = 0;
if let Some(len) = (self.cache.len() + entries.len()).checked_sub(MAX_CACHE_CAPACITY) {
if len < self.cache.len() {
self.cache.drain(..len);
} else {
start_idx = len - self.cache.len();
self.cache.clear();
}
}
for e in &entries[start_idx..] {
self.cache.push_back(e.to_owned());
}
}
pub fn compact_to(&mut self, idx: u64) {
let cache_first_idx = self.first_index().unwrap_or(u64::MAX);
if cache_first_idx > idx {
return;
}
let cache_last_idx = self.cache.back().unwrap().get_index();
// Use `cache_last_idx + 1` to make sure cache can be cleared completely
// if necessary.
self.cache
.drain(..(cmp::min(cache_last_idx + 1, idx) - cache_first_idx) as usize);
if self.cache.len() < SHRINK_CACHE_CAPACITY && self.cache.capacity() > SHRINK_CACHE_CAPACITY
{
// The peer storage hasn't had many writes since the proposal of compaction,
// so we can consider this peer to be going inactive.
self.cache.shrink_to_fit();
}
}
#[inline]
fn is_empty(&self) -> bool {
self.cache.is_empty()
}
}
#[derive(Default)]
pub struct CacheQueryStats {
pub hit: Cell<u64>,
pub miss: Cell<u64>,
}
impl CacheQueryStats {
pub fn flush(&mut self) {
if self.hit.get() > 0 {
RAFT_ENTRY_FETCHES
.with_label_values(&["hit"])
.inc_by(self.hit.replace(0) as i64);
}
if self.miss.get() > 0 {
RAFT_ENTRY_FETCHES
.with_label_values(&["miss"])
.inc_by(self.miss.replace(0) as i64);
}
}
}
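/// Accessors for the kv/raft write batches and the sync-log flag used while persisting
/// a raft `Ready`; see `ReadyContext` in the tests below for a minimal implementation.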
pub trait HandleRaftReadyContext {
fn kv_wb(&self) -> &RocksWriteBatch;
fn kv_wb_mut(&mut self) -> &mut RocksWriteBatch;
fn raft_wb(&self) -> &RocksWriteBatch;
fn raft_wb_mut(&mut self) -> &mut RocksWriteBatch;
fn sync_log(&self) -> bool;
fn set_sync_log(&mut self, sync: bool);
}
fn storage_error<E>(error: E) -> raft::Error
where
E: Into<Box<dyn error::Error + Send + Sync>>,
{
raft::Error::Store(StorageError::Other(error.into()))
}
impl From<Error> for RaftError {
fn from(err: Error) -> RaftError {
storage_error(err)
}
}
pub struct ApplySnapResult {
// prev_region is the region before the snapshot was applied.
pub prev_region: metapb::Region,
pub region: metapb::Region,
}
/// Returned by `PeerStorage::handle_raft_ready`, used for recording changed status of
/// `RaftLocalState` and `RaftApplyState`.
pub struct InvokeContext {
pub region_id: u64,
/// Changed RaftLocalState is stored into `raft_state`.
pub raft_state: RaftLocalState,
/// Changed RaftApplyState is stored into `apply_state`.
pub apply_state: RaftApplyState,
last_term: u64,
/// The old region is stored here if there is a snapshot.
pub snap_region: Option<Region>,
}
impl InvokeContext {
pub fn new(store: &PeerStorage) -> InvokeContext {
InvokeContext {
region_id: store.get_region_id(),
raft_state: store.raft_state.clone(),
apply_state: store.apply_state.clone(),
last_term: store.last_term,
snap_region: None,
}
}
#[inline]
pub fn has_snapshot(&self) -> bool {
self.snap_region.is_some()
}
#[inline]
pub fn save_raft_state_to(&self, raft_wb: &mut RocksWriteBatch) -> Result<()> {
raft_wb.put_msg(&keys::raft_state_key(self.region_id), &self.raft_state)?;
Ok(())
}
#[inline]
pub fn save_snapshot_raft_state_to(
&self,
snapshot_index: u64,
kv_wb: &mut RocksWriteBatch,
) -> Result<()> {
let mut snapshot_raft_state = self.raft_state.clone();
snapshot_raft_state
.mut_hard_state()
.set_commit(snapshot_index);
snapshot_raft_state.set_last_index(snapshot_index);
kv_wb.put_msg_cf(
CF_RAFT,
&keys::snapshot_raft_state_key(self.region_id),
&snapshot_raft_state,
)?;
Ok(())
}
#[inline]
pub fn save_apply_state_to(&self, kv_wb: &mut RocksWriteBatch) -> Result<()> {
kv_wb.put_msg_cf(
CF_RAFT,
&keys::apply_state_key(self.region_id),
&self.apply_state,
)?;
Ok(())
}
}
pub fn recover_from_applying_state(
engines: &Engines,
raft_wb: &RocksWriteBatch,
region_id: u64,
) -> Result<()> {
let snapshot_raft_state_key = keys::snapshot_raft_state_key(region_id);
let snapshot_raft_state: RaftLocalState =
match box_try!(engines.kv.get_msg_cf(CF_RAFT, &snapshot_raft_state_key)) {
Some(state) => state,
None => {
return Err(box_err!(
"[region {}] failed to get raftstate from kv engine, \
when recover from applying state",
region_id
));
}
};
let raft_state_key = keys::raft_state_key(region_id);
let raft_state: RaftLocalState = match box_try!(engines.raft.get_msg(&raft_state_key)) {
Some(state) => state,
None => RaftLocalState::default(),
};
// If we receive an append log while applying a snapshot, last_index in raft_local_state
// will be larger than snapshot_index. Since raft_local_state is written to the raft engine,
// and the raft write batch is written after the kv write batch, raft_local_state may be
// wrong if a restart happens between the two writes. So we copy raft_local_state to the kv
// engine (snapshot_raft_state) and set snapshot_raft_state.last_index = snapshot_index.
// After restart, we need to check last_index.
if last_index(&snapshot_raft_state) > last_index(&raft_state) {
raft_wb.put_msg(&raft_state_key, &snapshot_raft_state)?;
}
Ok(())
}
pub fn init_raft_state(engines: &Engines, region: &Region) -> Result<RaftLocalState> {
let state_key = keys::raft_state_key(region.get_id());
Ok(match engines.raft.get_msg(&state_key)? {
Some(s) => s,
None => {
let mut raft_state = RaftLocalState::default();
if !region.get_peers().is_empty() {
// new split region
raft_state.set_last_index(RAFT_INIT_LOG_INDEX);
raft_state.mut_hard_state().set_term(RAFT_INIT_LOG_TERM);
raft_state.mut_hard_state().set_commit(RAFT_INIT_LOG_INDEX);
engines.raft.put_msg(&state_key, &raft_state)?;
}
raft_state
}
})
}
pub fn init_apply_state(engines: &Engines, region: &Region) -> Result<RaftApplyState> {
Ok(
match engines
.kv
.get_msg_cf(CF_RAFT, &keys::apply_state_key(region.get_id()))?
{
Some(s) => s,
None => {
let mut apply_state = RaftApplyState::default();
if !region.get_peers().is_empty() {
apply_state.set_applied_index(RAFT_INIT_LOG_INDEX);
let state = apply_state.mut_truncated_state();
state.set_index(RAFT_INIT_LOG_INDEX);
state.set_term(RAFT_INIT_LOG_TERM);
}
apply_state
}
},
)
}
fn init_last_term(
engines: &Engines,
region: &Region,
raft_state: &RaftLocalState,
apply_state: &RaftApplyState,
) -> Result<u64> {
let last_idx = raft_state.get_last_index();
if last_idx == 0 {
return Ok(0);
} else if last_idx == RAFT_INIT_LOG_INDEX {
return Ok(RAFT_INIT_LOG_TERM);
} else if last_idx == apply_state.get_truncated_state().get_index() {
return Ok(apply_state.get_truncated_state().get_term());
} else {
assert!(last_idx > RAFT_INIT_LOG_INDEX);
}
let last_log_key = keys::raft_log_key(region.get_id(), last_idx);
let entry = engines.raft.get_msg::<Entry>(&last_log_key)?;
match entry {
None => Err(box_err!(
"[region {}] entry at {} doesn't exist, may lose data.",
region.get_id(),
last_idx
)),
Some(e) => Ok(e.get_term()),
}
}
pub struct PeerStorage {
pub engines: Engines,
peer_id: u64,
region: metapb::Region,
raft_state: RaftLocalState,
apply_state: RaftApplyState,
applied_index_term: u64,
last_term: u64,
snap_state: RefCell<SnapState>,
gen_snap_task: RefCell<Option<GenSnapTask>>,
region_sched: Scheduler<RegionTask>,
snap_tried_cnt: RefCell<usize>,
cache: EntryCache,
stats: CacheQueryStats,
pub tag: String,
}
impl Storage for PeerStorage {
fn initial_state(&self) -> raft::Result<RaftState> {
self.initial_state()
}
fn entries(
&self,
low: u64,
high: u64,
max_size: impl Into<Option<u64>>,
) -> raft::Result<Vec<Entry>> {
self.entries(low, high, max_size.into().unwrap_or(u64::MAX))
}
fn term(&self, idx: u64) -> raft::Result<u64> {
self.term(idx)
}
fn first_index(&self) -> raft::Result<u64> {
Ok(self.first_index())
}
fn last_index(&self) -> raft::Result<u64> {
Ok(self.last_index())
}
fn snapshot(&self, request_index: u64) -> raft::Result<Snapshot> {
self.snapshot(request_index)
}
}
impl PeerStorage {
pub fn new(
engines: Engines,
region: &metapb::Region,
region_sched: Scheduler<RegionTask>,
peer_id: u64,
tag: String,
) -> Result<PeerStorage> {
debug!(
"creating storage on specified path";
"region_id" => region.get_id(),
"peer_id" => peer_id,
"path" => ?engines.kv.path(),
);
let raft_state = init_raft_state(&engines, region)?;
let apply_state = init_apply_state(&engines, region)?;
if raft_state.get_last_index() < apply_state.get_applied_index() {
panic!(
"{} unexpected raft log index: last_index {} < applied_index {}",
tag,
raft_state.get_last_index(),
apply_state.get_applied_index()
);
}
let last_term = init_last_term(&engines, region, &raft_state, &apply_state)?;
Ok(PeerStorage {
engines,
peer_id,
region: region.clone(),
raft_state,
apply_state,
snap_state: RefCell::new(SnapState::Relax),
gen_snap_task: RefCell::new(None),
region_sched,
snap_tried_cnt: RefCell::new(0),
tag,
applied_index_term: RAFT_INIT_LOG_TERM,
last_term,
cache: EntryCache::default(),
stats: CacheQueryStats::default(),
})
}
pub fn is_initialized(&self) -> bool {
!self.region().get_peers().is_empty()
}
pub fn initial_state(&self) -> raft::Result<RaftState> {
let hard_state = self.raft_state.get_hard_state().clone();
if hard_state == HardState::default() {
assert!(
!self.is_initialized(),
"peer for region {:?} is initialized but local state {:?} has empty hard \
state",
self.region,
self.raft_state
);
return Ok(RaftState::new(hard_state, ConfState::default()));
}
Ok(RaftState::new(
hard_state,
conf_state_from_region(self.region()),
))
}
fn check_range(&self, low: u64, high: u64) -> raft::Result<()> {
if low > high {
return Err(storage_error(format!(
"low: {} is greater than high: {}",
low, high
)));
} else if low <= self.truncated_index() {
return Err(RaftError::Store(StorageError::Compacted));
} else if high > self.last_index() + 1 {
return Err(storage_error(format!(
"entries' high {} is out of bound lastindex {}",
high,
self.last_index()
)));
}
Ok(())
}
pub fn entries(&self, low: u64, high: u64, max_size: u64) -> raft::Result<Vec<Entry>> {
self.check_range(low, high)?;
let mut ents = Vec::with_capacity((high - low) as usize);
if low == high {
return Ok(ents);
}
let cache_low = self.cache.first_index().unwrap_or(u64::MAX);
let region_id = self.get_region_id();
if high <= cache_low {
// not overlap
self.stats.miss.update(|m| m + 1);
fetch_entries_to(
&self.engines.raft,
region_id,
low,
high,
max_size,
&mut ents,
)?;
return Ok(ents);
}
let mut fetched_size = 0;
let begin_idx = if low < cache_low {
self.stats.miss.update(|m| m + 1);
fetched_size = fetch_entries_to(
&self.engines.raft,
region_id,
low,
cache_low,
max_size,
&mut ents,
)?;
if fetched_size > max_size {
// max_size exceed.
return Ok(ents);
}
cache_low
} else {
low
};
self.stats.hit.update(|h| h + 1);
self.cache
.fetch_entries_to(begin_idx, high, fetched_size, max_size, &mut ents);
Ok(ents)
}
pub fn term(&self, idx: u64) -> raft::Result<u64> {
if idx == self.truncated_index() {
return Ok(self.truncated_term());
}
self.check_range(idx, idx + 1)?;
if self.truncated_term() == self.last_term || idx == self.last_index() {
return Ok(self.last_term);
}
let entries = self.entries(idx, idx + 1, raft::NO_LIMIT)?;
Ok(entries[0].get_term())
}
#[inline]
pub fn first_index(&self) -> u64 {
first_index(&self.apply_state)
}
#[inline]
pub fn last_index(&self) -> u64 {
last_index(&self.raft_state)
}
#[inline]
pub fn applied_index(&self) -> u64 {
self.apply_state.get_applied_index()
}
#[inline]
pub fn set_applied_state(&mut self, apply_state: RaftApplyState) {
self.apply_state = apply_state;
}
#[inline]
pub fn set_applied_term(&mut self, applied_index_term: u64) {
self.applied_index_term = applied_index_term;
}
#[inline]
pub fn apply_state(&self) -> &RaftApplyState {
&self.apply_state
}
#[inline]
pub fn applied_index_term(&self) -> u64 {
self.applied_index_term
}
#[inline]
pub fn committed_index(&self) -> u64 {
self.raft_state.get_hard_state().get_commit()
}
#[inline]
pub fn truncated_index(&self) -> u64 {
self.apply_state.get_truncated_state().get_index()
}
#[inline]
pub fn truncated_term(&self) -> u64 {
self.apply_state.get_truncated_state().get_term()
}
pub fn region(&self) -> &metapb::Region {
&self.region
}
pub fn set_region(&mut self, region: metapb::Region) {
self.region = region;
}
pub fn raw_snapshot(&self) -> RocksSnapshot {
RocksSnapshot::new(Arc::clone(&self.engines.kv))
}
fn validate_snap(&self, snap: &Snapshot, request_index: u64) -> bool {
let idx = snap.get_metadata().get_index();
if idx < self.truncated_index() || idx < request_index {
// stale snapshot, should generate again.
info!(
"snapshot is stale, generate again";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"snap_index" => idx,
"truncated_index" => self.truncated_index(),
"request_index" => request_index,
);
STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER
.with_label_values(&["stale"])
.inc();
return false;
}
let mut snap_data = RaftSnapshotData::default();
if let Err(e) = snap_data.merge_from_bytes(snap.get_data()) {
error!(
"failed to decode snapshot, it may be corrupted";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"err" => ?e,
);
STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER
.with_label_values(&["decode"])
.inc();
return false;
}
let snap_epoch = snap_data.get_region().get_region_epoch();
let latest_epoch = self.region().get_region_epoch();
if snap_epoch.get_conf_ver() < latest_epoch.get_conf_ver() {
info!(
"snapshot epoch is stale";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"snap_epoch" => ?snap_epoch,
"latest_epoch" => ?latest_epoch,
);
STORE_SNAPSHOT_VALIDATION_FAILURE_COUNTER
.with_label_values(&["epoch"])
.inc();
return false;
}
true
}
/// Gets a snapshot. Returns `SnapshotTemporarilyUnavailable` if the snapshot is not
/// available yet.
pub fn snapshot(&self, request_index: u64) -> raft::Result<Snapshot> {
let mut snap_state = self.snap_state.borrow_mut();
let mut tried_cnt = self.snap_tried_cnt.borrow_mut();
let (mut tried, mut snap) = (false, None);
if let SnapState::Generating(ref recv) = *snap_state {
tried = true;
match recv.try_recv() {
Err(TryRecvError::Disconnected) => {}
Err(TryRecvError::Empty) => {
return Err(raft::Error::Store(
raft::StorageError::SnapshotTemporarilyUnavailable,
));
}
Ok(s) => snap = Some(s),
}
}
if tried {
*snap_state = SnapState::Relax;
match snap {
Some(s) => {
*tried_cnt = 0;
if self.validate_snap(&s, request_index) {
return Ok(s);
}
}
None => {
warn!(
"failed to try generating snapshot";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"times" => *tried_cnt,
);
}
}
}
if SnapState::Relax != *snap_state {
panic!("{} unexpected state: {:?}", self.tag, *snap_state);
}
if *tried_cnt >= MAX_SNAP_TRY_CNT {
let cnt = *tried_cnt;
*tried_cnt = 0;
return Err(raft::Error::Store(box_err!(
"failed to get snapshot after {} times",
cnt
)));
}
info!(
"requesting snapshot";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"request_index" => request_index,
);
*tried_cnt += 1;
let (tx, rx) = mpsc::sync_channel(1);
*snap_state = SnapState::Generating(rx);
let task = GenSnapTask::new(self.region.get_id(), self.committed_index(), tx);
let mut gen_snap_task = self.gen_snap_task.borrow_mut();
assert!(gen_snap_task.is_none());
*gen_snap_task = Some(task);
Err(raft::Error::Store(
raft::StorageError::SnapshotTemporarilyUnavailable,
))
}
pub fn take_gen_snap_task(&mut self) -> Option<GenSnapTask> {
self.gen_snap_task.get_mut().take()
}
// Append the given entries to the raft log, using the previous last index or self.last_index.
// Return the new last index for later update. After we commit in engine, we can set last_index
// to the returned one.
pub fn append<H: HandleRaftReadyContext>(
&mut self,
invoke_ctx: &mut InvokeContext,
entries: &[Entry],
ready_ctx: &mut H,
) -> Result<u64> {
debug!(
"append entries";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"count" => entries.len(),
);
let prev_last_index = invoke_ctx.raft_state.get_last_index();
if entries.is_empty() {
return Ok(prev_last_index);
}
let (last_index, last_term) = {
let e = entries.last().unwrap();
(e.get_index(), e.get_term())
};
for entry in entries {
if !ready_ctx.sync_log() {
ready_ctx.set_sync_log(get_sync_log_from_entry(entry));
}
ready_ctx.raft_wb_mut().put_msg(
&keys::raft_log_key(self.get_region_id(), entry.get_index()),
entry,
)?;
}
// Delete any previously appended log entries which never committed.
for i in (last_index + 1)..=prev_last_index {
// TODO: Wrap it as an engine::Error.
box_try!(ready_ctx
.raft_wb_mut()
.delete(&keys::raft_log_key(self.get_region_id(), i)));
}
invoke_ctx.raft_state.set_last_index(last_index);
invoke_ctx.last_term = last_term;
// TODO: if the writebatch is failed to commit, the cache will be wrong.
self.cache.append(&self.tag, entries);
Ok(last_index)
}
pub fn compact_to(&mut self, idx: u64) {
self.cache.compact_to(idx);
}
#[inline]
pub fn is_cache_empty(&self) -> bool {
self.cache.is_empty()
}
pub fn maybe_gc_cache(&mut self, replicated_idx: u64, apply_idx: u64) {
if replicated_idx == apply_idx {
// The region is inactive, clear the cache immediately.
self.cache.compact_to(apply_idx + 1);
} else {
let cache_first_idx = match self.cache.first_index() {
None => return,
Some(idx) => idx,
};
if cache_first_idx > replicated_idx + 1 {
// Catching up the log already requires accessing the fs, so let's optimize for
// the common case.
// Maybe gc-ing to the second-least replicated_idx would be better.
self.cache.compact_to(apply_idx + 1);
}
}
}
#[inline]
pub fn flush_cache_metrics(&mut self) {
self.stats.flush();
}
// Apply the peer with given snapshot.
pub fn apply_snapshot(
&mut self,
ctx: &mut InvokeContext,
snap: &Snapshot,
kv_wb: &RocksWriteBatch,
raft_wb: &RocksWriteBatch,
) -> Result<()> {
info!(
"begin to apply snapshot";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
);
let mut snap_data = RaftSnapshotData::default();
snap_data.merge_from_bytes(snap.get_data())?;
let region_id = self.get_region_id();
let region = snap_data.take_region();
if region.get_id() != region_id {
return Err(box_err!(
"mismatch region id {} != {}",
region_id,
region.get_id()
));
}
if self.is_initialized() {
// we can only delete the old data when the peer is initialized.
self.clear_meta(kv_wb, raft_wb)?;
}
write_peer_state(kv_wb, ®ion, PeerState::Applying, None)?;
let last_index = snap.get_metadata().get_index();
ctx.raft_state.set_last_index(last_index);
ctx.last_term = snap.get_metadata().get_term();
ctx.apply_state.set_applied_index(last_index);
// The snapshot only contains log entries whose index > applied index, so
// the truncated state's (index, term) here comes from the snapshot metadata.
ctx.apply_state.mut_truncated_state().set_index(last_index);
ctx.apply_state
.mut_truncated_state()
.set_term(snap.get_metadata().get_term());
info!(
"apply snapshot with state ok";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"region" => ?region,
"state" => ?ctx.apply_state,
);
fail_point!("before_apply_snap_update_region", |_| { Ok(()) });
ctx.snap_region = Some(region);
Ok(())
}
/// Delete all metadata belonging to the region. Results are stored in `kv_wb` and `raft_wb`.
pub fn clear_meta(&mut self, kv_wb: &RocksWriteBatch, raft_wb: &RocksWriteBatch) -> Result<()> {
let region_id = self.get_region_id();
clear_meta(&self.engines, kv_wb, raft_wb, region_id, &self.raft_state)?;
self.cache = EntryCache::default();
Ok(())
}
/// Delete all data belonging to the region.
/// If this returns Err, the data may be partially deleted.
pub fn clear_data(&self) -> Result<()> {
let (start_key, end_key) = (enc_start_key(self.region()), enc_end_key(self.region()));
let region_id = self.get_region_id();
box_try!(self
.region_sched
.schedule(RegionTask::destroy(region_id, start_key, end_key)));
Ok(())
}
/// Delete all data that is not covered by `new_region`.
fn clear_extra_data(&self, new_region: &metapb::Region) -> Result<()> {
let (old_start_key, old_end_key) =
(enc_start_key(self.region()), enc_end_key(self.region()));
let (new_start_key, new_end_key) = (enc_start_key(new_region), enc_end_key(new_region));
let region_id = new_region.get_id();
if old_start_key < new_start_key {
box_try!(self.region_sched.schedule(RegionTask::destroy(
region_id,
old_start_key,
new_start_key
)));
}
if new_end_key < old_end_key {
box_try!(self.region_sched.schedule(RegionTask::destroy(
region_id,
new_end_key,
old_end_key
)));
}
Ok(())
}
pub fn get_raft_engine(&self) -> Arc<DB> {
Arc::clone(&self.engines.raft)
}
/// Check whether the storage is currently applying a snapshot.
#[inline]
pub fn is_applying_snapshot(&self) -> bool {
match *self.snap_state.borrow() {
SnapState::Applying(_) => true,
_ => false,
}
}
/// Check if the storage is still applying a snapshot, updating `snap_state` if the job has finished or been cancelled.
#[inline]
pub fn check_applying_snap(&mut self) -> bool {
let new_state = match *self.snap_state.borrow() {
SnapState::Applying(ref status) => {
let s = status.load(Ordering::Relaxed);
if s == JOB_STATUS_FINISHED {
SnapState::Relax
} else if s == JOB_STATUS_CANCELLED {
SnapState::ApplyAborted
} else if s == JOB_STATUS_FAILED {
// TODO: cleanup region and treat it as tombstone.
panic!("{} applying snapshot failed", self.tag,);
} else {
return true;
}
}
_ => return false,
};
*self.snap_state.borrow_mut() = new_state;
false
}
#[inline]
pub fn is_canceling_snap(&self) -> bool {
match *self.snap_state.borrow() {
SnapState::Applying(ref status) => {
status.load(Ordering::Relaxed) == JOB_STATUS_CANCELLING
}
_ => false,
}
}
/// Cancel applying the snapshot; return true if the job can be considered not to be run again.
pub fn cancel_applying_snap(&mut self) -> bool {
let is_cancelled = match *self.snap_state.borrow() {
SnapState::Applying(ref status) => {
if status.compare_and_swap(
JOB_STATUS_PENDING,
JOB_STATUS_CANCELLING,
Ordering::SeqCst,
) == JOB_STATUS_PENDING
{
true
} else if status.compare_and_swap(
JOB_STATUS_RUNNING,
JOB_STATUS_CANCELLING,
Ordering::SeqCst,
) == JOB_STATUS_RUNNING
{
return false;
} else {
false
}
}
_ => return false,
};
if is_cancelled {
*self.snap_state.borrow_mut() = SnapState::ApplyAborted;
return true;
}
// now status can only be JOB_STATUS_CANCELLING, JOB_STATUS_CANCELLED,
// JOB_STATUS_FAILED and JOB_STATUS_FINISHED.
!self.check_applying_snap()
}
#[inline]
pub fn set_snap_state(&mut self, state: SnapState) {
*self.snap_state.borrow_mut() = state
}
#[inline]
pub fn is_snap_state(&self, state: SnapState) -> bool {
*self.snap_state.borrow() == state
}
pub fn get_region_id(&self) -> u64 {
self.region().get_id()
}
pub fn schedule_applying_snapshot(&mut self) {
let status = Arc::new(AtomicUsize::new(JOB_STATUS_PENDING));
self.set_snap_state(SnapState::Applying(Arc::clone(&status)));
let task = RegionTask::Apply {
region_id: self.get_region_id(),
status,
};
// TODO: gracefully remove region instead.
if let Err(e) = self.region_sched.schedule(task) {
info!(
"failed to schedule apply job, are we shutting down?";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"err" => ?e,
);
}
}
/// Save memory states to disk.
///
/// This function only write data to `ready_ctx`'s `WriteBatch`. It's caller's duty to write
/// it explicitly to disk. If it's flushed to disk successfully, `post_ready` should be called
/// to update the memory states properly.
// Using `&Ready` here to make sure `Ready` struct is not modified in this function. This is
// a requirement to advance the ready object properly later.
pub fn handle_raft_ready<H: HandleRaftReadyContext>(
&mut self,
ready_ctx: &mut H,
ready: &Ready,
) -> Result<InvokeContext> {
let mut ctx = InvokeContext::new(self);
let snapshot_index = if raft::is_empty_snap(ready.snapshot()) {
0
} else {
fail_point!("raft_before_apply_snap");
self.apply_snapshot(
&mut ctx,
ready.snapshot(),
&ready_ctx.kv_wb(),
&ready_ctx.raft_wb(),
)?;
fail_point!("raft_after_apply_snap");
last_index(&ctx.raft_state)
};
if ready.must_sync() {
ready_ctx.set_sync_log(true);
}
if !ready.entries().is_empty() {
self.append(&mut ctx, ready.entries(), ready_ctx)?;
}
// A last index of 0 means the peer was created from a raft message
// and has not applied a snapshot yet, so skip persisting the hard state.
if ctx.raft_state.get_last_index() > 0 {
if let Some(hs) = ready.hs() {
ctx.raft_state.set_hard_state(hs.clone());
}
}
// Save raft state if it has changed or peer has applied a snapshot.
if ctx.raft_state != self.raft_state || snapshot_index != 0 {
ctx.save_raft_state_to(ready_ctx.raft_wb_mut())?;
if snapshot_index > 0 {
// In case a restart happens right after we write the region state as Applying
// but before raft_local_state is written to the raft rocksdb in time,
// we also write the raft state to the default (kv) rocksdb, with last index set to the
// snapshot index, in case raft logs are received after the snapshot.
ctx.save_snapshot_raft_state_to(snapshot_index, &mut ready_ctx.kv_wb_mut())?;
}
}
// only when apply snapshot
if snapshot_index != 0 {
ctx.save_apply_state_to(&mut ready_ctx.kv_wb_mut())?;
}
Ok(ctx)
}
/// Update the memory state after ready changes are flushed to disk successfully.
pub fn post_ready(&mut self, ctx: InvokeContext) -> Option<ApplySnapResult> {
self.raft_state = ctx.raft_state;
self.apply_state = ctx.apply_state;
self.last_term = ctx.last_term;
// If we apply snapshot ok, we should update some infos like applied index too.
let snap_region = match ctx.snap_region {
Some(r) => r,
None => return None,
};
// cleanup data before scheduling apply task
if self.is_initialized() {
if let Err(e) = self.clear_extra_data(self.region()) {
// No need panic here, when applying snapshot, the deletion will be tried
// again. But if the region range changes, like [a, c) -> [a, b) and [b, c),
// [b, c) will be kept in rocksdb until a covered snapshot is applied or
// store is restarted.
error!(
"failed to cleanup data, may leave some dirty data";
"region_id" => self.region.get_id(),
"peer_id" => self.peer_id,
"err" => ?e,
);
}
}
self.schedule_applying_snapshot();
let prev_region = self.region().clone();
self.set_region(snap_region);
Some(ApplySnapResult {
prev_region,
region: self.region().clone(),
})
}
}
fn get_sync_log_from_entry(entry: &Entry) -> bool {
if entry.get_sync_log() {
return true;
}
let ctx = entry.get_context();
if !ctx.is_empty() {
let ctx = ProposalContext::from_bytes(ctx);
if ctx.contains(ProposalContext::SYNC_LOG) {
return true;
}
}
false
}
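/// Fetches raft log entries in the range `[low, high)` for `region_id` from the raft
/// engine into `buf`, stopping once `max_size` is exceeded (at least one entry is always
/// returned). Returns the total fetched size, or `StorageError::Unavailable` if entries
/// in the range are missing.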
pub fn fetch_entries_to(
engine: &DB,
region_id: u64,
low: u64,
high: u64,
max_size: u64,
buf: &mut Vec<Entry>,
) -> raft::Result<u64> {
let mut total_size: u64 = 0;
let mut next_index = low;
let mut exceeded_max_size = false;
if high - low <= RAFT_LOG_MULTI_GET_CNT {
// If election happens in inactive regions, they will just try
// to fetch one empty log.
for i in low..high {
let key = keys::raft_log_key(region_id, i);
match engine.get(&key) {
Ok(None) => return Err(RaftError::Store(StorageError::Unavailable)),
Ok(Some(v)) => {
let mut entry = Entry::default();
entry.merge_from_bytes(&v)?;
assert_eq!(entry.get_index(), i);
total_size += v.len() as u64;
if buf.is_empty() || total_size <= max_size {
buf.push(entry);
}
if total_size > max_size {
break;
}
}
Err(e) => return Err(storage_error(e)),
}
}
return Ok(total_size);
}
let start_key = keys::raft_log_key(region_id, low);
let end_key = keys::raft_log_key(region_id, high);
engine.scan(
&start_key,
&end_key,
true, // fill_cache
|_, value| {
let mut entry = Entry::default();
entry.merge_from_bytes(value)?;
// We may meet a gap, or the log may have been compacted.
if entry.get_index() != next_index {
return Ok(false);
}
next_index += 1;
total_size += value.len() as u64;
exceeded_max_size = total_size > max_size;
if !exceeded_max_size || buf.is_empty() {
buf.push(entry);
}
Ok(!exceeded_max_size)
},
)?;
// Return if we fetched the expected number of entries,
// or if the total size exceeded max_size.
if buf.len() == (high - low) as usize || exceeded_max_size {
return Ok(total_size);
}
// Here means we don't fetch enough entries.
Err(RaftError::Store(StorageError::Unavailable))
}
/// Delete all metadata belonging to the region. Results are stored in `kv_wb` and `raft_wb`.
pub fn clear_meta(
engines: &Engines,
kv_wb: &RocksWriteBatch,
raft_wb: &RocksWriteBatch,
region_id: u64,
raft_state: &RaftLocalState,
) -> Result<()> {
let t = Instant::now();
box_try!(kv_wb.delete_cf(CF_RAFT, &keys::region_state_key(region_id)));
box_try!(kv_wb.delete_cf(CF_RAFT, &keys::apply_state_key(region_id)));
let last_index = last_index(raft_state);
let mut first_index = last_index + 1;
let begin_log_key = keys::raft_log_key(region_id, 0);
let end_log_key = keys::raft_log_key(region_id, first_index);
engines
.raft
.scan(&begin_log_key, &end_log_key, false, |key, _| {
first_index = keys::raft_log_index(key).unwrap();
Ok(false)
})?;
for id in first_index..=last_index {
box_try!(raft_wb.delete(&keys::raft_log_key(region_id, id)));
}
box_try!(raft_wb.delete(&keys::raft_state_key(region_id)));
info!(
"finish clear peer meta";
"region_id" => region_id,
"meta_key" => 1,
"apply_key" => 1,
"raft_key" => 1,
"raft_logs" => last_index + 1 - first_index,
"takes" => ?t.elapsed(),
);
Ok(())
}
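/// Builds a snapshot for `region_id` from the given kv and raft engine snapshots: reads
/// the apply and region state, registers the key with the `SnapManager`, and fills in the
/// snapshot metadata (index, term, conf state) and data.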
pub fn do_snapshot<E>(
mgr: SnapManager,
raft_snap: E::Snapshot,
kv_snap: E::Snapshot,
region_id: u64,
) -> raft::Result<Snapshot>
where
E: KvEngine,
{
debug!(
"begin to generate a snapshot";
"region_id" => region_id,
);
let msg = kv_snap
.get_msg_cf(CF_RAFT, &keys::apply_state_key(region_id))
.map_err(into_other::<_, raft::Error>)?;
let apply_state: RaftApplyState = match msg {
None => {
return Err(storage_error(format!(
"could not load raft state of region {}",
region_id
)));
}
Some(state) => state,
};
let idx = apply_state.get_applied_index();
let term = if idx == apply_state.get_truncated_state().get_index() {
apply_state.get_truncated_state().get_term()
} else {
let msg = raft_snap
.get_msg::<Entry>(&keys::raft_log_key(region_id, idx))
.map_err(into_other::<_, raft::Error>)?;
match msg {
None => {
return Err(storage_error(format!(
"entry {} of {} not found.",
idx, region_id
)));
}
Some(entry) => entry.get_term(),
}
};
// Release raft engine snapshot to avoid too many open files.
drop(raft_snap);
let key = SnapKey::new(region_id, term, idx);
mgr.register(key.clone(), SnapEntry::Generating);
defer!(mgr.deregister(&key, &SnapEntry::Generating));
let state: RegionLocalState = kv_snap
.get_msg_cf(CF_RAFT, &keys::region_state_key(key.region_id))
.and_then(|res| match res {
None => Err(box_err!("region {} could not find region info", region_id)),
Some(state) => Ok(state),
})
.map_err(into_other::<_, raft::Error>)?;
if state.get_state() != PeerState::Normal {
return Err(storage_error(format!(
"snap job for {} seems stale, skip.",
region_id
)));
}
let mut snapshot = Snapshot::default();
// Set snapshot metadata.
snapshot.mut_metadata().set_index(key.idx);
snapshot.mut_metadata().set_term(key.term);
let conf_state = conf_state_from_region(state.get_region());
snapshot.mut_metadata().set_conf_state(conf_state);
let mut s = mgr.get_snapshot_for_building::<E>(&key)?;
// Set snapshot data.
let mut snap_data = RaftSnapshotData::default();
snap_data.set_region(state.get_region().clone());
let mut stat = SnapshotStatistics::new();
s.build(
&kv_snap,
state.get_region(),
&mut snap_data,
&mut stat,
Box::new(mgr.clone()),
)?;
let v = snap_data.write_to_bytes()?;
snapshot.set_data(v);
SNAPSHOT_KV_COUNT_HISTOGRAM.observe(stat.kv_count as f64);
SNAPSHOT_SIZE_HISTOGRAM.observe(stat.size as f64);
Ok(snapshot)
}
// When we bootstrap the region we must call this to initialize its raft local state first.
pub fn write_initial_raft_state<T: MutableTrait>(raft_wb: &T, region_id: u64) -> Result<()> {
let mut raft_state = RaftLocalState::default();
raft_state.set_last_index(RAFT_INIT_LOG_INDEX);
raft_state.mut_hard_state().set_term(RAFT_INIT_LOG_TERM);
raft_state.mut_hard_state().set_commit(RAFT_INIT_LOG_INDEX);
raft_wb.put_msg(&keys::raft_state_key(region_id), &raft_state)?;
Ok(())
}
// When we bootstrap the region or handle a new split region, we must
// call this to initialize the region's apply state first.
pub fn write_initial_apply_state<T: MutableTrait>(kv_wb: &T, region_id: u64) -> Result<()> {
let mut apply_state = RaftApplyState::default();
apply_state.set_applied_index(RAFT_INIT_LOG_INDEX);
apply_state
.mut_truncated_state()
.set_index(RAFT_INIT_LOG_INDEX);
apply_state
.mut_truncated_state()
.set_term(RAFT_INIT_LOG_TERM);
kv_wb.put_msg_cf(CF_RAFT, &keys::apply_state_key(region_id), &apply_state)?;
Ok(())
}
pub fn write_peer_state<T: MutableTrait>(
kv_wb: &T,
region: &metapb::Region,
state: PeerState,
merge_state: Option<MergeState>,
) -> Result<()> {
let region_id = region.get_id();
let mut region_state = RegionLocalState::default();
region_state.set_state(state);
region_state.set_region(region.clone());
if let Some(state) = merge_state {
region_state.set_merge_state(state);
}
debug!(
"writing merge state";
"region_id" => region_id,
"state" => ?region_state,
);
kv_wb.put_msg_cf(CF_RAFT, &keys::region_state_key(region_id), ®ion_state)?;
Ok(())
}
#[cfg(test)]
mod tests {
use crate::coprocessor::CoprocessorHost;
use crate::store::fsm::apply::compact_raft_log;
use crate::store::worker::RegionRunner;
use crate::store::worker::RegionTask;
use crate::store::{bootstrap_store, initial_region, prepare_bootstrap_cluster};
use engine::rocks::util::new_engine;
use engine::Engines;
use engine_rocks::{Compat, RocksWriteBatch};
use engine_traits::{ALL_CFS, CF_DEFAULT};
use kvproto::raft_serverpb::RaftSnapshotData;
use raft::eraftpb::HardState;
use raft::eraftpb::{ConfState, Entry};
use raft::{Error as RaftError, StorageError};
use std::cell::RefCell;
use std::path::Path;
use std::sync::atomic::*;
use std::sync::mpsc::*;
use std::sync::*;
use std::time::Duration;
use tempfile::{Builder, TempDir};
use tikv_util::worker::{Scheduler, Worker};
use super::*;
fn new_storage(sched: Scheduler<RegionTask>, path: &TempDir) -> PeerStorage {
let kv_db =
Arc::new(new_engine(path.path().to_str().unwrap(), None, ALL_CFS, None).unwrap());
let raft_path = path.path().join(Path::new("raft"));
let raft_db =
Arc::new(new_engine(raft_path.to_str().unwrap(), None, &[CF_DEFAULT], None).unwrap());
let shared_block_cache = false;
let engines = Engines::new(kv_db, raft_db, shared_block_cache);
bootstrap_store(&engines, 1, 1).unwrap();
let region = initial_region(1, 1, 1);
prepare_bootstrap_cluster(&engines, ®ion).unwrap();
PeerStorage::new(engines, ®ion, sched, 0, "".to_owned()).unwrap()
}
struct ReadyContext {
kv_wb: RocksWriteBatch,
raft_wb: RocksWriteBatch,
sync_log: bool,
}
impl ReadyContext {
fn new(s: &PeerStorage) -> ReadyContext {
ReadyContext {
kv_wb: s.engines.kv.c().write_batch(),
raft_wb: s.engines.raft.c().write_batch(),
sync_log: false,
}
}
}
impl HandleRaftReadyContext for ReadyContext {
fn kv_wb(&self) -> &RocksWriteBatch {
&self.kv_wb
}
fn kv_wb_mut(&mut self) -> &mut RocksWriteBatch {
&mut self.kv_wb
}
fn raft_wb(&self) -> &RocksWriteBatch {
&self.raft_wb
}
fn raft_wb_mut(&mut self) -> &mut RocksWriteBatch {
&mut self.raft_wb
}
fn sync_log(&self) -> bool {
self.sync_log
}
fn set_sync_log(&mut self, sync: bool) {
self.sync_log = sync;
}
}
fn new_storage_from_ents(
sched: Scheduler<RegionTask>,
path: &TempDir,
ents: &[Entry],
) -> PeerStorage {
let mut store = new_storage(sched, path);
let mut kv_wb = store.engines.kv.c().write_batch();
let mut ctx = InvokeContext::new(&store);
let mut ready_ctx = ReadyContext::new(&store);
store.append(&mut ctx, &ents[1..], &mut ready_ctx).unwrap();
ctx.apply_state
.mut_truncated_state()
.set_index(ents[0].get_index());
ctx.apply_state
.mut_truncated_state()
.set_term(ents[0].get_term());
ctx.apply_state
.set_applied_index(ents.last().unwrap().get_index());
ctx.save_apply_state_to(&mut kv_wb).unwrap();
store.engines.raft.c().write(&ready_ctx.raft_wb).unwrap();
store.engines.kv.c().write(&kv_wb).unwrap();
store.raft_state = ctx.raft_state;
store.apply_state = ctx.apply_state;
store
}
fn append_ents(store: &mut PeerStorage, ents: &[Entry]) {
let mut ctx = InvokeContext::new(store);
let mut ready_ctx = ReadyContext::new(store);
store.append(&mut ctx, ents, &mut ready_ctx).unwrap();
ctx.save_raft_state_to(&mut ready_ctx.raft_wb).unwrap();
store.engines.raft.c().write(&ready_ctx.raft_wb).unwrap();
store.raft_state = ctx.raft_state;
}
fn validate_cache(store: &PeerStorage, exp_ents: &[Entry]) {
assert_eq!(store.cache.cache, exp_ents);
for e in exp_ents {
let key = keys::raft_log_key(store.get_region_id(), e.get_index());
let bytes = store.engines.raft.get(&key).unwrap().unwrap();
let mut entry = Entry::default();
entry.merge_from_bytes(&bytes).unwrap();
assert_eq!(entry, *e);
}
}
fn new_entry(index: u64, term: u64) -> Entry {
let mut e = Entry::default();
e.set_index(index);
e.set_term(term);
e
}
fn size_of<T: protobuf::Message>(m: &T) -> u32 {
m.compute_size()
}
#[test]
fn test_storage_term() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let mut tests = vec![
(2, Err(RaftError::Store(StorageError::Compacted))),
(3, Ok(3)),
(4, Ok(4)),
(5, Ok(5)),
];
for (i, (idx, wterm)) in tests.drain(..).enumerate() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let store = new_storage_from_ents(sched, &td, &ents);
let t = store.term(idx);
if wterm != t {
panic!("#{}: expect res {:?}, got {:?}", i, wterm, t);
}
}
}
fn get_meta_key_count(store: &PeerStorage) -> usize {
let region_id = store.get_region_id();
let mut count = 0;
let (meta_start, meta_end) = (
keys::region_meta_prefix(region_id),
keys::region_meta_prefix(region_id + 1),
);
store
.engines
.kv
.scan_cf(CF_RAFT, &meta_start, &meta_end, false, |_, _| {
count += 1;
Ok(true)
})
.unwrap();
let (raft_start, raft_end) = (
keys::region_raft_prefix(region_id),
keys::region_raft_prefix(region_id + 1),
);
store
.engines
.kv
.scan_cf(CF_RAFT, &raft_start, &raft_end, false, |_, _| {
count += 1;
Ok(true)
})
.unwrap();
store
.engines
.raft
.scan(&raft_start, &raft_end, false, |_, _| {
count += 1;
Ok(true)
})
.unwrap();
count
}
#[test]
fn test_storage_clear_meta() {
let td = Builder::new().prefix("tikv-store").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut store = new_storage_from_ents(sched, &td, &[new_entry(3, 3), new_entry(4, 4)]);
append_ents(&mut store, &[new_entry(5, 5), new_entry(6, 6)]);
assert_eq!(6, get_meta_key_count(&store));
let kv_wb = store.engines.kv.c().write_batch();
let raft_wb = store.engines.raft.c().write_batch();
store.clear_meta(&kv_wb, &raft_wb).unwrap();
store.engines.kv.c().write(&kv_wb).unwrap();
store.engines.raft.c().write(&raft_wb).unwrap();
assert_eq!(0, get_meta_key_count(&store));
}
#[test]
fn test_storage_entries() {
let ents = vec![
new_entry(3, 3),
new_entry(4, 4),
new_entry(5, 5),
new_entry(6, 6),
];
let max_u64 = u64::max_value();
let mut tests = vec![
(
2,
6,
max_u64,
Err(RaftError::Store(StorageError::Compacted)),
),
(
3,
4,
max_u64,
Err(RaftError::Store(StorageError::Compacted)),
),
(4, 5, max_u64, Ok(vec![new_entry(4, 4)])),
(4, 6, max_u64, Ok(vec![new_entry(4, 4), new_entry(5, 5)])),
(
4,
7,
max_u64,
Ok(vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 6)]),
),
// even if maxsize is zero, the first entry should be returned
(4, 7, 0, Ok(vec![new_entry(4, 4)])),
// limit to 2
(
4,
7,
u64::from(size_of(&ents[1]) + size_of(&ents[2])),
Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
),
(
4,
7,
u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3]) / 2),
Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
),
(
4,
7,
u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3]) - 1),
Ok(vec![new_entry(4, 4), new_entry(5, 5)]),
),
// all
(
4,
7,
u64::from(size_of(&ents[1]) + size_of(&ents[2]) + size_of(&ents[3])),
Ok(vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 6)]),
),
];
for (i, (lo, hi, maxsize, wentries)) in tests.drain(..).enumerate() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let store = new_storage_from_ents(sched, &td, &ents);
let e = store.entries(lo, hi, maxsize);
if e != wentries {
panic!("#{}: expect entries {:?}, got {:?}", i, wentries, e);
}
}
}
// last_index and first_index are not mutated by PeerStorage on its own,
// so we don't test them here.
#[test]
fn test_storage_compact() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let mut tests = vec![
(2, Err(RaftError::Store(StorageError::Compacted))),
(3, Err(RaftError::Store(StorageError::Compacted))),
(4, Ok(())),
(5, Ok(())),
];
for (i, (idx, werr)) in tests.drain(..).enumerate() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let store = new_storage_from_ents(sched, &td, &ents);
let mut ctx = InvokeContext::new(&store);
let res = store
.term(idx)
.map_err(From::from)
.and_then(|term| compact_raft_log(&store.tag, &mut ctx.apply_state, idx, term));
// TODO check exact error type after refactoring error.
if res.is_err() ^ werr.is_err() {
panic!("#{}: want {:?}, got {:?}", i, werr, res);
}
if res.is_ok() {
let mut kv_wb = store.engines.kv.c().write_batch();
ctx.save_apply_state_to(&mut kv_wb).unwrap();
store.engines.kv.c().write(&kv_wb).unwrap();
}
}
}
#[test]
fn test_storage_create_snapshot() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let mut cs = ConfState::default();
cs.set_voters(vec![1, 2, 3]);
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let snap_dir = Builder::new().prefix("snap_dir").tempdir().unwrap();
let mgr = SnapManager::new(snap_dir.path().to_str().unwrap(), None);
let mut worker = Worker::new("region-worker");
let sched = worker.scheduler();
let mut s = new_storage_from_ents(sched.clone(), &td, &ents);
let (router, _) = mpsc::sync_channel(100);
let runner = RegionRunner::new(
s.engines.clone(),
mgr,
0,
true,
Duration::from_secs(0),
CoprocessorHost::default(),
router,
);
worker.start(runner).unwrap();
let snap = s.snapshot(0);
let unavailable = RaftError::Store(StorageError::SnapshotTemporarilyUnavailable);
assert_eq!(snap.unwrap_err(), unavailable);
assert_eq!(*s.snap_tried_cnt.borrow(), 1);
let gen_task = s.gen_snap_task.borrow_mut().take().unwrap();
gen_task
.generate_and_schedule_snapshot(&s.engines, &sched)
.unwrap();
let snap = match *s.snap_state.borrow() {
SnapState::Generating(ref rx) => rx.recv_timeout(Duration::from_secs(3)).unwrap(),
ref s => panic!("unexpected state: {:?}", s),
};
assert_eq!(snap.get_metadata().get_index(), 5);
assert_eq!(snap.get_metadata().get_term(), 5);
assert!(!snap.get_data().is_empty());
let mut data = RaftSnapshotData::default();
protobuf::Message::merge_from_bytes(&mut data, snap.get_data()).unwrap();
assert_eq!(data.get_region().get_id(), 1);
assert_eq!(data.get_region().get_peers().len(), 1);
let (tx, rx) = channel();
s.set_snap_state(SnapState::Generating(rx));
// Empty channel should cause snapshot call to wait.
assert_eq!(s.snapshot(0).unwrap_err(), unavailable);
assert_eq!(*s.snap_tried_cnt.borrow(), 1);
tx.send(snap.clone()).unwrap();
assert_eq!(s.snapshot(0), Ok(snap.clone()));
assert_eq!(*s.snap_tried_cnt.borrow(), 0);
let (tx, rx) = channel();
tx.send(snap.clone()).unwrap();
s.set_snap_state(SnapState::Generating(rx));
// stale snapshot should be abandoned, snapshot index < request index.
assert_eq!(
s.snapshot(snap.get_metadata().get_index() + 1).unwrap_err(),
unavailable
);
assert_eq!(*s.snap_tried_cnt.borrow(), 1);
// Drop the task.
let _ = s.gen_snap_task.borrow_mut().take().unwrap();
let mut ctx = InvokeContext::new(&s);
let mut kv_wb = s.engines.kv.c().write_batch();
let mut ready_ctx = ReadyContext::new(&s);
s.append(
&mut ctx,
&[new_entry(6, 5), new_entry(7, 5)],
&mut ready_ctx,
)
.unwrap();
let mut hs = HardState::default();
hs.set_commit(7);
hs.set_term(5);
ctx.raft_state.set_hard_state(hs);
ctx.raft_state.set_last_index(7);
ctx.apply_state.set_applied_index(7);
ctx.save_raft_state_to(&mut ready_ctx.raft_wb).unwrap();
ctx.save_apply_state_to(&mut kv_wb).unwrap();
s.engines.kv.c().write(&kv_wb).unwrap();
s.engines.raft.c().write(&ready_ctx.raft_wb).unwrap();
s.apply_state = ctx.apply_state;
s.raft_state = ctx.raft_state;
ctx = InvokeContext::new(&s);
let term = s.term(7).unwrap();
compact_raft_log(&s.tag, &mut ctx.apply_state, 7, term).unwrap();
kv_wb = s.engines.kv.c().write_batch();
ctx.save_apply_state_to(&mut kv_wb).unwrap();
s.engines.kv.c().write(&kv_wb).unwrap();
s.apply_state = ctx.apply_state;
let (tx, rx) = channel();
tx.send(snap).unwrap();
s.set_snap_state(SnapState::Generating(rx));
*s.snap_tried_cnt.borrow_mut() = 1;
// stale snapshot should be abandoned, snapshot index < truncated index.
assert_eq!(s.snapshot(0).unwrap_err(), unavailable);
assert_eq!(*s.snap_tried_cnt.borrow(), 1);
let gen_task = s.gen_snap_task.borrow_mut().take().unwrap();
gen_task
.generate_and_schedule_snapshot(&s.engines, &sched)
.unwrap();
match *s.snap_state.borrow() {
SnapState::Generating(ref rx) => {
rx.recv_timeout(Duration::from_secs(3)).unwrap();
worker.stop().unwrap().join().unwrap();
match rx.recv_timeout(Duration::from_secs(3)) {
Err(RecvTimeoutError::Disconnected) => {}
res => panic!("unexpected result: {:?}", res),
}
}
ref s => panic!("unexpected state {:?}", s),
}
// Disconnected channel should trigger another try.
assert_eq!(s.snapshot(0).unwrap_err(), unavailable);
let gen_task = s.gen_snap_task.borrow_mut().take().unwrap();
gen_task
.generate_and_schedule_snapshot(&s.engines, &sched)
.unwrap_err();
assert_eq!(*s.snap_tried_cnt.borrow(), 2);
for cnt in 2..super::MAX_SNAP_TRY_CNT {
// A failed scheduled job should trigger another try.
assert_eq!(s.snapshot(0).unwrap_err(), unavailable);
let gen_task = s.gen_snap_task.borrow_mut().take().unwrap();
gen_task
.generate_and_schedule_snapshot(&s.engines, &sched)
.unwrap_err();
assert_eq!(*s.snap_tried_cnt.borrow(), cnt + 1);
}
// After retrying too many times, it should report a different error.
match s.snapshot(0) {
Err(RaftError::Store(StorageError::Other(_))) => {}
res => panic!("unexpected res: {:?}", res),
}
}
#[test]
fn test_storage_append() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let mut tests = vec![
(
vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)],
vec![new_entry(4, 4), new_entry(5, 5)],
),
(
vec![new_entry(3, 3), new_entry(4, 6), new_entry(5, 6)],
vec![new_entry(4, 6), new_entry(5, 6)],
),
(
vec![
new_entry(3, 3),
new_entry(4, 4),
new_entry(5, 5),
new_entry(6, 5),
],
vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 5)],
),
// truncate incoming entries, truncate the existing entries and append
(
vec![new_entry(2, 3), new_entry(3, 3), new_entry(4, 5)],
vec![new_entry(4, 5)],
),
// truncate the existing entries and append
(vec![new_entry(4, 5)], vec![new_entry(4, 5)]),
// direct append
(
vec![new_entry(6, 5)],
vec![new_entry(4, 4), new_entry(5, 5), new_entry(6, 5)],
),
];
for (i, (entries, wentries)) in tests.drain(..).enumerate() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut store = new_storage_from_ents(sched, &td, &ents);
append_ents(&mut store, &entries);
let li = store.last_index();
let actual_entries = store.entries(4, li + 1, u64::max_value()).unwrap();
if actual_entries != wentries {
panic!("#{}: want {:?}, got {:?}", i, wentries, actual_entries);
}
}
}
#[test]
fn test_storage_cache_fetch() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut store = new_storage_from_ents(sched, &td, &ents);
store.cache.cache.clear();
// empty cache should fetch data from rocksdb directly.
let mut res = store.entries(4, 6, u64::max_value()).unwrap();
assert_eq!(*res, ents[1..]);
let entries = vec![new_entry(6, 5), new_entry(7, 5)];
append_ents(&mut store, &entries);
validate_cache(&store, &entries);
// direct cache access
res = store.entries(6, 8, u64::max_value()).unwrap();
assert_eq!(res, entries);
// size limit should be supported correctly.
res = store.entries(4, 8, 0).unwrap();
assert_eq!(res, vec![new_entry(4, 4)]);
let mut size = ents[1..].iter().map(|e| u64::from(e.compute_size())).sum();
res = store.entries(4, 8, size).unwrap();
let mut exp_res = ents[1..].to_vec();
assert_eq!(res, exp_res);
for e in &entries {
size += u64::from(e.compute_size());
exp_res.push(e.clone());
res = store.entries(4, 8, size).unwrap();
assert_eq!(res, exp_res);
}
// range limit should be supported correctly.
for low in 4..9 {
for high in low..9 {
let res = store.entries(low, high, u64::max_value()).unwrap();
assert_eq!(*res, exp_res[low as usize - 4..high as usize - 4]);
}
}
}
#[test]
fn test_storage_cache_update() {
let ents = vec![new_entry(3, 3), new_entry(4, 4), new_entry(5, 5)];
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut store = new_storage_from_ents(sched, &td, &ents);
store.cache.cache.clear();
// initial cache
let mut entries = vec![new_entry(6, 5), new_entry(7, 5)];
append_ents(&mut store, &entries);
validate_cache(&store, &entries);
// rewrite
entries = vec![new_entry(6, 6), new_entry(7, 6)];
append_ents(&mut store, &entries);
validate_cache(&store, &entries);
// rewrite old entry
entries = vec![new_entry(5, 6), new_entry(6, 6)];
append_ents(&mut store, &entries);
validate_cache(&store, &entries);
// partial rewrite
entries = vec![new_entry(6, 7), new_entry(7, 7)];
append_ents(&mut store, &entries);
let mut exp_res = vec![new_entry(5, 6), new_entry(6, 7), new_entry(7, 7)];
validate_cache(&store, &exp_res);
// direct append
entries = vec![new_entry(8, 7), new_entry(9, 7)];
append_ents(&mut store, &entries);
exp_res.extend_from_slice(&entries);
validate_cache(&store, &exp_res);
// rewrite middle
entries = vec![new_entry(7, 8)];
append_ents(&mut store, &entries);
exp_res.truncate(2);
exp_res.push(new_entry(7, 8));
validate_cache(&store, &exp_res);
let cap = MAX_CACHE_CAPACITY as u64;
// result overflow
entries = (3..=cap).map(|i| new_entry(i + 5, 8)).collect();
append_ents(&mut store, &entries);
exp_res.remove(0);
exp_res.extend_from_slice(&entries);
validate_cache(&store, &exp_res);
// input overflow
entries = (0..=cap).map(|i| new_entry(i + cap + 6, 8)).collect();
append_ents(&mut store, &entries);
exp_res = entries[entries.len() - cap as usize..].to_vec();
validate_cache(&store, &exp_res);
// compact
store.compact_to(cap + 10);
exp_res = (cap + 10..cap * 2 + 7).map(|i| new_entry(i, 8)).collect();
validate_cache(&store, &exp_res);
// compact shrink
assert!(store.cache.cache.capacity() >= cap as usize);
store.compact_to(cap * 2);
exp_res = (cap * 2..cap * 2 + 7).map(|i| new_entry(i, 8)).collect();
validate_cache(&store, &exp_res);
assert!(store.cache.cache.capacity() < cap as usize);
// append shrink
entries = (0..=cap).map(|i| new_entry(i, 8)).collect();
append_ents(&mut store, &entries);
assert!(store.cache.cache.capacity() >= cap as usize);
append_ents(&mut store, &[new_entry(6, 8)]);
exp_res = (1..7).map(|i| new_entry(i, 8)).collect();
validate_cache(&store, &exp_res);
assert!(store.cache.cache.capacity() < cap as usize);
// compact all
store.compact_to(cap + 2);
validate_cache(&store, &[]);
// invalid compaction should be ignored.
store.compact_to(cap);
}
#[test]
fn test_storage_apply_snapshot() {
let ents = vec![
new_entry(3, 3),
new_entry(4, 4),
new_entry(5, 5),
new_entry(6, 6),
];
let mut cs = ConfState::default();
cs.set_voters(vec![1, 2, 3]);
let td1 = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let snap_dir = Builder::new().prefix("snap").tempdir().unwrap();
let mgr = SnapManager::new(snap_dir.path().to_str().unwrap(), None);
let mut worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let s1 = new_storage_from_ents(sched.clone(), &td1, &ents);
let (router, _) = mpsc::sync_channel(100);
let runner = RegionRunner::new(
s1.engines.clone(),
mgr,
0,
true,
Duration::from_secs(0),
CoprocessorHost::default(),
router,
);
worker.start(runner).unwrap();
assert!(s1.snapshot(0).is_err());
let gen_task = s1.gen_snap_task.borrow_mut().take().unwrap();
gen_task
.generate_and_schedule_snapshot(&s1.engines, &sched)
.unwrap();
let snap1 = match *s1.snap_state.borrow() {
SnapState::Generating(ref rx) => rx.recv_timeout(Duration::from_secs(3)).unwrap(),
ref s => panic!("unexpected state: {:?}", s),
};
assert_eq!(s1.truncated_index(), 3);
assert_eq!(s1.truncated_term(), 3);
worker.stop().unwrap().join().unwrap();
let td2 = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let mut s2 = new_storage(sched.clone(), &td2);
assert_eq!(s2.first_index(), s2.applied_index() + 1);
let mut ctx = InvokeContext::new(&s2);
assert_ne!(ctx.last_term, snap1.get_metadata().get_term());
let kv_wb = s2.engines.kv.c().write_batch();
let raft_wb = s2.engines.raft.c().write_batch();
s2.apply_snapshot(&mut ctx, &snap1, &kv_wb, &raft_wb)
.unwrap();
assert_eq!(ctx.last_term, snap1.get_metadata().get_term());
assert_eq!(ctx.apply_state.get_applied_index(), 6);
assert_eq!(ctx.raft_state.get_last_index(), 6);
assert_eq!(ctx.apply_state.get_truncated_state().get_index(), 6);
assert_eq!(ctx.apply_state.get_truncated_state().get_term(), 6);
assert_eq!(s2.first_index(), s2.applied_index() + 1);
validate_cache(&s2, &[]);
let td3 = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let ents = &[new_entry(3, 3), new_entry(4, 3)];
let mut s3 = new_storage_from_ents(sched, &td3, ents);
validate_cache(&s3, &ents[1..]);
let mut ctx = InvokeContext::new(&s3);
assert_ne!(ctx.last_term, snap1.get_metadata().get_term());
let kv_wb = s3.engines.kv.c().write_batch();
let raft_wb = s3.engines.raft.c().write_batch();
s3.apply_snapshot(&mut ctx, &snap1, &kv_wb, &raft_wb)
.unwrap();
assert_eq!(ctx.last_term, snap1.get_metadata().get_term());
assert_eq!(ctx.apply_state.get_applied_index(), 6);
assert_eq!(ctx.raft_state.get_last_index(), 6);
assert_eq!(ctx.apply_state.get_truncated_state().get_index(), 6);
assert_eq!(ctx.apply_state.get_truncated_state().get_term(), 6);
validate_cache(&s3, &[]);
}
#[test]
fn test_canceling_snapshot() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut s = new_storage(sched, &td);
// PENDING can be canceled directly.
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_PENDING,
))));
assert!(s.cancel_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::ApplyAborted);
// RUNNING can't be canceled directly.
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_RUNNING,
))));
assert!(!s.cancel_applying_snap());
assert_eq!(
*s.snap_state.borrow(),
SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_CANCELLING)))
);
// CANCELLING can't be canceled again.
assert!(!s.cancel_applying_snap());
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_CANCELLED,
))));
// A canceled snapshot can be canceled directly.
assert!(s.cancel_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::ApplyAborted);
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_FINISHED,
))));
assert!(s.cancel_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::Relax);
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_FAILED,
))));
let res = panic_hook::recover_safe(|| s.cancel_applying_snap());
assert!(res.is_err());
}
#[test]
fn test_try_finish_snapshot() {
let td = Builder::new().prefix("tikv-store-test").tempdir().unwrap();
let worker = Worker::new("snap-manager");
let sched = worker.scheduler();
let mut s = new_storage(sched, &td);
// PENDING can be finished.
let mut snap_state = SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_PENDING)));
s.snap_state = RefCell::new(snap_state);
assert!(s.check_applying_snap());
assert_eq!(
*s.snap_state.borrow(),
SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_PENDING)))
);
// RUNNING can't be finished.
snap_state = SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_RUNNING)));
s.snap_state = RefCell::new(snap_state);
assert!(s.check_applying_snap());
assert_eq!(
*s.snap_state.borrow(),
SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_RUNNING)))
);
snap_state = SnapState::Applying(Arc::new(AtomicUsize::new(JOB_STATUS_CANCELLED)));
s.snap_state = RefCell::new(snap_state);
assert!(!s.check_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::ApplyAborted);
// ApplyAborted is not applying snapshot.
assert!(!s.check_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::ApplyAborted);
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_FINISHED,
))));
assert!(!s.check_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::Relax);
// Relax is not applying snapshot.
assert!(!s.check_applying_snap());
assert_eq!(*s.snap_state.borrow(), SnapState::Relax);
s.snap_state = RefCell::new(SnapState::Applying(Arc::new(AtomicUsize::new(
JOB_STATUS_FAILED,
))));
let res = panic_hook::recover_safe(|| s.check_applying_snap());
assert!(res.is_err());
}
#[test]
fn test_sync_log() {
let mut tbl = vec![];
// Do not sync empty entries.
tbl.push((Entry::default(), false));
// Sync if sync_log is set.
let mut e = Entry::default();
e.set_sync_log(true);
tbl.push((e, true));
// Sync if context is marked sync.
let context = ProposalContext::SYNC_LOG.to_vec();
let mut e = Entry::default();
e.set_context(context);
tbl.push((e.clone(), true));
// Sync if sync_log is set and context is marked sync_log.
e.set_sync_log(true);
tbl.push((e, true));
for (e, sync) in tbl {
assert_eq!(get_sync_log_from_entry(&e), sync, "{:?}", e);
}
}
} | |
config.js | import React, { Component } from "react";
import gql from "graphql-tag.macro";
import { graphql, withApollo } from "react-apollo";
import { Button } from "helpers/reactstrap";
import SubscriptionHelper from "helpers/subscriptionHelper";
const TACTICALMAP_SUB = gql`
subscription TacticalMapUpdate {
tacticalMapsUpdate {
id
name
flight {
id
}
frozen
template
}
}
`;
class | extends Component {
state = {
tacticalMapId: null
};
selectTactical = tacticalMapId => {
this.setState({ tacticalMapId });
};
selectFlightTactical = tacticalMapId => {
let { data, updateData } = this.props;
data = JSON.parse(data);
updateData(JSON.stringify(Object.assign({}, data, { tacticalMapId })));
};
freezeTactical = evt => {
const mutation = gql`
mutation FreezeTacticalMap($id: ID!, $freeze: Boolean!) {
freezeTacticalMap(id: $id, freeze: $freeze)
}
`;
const data = JSON.parse(this.props.data);
const flightTacticalId = data.tacticalMapId;
const variables = {
id: flightTacticalId,
freeze: evt.target.checked
};
this.props.client.mutate({
mutation,
variables
});
};
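// Load the selected saved map into the current flight. The mutation returns
// the id of the resulting flight map, which is stored in the widget data via
// selectFlightTactical.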
loadTactical = () => {
const mutation = gql`
mutation LoadTactical($id: ID!, $flightId: ID!) {
loadTacticalMap(id: $id, flightId: $flightId)
}
`;
const variables = {
id: this.state.tacticalMapId,
flightId: this.props.flightId
};
this.props.client
.mutate({
mutation,
variables
})
.then(res => this.selectFlightTactical(res.data.loadTacticalMap));
};
render() {
const { tacticalData } = this.props;
if (tacticalData.loading || !tacticalData.tacticalMaps) return null;
const { tacticalMaps } = this.props.tacticalData;
const { tacticalMapId } = this.state;
const data = JSON.parse(this.props.data);
const flightTacticalId = data.tacticalMapId;
return (
<div className="tacticalmap-config">
<SubscriptionHelper
subscribe={() =>
this.props.tacticalData.subscribeToMore({
document: TACTICALMAP_SUB,
updateQuery: (previousResult, { subscriptionData }) => {
return Object.assign({}, previousResult, {
tacticalMaps: subscriptionData.data.tacticalMapsUpdate
});
}
})
}
/>
<p>Saved Maps</p>
<ul className="saved-list">
{tacticalMaps
.filter(t => t.template)
.map(t => (
<li
key={t.id}
className={t.id === tacticalMapId ? "selected" : ""}
onClick={() => this.selectTactical(t.id)}
>
{t.name}
</li>
))}
</ul>
<Button color="primary" size="sm" onClick={this.loadTactical}>
Load Tactical
</Button>
<div>
<p>Flight Maps</p>
{flightTacticalId && (
<label>
<input
type="checkbox"
checked={
tacticalMaps.find(t => t.id === flightTacticalId) &&
tacticalMaps.find(t => t.id === flightTacticalId).frozen
}
onChange={this.freezeTactical}
/>{" "}
Frozen
</label>
)}
<ul className="saved-list">
{tacticalMaps
.filter(t => t.flight && t.flight.id === this.props.flightId)
.map(t => (
<li
key={t.id}
className={t.id === flightTacticalId ? "selected" : ""}
onClick={() => this.selectFlightTactical(t.id)}
>
{t.name}
</li>
))}
</ul>
<Button color="success" size="sm">
Save as Template Map
</Button>
</div>
<p>
You can click and drag contacts or use WASD/IJKL to move contacts. Use
the tactical map config screen to update tactical maps further.
</p>
</div>
);
}
}
const TACTICALMAP_QUERY = gql`
query TacticalMap {
tacticalMaps {
id
name
flight {
id
}
frozen
template
}
}
`;
export default graphql(TACTICALMAP_QUERY, {
name: "tacticalData"
})(withApollo(TacticalMapConfig));
| TacticalMapConfig |
SentenceLabelDataset.py | from torch.utils.data import Dataset
from typing import List
import bisect
import torch
import logging
import numpy as np
from tqdm import tqdm
from .. import SentenceTransformer
from ..readers.InputExample import InputExample
from multiprocessing import Pool, cpu_count
import multiprocessing
class | (Dataset):
"""
Dataset for training with triplet loss.
This dataset takes a list of sentences grouped by their label and uses this grouping to dynamically select a
positive example from the same group and a negative example from the other sentences for a selected anchor sentence.
This dataset should be used in combination with dataset_reader.LabelSentenceReader
One iteration over this dataset selects every sentence as anchor once.
This also uses smart batching like SentenceDataset.
"""
def __init__(self, examples: List[InputExample], model: SentenceTransformer, provide_positive: bool = True,
provide_negative: bool = True,
parallel_tokenization: bool = True,
max_processes: int = 4,
chunk_size: int = 5000):
"""
Converts input examples to a SentenceLabelDataset usable to train the model with
SentenceTransformer.smart_batching_collate as the collate_fn for the DataLoader
Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
and should be used in combination with dataset_reader.LabelSentenceReader.
Labels with only one example are ignored.
smart_batching_collate as collate_fn is required because it transforms the tokenized texts to the tensors.
:param examples:
the input examples for the training
:param model
the Sentence BERT model for the conversion
:param provide_positive:
set this to False, if you don't need a positive example (e.g. for BATCH_HARD_TRIPLET_LOSS).
:param provide_negative:
set this to False, if you don't need a negative example (e.g. for BATCH_HARD_TRIPLET_LOSS
or MULTIPLE_NEGATIVES_RANKING_LOSS).
:param parallel_tokenization
If true, multiple processes will be started for the tokenization
:param max_processes
Maximum number of processes started for tokenization. Cannot be larger than cpu_count()
:param chunk_size
chunk_size examples are sent to each process at a time. Larger values increase overall tokenization speed
"""
self.model = model
self.groups_right_border = []
self.grouped_inputs = []
self.grouped_labels = []
self.num_labels = 0
self.max_processes = min(max_processes, cpu_count())
self.chunk_size = chunk_size
self.parallel_tokenization = parallel_tokenization
if self.parallel_tokenization:
if multiprocessing.get_start_method() != 'fork':
logging.info("Parallel tokenization is only available on Unix systems which allow to fork processes. Fall back to sequential tokenization")
self.parallel_tokenization = False
self.convert_input_examples(examples, model)
self.idxs = np.arange(len(self.grouped_inputs))
self.provide_positive = provide_positive
self.provide_negative = provide_negative
def convert_input_examples(self, examples: List[InputExample], model: SentenceTransformer):
"""
Converts input examples to a SentenceLabelDataset.
Assumes only one sentence per InputExample and labels as integers from 0 to max_num_labels
and should be used in combination with dataset_reader.LabelSentenceReader.
Labels with only one example are ignored.
:param examples:
the input examples for the training
:param model
the Sentence Transformer model for the conversion
"""
inputs = []
labels = []
label_sent_mapping = {}
too_long = 0
label_type = None
logging.info("Start tokenization")
if not self.parallel_tokenization or self.max_processes == 1 or len(examples) <= self.chunk_size:
tokenized_texts = [self.tokenize_example(example) for example in examples]
else:
logging.info("Use multi-process tokenization with {} processes".format(self.max_processes))
self.model.to('cpu')
with Pool(self.max_processes) as p:
tokenized_texts = list(p.imap(self.tokenize_example, examples, chunksize=self.chunk_size))
# Group examples and labels
# Add examples with the same label to the same dict
for ex_index, example in enumerate(tqdm(examples, desc="Convert dataset")):
if label_type is None:
if isinstance(example.label, int):
label_type = torch.long
elif isinstance(example.label, float):
label_type = torch.float
tokenized_text = tokenized_texts[ex_index][0]
if hasattr(model, 'max_seq_length') and model.max_seq_length is not None and model.max_seq_length > 0 and len(tokenized_text) > model.max_seq_length:
too_long += 1
if example.label in label_sent_mapping:
label_sent_mapping[example.label].append(ex_index)
else:
label_sent_mapping[example.label] = [ex_index]
inputs.append(tokenized_text)
labels.append(example.label)
# Group sentences, such that sentences with the same label
# are beside each other. Only take labels with at least 2 examples
distinct_labels = list(label_sent_mapping.keys())
for i in range(len(distinct_labels)):
label = distinct_labels[i]
if len(label_sent_mapping[label]) >= 2:
self.grouped_inputs.extend([inputs[j] for j in label_sent_mapping[label]])
self.grouped_labels.extend([labels[j] for j in label_sent_mapping[label]])
self.groups_right_border.append(len(self.grouped_inputs)) #At which position does this label group / bucket end?
self.num_labels += 1
self.grouped_labels = torch.tensor(self.grouped_labels, dtype=label_type)
logging.info("Num sentences: %d" % (len(self.grouped_inputs)))
logging.info("Sentences longer than max_seqence_length: {}".format(too_long))
logging.info("Number of labels with >1 examples: {}".format(len(distinct_labels)))
def tokenize_example(self, example):
if example.texts_tokenized is not None:
return example.texts_tokenized
return [self.model.tokenize(text) for text in example.texts]
def __getitem__(self, item):
if not self.provide_positive and not self.provide_negative:
return [self.grouped_inputs[item]], self.grouped_labels[item]
# Anchor element
anchor = self.grouped_inputs[item]
# Check start and end position for this label in our list of grouped sentences
group_idx = bisect.bisect_right(self.groups_right_border, item)
left_border = 0 if group_idx == 0 else self.groups_right_border[group_idx - 1]
right_border = self.groups_right_border[group_idx]
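# Sentences in [left_border, right_border) share the anchor's label: a positive
# is drawn from this range (excluding the anchor itself) and a negative is drawn
# from outside of it.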
if self.provide_positive:
positive_item_idx = np.random.choice(np.concatenate([self.idxs[left_border:item], self.idxs[item + 1:right_border]]))
positive = self.grouped_inputs[positive_item_idx]
else:
positive = []
if self.provide_negative:
negative_item_idx = np.random.choice(np.concatenate([self.idxs[0:left_border], self.idxs[right_border:]]))
negative = self.grouped_inputs[negative_item_idx]
else:
negative = []
return [anchor, positive, negative], self.grouped_labels[item]
def __len__(self):
return len(self.grouped_inputs) | SentenceLabelDataset |
flashloan.rs | use cosmwasm_std::testing::{mock_env, mock_info};
use terraswap::asset::{Asset, AssetInfo};
use white_whale::denom::LUNA_DENOM;
use white_whale::luna_vault::msg::*;
use crate::contract::execute;
use crate::error::LunaVaultError;
use crate::state::STATE;
use crate::tests::common::TEST_CREATOR;
use crate::tests::instantiate::{mock_instantiate, mock_instantiate_no_asset_info};
use crate::tests::mock_querier::mock_dependencies;
#[test]
fn | () {
let mut deps = mock_dependencies(&[]);
mock_instantiate(deps.as_mut());
let whitelisted_contracts = STATE
.load(deps.as_mut().storage)
.unwrap()
.whitelisted_contracts;
assert_eq!(0, whitelisted_contracts.len());
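// With no whitelisted contracts, a flash loan request from this sender should be rejected.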
let msg = ExecuteMsg::FlashLoan {
payload: FlashLoanPayload {
requested_asset: Asset {
info: AssetInfo::NativeToken {
denom: LUNA_DENOM.to_string(),
},
amount: Default::default(),
},
callback: Default::default(),
},
};
let info = mock_info(TEST_CREATOR, &[]);
let res = execute(deps.as_mut(), mock_env(), info, msg);
match res {
Err(LunaVaultError::NotWhitelisted {}) => (),
_ => panic!("Must return LunaVaultError::NotWhitelisted"),
}
}
| unsuccessful_flashloan_not_whitelisted |
ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
from mmcv.parallel import is_module_wrapper
from mmcv.runner.hooks import HOOKS, Hook
class BaseEMAHook(Hook):
"""Exponential Moving Average Hook.
Use an exponential moving average (EMA) of all model parameters during the
training process. Every parameter has an EMA backup which is updated by the
formula below. EMAHook takes priority over EvalHook and CheckpointHook. Note
that the original model parameters are saved in the ema fields after training.
Args:
momentum (float): The momentum used for updating ema parameter.
EMA parameters are updated with the formula:
`ema_param = (1-momentum) * ema_param + momentum * cur_param`.
Defaults to 0.0002.
skip_buffers (bool): Whether to skip the model buffers, such as
batchnorm running stats (running_mean, running_var), so that the EMA
operation is not performed on them. Defaults to False.
interval (int): Update ema parameter every interval iteration.
Defaults to 1.
resume_from (str, optional): The checkpoint path. Defaults to None.
momentum_fun (func, optional): The function to change momentum
during early iterations (including warmup) to help early training.
If None, `momentum` is used as a constant. Defaults to None.
"""
def __init__(self,
momentum=0.0002,
interval=1,
skip_buffers=False,
resume_from=None,
momentum_fun=None):
assert 0 < momentum < 1
self.momentum = momentum
self.skip_buffers = skip_buffers
self.interval = interval
self.checkpoint = resume_from
self.momentum_fun = momentum_fun
def before_run(self, runner):
"""To resume model with it's ema parameters more friendly.
Register ema parameter as ``named_buffer`` to model.
"""
model = runner.model
if is_module_wrapper(model):
model = model.module
self.param_ema_buffer = {}
if self.skip_buffers:
self.model_parameters = dict(model.named_parameters())
else:
self.model_parameters = model.state_dict()
for name, value in self.model_parameters.items():
# "." is not allowed in module's buffer name
buffer_name = f"ema_{name.replace('.', '_')}"
self.param_ema_buffer[name] = buffer_name
model.register_buffer(buffer_name, value.data.clone())
self.model_buffers = dict(model.named_buffers())
if self.checkpoint is not None:
runner.resume(self.checkpoint)
def get_momentum(self, runner):
return self.momentum_fun(runner.iter) if self.momentum_fun else \
self.momentum
def after_train_iter(self, runner):
"""Update ema parameter every self.interval iterations."""
if (runner.iter + 1) % self.interval != 0:
return
momentum = self.get_momentum(runner)
for name, parameter in self.model_parameters.items():
# exclude non-floating-point buffers such as num_batches_tracked
| parameter.data, alpha=momentum)
def after_train_epoch(self, runner):
"""We load parameter values from ema backup to model before the
EvalHook."""
self._swap_ema_parameters()
def before_train_epoch(self, runner):
"""We recover model's parameter from ema backup after last epoch's
EvalHook."""
self._swap_ema_parameters()
def _swap_ema_parameters(self):
"""Swap the parameter of model with parameter in ema_buffer."""
for name, value in self.model_parameters.items():
temp = value.data.clone()
ema_buffer = self.model_buffers[self.param_ema_buffer[name]]
value.data.copy_(ema_buffer.data)
ema_buffer.data.copy_(temp)
@HOOKS.register_module()
class ExpMomentumEMAHook(BaseEMAHook):
"""EMAHook using exponential momentum strategy.
Args:
total_iter (int): The total number of iterations of EMA momentum.
Defaults to 2000.
"""
def __init__(self, total_iter=2000, **kwargs):
super(ExpMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-(
1 + x) / total_iter) + self.momentum
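# With the defaults (momentum=0.0002, total_iter=2000), the effective momentum
# starts close to 1, so the EMA tracks the raw parameters almost exactly early
# in training, and it decays exponentially toward 0.0002 as iterations increase.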
@HOOKS.register_module()
class LinearMomentumEMAHook(BaseEMAHook):
"""EMAHook using linear momentum strategy.
Args:
warm_up (int): During the first warm_up steps, we may use a smaller
momentum to update the ema parameters more slowly. Defaults to 100.
"""
def __init__(self, warm_up=100, **kwargs):
super(LinearMomentumEMAHook, self).__init__(**kwargs)
self.momentum_fun = lambda x: min(self.momentum**self.interval,
(1 + x) / (warm_up + x)) | if parameter.dtype.is_floating_point:
buffer_name = self.param_ema_buffer[name]
buffer_parameter = self.model_buffers[buffer_name]
buffer_parameter.mul_(1 - momentum).add_(
|
views.py | from allauth.account import app_settings as allauth_settings
from allauth.account.models import EmailAddress
from allauth.account.utils import complete_signup
from allauth.account.views import ConfirmEmailView
from django.contrib.auth import get_user_model
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.debug import sensitive_post_parameters
from rest_framework import status
from rest_framework.decorators import action
from rest_framework.exceptions import MethodNotAllowed, ValidationError
from rest_framework.generics import CreateAPIView, GenericAPIView
from rest_framework.mixins import (
CreateModelMixin,
ListModelMixin,
RetrieveModelMixin,
UpdateModelMixin,
)
from rest_framework.permissions import AllowAny, IsAuthenticated
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.viewsets import GenericViewSet
from .serializers import (
CreateUserSerializer,
PasswordChangeSerializer,
PasswordResetConfirmSerializer,
RegisterSerializer,
ResendEmailVerificationSerializer,
UserPasswordResetSerializer,
UserSerializer,
VerifyEmailSerializer,
)
User = get_user_model()
sensitive_post_parameters_m = method_decorator(
sensitive_post_parameters(
"password",
"old_password",
"new_password1",
"new_password2",
),
)
class UserViewSet(RetrieveModelMixin, ListModelMixin, UpdateModelMixin, GenericViewSet):
serializer_class = UserSerializer
queryset = User.objects.all()
lookup_field = "username"
def get_queryset(self, *args, **kwargs):
return self.queryset.filter(id=self.request.user.id)
@action(detail=False, methods=["GET"])
def me(self, request):
serializer = UserSerializer(request.user, context={"request": request})
user_data = {"user": serializer.data}
return Response(status=status.HTTP_200_OK, data=user_data)
class UserCreateViewSet(CreateModelMixin, GenericViewSet):
queryset = User.objects.all()
serializer_class = CreateUserSerializer
permission_classes = [
AllowAny,
]
class | (GenericAPIView):
serializer_class = UserPasswordResetSerializer
permission_classes = (AllowAny,)
def post(self, request, *args, **kwargs):
# Create a serializer with request.data
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
# Return the success message with OK HTTP status
return Response(
{"detail": _("Password reset e-mail has been sent.")},
status=status.HTTP_200_OK,
)
class PasswordResetConfirmView(GenericAPIView):
serializer_class = PasswordResetConfirmSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("Password has been reset with the new password")})
class PasswordChangeView(GenericAPIView):
serializer_class = PasswordChangeSerializer
permission_classes = (IsAuthenticated,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response({"detail": _("New password has been saved")})
class VerifyEmailView(APIView, ConfirmEmailView):
permission_classes = (AllowAny,)
allowed_methods = ("POST", "OPTIONS", "HEAD")
def get_serializer(self, *args, **kwargs):
return VerifyEmailSerializer(*args, **kwargs)
def get(self, *args, **kwargs):
raise MethodNotAllowed("GET")
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
self.kwargs["key"] = serializer.validated_data["key"]
confirmation = self.get_object()
confirmation.confirm(self.request)
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class ResendEmailVerificationView(CreateAPIView):
permission_classes = (AllowAny,)
serializer_class = ResendEmailVerificationSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
email = EmailAddress.objects.filter(**serializer.validated_data).first()
if not email:
raise ValidationError("Account does not exist")
if email.verified:
raise ValidationError("Account is already verified")
email.send_confirmation()
return Response({"detail": _("ok")}, status=status.HTTP_200_OK)
class RegisterView(CreateAPIView):
serializer_class = RegisterSerializer
permission_classes = (AllowAny,)
@sensitive_post_parameters_m
def dispatch(self, *args, **kwargs):
return super().dispatch(*args, **kwargs)
def get_response_data(self, user):
if (
allauth_settings.EMAIL_VERIFICATION
== allauth_settings.EmailVerificationMethod.MANDATORY
):
return {"detail": _("Verification e-mail sent.")}
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
user = self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
return Response(
self.get_response_data(user),
status=status.HTTP_201_CREATED,
headers=headers,
)
def perform_create(self, serializer):
user = serializer.save(self.request)
complete_signup(
self.request._request,
user,
allauth_settings.EMAIL_VERIFICATION,
None,
)
return user
| PasswordResetView |
13.Dictionary.py | size=int(input("enter size="))
dict1={}
for i in range(1,size+1): | subjects = input("enter subjects=")
marks = int(input("enter marks="))
dict1[subjects]=marks
print(dict1)
for subjects,marks in dict1.items():
print(subjects,"=",marks) | |
t21.py | x = 1 | while x < 3:
break
x = x + 1
print x |
|
session_test.go | package vatinator
import (
"errors"
"io/ioutil"
"net/http/httptest"
"os" | "testing"
"github.com/gorilla/securecookie"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSessionService(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test-session")
require.NoError(t, err)
ss, err := NewSessionService(tmpdir, securecookie.GenerateRandomKey(32))
require.NoError(t, err)
r := httptest.NewRequest("GET", "/", nil)
w := httptest.NewRecorder()
_, errNoSession := ss.Get(w, r)
assert.True(t, errors.Is(errNoSession, SessionNotValid))
if err := ss.New(w, r, AccountID(1)); err != nil {
require.NoError(t, err)
}
assert.Greater(t, len(w.Header().Get("Set-Cookie")), 0)
r2 := httptest.NewRequest("GET", "/", nil)
r2.Header.Set("Cookie", w.Header().Get("Set-Cookie"))
id, err := ss.Get(w, r2)
assert.Equal(t, AccountID(1), id)
assert.NoError(t, err)
}
func TestDBSessionService(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test-session")
require.NoError(t, err)
ss, err := NewDBSessionService(filepath.Join(tmpdir, "session.db"))
require.NoError(t, err)
r := httptest.NewRequest("GET", "/", nil)
w := httptest.NewRecorder()
_, errNoSession := ss.Get(w, r)
assert.True(t, errors.Is(errNoSession, SessionNotValid))
if err := ss.New(w, r, AccountID(1)); err != nil {
require.NoError(t, err)
}
assert.Greater(t, len(w.Header().Get("Set-Cookie")), 0)
r2 := httptest.NewRequest("GET", "/", nil)
r2.Header.Set("Cookie", w.Header().Get("Set-Cookie"))
id, err := ss.Get(w, r2)
assert.Equal(t, AccountID(1), id)
assert.NoError(t, err)
}
func TestSessionKeys(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "test-session")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
db, err := NewDB(filepath.Join(tmpdir, "test.db"))
require.NoError(t, err)
assert.NoError(t, Migrate(db, "1.sql"))
// should create a session key on startup
keys, err := GetSessionKeys(db)
assert.NoError(t, err)
assert.Equal(t, 1, len(keys))
var b []byte
if err := db.Get(&b, "SELECT key FROM keys WHERE id = $1", 1); err != nil {
require.NoError(t, err)
}
assert.Equal(t, len(b), 32)
} | "path/filepath" |
process_standard.rs | //! Tock default Process implementation.
//!
//! `ProcessStandard` is an implementation for a userspace process running on
//! the Tock kernel.
use core::cell::Cell;
use core::cmp;
use core::fmt::Write;
use core::ptr::NonNull;
use core::{mem, ptr, slice, str};
use crate::collections::queue::Queue;
use crate::collections::ring_buffer::RingBuffer;
use crate::config;
use crate::debug;
use crate::errorcode::ErrorCode;
use crate::kernel::Kernel;
use crate::platform::chip::Chip;
use crate::platform::mpu::{self, MPU};
use crate::process::{Error, FunctionCall, FunctionCallSource, Process, State, Task};
use crate::process::{FaultAction, ProcessCustomGrantIdentifer, ProcessId, ProcessStateCell};
use crate::process::{ProcessAddresses, ProcessSizes};
use crate::process_policies::ProcessFaultPolicy;
use crate::process_utilities::ProcessLoadError;
use crate::processbuffer::{ReadOnlyProcessBuffer, ReadWriteProcessBuffer};
use crate::syscall::{self, Syscall, SyscallReturn, UserspaceKernelBoundary};
use crate::upcall::UpcallId;
use crate::utilities::cells::{MapCell, NumericCellExt};
// The completion code for a process if it faulted.
const COMPLETION_FAULT: u32 = 0xffffffff;
/// State for helping with debugging apps.
///
/// These pointers and counters are not strictly required for kernel operation,
/// but provide helpful information when an app crashes.
struct ProcessStandardDebug {
/// If this process was compiled for fixed addresses, save the address
/// it must be at in flash. This is useful for debugging and saves having
/// to re-parse the entire TBF header.
fixed_address_flash: Option<u32>,
/// If this process was compiled for fixed addresses, save the address
/// it must be at in RAM. This is useful for debugging and saves having
/// to re-parse the entire TBF header.
fixed_address_ram: Option<u32>,
/// Where the process has started its heap in RAM.
app_heap_start_pointer: Option<*const u8>,
/// Where the start of the stack is for the process. If the kernel does the
/// PIC setup for this app then we know this, otherwise we need the app to
/// tell us where it put its stack.
app_stack_start_pointer: Option<*const u8>,
/// How low have we ever seen the stack pointer.
app_stack_min_pointer: Option<*const u8>,
/// How many syscalls have occurred since the process started.
syscall_count: usize,
/// What was the most recent syscall.
last_syscall: Option<Syscall>,
/// How many upcalls were dropped because the queue was insufficiently
/// long.
dropped_upcall_count: usize,
/// How many times this process has been paused because it exceeded its
/// timeslice.
timeslice_expiration_count: usize,
}
/// Entry that is stored in the grant pointer table at the top of process
/// memory.
///
/// One copy of this entry struct is stored per grant region defined in the
/// kernel. This type allows the core kernel to look up a grant based on the
/// driver_num associated with the grant, and also holds the pointer to the
/// memory allocated for the particular grant.
#[repr(C)]
struct GrantPointerEntry {
/// The syscall driver number associated with the allocated grant.
///
/// This defaults to 0 if the grant has not been allocated. Note, however,
/// that 0 is a valid driver_num, and therefore cannot be used to check if a
/// grant is allocated or not.
driver_num: usize,
/// The start of the memory location where the grant has been allocated, or
/// null if the grant has not been allocated.
grant_ptr: *mut u8,
}
/// A type for userspace processes in Tock.
pub struct ProcessStandard<'a, C: 'static + Chip> {
/// Identifier of this process and the index of the process in the process
/// table.
process_id: Cell<ProcessId>,
/// Pointer to the main Kernel struct.
kernel: &'static Kernel,
/// Pointer to the struct that defines the actual chip the kernel is running
/// on. This is used because processes have subtle hardware-based
/// differences. Specifically, the actual syscall interface and how
/// processes are switched to is architecture-specific, and how memory must
/// be allocated for memory protection units is also hardware-specific.
chip: &'static C,
/// Application memory layout:
///
/// ```text
/// ╒════════ ← memory_start + memory_len
/// ╔═ │ Grant Pointers
/// ║ │ ──────
/// │ Process Control Block
/// D │ ──────
/// Y │ Grant Regions
/// N │
/// A │ ↓
/// M │ ────── ← kernel_memory_break
/// I │
/// C │ ────── ← app_break ═╗
/// │ ║
/// ║ │ ↑ A
/// ║ │ Heap P C
/// ╠═ │ ────── ← app_heap_start R C
/// │ Data O E
/// F │ ────── ← data_start_pointer C S
/// I │ Stack E S
/// X │ ↓ S I
/// E │ S B
/// D │ ────── ← current_stack_pointer L
/// │ ║ E
/// ╚═ ╘════════ ← memory_start ═╝
/// ```
///
/// The start of process memory. We store this as a pointer and length and
/// not a slice due to Rust aliasing rules. If we were to store a slice,
/// then any use of another slice to the same memory, or of a ProcessBuffer,
/// in the kernel would be undefined behavior.
memory_start: *const u8,
/// Number of bytes of memory allocated to this process.
memory_len: usize,
/// Reference to the slice of `GrantPointerEntry`s stored in the process's
/// memory reserved for the kernel. These driver numbers are zero and
/// pointers are null if the grant region has not been allocated. When the
/// grant region is allocated these pointers are updated to point to the
/// allocated memory and the driver number is set to match the driver that
/// owns the grant. No other reference to these pointers exists in the Tock
/// kernel.
grant_pointers: MapCell<&'static mut [GrantPointerEntry]>,
/// Pointer to the end of the allocated (and MPU protected) grant region.
kernel_memory_break: Cell<*const u8>,
/// Pointer to the end of process RAM that has been sbrk'd to the process.
app_break: Cell<*const u8>,
/// Pointer to high water mark for process buffers shared through `allow`
allow_high_water_mark: Cell<*const u8>,
/// Process flash segment. This is the region of nonvolatile flash that
/// the process occupies.
flash: &'static [u8],
/// Collection of pointers to the TBF header in flash.
header: tock_tbf::types::TbfHeader,
/// State saved on behalf of the process each time the app switches to the
/// kernel.
stored_state:
MapCell<<<C as Chip>::UserspaceKernelBoundary as UserspaceKernelBoundary>::StoredState>,
/// The current state of the app. The scheduler uses this to determine
/// whether it can schedule this app to execute.
///
/// The `state` is used both for bookkeeping for the scheduler as well as
/// for enabling control by other parts of the system. The scheduler keeps
/// track of if a process is ready to run or not by switching between the
/// `Running` and `Yielded` states. The system can control the process by
/// switching it to a "stopped" state to prevent the scheduler from
/// scheduling it.
state: ProcessStateCell<'static>,
/// How to respond if this process faults.
fault_policy: &'a dyn ProcessFaultPolicy,
/// Configuration data for the MPU
mpu_config: MapCell<<<C as Chip>::MPU as MPU>::MpuConfig>,
/// MPU regions are saved as a pointer-size pair.
mpu_regions: [Cell<Option<mpu::Region>>; 6],
/// Essentially a list of upcalls that want to call functions in the
/// process.
tasks: MapCell<RingBuffer<'a, Task>>,
/// Count of how many times this process has entered the fault condition and
/// been restarted. This is used by some `ProcessRestartPolicy`s to
/// determine if the process should be restarted or not.
restart_count: Cell<usize>,
/// Name of the app.
process_name: &'static str,
/// Values kept so that we can print useful debug messages when apps fault.
debug: MapCell<ProcessStandardDebug>,
}
impl<C: Chip> Process for ProcessStandard<'_, C> {
fn processid(&self) -> ProcessId {
self.process_id.get()
}
fn enqueue_task(&self, task: Task) -> Result<(), ErrorCode> {
// If this app is in a `Fault` state then we shouldn't schedule
// any work for it.
if !self.is_active() {
return Err(ErrorCode::NODEVICE);
}
let ret = self.tasks.map_or(Err(ErrorCode::FAIL), |tasks| {
match tasks.enqueue(task) {
true => {
// The task has been successfully enqueued.
Ok(())
}
false => {
// The task could not be enqueued as there is
// insufficient space in the ring buffer.
Err(ErrorCode::NOMEM)
}
}
});
if ret.is_ok() {
self.kernel.increment_work();
} else {
// On any error we were unable to enqueue the task. Record the
// error, but importantly do _not_ increment kernel work.
self.debug.map(|debug| {
debug.dropped_upcall_count += 1;
});
}
ret
}
fn ready(&self) -> bool {
self.tasks.map_or(false, |ring_buf| ring_buf.has_elements())
|| self.state.get() == State::Running
}
fn remove_pending_upcalls(&self, upcall_id: UpcallId) {
self.tasks.map(|tasks| {
let count_before = tasks.len();
tasks.retain(|task| match task {
// Remove only tasks that are function calls with an id equal
// to `upcall_id`.
Task::FunctionCall(function_call) => match function_call.source {
FunctionCallSource::Kernel => true,
FunctionCallSource::Driver(id) => {
if id != upcall_id {
true
} else {
self.kernel.decrement_work();
false
}
}
},
_ => true,
});
if config::CONFIG.trace_syscalls {
let count_after = tasks.len();
debug!(
"[{:?}] remove_pending_upcalls[{:#x}:{}] = {} upcall(s) removed",
self.processid(),
upcall_id.driver_num,
upcall_id.subscribe_num,
count_before - count_after,
);
}
});
}
fn get_state(&self) -> State {
self.state.get()
}
fn set_yielded_state(&self) {
if self.state.get() == State::Running {
self.state.update(State::Yielded);
}
}
fn stop(&self) {
match self.state.get() {
State::Running => self.state.update(State::StoppedRunning),
State::Yielded => self.state.update(State::StoppedYielded),
_ => {} // Do nothing
}
}
fn resume(&self) {
match self.state.get() {
State::StoppedRunning => self.state.update(State::Running),
State::StoppedYielded => self.state.update(State::Yielded),
_ => {} // Do nothing
}
}
fn set_fault_state(&self) {
// Use the per-process fault policy to determine what action the kernel
// should take since the process faulted.
let action = self.fault_policy.action(self);
match action {
FaultAction::Panic => {
// process faulted. Panic and print status
self.state.update(State::Faulted);
panic!("Process {} had a fault", self.process_name);
}
FaultAction::Restart => {
self.try_restart(COMPLETION_FAULT);
}
FaultAction::Stop => {
// This looks a lot like restart, except we just leave the app
// how it faulted and mark it as `Faulted`. By clearing
// all of the app's todo work it will not be scheduled, and
// clearing all of the grant regions will cause capsules to drop
// this app as well.
self.terminate(COMPLETION_FAULT);
self.state.update(State::Faulted);
}
}
}
fn try_restart(&self, completion_code: u32) {
// Terminate the process, freeing its state and removing any
// pending tasks from the scheduler's queue.
self.terminate(completion_code);
// If there is a kernel policy that controls restarts, it should be
// implemented here. For now, always restart.
let _res = self.restart();
// Decide what to do with res later. E.g., if we can't restart, we may
// want to reclaim the process resources.
}
fn terminate(&self, _completion_code: u32) {
// Remove the tasks that were scheduled for the app from the
// amount of work queue.
let tasks_len = self.tasks.map_or(0, |tasks| tasks.len());
for _ in 0..tasks_len {
self.kernel.decrement_work();
}
// And remove those tasks
self.tasks.map(|tasks| {
tasks.empty();
});
// Clear any grant regions this app has set up with any capsules.
unsafe {
self.grant_ptrs_reset();
}
// Mark the app as stopped so the scheduler won't try to run it.
self.state.update(State::Terminated);
}
fn get_restart_count(&self) -> usize {
self.restart_count.get()
}
fn has_tasks(&self) -> bool {
self.tasks.map_or(false, |tasks| tasks.has_elements())
}
fn dequeue_task(&self) -> Option<Task> {
self.tasks.map_or(None, |tasks| {
tasks.dequeue().map(|cb| {
self.kernel.decrement_work();
cb
})
})
}
fn pending_tasks(&self) -> usize {
self.tasks.map_or(0, |tasks| tasks.len())
}
fn mem_start(&self) -> *const u8 {
self.memory_start
}
fn mem_end(&self) -> *const u8 {
self.memory_start.wrapping_add(self.memory_len)
}
fn flash_start(&self) -> *const u8 {
self.flash.as_ptr()
}
fn flash_non_protected_start(&self) -> *const u8 {
((self.flash.as_ptr() as usize) + self.header.get_protected_size() as usize) as *const u8
}
fn flash_end(&self) -> *const u8 {
self.flash.as_ptr().wrapping_add(self.flash.len())
}
fn kernel_memory_break(&self) -> *const u8 {
self.kernel_memory_break.get()
}
fn number_writeable_flash_regions(&self) -> usize {
self.header.number_writeable_flash_regions()
}
fn get_writeable_flash_region(&self, region_index: usize) -> (u32, u32) {
self.header.get_writeable_flash_region(region_index)
}
fn update_stack_start_pointer(&self, stack_pointer: *const u8) {
if stack_pointer >= self.mem_start() && stack_pointer < self.mem_end() {
self.debug.map(|debug| {
debug.app_stack_start_pointer = Some(stack_pointer);
// We also reset the minimum stack pointer because whatever
// value we had could be entirely wrong by now.
debug.app_stack_min_pointer = Some(stack_pointer);
});
}
}
fn update_heap_start_pointer(&self, heap_pointer: *const u8) {
if heap_pointer >= self.mem_start() && heap_pointer < self.mem_end() {
self.debug.map(|debug| {
debug.app_heap_start_pointer = Some(heap_pointer);
});
}
}
fn app_memory_break(&self) -> *const u8 {
self.app_break.get()
}
fn setup_mpu(&self) {
self.mpu_config.map(|config| {
self.chip.mpu().configure_mpu(&config, &self.processid());
});
}
fn add_mpu_region(
&self,
unallocated_memory_start: *const u8,
unallocated_memory_size: usize,
min_region_size: usize,
) -> Option<mpu::Region> {
self.mpu_config.and_then(|mut config| {
let new_region = self.chip.mpu().allocate_region(
unallocated_memory_start,
unallocated_memory_size,
min_region_size,
mpu::Permissions::ReadWriteOnly,
&mut config,
);
if new_region.is_none() {
return None;
}
for region in self.mpu_regions.iter() {
if region.get().is_none() {
region.set(new_region);
return new_region;
}
}
// Not enough room in Process struct to store the MPU region.
None
})
}
fn remove_mpu_region(&self, region: mpu::Region) -> Result<(), ErrorCode> {
self.mpu_config.map_or(Err(ErrorCode::INVAL), |mut config| {
// Find the existing mpu region that we are removing; it needs to match exactly.
if let Some(internal_region) = self
.mpu_regions
.iter()
.find(|r| r.get().map_or(false, |r| r == region))
{
self.chip
.mpu()
.remove_memory_region(region, &mut config)
.or(Err(ErrorCode::FAIL))?;
// Remove this region from the tracking cache of mpu_regions
internal_region.set(None);
Ok(())
} else {
Err(ErrorCode::INVAL)
}
})
}
fn sbrk(&self, increment: isize) -> Result<*const u8, Error> {
// Do not modify an inactive process.
if !self.is_active() {
return Err(Error::InactiveApp);
}
let new_break = unsafe { self.app_break.get().offset(increment) };
self.brk(new_break)
}
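// brk() moves the app break to an explicit address: the new break must be at
// or above the `allow` high water mark, below the end of process memory, and
// no higher than `kernel_memory_break`.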
fn brk(&self, new_break: *const u8) -> Result<*const u8, Error> {
// Do not modify an inactive process.
if !self.is_active() {
return Err(Error::InactiveApp);
}
self.mpu_config
.map_or(Err(Error::KernelError), |mut config| {
if new_break < self.allow_high_water_mark.get() || new_break >= self.mem_end() {
Err(Error::AddressOutOfBounds)
} else if new_break > self.kernel_memory_break.get() {
Err(Error::OutOfMemory)
} else if let Err(_) = self.chip.mpu().update_app_memory_region(
new_break,
self.kernel_memory_break.get(),
mpu::Permissions::ReadWriteOnly,
&mut config,
) {
Err(Error::OutOfMemory)
} else {
let old_break = self.app_break.get();
self.app_break.set(new_break);
self.chip.mpu().configure_mpu(&config, &self.processid());
Ok(old_break)
}
})
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
fn build_readwrite_process_buffer(
&self,
buf_start_addr: *mut u8,
size: usize,
) -> Result<ReadWriteProcessBuffer, ErrorCode> {
if !self.is_active() {
// Do not operate on an inactive process
return Err(ErrorCode::FAIL);
}
// A process is allowed to pass any pointer if the buffer length is 0,
// so as to revoke kernel access to a memory region without granting access
// to another one
if size == 0 {
// Clippy complains that we're dereferencing a pointer in a public
// and safe function here. While we are not dereferencing the
// pointer here, we pass it along to an unsafe function, which is as
// dangerous (as it is likely to be dereferenced down the line).
//
// Relevant discussion:
// https://github.com/rust-lang/rust-clippy/issues/3045
//
// It should be fine to ignore the lint here, as a buffer of length
// 0 will never allow dereferencing any memory in a safe manner.
//
// ### Safety
//
// We specify a zero-length buffer, so the implementation of
// `ReadWriteProcessBuffer` will handle any safety issues.
// Therefore, we can encapsulate the unsafe.
Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, 0, self.processid()) })
} else if self.in_app_owned_memory(buf_start_addr, size) {
// TODO: Check for buffer aliasing here
// Valid buffer, we need to adjust the app's watermark
// note: in_app_owned_memory ensures this offset does not wrap
let buf_end_addr = buf_start_addr.wrapping_add(size);
let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
self.allow_high_water_mark.set(new_water_mark);
// Clippy complains that we're dereferencing a pointer in a public
// and safe function here. While we are not dereferencing the
// pointer here, we pass it along to an unsafe function, which is as
// dangerous (as it is likely to be dereferenced down the line).
//
// Relevant discussion:
// https://github.com/rust-lang/rust-clippy/issues/3045
//
// It should be fine to ignore the lint here, as long as we make
// sure that we're pointing towards userspace memory (verified using
// `in_app_owned_memory`) and respect alignment and other
// constraints of the Rust references created by
// ReadWriteProcessBuffer.
//
// ### Safety
//
// We encapsulate the unsafe here on the condition in the TODO
// above, as we must ensure that this `ReadWriteProcessBuffer` will
// be the only reference to this memory.
Ok(unsafe { ReadWriteProcessBuffer::new(buf_start_addr, size, self.processid()) })
} else {
Err(ErrorCode::INVAL)
}
}
#[allow(clippy::not_unsafe_ptr_arg_deref)]
fn build_readonly_process_buffer(
&self,
buf_start_addr: *const u8,
size: usize,
) -> Result<ReadOnlyProcessBuffer, ErrorCode> {
if !self.is_active() {
// Do not operate on an inactive process
return Err(ErrorCode::FAIL);
}
// A process is allowed to pass any pointer if the buffer length is 0,
// so as to revoke kernel access to a memory region without granting access
// to another one
if size == 0 {
// Clippy complains that we're dereferencing a pointer in a public
// and safe function here. While we are not dereferencing the
// pointer here, we pass it along to an unsafe function, which is as
// dangerous (as it is likely to be dereferenced down the line).
//
// Relevant discussion:
// https://github.com/rust-lang/rust-clippy/issues/3045
//
// It should be fine to ignore the lint here, as a buffer of length
// 0 will never allow dereferencing any memory in a safe manner.
//
// ### Safety
//
// We specify a zero-length buffer, so the implementation of
// `ReadOnlyProcessBuffer` will handle any safety issues. Therefore,
// we can encapsulate the unsafe.
Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, 0, self.processid()) })
} else if self.in_app_owned_memory(buf_start_addr, size)
|| self.in_app_flash_memory(buf_start_addr, size)
{
// TODO: Check for buffer aliasing here
if self.in_app_owned_memory(buf_start_addr, size) {
// Valid buffer, and since this is in read-write memory (i.e.
// not flash), we need to adjust the process's watermark. Note:
// `in_app_owned_memory()` ensures this offset does not wrap.
let buf_end_addr = buf_start_addr.wrapping_add(size);
let new_water_mark = cmp::max(self.allow_high_water_mark.get(), buf_end_addr);
self.allow_high_water_mark.set(new_water_mark);
}
// Clippy complains that we're dereferencing a pointer in a public
// and safe function here. While we are not dereferencing the
// pointer here, we pass it along to an unsafe function, which is as
// dangerous (as it is likely to be dereferenced down the line).
//
// Relevant discussion:
// https://github.com/rust-lang/rust-clippy/issues/3045
//
// It should be fine to ignore the lint here, as long as we make
// sure that we're pointing towards userspace memory (verified using
// `in_app_owned_memory` or `in_app_flash_memory`) and respect
// alignment and other constraints of the Rust references created by
// ReadOnlyProcessBuffer.
//
// ### Safety
//
// We encapsulate the unsafe here on the condition in the TODO
// above, as we must ensure that this `ReadOnlyProcessBuffer` will
// be the only reference to this memory.
Ok(unsafe { ReadOnlyProcessBuffer::new(buf_start_addr, size, self.processid()) })
} else {
Err(ErrorCode::INVAL)
}
}
unsafe fn set_byte(&self, addr: *mut u8, value: u8) -> bool {
if self.in_app_owned_memory(addr, 1) {
// We verify that this will only write process-accessible memory,
// but this can still be undefined behavior if something else holds
// a reference to this memory.
*addr = value;
true
} else {
false
}
}
fn grant_is_allocated(&self, grant_num: usize) -> Option<bool> {
// Do not modify an inactive process.
if !self.is_active() {
return None;
}
// Check the grant pointer table to see whether this grant has been
// allocated (i.e. its grant pointer is non-null).
self.grant_pointers.map_or(None, |grant_pointers| {
// Implement `grant_pointers[grant_num]` without a chance of a
// panic.
grant_pointers
.get(grant_num)
.map_or(None, |grant_entry| Some(!grant_entry.grant_ptr.is_null()))
})
}
fn allocate_grant(
&self,
grant_num: usize,
driver_num: usize,
size: usize,
align: usize,
) -> Option<NonNull<u8>> {
// Do not modify an inactive process.
if !self.is_active() {
return None;
}
// Verify the grant_num is valid.
if grant_num >= self.kernel.get_grant_count_and_finalize() {
return None;
}
// Verify that the grant is not already allocated. If the pointer is not
// null then the grant is already allocated.
if let Some(is_allocated) = self.grant_is_allocated(grant_num) {
if is_allocated {
return None;
}
}
// Verify that there is not already a grant allocated with the same
// driver_num.
let exists = self.grant_pointers.map_or(false, |grant_pointers| {
// Check our list of grant pointers if the driver number is used.
grant_pointers.iter().any(|grant_entry| {
// Check if the grant is both allocated (its grant pointer is
// non null) and the driver number matches.
(!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
})
});
// If we find a match, then the driver_num must already be used and the
// grant allocation fails.
if exists {
return None;
}
// Use the shared grant allocator function to actually allocate memory.
// Returns `None` if the allocation cannot be created.
if let Some(grant_ptr) = self.allocate_in_grant_region_internal(size, align) {
// Update the grant pointer to the address of the new allocation.
self.grant_pointers.map_or(None, |grant_pointers| {
// Implement `grant_pointers[grant_num] = grant_ptr` without a
// chance of a panic.
grant_pointers
.get_mut(grant_num)
.map_or(None, |grant_entry| {
// Actually set the driver num and grant pointer.
grant_entry.driver_num = driver_num;
grant_entry.grant_ptr = grant_ptr.as_ptr() as *mut u8;
// If all of this worked, return the allocated pointer.
Some(grant_ptr)
})
})
} else {
// Could not allocate the memory for the grant region.
None
}
}
fn allocate_custom_grant(
&self,
size: usize,
align: usize,
) -> Option<(ProcessCustomGrantIdentifer, NonNull<u8>)> {
// Do not modify an inactive process.
if !self.is_active() {
return None;
}
// Use the shared grant allocator function to actually allocate memory.
// Returns `None` if the allocation cannot be created.
if let Some(ptr) = self.allocate_in_grant_region_internal(size, align) {
// Create the identifier that the caller will use to get access to
// this custom grant in the future.
let identifier = self.create_custom_grant_identifier(ptr);
Some((identifier, ptr))
} else {
// Could not allocate memory for the custom grant.
None
}
}
fn enter_grant(&self, grant_num: usize) -> Result<*mut u8, Error> {
// Do not try to access the grant region of inactive process.
if !self.is_active() {
return Err(Error::InactiveApp);
}
// Retrieve the grant pointer from the `grant_pointers` slice. We use
// `[slice].get()` so that if the grant number is invalid this will
// return `Err` and not panic.
self.grant_pointers
.map_or(Err(Error::KernelError), |grant_pointers| {
// Implement `grant_pointers[grant_num]` without a chance of a
// panic.
match grant_pointers.get_mut(grant_num) {
Some(grant_entry) => {
// Get a copy of the actual grant pointer.
let grant_ptr = grant_entry.grant_ptr;
// Check if the grant pointer is marked that the grant
// has already been entered. If so, return an error.
if (grant_ptr as usize) & 0x1 == 0x1 {
// Lowest bit is one, meaning this grant has been
// entered.
Err(Error::AlreadyInUse)
} else {
// Now, to mark that the grant has been entered, we
// set the lowest bit to one and save this as the
// grant pointer.
grant_entry.grant_ptr = (grant_ptr as usize | 0x1) as *mut u8;
// And we return the grant pointer to the entered
// grant.
Ok(grant_ptr)
}
}
None => Err(Error::AddressOutOfBounds),
}
})
}
fn enter_custom_grant(
&self,
identifier: ProcessCustomGrantIdentifer,
) -> Result<*mut u8, Error> {
// Do not try to access the grant region of inactive process.
if !self.is_active() {
return Err(Error::InactiveApp);
}
// Get the address of the custom grant based on the identifier.
let custom_grant_address = self.get_custom_grant_address(identifier);
// We never deallocate custom grants and only we can change the
// `identifier` so we know this is a valid address for the custom grant.
Ok(custom_grant_address as *mut u8)
}
fn leave_grant(&self, grant_num: usize) {
// Do not modify an inactive process.
if !self.is_active() {
return;
}
self.grant_pointers.map(|grant_pointers| {
// Implement `grant_pointers[grant_num]` without a chance of a
// panic.
match grant_pointers.get_mut(grant_num) {
Some(grant_entry) => {
// Get a copy of the actual grant pointer.
let grant_ptr = grant_entry.grant_ptr;
// Now, to mark that the grant has been released, we set the
// lowest bit back to zero and save this as the grant
// pointer.
grant_entry.grant_ptr = (grant_ptr as usize & !0x1) as *mut u8;
}
None => {}
}
});
}
fn grant_allocated_count(&self) -> Option<usize> {
// Do not modify an inactive process.
if !self.is_active() {
return None;
}
self.grant_pointers.map(|grant_pointers| {
// Filter our list of grant pointers into just the non null ones,
// and count those. A grant is allocated if its grant pointer is non
// null.
grant_pointers
.iter()
.filter(|grant_entry| !grant_entry.grant_ptr.is_null())
.count()
})
}
fn lookup_grant_from_driver_num(&self, driver_num: usize) -> Result<usize, Error> {
self.grant_pointers
.map_or(Err(Error::KernelError), |grant_pointers| {
// Find the index of the first allocated grant (i.e. its grant
// pointer is non null) whose driver number matches the
// requested driver_num.
match grant_pointers.iter().position(|grant_entry| {
// Only consider allocated grants.
(!grant_entry.grant_ptr.is_null()) && grant_entry.driver_num == driver_num
}) {
Some(idx) => Ok(idx),
None => Err(Error::OutOfMemory),
}
})
}
fn is_valid_upcall_function_pointer(&self, upcall_fn: NonNull<()>) -> bool {
let ptr = upcall_fn.as_ptr() as *const u8;
let size = mem::size_of::<*const u8>();
// It is ok if this function is in memory or flash.
self.in_app_flash_memory(ptr, size) || self.in_app_owned_memory(ptr, size)
}
fn get_process_name(&self) -> &'static str {
self.process_name
}
fn set_syscall_return_value(&self, return_value: SyscallReturn) {
match self.stored_state.map(|stored_state| unsafe {
// Actually set the return value for a particular process.
//
// The UKB implementation uses the bounds of process-accessible
// memory to verify that any memory changes are valid. Here, the
// unsafe promise we are making is that the bounds passed to the UKB
// are correct.
self.chip
.userspace_kernel_boundary()
.set_syscall_return_value(
self.mem_start(),
self.app_break.get(),
stored_state,
return_value,
)
}) {
Some(Ok(())) => {
// If we get an `Ok` we are all set.
}
Some(Err(())) => {
// If we get an `Err`, then the UKB implementation could not set
// the return value, likely because the process's stack is no
// longer accessible to it. All we can do is fault.
self.set_fault_state();
}
None => {
// We should never be here since `stored_state` should always be
// occupied.
self.set_fault_state();
}
}
}
fn set_process_function(&self, callback: FunctionCall) {
// See if we can actually enqueue this function for this process.
// Architecture-specific code handles actually doing this since the
// exact method is both architecture- and implementation-specific.
//
// This can fail, for example if the process does not have enough memory
// remaining.
match self.stored_state.map(|stored_state| {
// Let the UKB implementation handle setting the process's PC so
// that the process executes the upcall function. We encapsulate
// unsafe here because we are guaranteeing that the memory bounds
// passed to `set_process_function` are correct.
unsafe {
self.chip.userspace_kernel_boundary().set_process_function(
self.mem_start(),
self.app_break.get(),
stored_state,
callback,
)
}
}) {
Some(Ok(())) => {
// If we got an `Ok` we are all set and should mark that this
// process is ready to be scheduled.
// Move this process to the "running" state so the scheduler
// will schedule it.
self.state.update(State::Running);
}
Some(Err(())) => {
// If we got an Error, then there was likely not enough room on
// the stack to allow the process to execute this function given
// the details of the particular architecture this is running
// on. This process has essentially faulted, so we mark it as
// such.
self.set_fault_state();
}
None => {
// We should never be here since `stored_state` should always be
// occupied.
self.set_fault_state();
}
}
}
fn switch_to(&self) -> Option<syscall::ContextSwitchReason> {
// Cannot switch to an invalid process
if !self.is_active() {
return None;
}
let (switch_reason, stack_pointer) =
self.stored_state.map_or((None, None), |stored_state| {
// Switch to the process. We guarantee that the memory pointers
// we pass are valid, ensuring this context switch is safe.
// Therefore we encapsulate the `unsafe`.
unsafe {
let (switch_reason, optional_stack_pointer) = self
.chip
.userspace_kernel_boundary()
.switch_to_process(self.mem_start(), self.app_break.get(), stored_state);
(Some(switch_reason), optional_stack_pointer)
}
});
// If the UKB implementation passed us a stack pointer, update our
// debugging state. This is completely optional.
stack_pointer.map(|sp| {
self.debug.map(|debug| {
match debug.app_stack_min_pointer {
None => debug.app_stack_min_pointer = Some(sp),
Some(asmp) => {
// Update max stack depth if needed.
if sp < asmp {
debug.app_stack_min_pointer = Some(sp);
}
}
}
});
});
switch_reason
}
fn debug_syscall_count(&self) -> usize {
self.debug.map_or(0, |debug| debug.syscall_count)
}
fn debug_dropped_upcall_count(&self) -> usize {
self.debug.map_or(0, |debug| debug.dropped_upcall_count)
}
fn debug_timeslice_expiration_count(&self) -> usize {
self.debug
.map_or(0, |debug| debug.timeslice_expiration_count)
}
fn debug_timeslice_expired(&self) {
self.debug
.map(|debug| debug.timeslice_expiration_count += 1);
}
fn debug_syscall_called(&self, last_syscall: Syscall) {
self.debug.map(|debug| {
debug.syscall_count += 1;
debug.last_syscall = Some(last_syscall);
});
}
fn debug_heap_start(&self) -> Option<*const u8> {
self.debug
.map_or(None, |debug| debug.app_heap_start_pointer.map(|p| p))
}
fn debug_stack_start(&self) -> Option<*const u8> {
self.debug
.map_or(None, |debug| debug.app_stack_start_pointer.map(|p| p))
}
fn debug_stack_end(&self) -> Option<*const u8> {
self.debug
.map_or(None, |debug| debug.app_stack_min_pointer.map(|p| p))
}
fn get_addresses(&self) -> ProcessAddresses {
ProcessAddresses {
flash_start: self.flash_start() as usize,
flash_non_protected_start: self.flash_non_protected_start() as usize,
flash_end: self.flash_end() as usize,
sram_start: self.mem_start() as usize,
sram_app_brk: self.app_memory_break() as usize,
sram_grant_start: self.kernel_memory_break() as usize,
sram_end: self.mem_end() as usize,
sram_heap_start: self.debug.map_or(None, |debug| {
debug.app_heap_start_pointer.map(|p| p as usize)
}),
sram_stack_top: self.debug.map_or(None, |debug| {
debug.app_stack_start_pointer.map(|p| p as usize)
}),
sram_stack_bottom: self.debug.map_or(None, |debug| {
debug.app_stack_min_pointer.map(|p| p as usize)
}),
}
}
fn get_sizes(&self) -> ProcessSizes {
ProcessSizes {
grant_pointers: mem::size_of::<GrantPointerEntry>()
* self.kernel.get_grant_count_and_finalize(),
upcall_list: Self::CALLBACKS_OFFSET,
process_control_block: Self::PROCESS_STRUCT_OFFSET,
}
}
fn print_memory_map(&self, writer: &mut dyn Write) {
if !config::CONFIG.debug_panics {
return;
}
// Flash
let flash_end = self.flash.as_ptr().wrapping_add(self.flash.len()) as usize;
let flash_start = self.flash.as_ptr() as usize;
let flash_protected_size = self.header.get_protected_size() as usize;
let flash_app_start = flash_start + flash_protected_size;
let flash_app_size = flash_end - flash_app_start;
// Grant pointers size.
let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
let grant_ptrs_num = self.kernel.get_grant_count_and_finalize();
let sram_grant_pointers_size = grant_ptrs_num * grant_ptr_size;
// SRAM addresses
let sram_end = self.mem_end() as usize;
let sram_grant_pointers_start = sram_end - sram_grant_pointers_size;
let sram_upcall_list_start = sram_grant_pointers_start - Self::CALLBACKS_OFFSET;
let process_struct_memory_location = sram_upcall_list_start - Self::PROCESS_STRUCT_OFFSET;
let sram_grant_start = self.kernel_memory_break.get() as usize;
let sram_heap_end = self.app_break.get() as usize;
let sram_heap_start: Option<usize> = self.debug.map_or(None, |debug| {
debug.app_heap_start_pointer.map(|p| p as usize)
});
let sram_stack_start: Option<usize> = self.debug.map_or(None, |debug| {
debug.app_stack_start_pointer.map(|p| p as usize)
});
let sram_stack_bottom: Option<usize> = self.debug.map_or(None, |debug| {
debug.app_stack_min_pointer.map(|p| p as usize)
});
let sram_start = self.mem_start() as usize;
// SRAM sizes
let sram_upcall_list_size = Self::CALLBACKS_OFFSET;
let sram_process_struct_size = Self::PROCESS_STRUCT_OFFSET;
let sram_grant_size = process_struct_memory_location - sram_grant_start;
let sram_grant_allocated = process_struct_memory_location - sram_grant_start;
// application statistics
let events_queued = self.pending_tasks();
let syscall_count = self.debug.map_or(0, |debug| debug.syscall_count);
let last_syscall = self.debug.map(|debug| debug.last_syscall);
let dropped_upcall_count = self.debug.map_or(0, |debug| debug.dropped_upcall_count);
let restart_count = self.restart_count.get();
let _ = writer.write_fmt(format_args!(
"\
𝐀𝐩𝐩: {} - [{:?}]\
\r\n Events Queued: {} Syscall Count: {} Dropped Upcall Count: {}\
\r\n Restart Count: {}\r\n",
self.process_name,
self.state.get(),
events_queued,
syscall_count,
dropped_upcall_count,
restart_count,
));
let _ = match last_syscall {
Some(syscall) => writer.write_fmt(format_args!(" Last Syscall: {:?}\r\n", syscall)),
None => writer.write_str(" Last Syscall: None\r\n"),
};
let _ = writer.write_fmt(format_args!(
"\
\r\n\
\r\n ╔═══════════╤══════════════════════════════════════════╗\
\r\n ║ Address │ Region Name Used | Allocated (bytes) ║\
\r\n ╚{:#010X}═╪══════════════════════════════════════════╝\
\r\n │ Grant Ptrs {:6}\
\r\n │ Upcalls {:6}\
\r\n │ Process {:6}\
\r\n {:#010X} ┼───────────────────────────────────────────\
\r\n │ ▼ Grant {:6} | {:6}{}\
\r\n {:#010X} ┼───────────────────────────────────────────\
\r\n │ Unused\
\r\n {:#010X} ┼───────────────────────────────────────────",
sram_end,
sram_grant_pointers_size,
sram_upcall_list_size,
sram_process_struct_size,
process_struct_memory_location,
sram_grant_size,
sram_grant_allocated,
exceeded_check(sram_grant_size, sram_grant_allocated),
sram_grant_start,
sram_heap_end,
));
match sram_heap_start {
Some(sram_heap_start) => {
let sram_heap_size = sram_heap_end - sram_heap_start;
let sram_heap_allocated = sram_grant_start - sram_heap_start;
let _ = writer.write_fmt(format_args!(
"\
\r\n │ ▲ Heap {:6} | {:6}{} S\
\r\n {:#010X} ┼─────────────────────────────────────────── R",
sram_heap_size,
sram_heap_allocated,
exceeded_check(sram_heap_size, sram_heap_allocated),
sram_heap_start,
));
}
None => {
let _ = writer.write_str(
"\
\r\n │ ▲ Heap ? | ? S\
\r\n ?????????? ┼─────────────────────────────────────────── R",
);
}
}
match (sram_heap_start, sram_stack_start) {
(Some(sram_heap_start), Some(sram_stack_start)) => {
let sram_data_size = sram_heap_start - sram_stack_start;
let sram_data_allocated = sram_data_size as usize;
let _ = writer.write_fmt(format_args!(
"\
\r\n │ Data {:6} | {:6} A",
sram_data_size, sram_data_allocated,
));
}
_ => {
let _ = writer.write_str(
"\
\r\n │ Data ? | ? A",
);
}
}
match (sram_stack_start, sram_stack_bottom) {
(Some(sram_stack_start), Some(sram_stack_bottom)) => {
let sram_stack_size = sram_stack_start - sram_stack_bottom;
let sram_stack_allocated = sram_stack_start - sram_start;
let _ = writer.write_fmt(format_args!(
"\
\r\n {:#010X} ┼─────────────────────────────────────────── M\
\r\n │ ▼ Stack {:6} | {:6}{}",
sram_stack_start,
sram_stack_size,
sram_stack_allocated,
exceeded_check(sram_stack_size, sram_stack_allocated),
));
}
_ => {
let _ = writer.write_str(
"\
\r\n ?????????? ┼─────────────────────────────────────────── M\
\r\n │ ▼ Stack ? | ?",
);
}
}
let _ = writer.write_fmt(format_args!(
"\
\r\n {:#010X} ┼───────────────────────────────────────────\
\r\n │ Unused\
\r\n {:#010X} ┴───────────────────────────────────────────\
\r\n .....\
\r\n {:#010X} ┬─────────────────────────────────────────── F\
\r\n │ App Flash {:6} L\
\r\n {:#010X} ┼─────────────────────────────────────────── A\
\r\n │ Protected {:6} S\
\r\n {:#010X} ┴─────────────────────────────────────────── H\
\r\n",
sram_stack_bottom.unwrap_or(0),
sram_start,
flash_end,
flash_app_size,
flash_app_start,
flash_protected_size,
flash_start
));
}
fn print_full_process(&self, writer: &mut dyn Write) {
if !config::CONFIG.debug_panics {
return;
}
self.print_memory_map(writer);
self.stored_state.map(|stored_state| {
// We guarantee the memory bounds pointers provided to the UKB are
// correct.
unsafe {
self.chip.userspace_kernel_boundary().print_context(
self.mem_start(),
self.app_break.get(),
stored_state,
writer,
);
}
});
// Display grant information.
let number_grants = self.kernel.get_grant_count_and_finalize();
let _ = writer.write_fmt(format_args!(
"\
\r\n Total number of grant regions defined: {}\r\n",
self.kernel.get_grant_count_and_finalize()
));
let rows = (number_grants + 2) / 3;
// Access our array of grant pointers.
self.grant_pointers.map(|grant_pointers| {
// Iterate each grant and show its address.
for i in 0..rows {
for j in 0..3 {
let index = i + (rows * j);
if index >= number_grants {
break;
}
// Implement `grant_pointers[grant_num]` without a chance of
// a panic.
grant_pointers.get(index).map(|grant_entry| {
if grant_entry.grant_ptr.is_null() {
let _ =
writer.write_fmt(format_args!(" Grant {:>2} : -- ", index));
} else {
let _ = writer.write_fmt(format_args!(
" Grant {:>2} {:#x}: {:p}",
index, grant_entry.driver_num, grant_entry.grant_ptr
));
}
});
}
let _ = writer.write_fmt(format_args!("\r\n"));
}
});
// Display the current state of the MPU for this process.
self.mpu_config.map(|config| {
let _ = writer.write_fmt(format_args!("{}", config));
});
// Print a helpful message on how to re-compile a process to view the
// listing file. If a process is PIC, then we also need to print the
// actual addresses the process executed at so that the .lst file can be
// generated for those addresses. If the process was already compiled
// for a fixed address, then just generating a .lst file is fine.
self.debug.map(|debug| {
if debug.fixed_address_flash.is_some() {
// Fixed addresses, can just run `make lst`.
let _ = writer.write_fmt(format_args!(
"\
\r\nTo debug, run `make lst` in the app's folder\
\r\nand open the arch.{:#x}.{:#x}.lst file.\r\n\r\n",
debug.fixed_address_flash.unwrap_or(0),
debug.fixed_address_ram.unwrap_or(0)
));
} else {
// PIC, need to specify the addresses.
let sram_start = self.mem_start() as usize;
let flash_start = self.flash.as_ptr() as usize;
let flash_init_fn = flash_start + self.header.get_init_function_offset() as usize;
let _ = writer.write_fmt(format_args!(
"\
\r\nTo debug, run `make debug RAM_START={:#x} FLASH_INIT={:#x}`\
\r\nin the app's folder and open the .lst file.\r\n\r\n",
sram_start, flash_init_fn
));
}
});
}
}
// Only used if debug_panics == true
#[allow(unused)]
fn exceeded_check(size: usize, allocated: usize) -> &'static str {
if size > allocated {
" EXCEEDED!"
} else {
" "
}
}
impl<C: 'static + Chip> ProcessStandard<'_, C> {
// Memory offset for upcall ring buffer (10 element length).
const CALLBACK_LEN: usize = 10;
const CALLBACKS_OFFSET: usize = mem::size_of::<Task>() * Self::CALLBACK_LEN;
// Memory offset to make room for this process's metadata.
const PROCESS_STRUCT_OFFSET: usize = mem::size_of::<ProcessStandard<C>>();
pub(crate) unsafe fn create<'a>(
kernel: &'static Kernel,
chip: &'static C,
app_flash: &'static [u8],
header_length: usize,
app_version: u16,
remaining_memory: &'a mut [u8],
fault_policy: &'static dyn ProcessFaultPolicy,
require_kernel_version: bool,
index: usize,
) -> Result<(Option<&'static dyn Process>, &'a mut [u8]), ProcessLoadError> {
// Get a slice for just the app header.
let header_flash = app_flash
.get(0..header_length as usize)
.ok_or(ProcessLoadError::NotEnoughFlash)?;
// Parse the full TBF header to see if this is a valid app. If the
// header can't parse, we will error right here.
let tbf_header = tock_tbf::parse::parse_tbf_header(header_flash, app_version)?;
// First thing: check that the process is at the correct location in
// flash if the TBF header specified a fixed address. If there is a
// mismatch we catch that early.
if let Some(fixed_flash_start) = tbf_header.get_fixed_address_flash() {
// The flash address in the header is based on the app binary,
// so we need to take into account the header length.
let actual_address = app_flash.as_ptr() as u32 + tbf_header.get_protected_size();
let expected_address = fixed_flash_start;
if actual_address != expected_address {
return Err(ProcessLoadError::IncorrectFlashAddress {
actual_address,
expected_address,
});
}
}
let process_name = tbf_header.get_package_name();
// If this isn't an app (i.e. it is padding) or it is an app but it
// isn't enabled, then we can skip it and do not create a `Process`
// object.
if !tbf_header.is_app() || !tbf_header.enabled() {
if config::CONFIG.debug_load_processes {
if !tbf_header.is_app() {
debug!(
"Padding in flash={:#010X}-{:#010X}",
app_flash.as_ptr() as usize,
app_flash.as_ptr() as usize + app_flash.len() - 1
);
}
if !tbf_header.enabled() {
debug!(
"Process not enabled flash={:#010X}-{:#010X} process={:?}",
app_flash.as_ptr() as usize,
app_flash.as_ptr() as usize + app_flash.len() - 1,
process_name.unwrap_or("(no name)")
);
}
}
// Return no process and the full memory slice we were given.
return Ok((None, remaining_memory));
}
if let Some((major, minor)) = tbf_header.get_kernel_version() {
// If the `KernelVersion` header is present, we read the requested
// kernel version and compare it to the running kernel version.
if crate::MAJOR != major || crate::MINOR < minor {
// If the kernel major version is different, we prevent the
// process from being loaded.
//
// If the kernel major version is the same, we compare the
// kernel minor version. The current running kernel minor
// version has to be greater or equal to the one that the
// process has requested. If not, we prevent the process from
// loading.
if config::CONFIG.debug_load_processes {
debug!("WARN process {:?} not loaded as it requires kernel version >= {}.{} and < {}.0, (running kernel {}.{})", process_name.unwrap_or("(no name)"), major, minor, (major+1), crate::MAJOR, crate::MINOR);
} | }
} else {
if require_kernel_version {
// If enforcing the kernel version is requested, and the
// `KernelVersion` header is not present, we prevent the process
// from loading.
if config::CONFIG.debug_load_processes {
debug!("WARN process {:?} not loaded as it has no kernel version header, please upgrade to elf2tab >= 0.8.0",
process_name.unwrap_or("(no name)"));
}
return Err(ProcessLoadError::IncompatibleKernelVersion { version: None });
}
}
// Otherwise, actually load the app.
let process_ram_requested_size = tbf_header.get_minimum_app_ram_size() as usize;
let init_fn = app_flash
.as_ptr()
.offset(tbf_header.get_init_function_offset() as isize) as usize;
// Initialize MPU region configuration.
let mut mpu_config: <<C as Chip>::MPU as MPU>::MpuConfig = Default::default();
// Allocate MPU region for flash.
if chip
.mpu()
.allocate_region(
app_flash.as_ptr(),
app_flash.len(),
app_flash.len(),
mpu::Permissions::ReadExecuteOnly,
&mut mpu_config,
)
.is_none()
{
if config::CONFIG.debug_load_processes {
debug!(
"[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate MPU region for flash",
app_flash.as_ptr() as usize,
app_flash.as_ptr() as usize + app_flash.len() - 1,
process_name
);
}
return Err(ProcessLoadError::MpuInvalidFlashLength);
}
// Determine how much space we need in the application's memory space
// just for kernel and grant state. We need to make sure we allocate
// enough memory just for that.
// Make room for grant pointers.
let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
let grant_ptrs_num = kernel.get_grant_count_and_finalize();
let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
// Initial size of the kernel-owned part of process memory can be
// calculated directly based on the initial size of all kernel-owned
// data structures.
let initial_kernel_memory_size =
grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET;
// By default we start with the initial size of process-accessible
// memory set to 0. This maximizes the flexibility that processes have
// to allocate their memory as they see fit. If a process needs more
// accessible memory it must use the `brk` memop syscalls to request
// more memory.
//
// We must take into account any process-accessible memory required by
// the context switching implementation and allocate at least that much
// memory so that we can successfully switch to the process. This is
// architecture and implementation specific, so we query that now.
let min_process_memory_size = chip
.userspace_kernel_boundary()
.initial_process_app_brk_size();
// We have to ensure that we at least ask the MPU for
// `min_process_memory_size` so that we can be sure that `app_brk` is
// not set inside the kernel-owned memory region. Now, in practice,
// processes should not request 0 (or very few) bytes of memory in their
// TBF header (i.e. `process_ram_requested_size` will almost always be
// much larger than `min_process_memory_size`), as they are unlikely to
// work with essentially no available memory. But, we still must protect
// for that case.
let min_process_ram_size = cmp::max(process_ram_requested_size, min_process_memory_size);
// Minimum memory size for the process.
let min_total_memory_size = min_process_ram_size + initial_kernel_memory_size;
// Check if this process requires a fixed memory start address. If so,
// try to adjust the memory region to work for this process.
//
// Right now, we only support skipping some RAM and leaving a chunk
// unused so that the memory region starts where the process needs it
// to.
let remaining_memory = if let Some(fixed_memory_start) = tbf_header.get_fixed_address_ram()
{
// The process does have a fixed address.
if fixed_memory_start == remaining_memory.as_ptr() as u32 {
// Address already matches.
remaining_memory
} else if fixed_memory_start > remaining_memory.as_ptr() as u32 {
// Process wants a memory address farther in memory. Try to
// advance the memory region to make the address match.
let diff = (fixed_memory_start - remaining_memory.as_ptr() as u32) as usize;
if diff > remaining_memory.len() {
// We ran out of memory.
let actual_address =
remaining_memory.as_ptr() as u32 + remaining_memory.len() as u32 - 1;
let expected_address = fixed_memory_start;
return Err(ProcessLoadError::MemoryAddressMismatch {
actual_address,
expected_address,
});
} else {
// Change the memory range to start where the process
// requested it.
remaining_memory
.get_mut(diff..)
.ok_or(ProcessLoadError::InternalError)?
}
} else {
// Address is earlier in memory, nothing we can do.
let actual_address = remaining_memory.as_ptr() as u32;
let expected_address = fixed_memory_start;
return Err(ProcessLoadError::MemoryAddressMismatch {
actual_address,
expected_address,
});
}
} else {
remaining_memory
};
// Determine where process memory will go and allocate MPU region for
// app-owned memory.
let (app_memory_start, app_memory_size) = match chip.mpu().allocate_app_memory_region(
remaining_memory.as_ptr() as *const u8,
remaining_memory.len(),
min_total_memory_size,
min_process_memory_size,
initial_kernel_memory_size,
mpu::Permissions::ReadWriteOnly,
&mut mpu_config,
) {
Some((memory_start, memory_size)) => (memory_start, memory_size),
None => {
// Failed to load process. Insufficient memory.
if config::CONFIG.debug_load_processes {
debug!(
"[!] flash={:#010X}-{:#010X} process={:?} - couldn't allocate memory region of size >= {:#X}",
app_flash.as_ptr() as usize,
app_flash.as_ptr() as usize + app_flash.len() - 1,
process_name,
min_total_memory_size
);
}
return Err(ProcessLoadError::NotEnoughMemory);
}
};
// Get a slice for the memory dedicated to the process. This can fail if
// the MPU returns a region of memory that is not inside of the
// `remaining_memory` slice passed to `create()` to allocate the
// process's memory out of.
let memory_start_offset = app_memory_start as usize - remaining_memory.as_ptr() as usize;
// First split the remaining memory into a slice that contains the
// process memory and a slice that will not be used by this process.
let (app_memory_oversize, unused_memory) =
remaining_memory.split_at_mut(memory_start_offset + app_memory_size);
// Then since the process's memory need not start at the beginning of
// the remaining slice given to create(), get a smaller slice as needed.
let app_memory = app_memory_oversize
.get_mut(memory_start_offset..)
.ok_or(ProcessLoadError::InternalError)?;
// Check if the memory region is valid for the process. If a process
// included a fixed address for the start of RAM in its TBF header (this
// field is optional, processes that are position independent do not
// need a fixed address) then we check that we used the same address
// when we allocated it in RAM.
if let Some(fixed_memory_start) = tbf_header.get_fixed_address_ram() {
let actual_address = app_memory.as_ptr() as u32;
let expected_address = fixed_memory_start;
if actual_address != expected_address {
return Err(ProcessLoadError::MemoryAddressMismatch {
actual_address,
expected_address,
});
}
}
// Set the initial process-accessible memory to the amount specified by
// the context switch implementation.
let initial_app_brk = app_memory.as_ptr().add(min_process_memory_size);
// Set the initial allow high water mark to the start of process memory
// since no `allow` calls have been made yet.
let initial_allow_high_water_mark = app_memory.as_ptr();
// Set up initial grant region.
let mut kernel_memory_break = app_memory.as_mut_ptr().add(app_memory.len());
// Now that we know we have the space we can setup the grant
// pointers.
kernel_memory_break = kernel_memory_break.offset(-(grant_ptrs_offset as isize));
// This is safe today, as MPU constraints ensure that `memory_start`
// will always be aligned on at least a word boundary, and that
// memory_size will be aligned on at least a word boundary, and
// `grant_ptrs_offset` is a multiple of the word size. Thus,
// `kernel_memory_break` must be word aligned. While this is unlikely to
// change, it should be more proactively enforced.
//
// TODO: https://github.com/tock/tock/issues/1739
#[allow(clippy::cast_ptr_alignment)]
// Set all grant pointers to null.
let grant_pointers = slice::from_raw_parts_mut(
kernel_memory_break as *mut GrantPointerEntry,
grant_ptrs_num,
);
for grant_entry in grant_pointers.iter_mut() {
grant_entry.driver_num = 0;
grant_entry.grant_ptr = ptr::null_mut();
}
// Now that we know we have the space we can setup the memory for the
// upcalls.
kernel_memory_break = kernel_memory_break.offset(-(Self::CALLBACKS_OFFSET as isize));
// This is safe today, as MPU constraints ensure that `memory_start`
// will always be aligned on at least a word boundary, and that
// memory_size will be aligned on at least a word boundary, and
// `grant_ptrs_offset` is a multiple of the word size. Thus,
// `kernel_memory_break` must be word aligned. While this is unlikely to
// change, it should be more proactively enforced.
//
// TODO: https://github.com/tock/tock/issues/1739
#[allow(clippy::cast_ptr_alignment)]
// Set up ring buffer for upcalls to the process.
let upcall_buf =
slice::from_raw_parts_mut(kernel_memory_break as *mut Task, Self::CALLBACK_LEN);
let tasks = RingBuffer::new(upcall_buf);
// Last thing in the kernel region of process RAM is the process struct.
kernel_memory_break = kernel_memory_break.offset(-(Self::PROCESS_STRUCT_OFFSET as isize));
let process_struct_memory_location = kernel_memory_break;
// Create the Process struct in the app grant region.
let mut process: &mut ProcessStandard<C> =
&mut *(process_struct_memory_location as *mut ProcessStandard<'static, C>);
// Ask the kernel for a unique identifier for this process that is being
// created.
let unique_identifier = kernel.create_process_identifier();
// Save copies of these in case the app was compiled for fixed addresses
// for later debugging.
let fixed_address_flash = tbf_header.get_fixed_address_flash();
let fixed_address_ram = tbf_header.get_fixed_address_ram();
process
.process_id
.set(ProcessId::new(kernel, unique_identifier, index));
process.kernel = kernel;
process.chip = chip;
process.allow_high_water_mark = Cell::new(initial_allow_high_water_mark);
process.memory_start = app_memory.as_ptr();
process.memory_len = app_memory.len();
process.header = tbf_header;
process.kernel_memory_break = Cell::new(kernel_memory_break);
process.app_break = Cell::new(initial_app_brk);
process.grant_pointers = MapCell::new(grant_pointers);
process.flash = app_flash;
process.stored_state = MapCell::new(Default::default());
// Mark this process as unstarted
process.state = ProcessStateCell::new(process.kernel);
process.fault_policy = fault_policy;
process.restart_count = Cell::new(0);
process.mpu_config = MapCell::new(mpu_config);
process.mpu_regions = [
Cell::new(None),
Cell::new(None),
Cell::new(None),
Cell::new(None),
Cell::new(None),
Cell::new(None),
];
process.tasks = MapCell::new(tasks);
process.process_name = process_name.unwrap_or("");
process.debug = MapCell::new(ProcessStandardDebug {
fixed_address_flash: fixed_address_flash,
fixed_address_ram: fixed_address_ram,
app_heap_start_pointer: None,
app_stack_start_pointer: None,
app_stack_min_pointer: None,
syscall_count: 0,
last_syscall: None,
dropped_upcall_count: 0,
timeslice_expiration_count: 0,
});
let flash_protected_size = process.header.get_protected_size() as usize;
let flash_app_start_addr = app_flash.as_ptr() as usize + flash_protected_size;
process.tasks.map(|tasks| {
tasks.enqueue(Task::FunctionCall(FunctionCall {
source: FunctionCallSource::Kernel,
pc: init_fn,
argument0: flash_app_start_addr,
argument1: process.memory_start as usize,
argument2: process.memory_len,
argument3: process.app_break.get() as usize,
}));
});
// Handle any architecture-specific requirements for a new process.
//
// NOTE! We have to ensure that the start of process-accessible memory
// (`app_memory_start`) is word-aligned. Since we currently start
// process-accessible memory at the beginning of the allocated memory
// region, we trust the MPU to give us a word-aligned starting address.
//
// TODO: https://github.com/tock/tock/issues/1739
match process.stored_state.map(|stored_state| {
chip.userspace_kernel_boundary().initialize_process(
app_memory_start,
initial_app_brk,
stored_state,
)
}) {
Some(Ok(())) => {}
_ => {
if config::CONFIG.debug_load_processes {
debug!(
"[!] flash={:#010X}-{:#010X} process={:?} - couldn't initialize process",
app_flash.as_ptr() as usize,
app_flash.as_ptr() as usize + app_flash.len() - 1,
process_name
);
}
return Err(ProcessLoadError::InternalError);
}
};
kernel.increment_work();
// Return the process object and a remaining memory for processes slice.
Ok((Some(process), unused_memory))
}
/// Restart the process, resetting all of its state and re-initializing it
/// to start running. Assumes the process is not running but is still in
/// flash and still has its memory region allocated to it. This implements
/// the mechanism of restart.
fn restart(&self) -> Result<(), ErrorCode> {
// We need a new process identifier for this process since the restarted
// version is in effect a new process. This is also necessary to
// invalidate any stored `ProcessId`s that point to the old version of
// the process. However, the process has not moved locations in the
// processes array, so we copy the existing index.
let old_index = self.process_id.get().index;
let new_identifier = self.kernel.create_process_identifier();
self.process_id
.set(ProcessId::new(self.kernel, new_identifier, old_index));
// Reset debug information that is per-execution and not per-process.
self.debug.map(|debug| {
debug.syscall_count = 0;
debug.last_syscall = None;
debug.dropped_upcall_count = 0;
debug.timeslice_expiration_count = 0;
});
// FLASH
// We are going to start this process over again, so need the init_fn
// location.
let app_flash_address = self.flash_start();
let init_fn = unsafe {
app_flash_address.offset(self.header.get_init_function_offset() as isize) as usize
};
// Reset MPU region configuration.
//
// TODO: ideally, this would be moved into a helper function used by
// both create() and reset(), but process load debugging complicates
// this. We just want to create new config with only flash and memory
// regions.
let mut mpu_config: <<C as Chip>::MPU as MPU>::MpuConfig = Default::default();
// Allocate MPU region for flash.
let app_mpu_flash = self.chip.mpu().allocate_region(
self.flash.as_ptr(),
self.flash.len(),
self.flash.len(),
mpu::Permissions::ReadExecuteOnly,
&mut mpu_config,
);
if app_mpu_flash.is_none() {
// We were unable to allocate an MPU region for flash. This is very
// unexpected since we previously ran this process. However, we
// return now and leave the process faulted and it will not be
// scheduled.
return Err(ErrorCode::FAIL);
}
// RAM
// Re-determine the minimum amount of RAM the kernel must allocate to
// the process based on the specific requirements of the syscall
// implementation.
let min_process_memory_size = self
.chip
.userspace_kernel_boundary()
.initial_process_app_brk_size();
// Recalculate initial_kernel_memory_size as was done in create()
let grant_ptr_size = mem::size_of::<GrantPointerEntry>();
let grant_ptrs_num = self.kernel.get_grant_count_and_finalize();
let grant_ptrs_offset = grant_ptrs_num * grant_ptr_size;
let initial_kernel_memory_size =
grant_ptrs_offset + Self::CALLBACKS_OFFSET + Self::PROCESS_STRUCT_OFFSET;
let app_mpu_mem = self.chip.mpu().allocate_app_memory_region(
self.mem_start(),
self.memory_len,
self.memory_len, //we want exactly as much as we had before restart
min_process_memory_size,
initial_kernel_memory_size,
mpu::Permissions::ReadWriteOnly,
&mut mpu_config,
);
let (app_mpu_mem_start, app_mpu_mem_len) = match app_mpu_mem {
Some((start, len)) => (start, len),
None => {
// We couldn't configure the MPU for the process. This shouldn't
// happen since we were able to start the process before, but at
// this point it is better to leave the app faulted and not
// schedule it.
return Err(ErrorCode::NOMEM);
}
};
// Reset memory pointers now that we know the layout of the process
// memory and know that we can configure the MPU.
// app_brk is set based on minimum syscall size above the start of
// memory.
let app_brk = app_mpu_mem_start.wrapping_add(min_process_memory_size);
self.app_break.set(app_brk);
// kernel_brk is calculated backwards from the end of memory the size of
// the initial kernel data structures.
let kernel_brk = app_mpu_mem_start
.wrapping_add(app_mpu_mem_len)
.wrapping_sub(initial_kernel_memory_size);
self.kernel_memory_break.set(kernel_brk);
// High water mark for `allow`ed memory is reset to the start of the
// process's memory region.
self.allow_high_water_mark.set(app_mpu_mem_start);
// Drop the old config and use the clean one
self.mpu_config.replace(mpu_config);
// Handle any architecture-specific requirements for a process when it
// first starts (as it would when it is new).
let ukb_init_process = self.stored_state.map_or(Err(()), |stored_state| unsafe {
self.chip.userspace_kernel_boundary().initialize_process(
app_mpu_mem_start,
app_brk,
stored_state,
)
});
match ukb_init_process {
Ok(()) => {}
Err(_) => {
// We couldn't initialize the architecture-specific state for
// this process. This shouldn't happen since the app was able to
// be started before, but at this point the app is no longer
// valid. The best thing we can do now is leave the app as still
// faulted and not schedule it.
return Err(ErrorCode::RESERVE);
}
};
// And queue up this app to be restarted.
let flash_protected_size = self.header.get_protected_size() as usize;
let flash_app_start = app_flash_address as usize + flash_protected_size;
// Mark the state as `Unstarted` for the scheduler.
self.state.update(State::Unstarted);
// Mark that we restarted this process.
self.restart_count.increment();
// Enqueue the initial function.
self.tasks.map(|tasks| {
tasks.enqueue(Task::FunctionCall(FunctionCall {
source: FunctionCallSource::Kernel,
pc: init_fn,
argument0: flash_app_start,
argument1: self.mem_start() as usize,
argument2: self.memory_len,
argument3: self.app_break.get() as usize,
}));
});
// Mark that the process is ready to run.
self.kernel.increment_work();
Ok(())
}
/// Checks if the buffer represented by the passed in base pointer and size
/// is within the RAM bounds currently exposed to the processes (i.e. ending
/// at `app_break`). If this method returns `true`, the buffer is guaranteed
/// to be accessible to the process and to not overlap with the grant
/// region.
fn in_app_owned_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
let buf_end_addr = buf_start_addr.wrapping_add(size);
buf_end_addr >= buf_start_addr
&& buf_start_addr >= self.mem_start()
&& buf_end_addr <= self.app_break.get()
}
/// Checks if the buffer represented by the passed in base pointer and size
/// are within the readable region of an application's flash memory. If
/// this method returns true, the buffer is guaranteed to be readable to the
/// process.
fn in_app_flash_memory(&self, buf_start_addr: *const u8, size: usize) -> bool {
let buf_end_addr = buf_start_addr.wrapping_add(size);
buf_end_addr >= buf_start_addr
&& buf_start_addr >= self.flash_non_protected_start()
&& buf_end_addr <= self.flash_end()
}
/// Reset all `grant_ptr`s to NULL.
unsafe fn grant_ptrs_reset(&self) {
self.grant_pointers.map(|grant_pointers| {
for grant_entry in grant_pointers.iter_mut() {
grant_entry.driver_num = 0;
grant_entry.grant_ptr = ptr::null_mut();
}
});
}
/// Allocate memory in a process's grant region.
///
/// Ensures that the allocation is of `size` bytes and aligned to `align`
/// bytes.
///
/// If there is not enough memory, or the MPU cannot isolate the process
/// accessible region from the new kernel memory break after doing the
/// allocation, then this will return `None`.
fn allocate_in_grant_region_internal(&self, size: usize, align: usize) -> Option<NonNull<u8>> {
self.mpu_config.and_then(|mut config| {
// First, compute the candidate new pointer. Note that at this point
// we have not yet checked whether there is space for this
// allocation or that it meets alignment requirements.
let new_break_unaligned = self.kernel_memory_break.get().wrapping_sub(size);
// Our minimum alignment requirement is two bytes, so that the
// lowest bit of the address will always be zero and we can use it
// as a flag. It doesn't hurt to increase the alignment (except for
// potentially a wasted byte) so we make sure `align` is at least
// two.
let align = cmp::max(align, 2);
// The alignment must be a power of two, 2^a. The expression
// `!(align - 1)` then returns a mask with leading ones, followed by
// `a` trailing zeros.
let alignment_mask = !(align - 1);
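// For example (illustrative values): with `align = 8`, `align - 1` is
// 0b0111, so `alignment_mask` is ...1111_1000 and the AND below rounds the
// candidate break down to the nearest lower 8-byte boundary.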
let new_break = (new_break_unaligned as usize & alignment_mask) as *const u8;
// Verify there is space for this allocation
if new_break < self.app_break.get() {
None
// Verify it didn't wrap around
} else if new_break > self.kernel_memory_break.get() {
None
// Verify this is compatible with the MPU.
} else if let Err(_) = self.chip.mpu().update_app_memory_region(
self.app_break.get(),
new_break,
mpu::Permissions::ReadWriteOnly,
&mut config,
) {
None
} else {
// Allocation is valid.
// We always allocate down, so we must lower the
// kernel_memory_break.
self.kernel_memory_break.set(new_break);
// We need `grant_ptr` as a mutable pointer.
let grant_ptr = new_break as *mut u8;
// ### Safety
//
// Here we are guaranteeing that `grant_ptr` is not null. We can
// ensure this because we just created `grant_ptr` based on the
// process's allocated memory, and we know it cannot be null.
unsafe { Some(NonNull::new_unchecked(grant_ptr)) }
}
})
}
/// Create the identifier for a custom grant that grant.rs uses to access
/// the custom grant.
///
/// We create this identifier by calculating the number of bytes between
/// where the custom grant starts and the end of the process memory.
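/// For example (illustrative addresses): if process memory ends at
/// 0x2000_4000 and the custom grant starts at 0x2000_3F00, the stored
/// offset is 0x100.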
fn create_custom_grant_identifier(&self, ptr: NonNull<u8>) -> ProcessCustomGrantIdentifer {
let custom_grant_address = ptr.as_ptr() as usize;
let process_memory_end = self.mem_end() as usize;
ProcessCustomGrantIdentifer {
offset: process_memory_end - custom_grant_address,
}
}
/// Use a ProcessCustomGrantIdentifer to find the address of the custom
/// grant.
///
/// This reverses `create_custom_grant_identifier()`.
fn get_custom_grant_address(&self, identifier: ProcessCustomGrantIdentifer) -> usize {
let process_memory_end = self.mem_end() as usize;
// Subtract the offset in the identifier from the end of the process
// memory to get the address of the custom grant.
process_memory_end - identifier.offset
}
/// Check if the process is active.
///
/// "Active" is defined as the process can resume executing in the future.
/// This means its state in the `Process` struct is still valid, and that
/// the kernel could resume its execution without completely restarting and
/// resetting its state.
///
/// A process is inactive if the kernel cannot resume its execution, such as
/// if the process faults and is in an invalid state, or if the process
/// explicitly exits.
fn is_active(&self) -> bool {
let current_state = self.state.get();
current_state != State::Terminated && current_state != State::Faulted
}
} | return Err(ProcessLoadError::IncompatibleKernelVersion {
version: Some((major, minor)),
}); |
poi.config.js | module.exports = {
presets: [require('poi-preset-react')()],
entry: './src/index.jsx',
webpack: config => ({
...config,
resolve: { | ...config.alias,
Babel: '@babel/standalone',
},
},
}),
homepage: 'https://mmiller42.github.io/jsx-to-rjs/',
dist: 'docs',
} | ...config.resolve,
alias: { |
tmpl.go | package character
import (
"fmt"
"math"
"math/rand"
"github.com/genshinsim/gcsim/pkg/core"
)
type Tmpl struct {
Core *core.Core
Rand *rand.Rand
Index int
//this should describe the frame in which the abil becomes available
//if frame > current then it's available. no need to decrement this way
// CD map[string]int
ActionCD []int
Mods []core.CharStatMod
PreDamageMods []core.PreDamageMod
ReactMod []core.ReactionBonusMod
Tags map[string]int
//Profile info
Base core.CharacterBase
Weapon core.WeaponProfile
Stats [core.EndStatType]float64
Talents core.TalentProfile
SkillCon int
BurstCon int
CharZone core.ZoneType
CDReductionFuncs []core.CDAdjust
Energy float64
EnergyMax float64
HPCurrent float64
HPMax float64
//counters
NormalHitNum int //how many hits in a normal combo
NormalCounter int
//infusion
Infusion core.WeaponInfusion //TODO currently just overrides the old; disregarding any existing
}
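// NewTemplateChar builds a Tmpl from the given character profile: it wires up
// the core and RNG references, allocates the cooldown and modifier containers,
// validates the talent levels, copies the base stats, and sets the starting HP
// (falling back to math.MaxInt64, which Init later clamps to the computed max
// HP).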
func NewTemplateChar(x *core.Core, p core.CharacterProfile) (*Tmpl, error) {
c := Tmpl{}
c.Core = x
c.Rand = x.Rand
c.ActionCD = make([]int, core.EndActionType)
c.Mods = make([]core.CharStatMod, 0, 10)
c.Tags = make(map[string]int)
c.CDReductionFuncs = make([]core.CDAdjust, 0, 5)
c.Base = p.Base
c.Weapon = p.Weapon
c.Talents = p.Talents
c.SkillCon = 3
c.BurstCon = 5
if c.Talents.Attack < 1 || c.Talents.Attack > 15 {
return nil, fmt.Errorf("invalid talent lvl: attack - %v", c.Talents.Attack)
}
if c.Talents.Skill < 1 || c.Talents.Skill > 12 {
return nil, fmt.Errorf("invalid talent lvl: skill - %v", c.Talents.Skill)
}
if c.Talents.Burst < 1 || c.Talents.Burst > 12 {
return nil, fmt.Errorf("invalid talent lvl: burst - %v", c.Talents.Burst)
}
for i, v := range p.Stats {
c.Stats[i] = v
}
if p.Base.StartHP > -1 {
c.Core.Log.Debugw("setting starting hp", "frame", x.F, "event", core.LogCharacterEvent, "character", p.Base.Key.String(), "hp", p.Base.StartHP)
c.HPCurrent = p.Base.StartHP
} else {
c.HPCurrent = math.MaxInt64
}
return &c, nil
}
| t.Index = index
hpp := t.Stats[core.HPP]
hp := t.Stats[core.HP]
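// Accumulate flat HP and HP% bonuses from every stat mod that is still
// active (expiry in the future, or -1 for permanent mods).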
for _, m := range t.Mods {
if m.Expiry > t.Core.F || m.Expiry == -1 {
a, ok := m.Amount(core.AttackTagNone)
if ok {
hpp += a[core.HPP]
hp += a[core.HP]
}
}
}
t.HPMax = t.Base.HP*(1+hpp) + hp
// c.HPCurrent = 1
if t.HPCurrent > t.HPMax {
t.HPCurrent = t.HPMax
}
}
func (c *Tmpl) AddWeaponInfuse(inf core.WeaponInfusion) {
c.Infusion = inf
}
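// AddPreDamageMod registers a pre-damage modifier. If a modifier with the same
// key already exists it is overwritten in place; otherwise it is appended.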
func (c *Tmpl) AddPreDamageMod(mod core.PreDamageMod) {
ind := len(c.PreDamageMods)
for i, v := range c.PreDamageMods {
if v.Key == mod.Key {
ind = i
}
}
if ind != len(c.PreDamageMods) {
c.Core.Log.Debugw("char pre damage mod added", "frame", c.Core.F, "event", core.LogCharacterEvent, "overwrite", true, "key", mod.Key)
c.PreDamageMods[ind] = mod
} else {
c.PreDamageMods = append(c.PreDamageMods, mod)
c.Core.Log.Debugw("char pre damage mod added", "frame", c.Core.F, "event", core.LogCharacterEvent, "overwrite", false, "key", mod.Key)
}
}
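// AddMod registers a character stat modifier, overwriting any existing
// modifier with the same key and appending it otherwise.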
func (c *Tmpl) AddMod(mod core.CharStatMod) {
ind := len(c.Mods)
for i, v := range c.Mods {
if v.Key == mod.Key {
ind = i
}
}
if ind != len(c.Mods) {
c.Core.Log.Debugw("char mod added", "frame", c.Core.F, "char", c.Index, "event", core.LogCharacterEvent, "overwrite", true, "key", mod.Key)
c.Mods[ind] = mod
} else {
c.Mods = append(c.Mods, mod)
c.Core.Log.Debugw("char mod added", "frame", c.Core.F, "char", c.Index, "event", core.LogCharacterEvent, "overwrite", false, "key", mod.Key)
}
}
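// AddReactBonusMod registers a reaction bonus modifier, overwriting any
// existing modifier with the same key and appending it otherwise.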
func (t *Tmpl) AddReactBonusMod(mod core.ReactionBonusMod) {
ind := -1
for i, v := range t.ReactMod {
if v.Key == mod.Key {
ind = i
}
}
if ind != -1 {
t.Core.Log.Debugw("react bonus mod overwritten", "frame", t.Core.F, "event", core.LogEnemyEvent, "count", len(t.ReactMod), "char", t.Index)
// LogEnemyEvent
t.ReactMod[ind] = mod
return
}
t.ReactMod = append(t.ReactMod, mod)
t.Core.Log.Debugw("react bonus mod added", "frame", t.Core.F, "event", core.LogEnemyEvent, "count", len(t.ReactMod), "char", t.Index)
}
func (c *Tmpl) Tag(key string) int {
return c.Tags[key]
}
func (c *Tmpl) AddTag(key string, val int) {
c.Tags[key] = val
}
func (c *Tmpl) RemoveTag(key string) {
delete(c.Tags, key)
} | func (t *Tmpl) Init(index int) { |
summarize_host_insight_resource_statistics_request_response.go | // Copyright (c) 2016, 2018, 2022, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
package opsi
import (
"fmt"
"github.com/oracle/oci-go-sdk/v58/common"
"net/http"
"strings"
)
// SummarizeHostInsightResourceStatisticsRequest wrapper for the SummarizeHostInsightResourceStatistics operation
//
// See also
//
// Click https://docs.cloud.oracle.com/en-us/iaas/tools/go-sdk-examples/latest/opsi/SummarizeHostInsightResourceStatistics.go.html to see an example of how to use SummarizeHostInsightResourceStatisticsRequest.
type SummarizeHostInsightResourceStatisticsRequest struct {
// The OCID (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment.
CompartmentId *string `mandatory:"true" contributesTo:"query" name:"compartmentId"`
// Filter by host resource metric.
// Supported values are CPU, MEMORY, and LOGICAL_MEMORY.
ResourceMetric *string `mandatory:"true" contributesTo:"query" name:"resourceMetric"`
// Specify time period in ISO 8601 format with respect to current time.
// Default is last 30 days represented by P30D.
// If timeInterval is specified, then timeIntervalStart and timeIntervalEnd will be ignored.
// Examples: P90D (last 90 days), P4W (last 4 weeks), P2M (last 2 months), P1Y (last 12 months). Maximum value allowed is 25 months prior to current time (P25M).
AnalysisTimeInterval *string `mandatory:"false" contributesTo:"query" name:"analysisTimeInterval"`
// Analysis start time in UTC in ISO 8601 format(inclusive).
// Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
// The minimum allowed value is 2 years prior to the current day.
// timeIntervalStart and timeIntervalEnd parameters are used together.
// If analysisTimeInterval is specified, this parameter is ignored.
TimeIntervalStart *common.SDKTime `mandatory:"false" contributesTo:"query" name:"timeIntervalStart"`
// Analysis end time in UTC in ISO 8601 format(exclusive).
// Example 2019-10-30T00:00:00Z (yyyy-MM-ddThh:mm:ssZ).
// timeIntervalStart and timeIntervalEnd are used together.
// If timeIntervalEnd is not specified, current time is used as timeIntervalEnd.
TimeIntervalEnd *common.SDKTime `mandatory:"false" contributesTo:"query" name:"timeIntervalEnd"`
// Filter by one or more platform types.
// Supported platformType(s) for MACS-managed external host insight: [LINUX].
// Supported platformType(s) for EM-managed external host insight: [LINUX, SOLARIS, SUNOS].
PlatformType []SummarizeHostInsightResourceStatisticsPlatformTypeEnum `contributesTo:"query" name:"platformType" omitEmpty:"true" collectionFormat:"multi"`
// Optional list of host insight resource OCIDs (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
Id []string `contributesTo:"query" name:"id" collectionFormat:"multi"`
// Optional list of exadata insight resource OCIDs (https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
ExadataInsightId []string `contributesTo:"query" name:"exadataInsightId" collectionFormat:"multi"`
// Percentile values of daily usage to be used for computing the aggregate resource usage.
Percentile *int `mandatory:"false" contributesTo:"query" name:"percentile"`
// Return data of a specific insight
// Possible values are High Utilization, Low Utilization, Any, High Utilization Forecast,
// Low Utilization Forecast
InsightBy *string `mandatory:"false" contributesTo:"query" name:"insightBy"`
// Number of days used for utilization forecast analysis.
ForecastDays *int `mandatory:"false" contributesTo:"query" name:"forecastDays"`
// For list pagination. The maximum number of results per page, or items to
// return in a paginated "List" call.
// For important details about how pagination works, see
// List Pagination (https://docs.cloud.oracle.com/Content/API/Concepts/usingapi.htm#nine).
// Example: `50`
Limit *int `mandatory:"false" contributesTo:"query" name:"limit"`
// For list pagination. The value of the `opc-next-page` response header from
// the previous "List" call. For important details about how pagination works,
// see List Pagination (https://docs.cloud.oracle.com/Content/API/Concepts/usingapi.htm#nine).
Page *string `mandatory:"false" contributesTo:"query" name:"page"`
// The sort order to use, either ascending (`ASC`) or descending (`DESC`).
SortOrder SummarizeHostInsightResourceStatisticsSortOrderEnum `mandatory:"false" contributesTo:"query" name:"sortOrder" omitEmpty:"true"`
// The order in which resource statistics records are listed.
SortBy SummarizeHostInsightResourceStatisticsSortByEnum `mandatory:"false" contributesTo:"query" name:"sortBy" omitEmpty:"true"`
// Unique Oracle-assigned identifier for the request. If you need to contact
// Oracle about a particular request, please provide the request ID.
OpcRequestId *string `mandatory:"false" contributesTo:"header" name:"opc-request-id"`
// A list of tag filters to apply. Only resources with a defined tag matching the value will be returned.
// Each item in the list has the format "{namespace}.{tagName}.{value}". All inputs are case-insensitive.
// Multiple values for the same key (i.e. same namespace and tag name) are interpreted as "OR".
// Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as "AND".
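// Illustrative example (hypothetical tag): "Operations.CostCenter.42" matches resources whose
// defined tag "CostCenter" in namespace "Operations" has the value "42".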
DefinedTagEquals []string `contributesTo:"query" name:"definedTagEquals" collectionFormat:"multi"`
// A list of tag filters to apply. Only resources with a freeform tag matching the value will be returned.
// The key for each tag is "{tagName}.{value}". All inputs are case-insensitive.
// Multiple values for the same tag name are interpreted as "OR". Values for different tag names are interpreted as "AND".
FreeformTagEquals []string `contributesTo:"query" name:"freeformTagEquals" collectionFormat:"multi"`
// A list of tag existence filters to apply. Only resources for which the specified defined tags exist will be returned.
// Each item in the list has the format "{namespace}.{tagName}.true" (for checking existence of a defined tag)
// or "{namespace}.true". All inputs are case-insensitive.
// Currently, only existence ("true" at the end) is supported. Absence ("false" at the end) is not supported.
// Multiple values for the same key (i.e. same namespace and tag name) are interpreted as "OR".
// Values for different keys (i.e. different namespaces, different tag names, or both) are interpreted as "AND".
DefinedTagExists []string `contributesTo:"query" name:"definedTagExists" collectionFormat:"multi"`
// A list of tag existence filters to apply. Only resources for which the specified freeform tags exist will be returned.
// The key for each tag is "{tagName}.true". All inputs are case-insensitive.
// Currently, only existence ("true" at the end) is supported. Absence ("false" at the end) is not supported.
// Multiple values for different tag names are interpreted as "AND".
FreeformTagExists []string `contributesTo:"query" name:"freeformTagExists" collectionFormat:"multi"`
// A flag to search all resources within a given compartment and all sub-compartments.
CompartmentIdInSubtree *bool `mandatory:"false" contributesTo:"query" name:"compartmentIdInSubtree"`
// Metadata about the request. This information will not be transmitted to the service, but
// represents information that the SDK will consume to drive retry behavior.
RequestMetadata common.RequestMetadata
}
func (request SummarizeHostInsightResourceStatisticsRequest) String() string {
return common.PointerString(request)
}
// HTTPRequest implements the OCIRequest interface
func (request SummarizeHostInsightResourceStatisticsRequest) HTTPRequest(method, path string, binaryRequestBody *common.OCIReadSeekCloser, extraHeaders map[string]string) (http.Request, error) {
_, err := request.ValidateEnumValue()
if err != nil {
return http.Request{}, err
}
return common.MakeDefaultHTTPRequestWithTaggedStructAndExtraHeaders(method, path, request, extraHeaders)
}
// BinaryRequestBody implements the OCIRequest interface
func (request SummarizeHostInsightResourceStatisticsRequest) BinaryRequestBody() (*common.OCIReadSeekCloser, bool) {
return nil, false
}
// RetryPolicy implements the OCIRetryableRequest interface. This retrieves the specified retry policy.
func (request SummarizeHostInsightResourceStatisticsRequest) RetryPolicy() *common.RetryPolicy {
return request.RequestMetadata.RetryPolicy
}
// ValidateEnumValue returns an error when providing an unsupported enum value
// It is called while constructing the API request; calling it directly is not recommended.
func (request SummarizeHostInsightResourceStatisticsRequest) ValidateEnumValue() (bool, error) {
errMessage := []string{}
for _, val := range request.PlatformType {
if _, ok := GetMappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum(string(val)); !ok && val != "" {
errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for PlatformType: %s. Supported values are: %s.", val, strings.Join(GetSummarizeHostInsightResourceStatisticsPlatformTypeEnumStringValues(), ",")))
}
}
if _, ok := GetMappingSummarizeHostInsightResourceStatisticsSortOrderEnum(string(request.SortOrder)); !ok && request.SortOrder != "" {
errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for SortOrder: %s. Supported values are: %s.", request.SortOrder, strings.Join(GetSummarizeHostInsightResourceStatisticsSortOrderEnumStringValues(), ",")))
}
if _, ok := GetMappingSummarizeHostInsightResourceStatisticsSortByEnum(string(request.SortBy)); !ok && request.SortBy != "" {
errMessage = append(errMessage, fmt.Sprintf("unsupported enum value for SortBy: %s. Supported values are: %s.", request.SortBy, strings.Join(GetSummarizeHostInsightResourceStatisticsSortByEnumStringValues(), ",")))
}
if len(errMessage) > 0 {
return true, fmt.Errorf(strings.Join(errMessage, "\n"))
}
return false, nil
}
// SummarizeHostInsightResourceStatisticsResponse wrapper for the SummarizeHostInsightResourceStatistics operation
type SummarizeHostInsightResourceStatisticsResponse struct {
// The underlying http response
RawResponse *http.Response
// A list of SummarizeHostInsightResourceStatisticsAggregationCollection instances
SummarizeHostInsightResourceStatisticsAggregationCollection `presentIn:"body"` | // Oracle about a particular request, please provide the request ID.
OpcRequestId *string `presentIn:"header" name:"opc-request-id"`
// For pagination of a list of items. When paging through a list, if this header appears in the response,
// then a partial list might have been returned. Include this value as the `page` parameter for the
// subsequent GET request to get the next batch of items.
OpcNextPage *string `presentIn:"header" name:"opc-next-page"`
}
func (response SummarizeHostInsightResourceStatisticsResponse) String() string {
return common.PointerString(response)
}
// HTTPResponse implements the OCIResponse interface
func (response SummarizeHostInsightResourceStatisticsResponse) HTTPResponse() *http.Response {
return response.RawResponse
}
// SummarizeHostInsightResourceStatisticsPlatformTypeEnum Enum with underlying type: string
type SummarizeHostInsightResourceStatisticsPlatformTypeEnum string
// Set of constants representing the allowable values for SummarizeHostInsightResourceStatisticsPlatformTypeEnum
const (
SummarizeHostInsightResourceStatisticsPlatformTypeLinux SummarizeHostInsightResourceStatisticsPlatformTypeEnum = "LINUX"
SummarizeHostInsightResourceStatisticsPlatformTypeSolaris SummarizeHostInsightResourceStatisticsPlatformTypeEnum = "SOLARIS"
SummarizeHostInsightResourceStatisticsPlatformTypeSunos SummarizeHostInsightResourceStatisticsPlatformTypeEnum = "SUNOS"
)
var mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum = map[string]SummarizeHostInsightResourceStatisticsPlatformTypeEnum{
"LINUX": SummarizeHostInsightResourceStatisticsPlatformTypeLinux,
"SOLARIS": SummarizeHostInsightResourceStatisticsPlatformTypeSolaris,
"SUNOS": SummarizeHostInsightResourceStatisticsPlatformTypeSunos,
}
// GetSummarizeHostInsightResourceStatisticsPlatformTypeEnumValues Enumerates the set of values for SummarizeHostInsightResourceStatisticsPlatformTypeEnum
func GetSummarizeHostInsightResourceStatisticsPlatformTypeEnumValues() []SummarizeHostInsightResourceStatisticsPlatformTypeEnum {
values := make([]SummarizeHostInsightResourceStatisticsPlatformTypeEnum, 0)
for _, v := range mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum {
values = append(values, v)
}
return values
}
// GetSummarizeHostInsightResourceStatisticsPlatformTypeEnumStringValues Enumerates the set of values in String for SummarizeHostInsightResourceStatisticsPlatformTypeEnum
func GetSummarizeHostInsightResourceStatisticsPlatformTypeEnumStringValues() []string {
return []string{
"LINUX",
"SOLARIS",
"SUNOS",
}
}
// GetMappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetMappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum(val string) (SummarizeHostInsightResourceStatisticsPlatformTypeEnum, bool) {
mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnumIgnoreCase := make(map[string]SummarizeHostInsightResourceStatisticsPlatformTypeEnum)
for k, v := range mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnum {
mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnumIgnoreCase[strings.ToLower(k)] = v
}
enum, ok := mappingSummarizeHostInsightResourceStatisticsPlatformTypeEnumIgnoreCase[strings.ToLower(val)]
return enum, ok
}
// SummarizeHostInsightResourceStatisticsSortOrderEnum Enum with underlying type: string
type SummarizeHostInsightResourceStatisticsSortOrderEnum string
// Set of constants representing the allowable values for SummarizeHostInsightResourceStatisticsSortOrderEnum
const (
SummarizeHostInsightResourceStatisticsSortOrderAsc SummarizeHostInsightResourceStatisticsSortOrderEnum = "ASC"
SummarizeHostInsightResourceStatisticsSortOrderDesc SummarizeHostInsightResourceStatisticsSortOrderEnum = "DESC"
)
var mappingSummarizeHostInsightResourceStatisticsSortOrderEnum = map[string]SummarizeHostInsightResourceStatisticsSortOrderEnum{
"ASC": SummarizeHostInsightResourceStatisticsSortOrderAsc,
"DESC": SummarizeHostInsightResourceStatisticsSortOrderDesc,
}
// GetSummarizeHostInsightResourceStatisticsSortOrderEnumValues Enumerates the set of values for SummarizeHostInsightResourceStatisticsSortOrderEnum
func GetSummarizeHostInsightResourceStatisticsSortOrderEnumValues() []SummarizeHostInsightResourceStatisticsSortOrderEnum {
values := make([]SummarizeHostInsightResourceStatisticsSortOrderEnum, 0)
for _, v := range mappingSummarizeHostInsightResourceStatisticsSortOrderEnum {
values = append(values, v)
}
return values
}
// GetSummarizeHostInsightResourceStatisticsSortOrderEnumStringValues Enumerates the set of values in String for SummarizeHostInsightResourceStatisticsSortOrderEnum
func GetSummarizeHostInsightResourceStatisticsSortOrderEnumStringValues() []string {
return []string{
"ASC",
"DESC",
}
}
// GetMappingSummarizeHostInsightResourceStatisticsSortOrderEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetMappingSummarizeHostInsightResourceStatisticsSortOrderEnum(val string) (SummarizeHostInsightResourceStatisticsSortOrderEnum, bool) {
mappingSummarizeHostInsightResourceStatisticsSortOrderEnumIgnoreCase := make(map[string]SummarizeHostInsightResourceStatisticsSortOrderEnum)
for k, v := range mappingSummarizeHostInsightResourceStatisticsSortOrderEnum {
mappingSummarizeHostInsightResourceStatisticsSortOrderEnumIgnoreCase[strings.ToLower(k)] = v
}
enum, ok := mappingSummarizeHostInsightResourceStatisticsSortOrderEnumIgnoreCase[strings.ToLower(val)]
return enum, ok
}
// SummarizeHostInsightResourceStatisticsSortByEnum Enum with underlying type: string
type SummarizeHostInsightResourceStatisticsSortByEnum string
// Set of constants representing the allowable values for SummarizeHostInsightResourceStatisticsSortByEnum
const (
SummarizeHostInsightResourceStatisticsSortByUtilizationpercent SummarizeHostInsightResourceStatisticsSortByEnum = "utilizationPercent"
SummarizeHostInsightResourceStatisticsSortByUsage SummarizeHostInsightResourceStatisticsSortByEnum = "usage"
SummarizeHostInsightResourceStatisticsSortByUsagechangepercent SummarizeHostInsightResourceStatisticsSortByEnum = "usageChangePercent"
SummarizeHostInsightResourceStatisticsSortByHostname SummarizeHostInsightResourceStatisticsSortByEnum = "hostName"
SummarizeHostInsightResourceStatisticsSortByPlatformtype SummarizeHostInsightResourceStatisticsSortByEnum = "platformType"
)
var mappingSummarizeHostInsightResourceStatisticsSortByEnum = map[string]SummarizeHostInsightResourceStatisticsSortByEnum{
"utilizationPercent": SummarizeHostInsightResourceStatisticsSortByUtilizationpercent,
"usage": SummarizeHostInsightResourceStatisticsSortByUsage,
"usageChangePercent": SummarizeHostInsightResourceStatisticsSortByUsagechangepercent,
"hostName": SummarizeHostInsightResourceStatisticsSortByHostname,
"platformType": SummarizeHostInsightResourceStatisticsSortByPlatformtype,
}
// GetSummarizeHostInsightResourceStatisticsSortByEnumValues Enumerates the set of values for SummarizeHostInsightResourceStatisticsSortByEnum
func GetSummarizeHostInsightResourceStatisticsSortByEnumValues() []SummarizeHostInsightResourceStatisticsSortByEnum {
values := make([]SummarizeHostInsightResourceStatisticsSortByEnum, 0)
for _, v := range mappingSummarizeHostInsightResourceStatisticsSortByEnum {
values = append(values, v)
}
return values
}
// GetSummarizeHostInsightResourceStatisticsSortByEnumStringValues Enumerates the set of values in String for SummarizeHostInsightResourceStatisticsSortByEnum
func GetSummarizeHostInsightResourceStatisticsSortByEnumStringValues() []string {
return []string{
"utilizationPercent",
"usage",
"usageChangePercent",
"hostName",
"platformType",
}
}
// GetMappingSummarizeHostInsightResourceStatisticsSortByEnum performs a case-insensitive comparison on the enum value and returns the desired enum
func GetMappingSummarizeHostInsightResourceStatisticsSortByEnum(val string) (SummarizeHostInsightResourceStatisticsSortByEnum, bool) {
mappingSummarizeHostInsightResourceStatisticsSortByEnumIgnoreCase := make(map[string]SummarizeHostInsightResourceStatisticsSortByEnum)
for k, v := range mappingSummarizeHostInsightResourceStatisticsSortByEnum {
mappingSummarizeHostInsightResourceStatisticsSortByEnumIgnoreCase[strings.ToLower(k)] = v
}
enum, ok := mappingSummarizeHostInsightResourceStatisticsSortByEnumIgnoreCase[strings.ToLower(val)]
return enum, ok
} |
// Unique Oracle-assigned identifier for the request. If you need to contact |
stability_summary.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This module crawls a `clean::Crate` and produces a summarization of the
//! stability levels within the crate. The summary contains the module
//! hierarchy, with item counts for every stability level per module. A parent
//! module's count includes its children's.
use std::ops::Add;
use std::num::Zero;
use std::iter::AdditiveIterator;
use syntax::attr::{Deprecated, Experimental, Unstable, Stable, Frozen, Locked};
use syntax::ast::Public;
use clean::{Crate, Item, ModuleItem, Module, StructItem, Struct, EnumItem, Enum};
use clean::{ImplItem, Impl, Trait, TraitItem, TraitMethod, ProvidedMethod, RequiredMethod};
use clean::{TypeTraitItem, ViewItemItem, PrimitiveItem};
#[deriving(Zero, Encodable, Decodable, PartialEq, Eq)]
/// The counts for each stability level.
pub struct Counts {
pub deprecated: uint,
pub experimental: uint,
pub unstable: uint,
pub stable: uint,
pub frozen: uint,
pub locked: uint,
/// No stability level, inherited or otherwise.
pub unmarked: uint,
}
impl Add<Counts, Counts> for Counts {
fn add(&self, other: &Counts) -> Counts {
Counts {
deprecated: self.deprecated + other.deprecated,
experimental: self.experimental + other.experimental,
unstable: self.unstable + other.unstable,
stable: self.stable + other.stable,
frozen: self.frozen + other.frozen,
locked: self.locked + other.locked,
unmarked: self.unmarked + other.unmarked,
}
}
}
impl Counts {
pub fn | (&self) -> uint {
self.deprecated + self.experimental + self.unstable + self.stable +
self.frozen + self.locked + self.unmarked
}
}
#[deriving(Encodable, Decodable, PartialEq, Eq)]
/// A summarized module, which includes total counts and summarized children
/// modules.
pub struct ModuleSummary {
pub name: String,
pub counts: Counts,
pub submodules: Vec<ModuleSummary>,
}
impl PartialOrd for ModuleSummary {
fn partial_cmp(&self, other: &ModuleSummary) -> Option<Ordering> {
self.name.partial_cmp(&other.name)
}
}
impl Ord for ModuleSummary {
fn cmp(&self, other: &ModuleSummary) -> Ordering {
self.name.cmp(&other.name)
}
}
// is the item considered publicly visible?
fn visible(item: &Item) -> bool {
match item.inner {
ImplItem(_) => true,
_ => item.visibility == Some(Public)
}
}
// Produce the summary for an arbitrary item. If the item is a module, include a
// module summary. The counts for items with nested items (e.g. modules, traits,
// impls) include all children counts.
fn summarize_item(item: &Item) -> (Counts, Option<ModuleSummary>) {
// count this item
let item_counts = match item.stability {
None => Counts { unmarked: 1, .. Zero::zero() },
Some(ref stab) => match stab.level {
Deprecated => Counts { deprecated: 1, .. Zero::zero() },
Experimental => Counts { experimental: 1, .. Zero::zero() },
Unstable => Counts { unstable: 1, .. Zero::zero() },
Stable => Counts { stable: 1, .. Zero::zero() },
Frozen => Counts { frozen: 1, .. Zero::zero() },
Locked => Counts { locked: 1, .. Zero::zero() },
}
};
// Count this item's children, if any. Note that a trait impl is
// considered to have no children.
match item.inner {
// Require explicit `pub` to be visible
StructItem(Struct { fields: ref subitems, .. }) |
ImplItem(Impl { items: ref subitems, trait_: None, .. }) => {
let subcounts = subitems.iter().filter(|i| visible(*i))
.map(summarize_item)
.map(|s| s.val0())
.sum();
(item_counts + subcounts, None)
}
// `pub` automatically
EnumItem(Enum { variants: ref subitems, .. }) => {
let subcounts = subitems.iter().map(summarize_item)
.map(|s| s.val0())
.sum();
(item_counts + subcounts, None)
}
TraitItem(Trait {
items: ref trait_items,
..
}) => {
fn extract_item<'a>(trait_item: &'a TraitMethod) -> &'a Item {
match *trait_item {
ProvidedMethod(ref item) |
RequiredMethod(ref item) |
TypeTraitItem(ref item) => item
}
}
let subcounts = trait_items.iter()
.map(extract_item)
.map(summarize_item)
.map(|s| s.val0())
.sum();
(item_counts + subcounts, None)
}
ModuleItem(Module { items: ref items, .. }) => {
let mut counts = item_counts;
let mut submodules = Vec::new();
for (subcounts, submodule) in items.iter().filter(|i| visible(*i))
.map(summarize_item) {
counts = counts + subcounts;
submodule.map(|m| submodules.push(m));
}
submodules.sort();
(counts, Some(ModuleSummary {
name: item.name.as_ref().map_or("".to_string(), |n| n.clone()),
counts: counts,
submodules: submodules,
}))
}
// no stability information for the following items:
ViewItemItem(_) | PrimitiveItem(_) => (Zero::zero(), None),
_ => (item_counts, None)
}
}
/// Summarizes the stability levels in a crate.
pub fn build(krate: &Crate) -> ModuleSummary {
match krate.module {
None => ModuleSummary {
name: krate.name.clone(),
counts: Zero::zero(),
submodules: Vec::new(),
},
Some(ref item) => ModuleSummary {
name: krate.name.clone(), .. summarize_item(item).val1().unwrap()
}
}
}
| total |
ProtectedRoute.tsx | import { useRecoilQuery } from 'hooks/recoil'
import React from 'react'
import { Redirect, Route, RouteProps } from 'react-router-dom'
import { userInfoGetters } from 'stores/user' |
interface Props extends RouteProps {
isAuthenticated?: boolean
}
function ProtectedRoute(props: Props) {
const { isLoading, data } = useRecoilQuery(userInfoGetters)
if (isLoading) {
return <Route {...props} />
}
if (!data || !data.isAuthenticated) {
return <Redirect to="/login" />
}
return <Route {...props} />
}
export default ProtectedRoute | |
main.go | package main
import (
"errors"
"flag"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/BurntSushi/toml"
)
var (
buildpacksDir string
orderPath string
inputBuildpackDir string
)
type ErrorFail struct {
Err error
Code int
Action []string
}
func (e *ErrorFail) Error() string {
message := "failed to " + strings.Join(e.Action, " ")
if e.Err == nil {
return message
}
return fmt.Sprintf("%s: %s", message, e.Err)
}
func failErr(err error, action ...string) error {
code := 1
if err, ok := err.(*ErrorFail); ok {
code = err.Code
}
return failErrCode(err, code, action...)
}
func failErrCode(err error, code int, action ...string) error {
return &ErrorFail{Err: err, Code: code, Action: action}
}
func exit(err error) {
if err == nil {
os.Exit(0)
}
fmt.Printf("Error: %s\n", err)
if err, ok := err.(*ErrorFail); ok {
os.Exit(err.Code)
}
os.Exit(1)
}
type Buildpack struct {
ID string `toml:"id"`
Version string `toml:"version"`
}
func (b *Buildpack) escapedID() string {
return strings.Replace(b.ID, "/", "_", -1)
}
type BuildpackTOML struct {
Buildpack Buildpack `toml:"buildpack"`
}
func init() {
flag.StringVar(&inputBuildpackDir, "buildpack", "", "local path of the buildpack to install")
flag.StringVar(&buildpacksDir, "buildpacks", "/buildpacks", "path to buildpacks directory")
flag.StringVar(&orderPath, "order", "/buildpacks/order.toml", "path to order.toml")
}
func main() {
flag.Parse()
if flag.NArg() != 0 {
exit(errors.New("failed to parse arguments"))
}
exit(install())
}
func | () error {
var buildpackTOML BuildpackTOML
_, err := toml.DecodeFile(filepath.Join(inputBuildpackDir, "buildpack.toml"), &buildpackTOML)
if err != nil {
return failErr(err, "read buildpack metadata file")
}
metadata := buildpackTOML.Buildpack
if metadata.ID == "" {
return failErr(err, "parse buildpack ID")
}
if metadata.Version == "" {
return failErr(err, "parse buildpack version")
}
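// The buildpack is installed under <buildpacksDir>/<buildpack dir name>/<version>,
// e.g. /buildpacks/my-buildpack/0.0.1 (illustrative path).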
buildpackDirName := filepath.Base(inputBuildpackDir)
outputBuildpackDir := filepath.Join(buildpacksDir, buildpackDirName, metadata.Version)
if err = os.MkdirAll(filepath.Dir(outputBuildpackDir), os.ModePerm); err != nil {
return failErr(err, "create buildpack directory")
}
if err := os.Rename(inputBuildpackDir, outputBuildpackDir); err != nil {
return failErr(err, "install buildpack version")
}
f, err := os.OpenFile(orderPath, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600)
if err != nil {
return failErr(err, "opening order.toml")
}
defer f.Close()
groupTemplate := `[[groups]]
buildpacks = [ { id = "%s", version = "%s" } ]
`
_, err = f.WriteString(fmt.Sprintf(groupTemplate, metadata.ID, metadata.Version))
if err != nil {
return failErr(err, "writing to order.toml")
}
return nil
}
| install |
token.go | package types
import (
"encoding/json"
"math"
"strconv"
"strings"
"github.com/gogo/protobuf/proto"
"gopkg.in/yaml.v2"
sdk "github.com/cosmos/cosmos-sdk/types"
sdkerrors "github.com/cosmos/cosmos-sdk/types/errors"
)
type TokenI interface {
GetSymbol() string
GetName() string
GetScale() uint32
GetMinUnit() string
GetInitialSupply() uint64
GetMaxSupply() uint64
GetMintable() bool
GetOwner() sdk.AccAddress
ToMainCoin(coin sdk.Coin) (sdk.DecCoin, error)
ToMinCoin(coin sdk.DecCoin) (sdk.Coin, error)
}
var _ proto.Message = &Token{}
// NewToken constructs a new Token instance
func NewToken(
symbol,
name,
minUnit string,
scale uint32,
initialSupply,
maxSupply uint64,
mintable bool,
owner sdk.AccAddress,
) Token {
symbol = strings.ToLower(strings.TrimSpace(symbol))
minUnit = strings.ToLower(strings.TrimSpace(minUnit))
name = strings.TrimSpace(name)
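// A max supply of 0 is treated as "unset": mintable tokens default to the
// maximum allowed supply, non-mintable tokens are capped at the initial supply.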
if maxSupply == 0 {
if mintable {
maxSupply = MaximumMaxSupply
} else {
maxSupply = initialSupply
}
}
return Token{
Symbol: symbol,
Name: name,
MinUnit: minUnit,
Scale: scale,
InitialSupply: initialSupply,
MaxSupply: maxSupply,
Mintable: mintable,
Owner: owner,
}
}
// GetSymbol implements exported.TokenI
func (t Token) GetSymbol() string {
return t.Symbol
}
// GetName implements exported.TokenI
func (t Token) GetName() string {
return t.Name
}
// GetScale implements exported.TokenI
func (t Token) GetScale() uint32 {
return t.Scale
}
// GetMinUnit implements exported.TokenI
func (t Token) GetMinUnit() string {
return t.MinUnit
}
// GetInitialSupply implements exported.TokenI
func (t Token) GetInitialSupply() uint64 {
return t.InitialSupply
}
// GetMaxSupply implements exported.TokenI
func (t Token) GetMaxSupply() uint64 {
return t.MaxSupply
}
// GetMintable implements exported.TokenI
func (t Token) GetMintable() bool {
return t.Mintable
}
// GetOwner implements exported.TokenI
func (t Token) GetOwner() sdk.AccAddress {
return t.Owner
}
func (t Token) String() string {
bz, _ := yaml.Marshal(t)
return string(bz)
}
// ToMainCoin returns the main denom coin from args
func (t Token) ToMainCoin(coin sdk.Coin) (sdk.DecCoin, error) {
if t.Symbol != coin.Denom && t.MinUnit != coin.Denom {
return sdk.NewDecCoinFromDec(coin.Denom, sdk.ZeroDec()), sdkerrors.Wrapf(ErrTokenNotExists, "token not match")
}
if t.Symbol == coin.Denom {
return sdk.NewDecCoin(coin.Denom, coin.Amount), nil
}
precision := math.Pow10(int(t.Scale))
precisionStr := strconv.FormatFloat(precision, 'f', 0, 64)
precisionDec, err := sdk.NewDecFromStr(precisionStr)
if err != nil {
return sdk.DecCoin{}, err
}
// dest amount = src amount / 10^(scale)
amount := sdk.NewDecFromInt(coin.Amount).Quo(precisionDec)
return sdk.NewDecCoinFromDec(t.Symbol, amount), nil
}
// ToMinCoin returns the min denom coin from args
func (t Token) ToMinCoin(coin sdk.DecCoin) (newCoin sdk.Coin, err error) {
if t.Symbol != coin.Denom && t.MinUnit != coin.Denom {
return sdk.NewCoin(coin.Denom, sdk.ZeroInt()), sdkerrors.Wrapf(ErrTokenNotExists, "token not match")
}
if t.MinUnit == coin.Denom {
return sdk.NewCoin(coin.Denom, coin.Amount.TruncateInt()), nil
}
precision := math.Pow10(int(t.Scale))
precisionStr := strconv.FormatFloat(precision, 'f', 0, 64)
precisionDec, err := sdk.NewDecFromStr(precisionStr)
if err != nil {
return sdk.Coin{}, err
}
// dest amount = src amount * 10^(dest scale)
amount := coin.Amount.Mul(precisionDec)
return sdk.NewCoin(t.MinUnit, amount.TruncateInt()), nil
}
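// Illustrative conversion, assuming a token with Symbol "tok", MinUnit "utok" and Scale 6:
//   ToMainCoin(1500000utok) -> 1.5tok       (1500000 / 10^6)
//   ToMinCoin(1.5tok)       -> 1500000utok  (1.5 * 10^6)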
func | (token Token) error {
if token.Owner.Empty() {
return ErrNilOwner
}
nameLen := len(strings.TrimSpace(token.Name))
if nameLen == 0 || nameLen > MaximumNameLen {
return sdkerrors.Wrapf(ErrInvalidName, "invalid token name %s, only accepts length (0, %d]", token.Name, MaximumNameLen)
}
if err := CheckSymbol(token.Symbol); err != nil {
return err
}
minUnitLen := len(strings.TrimSpace(token.MinUnit))
if minUnitLen < MinimumMinUnitLen || minUnitLen > MaximumMinUnitLen || !IsAlphaNumericDash(token.MinUnit) || !IsBeginWithAlpha(token.MinUnit) {
return sdkerrors.Wrapf(ErrInvalidMinUnit, "invalid token min_unit %s, only accepts alphanumeric characters, and begin with an english letter, length [%d, %d]", token.MinUnit, MinimumMinUnitLen, MaximumMinUnitLen)
}
if token.InitialSupply > MaximumInitSupply {
return sdkerrors.Wrapf(ErrInvalidInitSupply, "invalid token initial supply %d, only accepts value [0, %d]", token.InitialSupply, MaximumInitSupply)
}
if token.MaxSupply < token.InitialSupply || token.MaxSupply > MaximumMaxSupply {
return sdkerrors.Wrapf(ErrInvalidMaxSupply, "invalid token max supply %d, only accepts value [%d, %d]", token.MaxSupply, token.InitialSupply, MaximumMaxSupply)
}
if token.Scale > MaximumScale {
return sdkerrors.Wrapf(ErrInvalidScale, "invalid token scale %d, only accepts value [0, %d]", token.Scale, MaximumScale)
}
return nil
}
// CheckSymbol checks if the given symbol is valid
func CheckSymbol(symbol string) error {
if len(symbol) < MinimumSymbolLen || len(symbol) > MaximumSymbolLen {
return sdkerrors.Wrapf(ErrInvalidSymbol, "invalid symbol: %s, only accepts length [%d, %d]", symbol, MinimumSymbolLen, MaximumSymbolLen)
}
if !IsBeginWithAlpha(symbol) || !IsAlphaNumericDash(symbol) {
return sdkerrors.Wrapf(ErrInvalidSymbol, "invalid symbol: %s, only accepts alphanumeric characters, and begin with an english letter", symbol)
}
return nil
}
type Bool string
const (
False Bool = "false"
True Bool = "true"
Nil Bool = ""
)
func (b Bool) ToBool() bool {
v := string(b)
if len(v) == 0 {
return false
}
result, _ := strconv.ParseBool(v)
return result
}
func (b Bool) String() string {
return string(b)
}
// Marshal needed for protobuf compatibility
func (b Bool) Marshal() ([]byte, error) {
return []byte(b), nil
}
// Unmarshal needed for protobuf compatibility
func (b *Bool) Unmarshal(data []byte) error {
*b = Bool(data[:])
return nil
}
// Marshals to JSON using string
func (b Bool) MarshalJSON() ([]byte, error) {
return json.Marshal(b.String())
}
// UnmarshalJSON from using string
func (b *Bool) UnmarshalJSON(data []byte) error {
var s string
err := json.Unmarshal(data, &s)
if err != nil {
return err
}
*b = Bool(s)
return nil
}
func ParseBool(v string) (Bool, error) {
if len(v) == 0 {
return Nil, nil
}
result, err := strconv.ParseBool(v)
if err != nil {
return Nil, err
}
if result {
return True, nil
}
return False, nil
}
| ValidateToken |
test.rs | #![allow(dead_code)]
#![allow(non_snake_case)]
#![allow(unused_imports)]
use super::*;
use crate::instructions::decode_opcode;
fn get_vm() -> Chip8VM {
Chip8VM {
waiting_for_key_press: false,
key_index_store: 0x00,
display_data: [false; DISPLAY_SIZE[0] * DISPLAY_SIZE[1]],
memory: [0 as u8; MEMORY_SIZE],
v: [0 as u8; 16],
i: 0,
delay_timer: 0,
sound_timer: 0,
program_counter: 0x200,
stack_pointer: 0,
stack: [0 as u16; 16],
pressed_key: None,
}
}
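// The VM starts with program_counter at 0x200, the conventional CHIP-8 program
// load address; registers, memory, stack and display all start zeroed/cleared.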
#[test]
fn test_00E0() {
// 0x00E0 - Clear the screen
let opcode = 0x00E0;
let mut vm = get_vm();
vm.display_data = [true; DISPLAY_SIZE[0] * DISPLAY_SIZE[1]];
vm.execute_instruction(decode_opcode(opcode), opcode);
for i in 0..vm.display_data.len() {
assert_eq!(vm.display_data[i], false);
}
}
#[test]
fn test_00EE() {
// 0x00EE - Return from a subroutine
let opcode = 0x00EE;
let mut vm = get_vm();
vm.stack[0x0] = 0x200;
vm.stack_pointer = 1;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x200);
assert_eq!(vm.stack_pointer, 0);
}
#[test]
fn test_1NNN() {
// 0x1NNN - Jump to address NNN
let opcode = 0x1234;
let mut vm = get_vm();
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter + 2, 0x0234); // add 2 to the jump target because in normal execution the CPU increments pc by 2 after the jump
}
#[test]
fn test_2NNN() {
// 0x2NNN - Execute subroutine starting at address NNN
let opcode = 0x2345;
let mut vm = get_vm();
let old_pc_value = vm.program_counter;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter + 2, 0x0345); // add 2 to the jump target because in normal execution the CPU increments pc by 2 after the jump
assert_eq!(vm.stack_pointer, 1);
assert_eq!(vm.stack[0x0], old_pc_value);
}
#[test]
fn test_3XNN() {
// 0x3XNN - Skip the following instruction if the value of register VX equals NN
let opcode = 0x3456;
let mut vm = get_vm();
//not equal
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x0);
//equal
vm.program_counter = 0x0;
vm.v[0x4] = 0x56;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x2);
}
#[test]
fn test_4XNN() {
// 0x4XNN - Skip the following instruction if the value of register VX is not equal to NN
let opcode = 0x4567;
let mut vm = get_vm();
//not equal
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x2);
//equal
vm.program_counter = 0x0;
vm.v[0x5] = 0x67;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x0);
}
#[test]
fn test_5XY0() {
// 0x5XY0 - Skip the following instruction if the value of register VX is equal to the value of register VY
let opcode = 0x5670;
let mut vm = get_vm();
//not equal
vm.v[0x6] = 0x0;
vm.v[0x7] = 0x1;
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x0);
//equal
vm.v[0x6] = 0x1;
vm.v[0x7] = 0x1;
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x2);
}
#[test]
fn test_6XNN() {
// 0x6XNN - Store number NN in register VX
let opcode = 0x6789;
let mut vm = get_vm();
vm.v[0x7] = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x7], 0x89);
}
#[test]
fn test_7XNN() |
#[test]
fn test_8XY0() {
// 0x8XY0 - Store the value of register VY in register VX
let opcode = 0x89A0;
let mut vm = get_vm();
vm.v[0x9] = 0x99;
vm.v[0xA] = 0xAA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], vm.v[0xA]);
assert_eq!(vm.v[0x9], 0xAA);
}
#[test]
fn test_8XY1() {
// 0x8XY1 - Set VX to VX OR VY
let opcode = 0x89A1;
let mut vm = get_vm();
vm.v[0x9] = 0x99;
vm.v[0xA] = 0xAA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x99 | 0xAA);
}
#[test]
fn test_8XY2() {
// 0x8XY2 - Set VX to VX AND VY
let opcode = 0x89A2;
let mut vm = get_vm();
vm.v[0x9] = 0x99;
vm.v[0xA] = 0xAA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x99 & 0xAA);
}
#[test]
fn test_8XY3() {
// 0x8XY3 - Set VX to VX XOR VY
let opcode = 0x89A3;
let mut vm = get_vm();
vm.v[0x9] = 0x99;
vm.v[0xA] = 0xAA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0xAA ^ 0x99);
}
#[test]
fn test_8XY4() {
// 0x8XY4 - Add the value of register VY to register VX
// Set VF to 01 if a carry occurs
// Set VF to 00 if a carry does not occur
let opcode = 0x89A4;
let mut vm = get_vm();
// with borrow
vm.v[0x9] = 0x99;
vm.v[0xA] = 0xAA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x43);
assert_eq!(vm.v[0xF], 0x1);
// without borrow
vm.v[0x9] = 0x11;
vm.v[0xA] = 0x22;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x33);
assert_eq!(vm.v[0xF], 0x0);
}
#[test]
fn test_8XY5() {
// 0x8XY5 - Subtract the value of register VY from register VX
// Set VF to 00 if a borrow occurs
// Set VF to 01 if a borrow does not occur
let opcode = 0x89A5;
let mut vm = get_vm();
// without borrow
vm.v[0x9] = 0xFF;
vm.v[0xA] = 0x01;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0xFE);
assert_eq!(vm.v[0xF], 0x1);
// with borrow
vm.v[0x9] = 0x01;
vm.v[0xA] = 0x02;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0xFF);
assert_eq!(vm.v[0xF], 0x0);
}
#[test]
fn test_8XY6() {
// 0x8XY6 - Store the value of register VY shifted right one bit in register VX
// Set register VF to the least significant bit prior to the shift
// VY is unchanged
let opcode = 0x89A6;
let mut vm = get_vm();
// the least-significant bit of Vx is 1
vm.v[0xA] = 0xFF;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x7F);
assert_eq!(vm.v[0xF], 0x1);
// the least-significant bit of Vx is 0
vm.v[0xA] = 0xFE;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x7F);
assert_eq!(vm.v[0xF], 0x0);
}
#[test]
fn test_8XY7() {
// 0x8XY7 - Set register VX to the value of VY minus VX
// Set VF to 00 if a borrow occurs
// Set VF to 01 if a borrow does not occur
let opcode = 0x89A7;
let mut vm = get_vm();
// without borrow
vm.v[0x9] = 0x02;
vm.v[0xA] = 0x08;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0x06);
assert_eq!(vm.v[0xF], 0x1);
// with borrow
vm.v[0x9] = 0x04;
vm.v[0xA] = 0x02;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x9], 0xFE);
assert_eq!(vm.v[0xF], 0x0);
}
#[test]
fn test_8XYE() {
// 0x8XYE - Store the value of register VY shifted left one bit in register VX
// Set register VF to the most significant bit prior to the shift
// VY is unchanged
let opcode = 0x89AE;
let mut vm = get_vm();
// the most-significant bit of Vx is 0
vm.v[0xA] = 0x11;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0xF], 0x00);
assert_eq!(vm.v[0xA], 0x11);
assert_eq!(vm.v[0x9], 0x22);
// the most-significant bit of Vx is 1
vm.v[0xA] = 0x81;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0xA], 0x81);
assert_eq!(vm.v[0x9], 0x02);
assert_eq!(vm.v[0xF], 0x01);
}
#[test]
fn test_9XY0() {
// 0x9XY0 - Skip the following instruction if the value of register VX is not equal to the value of register VY
let opcode = 0x9AB0;
let mut vm = get_vm();
//equal
vm.v[0xA] = 0x1;
vm.v[0xB] = 0x1;
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x0);
//not equal
vm.v[0xA] = 0x0;
vm.v[0xB] = 0x1;
vm.program_counter = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x2);
}
#[test]
fn test_ANNN() {
// 0xANNN - Store memory address NNN in register I
let opcode = 0xABCD;
let mut vm = get_vm();
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.i, 0xBCD);
}
#[test]
fn test_BNNN() {
// 0xBNNN - Jump to address NNN + V0
let opcode = 0xBCDE;
let mut vm = get_vm();
vm.v[0x0] = 0x04;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter + 2, 0xCDE + 0x04); // add 2 to the jump target because in normal execution the CPU increments pc by 2 after the jump
}
#[test]
fn test_CXNN() {
// 0xCXNN - Set VX to a random number with a mask of NN
let opcode = 0xCD00;
let mut vm = get_vm();
vm.v[0xD] = 0xFF;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0xD], 0x00);
}
fn load_test_sprite(memory: &mut [u8; MEMORY_SIZE]) {
memory[0xA] = 0xF0;
memory[0xB] = 0x90;
memory[0xC] = 0xF0;
memory[0xD] = 0x90;
memory[0xE] = 0xF0;
}
fn assert_sprite_drawing(display_data: &[bool; DISPLAY_SIZE[0] * DISPLAY_SIZE[1]]) {
let assert_pixel = |x, y, expected: bool| {
assert_eq!(
display_data[y * DISPLAY_SIZE[0] + x],
expected,
"pixel [{}, {}] should be {}",
x,
y,
expected
);
};
assert_pixel(0, 0, true);
assert_pixel(1, 0, true);
assert_pixel(2, 0, true);
assert_pixel(3, 0, true);
assert_pixel(0, 1, true);
assert_pixel(1, 1, false);
assert_pixel(2, 1, false);
assert_pixel(3, 1, true);
assert_pixel(0, 2, true);
assert_pixel(1, 2, true);
assert_pixel(2, 2, true);
assert_pixel(3, 2, true);
assert_pixel(0, 3, true);
assert_pixel(1, 3, false);
assert_pixel(2, 3, false);
assert_pixel(3, 3, true);
assert_pixel(0, 4, true);
assert_pixel(1, 4, true);
assert_pixel(2, 4, true);
assert_pixel(3, 4, true);
}
fn assert_sprite_ereasing(display_data: &[bool; DISPLAY_SIZE[0] * DISPLAY_SIZE[1]]) {
let assert_pixel = |x, y, expected: bool| {
assert_eq!(
display_data[y * DISPLAY_SIZE[0] + x],
expected,
"pixel [{}, {}] should be {}",
x,
y,
expected
);
};
assert_pixel(0, 0, false);
assert_pixel(1, 0, false);
assert_pixel(2, 0, false);
assert_pixel(3, 0, false);
assert_pixel(0, 1, false);
assert_pixel(1, 1, false);
assert_pixel(2, 1, false);
assert_pixel(3, 1, false);
assert_pixel(0, 2, false);
assert_pixel(1, 2, false);
assert_pixel(2, 2, false);
assert_pixel(3, 2, false);
assert_pixel(0, 3, false);
assert_pixel(1, 3, false);
assert_pixel(2, 3, false);
assert_pixel(3, 3, false);
assert_pixel(0, 4, false);
assert_pixel(1, 4, false);
assert_pixel(2, 4, false);
assert_pixel(3, 4, false);
}
#[test]
fn test_DXYN() {
// 0xDXYN - Draw a sprite at position VX, VY with N bytes of sprite data starting at the address stored in I
// Set VF to 01 if any set pixels are changed to unset, and 00 otherwise
let opcode = 0xD005;
let mut vm = get_vm();
load_test_sprite(&mut vm.memory);
vm.i = 0xA;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_sprite_drawing(&vm.display_data);
assert_eq!(vm.v[0xF], 0x00);
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_sprite_ereasing(&vm.display_data);
assert_eq!(vm.v[0xF], 0x01);
}
#[test]
fn test_EX9E() {
// 0xEX9E - skip the following instruction if the key corresponding to the hex value currently stored in register VX is pressed
let opcode = 0xEE9E;
let mut vm = get_vm();
vm.v[0xE] = 0x01;
//is pressed
vm.program_counter = 0x00;
vm.pressed_key = Some(0x01);
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x02);
//is not pressed
vm.pressed_key = Some(0x02);
vm.program_counter = 0x00;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x00);
}
#[test]
fn test_EXA1() {
// 0xEXA1 - skip the following instruction if the key corresponding to the hex value currently stored in register VX is not pressed
let opcode = 0xEEA1;
let mut vm = get_vm();
vm.v[0xE] = 0x01;
//is pressed
vm.program_counter = 0x00;
vm.pressed_key = Some(0x01);
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x00);
//is not pressed
vm.pressed_key = Some(0x02);
vm.program_counter = 0x00;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.program_counter, 0x02);
}
#[test]
fn test_FX07() {
// 0xFX07 - Store the current value of the delay timer in register VX
let opcode = 0xF007;
let mut vm = get_vm();
vm.delay_timer = 0x32;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x0], vm.delay_timer);
}
#[test]
fn test_FX0A() {
// 0xFX0A - Wait for a keypress and store the result in register VX
let opcode = 0xF00A;
let mut vm = get_vm();
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.waiting_for_key_press, true);
}
#[test]
fn test_FX15() {
// 0xFX15 - Set the delay timer to the value of register VX
let opcode = 0xF015;
let mut vm = get_vm();
vm.delay_timer = 0x22;
vm.v[0x0] = 0x33;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.delay_timer, vm.v[0x0]);
assert_eq!(vm.delay_timer, 0x33);
}
#[test]
fn test_FX18() {
// 0xFX18 - Set the sound timer to the value of register VX
let opcode = 0xF018;
let mut vm = get_vm();
vm.sound_timer = 0x22;
vm.v[0x0] = 0x33;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.sound_timer, vm.v[0x0]);
assert_eq!(vm.sound_timer, 0x33);
}
#[test]
fn test_FX1E() {
// 0xFX1E - Add the value stored in register VX to register I
let opcode = 0xF01E;
let mut vm = get_vm();
vm.i = 0x22;
vm.v[0x0] = 0x33;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.i, 0x55);
}
#[test]
fn test_FX29() {
// 0xFX29 - Set I to the memory address of the sprite data corresponding to the hexadecimal digit stored in register VX
let opcode = 0xF029;
let mut vm = get_vm();
vm.v[0x0] = 0x10;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.i, 0x50);
}
#[test]
fn test_FX33() {
// 0xFX33 - Store the binary-coded decimal equivalent of the value stored in register VX at addresses I, I + 1, and I + 2
let opcode = 0xF033;
let mut vm = get_vm();
vm.v[0x0] = 123;
vm.i = 0x0;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.memory[vm.i as usize], 1);
assert_eq!(vm.memory[vm.i as usize + 1], 2);
assert_eq!(vm.memory[vm.i as usize + 2], 3);
}
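// Worked example for FX33: 123 -> hundreds digit 1 at memory[I], tens digit 2 at
// memory[I+1], units digit 3 at memory[I+2], matching the asserts above.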
#[test]
fn test_FX55() {
// 0xFX55 - Store the values of registers V0 to VX inclusive in memory starting at address I
// I is set to I + X + 1 after operation
let opcode = 0xF455;
let mut vm = get_vm();
vm.i = 0x0;
vm.v[0x0] = 0x0;
vm.v[0x1] = 0x1;
vm.v[0x2] = 0x2;
vm.v[0x3] = 0x3;
vm.v[0x4] = 0x4;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.memory[0x0], 0x00);
assert_eq!(vm.memory[0x1], 0x01);
assert_eq!(vm.memory[0x2], 0x02);
assert_eq!(vm.memory[0x3], 0x03);
assert_eq!(vm.memory[0x4], 0x04);
assert_eq!(vm.i, 0x05);
}
#[test]
fn test_FX65() {
// 0xFX65 - Fill registers V0 to VX inclusive with the values stored in memory starting at address I
let opcode = 0xF465;
let mut vm = get_vm();
vm.i = 0x0;
vm.memory[0x0] = 0x0;
vm.memory[0x1] = 0x1;
vm.memory[0x2] = 0x2;
vm.memory[0x3] = 0x3;
vm.memory[0x4] = 0x4;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x0], 0x00);
assert_eq!(vm.v[0x1], 0x01);
assert_eq!(vm.v[0x2], 0x02);
assert_eq!(vm.v[0x3], 0x03);
assert_eq!(vm.v[0x4], 0x04);
assert_eq!(vm.i, 0x05);
}
| {
// 0x7XNN - Add the value NN to register VX
let opcode = 0x789A;
let mut vm = get_vm();
vm.v[0x8] = 0x11;
vm.execute_instruction(decode_opcode(opcode), opcode);
assert_eq!(vm.v[0x8], 0xAB);
} |
__main__.py | from buttonlist.app import main
if __name__ == '__main__':
| main().main_loop() |
|
base-handlers.ts | // Base event handlers for browser window
import {history} from "../common/store";
import routes from "../common/routes";
import {pathToRegexp} from "path-to-regexp";
// Global drag&drop
const handleDragOver = (e: DragEvent) => { | return;
}
e.preventDefault();
e.dataTransfer.effectAllowed = 'none';
e.dataTransfer.dropEffect = 'none';
}
// Global click handler
const handleClick = (e: Event) => {
const el = e.target as HTMLElement;
// Anchor link handler
if (el.tagName === "A" || (el.parentElement && el.parentElement.tagName === "A")) {
const href = el.getAttribute("href") || (el.parentElement ? el.parentElement.getAttribute("href") : null);
if (href && href.startsWith("/") && href.indexOf("#") !== -1) {
const [route, anchor] = href.split("#");
// make sure link matches with one of app routes
if (Object.values(routes).find(p => pathToRegexp(p).test(route))) {
e.preventDefault();
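// Small delay so the route change (if any) has time to render before scrolling
// to the anchor; 75ms appears to be an empirical choice.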
let delay = 75;
if (history!.location.pathname !== route) {
history!.push(href);
}
// scroll to anchor element
const el = document.getElementById(anchor);
if (el) {
setTimeout(() => {
el.scrollIntoView();
}, delay);
}
}
}
}
// Handle links in static pages. (faq etc...)
if (el.tagName === "A") {
if (el.classList.contains("push-link")) {
e.preventDefault();
const href = el.getAttribute("href");
if (href && href.startsWith("/")) {
// make sure link matches with one of app routes
if (Object.values(routes).find(p => pathToRegexp(p).test(href))) {
e.preventDefault();
history!.push(href);
}
}
}
}
}
document.addEventListener("DOMContentLoaded", function () {
document.body.addEventListener('dragover', handleDragOver);
document.body.addEventListener('click', handleClick);
}); | if (!(e.target && e.dataTransfer)) { |
get_blocking_state_audit_logs_with_history_parameters.go | // Code generated by go-swagger; DO NOT EDIT.
package account
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"context"
"net/http"
"time"
"github.com/go-openapi/errors"
"github.com/go-openapi/runtime"
cr "github.com/go-openapi/runtime/client"
strfmt "github.com/go-openapi/strfmt"
)
// NewGetBlockingStateAuditLogsWithHistoryParams creates a new GetBlockingStateAuditLogsWithHistoryParams object
// with the default values initialized.
func NewGetBlockingStateAuditLogsWithHistoryParams() *GetBlockingStateAuditLogsWithHistoryParams {
var ()
return &GetBlockingStateAuditLogsWithHistoryParams{
timeout: cr.DefaultTimeout,
}
}
// NewGetBlockingStateAuditLogsWithHistoryParamsWithTimeout creates a new GetBlockingStateAuditLogsWithHistoryParams object
// with the default values initialized, and the ability to set a timeout on a request
func NewGetBlockingStateAuditLogsWithHistoryParamsWithTimeout(timeout time.Duration) *GetBlockingStateAuditLogsWithHistoryParams {
var ()
return &GetBlockingStateAuditLogsWithHistoryParams{
timeout: timeout,
}
}
// NewGetBlockingStateAuditLogsWithHistoryParamsWithContext creates a new GetBlockingStateAuditLogsWithHistoryParams object
// with the default values initialized, and the ability to set a context for a request
func NewGetBlockingStateAuditLogsWithHistoryParamsWithContext(ctx context.Context) *GetBlockingStateAuditLogsWithHistoryParams {
var ()
return &GetBlockingStateAuditLogsWithHistoryParams{
Context: ctx,
}
}
// NewGetBlockingStateAuditLogsWithHistoryParamsWithHTTPClient creates a new GetBlockingStateAuditLogsWithHistoryParams object
// with the default values initialized, and the ability to set a custom HTTPClient for a request
func NewGetBlockingStateAuditLogsWithHistoryParamsWithHTTPClient(client *http.Client) *GetBlockingStateAuditLogsWithHistoryParams {
var ()
return &GetBlockingStateAuditLogsWithHistoryParams{
HTTPClient: client,
}
}
/*GetBlockingStateAuditLogsWithHistoryParams contains all the parameters to send to the API endpoint
for the get blocking state audit logs with history operation. Typically these are written to a http.Request.
*/
type GetBlockingStateAuditLogsWithHistoryParams struct {
/*BlockingID*/
BlockingID strfmt.UUID
WithStackTrace *bool // If set, returns full stack trace with error message
timeout time.Duration
Context context.Context
HTTPClient *http.Client
ProcessLocationHeader bool // For create APIs that return 201, send another request and retrieve the resource.
}
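// Typical usage (illustrative): chain the With* builders defined below, e.g.
//   params := NewGetBlockingStateAuditLogsWithHistoryParams().
//       WithBlockingID(blockingID).
//       WithTimeout(10 * time.Second)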
// WithTimeout adds the timeout to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) WithTimeout(timeout time.Duration) *GetBlockingStateAuditLogsWithHistoryParams {
o.SetTimeout(timeout)
return o
}
// SetTimeout adds the timeout to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
}
// WithContext adds the context to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) WithContext(ctx context.Context) *GetBlockingStateAuditLogsWithHistoryParams {
o.SetContext(ctx)
return o
}
// SetContext adds the context to the get blocking state audit logs with history params | }
// WithHTTPClient adds the HTTPClient to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) WithHTTPClient(client *http.Client) *GetBlockingStateAuditLogsWithHistoryParams {
o.SetHTTPClient(client)
return o
}
// SetHTTPClient adds the HTTPClient to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
}
// WithBlockingID adds the blockingID to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) WithBlockingID(blockingID strfmt.UUID) *GetBlockingStateAuditLogsWithHistoryParams {
o.SetBlockingID(blockingID)
return o
}
// SetBlockingID adds the blockingId to the get blocking state audit logs with history params
func (o *GetBlockingStateAuditLogsWithHistoryParams) SetBlockingID(blockingID strfmt.UUID) {
o.BlockingID = blockingID
}
// WriteToRequest writes these params to a swagger request
func (o *GetBlockingStateAuditLogsWithHistoryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
// path param blockingId
if err := r.SetPathParam("blockingId", o.BlockingID.String()); err != nil {
return err
}
// query param withStackTrace
if o.WithStackTrace != nil && *o.WithStackTrace {
if err := r.SetQueryParam("withStackTrace", "true"); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | func (o *GetBlockingStateAuditLogsWithHistoryParams) SetContext(ctx context.Context) {
o.Context = ctx |
pageSizeForm.ts | import { TranslateService } from 'ng2-translate';
import { Component, ViewChild } from "@angular/core";
import { NavController, NavParams, Slides, AlertController } from "ionic-angular";
import { GoogleAnalyticsProvider } from '../../providers/ga';
import { KidsSize, MenSize, WomenSize } from "whats-size";
import { WhatsSizeDatabase } from '../../db/component';
import { SizeModel } from '../../db/size';
import { SizeOptionsProvider } from "../../providers/options";
import { SizeProvider } from "../../providers/size";
import { PageProvider } from "../../providers/page";
import { ISettings } from '../../db/settings';
export class SizeFieldModel {
public icon: string;
public key: string;
public text: string;
public value: string;
public options: Array<string>;
constructor(args: any) {
args = args || {};
this.icon = args.icon;
this.key = args.key;
this.text = args.text;
this.value = args.value;
this.options = args.options || [];
}
}
@Component({
selector: "page-pageSizeForm",
templateUrl: "pageSizeForm.html"
})
export class PageSizeForm {
@ViewChild("pageSlider") slider: Slides;
kidsSize: KidsSize;
menSize: MenSize;
womenSize: WomenSize;
country: string;
options: any;
model: {
isLoaded: Boolean,
id?: number,
type?: string,
name?: string,
source?: string,
fields?: {
selected: Array<SizeFieldModel>,
kids: Array<SizeFieldModel>,
men: Array<SizeFieldModel>,
women: Array<SizeFieldModel>
}
};
settings: ISettings;
constructor(public navCtrl: NavController
, public navParams: NavParams
, public alertCtrl: AlertController
, public translate: TranslateService
, public dbContext: WhatsSizeDatabase
, public size: SizeProvider
, public sizeOptions: SizeOptionsProvider
, public page: PageProvider
, public ga: GoogleAnalyticsProvider) {
ga.trackView("Cadastro");
this.model = {
isLoaded: false,
type: "kids",
name: "",
fields: {
selected: [],
kids: [],
men: [],
women: []
}
};
this.options = {
types: []
};
}
ionViewDidLoad() {
this.dbContext.stores.settings.get().then(settings => {
this.settings = settings;
this.sizeOptions.initialize(this.settings);
this.country = settings.source || "usa";
this.kidsSize = new KidsSize(this.country);
this.menSize = new MenSize(this.country);
this.womenSize = new WomenSize(this.country);
let options = {
kids: this.sizeOptions.getKidsOptions(),
men: this.sizeOptions.getMenOptions(),
women: this.sizeOptions.getWomenOptions()
};
this.model.fields = {
selected: [],
kids: [
new SizeFieldModel({ key: "kidsSimple", text: "SIZES.SELECT_KIDS.SIMPLE", options: options.kids.simple }),
new SizeFieldModel({ key: "kidsShoes", text: "SIZES.SELECT_KIDS.SHOES", options: options.kids.shoes }),
new SizeFieldModel({ key: "kidsClothes", text: "SIZES.SELECT_KIDS.CLOTHES", options: options.kids.clothes }),
],
men: [
new SizeFieldModel({ key: "menSimple", text: "SIZES.SELECT_MEN.TSHIRTS", options: options.men.simple }),
new SizeFieldModel({ key: "menShirts", text: "SIZES.SELECT_MEN.SHIRTS", options: options.men.shirts }),
new SizeFieldModel({ key: "menShoes", text: "SIZES.SELECT_MEN.SHOES", options: options.men.shoes }),
new SizeFieldModel({ key: "menSuits", text: "SIZES.SELECT_MEN.SUITS", options: options.men.suits }),
],
women: [
new SizeFieldModel({ key: "womenSimple", text: "SIZES.SELECT_WOMAN.SHIRTS", options: options.women.simple }),
new SizeFieldModel({ key: "womenBlouses", text: "SIZES.SELECT_WOMAN.BLOUSES", options: options.women.blouses }),
new SizeFieldModel({ key: "womenCoats", text: "SIZES.SELECT_WOMAN.COATS", options: options.women.coats }),
new SizeFieldModel({ key: "womenDresses", text: "SIZES.SELECT_WOMAN.DRESSES", options: options.women.dresses }),
new SizeFieldModel({ key: "womenSkirts", text: "SIZES.SELECT_WOMAN.SKIRTS", options: options.women.skirts }),
new SizeFieldModel({ key: "womenShoes", text: "SIZES.SELECT_WOMAN.SHOES", options: options.women.shoes })
]
};
this.options.types = this.sizeOptions.getTypes();
this.loadFieldByTypes(this.model.type);
this.loadSizeModel();
});
}
loadSizeModel() {
let model = this.navParams.get('model') as SizeModel;
if (model) {
this.model.id = model.id;
this.model.name = model.personName;
this.model.type = model.sizeType;
this.model.source = model.source;
this.loadFieldByTypes(this.model.type);
this.model.fields[model.sizeType].forEach((s: SizeFieldModel) => {
let sizeModel = model.sizes.filter(x => x.key == s.key)[0];
if (sizeModel) {
s.value = sizeModel.value;
}
});
this.model.isLoaded = true;
} else {
this.model.isLoaded = true;
}
}
onChangeType(value: any) {
this.loadFieldByTypes(value);
}
loadFieldByTypes(type: string) {
switch (type) {
case "kids":
this.model.fields.selected = this.model.fields.kids;
break;
case "men":
this.model.fields.selected = this.model.fields.men;
break;
case "women":
this.model.fields.selected = this.model.fields.women;
break;
}
}
goToSlide(slideNum: number) {
if (slideNum === 1) {
if (!this.validateForm()) {
return;
}
}
this.slider.slideTo(slideNum, 500);
}
validateForm(): boolean {
var valid = true;
if (!this.model.name || this.model.name.length < 3) {
valid = false;
}
if (!valid) {
let alertMessage = this.page.getTranslate("SIZEFORM.ALERTS.VALIDATIONERROR"); | return valid;
}
createOrUpdateSize() {
if (!this.validateForm()) {
return;
}
var model = new SizeModel({
id: this.model.id,
personName: this.model.name,
sizeType: this.model.type,
source: this.settings.source || this.model.source,
sizes: this.size.getSizeValues(this.model.fields.selected)
});
var isNew = !model.id;
let success = x => {
model.sizes.forEach(x => {
let key = JSON.stringify([model.source, model.sizeType, x.key, x.value]);
this.ga.trackEvent("Tamanho", "Cadastro", key, 1);
});
let alertMessage = this.page.getTranslate("SIZEFORM.ALERTS.SUCCESS");
this.page.alert(alertMessage.title, alertMessage.message).then(x => {
this.page.goHome();
});
};
let error = x => {
let alertMessage = this.page.getTranslate("SIZEFORM.ALERTS.ERROR");
this.page.alert(alertMessage.title, alertMessage.message);
};
if (isNew) {
this.dbContext.stores.sizes.create(model).then(success).catch(error);
} else {
this.dbContext.stores.sizes.update(model).then(success).catch(error);
}
}
} | this.page.alert(alertMessage.title, alertMessage.message);
}
|
urls.py | """django_admin_demo URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings | url(r'^admin/', admin.site.urls),
url(r'^api/user/search', APIUserSearchView.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | from web.views import APIUserSearchView
urlpatterns = [ |
merkle_proof.rs | // Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod vec_backend;
use self::core::core::merkle_proof::MerkleProof;
use self::core::core::pmmr::PMMR;
use self::core::ser::{self, PMMRIndexHashable};
use crate::vec_backend::{TestElem, VecBackend};
use grin_core as core;
#[test]
fn empty_merkle_proof() {
let proof = MerkleProof::empty();
assert_eq!(proof.path, vec![]);
assert_eq!(proof.mmr_size, 0);
}
#[test]
fn merkle_proof_ser_deser() {
let mut ba = VecBackend::new();
let mut pmmr = PMMR::new(&mut ba);
for x in 0..15 {
pmmr.push(&TestElem([0, 0, 0, x])).unwrap();
}
let proof = pmmr.merkle_proof(9).unwrap();
let mut vec = Vec::new();
ser::serialize_default(&mut vec, &proof).expect("serialization failed");
let proof_2: MerkleProof = ser::deserialize_default(&mut &vec[..]).unwrap();
assert_eq!(proof, proof_2);
}
#[test]
fn pmmr_merkle_proof_prune_and_rewind() {
let mut ba = VecBackend::new();
let mut pmmr = PMMR::new(&mut ba);
pmmr.push(&TestElem([0, 0, 0, 1])).unwrap();
pmmr.push(&TestElem([0, 0, 0, 2])).unwrap();
let proof = pmmr.merkle_proof(2).unwrap();
// now prune an element and check we can still generate
// the correct Merkle proof for the other element (after sibling pruned)
pmmr.prune(1).unwrap();
let proof_2 = pmmr.merkle_proof(2).unwrap();
assert_eq!(proof, proof_2);
}
#[test]
fn pmmr_merkle_proof() {
let elems = [
TestElem([0, 0, 0, 1]),
TestElem([0, 0, 0, 2]),
TestElem([0, 0, 0, 3]),
TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
TestElem([0, 0, 0, 6]),
TestElem([0, 0, 0, 7]),
TestElem([0, 0, 0, 8]),
TestElem([1, 0, 0, 0]),
]; | let mut pmmr = PMMR::new(&mut ba);
pmmr.push(&elems[0]).unwrap();
let pos_0 = elems[0].hash_with_index(0);
assert_eq!(pmmr.get_hash(1).unwrap(), pos_0);
let proof = pmmr.merkle_proof(1).unwrap();
assert_eq!(proof.path, vec![]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
pmmr.push(&elems[1]).unwrap();
let pos_1 = elems[1].hash_with_index(1);
assert_eq!(pmmr.get_hash(2).unwrap(), pos_1);
let pos_2 = (pos_0, pos_1).hash_with_index(2);
assert_eq!(pmmr.get_hash(3).unwrap(), pos_2);
assert_eq!(pmmr.root().unwrap(), pos_2);
assert_eq!(pmmr.peaks(), [pos_2]);
// single peak, path with single sibling
let proof = pmmr.merkle_proof(1).unwrap();
assert_eq!(proof.path, vec![pos_1]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
let proof = pmmr.merkle_proof(2).unwrap();
assert_eq!(proof.path, vec![pos_0]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
// three leaves, two peaks (one also the right-most leaf)
pmmr.push(&elems[2]).unwrap();
let pos_3 = elems[2].hash_with_index(3);
assert_eq!(pmmr.get_hash(4).unwrap(), pos_3);
assert_eq!(pmmr.root().unwrap(), (pos_2, pos_3).hash_with_index(4));
assert_eq!(pmmr.peaks(), [pos_2, pos_3]);
let proof = pmmr.merkle_proof(1).unwrap();
assert_eq!(proof.path, vec![pos_1, pos_3]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
let proof = pmmr.merkle_proof(2).unwrap();
assert_eq!(proof.path, vec![pos_0, pos_3]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
let proof = pmmr.merkle_proof(4).unwrap();
assert_eq!(proof.path, vec![pos_2]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok());
// 7 leaves, 3 peaks, 11 pos in total
pmmr.push(&elems[3]).unwrap();
let pos_4 = elems[3].hash_with_index(4);
assert_eq!(pmmr.get_hash(5).unwrap(), pos_4);
let pos_5 = (pos_3, pos_4).hash_with_index(5);
assert_eq!(pmmr.get_hash(6).unwrap(), pos_5);
let pos_6 = (pos_2, pos_5).hash_with_index(6);
assert_eq!(pmmr.get_hash(7).unwrap(), pos_6);
pmmr.push(&elems[4]).unwrap();
let pos_7 = elems[4].hash_with_index(7);
assert_eq!(pmmr.get_hash(8).unwrap(), pos_7);
pmmr.push(&elems[5]).unwrap();
let pos_8 = elems[5].hash_with_index(8);
assert_eq!(pmmr.get_hash(9).unwrap(), pos_8);
let pos_9 = (pos_7, pos_8).hash_with_index(9);
assert_eq!(pmmr.get_hash(10).unwrap(), pos_9);
pmmr.push(&elems[6]).unwrap();
let pos_10 = elems[6].hash_with_index(10);
assert_eq!(pmmr.get_hash(11).unwrap(), pos_10);
assert_eq!(pmmr.unpruned_size(), 11);
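// Hypothetical layout sketch of the 11-position MMR built above (1-based positions,
// consistent with the asserts in this test): leaves sit at positions 1, 2, 4, 5, 8, 9
// and 11; internal parents at 3, 6, 7 and 10; the peaks are positions 7, 10 and 11.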
let proof = pmmr.merkle_proof(1).unwrap();
assert_eq!(
proof.path,
vec![pos_1, pos_5, (pos_9, pos_10).hash_with_index(11)]
);
assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok());
let proof = pmmr.merkle_proof(2).unwrap();
assert_eq!(
proof.path,
vec![pos_0, pos_5, (pos_9, pos_10).hash_with_index(11)]
);
assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok());
let proof = pmmr.merkle_proof(4).unwrap();
assert_eq!(
proof.path,
vec![pos_4, pos_2, (pos_9, pos_10).hash_with_index(11)]
);
assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok());
let proof = pmmr.merkle_proof(5).unwrap();
assert_eq!(
proof.path,
vec![pos_3, pos_2, (pos_9, pos_10).hash_with_index(11)]
);
assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 5).is_ok());
let proof = pmmr.merkle_proof(8).unwrap();
assert_eq!(proof.path, vec![pos_8, pos_10, pos_6]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 8).is_ok());
let proof = pmmr.merkle_proof(9).unwrap();
assert_eq!(proof.path, vec![pos_7, pos_10, pos_6]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 9).is_ok());
let proof = pmmr.merkle_proof(11).unwrap();
assert_eq!(proof.path, vec![pos_9, pos_6]);
assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 11).is_ok());
} |
let mut ba = VecBackend::new(); |
sexpr.rs | use anyhow::Error;
use pest;
use pest::iterators::Pair;
use pest::iterators::Pairs;
use pest::Parser;
#[throws]
pub fn parse(text: &str) -> S {
let exprs: Vec<_> = LangParser::parse(Rule::file, text)?
.filter(|pair| !matches!(pair.as_rule(), Rule::EOI))
.map(parse_s)
.collect();
panic!();
}
fn parse_expr(pair: Pair<Rule>) -> S {
assert!(matches!(pair.as_rule(), Rule::expr), "{}", pair);
let mut exprs = Vec::new();
for pair in pair.into_inner() {
let e = match pair.as_rule() {
Rule::atom => S::Atom(pair.as_str().to_string()),
Rule::sexpr => S::List(parse_s(pair)),
Rule::comment => S::Comment(parse_comment(pair)),
_ => unreachable!("{:?}", pair.as_rule()),
};
exprs.push(e);
}
S::List(exprs)
}
fn parse_s(pair: Pair<Rule>) -> Vec<S> {
assert!(matches!(pair.as_rule(), Rule::sexpr), "{}", pair);
pair.into_inner().map(parse_expr).collect()
}
fn parse_comment(pair: Pair<Rule>) -> String {
assert!(matches!(pair.as_rule(), Rule::comment), "{}", pair);
expect!(pair.into_inner().next()).as_str().to_string()
}
#[derive(Parser)]
#[grammar = "sexpr.pest"]
pub struct LangParser;
pub enum | {
Atom(String),
List(Vec<S>),
Comment(String),
}
pub struct Formatter<'a> {
w: &'a mut dyn std::io::Write,
indent: u64,
indent_value: &'static str,
}
impl<'a> Formatter<'a> {
fn new(w: &'a mut dyn std::io::Write) -> Formatter<'a> {
Formatter {
w,
indent: 0,
indent_value: " ",
}
}
#[throws(std::io::Error)]
fn fmt(&mut self, s: &S) {
match s {
S::Atom(v) => self.w.write_all(v.as_bytes())?,
S::List(v) => {
self.start_paren()?;
let mut items = v.iter();
if let Some(s) = items.next() {
self.fmt(s)?;
}
for s in items {
self.start_line()?;
self.fmt(s)?;
}
self.end_paren()?;
}
S::Comment(v) => {
self.start_line()?;
for l in v.lines() {
self.w.write_all(l.as_bytes())?;
self.start_line()?;
}
}
}
}
#[throws(std::io::Error)]
fn start_paren(&mut self) {
self.w.write_all(b"(")?;
self.indent += 1;
}
#[throws(std::io::Error)]
fn end_paren(&mut self) {
self.w.write_all(b")")?;
self.indent -= 1;
}
#[throws(std::io::Error)]
fn new_line(&mut self) {
self.w.write_all(b"\n")?;
self.start_line()?;
}
#[throws(std::io::Error)]
fn start_line(&mut self) {
for _ in 0..self.indent {
self.w.write_all(self.indent_value.as_bytes())?;
}
}
}
| S |
exercise_cev97.py | def escreva(nome):
print('~' * (len(nome) + 6))
print(f' {nome} ')
print('~' * (len(nome) + 6))
| nome = 'Ian'
escreva(nome)
nome = 'Estuda y Estuda'
escreva(nome)
nome = 'Coding and Conding'
escreva(nome) | |
config.rs | // Copyright 2015-2019 Benjamin Fry <[email protected]>
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
/// Configuration for master file based zones
#[derive(Deserialize, PartialEq, Debug)]
pub struct | {
/// path to the master file
pub zone_file_path: String,
}
| FileConfig |
DDE.js | import { Collapse, Form, Tabs, Card, Button, Modal,message } from "antd";
import "antd/dist/antd.css";
import Axios from 'axios';
import Personaldetails from '../../../../Compositeviews/Personaldetails/Personaldetails';
import Addressdetails from '../../../../Compositeviews/Addressdetails/Addressdetails';
import Loandetails from '../../../../Compositeviews/Loandetails/Loandetails';
import Statementdetails from '../../../../Compositeviews/Statementdetails/Statementdetails';
import Dms from '../../../../Components/DMS/Dms2';
import Casehistory from '../../../../Compositeviews/Casehistory/Casehistory';
import Incomedetails from '../../../../Compositeviews/Incomedetails/Incomedetails';
import React, { Component } from 'react';
// import CollateralDetails from '../../../../Compositeviews/Collateraldetails/CollateralDetails';
import Dcc from '../../../../Compositeviews/Dcc/Dcc';
import Employmentdetails from '../../../../Compositeviews/Employmentdetails/Employmentdetails';
import Identificationdetails from '../../../../Compositeviews/Identificationdetails/Identificationdetails';
import VerificationList from '../../../../Compositeviews/VerificationList/VerificationList';
import DisbursementDetails from '../../../../Compositeviews/DisbursementDetails/DisbursementDetails';
import DedupeResult from '../../../../Compositeviews/DedupeResult/DedupeResult';
import CollateralDetails from '../../../../Compositeviews/CollateralDetails/CollateralDetails';
import Riskprofile from '../../../../Components/Risk Profile/Riskprofile';
const { Panel } = Collapse;
export default class | extends Component {
componentDidMount() {
console.log("DDE page loaded");
const SectionName = 'IdentificationDetails,AddressDetails';
this.getData();
}
getData = () => {
// console.dir("id " + this.props.match.params.id);
//console.log("sectionr "+ this.prop.SectionName);
Axios.get('/rest/bpm/wle/v1/task/4853?action=getData&fields=IdentificationDetails,AddressDetails', {
auth: {
username: 'p8admin',
password: 'Password123'
}
})
.then(res => {
const result = res.data.data.resultMap;
console.dir(result);
console.log('before props');
console.dir(this);
// let fieldvalues = this.props.form.getFieldsValue();
//console.dir(fieldvalues);
})
}
completeTask = (taskId) => {
let params = '{"applicationDetails":{"userAction":"ToUnderwriter"}}';
let url = `/rest/bpm/wle/v1/task/` + taskId + '?action=complete¶ms=' + params + '&parts=all';
message.config({ top: 100, });
message.loading('Submitting Task Data',60).then(
Axios.put(url, {
auth: {
username: 'p8admin',
password: 'Password12'
}
})
.then(res => {
message.destroy();
message.success(`Task completed successfully`, 2);
this.props.history.push("/inbox");
})
.catch(function (error) {
message.destroy();
message.error('Something went wrong. Please try again!', 2);
})
)
}
state = {
size: "large",
width: "150px",
visible:false
};
showModalcase_history=()=>{
this.setState({visible:true});
}
handleOkcasehistory=(e) =>{
this.setState({visible:false});
}
handlecancelcasehistory=(e)=>{
this.setState({
visible:false
})
}
handleSizeChange = e => {
this.setState({ size: e.target.value });
};
handleSubmit = e => {
this.completeTask(this.props.match.params.id);
};
normFile = e => {
console.log("Upload event:", e);
if (Array.isArray(e)) {
return e;
}
return e && e.fileList;
};
render() {
const { size } = this.state;
const { TabPane } = Tabs;
return (
<Form onSubmit={this.handleSubmit} layout="horizontal">
<a className='fixed-widgets' onClick={this.showModalcase_history}><i className='ant-avatar fixed-widgets-avatar ant-dropdown-trigger ant-avatar-circle ant-avatar-icon fa fa-history'/><span>Case History</span></a>
<Modal
title='Case History'
visible={this.state.visible}
onOk={this.handleOkcasehistory}
onCancel={this.handlecancelcasehistory}>
<Card>
<Casehistory></Casehistory>
</Card>
</Modal>
<div className="card-container cust_tabs_card form-group">
<Tabs>
<TabPane tab="Customer Details" key="1">
<Collapse defaultActiveKey={["1"]}>
<Panel header="Personal Details" key="1" danger>
<Personaldetails form={this.props.form}></Personaldetails>
</Panel>
<Panel header="Identification Details" key="2">
<Identificationdetails></Identificationdetails>
</Panel>
<Panel header="Address Details" key="3">
<Addressdetails></Addressdetails>
</Panel>
</Collapse>
</TabPane>
<TabPane tab="Loan Details" key="2">
<Collapse defaultActiveKey={["1"]}>
<Panel header="Loan Details" key="1" danger>
<Loandetails></Loandetails>
</Panel>
</Collapse>
</TabPane>
<TabPane tab="Employment & Income Details" key="3">
<Collapse defaultActiveKey={["1"]}>
<Panel header="Employment Details" key="1" danger>
<Employmentdetails></Employmentdetails>
</Panel>
<Panel header="Income Details" key="2">
<Incomedetails></Incomedetails>
</Panel>
</Collapse>
</TabPane>
<TabPane tab="Document Check List" key="5">
{/* <Card> */}
<Dcc></Dcc>
{/* </Card> */}
</TabPane>
<TabPane tab="Document Details" key="6">
<Dms></Dms>
</TabPane>
<TabPane tab="Collateral Details" key="7">
<Collapse defaultActiveKey={["1"]}>
<Panel header="Collateral Details" key="1" danger>
<CollateralDetails></CollateralDetails>
</Panel>
</Collapse>
</TabPane>
<TabPane tab="Verification Details" key="13">
<VerificationList></VerificationList>
</TabPane>
</Tabs>
</div>
<Form.Item>
<div className='pull-right'>
<Button
type="primary"
htmlType="submit"
className='mar-rig-10'
size={size}
>
Save as Draft
</Button>
<Button type="primary" htmlType="submit"
size={size}>
Submit
</Button>
</div>
</Form.Item>
</Form>
);
}
}
| DDE |
CameraReality.js | import Reality from '../Reality.js'
import XRAnchor from '../XRAnchor.js'
import XRViewPose from '../XRViewPose.js'
import XRAnchorOffset from '../XRAnchorOffset.js'
import XRLightEstimate from '../XRLightEstimate.js'
import MatrixMath from '../fill/MatrixMath.js'
import Quaternion from '../fill/Quaternion.js'
import ARKitWrapper from '../platform/ARKitWrapper.js'
import ARCoreCameraRenderer from '../platform/ARCoreCameraRenderer.js'
import XRImageAnchor from "../XRImageAnchor.js"
import XRPlaneAnchor from "../XRPlaneAnchor.js"
import XRFaceAnchor from "../XRFaceAnchor.js"
import XRCoordinateSystem from "../XRCoordinateSystem.js" // assumed sibling module; referenced below but not originally imported
import XRVideoFrame from "../XRVideoFrame.js" // assumed sibling module; referenced below but not originally imported
/*
CameraReality displays the forward facing camera.
If this is running in the iOS ARKit wrapper app, the camera data will be displayed in a Metal layer below the WKWebKit layer.
If this is running in the Google ARCore Chrome application, it will create a canvas element and use the ARCore provided camera data.
If there is no ARKit or ARCore available, it will use WebRTC's MediaStream to render camera data into a canvas.
*/
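// Minimal usage sketch (illustrative only; in practice the surrounding XR polyfill
// constructs and drives this class — the parameter name below is taken from this file):
//
//   const reality = new CameraReality(xr)
//   reality._start({ videoFrames: true })  // picks ARKit, ARCore or WebRTC automatically
//   // ... frames then arrive via _handleNewFrame and COMPUTER_VISION_DATA events ...
//   reality._stop()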
export default class | extends Reality {
constructor(xr){
super(xr, 'Camera', true, true)
this._initialized = false
this._running = false
// camera fovy: start with 70 degrees on the long axis at 320x240
this._cameraFov = 70 * Math.PI/180
this._focalLength = 160 / Math.tan(this._cameraFov / 2)
this._cameraIntrinsics = [this._focalLength, 0 , 0,
0, this._focalLength, 0,
160, 120, 1 ]
// These are used if we have access to ARKit
this._arKitWrapper = null
// These are used if we do not have access to ARKit
this._mediaStream = null
this._videoEl = null
// These are used if we're using the Google ARCore web app
this._arCoreCameraRenderer = null
this._arCoreCanvas = null
this._elContext = null
this._vrDisplay = null
this._vrFrameData = null
// dealing with video frames from webrtc
this._sendingVideo = false;
this._videoFramesPaused = false;
this._sendVideoFrame = false;
this._videoProjectionMatrix = MatrixMath.mat4_generateIdentity();
this._videoViewMatrix = MatrixMath.mat4_generateIdentity();
this._lightEstimate = new XRLightEstimate();
// Try to find a WebVR 1.1 display that supports Google's ARCore extensions
if(typeof navigator.getVRDisplays === 'function'){
navigator.getVRDisplays().then(displays => {
for(let display of displays){
if(display === null) continue
if(display.capabilities.hasPassThroughCamera){ // This is the ARCore extension to WebVR 1.1
this._vrDisplay = display
this._vrFrameData = new VRFrameData()
if (!window.WebARonARKitSetData) {
this._arCoreCanvas = document.createElement('canvas')
this._xr._realityEls.appendChild(this._arCoreCanvas)
this._arCoreCanvas.width = window.innerWidth
this._arCoreCanvas.height = window.innerHeight
this._elContext = this._arCoreCanvas.getContext('webgl')
if(this._elContext === null){
throw 'Could not create CameraReality GL context'
}
}
break
}
}
})
}
window.addEventListener('resize', () => {
if(this._arCoreCanvas){
this._arCoreCanvas.width = window.innerWidth
this._arCoreCanvas.height = window.innerHeight
}
if (this._videoEl) {
setTimeout(() => {
this._adjustVideoSize();
}, 10)
}
}, false)
}
_setFovy (fovy) {
this._cameraFov = fovy * Math.PI/180
if (!this._videoEl) {
this._focalLength = 0
return
}
if (this._videoRenderWidth > this._videoRenderHeight) {
this._focalLength = (this._videoRenderWidth/2) / Math.tan(this._cameraFov / 2)
} else {
this._focalLength = (this._videoRenderHeight/2) / Math.tan(this._cameraFov / 2)
}
this._cameraIntrinsics = [this._focalLength, 0 , 0,
0, this._focalLength, 0,
(this._videoRenderWidth/2), (this._videoRenderHeight/2), 1 ]
}
_adjustVideoSize () {
var canvasWidth = this._videoRenderWidth;
var canvasHeight = this._videoRenderHeight;
var cameraAspect = canvasWidth / canvasHeight;
var width = this._videoEl.videoWidth;
var height = this._videoEl.videoHeight;
var videoSourceAspect = width / height;
if (videoSourceAspect != cameraAspect) {
// let's pick a size such that the video is below 512 in size in both dimensions
while (width > 512 || height > 512) {
width = width / 2
height = height / 2
}
canvasWidth = this._videoRenderWidth = width;
canvasHeight = this._videoRenderHeight = height;
var cameraAspect = canvasWidth / canvasHeight;
this._videoFrameCanvas.width = width;
this._videoFrameCanvas.height = height;
}
this._setFovy(this._cameraFov / (Math.PI/180))
var windowWidth = this._xr._realityEls.clientWidth;
var windowHeight = this._xr._realityEls.clientHeight;
var windowAspect = windowWidth / windowHeight;
var translateX = 0;
var translateY = 0;
if (cameraAspect > windowAspect) {
canvasWidth = canvasHeight * windowAspect;
windowWidth = windowHeight * cameraAspect;
translateX = -(windowWidth - this._xr._realityEls.clientWidth)/2;
} else {
canvasHeight = canvasWidth / windowAspect;
windowHeight = windowWidth / cameraAspect;
translateY = -(windowHeight - this._xr._realityEls.clientHeight)/2;
}
this._videoEl.style.width = windowWidth.toFixed(2) + 'px'
this._videoEl.style.height = windowHeight.toFixed(2) + 'px'
this._videoEl.style.transform = "translate(" + translateX.toFixed(2) + "px, "+ translateY.toFixed(2) + "px)"
try {
this.dispatchEvent(
new CustomEvent(
Reality.WINDOW_RESIZE_EVENT,
{
source: this,
detail: {
width: canvasWidth,
height: canvasHeight,
focalLength: this._focalLength
}
}
)
)
} catch(e) {
console.error('WINDOW_RESIZE_EVENT error', e)
}
}
/*
Called by a session before it hands a new XRPresentationFrame to the app
*/
_handleNewFrame(frame){
if(this._vrDisplay){
if (this._arCoreCameraRenderer) {
this._arCoreCameraRenderer.render()
}
this._vrDisplay.getFrameData(this._vrFrameData)
}
// WebRTC video
if (this._videoEl && this._sendVideoFrame && !this._videoFramesPaused) {
this._sendVideoFrame = false;
var canvasWidth = this._videoRenderWidth;
var canvasHeight = this._videoRenderHeight;
this._videoCtx.drawImage(this._videoEl, 0, 0, canvasWidth, canvasHeight);
var imageData = this._videoCtx.getImageData(0, 0, canvasWidth, canvasHeight);
var data = imageData.data
var len = imageData.data.length
// imageData = new ArrayBuffer(len)
// var buffData = new Uint8Array(imageData);
// for (var i = 0; i < len; i++) buffData[i] = data[i]
var buffers = [
{
size: {
width: canvasWidth,
height: canvasHeight,
bytesPerRow: canvasWidth * 4,
bytesPerPixel: 4
},
buffer: imageData
}];
var pixelFormat = XRVideoFrame.IMAGEFORMAT_RGBA32;
var timestamp = frame.timestamp;
// set from frame
var view = frame.views[0];
//this._videoViewMatrix.set(view.viewMatrix);
MatrixMath.mat4_invert(this._videoViewMatrix, view.viewMatrix)
this._videoProjectionMatrix.set(view.projectionMatrix)
var camera = {
arCamera: false,
cameraOrientation: 0,
cameraIntrinsics: this._cameraIntrinsics.slice(0),
// cameraIntrinsics: [(this._videoEl.videoWidth/2) / Math.tan(view._fov.leftDegrees * Math.PI/180), 0, (this._videoEl.videoWidth/2),
// 0, (this._videoEl.videoHeight/2) / Math.tan(view._fov.upDegrees * Math.PI/180), (this._videoEl.videoHeight/2),
// 0, 0, 1],
cameraImageResolution: {
width: this._videoEl.videoWidth,
height: this._videoEl.videoHeight
},
viewMatrix: this._videoViewMatrix,
projectionMatrix: this._videoProjectionMatrix
}
var xrVideoFrame = new XRVideoFrame(buffers, pixelFormat, timestamp, camera )
try {
this.dispatchEvent(
new CustomEvent(
Reality.COMPUTER_VISION_DATA,
{
source: this,
detail: xrVideoFrame
}
)
)
} catch(e) {
console.error('COMPUTER_VISION_DATA event error', e)
}
}
// TODO update the anchor positions using ARCore or ARKit
}
_start(parameters=null){
if(this._running) return
this._running = true
if(this._vrDisplay !== null){ // Using WebAR
if (window.WebARonARKitSetData) {
// WebARonARKit renders camera separately
} else {
this._arCoreCameraRenderer = new ARCoreCameraRenderer(this._vrDisplay, this._elContext)
}
this._initialized = true
} else if(ARKitWrapper.HasARKit()){ // Using ARKit
if(this._initialized === false){
this._initialized = true
this._arKitWrapper = ARKitWrapper.GetOrCreate()
this._arKitWrapper.addEventListener(ARKitWrapper.WATCH_EVENT, this._handleARKitWatch.bind(this))
this._arKitWrapper.waitForInit().then(() => {
this._arKitWrapper.watch(parameters)
})
} else {
this._arKitWrapper.watch(parameters)
}
} else { // Using WebRTC
if(this._initialized === false){
this._initialized = true
navigator.mediaDevices.getUserMedia({
audio: false,
video: { facingMode: "environment" }
}).then(stream => {
this._videoEl = document.createElement('video')
this._xr._realityEls.appendChild(this._videoEl)
this._videoEl.setAttribute('class', 'camera-reality-video')
this._videoEl.setAttribute('playsinline', true);
this._videoEl.style.width = '100%'
this._videoEl.style.height = '100%'
this._videoEl.srcObject = stream
this._videoEl.play()
this._setupWebRTC(parameters)
}).catch(err => {
console.error('Could not set up video stream', err)
this._initialized = false
this._running = false
})
} else {
if (this._videoEl) {
this._xr._realityEls.appendChild(this._videoEl)
this._videoEl.play()
this._setupWebRTC(parameters)
}
}
}
}
_setupWebRTC(parameters) {
if (parameters.videoFrames) {
this._sendingVideo = true;
this._videoEl.addEventListener('loadedmetadata', () => {
var width = this._videoEl.videoWidth;
var height = this._videoEl.videoHeight;
// let's pick a size such that the video is below 512 in size in both dimensions
while (width > 256 || height > 256) {
width = width / 2
height = height / 2
}
this._videoRenderWidth = width;
this._videoRenderHeight = height;
this._videoFrameCanvas = document.createElement('canvas');
this._videoFrameCanvas.width = width;
this._videoFrameCanvas.height = height;
this._videoCtx = this._videoFrameCanvas.getContext('2d');
this._adjustVideoSize();
this._sendVideoFrame = true;
});
}
}
_requestVideoFrame() {
this._sendVideoFrame = true;
}
_stopVideoFrames() {
this._videoFramesPaused = true;
}
_startVideoFrames() {
this._videoFramesPaused = false;
}
_stop(){
if(this._running === false) return
this._running = false
if(ARKitWrapper.HasARKit()){
if(this._arKitWrapper === null){
return
}
this._arKitWrapper.stop()
} else if(this._arCoreCanvas){
this._xr._realityEls.removeChild(this._arCoreCanvas)
this._arCoreCanvas = null
} else if(this._videoEl !== null){
this._videoEl.pause()
this._xr._realityEls.removeChild(this._videoEl)
}
}
_handleARKitWatch(ev){
if(ev.detail && ev.detail.objects){
for(let anchorInfo of ev.detail.objects){
this._updateAnchorFromARKitUpdate(anchorInfo.uuid, anchorInfo)
try {
this.dispatchEvent(
new CustomEvent(
Reality.UPDATE_WORLD_ANCHOR,
{
source: this,
detail: anchorInfo.uuid
}
)
)
} catch(e) {
console.error('UPDATE_WORLD_ANCHOR event error', e)
}
}
}
if (ev.detail && ev.detail.removedObjects) {
for (let removedAnchor of ev.detail.removedObjects) {
try {
this.dispatchEvent(
new CustomEvent(
Reality.REMOVE_WORLD_ANCHOR,
{
source: this,
detail: removedAnchor
}
)
)
} catch(e) {
console.error('REMOVE_WORLD_ANCHOR event error', e)
}
this._deleteAnchorFromARKitUpdate(removedAnchor)
}
}
if (ev.detail && ev.detail.newObjects) {
for (let addedAnchor of ev.detail.newObjects) {
try {
this.dispatchEvent(
new CustomEvent(
Reality.NEW_WORLD_ANCHOR,
{
source: this,
detail: addedAnchor
}
)
)
} catch(e) {
console.error('NEW_WORLD_ANCHOR event error', e)
}
}
}
}
_deleteAnchorFromARKitUpdate(anchorUUID) {
this._anchors.delete(anchorUUID)
}
_handleARKitAddObject(anchorInfo){
this._updateAnchorFromARKitUpdate(anchorInfo.uuid, anchorInfo)
}
_updateAnchorFromARKitUpdate(uid, anchorInfo){
const anchor = this._anchors.get(uid) || null
if(anchor === null){
// console.log('unknown anchor', anchor)
return
}
// This assumes that the anchor's coordinates are in the tracker coordinate system
anchor.coordinateSystem._relativeMatrix = anchorInfo.transform
// update internal data if any
switch (anchorInfo.type) {
case ARKitWrapper.ANCHOR_TYPE_PLANE:
anchor.center = anchorInfo.plane_center
anchor.extent =
[anchorInfo.plane_extent.x, anchorInfo.plane_extent.z]
anchor.alignment = anchorInfo.plane_alignment
anchor.geometry = anchorInfo.geometry
break
case ARKitWrapper.ANCHOR_TYPE_FACE:
if (anchorInfo.geometry) {
anchor.geometry.vertices = anchorInfo.geometry.vertices
}
if (anchorInfo.blendShapes) {
anchor.updateBlendShapes(anchorInfo.blendShapes)
}
break
case ARKitWrapper.ANCHOR_TYPE_ANCHOR:
break
case ARKitWrapper.ANCHOR_TYPE_IMAGE:
break
}
}
_addAnchor(anchor, display){
// Convert coordinates to the tracker coordinate system so that updating from ARKit transforms is simple
if(this._arKitWrapper !== null){
this._arKitWrapper.addAnchor(anchor.uid, anchor.coordinateSystem._poseModelMatrix).then(
detail => this._handleARKitAddObject(detail)
)
}
// ARCore as implemented in the browser does not offer anchors except on a surface, so we just use untracked anchors
// We also use untracked anchors for in-browser display, with WebRTC
this._anchors.set(anchor.uid, anchor)
return anchor.uid
}
/*
Creates an anchor offset relative to a surface, as found by a ray
normalized screen x and y are in range 0..1, with 0,0 at top left and 1,1 at bottom right
returns a Promise that resolves either to an AnchorOffset with the first hit result or null if the hit test failed
*/
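	// Hypothetical caller sketch (not part of this file): a tap at the centre of the
	// screen resolves to an XRAnchorOffset, or null when the hit test misses.
	//
	//   reality._findAnchor(0.5, 0.5, display).then(anchorOffset => {
	//     if (anchorOffset !== null) {
	//       // place content using anchorOffset.poseMatrix relative to the anchor
	//     }
	//   })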
_findAnchor(normalizedScreenX, normalizedScreenY, display, testOptions=null){
return new Promise((resolve, reject) => {
if(this._arKitWrapper !== null){
// Perform a hit test using the ARKit integration
this._arKitWrapper.hitTest(normalizedScreenX, normalizedScreenY, testOptions || ARKitWrapper.HIT_TEST_TYPE_EXISTING_PLANES).then(hits => {
if(hits.length === 0){
resolve(null)
// console.log('miss')
return
}
const hit = this._pickARKitHit(hits)
// if it's a plane
if (hit.anchor_transform) {
hit.anchor_transform[13] += XRViewPose.SITTING_EYE_HEIGHT
hit.world_transform[13] += XRViewPose.SITTING_EYE_HEIGHT
// Use the first hit to create an XRAnchorOffset, creating the XRAnchor as necessary
// TODO use XRPlaneAnchor for anchors with extents; hopefully the plane will have been created, tho
let anchor = this._getAnchor(hit.uuid)
if(anchor === null){
let coordinateSystem = new XRCoordinateSystem(display, XRCoordinateSystem.TRACKER)
coordinateSystem._relativeMatrix = hit.anchor_transform
anchor = new XRAnchor(coordinateSystem, hit.uuid)
this._anchors.set(anchor.uid, anchor)
}
const offsetPosition = [
hit.world_transform[12] - hit.anchor_transform[12],
hit.world_transform[13] - hit.anchor_transform[13],
hit.world_transform[14] - hit.anchor_transform[14]
]
const worldRotation = new Quaternion().setFromRotationMatrix(hit.world_transform)
const inverseAnchorRotation = new Quaternion().setFromRotationMatrix(hit.anchor_transform).inverse()
const offsetRotation = new Quaternion().multiplyQuaternions(worldRotation, inverseAnchorRotation)
const anchorOffset = new XRAnchorOffset(anchor.uid)
anchorOffset.poseMatrix = MatrixMath.mat4_fromRotationTranslation(new Float32Array(16), offsetRotation.toArray(), offsetPosition)
resolve(anchorOffset)
} else {
let coordinateSystem = new XRCoordinateSystem(display, XRCoordinateSystem.TRACKER)
coordinateSystem._relativeMatrix = hit.world_transform
const anchor = new XRAnchor(coordinateSystem, hit.uuid)
this._anchors.set(anchor.uid, anchor)
const anchorOffset = new XRAnchorOffset(anchor.uid)
resolve(anchorOffset)
}
})
} else if(this._vrDisplay !== null){
// Perform a hit test using the ARCore data
let hits = this._vrDisplay.hitTest(normalizedScreenX, normalizedScreenY)
if(hits.length == 0){
resolve(null)
return
}
hits.sort((a, b) => a.distance - b.distance)
let anchor = this._getAnchor(hits[0].uuid)
if(anchor === null){
let coordinateSystem = new XRCoordinateSystem(display, XRCoordinateSystem.TRACKER)
coordinateSystem._relativeMatrix = hits[0].modelMatrix
coordinateSystem._relativeMatrix[13] += XRViewPose.SITTING_EYE_HEIGHT
anchor = new XRAnchor(coordinateSystem)
this._anchors.set(anchor.uid, anchor)
}
resolve(new XRAnchorOffset(anchor.uid))
} else {
resolve(null) // No platform support for finding anchors
}
})
}
/**
* Creates an ARReferenceImage in the ARKit native side
* @param uid the ID of the image to create
* @param buffer the base64 encoded image
* @param width
* @param height
* @param physicalWidthInMeters
* @returns a promise when the image has been created, error otherwise
* @private
*/
_createImageAnchor(uid, buffer, width, height, physicalWidthInMeters) {
if (this._arKitWrapper) {
return this._arKitWrapper.createImageAnchor(uid, buffer, width, height, physicalWidthInMeters)
} else {
return null;
}
}
/**
* _activateDetectionImage Uses the ARKit wrapper to add a new reference image to the set of detection images in the ARKit configuration object
* and runs the session again. The promise is resolved when the image is detected by ARKit
* @param uid The name (id) of the image to activate. It has to be previously created by calling the "createImageAnchor" method
* @param display The current display
* @returns {Promise<any>} A promise resolved with the image transform in case of success, rejected with error otherwise
*/
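	// Hypothetical flow (names and values assumed for the example): create the reference
	// image first, then activate detection and wait for ARKit to report where it was found.
	//
	//   reality._createImageAnchor('poster', base64Png, 512, 512, 0.3)
	//     .then(() => reality._activateDetectionImage('poster', display))
	//     .then(transform => { /* image detected; transform is the anchor transform */ })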
_activateDetectionImage(uid, display) {
return new Promise((resolve, reject) => {
if (this._arKitWrapper) {
this._arKitWrapper.activateDetectionImage(uid).then(aRKitImageAnchor => {
if (aRKitImageAnchor.activated === true) {
let coordinateSystem = new XRCoordinateSystem(display, XRCoordinateSystem.TRACKER)
coordinateSystem._relativeMatrix = aRKitImageAnchor.imageAnchor.transform
let anchor = new XRImageAnchor(coordinateSystem, aRKitImageAnchor.imageAnchor.uuid)
this._anchors.set(aRKitImageAnchor.imageAnchor.uuid, anchor)
resolve(aRKitImageAnchor.imageAnchor.transform)
} else if (aRKitImageAnchor.error !== null) {
reject(aRKitImageAnchor.error)
} else {
reject(null)
}
})
} else {
reject('ARKit not supported')
}
})
}
_removeAnchor(uid){
if(this._arKitWrapper) {
this._arKitWrapper.removeAnchor(uid)
} else if (this._getAnchor(uid)) {
this._anchors.delete(uid)
}
}
_pickARKitHit(data){
if(data.length === 0) return null
let info = null
let planeResults = data.filter(
hitTestResult => hitTestResult.type != ARKitWrapper.HIT_TEST_TYPE_FEATURE_POINT
)
let planeExistingUsingExtentResults = planeResults.filter(
hitTestResult => hitTestResult.type == ARKitWrapper.HIT_TEST_TYPE_EXISTING_PLANE_USING_EXTENT
)
let planeExistingResults = planeResults.filter(
hitTestResult => hitTestResult.type == ARKitWrapper.HIT_TEST_TYPE_EXISTING_PLANE
)
if (planeExistingUsingExtentResults.length) {
// existing planes using extent first
planeExistingUsingExtentResults = planeExistingUsingExtentResults.sort((a, b) => a.distance - b.distance)
info = planeExistingUsingExtentResults[0]
} else if (planeExistingResults.length) {
// then other existing planes
planeExistingResults = planeExistingResults.sort((a, b) => a.distance - b.distance)
info = planeExistingResults[0]
} else if (planeResults.length) {
// other types except feature points
planeResults = planeResults.sort((a, b) => a.distance - b.distance)
info = planeResults[0]
} else {
// feature points if any
info = data[0]
}
return info
}
/*
Find intersections with anchors and planes along a ray. Normalized screen x and y are in range 0..1, with 0,0 at top left and 1,1 at bottom right.
Returns an Array of VRHit, or null if nothing was hit.
*/
_hitTestNoAnchor(normalizedScreenX, normalizedScreenY, display){
if(this._arKitWrapper !== null){
// Perform a hit test using the ARKit integration
let hits = this._arKitWrapper.hitTestNoAnchor(normalizedScreenX, normalizedScreenY);
for (let i = 0; i < hits.length; i++) {
hits[i].modelMatrix[13] += XRViewPose.SITTING_EYE_HEIGHT
}
if(hits.length == 0){
return null;
}
return hits;
} else if(this._vrDisplay !== null) {
// Perform a hit test using the ARCore data
let hits = this._vrDisplay.hitTest(normalizedScreenX, normalizedScreenY)
for (let i = 0; i < hits.length; i++) {
hits[i].modelMatrix[13] += XRViewPose.SITTING_EYE_HEIGHT
}
if(hits.length == 0){
return null;
}
return hits;
} else {
// No platform support for finding anchors
return null;
}
}
_getHasLightEstimate(){
if(this._arKitWrapper !== null){
return true;
}else{
return false;
}
}
_getLightAmbientIntensity(){
if(this._arKitWrapper !== null){
this._lightEstimate.ambientIntensity = this._arKitWrapper.lightIntensity;
return this._lightEstimate.ambientIntensity;
}else{
// No platform support for light estimation
return null;
}
}
_getWorldMappingStatus(){
if(this._arKitWrapper !== null){
return this._arKitWrapper.worldMappingStatus;
}else{
// No platform support for world mapping status
return null;
}
}
/**
* retrieves a worldMap from the platform, if possible
* @returns a promise when the worldMap has been retrieved
* @private
*/
_getWorldMap() {
return new Promise((resolve, reject) => {
if (this._arKitWrapper) {
this._arKitWrapper.getWorldMap().then(ARKitWorldMap => {
if (ARKitWorldMap.saved === true) {
resolve(ARKitWorldMap.worldMap)
} else if (ARKitWorldMap.error !== null) {
reject(ARKitWorldMap.error)
} else {
reject(null)
}
})
} else {
reject('ARKit not supported')
}
})
}
/**
* sets a worldMap for the platform, if possible
* @param worldMap a platform specific worldmap
* @returns a promise when the worldMap has been set
* @private
*/
_setWorldMap(worldMap) {
if (this._arKitWrapper) {
return this._arKitWrapper.setWorldMap(worldMap)
} else {
return new Promise((resolve, reject) => {
reject(new Error('setWorldMap not supported'));
})
}
}
_getTimeStamp(timestamp) {
if(this._arKitWrapper !== null){
return this._arKitWrapper.timestamp;
}else{
// use performance.now()
//return ( performance || Date ).now();
return timestamp
}
}
/*
No floor in AR
*/
_findFloorAnchor(display, uid=null){
return new Promise((resolve, reject) => {
resolve(null)
})
}
}
| CameraReality |
stages.go | package spec
import (
"context"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/coreos/coreos-assembler-schema/cosa"
log "github.com/sirupsen/logrus"
)
// GetStage returns the stage with the matching ID
func (j *JobSpec) GetStage(id string) (*Stage, error) {
for _, stage := range j.Stages {
if stage.ID == id {
return &stage, nil
}
}
return nil, fmt.Errorf("no such stage with ID %q", id)
}
// Stage is a single stage.
type Stage struct {
ID string `yaml:"id,omitempty" json:"id,omitempty"`
Description string `yaml:"description,omitempty" json:"description,omitempty"`
ConcurrentExecution bool `yaml:"concurrent,omitempty" json:"concurrent,omitempty"`
// DirectExec signals that the command should not be written
// to a file. Rather the command should directly executed.
DirectExec bool `yaml:"direct_exec,omitempty" json:"direct_exec,omitempty"`
// NotBlocking means that the stage does not block another stage
// from starting execution (i.e. concurrent stage).
NotBlocking bool `yaml:"not_blocking,omitempty" json:"not_blocking,omitempty"`
// RequireArtifacts is a list of required artifact names. If a
// required artifact is missing (per the meta.json), the stage
// will not be executed. RequireArtifacts _implies_ sending builds/builds.json
// and builds/<BUILDID>/meta.json.
RequireArtifacts []string `yaml:"require_artifacts,flow,omitempty" json:"require_artifacts,omitempty"`
// RequestArtifacts are files that are provided if they are there. Examples include
// 'caches' for `/srv/cache` and `/srv/tmp/repo` tarballs or `ostree` which are really useful
// for base builds.
RequestArtifacts []string `yaml:"request_artifacts,flow,omitempty" json:"request_artifacts,omitempty"`
// BuildArtifacts produces "known" artifacts. The special "base"
// will produce an OSTree and QCOWs.
BuildArtifacts []string `yaml:"build_artifacts,flow,omitempty" json:"build_artifacts,omitempty"`
// Commands are arbitrary commands run after an Artifact builds.
// Instead of running `cosa buildextend-?` as a command, it's preferable
// to use the bare name in BuildArtifacts.
Commands []string `yaml:"commands,flow,omitempty" json:"commands,omitempty"`
// PublishArtifacts will upload defined BuildArtifacts to the cloud providers
PublishArtifacts []string `yaml:"publish_artifacts,omitempty" json:"publish_artifacts,omitempty"`
// PrepCommands are run before Artifact builds, while
// PostCommands are run after. Prep and Post Commands are run serially.
PrepCommands []string `yaml:"prep_commands,flow,omitempty" json:"prep_commands,omitempty"`
PostCommands []string `yaml:"post_commands,flow,omitempty" json:"post_commands,omitempty"`
// PostAlways ensures that the PostCommands are always run.
PostAlways bool `yaml:"post_always,omitempty" json:"post_always,omitempty"`
// ExecutionOrder is a number value that defines the order of stages. If two stages
// share the same execution order number, then they are allowed to run concurrently to each other.
ExecutionOrder int `yaml:"execution_order,omitempty" json:"execution_order,omitempty"`
// ReturnCache returns a tarball of `/srv/cache`, while RequireCache ensures the tarball
// is fetched and unpacked into `/srv/cache`. RequestCache is a non-blocking, optional version
// of RequireCache.
ReturnCache bool `yaml:"return_cache,omitempty" json:"return_cache,omitempty"`
RequireCache bool `yaml:"require_cache,omitempty" json:"require_cache,omitempty"`
RequestCache bool `yaml:"request_cache,omitempty" json:"request_cache,omitempty"`
// ReturnCacheRepo returns a tarball of `/srv/repo`, while RequireCacheRepo ensures the
// tarball is fetched and unpacked into `/srv/repo`. RequestCacheRepo is a non-blocking, optional
// version of RequireCacheRepo
ReturnCacheRepo bool `yaml:"return_cache_repo,omitempty" json:"return_cache_repo,omitempty"`
RequireCacheRepo bool `yaml:"require_cache_repo,omitempty" json:"require_cache_repo,omitempty"`
RequestCacheRepo bool `yaml:"request_cache_repo,omitempty" json:"request_cache_repo,omitempty"`
// ReturnFiles returns a list of files that were requested to be returned.
ReturnFiles []string `yaml:"return_files,omitempty" json:"return_files,omitempty"`
// KolaTests are shorthands for testing.
KolaTests []string `yaml:"kola_tests,omitempty" json:"kola_tests,omitempty"`
// Overrides is a list of Overrides to apply to the OS tree
Overrides []Override `yaml:"overrides,omitempty" json:"overrides,omitempty"`
}
// These are the only hard-coded commands that Gangplank understand.
const (
// defaultBaseCommand is the basic build command
defaultBaseCommand = "cosa fetch; cosa build %s;"
// defaultBaseDelayMergeCommand is used for distributed build using
// parallel workers pods.
defaultBaseDelayMergeCommand = "cosa fetch; cosa build %s --delay-meta-merge;"
// defaultFinalizeCommand ensures that the meta.json is merged.
defaultFinalizeCommand = "cosa meta --finalize;"
)
// cosaBuildCmd checks if b is a buildable artifact type and then
// returns the command(s) needed to build it.
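//
// For example (illustrative, assuming cosa.CanArtifact reports "metal" as buildable):
// with DelayedMetaMerge unset, "qemu" maps to "cosa fetch; cosa build qemu;" while
// "metal" maps to "cosa buildextend-metal".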
func cosaBuildCmd(b string, js *JobSpec) ([]string, error) {
log.WithField("command", b).Info("checking shorthand")
switch v := strings.ToLower(b); v {
case "base", "ostree", "qemu":
if v == "base" {
v = ""
}
if js.DelayedMetaMerge {
return []string{fmt.Sprintf(defaultBaseDelayMergeCommand, v)}, nil
}
return []string{fmt.Sprintf(defaultBaseCommand, v)}, nil
case "finalize":
return []string{defaultFinalizeCommand}, nil
case "live":
return []string{fmt.Sprintf("cosa buildextend-%s", b)}, nil
}
if cosa.CanArtifact(b) {
return []string{fmt.Sprintf("cosa buildextend-%s", b)}, nil
}
return nil, fmt.Errorf("%s is not a known buildable artifact", b)
}
// getCommands renders the automatic artifacts and publication commands
func (s *Stage) getCommands(rd *RenderData) ([]string, error) {
if len(s.BuildArtifacts) > 0 {
log.WithField("mapping artifacts", s.BuildArtifacts).Infof("Mapping artifacts")
}
numBuildArtifacts := len(s.BuildArtifacts)
totalCmds := len(s.Commands) + numBuildArtifacts
ret := make([]string, totalCmds)
for i, ba := range s.BuildArtifacts {
log.WithField("artifact", ba).Info("mapping artifact to command")
cmds, err := cosaBuildCmd(ba, rd.JobSpec)
if err != nil {
log.WithError(err).Errorf("failed to map build artifacts: %v", ba)
return nil, err
}
ret[i] = strings.Join(cmds, "\n")
}
for i, c := range s.Commands {
ret[(numBuildArtifacts + i)] = c
}
return ret, nil
}
// getPostCommands generates the post commands from a synthesis of pre-defined
// post commands, kola tests and the cloud publication steps.
func (s *Stage) getPostCommands(rd *RenderData) ([]string, error) {
ret := s.PostCommands
log.WithField("mapping tests", s.KolaTests).Infof("Resolving test definitions")
for _, kolaTest := range s.KolaTests {
tk, ok := kolaTestDefinitions[kolaTest]
if !ok {
return nil, fmt.Errorf("test %q is an unknown short hand", kolaTest)
}
ret = append(ret, tk.PostCommands...)
}
pc, err := s.getPublishCommands(rd)
if err != nil {
return nil, err
}
ret = append(ret, pc...)
return ret, nil
}
// getPublishCommands returns the cloud publication commands.
func (s *Stage) getPublishCommands(rd *RenderData) ([]string, error) {
var publishCommands []string
c := rd.JobSpec.CloudsCfgs
for _, cloud := range s.PublishArtifacts {
if !cosa.CanArtifact(cloud) {
return nil, fmt.Errorf("Invalid cloud artifact: %v", cloud)
}
config, err := c.GetCloudCfg(cloud)
if err != nil {
return nil, err
}
pc, err := config.GetPublishCommand(rd.Meta.BuildID)
if err != nil {
return nil, err
}
publishCommands = append(publishCommands, pc)
}
return publishCommands, nil
}
// Execute runs the commands of a stage.
func (s *Stage) Execute(ctx context.Context, rd *RenderData, envVars []string) error {
if ctx == nil {
return errors.New("context must not be nil")
}
if rd == nil {
return errors.New("render data must not be nil")
}
log.Infof("Stage: %v", s)
cmds, err := s.getCommands(rd)
if err != nil {
log.WithError(err).Error("failed to get stage commands")
return err
}
postCommands, err := s.getPostCommands(rd)
if err != nil {
log.WithError(err).Error("failed to get post commands")
return err
}
if len(s.PrepCommands) == 0 && len(cmds) == 0 && len(postCommands) == 0 {
return errors.New("no commands to execute")
}
log.WithField("cmd", cmds).Info("stage commands readied")
tmpd, err := ioutil.TempDir("", "stages")
if err != nil {
return err
}
defer os.RemoveAll(tmpd)
// Render the pre and post scripts.
prepScript := filepath.Join(tmpd, "prep.sh")
if err := ioutil.WriteFile(prepScript, []byte(strings.Join(s.PrepCommands, "\n")), 0755); err != nil {
return err
}
if err := rd.RendererExecuter(ctx, envVars, prepScript); err != nil {
return fmt.Errorf("Failed execution of the prep stage: %w", err)
}
postScript := filepath.Join(tmpd, "post.sh")
if err := ioutil.WriteFile(postScript, []byte(strings.Join(postCommands, "\n")), 0755); err != nil {
return err
}
if s.PostAlways {
log.Info("PostCommand will be executed regardless of command success")
defer func() {
_ = rd.RendererExecuter(ctx, envVars, postScript)
}()
}
// Write out each command to their own file. To enable concurrent execution.
scripts := make(map[int]string)
for i, c := range cmds {
outf := filepath.Join(tmpd, fmt.Sprintf("script-%d.sh", i))
if err := ioutil.WriteFile(outf, []byte(c), 0755); err != nil {
return err
}
scripts[i] = outf
log.Infof("%s: %s", outf, c)
}
// Execute the main command stage.
if !s.ConcurrentExecution {
// Non-concurrent commands are run serially. Any failure will immediately
// break the run.
log.Infof("Executing %d stage commands serially", len(scripts))
// Don't use `range scripts` here because the map is unordered
// and we want to execute the commands in order. We know the map
// was populated in order with index[i] so just use the length
// here and count from 0 to len(scripts).
for i := 0; i < len(scripts); i++ {
if err := rd.RendererExecuter(ctx, envVars, scripts[i]); err != nil {
return err
}
}
} else {
// Concurrent commands are run in parallel until all complete OR
// one fails.
log.Infof("Executing %d stage commands concurrently", len(scripts))
wg := &sync.WaitGroup{}
errors := make(chan error, len(scripts))
for _, s := range scripts {
wg.Add(1)
go func(s string, w *sync.WaitGroup, ctx context.Context) {
defer w.Done()
log.Infof("STARTING command: %s", s)
e := rd.RendererExecuter(ctx, envVars, s)
errors <- e
if e != nil {
log.Infof("ERROR %s", s)
return
}
log.Infof("SUCCESS %s", s)
}(s, wg, ctx)
// hack: ensure that scripts are started serially
// but may run concurrently
time.Sleep(50 * time.Millisecond)
}
// Wait for the concurrent commands to run, and check
// all errors to make sure none are swallowed.
wg.Wait()
var e error = nil
// Drain one result per script; the channel is buffered and never closed,
// so read exactly len(scripts) values.
for x := 0; x < len(scripts); x++ {
err := <-errors
if err != nil {
log.Errorf("error received: %v", err)
e = err
}
}
if e != nil {
return e
}
}
// If PostAlways, then the postScript is executed in defer call above.
if !s.PostAlways {
return rd.RendererExecuter(ctx, envVars, postScript)
}
return nil
}
var (
// pseudoStages are special setup and tear down phases.
pseudoStages = []string{"base", "finalize", "live"}
// buildableArtifacts are known artifacts types from the schema.
buildableArtifacts = append(pseudoStages, cosa.GetCommandBuildableArtifacts()...)
// baseArtifacts are default built by the "base" short-hand
baseArtifacts = []string{"ostree", "qemu"}
)
// isBaseArtifact is a check function for determining if an artifact
// is built by the base stage.
func isBaseArtifact(artifact string) bool {
for _, k := range baseArtifacts {
if k == artifact {
return true
}
}
return false
}
// GetArtifactShortHandNames returns shorthands for buildable stages
func GetArtifactShortHandNames() []string {
return buildableArtifacts
}
// addShorthandToStage adds a build shorthand into the stage and
// ensures that required dependencies are correctly ordered
// Ordering assumptions:
// 1. Base builds
// 2. Basic Kola Tests
// 3. Metal and Live ISO images
// 4. Metal and Live ISO testings
// 5. Cloud stages
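//
// Illustrative sketch of how shorthands combine (mirrors the call pattern in GenerateStages below):
//
//	var s Stage
//	addShorthandToStage("base", &s)
//	addShorthandToStage("metal", &s)
//
// s.ExecutionOrder ends up as 1, s.BuildArtifacts as ["base", "metal"], and
// "ostree"/"qemu" are dropped from the requires because the base build provides them.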
func addShorthandToStage(artifact string, stage *Stage) {
quickStage := func(noun string) *Stage {
switch noun {
case "base":
return &Stage{
BuildArtifacts: []string{"base"},
ExecutionOrder: 1,
RequestArtifacts: []string{"ostree"},
RequestCache: true,
RequestCacheRepo: true,
}
case "extensions":
return &Stage{
BuildArtifacts: []string{"extensions"},
ExecutionOrder: 2,
RequireArtifacts: []string{"ostree"},
RequireCache: true,
RequireCacheRepo: true,
}
case "finalize":
return &Stage{
BuildArtifacts: []string{"finalize"},
ExecutionOrder: 999,
}
case "live":
return &Stage{
ExecutionOrder: 2,
BuildArtifacts: []string{"live"},
RequireArtifacts: []string{"ostree", "metal", "metal4k"},
}
case "metal":
return &Stage{
ExecutionOrder: 3,
BuildArtifacts: []string{"metal"},
RequireArtifacts: []string{"ostree"},
}
case "metal4k":
return &Stage{
ExecutionOrder: 3,
BuildArtifacts: []string{"metal4k"},
RequireArtifacts: []string{"ostree"},
}
case "oscontainer":
return &Stage{
BuildArtifacts: []string{"oscontainer"},
ExecutionOrder: 2,
RequireArtifacts: []string{"ostree"},
RequireCache: true,
RequireCacheRepo: true,
}
default:
// check if the short hand is a test stage
testStage, ok := kolaTestDefinitions[noun]
if ok {
return &testStage
}
// otherwise it's likely a cloud stage
if !cosa.CanArtifact(artifact) {
break
}
return &Stage{
ExecutionOrder: 5,
BuildArtifacts: []string{artifact},
RequireArtifacts: []string{"qemu"},
}
}
log.WithField("artifact", noun).Fatalf("unknown artifact type")
return nil
}
working := quickStage(artifact)
// remove is a helper for removing the first matching item from a slice
remove := func(slice []string, key string) ([]string, bool) {
for x := 0; x < len(slice); x++ {
if slice[x] == key {
return append(slice[:x], slice[x+1:]...), true
}
}
return slice, false
}
unique := func(strSlice []string) []string {
keys := make(map[string]bool)
list := []string{}
for _, entry := range strSlice {
if _, value := keys[entry]; !value {
keys[entry] = true
list = append(list, entry)
}
}
return list
}
// if the stage returns cache/repo cache then it provides the requires
if working.RequireCache && !stage.ReturnCache {
stage.RequireCache = true
stage.RequestCache = false
}
if working.RequireCacheRepo && !stage.ReturnCacheRepo {
stage.RequireCacheRepo = true
stage.RequestCacheRepo = false
}
// Handle the return/requires for cache and repo cache
if working.ReturnCache {
stage.ReturnCache = working.ReturnCache
}
if working.ReturnCacheRepo {
stage.ReturnCacheRepo = working.ReturnCacheRepo
}
// Only set RequestCache[Repo] if we don't require them.
if working.RequestCache && (!stage.RequireCache || !working.RequireCache) {
stage.RequestCache = true
}
if working.RequestCacheRepo && (!stage.RequireCacheRepo || !working.RequireCacheRepo) {
stage.RequestCacheRepo = true
}
// if the stage returns cache/repo cache then it provides the requires
if working.RequireCache && !stage.ReturnCache {
stage.RequireCache = true
}
if working.RequireCacheRepo && !stage.ReturnCacheRepo {
stage.RequireCacheRepo = true
}
// Add the commands if defined
stage.Commands = append(stage.Commands, working.Commands...)
stage.PrepCommands = append(stage.PrepCommands, working.PrepCommands...)
stage.PostCommands = append(stage.PostCommands, working.PostCommands...)
stage.RequestArtifacts = append(stage.RequestArtifacts, working.RequestArtifacts...)
stage.BuildArtifacts = append(stage.BuildArtifacts, working.BuildArtifacts...)
stage.RequireArtifacts = append(stage.RequireArtifacts, working.RequireArtifacts...)
// Assume the lowest stage execution order
if working.ExecutionOrder < stage.ExecutionOrder || stage.ExecutionOrder == 0 {
stage.ExecutionOrder = working.ExecutionOrder
}
randID := time.Now().UTC().UnixNano() // Ensure a random ID
stage.ID = fmt.Sprintf("ExecOrder %d Stage %d", stage.ExecutionOrder, randID)
stage.Description = fmt.Sprintf("Stage %d execution %s",
stage.ExecutionOrder, strings.Join(append(stage.BuildArtifacts, stage.KolaTests...), ","))
// Get the order that artifacts should be built
artifactOrder := make(map[int][]string)
for _, v := range stage.BuildArtifacts {
if v == "caches" {
stage.RequireCache = true
stage.RequireCacheRepo = true
} else {
fakeStage := quickStage(v)
artifactOrder[fakeStage.ExecutionOrder] = append(artifactOrder[fakeStage.ExecutionOrder], v)
}
}
newOrder := []string{}
for _, v := range artifactOrder {
newOrder = append(newOrder, v...)
}
stage.BuildArtifacts = unique(newOrder)
// Base implies building ostree and qemu
buildArtifacts, buildsBase := remove(unique(newOrder), "base")
if buildsBase {
buildArtifacts, _ = remove(buildArtifacts, "ostree")
buildArtifacts, _ = remove(buildArtifacts, "qemu")
stage.BuildArtifacts = append([]string{"base"}, buildArtifacts...)
}
	// If the synthetic stage requires/requests an optional artifact, but also builds it,
	// then we need to remove it from the requires.
realRequires := stage.RequireArtifacts
realOptional := stage.RequestArtifacts
for _, ba := range stage.BuildArtifacts {
for _, ra := range stage.RequireArtifacts {
if ra == ba {
realRequires, _ = remove(realRequires, ra)
}
}
for _, oa := range stage.RequestArtifacts {
if oa == ba {
realOptional, _ = remove(realOptional, oa)
}
}
}
	// base is shorthand for ostree and qemu. It's handled specially
	// since we have to consider that "qemu" and "ostree" are implied by it.
var foundBase bool
realRequires, foundBase = remove(realRequires, "base")
if foundBase || buildsBase {
for _, v := range baseArtifacts {
realRequires, _ = remove(realRequires, v)
realOptional, _ = remove(realOptional, v)
}
}
stage.RequireArtifacts = unique(realRequires)
stage.RequestArtifacts = unique(realOptional)
}
// isValidArtifactShortHand checks if the shorthand is valid
func isValidArtifactShortHand(a string) bool {
valid := false
for _, v := range strings.Split(strings.ToLower(a), "+") {
if cosa.CanArtifact(v) {
valid = true
}
for _, ps := range pseudoStages {
if v == ps {
valid = true
break
}
}
}
return valid
}
// GenerateStages creates stages.
func (j *JobSpec) GenerateStages(fromNames, testNames []string, singleStage bool) error {
j.DelayedMetaMerge = true
j.Job.StrictMode = true
for _, k := range fromNames {
if !isValidArtifactShortHand(k) {
return fmt.Errorf("artifact %s is an invalid artifact", k)
}
}
for _, k := range testNames {
if _, ok := kolaTestDefinitions[k]; !ok {
return fmt.Errorf("kola test %s is an invalid kola name", k)
}
}
if singleStage && len(fromNames) > 0 {
newList := []string{strings.Join(append(fromNames, testNames...), "+")}
fromNames = newList
}
for _, k := range append(fromNames, testNames...) {
var s Stage
for _, k := range strings.Split(k, "+") {
addShorthandToStage(k, &s)
}
j.Stages = append(j.Stages, s)
}
return nil
}
// DeepCopy does a lazy deep copy by rendering the stage to JSON
// and then returning a new Stage defined by the JSON
func (s *Stage) DeepCopy() (Stage, error) {
ns := Stage{}
out, err := json.Marshal(s)
if err != nil {
return ns, err
}
err = json.Unmarshal(out, &ns)
return ns, err
}
// addAllShorthandsToStage adds all the shorthands to the stage
func addAllShorthandsToStage(stage *Stage, shorthands ...string) | {
for _, short := range shorthands {
addShorthandToStage(short, stage)
}
} |
|
push.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package trigger
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
prowapi "k8s.io/test-infra/prow/apis/prowjobs/v1"
"k8s.io/test-infra/prow/config"
"k8s.io/test-infra/prow/github"
"k8s.io/test-infra/prow/pjutil"
)
func listPushEventChanges(pe github.PushEvent) config.ChangedFilesProvider {
return func() ([]string, error) {
changed := make(map[string]bool)
for _, commit := range pe.Commits {
for _, added := range commit.Added {
changed[added] = true
}
for _, removed := range commit.Removed {
changed[removed] = true
}
for _, modified := range commit.Modified {
changed[modified] = true
}
}
var changedFiles []string
for file := range changed {
changedFiles = append(changedFiles, file)
}
return changedFiles, nil
}
}
func createRefs(pe github.PushEvent) prowapi.Refs {
return prowapi.Refs{
Org: pe.Repo.Owner.Name,
Repo: pe.Repo.Name,
BaseRef: pe.Branch(),
BaseSHA: pe.After,
BaseLink: pe.Compare,
}
}
func handlePE(c Client, pe github.PushEvent) error | {
if pe.Deleted || pe.After == "0000000000000000000000000000000000000000" {
// we should not trigger jobs for a branch deletion
return nil
}
org := pe.Repo.Owner.Login
repo := pe.Repo.Name
shaGetter := func() (string, error) {
return pe.After, nil
}
postsubmits := getPostsubmits(c.Logger, c.GitClient, c.Config, org+"/"+repo, shaGetter)
for _, j := range postsubmits {
if shouldRun, err := j.ShouldRun(pe.Branch(), listPushEventChanges(pe)); err != nil {
return err
} else if !shouldRun {
continue
}
refs := createRefs(pe)
labels := make(map[string]string)
for k, v := range j.Labels {
labels[k] = v
}
labels[github.EventGUID] = pe.GUID
pj := pjutil.NewProwJob(pjutil.PostsubmitSpec(j, refs), labels, j.Annotations)
c.Logger.WithFields(pjutil.ProwJobFields(&pj)).Info("Creating a new prowjob.")
if _, err := c.ProwJobClient.Create(context.TODO(), &pj, metav1.CreateOptions{}); err != nil {
return err
}
}
return nil
} |
|
new-instance-prepare.py | #!/usr/bin/env python3
import os
import subprocess
import click
from rich import pretty, inspect
from rich.console import Console
# setup Rich
pretty.install()
console = Console()
print = console.print
log = console.log
# setup click
@click.command()
@click.option('--dns-name', prompt='DNS Name', help='DNS name used by this Nextcloud instance.')
@click.option('--dns-resolver', prompt='DNS Server', help='DNS server used internally by the proxy.')
@click.option('--nextcloud-version', prompt='Nextcloud version', help='Nextcloud version; e.g.: 21')
@click.option('--nfs-server', prompt='NFS Server', help='NFS Server to use for nextcloud "data" volume.')
@click.option('--nfs-path', prompt='NFS Path', help='Exported path on the NFS server; e.g.: "/tank/user/cloud"')
@click.option('--proxy-tls/--no-proxy-tls', default=False, help='Enable TLS proxy')
@click.option('--proxy-port-http', default=0, help='Exposed HTTP port')
@click.option('--proxy-port-https', default=0, help='Exposed HTTPS port')
@click.option('--proxy-bind-ip', required=False, multiple=True, help='IP address(es) to bind. Default bind: "0.0.0.0"')
@click.option('--container-restart-policy', help='One of: no, on-failure, always, unless-stopped', default='unless-stopped')
def | (
dns_name, dns_resolver, nextcloud_version, nfs_server, nfs_path,
proxy_tls, proxy_port_http, proxy_port_https, proxy_bind_ip,
container_restart_policy):
# copy template directory
# generate docker-compose.yml
# generate db.env
# print:
# - generated http port
# - generated https port
# - checklist of possible pending todos
# - tls certs
# - docker-compose up -d
# - firewall at router
# - firewall on this host machine
# - public dns record
# - local dns record
# - nfs server path creation and export
# - php occ maintenance:install --database="mysql" --database-name="nextcloud" --database-host="localhost" --database-user="root" --database-pass="12345678" --database-table-prefix="" --admin-user="yourname" --admin-pass="87654321"
# - php occ db:add-missing-indices
# - php occ db:convert-filecache-bigint
# - php occ db:add-missing-primary-keys
# - php occ config:system:set trusted_domains 1 --value="nextcloud.my.domain"
# - php occ config:system:set overwriteprotocol --value="https"
# - php occ config:system:set defaultapp --value="files"
# - php occ config:system:set log_rotate_size --value="10485760" --type=integer
pass
if __name__ == '__main__':
main()
| main |
flyout.ts | import { Ref, ref, watch, readonly, onUnmounted } from 'vue'
interface UseFlyoutOptions {
el: Ref<HTMLElement | undefined>
onFocus?(): void
onBlur?(): void
}
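// Module-level focus-tracking state shared by every useFlyout() consumer:
// the element that currently has focus, whether the global 'focusin' listener
// is installed, and how many components are subscribed to it.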
export const focusedElement = ref<HTMLElement>()
let active = false
let listeners = 0
export function useFlyout(options: UseFlyoutOptions) {
const focus = ref(false)
if (typeof window !== 'undefined') {
!active && activateFocusTracking()
listeners++
const unwatch = watch(focusedElement, (el) => {
if (el === options.el.value || options.el.value?.contains(el as Node)) {
focus.value = true
options.onFocus?.()
} else {
focus.value = false
options.onBlur?.()
}
})
onUnmounted(() => {
unwatch()
listeners--
if (!listeners) {
deactivateFocusTracking()
}
})
}
| function activateFocusTracking() {
document.addEventListener('focusin', handleFocusIn)
active = true
focusedElement.value = document.activeElement as HTMLElement
}
function deactivateFocusTracking() {
document.removeEventListener('focusin', handleFocusIn)
}
function handleFocusIn() {
focusedElement.value = document.activeElement as HTMLElement
} | return readonly(focus)
}
|
build.rs | #![feature(path_try_exists)]
use std::fmt::Debug;
use std::fs;
use std::env;
use std::path::PathBuf;
use std::process::Command;
use std::str::FromStr;
use cmake;
fn main() |
fn get_env_var<'a, T, E>(key: &str) -> Result<T, env::VarError>
where T: FromStr<Err=E>,
E: Debug
{
let str_var = env::var(key)?;
let var = match T::from_str(str_var.as_str()) {
Ok(x) => x,
Err(e) => panic!("Failed to parse env var. value: '{}', error:'{}': {:?}", key, str_var, e)
};
Ok(var)
} | {
// Gather any env vars we need
let is_debug = get_env_var::<bool, _>("DEBUG").unwrap_or(false);
let is_secure = get_env_var::<String, _>("CARGO_FEATURE_SECURE").is_ok();
let debug = if is_debug { "Debug" } else { "Release" };
let secure = if is_secure { "ON" } else { "OFF" };
let out_dir = get_env_var::<PathBuf, _>("OUT_DIR")
.expect("Failed to get the output directory");
let lib_path = out_dir.join("mimalloc");
// Clone the configured version of mi-malloc if not already present
if !fs::try_exists(&lib_path).unwrap_or(false) {
let output = Command::new("git")
.arg("clone")
.arg("-b")
.arg("v1.7.2")
.arg("--single-branch")
.arg("https://github.com/microsoft/mimalloc.git")
.arg(lib_path.display().to_string())
.output()
.expect("Failed to find git on the system. Install git to build this project.");
if !output.status.success() {
panic!(
"Failed to clone mi-malloc repository with error '{}'",
String::from_utf8_lossy(&output.stderr)
);
}
}
// Build the library
let dst = cmake::Config::new(lib_path)
.define("MAKE_BUILD_TYPE", debug)
.define("MI_SECURE", secure)
.define("MI_OVERRIDE", "OFF")
.define("MI_OSX_ZONE", "OFF")
.define("MI_BUILD_SHARED", "OFF")
.define("MI_BUILD_TESTS", "OFF")
.define("MI_BUILD_OBJECT", "OFF")
.build();
let search_path = if is_debug { dst.join("build/Debug") } else { dst.join("build/Release") };
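    // The static library name produced by mimalloc depends on the debug/secure configuration.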
let lib_name = match (is_debug, is_secure) {
(true, true) => "mimalloc-secure-static-debug",
(true, false) => "mimalloc-static-debug",
(false, true) => "mimalloc-secure-static",
(false, false) => "mimalloc-static",
};
// Tell cargo to link the built library
println!("cargo:rustc-link-search=native={}", search_path.display());
println!("cargo:rustc-link-lib=static={}", lib_name);
// We only need to run the build script once
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed=DEBUG");
println!("cargo:rerun-if-env-changed=CARGO_FEATURE_SECURE");
} |
content.rs |
//! Abstractions and definitions involving content and fragment injection.
use tendril;
use event;
use text;
use modifier;
impl event::IntoStream for text::Value {
type Stream = Stream;
fn into_stream(self) -> Self::Stream {
Stream { data: Some(self.into()) }
}
}
impl<'a> event::IntoStream for &'a text::Value {
type Stream = Stream;
fn into_stream(self) -> Self::Stream {
Stream { data: Some(self.clone().into()) }
}
}
impl event::IntoStream for text::Data {
type Stream = Stream;
fn into_stream(self) -> Self::Stream {
Stream { data: Some(self) }
}
}
impl<'a> event::IntoStream for &'a text::Data {
type Stream = Stream;
fn into_stream(self) -> Self::Stream |
}
impl event::IntoStream for &'static str {
type Stream = Stream;
fn into_stream(self) -> Self::Stream {
Stream { data: Some(text::Data::from_unencoded_static_str(self)) }
}
}
impl<S> event::IntoStream for S where S: event::Stream {
type Stream = S;
fn into_stream(self) -> S { self }
}
impl<S, E> event::IntoStream for Result<S, E>
where
S: event::IntoStream,
E: Into<event::StreamError>,
{
type Stream = modifier::Fallible<S::Stream>;
fn into_stream(self) -> Self::Stream {
modifier::Fallible::new(
self.map(event::IntoStream::into_stream)
.map_err(Into::into)
)
}
}
macro_rules! impl_octal {
($name:ty) => {
impl event::IntoStream for $name {
type Stream = Stream;
fn into_stream(self) -> Self::Stream {
use std::fmt::{ Write };
let mut tendril = tendril::StrTendril::new();
write!(tendril, "{}", self).expect("writing octal to template");
Stream { data: Some(text::Data::from_encoded_tendril(tendril)) }
}
}
}
}
impl_octal!(usize);
impl_octal!(u8);
impl_octal!(u16);
impl_octal!(u32);
impl_octal!(u64);
impl_octal!(isize);
impl_octal!(i8);
impl_octal!(i16);
impl_octal!(i32);
impl_octal!(i64);
/// Single event data injection stream.
///
/// This stream is generated by some `event::IntoStream` implementations when only a single
/// piece of data content has to be injected into the stream.
#[derive(Debug)]
pub struct Stream {
data: Option<text::Data>,
}
impl event::Stream for Stream {
fn next_event(&mut self) -> event::StreamResult {
Ok(self.data.take().map(event::data))
}
}
| {
Stream { data: Some(self.clone()) }
} |
healthcheck.go | package healthcheck
import (
"context"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"sync"
"time"
"github.com/go-kit/kit/metrics"
"github.com/traefik/traefik/v2/pkg/config/runtime"
"github.com/traefik/traefik/v2/pkg/log"
"github.com/traefik/traefik/v2/pkg/safe"
"github.com/vulcand/oxy/roundrobin"
)
const (
serverUp = "UP"
serverDown = "DOWN"
)
var (
singleton *HealthCheck
once sync.Once
)
// Balancer is the set of operations required to manage the list of servers in a load-balancer.
type Balancer interface {
Servers() []*url.URL
RemoveServer(u *url.URL) error
UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error
}
// BalancerHandler includes functionality for load-balancing management.
type BalancerHandler interface {
ServeHTTP(w http.ResponseWriter, req *http.Request)
Balancer
}
// metricsRegistry is a local interface in the health check package,
// exposing only the required metrics necessary for the health check package.
// This makes it easier for the tests.
type metricsRegistry interface {
BackendServerUpGauge() metrics.Gauge
}
// Options are the public health check options.
type Options struct {
Headers map[string]string
Hostname string
Scheme string
Path string
Port int
FollowRedirects bool
Transport http.RoundTripper
Interval time.Duration
Timeout time.Duration
LB Balancer
}
func (opt Options) String() string {
return fmt.Sprintf("[Hostname: %s Headers: %v Path: %s Port: %d Interval: %s Timeout: %s FollowRedirects: %v]", opt.Hostname, opt.Headers, opt.Path, opt.Port, opt.Interval, opt.Timeout, opt.FollowRedirects)
}
type backendURL struct {
url *url.URL
weight int
}
// BackendConfig HealthCheck configuration for a backend.
type BackendConfig struct {
Options
name string | u, err := serverURL.Parse(b.Path)
if err != nil {
return nil, err
}
if len(b.Scheme) > 0 {
u.Scheme = b.Scheme
}
if b.Port != 0 {
u.Host = net.JoinHostPort(u.Hostname(), strconv.Itoa(b.Port))
}
return http.NewRequest(http.MethodGet, u.String(), http.NoBody)
}
// this function adds additional http headers and hostname to http.request.
func (b *BackendConfig) addHeadersAndHost(req *http.Request) *http.Request {
if b.Options.Hostname != "" {
req.Host = b.Options.Hostname
}
for k, v := range b.Options.Headers {
req.Header.Set(k, v)
}
return req
}
// HealthCheck struct.
type HealthCheck struct {
Backends map[string]*BackendConfig
metrics metricsRegistry
cancel context.CancelFunc
}
// SetBackendsConfiguration set backends configuration.
func (hc *HealthCheck) SetBackendsConfiguration(parentCtx context.Context, backends map[string]*BackendConfig) {
hc.Backends = backends
if hc.cancel != nil {
hc.cancel()
}
ctx, cancel := context.WithCancel(parentCtx)
hc.cancel = cancel
for _, backend := range backends {
currentBackend := backend
safe.Go(func() {
hc.execute(ctx, currentBackend)
})
}
}
func (hc *HealthCheck) execute(ctx context.Context, backend *BackendConfig) {
logger := log.FromContext(ctx)
logger.Debugf("Initial health check for backend: %q", backend.name)
hc.checkBackend(ctx, backend)
ticker := time.NewTicker(backend.Interval)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
logger.Debugf("Stopping current health check goroutines of backend: %s", backend.name)
return
case <-ticker.C:
logger.Debugf("Refreshing health check for backend: %s", backend.name)
hc.checkBackend(ctx, backend)
}
}
}
func (hc *HealthCheck) checkBackend(ctx context.Context, backend *BackendConfig) {
logger := log.FromContext(ctx)
enabledURLs := backend.LB.Servers()
var newDisabledURLs []backendURL
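	// Re-check the currently disabled servers; any that pass the health check are re-inserted into the load balancer.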
for _, disabledURL := range backend.disabledURLs {
if err := checkHealth(disabledURL.url, backend); err == nil {
logger.Warnf("Health check up: Returning to server list. Backend: %q URL: %q Weight: %d",
backend.name, disabledURL.url.String(), disabledURL.weight)
if err = backend.LB.UpsertServer(disabledURL.url, roundrobin.Weight(disabledURL.weight)); err != nil {
logger.Error(err)
}
} else {
logger.Warnf("Health check still failing. Backend: %q URL: %q Reason: %s", backend.name, disabledURL.url.String(), err)
newDisabledURLs = append(newDisabledURLs, disabledURL)
}
}
backend.disabledURLs = newDisabledURLs
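	// Check the currently enabled servers; any that fail the health check are removed and tracked as disabled.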
for _, enableURL := range enabledURLs {
if err := checkHealth(enableURL, backend); err != nil {
weight := 1
rr, ok := backend.LB.(*roundrobin.RoundRobin)
if ok {
var gotWeight bool
weight, gotWeight = rr.ServerWeight(enableURL)
if !gotWeight {
weight = 1
}
}
logger.Warnf("Health check failed, removing from server list. Backend: %q URL: %q Weight: %d Reason: %s", backend.name, enableURL.String(), weight, err)
if err := backend.LB.RemoveServer(enableURL); err != nil {
logger.Error(err)
}
backend.disabledURLs = append(backend.disabledURLs, backendURL{enableURL, weight})
}
}
}
// GetHealthCheck returns the health check which is guaranteed to be a singleton.
func GetHealthCheck() *HealthCheck {
once.Do(func() {
singleton = newHealthCheck()
})
return singleton
}
func newHealthCheck() *HealthCheck {
return &HealthCheck{
Backends: make(map[string]*BackendConfig),
}
}
// NewBackendConfig Instantiate a new BackendConfig.
func NewBackendConfig(options Options, backendName string) *BackendConfig {
return &BackendConfig{
Options: options,
name: backendName,
}
}
// checkHealth returns a nil error in case it was successful and otherwise
// a non-nil error with a meaningful description why the health check failed.
func checkHealth(serverURL *url.URL, backend *BackendConfig) error {
req, err := backend.newRequest(serverURL)
if err != nil {
return fmt.Errorf("failed to create HTTP request: %w", err)
}
req = backend.addHeadersAndHost(req)
client := http.Client{
Timeout: backend.Options.Timeout,
Transport: backend.Options.Transport,
}
if !backend.FollowRedirects {
client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
}
}
resp, err := client.Do(req)
if err != nil {
return fmt.Errorf("HTTP request failed: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode < http.StatusOK || resp.StatusCode >= http.StatusBadRequest {
return fmt.Errorf("received error status code: %v", resp.StatusCode)
}
return nil
}
// NewLBStatusUpdater returns a new LbStatusUpdater.
func NewLBStatusUpdater(bh BalancerHandler, info *runtime.ServiceInfo) *LbStatusUpdater {
return &LbStatusUpdater{
BalancerHandler: bh,
serviceInfo: info,
}
}
// LbStatusUpdater wraps a BalancerHandler and a ServiceInfo,
// so it can keep track of the status of a server in the ServiceInfo.
type LbStatusUpdater struct {
BalancerHandler
serviceInfo *runtime.ServiceInfo // can be nil
}
// RemoveServer removes the given server from the BalancerHandler,
// and updates the status of the server to "DOWN".
func (lb *LbStatusUpdater) RemoveServer(u *url.URL) error {
err := lb.BalancerHandler.RemoveServer(u)
if err == nil && lb.serviceInfo != nil {
lb.serviceInfo.UpdateServerStatus(u.String(), serverDown)
}
return err
}
// UpsertServer adds the given server to the BalancerHandler,
// and updates the status of the server to "UP".
func (lb *LbStatusUpdater) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error {
err := lb.BalancerHandler.UpsertServer(u, options...)
if err == nil && lb.serviceInfo != nil {
lb.serviceInfo.UpdateServerStatus(u.String(), serverUp)
}
return err
}
// Balancers is a list of Balancers(s) that implements the Balancer interface.
type Balancers []Balancer
// Servers returns the servers url from all the BalancerHandler.
func (b Balancers) Servers() []*url.URL {
var servers []*url.URL
for _, lb := range b {
servers = append(servers, lb.Servers()...)
}
return servers
}
// RemoveServer removes the given server from all the BalancerHandler,
// and updates the status of the server to "DOWN".
func (b Balancers) RemoveServer(u *url.URL) error {
for _, lb := range b {
if err := lb.RemoveServer(u); err != nil {
return err
}
}
return nil
}
// UpsertServer adds the given server to all the BalancerHandler,
// and updates the status of the server to "UP".
func (b Balancers) UpsertServer(u *url.URL, options ...roundrobin.ServerOption) error {
for _, lb := range b {
if err := lb.UpsertServer(u, options...); err != nil {
return err
}
}
return nil
} | disabledURLs []backendURL
}
func (b *BackendConfig) newRequest(serverURL *url.URL) (*http.Request, error) { |
band_cn779_787.go | package band
import (
"time"
"github.com/brocaar/lorawan"
)
type cn779Band struct {
band
}
func (b *cn779Band) Name() string {
return "CN779"
}
func (b *cn779Band) GetDefaults() Defaults {
return Defaults{
RX2Frequency: 786000000,
RX2DataRate: 0,
ReceiveDelay1: time.Second,
ReceiveDelay2: time.Second * 2,
JoinAcceptDelay1: time.Second * 5,
JoinAcceptDelay2: time.Second * 6,
}
}
func (b *cn779Band) GetDownlinkTXPower(freq int) int {
return 10
}
func (b *cn779Band) GetDefaultMaxUplinkEIRP() float32 {
return 12.15
}
func (b *cn779Band) GetPingSlotFrequency(lorawan.DevAddr, time.Duration) (int, error) {
return 785000000, nil
}
func (b *cn779Band) GetRX1ChannelIndexForUplinkChannelIndex(uplinkChannel int) (int, error) {
return uplinkChannel, nil
}
func (b *cn779Band) GetRX1FrequencyForUplinkFrequency(uplinkFrequency int) (int, error) {
return uplinkFrequency, nil
}
func (b *cn779Band) ImplementsTXParamSetup(protocolVersion string) bool {
return false
}
func newCN779Band(repeaterCompatible bool) (Band, error) | {
b := cn779Band{
band: band{
supportsExtraChannels: true,
dataRates: map[int]DataRate{
0: {Modulation: LoRaModulation, SpreadFactor: 12, Bandwidth: 125, uplink: true, downlink: true},
1: {Modulation: LoRaModulation, SpreadFactor: 11, Bandwidth: 125, uplink: true, downlink: true},
2: {Modulation: LoRaModulation, SpreadFactor: 10, Bandwidth: 125, uplink: true, downlink: true},
3: {Modulation: LoRaModulation, SpreadFactor: 9, Bandwidth: 125, uplink: true, downlink: true},
4: {Modulation: LoRaModulation, SpreadFactor: 8, Bandwidth: 125, uplink: true, downlink: true},
5: {Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 125, uplink: true, downlink: true},
6: {Modulation: LoRaModulation, SpreadFactor: 7, Bandwidth: 250, uplink: true, downlink: true},
7: {Modulation: FSKModulation, BitRate: 50000, uplink: true, downlink: true},
},
rx1DataRateTable: map[int][]int{
0: {0, 0, 0, 0, 0, 0},
1: {1, 0, 0, 0, 0, 0},
2: {2, 1, 0, 0, 0, 0},
3: {3, 2, 1, 0, 0, 0},
4: {4, 3, 2, 1, 0, 0},
5: {5, 4, 3, 2, 1, 0},
6: {6, 5, 4, 3, 2, 1},
7: {7, 6, 5, 4, 3, 2},
},
txPowerOffsets: []int{
0,
-2,
-4,
-6,
-8,
-10,
},
uplinkChannels: []Channel{
{Frequency: 779500000, MinDR: 0, MaxDR: 5, enabled: true},
{Frequency: 779700000, MinDR: 0, MaxDR: 5, enabled: true},
{Frequency: 779900000, MinDR: 0, MaxDR: 5, enabled: true},
},
downlinkChannels: []Channel{
{Frequency: 779500000, MinDR: 0, MaxDR: 5, enabled: true},
{Frequency: 779700000, MinDR: 0, MaxDR: 5, enabled: true},
{Frequency: 779900000, MinDR: 0, MaxDR: 5, enabled: true},
},
},
}
if repeaterCompatible {
b.band.maxPayloadSizePerDR = map[string]map[string]map[int]MaxPayloadSize{
LoRaWAN_1_0_0: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.0
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 230, N: 222},
5: {M: 230, N: 222},
6: {M: 250, N: 242},
7: {M: 230, N: 222},
},
},
LoRaWAN_1_0_1: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.1
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 230, N: 222},
5: {M: 230, N: 222},
6: {M: 250, N: 242},
7: {M: 230, N: 222},
},
},
LoRaWAN_1_0_2: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.2A, LoRaWAN 1.0.2B
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 230, N: 222},
5: {M: 230, N: 222},
6: {M: 250, N: 242},
7: {M: 230, N: 222},
},
},
LoRaWAN_1_0_3: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.3A
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 230, N: 222},
5: {M: 230, N: 222},
6: {M: 250, N: 242},
7: {M: 230, N: 222},
},
},
latest: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // RP002-1.0.0, RP002-1.0.1
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 230, N: 222},
5: {M: 230, N: 222},
6: {M: 230, N: 222},
7: {M: 230, N: 222},
},
},
}
} else {
b.band.maxPayloadSizePerDR = map[string]map[string]map[int]MaxPayloadSize{
LoRaWAN_1_0_0: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.0
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 250, N: 242},
5: {M: 250, N: 242},
6: {M: 250, N: 242},
7: {M: 250, N: 242},
},
},
LoRaWAN_1_0_1: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.1
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 250, N: 242},
5: {M: 250, N: 242},
6: {M: 250, N: 242},
7: {M: 250, N: 242},
},
},
LoRaWAN_1_0_2: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.2A, LoRaWAN 1.0.2B
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 250, N: 242},
5: {M: 250, N: 242},
6: {M: 250, N: 242},
7: {M: 250, N: 242},
},
},
LoRaWAN_1_0_3: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // LoRaWAN 1.0.3A
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 250, N: 242},
5: {M: 250, N: 242},
6: {M: 250, N: 242},
7: {M: 250, N: 242},
},
},
latest: map[string]map[int]MaxPayloadSize{
latest: map[int]MaxPayloadSize{ // RP002-1.0.0, RP002-1.0.1
0: {M: 59, N: 51},
1: {M: 59, N: 51},
2: {M: 59, N: 51},
3: {M: 123, N: 115},
4: {M: 250, N: 242},
5: {M: 250, N: 242},
6: {M: 250, N: 242},
7: {M: 250, N: 242},
},
},
}
}
return &b, nil
} |
|
acme.component.ts | /*
acme.component.ts
Copyright (c) 2020 Acme
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
import { Component, OnInit, OnDestroy } from '@angular/core';
import { HttpService } from '../../http.service';
import { Observable, Subscription } from 'rxjs';
import { NotificationService } from '../../notification.service';
import { AcmeResponse } from './acme.type';
import { ChecklistItemStates } from '../../checklist-item/checklist-item.component';
import { TranslateService } from '@ngx-translate/core';
import { AlertMessages } from '../../alert/alert.model';
@Component({
selector: 'app-acme',
templateUrl: './acme.component.html'
})
export class | implements OnInit, OnDestroy {
private subscriptions: Subscription;
status = ChecklistItemStates.IDLE;
mainTitle = '';
constructor(private readonly http: HttpService,
private readonly notificationService: NotificationService,
private readonly translate: TranslateService) {}
ngOnInit() {
this.subscriptions = new Subscription();
this.subscriptions.add(this.notificationService.acmeCheckTriggered$.subscribe(() => {
this.status = ChecklistItemStates.LOADING;
this.onCheckAcme().subscribe(
result => this.notificationService.publishAcmeCheckResult(result),
error => {
this.notificationService.showErrorAlert(AlertMessages.COULD_NOT_CHECK_ACME);
this.notificationService.publishAcmeCheckResult({successful: false, message: error.message});
}
);
}));
this.subscriptions.add(this.notificationService.acmeCheckCompleted$.subscribe(result => {
this.status = result.successful ? ChecklistItemStates.COMPLETED : ChecklistItemStates.FAILED;
}));
this.subscriptions.add(this.translate.get('acme').subscribe(translation => {
this.mainTitle = translation;
}));
}
ngOnDestroy() {
this.subscriptions.unsubscribe();
}
onCheckAcme(): Observable<AcmeResponse> {
return this.http.getAcmeAvailability();
}
}
| AcmeComponent |
sanitize-test-name-test.ts | import { sanitizedBranchName } from '../../src/lib/sanitize-branch'
describe('sanitizedBranchName', () => {
it('leaves a good branch name alone', () => {
const branchName = 'this-is/fine'
const result = sanitizedBranchName(branchName)
expect(result).toBe('this-is/fine')
})
it('replaces invalid characters with dashes', () => {
const branchName = '.this..is\\not fine:yo?|is-it'
const result = sanitizedBranchName(branchName)
expect(result).toBe('this-is-not-fine-yo-is-it')
})
it('does not allow branch name to end in slash', () => {
const branchName = 'hello/'
const result = sanitizedBranchName(branchName)
expect(result).toBe('hello-')
})
it('does not allow name to start with plus', () => {
const branchName = '++but-can-still-keep-the-rest'
const result = sanitizedBranchName(branchName)
expect(result).toBe('but-can-still-keep-the-rest')
})
it('does not allow name to start with minus', () => {
const branchName = '--but-can-still-keep-the-rest'
const result = sanitizedBranchName(branchName)
expect(result).toBe('but-can-still-keep-the-rest')
})
it('does not allow name to end in `.lock`', () => {
const branchName = 'foo.lock.lock'
const result = sanitizedBranchName(branchName)
expect(result).toBe('foo.lock-')
})
it('replaces newlines with dash', () => {
const branchName = 'hello\r\nworld'
const result = sanitizedBranchName(branchName)
expect(result).toBe('hello-world')
})
it('removes starting dot', () => {
const branchName = '.first.dot.is.not.ok'
const result = sanitizedBranchName(branchName)
expect(result).toBe('first.dot.is.not.ok')
})
it('allows double dashes after first character', () => {
const branchName = 'branch--name'
const result = sanitizedBranchName(branchName) | expect(result).toBe(branchName)
})
}) | |
main_test.go | // +build integration
package main
import (
"bytes"
"context"
"encoding/json"
"flag"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"testing"
"time"
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/openstack"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/osbuild/osbuild-composer/cmd/osbuild-image-tests/constants"
"github.com/osbuild/osbuild-composer/internal/boot"
"github.com/osbuild/osbuild-composer/internal/boot/azuretest"
"github.com/osbuild/osbuild-composer/internal/boot/openstacktest"
"github.com/osbuild/osbuild-composer/internal/boot/vmwaretest"
"github.com/osbuild/osbuild-composer/internal/common"
"github.com/osbuild/osbuild-composer/internal/test"
"github.com/osbuild/osbuild-composer/internal/upload/vmware"
)
type testcaseStruct struct {
ComposeRequest struct {
Distro string
Arch string
Filename string
} `json:"compose-request"`
Manifest json.RawMessage
ImageInfo json.RawMessage `json:"image-info"`
Boot *struct {
Type string
}
}
var disableLocalBoot = flag.Bool("disable-local-boot", false, "when this flag is given, no images are booted locally using qemu (this does not affect testing in clouds)")
var failLocalBoot = flag.Bool("fail-local-boot", true, "when this flag is on (default), local boot will fail. Usually indicates missing cloud credentials")
var skipSELinuxCtxCheck = flag.Bool("skip-selinux-ctx-check", false, "when this flag is on, the 'selinux/context-mismatch' part is removed from the image-info report before it is checked.")
// runOsbuild runs osbuild with the specified manifest and output-directory.
func runOsbuild(manifest []byte, store, outputDirectory string, exports []string) error {
cmd := constants.GetOsbuildCommand(store, outputDirectory, exports)
cmd.Stdin = bytes.NewReader(manifest)
var outBuffer, errBuffer bytes.Buffer
cmd.Stdout = &outBuffer
cmd.Stderr = &errBuffer
err := cmd.Run()
if err != nil {
fmt.Println("stdout:")
// stdout is json, indent it, otherwise we get a huge one-liner
var formattedStdout bytes.Buffer
indentErr := json.Indent(&formattedStdout, outBuffer.Bytes(), "", " ")
if indentErr == nil {
fmt.Println(formattedStdout.String())
} else {
// fallback to raw output if json indent failed
fmt.Println(outBuffer.String())
}
// stderr isn't structured, print it as is
fmt.Printf("stderr:\n%s", errBuffer.String())
return fmt.Errorf("running osbuild failed: %v", err)
}
return nil
}
// Delete the 'selinux/context-mismatch' part of the image-info report to
// work around https://bugzilla.redhat.com/show_bug.cgi?id=1973754
func deleteSELinuxCtxFromImageInfoReport(imageInfoReport interface{}) {
imageInfoMap := imageInfoReport.(map[string]interface{})
selinuxReport, exists := imageInfoMap["selinux"]
if exists {
selinuxReportMap := selinuxReport.(map[string]interface{})
delete(selinuxReportMap, "context-mismatch")
}
}
// testImageInfo runs image-info on image specified by imageImage and
// compares the result with expected image info
func testImageInfo(t *testing.T, imagePath string, rawImageInfoExpected []byte) {
var imageInfoExpected interface{}
err := json.Unmarshal(rawImageInfoExpected, &imageInfoExpected)
require.NoErrorf(t, err, "cannot decode expected image info: %v", err)
cmd := constants.GetImageInfoCommand(imagePath)
cmd.Stderr = os.Stderr
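	// stream image-info's stdout through a pipe so it can be decoded as JSON while the command runs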
reader, writer := io.Pipe()
cmd.Stdout = writer
err = cmd.Start()
require.NoErrorf(t, err, "image-info cannot start: %v", err)
var imageInfoGot interface{}
err = json.NewDecoder(reader).Decode(&imageInfoGot)
require.NoErrorf(t, err, "decoding image-info output failed: %v", err)
err = cmd.Wait()
require.NoErrorf(t, err, "running image-info failed: %v", err)
if *skipSELinuxCtxCheck {
fmt.Println("ignoring 'selinux/context-mismatch' part of the image-info report")
deleteSELinuxCtxFromImageInfoReport(imageInfoExpected)
deleteSELinuxCtxFromImageInfoReport(imageInfoGot)
}
assert.Equal(t, imageInfoExpected, imageInfoGot)
}
type timeoutError struct{}
func (*timeoutError) Error() string { return "" }
// trySSHOnce tries to test the running image using ssh once.
// It returns timeoutError if the ssh command returns 255, if it runs for more
// than 10 seconds, or if systemd-is-running returns starting.
// It returns nil if systemd-is-running returns running or degraded.
// It can also return other errors in other error cases.
func trySSHOnce(address string, privateKey string, ns *boot.NetNS) error {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
cmdName := "ssh"
cmdArgs := []string{
"-p", "22",
"-i", privateKey,
"-o", "StrictHostKeyChecking=no",
"-o", "UserKnownHostsFile=/dev/null",
"redhat@" + address,
"systemctl --wait is-system-running",
}
var cmd *exec.Cmd
if ns != nil {
cmd = ns.NamespacedCommandContext(ctx, cmdName, cmdArgs...)
} else {
cmd = exec.CommandContext(ctx, cmdName, cmdArgs...)
}
output, err := cmd.Output()
if ctx.Err() == context.DeadlineExceeded {
return &timeoutError{}
}
if err != nil {
if exitError, ok := err.(*exec.ExitError); ok {
if exitError.ExitCode() == 255 {
return &timeoutError{}
}
} else {
return fmt.Errorf("ssh command failed from unknown reason: %v", err)
}
}
outputString := strings.TrimSpace(string(output))
switch outputString {
case "running":
return nil
case "degraded":
log.Print("ssh test passed, but the system is degraded")
return nil
case "starting":
return &timeoutError{}
default:
return fmt.Errorf("ssh test failed, system status is: %s", outputString)
}
}
// testSSH tests the running image using ssh.
// It tries 20 attempts before giving up. If a major error occurs, it might
// return earlier.
func testSSH(t *testing.T, address string, privateKey string, ns *boot.NetNS) {
const attempts = 20
for i := 0; i < attempts; i++ {
err := trySSHOnce(address, privateKey, ns)
if err == nil {
// pass the test
return
}
// if any other error than the timeout one happened, fail the test immediately
if _, ok := err.(*timeoutError); !ok {
t.Fatal(err)
}
fmt.Println(err)
time.Sleep(10 * time.Second)
}
t.Errorf("ssh test failure, %d attempts were made", attempts)
}
func testBootUsingQemu(t *testing.T, imagePath string) {
if *failLocalBoot {
t.Fatal("-fail-local-boot specified. Check missing cloud credentials!")
}
bootWithQemu(t, imagePath)
}
// will not fail even if -fail-local-boot is specified
func bootWithQemu(t *testing.T, imagePath string) {
if *disableLocalBoot {
t.Skip("local booting was disabled by -disable-local-boot, skipping")
}
err := boot.WithNetworkNamespace(func(ns boot.NetNS) error {
return boot.WithBootedQemuImage(imagePath, ns, func() error {
testSSH(t, "localhost", constants.TestPaths.PrivateKey, &ns)
return nil
})
})
require.NoError(t, err)
}
func testBootUsingNspawnImage(t *testing.T, imagePath string) {
err := boot.WithNetworkNamespace(func(ns boot.NetNS) error {
return boot.WithBootedNspawnImage(imagePath, ns, func() error {
testSSH(t, "localhost", constants.TestPaths.PrivateKey, &ns)
return nil
})
})
require.NoError(t, err)
}
func testBootUsingNspawnDirectory(t *testing.T, imagePath string) {
err := boot.WithNetworkNamespace(func(ns boot.NetNS) error {
return boot.WithExtractedTarArchive(imagePath, func(dir string) error {
return boot.WithBootedNspawnDirectory(dir, ns, func() error {
testSSH(t, "localhost", constants.TestPaths.PrivateKey, &ns)
return nil
})
})
})
require.NoError(t, err)
}
func testBootUsingAWS(t *testing.T, imagePath string) {
creds, err := boot.GetAWSCredentialsFromEnv()
require.NoError(t, err)
// if no credentials are given, fall back to qemu
if creds == nil {
log.Print("no AWS credentials given, falling back to booting using qemu")
testBootUsingQemu(t, imagePath)
return
}
imageName, err := test.GenerateCIArtifactName("osbuild-image-tests-image-")
require.NoError(t, err)
e, err := boot.NewEC2(creds)
require.NoError(t, err)
// the following line should be done by osbuild-composer at some point
err = boot.UploadImageToAWS(creds, imagePath, imageName)
require.NoErrorf(t, err, "upload to amazon failed, resources could have been leaked")
imageDesc, err := boot.DescribeEC2Image(e, imageName)
require.NoErrorf(t, err, "cannot describe the ec2 image")
// delete the image after the test is over
defer func() {
err = boot.DeleteEC2Image(e, imageDesc)
require.NoErrorf(t, err, "cannot delete the ec2 image, resources could have been leaked")
}()
securityGroupName, err := test.GenerateCIArtifactName("osbuild-image-tests-security-group-")
require.NoError(t, err)
instanceTypeForArch := map[string]string{
"x86_64": "t3.micro",
"aarch64": "t4g.micro",
}
instanceType, exists := instanceTypeForArch[common.CurrentArch()]
if !exists {
panic("unsupported AWS arch")
}
// boot the uploaded image and try to connect to it
err = boot.WithSSHKeyPair(func(privateKey, publicKey string) error {
return boot.WithBootedImageInEC2(e, securityGroupName, imageDesc, publicKey, instanceType, func(address string) error {
testSSH(t, address, privateKey, nil)
return nil
})
})
require.NoError(t, err)
}
func testBootUsingAzure(t *testing.T, imagePath string) {
creds, err := azuretest.GetAzureCredentialsFromEnv()
require.NoError(t, err)
// if no credentials are given, fall back to qemu
if creds == nil {
log.Print("no Azure credentials given, falling back to booting using qemu")
testBootUsingQemu(t, imagePath)
return
}
// create a random test id to name all the resources used in this test
testId, err := test.GenerateCIArtifactName("")
require.NoError(t, err)
imageName := "image-" + testId + ".vhd"
// the following line should be done by osbuild-composer at some point
err = azuretest.UploadImageToAzure(creds, imagePath, imageName)
require.NoErrorf(t, err, "upload to azure failed, resources could have been leaked")
// delete the image after the test is over
defer func() {
err = azuretest.DeleteImageFromAzure(creds, imageName)
require.NoErrorf(t, err, "cannot delete the azure image, resources could have been leaked")
}()
// boot the uploaded image and try to connect to it
err = boot.WithSSHKeyPair(func(privateKey, publicKey string) error {
return azuretest.WithBootedImageInAzure(creds, imageName, testId, publicKey, func(address string) error {
testSSH(t, address, privateKey, nil)
return nil
})
})
require.NoError(t, err)
}
func testBootUsingOpenStack(t *testing.T, imagePath string) {
creds, err := openstack.AuthOptionsFromEnv()
// if no credentials are given, fall back to qemu
if (creds == gophercloud.AuthOptions{}) {
log.Print("No OpenStack credentials given, falling back to booting using qemu")
testBootUsingQemu(t, imagePath)
return
}
require.NoError(t, err)
// provider is the top-level client that all OpenStack services derive from
provider, err := openstack.AuthenticatedClient(creds)
require.NoError(t, err)
// create a random test id to name all the resources used in this test
imageName, err := test.GenerateCIArtifactName("osbuild-image-tests-openstack-image-")
require.NoError(t, err)
// the following line should be done by osbuild-composer at some point
image, err := openstacktest.UploadImageToOpenStack(provider, imagePath, imageName)
require.NoErrorf(t, err, "Upload to OpenStack failed, resources could have been leaked")
require.NotNil(t, image)
// delete the image after the test is over
defer func() {
err = openstacktest.DeleteImageFromOpenStack(provider, image.ID)
require.NoErrorf(t, err, "Cannot delete OpenStack image, resources could have been leaked")
}()
// boot the uploaded image and try to connect to it
err = boot.WithSSHKeyPair(func(privateKey, publicKey string) error {
userData, err := boot.CreateUserData(publicKey)
require.NoErrorf(t, err, "Creating user data failed: %v", err)
return openstacktest.WithBootedImageInOpenStack(provider, image.ID, userData, func(address string) error {
testSSH(t, address, privateKey, nil)
return nil
})
})
require.NoError(t, err)
}
func testBootUsingVMware(t *testing.T, imagePath string) {
creds, err := vmwaretest.AuthOptionsFromEnv()
// if no credentials are given, fall back to qemu
if creds == nil {
log.Print("No vCenter credentials given, falling back to booting using qemu")
log.Printf("Error=%v", err)
testBootUsingQemu(t, imagePath)
return
}
require.NoError(t, err)
// convert to streamOptimized vmdk
imageF, err := vmware.OpenAsStreamOptimizedVmdk(imagePath)
require.NoError(t, err)
// we don't need the file descriptor to be opened b/c import.vmdk operates
// on the file path
imageF.Close()
imagePath = imageF.Name()
require.NotEqual(t, "", imagePath)
defer os.Remove(imagePath)
// create a random test id to name all the resources used in this test
imageName, err := test.GenerateCIArtifactName("osbuild-image-tests-vmware-image-")
require.NoError(t, err)
// the following line should be done by osbuild-composer at some point
err = vmwaretest.ImportImage(creds, imagePath, imageName)
require.NoErrorf(t, err, "Upload to vCenter failed, resources could have been leaked")
// delete the image after the test is over
defer func() {
err = vmwaretest.DeleteImage(creds, imageName)
require.NoErrorf(t, err, "Cannot delete image from vCenter, resources could have been leaked")
}()
// boot the uploaded image and try to connect to it
err = vmwaretest.WithSSHKeyPair(func(privateKey, publicKey string) error {
return vmwaretest.WithBootedImage(creds, imagePath, imageName, publicKey, func(address string) error {
testSSH(t, address, privateKey, nil)
return nil
})
})
require.NoError(t, err)
}
// testBoot tests if the image is able to successfully boot
// Before the test it boots the image respecting the specified bootType.
// The test passes if the function is able to connect to the image via ssh
// in a defined number of attempts and systemd-is-running returns running
// or degraded status.
func testBoot(t *testing.T, imagePath string, bootType string) {
switch bootType {
case "qemu":
bootWithQemu(t, imagePath)
case "nspawn":
testBootUsingNspawnImage(t, imagePath)
case "nspawn-extract":
testBootUsingNspawnDirectory(t, imagePath)
case "aws":
testBootUsingAWS(t, imagePath)
case "azure":
testBootUsingAzure(t, imagePath)
case "openstack":
testBootUsingOpenStack(t, imagePath)
case "vmware":
testBootUsingVMware(t, imagePath)
default:
panic("unknown boot type!")
}
}
// testImage performs a series of tests specified in the testcase
// on an image
func testImage(t *testing.T, testcase testcaseStruct, imagePath string) {
if testcase.ImageInfo != nil {
t.Run("image info", func(t *testing.T) {
testImageInfo(t, imagePath, testcase.ImageInfo)
})
}
if testcase.Boot != nil {
t.Run("boot", func(t *testing.T) {
testBoot(t, imagePath, testcase.Boot.Type)
})
}
}
// guessPipelineToExport returns a best-effort guess about which
// pipeline should be exported when running osbuild for the testcase
//
// If this function detects that this is a version 1 manifest, it
// always returns "assembler"
//
// For manifests version 2, the name of the last pipeline is returned.
func guessPipelineToExport(rawManifest json.RawMessage) string {
const v1ManifestExportName = "assembler"
var v2Manifest struct {
Version string `json:"version"`
Pipelines []struct {
Name string `json:"name,omitempty"`
} `json:"pipelines"`
}
err := json.Unmarshal(rawManifest, &v2Manifest)
if err != nil {
// if we cannot unmarshal, let's just assume that it's a version 1 manifest
return v1ManifestExportName
}
if v2Manifest.Version == "2" {
return v2Manifest.Pipelines[len(v2Manifest.Pipelines)-1].Name
}
return v1ManifestExportName
}
// runTestcase builds the pipeline specified in the testcase and then it
// tests the result
func | (t *testing.T, testcase testcaseStruct, store string) {
_ = os.Mkdir("/var/lib/osbuild-composer-tests", 0755)
outputDirectory, err := ioutil.TempDir("/var/lib/osbuild-composer-tests", "osbuild-image-tests-*")
require.NoError(t, err, "error creating temporary output directory")
defer func() {
err := os.RemoveAll(outputDirectory)
require.NoError(t, err, "error removing temporary output directory")
}()
exports := []string{guessPipelineToExport(testcase.Manifest)}
err = runOsbuild(testcase.Manifest, store, outputDirectory, exports)
require.NoError(t, err)
for _, export := range exports {
imagePath := filepath.Join(outputDirectory, export, testcase.ComposeRequest.Filename)
testImage(t, testcase, imagePath)
}
}
// getAllCases returns paths to all testcases in the testcase directory
func getAllCases() ([]string, error) {
cases, err := ioutil.ReadDir(constants.TestPaths.TestCasesDirectory)
if err != nil {
return nil, fmt.Errorf("cannot list test cases: %v", err)
}
casesPaths := []string{}
for _, c := range cases {
if c.IsDir() {
continue
}
casePath := fmt.Sprintf("%s/%s", constants.TestPaths.TestCasesDirectory, c.Name())
casesPaths = append(casesPaths, casePath)
}
return casesPaths, nil
}
// runTests opens, parses and runs all the specified testcases
func runTests(t *testing.T, cases []string) {
_ = os.Mkdir("/var/lib/osbuild-composer-tests", 0755)
store, err := ioutil.TempDir("/var/lib/osbuild-composer-tests", "osbuild-image-tests-*")
require.NoError(t, err, "error creating temporary store")
defer func() {
err := os.RemoveAll(store)
require.NoError(t, err, "error removing temporary store")
}()
for _, p := range cases {
t.Run(path.Base(p), func(t *testing.T) {
f, err := os.Open(p)
if err != nil {
t.Skipf("%s: cannot open test case: %v", p, err)
}
var testcase testcaseStruct
err = json.NewDecoder(f).Decode(&testcase)
require.NoErrorf(t, err, "%s: cannot decode test case", p)
currentArch := common.CurrentArch()
if testcase.ComposeRequest.Arch != currentArch {
t.Skipf("the required arch is %s, the current arch is %s", testcase.ComposeRequest.Arch, currentArch)
}
runTestcase(t, testcase, store)
})
}
}
func TestImages(t *testing.T) {
cases := flag.Args()
// if no cases were specified, run the default set
if len(cases) == 0 {
var err error
cases, err = getAllCases()
require.NoError(t, err)
}
runTests(t, cases)
}
| runTestcase |
ModifierNonStandardCharRemover.test.ts | import { ModifierNonStandardCharRemoverHandler } from './ModifierNonStandardCharRemover';
describe('ModifierNonStandardCharRemover', () => {
it('should remove the emoji', () => {
expect(
new ModifierNonStandardCharRemoverHandler().run(new FormContext(), {}, {} as BaseField, 'this guy should not be here. 😁 <- yes, this guy')
).toEqual({ 'value': 'this guy should not be here. <- yes, this guy' });
});
}); | import { BaseField } from '../fields';
import { FormContext } from '../form'; |
|
antManufacturers.js | var test = require('tape'); |
import { antManufacturers } from '../src/parser';
test('should get stages manufacturer', assert => {
const line = '[13:41:35] ANT : dID 775327 MFG 69 Model 1';
const expect = 'Stages';
const actual = antManufacturers(line);
assert.equal(
actual[0].manufacturer,
expect,
'manufacturer should be Stages Cycling'
);
assert.end();
});
test('should get 4iiiis manufacturer', assert => {
const line = '[6:33:40] ANT : dID 766475 MFG 51 Model 7';
const expect = '4iiii';
const actual = antManufacturers(line);
assert.equal(actual[0].manufacturer, expect, 'manufacturer should be 4iiiis');
assert.end();
});
test('should get quarq manufacturer', assert => {
const line = '[10:58:43] ANT : dID 743018 MFG 7 Model 1';
const expect = 'Quarq';
const actual = antManufacturers(line);
assert.equal(actual[0].manufacturer, expect, 'manufacturer should be Quarq');
assert.end();
}); | |
run.py | #!/usr/bin/env python
import argparse
import os
import sys
import json
import shutil
#from src.gradcam import *
data_ingest_params = './config/data-params.json'
fp_params = './config/file_path.json'
gradcam_params = './config/gradcam_params.json'
ig_params = './config/ig_params.json'
train_params = './config/train_params.json'
test_params = './config/test_params.json'
def load_params(fp):
|
def main(targets):
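    """Run the requested pipeline targets: clean, gradcam, training, testing, ig."""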
if 'clean' in targets:
shutil.rmtree('results/gradcam/', ignore_errors=True)
shutil.rmtree('results/model_prediction/', ignore_errors=True)
shutil.rmtree('results/integrated_gradient/', ignore_errors=True)
os.mkdir('results/gradcam')
os.mkdir('results/model_prediction')
os.mkdir('results/integrated_gradient')
if "gradcam" in targets:
# Check if directory "results" is created
if not os.path.isdir('results/gradcam'):
os.makedirs('results/gradcam')
gradcam_fp = load_params(fp_params)['gradcam_path']
input_gradcam_params = load_params(gradcam_params)
input_images = input_gradcam_params["load_image_path"]["image_input_path_train_covered"]
save_images = input_gradcam_params['save_image_path']
model_path = input_gradcam_params['model_path']
if "custom_image_path" in input_gradcam_params:
custom_image_path = input_gradcam_params['custom_image_path']
os.system("python " + gradcam_fp + " --image-path " + input_images + " --custom-image-path " + custom_image_path + " --save-path-gb " + save_images['gb_path'] + " --save-path-cam-gb " + save_images['cam_gb_path'] + " --save-path-cam " + save_images['cam_path'] + " --model-path " + model_path + " --use-cuda")
else:
os.system("python " + gradcam_fp + " --image-path " + input_images + " --save-path-gb " + save_images['gb_path'] + " --save-path-cam-gb " + save_images['cam_gb_path'] + " --save-path-cam " + save_images['cam_path'] + " --model-path " + model_path + " --use-cuda")
if "training" in targets:
if not os.path.isdir('models'):
os.makedirs('models')
train_fp = load_params(fp_params)['train_path']
input_train_params = load_params(train_params)
model_name = input_train_params['model_name']
feature_extract = input_train_params['feature_extracting']
batch_size = input_train_params['batch_size']
learning_rate = input_train_params['learning_rate']
num_epochs = input_train_params['num_epochs']
if feature_extract:
os.system("python " + train_fp + " --model-name " + model_name + " --batch-size " + str(batch_size) + " --learning-rate " + str(learning_rate) + " --num-epochs " + str(num_epochs) + " --use-cuda --feature-extracting")
else:
os.system("python " + train_fp + " --model-name " + model_name + " --batch-size " + str(batch_size) + " --learning-rate " + str(learning_rate) + " --num-epochs " + str(num_epochs) + " --use-cuda")
if "testing" in targets:
if not os.path.isdir('models'):
print("No models available. Train a model first")
sys.exit(0)
if not os.path.isdir('results/model_prediction'):
os.mkdir('results/model_prediction')
test_fp = load_params(fp_params)['test_path']
input_test_params = load_params(test_params)
model_name = input_test_params['model_name']
model_path = input_test_params['model_path']
batch_size = input_test_params['batch_size']
test_size = input_test_params['test_size']
if model_name not in model_path:
print("Model name and model path mismatch, please check your parameters again!")
sys.exit(0)
if "custom_image_path" in input_test_params:
custom_image_path = input_test_params['custom_image_path']
os.system("python " + test_fp + " --model-name " + model_name + " --model-path " + model_path + " --custom-image-path " + custom_image_path + " --batch-size " + str(batch_size) + " --use-cuda")
else:
os.system("python " + test_fp + " --model-name " + model_name + " --model-path " + model_path + " --batch-size " + str(batch_size) + " --test-size " + str(test_size) + " --use-cuda")
if "ig" in targets:
if not os.path.isdir('models'):
print("No models available. Train a model first")
sys.exit(0)
if not os.path.isdir('results/integrated_gradient'):
os.mkdir('results/integrated_gradient')
ig_fp = load_params(fp_params)['ig_path']
input_ig_params = load_params(ig_params)
img_load_path = input_ig_params['image_load_path']
img_save_path = input_ig_params['image_save_path']
model_path = input_ig_params['model_path']
if "custom_image_path" in input_ig_params:
custom_image_path = input_ig_params['custom_image_path']
os.system("python " + ig_fp + " --custom-image-path " + custom_image_path + " --img-load-path " + img_load_path + " --img-save-path " + img_save_path + " --model-path " + model_path + " --use-cuda")
else:
os.system("python " + ig_fp + " --img-load-path " + img_load_path + " --img-save-path " + img_save_path + " --model-path " + model_path + " --use-cuda")
if __name__ == '__main__':
if not os.path.isdir('results'):
os.makedirs('results')
targets = sys.argv[1:]
main(targets)
| with open(fp) as fh:
param = json.load(fh)
return param |
__init__.py | """Module providing generic sequences that are used throught Embiggen."""
from embiggen.sequences.generic_sequences.edge_prediction_sequence import EdgePredictionSequence
| __all__ = [
"EdgePredictionSequence"
] |
|
test_flac3d.py | import copy
import pathlib
import sys
import helpers
import numpy
import pytest
import meshio
@pytest.mark.parametrize(
"mesh, binary, data",
[
(helpers.tet_mesh, False, []),
(helpers.hex_mesh, False, []),
(helpers.tet_mesh, False, [1, 2]),
(helpers.tet_mesh, True, []),
(helpers.hex_mesh, True, []),
(helpers.tet_mesh, True, [1, 2]),
],
)
def test(mesh, binary, data):
|
# the failure perhaps has to do with dictionary ordering
@pytest.mark.skipif(sys.version_info < (3, 6), reason="Fails with 3.5")
@pytest.mark.parametrize(
"filename", ["flac3d_mesh_ex.f3grid", "flac3d_mesh_ex_bin.f3grid"],
)
def test_reference_file(filename):
this_dir = pathlib.Path(__file__).resolve().parent
filename = this_dir / "meshes" / "flac3d" / filename
mesh = meshio.read(filename)
# points
assert numpy.isclose(mesh.points.sum(), 307.0)
# cells
ref_num_cells = [
("hexahedron", 45),
("pyramid", 9),
("hexahedron", 18),
("wedge", 9),
("hexahedron", 6),
("wedge", 3),
("hexahedron", 6),
("wedge", 3),
("pyramid", 6),
("tetra", 3),
]
assert [(k, len(v)) for k, v in mesh.cells] == ref_num_cells
# Cell data
ref_sum_cell_data = [45, 9, 18, 9, 6, 3, 6, 3, 6, 3]
assert [len(arr) for arr in mesh.cell_data["flac3d:zone"]] == ref_sum_cell_data
| if data:
mesh = copy.deepcopy(mesh)
mesh.cell_data["flac3d:zone"] = [numpy.array(data)]
helpers.write_read(
lambda f, m: meshio.flac3d.write(f, m, binary=binary),
meshio.flac3d.read,
mesh,
1.0e-15,
) |
UpdateDataSourceCommand.ts | import { KendraClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KendraClient";
import { UpdateDataSourceRequest } from "../models/models_0";
import {
deserializeAws_json1_1UpdateDataSourceCommand,
serializeAws_json1_1UpdateDataSourceCommand,
} from "../protocols/Aws_json1_1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type UpdateDataSourceCommandInput = UpdateDataSourceRequest;
export type UpdateDataSourceCommandOutput = __MetadataBearer;
/**
* <p>Updates an existing Amazon Kendra data source.</p>
*/
export class | extends $Command<
UpdateDataSourceCommandInput,
UpdateDataSourceCommandOutput,
KendraClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: UpdateDataSourceCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: KendraClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<UpdateDataSourceCommandInput, UpdateDataSourceCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "KendraClient";
const commandName = "UpdateDataSourceCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: UpdateDataSourceRequest.filterSensitiveLog,
outputFilterSensitiveLog: (output: any) => output,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: UpdateDataSourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_json1_1UpdateDataSourceCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<UpdateDataSourceCommandOutput> {
return deserializeAws_json1_1UpdateDataSourceCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
}
| UpdateDataSourceCommand |
010_logs_and_prints.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
"""
When running computations in the background or through a scheduler such as
Slurm, print statements are lost. Although you can redirect the standard outputs
in the former case, the procedure is not as straightforward in the latter case.
Fortunately, Clustertools offers a unified way of managing such things by creating
a log file in a standard fashion (independent of the environment/backend).
The `clustertools` utility offers a simple way to get the log of each
computation. Simply run `clustertools display log <exp_name> <comp_number>`.
1. Run `python 000_reset.py` to reset the experiment.
2. Run `python 010_logs_and_prints.py front-end`
3. Run `clustertools display log BasicUsage 0` to print the log
You can play with the computation number
"""
from clustertools import Computation, CTParser, ParameterSet, \
Experiment, set_stdout_logging
class MyComputation(Computation):
"""
    Inherit from `Computation` and redefine the `run` method as you wish
"""
def run(self, result, x, z, w, y=2, **parameters):
|
if __name__ == "__main__":
set_stdout_logging()
parser = CTParser()
environment, _ = parser.parse()
param_set = ParameterSet()
param_set.add_parameters(x=[1, 2, 3], z=4, w=[5, 6])
experiment = Experiment("BasicUsage", param_set, MyComputation)
environment.run(experiment)
| import time
from datetime import datetime
from random import randint
# We add a few print statements
print(repr(self))
print()
print("{}: I must multiply {} by {}. This is a hard computation, "
"it will take a few seconds".format(datetime.now(), x, y))
result["multiply"] = x * y
time.sleep(randint(1, 10))
print("{}: Now I must add {} and {}. This is easier but I am tired "
"with all those hard computations. Let me think..."
"".format(datetime.now(), z, w))
result["sum"] = z + w
time.sleep(randint(1, 10))
print("{}: Woah, it was hard. I think I'll go back to sleep."
"".format(datetime.now())) |
_models.py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class CloudError(Model):
"""CloudError.
"""
_attribute_map = {
}
class ComplianceStatus(Model):
"""Compliance Status details.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar compliance_state: The compliance state of the configuration.
Possible values include: 'Pending', 'Compliant', 'Noncompliant',
'Installed', 'Failed'
:vartype compliance_state: str or
~azure.mgmt.kubernetesconfiguration.models.ComplianceStateType
:param last_config_applied: Datetime the configuration was last applied.
:type last_config_applied: datetime
:param message: Message from when the configuration was applied.
:type message: str
:param message_level: Level of the message. Possible values include:
'Error', 'Warning', 'Information'
:type message_level: str or
~azure.mgmt.kubernetesconfiguration.models.MessageLevelType
"""
_validation = {
'compliance_state': {'readonly': True},
}
_attribute_map = {
'compliance_state': {'key': 'complianceState', 'type': 'str'},
'last_config_applied': {'key': 'lastConfigApplied', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'message_level': {'key': 'messageLevel', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ComplianceStatus, self).__init__(**kwargs)
self.compliance_state = None
self.last_config_applied = kwargs.get('last_config_applied', None)
self.message = kwargs.get('message', None)
self.message_level = kwargs.get('message_level', None)
class ConfigurationIdentity(Model):
"""Identity for the managed cluster.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar principal_id: The principal id of the system assigned identity which
is used by the configuration.
:vartype principal_id: str
:ivar tenant_id: The tenant id of the system assigned identity which is
used by the configuration.
:vartype tenant_id: str
:param type: The type of identity used for the configuration. Type
'SystemAssigned' will use an implicitly created identity. Type 'None' will
not use Managed Identity for the configuration. Possible values include:
'SystemAssigned', 'None'
:type type: str or
~azure.mgmt.kubernetesconfiguration.models.ResourceIdentityType
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'ResourceIdentityType'},
}
def __init__(self, **kwargs):
super(ConfigurationIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = kwargs.get('type', None)
class ErrorDefinition(Model):
"""Error definition.
All required parameters must be populated in order to send to Azure.
:param code: Required. Service specific error code which serves as the
substatus for the HTTP error code.
:type code: str
:param message: Required. Description of the error.
:type message: str
"""
_validation = {
'code': {'required': True},
'message': {'required': True},
}
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ErrorDefinition, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.message = kwargs.get('message', None)
class ErrorResponse(Model):
"""Error response.
:param error: Error definition.
:type error: ~azure.mgmt.kubernetesconfiguration.models.ErrorDefinition
"""
_attribute_map = {
'error': {'key': 'error', 'type': 'ErrorDefinition'},
}
def __init__(self, **kwargs):
super(ErrorResponse, self).__init__(**kwargs)
self.error = kwargs.get('error', None)
class ErrorResponseException(HttpOperationError):
"""Server responsed with exception of type: 'ErrorResponse'.
:param deserialize: A deserializer
:param response: Server response to be deserialized.
"""
def __init__(self, deserialize, response, *args):
super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args)
class Resource(Model):
"""The Resource model definition.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
:type system_data: ~azure.mgmt.kubernetesconfiguration.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(self, **kwargs):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
self.system_data = kwargs.get('system_data', None)
class | (Resource):
"""ARM proxy resource.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
:type system_data: ~azure.mgmt.kubernetesconfiguration.models.SystemData
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
}
def __init__(self, **kwargs):
super(ProxyResource, self).__init__(**kwargs)
class ExtensionInstance(ProxyResource):
"""The Extension Instance object.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param location: Location of resource type
:type location: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
:type system_data: ~azure.mgmt.kubernetesconfiguration.models.SystemData
:param extension_type: Type of the Extension, of which this resource is an
    instance. It must be one of the Extension Types registered with
Microsoft.KubernetesConfiguration by the Extension publisher.
:type extension_type: str
:param auto_upgrade_minor_version: Flag to note if this instance
participates in auto upgrade of minor version, or not.
:type auto_upgrade_minor_version: bool
:param release_train: ReleaseTrain this extension instance participates in
for auto-upgrade (e.g. Stable, Preview, etc.) - only if
autoUpgradeMinorVersion is 'true'.
:type release_train: str
:param version: Version of the extension for this extension instance, if
it is 'pinned' to a specific version. autoUpgradeMinorVersion must be
'false'.
:type version: str
:param scope: Scope at which the extension instance is installed.
:type scope: ~azure.mgmt.kubernetesconfiguration.models.Scope
:param configuration_settings: Configuration settings, as name-value pairs
for configuring this instance of the extension.
:type configuration_settings: dict[str, str]
:param configuration_protected_settings: Configuration settings that are
sensitive, as name-value pairs for configuring this instance of the
extension.
:type configuration_protected_settings: dict[str, str]
:param install_state: Status of installation of this instance of the
extension. Possible values include: 'Pending', 'Installed', 'Failed'
:type install_state: str or
~azure.mgmt.kubernetesconfiguration.models.InstallStateType
:param statuses: Status from this instance of the extension.
:type statuses:
list[~azure.mgmt.kubernetesconfiguration.models.ExtensionStatus]
:ivar creation_time: DateLiteral (per ISO8601) noting the time the
resource was created by the client (user).
:vartype creation_time: str
:ivar last_modified_time: DateLiteral (per ISO8601) noting the time the
resource was modified by the client (user).
:vartype last_modified_time: str
:ivar last_status_time: DateLiteral (per ISO8601) noting the time of last
status from the agent.
:vartype last_status_time: str
:ivar error_info: Error information from the Agent - e.g. errors during
installation.
:vartype error_info:
~azure.mgmt.kubernetesconfiguration.models.ErrorDefinition
:param identity: The identity of the configuration.
:type identity:
~azure.mgmt.kubernetesconfiguration.models.ConfigurationIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'creation_time': {'readonly': True},
'last_modified_time': {'readonly': True},
'last_status_time': {'readonly': True},
'error_info': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'identity': {'key': 'identity', 'type': 'ConfigurationIdentity'},
'extension_type': {'key': 'properties.extensionType', 'type': 'str'},
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'release_train': {'key': 'properties.releaseTrain', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
'scope': {'key': 'properties.scope', 'type': 'Scope'},
'configuration_settings': {'key': 'properties.configurationSettings', 'type': '{str}'},
'configuration_protected_settings': {'key': 'properties.configurationProtectedSettings', 'type': '{str}'},
'install_state': {'key': 'properties.installState', 'type': 'str'},
'statuses': {'key': 'properties.statuses', 'type': '[ExtensionStatus]'},
'creation_time': {'key': 'properties.creationTime', 'type': 'str'},
'last_modified_time': {'key': 'properties.lastModifiedTime', 'type': 'str'},
'last_status_time': {'key': 'properties.lastStatusTime', 'type': 'str'},
'error_info': {'key': 'properties.errorInfo', 'type': 'ErrorDefinition'},
}
def __init__(self, **kwargs):
super(ExtensionInstance, self).__init__(**kwargs)
self.location = kwargs.get('location', None)
self.extension_type = kwargs.get('extension_type', None)
self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None)
self.release_train = kwargs.get('release_train', None)
self.version = kwargs.get('version', None)
self.scope = kwargs.get('scope', None)
self.configuration_settings = kwargs.get('configuration_settings', None)
self.configuration_protected_settings = kwargs.get('configuration_protected_settings', None)
self.install_state = kwargs.get('install_state', None)
self.statuses = kwargs.get('statuses', None)
self.creation_time = None
self.last_modified_time = None
self.last_status_time = None
self.error_info = None
self.identity = kwargs.get('identity', None)
class ExtensionInstanceUpdate(Model):
"""Update Extension Instance request object.
:param auto_upgrade_minor_version: Flag to note if this instance
participates in Extension Lifecycle Management or not.
:type auto_upgrade_minor_version: bool
:param release_train: ReleaseTrain this extension instance participates in
for auto-upgrade (e.g. Stable, Preview, etc.) - only if
autoUpgradeMinorVersion is 'true'.
:type release_train: str
:param version: Version number of extension, to 'pin' to a specific
version. autoUpgradeMinorVersion must be 'false'.
:type version: str
"""
_attribute_map = {
'auto_upgrade_minor_version': {'key': 'properties.autoUpgradeMinorVersion', 'type': 'bool'},
'release_train': {'key': 'properties.releaseTrain', 'type': 'str'},
'version': {'key': 'properties.version', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExtensionInstanceUpdate, self).__init__(**kwargs)
self.auto_upgrade_minor_version = kwargs.get('auto_upgrade_minor_version', None)
self.release_train = kwargs.get('release_train', None)
self.version = kwargs.get('version', None)
class ExtensionStatus(Model):
"""Status from this instance of the extension.
:param code: Status code provided by the Extension
:type code: str
:param display_status: Short description of status of this instance of the
extension.
:type display_status: str
:param level: Level of the status. Possible values include: 'Error',
'Warning', 'Information'. Default value: "Information" .
:type level: str or ~azure.mgmt.kubernetesconfiguration.models.LevelType
:param message: Detailed message of the status from the Extension
instance.
:type message: str
:param time: DateLiteral (per ISO8601) noting the time of installation
status.
:type time: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'display_status': {'key': 'displayStatus', 'type': 'str'},
'level': {'key': 'level', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'time': {'key': 'time', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ExtensionStatus, self).__init__(**kwargs)
self.code = kwargs.get('code', None)
self.display_status = kwargs.get('display_status', None)
self.level = kwargs.get('level', "Information")
self.message = kwargs.get('message', None)
self.time = kwargs.get('time', None)
class HelmOperatorProperties(Model):
"""Properties for Helm operator.
:param chart_version: Version of the operator Helm chart.
:type chart_version: str
:param chart_values: Values override for the operator Helm chart.
:type chart_values: str
"""
_attribute_map = {
'chart_version': {'key': 'chartVersion', 'type': 'str'},
'chart_values': {'key': 'chartValues', 'type': 'str'},
}
def __init__(self, **kwargs):
super(HelmOperatorProperties, self).__init__(**kwargs)
self.chart_version = kwargs.get('chart_version', None)
self.chart_values = kwargs.get('chart_values', None)
class ResourceProviderOperation(Model):
"""Supported operation of this resource provider.
Variables are only populated by the server, and will be ignored when
sending a request.
:param name: Operation name, in format of
{provider}/{resource}/{operation}
:type name: str
:param display: Display metadata associated with the operation.
:type display:
~azure.mgmt.kubernetesconfiguration.models.ResourceProviderOperationDisplay
:ivar is_data_action: The flag that indicates whether the operation
applies to data plane.
:vartype is_data_action: bool
"""
_validation = {
'is_data_action': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'ResourceProviderOperationDisplay'},
'is_data_action': {'key': 'isDataAction', 'type': 'bool'},
}
def __init__(self, **kwargs):
super(ResourceProviderOperation, self).__init__(**kwargs)
self.name = kwargs.get('name', None)
self.display = kwargs.get('display', None)
self.is_data_action = None
class ResourceProviderOperationDisplay(Model):
"""Display metadata associated with the operation.
:param provider: Resource provider: Microsoft KubernetesConfiguration.
:type provider: str
:param resource: Resource on which the operation is performed.
:type resource: str
:param operation: Type of operation: get, read, delete, etc.
:type operation: str
:param description: Description of this operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ResourceProviderOperationDisplay, self).__init__(**kwargs)
self.provider = kwargs.get('provider', None)
self.resource = kwargs.get('resource', None)
self.operation = kwargs.get('operation', None)
self.description = kwargs.get('description', None)
class Result(Model):
"""Sample result definition.
:param sample_property: Sample property of type string
:type sample_property: str
"""
_attribute_map = {
'sample_property': {'key': 'sampleProperty', 'type': 'str'},
}
def __init__(self, **kwargs):
super(Result, self).__init__(**kwargs)
self.sample_property = kwargs.get('sample_property', None)
class Scope(Model):
"""Scope of the extensionInstance. It can be either Cluster or Namespace; but
not both.
:param cluster: Specifies that the scope of the extensionInstance is
Cluster
:type cluster: ~azure.mgmt.kubernetesconfiguration.models.ScopeCluster
:param namespace: Specifies that the scope of the extensionInstance is
Namespace
:type namespace: ~azure.mgmt.kubernetesconfiguration.models.ScopeNamespace
"""
_attribute_map = {
'cluster': {'key': 'cluster', 'type': 'ScopeCluster'},
'namespace': {'key': 'namespace', 'type': 'ScopeNamespace'},
}
def __init__(self, **kwargs):
super(Scope, self).__init__(**kwargs)
self.cluster = kwargs.get('cluster', None)
self.namespace = kwargs.get('namespace', None)
class ScopeCluster(Model):
"""Specifies that the scope of the extensionInstance is Cluster.
:param release_namespace: Namespace where the extension Release must be
placed, for a Cluster scoped extensionInstance. If this namespace does
not exist, it will be created
:type release_namespace: str
"""
_attribute_map = {
'release_namespace': {'key': 'releaseNamespace', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ScopeCluster, self).__init__(**kwargs)
self.release_namespace = kwargs.get('release_namespace', None)
class ScopeNamespace(Model):
"""Specifies that the scope of the extensionInstance is Namespace.
:param target_namespace: Namespace where the extensionInstance will be
    created for a Namespace scoped extensionInstance. If this namespace does
not exist, it will be created
:type target_namespace: str
"""
_attribute_map = {
'target_namespace': {'key': 'targetNamespace', 'type': 'str'},
}
def __init__(self, **kwargs):
super(ScopeNamespace, self).__init__(**kwargs)
self.target_namespace = kwargs.get('target_namespace', None)
class SourceControlConfiguration(ProxyResource):
"""The SourceControl Configuration object returned in Get & Put response.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource Id
:vartype id: str
:ivar name: Resource name
:vartype name: str
:ivar type: Resource type
:vartype type: str
:param system_data: Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources
:type system_data: ~azure.mgmt.kubernetesconfiguration.models.SystemData
:param repository_url: Url of the SourceControl Repository.
:type repository_url: str
:param operator_namespace: The namespace to which this operator is
    installed. Maximum of 253 lower case alphanumeric characters, hyphen
and period only. Default value: "default" .
:type operator_namespace: str
:param operator_instance_name: Instance name of the operator - identifying
the specific configuration.
:type operator_instance_name: str
:param operator_type: Type of the operator. Possible values include:
'Flux'
:type operator_type: str or
~azure.mgmt.kubernetesconfiguration.models.OperatorType
:param operator_params: Any Parameters for the Operator instance in string
format.
:type operator_params: str
:param configuration_protected_settings: Name-value pairs of protected
configuration settings for the configuration
:type configuration_protected_settings: dict[str, str]
:param operator_scope: Scope at which the operator will be installed.
Possible values include: 'cluster', 'namespace'. Default value: "cluster"
.
:type operator_scope: str or
~azure.mgmt.kubernetesconfiguration.models.OperatorScopeType
:ivar repository_public_key: Public Key associated with this SourceControl
configuration (either generated within the cluster or provided by the
user).
:vartype repository_public_key: str
:param ssh_known_hosts_contents: Base64-encoded known_hosts contents
containing public SSH keys required to access private Git instances
:type ssh_known_hosts_contents: str
:param enable_helm_operator: Option to enable Helm Operator for this git
configuration.
:type enable_helm_operator: bool
:param helm_operator_properties: Properties for Helm operator.
:type helm_operator_properties:
~azure.mgmt.kubernetesconfiguration.models.HelmOperatorProperties
:ivar provisioning_state: The provisioning state of the resource provider.
Possible values include: 'Accepted', 'Deleting', 'Running', 'Succeeded',
'Failed'
:vartype provisioning_state: str or
~azure.mgmt.kubernetesconfiguration.models.ProvisioningStateType
:ivar compliance_status: Compliance Status of the Configuration
:vartype compliance_status:
~azure.mgmt.kubernetesconfiguration.models.ComplianceStatus
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'repository_public_key': {'readonly': True},
'provisioning_state': {'readonly': True},
'compliance_status': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'system_data': {'key': 'systemData', 'type': 'SystemData'},
'repository_url': {'key': 'properties.repositoryUrl', 'type': 'str'},
'operator_namespace': {'key': 'properties.operatorNamespace', 'type': 'str'},
'operator_instance_name': {'key': 'properties.operatorInstanceName', 'type': 'str'},
'operator_type': {'key': 'properties.operatorType', 'type': 'str'},
'operator_params': {'key': 'properties.operatorParams', 'type': 'str'},
'configuration_protected_settings': {'key': 'properties.configurationProtectedSettings', 'type': '{str}'},
'operator_scope': {'key': 'properties.operatorScope', 'type': 'str'},
'repository_public_key': {'key': 'properties.repositoryPublicKey', 'type': 'str'},
'ssh_known_hosts_contents': {'key': 'properties.sshKnownHostsContents', 'type': 'str'},
'enable_helm_operator': {'key': 'properties.enableHelmOperator', 'type': 'bool'},
'helm_operator_properties': {'key': 'properties.helmOperatorProperties', 'type': 'HelmOperatorProperties'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'compliance_status': {'key': 'properties.complianceStatus', 'type': 'ComplianceStatus'},
}
def __init__(self, **kwargs):
super(SourceControlConfiguration, self).__init__(**kwargs)
self.repository_url = kwargs.get('repository_url', None)
self.operator_namespace = kwargs.get('operator_namespace', "default")
self.operator_instance_name = kwargs.get('operator_instance_name', None)
self.operator_type = kwargs.get('operator_type', None)
self.operator_params = kwargs.get('operator_params', None)
self.configuration_protected_settings = kwargs.get('configuration_protected_settings', None)
self.operator_scope = kwargs.get('operator_scope', "cluster")
self.repository_public_key = None
self.ssh_known_hosts_contents = kwargs.get('ssh_known_hosts_contents', None)
self.enable_helm_operator = kwargs.get('enable_helm_operator', None)
self.helm_operator_properties = kwargs.get('helm_operator_properties', None)
self.provisioning_state = None
self.compliance_status = None
class SystemData(Model):
"""Top level metadata
https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/common-api-contracts.md#system-metadata-for-all-azure-resources.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar created_by: A string identifier for the identity that created the
resource
:vartype created_by: str
:ivar created_by_type: The type of identity that created the resource:
user, application, managedIdentity, key
:vartype created_by_type: str
:ivar created_at: The timestamp of resource creation (UTC)
:vartype created_at: datetime
:ivar last_modified_by: A string identifier for the identity that last
modified the resource
:vartype last_modified_by: str
:ivar last_modified_by_type: The type of identity that last modified the
resource: user, application, managedIdentity, key
:vartype last_modified_by_type: str
:ivar last_modified_at: The timestamp of resource last modification (UTC)
:vartype last_modified_at: datetime
"""
_validation = {
'created_by': {'readonly': True},
'created_by_type': {'readonly': True},
'created_at': {'readonly': True},
'last_modified_by': {'readonly': True},
'last_modified_by_type': {'readonly': True},
'last_modified_at': {'readonly': True},
}
_attribute_map = {
'created_by': {'key': 'createdBy', 'type': 'str'},
'created_by_type': {'key': 'createdByType', 'type': 'str'},
'created_at': {'key': 'createdAt', 'type': 'iso-8601'},
'last_modified_by': {'key': 'lastModifiedBy', 'type': 'str'},
'last_modified_by_type': {'key': 'lastModifiedByType', 'type': 'str'},
'last_modified_at': {'key': 'lastModifiedAt', 'type': 'iso-8601'},
}
def __init__(self, **kwargs):
super(SystemData, self).__init__(**kwargs)
self.created_by = None
self.created_by_type = None
self.created_at = None
self.last_modified_by = None
self.last_modified_by_type = None
self.last_modified_at = None
| ProxyResource |
registerUser.js | import Api from 'app/api';
import ApiConstants from '../ApiConstants';
export default function registerUser(username, password, email, phone, address, city, country, name, nid, bio, firstName, lastName, profileType) {
const payload = {
username: username,
password: password,
email: email,
phone: phone, | name: name,
nid: nid,
bio: bio,
first_name: firstName,
last_name: lastName,
profile_type: profileType,
};
return Api(
ApiConstants.SIGNUP,
payload,
'post',
null
);
} | address: address,
city: city,
country: country, |
main.rs | #![feature(proc_macro, custom_derive)]
#[macro_use] extern crate chan;
extern crate chan_signal;
extern crate env_logger;
extern crate hyper;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate serde_derive;
extern crate ws;
use chan_signal::{Signal, notify};
use std::thread;
mod conf;
mod slack;
fn start_ws(s: &str) {
// Connect to the url and call the closure
ws::connect(s, |out| {
// Queue a message to be sent when the WebSocket is open
// if let Err(_) = out.send("Hello WebSocket") {
// println!("Websocket couldn't queue an initial message.")
//} else {
// println!("Client sent message 'Hello WebSocket'. ")
//}
// The handler needs to take ownership of out, so we use move
move |msg| {
// Handle messages received on this connection
println!("Client got message '{}'. ", msg);
match msg {
ws::Message::Text(s) => slack::events::_match(s),
_ => println!("unsupported binary format"),
};
// Close the connection
// out.close(ws::CloseCode::Normal)
Ok(())
}
});
println!("exited thread");
}
fn main() | {
env_logger::init().unwrap();
let c = match conf::load() {
Ok(c) => c,
Err(e) => {
println!("{}", e);
return
}
};
thread::spawn(move || {
match slack::rtm::start::call(&*c.tokens.first().unwrap()) {
Ok(s) => start_ws(&*s.url),
Err(e) => println!("{}", e)
}
});
let signal = notify(&[Signal::INT, Signal::QUIT, Signal::KILL]);
signal.recv();
} |
|
test_policies.py | # Copyright DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from cassandra import ConsistencyLevel, Unavailable
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from tests.integration import use_cluster, get_cluster, get_node
def setup_module():
use_cluster('test_cluster', [4])
class | (unittest.TestCase):
@classmethod
def tearDownClass(cls):
cluster = get_cluster()
cluster.start() # make sure other nodes are restarted
def test_should_rethrow_on_unvailable_with_default_policy_if_cas(self):
"""
Tests for the default retry policy in combination with lightweight transactions.
@since 3.17
@jira_ticket PYTHON-1007
@expected_result the query is retried with the default CL, not the serial one.
@test_category policy
"""
ep = ExecutionProfile(consistency_level=ConsistencyLevel.ALL,
serial_consistency_level=ConsistencyLevel.SERIAL)
cluster = Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: ep})
session = cluster.connect()
session.execute("CREATE KEYSPACE test_retry_policy_cas WITH replication = {'class':'SimpleStrategy','replication_factor': 3};")
session.execute("CREATE TABLE test_retry_policy_cas.t (id int PRIMARY KEY, data text);")
session.execute('INSERT INTO test_retry_policy_cas.t ("id", "data") VALUES (%(0)s, %(1)s)', {'0': 42, '1': 'testing'})
get_node(2).stop()
get_node(4).stop()
# before fix: cassandra.InvalidRequest: Error from server: code=2200 [Invalid query] message="SERIAL is not
# supported as conditional update commit consistency. ....""
# after fix: cassandra.Unavailable (expected since replicas are down)
with self.assertRaises(Unavailable) as cm:
session.execute("update test_retry_policy_cas.t set data = 'staging' where id = 42 if data ='testing'")
exception = cm.exception
self.assertEqual(exception.consistency, ConsistencyLevel.SERIAL)
self.assertEqual(exception.required_replicas, 2)
self.assertEqual(exception.alive_replicas, 1)
| RetryPolicyTests |
wakeonlan_test.go | package main
import (
"bytes"
"net"
"testing"
)
func TestNew_Only48BitAddresses(t *testing.T) |
func TestNew_Payload(t *testing.T) {
// get a buffer to hold the packet data
want := make([]byte, 102)
// write 6 byte preamble
copy(want[:6], []byte{0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF})
// write 16 repetitions of target mac address
offset := 6
addr, _ := net.ParseMAC("00:00:5e:00:53:01")
for i := 0; i < 16; i++ {
copy(want[offset+i*6:offset+i*6+6], addr)
}
// construct the magic packet
var buf bytes.Buffer
mp, _ := New("00:00:5e:00:53:01")
mp.Broadcast(&buf)
// compare!
got := buf.Bytes()
if bytes.Compare(want, got) != 0 {
t.Errorf("want %+v, but got %+v", want, got)
}
}
| {
tc := []struct {
addr string
success bool
}{
{"00:00:5e:00:53:01", true},
{"02:00:5e:10:00:00:00:01", false},
{"00:00:00:00:fe:80:00:00:00:00:00:00:02:00:5e:10:00:00:00:01", false},
{"00-00-5e-00-53-01", true},
{"02-00-5e-10-00-00-00-01", false},
{"00-00-00-00-fe-80-00-00-00-00-00-00-02-00-5e-10-00-00-00-01", false},
{"0000.5e00.5301", true},
{"0200.5e10.0000.0001", false},
{"0000.0000.fe80.0000.0000.0000.0200.5e10.0000.0001", false},
}
for _, tt := range tc {
t.Run(tt.addr, func(t *testing.T) {
_, err := New(tt.addr)
if (err == nil) != tt.success {
t.Errorf("parsing %+v should fail, but it didnt", tt.addr)
}
})
}
} |
test_terse_json.py | # Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from io import BytesIO, StringIO
from unittest.mock import Mock, patch
from twisted.web.server import Request
from synapse.http.site import SynapseRequest
from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter
from synapse.logging.context import LoggingContext, LoggingContextFilter
from tests.logging import LoggerCleanupMixin
from tests.server import FakeChannel
from tests.unittest import TestCase
class TerseJsonTestCase(LoggerCleanupMixin, TestCase):
def setUp(self):
self.output = StringIO()
def get_log_line(self):
# One log message, with a single trailing newline.
data = self.output.getvalue()
logs = data.splitlines()
self.assertEqual(len(logs), 1)
self.assertEqual(data.count("\n"), 1)
return json.loads(logs[0])
def test_terse_json_output(self):
"""
The Terse JSON formatter converts log messages to JSON.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(TerseJsonFormatter())
logger = self.get_logger(handler)
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"time",
"level",
"namespace",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
def | (self):
"""
Additional information can be included in the structured logging.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(TerseJsonFormatter())
logger = self.get_logger(handler)
logger.info(
"Hello there, %s!", "wally", extra={"foo": "bar", "int": 3, "bool": True}
)
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"time",
"level",
"namespace",
# The additional keys given via extra.
"foo",
"int",
"bool",
]
self.assertCountEqual(log.keys(), expected_log_keys)
# Check the values of the extra fields.
self.assertEqual(log["foo"], "bar")
self.assertEqual(log["int"], 3)
self.assertIs(log["bool"], True)
def test_json_output(self):
"""
The Terse JSON formatter converts log messages to JSON.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(JsonFormatter())
logger = self.get_logger(handler)
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"level",
"namespace",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
def test_with_context(self):
"""
The logging context should be added to the JSON response.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(JsonFormatter())
handler.addFilter(LoggingContextFilter())
logger = self.get_logger(handler)
with LoggingContext("name"):
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"level",
"namespace",
"request",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
self.assertEqual(log["request"], "name")
def test_with_request_context(self):
"""
Information from the logging context request should be added to the JSON response.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(JsonFormatter())
handler.addFilter(LoggingContextFilter())
logger = self.get_logger(handler)
# A full request isn't needed here.
site = Mock(spec=["site_tag", "server_version_string", "getResourceFor"])
site.site_tag = "test-site"
site.server_version_string = "Server v1"
site.reactor = Mock()
request = SynapseRequest(FakeChannel(site, None), site)
# Call requestReceived to finish instantiating the object.
request.content = BytesIO()
# Partially skip some of the internal processing of SynapseRequest.
request._started_processing = Mock()
request.request_metrics = Mock(spec=["name"])
with patch.object(Request, "render"):
request.requestReceived(b"POST", b"/_matrix/client/versions", b"1.1")
# Also set the requester to ensure the processing works.
request.requester = "@foo:test"
with LoggingContext(
request.get_request_id(), parent_context=request.logcontext
):
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger includes additional request information, if possible.
expected_log_keys = [
"log",
"level",
"namespace",
"request",
"ip_address",
"site_tag",
"requester",
"authenticated_entity",
"method",
"url",
"protocol",
"user_agent",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
self.assertTrue(log["request"].startswith("POST-"))
self.assertEqual(log["ip_address"], "127.0.0.1")
self.assertEqual(log["site_tag"], "test-site")
self.assertEqual(log["requester"], "@foo:test")
self.assertEqual(log["authenticated_entity"], "@foo:test")
self.assertEqual(log["method"], "POST")
self.assertEqual(log["url"], "/_matrix/client/versions")
self.assertEqual(log["protocol"], "1.1")
self.assertEqual(log["user_agent"], "")
| test_extra_data |
networks.py | import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from math import floor, log2
from functools import partial
from linear_attention_transformer import ImageLinearAttention
###
from random import random
import numpy as np
import torch.nn.functional as F
###
from models.networks_SPADE.base_network import BaseNetwork
from models.networks_SPADE.architecture import ResnetBlock as ResnetBlock
from models.networks_SPADE.architecture import SPADEResnetBlock as SPADEResnetBlock
###############################################################################
# Helper Functions
###############################################################################
class Identity(nn.Module):
def forward(self, x):
return x
def get_norm_layer(norm_type='instance'):
"""Return a normalization layer
Parameters:
norm_type (str) -- the name of the normalization layer: batch | instance | none
For BatchNorm, we use learnable affine parameters and track running statistics (mean/stddev).
For InstanceNorm, we do not use learnable affine parameters. We do not track running statistics.
"""
if norm_type == 'batch':
norm_layer = functools.partial(nn.BatchNorm2d, affine=True, track_running_stats=True)
elif norm_type == 'instance':
norm_layer = functools.partial(nn.InstanceNorm2d, affine=False, track_running_stats=False)
elif norm_type == 'none':
def norm_layer(x): return Identity()
else:
raise NotImplementedError('normalization layer [%s] is not found' % norm_type)
return norm_layer
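# Illustrative sketch (not part of the original file): the callable returned by
# get_norm_layer is used like a layer constructor, e.g.
#   norm_layer = get_norm_layer('instance')
#   norm = norm_layer(64)  # builds nn.InstanceNorm2d(64, affine=False, track_running_stats=False)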
def get_scheduler(optimizer, opt):
"""Return a learning rate scheduler
Parameters:
optimizer -- the optimizer of the network
opt (option class) -- stores all the experiment flags; needs to be a subclass of BaseOptions.
opt.lr_policy is the name of learning rate policy: linear | step | plateau | cosine
For 'linear', we keep the same learning rate for the first <opt.n_epochs> epochs
and linearly decay the rate to zero over the next <opt.n_epochs_decay> epochs.
For other schedulers (step, plateau, and cosine), we use the default PyTorch schedulers.
See https://pytorch.org/docs/stable/optim.html for more details.
"""
if opt.lr_policy == 'linear':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + opt.epoch_count - opt.n_epochs) / float(opt.n_epochs_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
elif opt.lr_policy == 'cosine':
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.n_epochs, eta_min=0)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
def define_SPADE(opt,gpu_ids):
if('spade8' in opt.netG):
net = SPADE8Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
elif('spade6' in opt.netG):
net = SPADE6Generator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
else:
net = SPADEGenerator(input_nc=1, output_nc=1, num_downs = 8, ngf=1, norm_layer='abc', use_dropout=False, opt=opt)
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids)
net.init_weights()
return net
def init_weights(net, init_type='normal', init_gain=0.02):
"""Initialize network weights.
Parameters:
net (network) -- network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
We use 'normal' in the original pix2pix and CycleGAN paper. But xavier and kaiming might
work better for some applications. Feel free to try yourself.
"""
def init_func(m): # define the initialization function
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, init_gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=init_gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=init_gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1: # BatchNorm Layer's weight is not a matrix; only normal distribution applies.
init.normal_(m.weight.data, 1.0, init_gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func) # apply the initialization function <init_func>
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Initialize a network: 1. register CPU/GPU device (with multi-GPU support); 2. initialize the network weights
Parameters:
net (network) -- the network to be initialized
init_type (str) -- the name of an initialization method: normal | xavier | kaiming | orthogonal
gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Return an initialized network.
"""
if len(gpu_ids) > 0:
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
#net = torch.nn.DataParallel(net, gpu_ids) # multi-GPUs
init_weights(net, init_type, init_gain=init_gain)
return net
def define_G(input_nc, output_nc, ngf, netG, norm='batch', use_dropout=False, init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
netG (str) -- the architecture's name: resnet_9blocks | resnet_6blocks | unet_256 | unet_128
norm (str) -- the name of normalization layers used in the network: batch | instance | none
use_dropout (bool) -- if use dropout layers.
init_type (str) -- the name of our initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a generator
Our current implementation provides two types of generators:
U-Net: [unet_128] (for 128x128 input images) and [unet_256] (for 256x256 input images)
The original U-Net paper: https://arxiv.org/abs/1505.04597
Resnet-based generator: [resnet_6blocks] (with 6 Resnet blocks) and [resnet_9blocks] (with 9 Resnet blocks)
Resnet-based generator consists of several Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code from Justin Johnson's neural style transfer project (https://github.com/jcjohnson/fast-neural-style).
    The generator has been initialized by <init_net>. It uses ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netG == 'resnet_9blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_9blocksup':
net = ResnetGeneratorUp(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=9)
elif netG == 'resnet_6blocks':
net = ResnetGenerator(input_nc, output_nc, ngf, norm_layer=norm_layer, use_dropout=use_dropout, n_blocks=6)
elif netG == 'unet_128':
net = UnetGenerator(input_nc, output_nc, 7, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_256':
net = UnetGenerator(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_768':
net = UNet768(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_768_sigm':
net = UNet768Sigm(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_spade':
net = UNet768PIXSPADE(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
elif netG == 'unet_spade8sm':
net = UNet768PIXSPADE8SM(input_nc, output_nc, 8, ngf, norm_layer=norm_layer, use_dropout=use_dropout)
else:
raise NotImplementedError('Generator model name [%s] is not recognized' % netG)
return init_net(net, init_type, init_gain, gpu_ids)
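# Illustrative call (hypothetical argument values, mirroring the docstring above):
#   netG = define_G(input_nc=3, output_nc=3, ngf=64, netG='resnet_9blocks',
#                   norm='instance', use_dropout=False, init_type='normal', gpu_ids=[])
# builds a 9-block ResNet generator, initializes its weights and returns it.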
def define_D(input_nc, ndf, netD, n_layers_D=3, norm='batch', init_type='normal', init_gain=0.02, gpu_ids=[]):
"""Create a discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the first conv layer
netD (str) -- the architecture's name: basic | n_layers | pixel
n_layers_D (int) -- the number of conv layers in the discriminator; effective when netD=='n_layers'
norm (str) -- the type of normalization layers used in the network.
init_type (str) -- the name of the initialization method.
init_gain (float) -- scaling factor for normal, xavier and orthogonal.
gpu_ids (int list) -- which GPUs the network runs on: e.g., 0,1,2
Returns a discriminator
Our current implementation provides three types of discriminators:
[basic]: 'PatchGAN' classifier described in the original pix2pix paper.
It can classify whether 70×70 overlapping patches are real or fake.
Such a patch-level discriminator architecture has fewer parameters
than a full-image discriminator and can work on arbitrarily-sized images
in a fully convolutional fashion.
[n_layers]: With this mode, you can specify the number of conv layers in the discriminator
with the parameter <n_layers_D> (default=3 as used in [basic] (PatchGAN).)
[pixel]: 1x1 PixelGAN discriminator can classify whether a pixel is real or not.
It encourages greater color diversity but has no effect on spatial statistics.
    The discriminator has been initialized by <init_net>. It uses Leaky ReLU for non-linearity.
"""
net = None
norm_layer = get_norm_layer(norm_type=norm)
if netD == 'basic': # default PatchGAN classifier
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'n_layers': # more options
net = NLayerDiscriminator(input_nc, ndf, n_layers_D, norm_layer=norm_layer)
elif netD == 'pixel': # classify if each pixel is real or fake
net = PixelDiscriminator(input_nc, ndf, norm_layer=norm_layer)
elif netD == 'conditional': #conditional patchGAN
net = NLayerDiscriminator(input_nc, ndf, n_layers=3, norm_layer=norm_layer)
elif netD == 'unet':
net = UnetDiscriminator()
else:
raise NotImplementedError('Discriminator model name [%s] is not recognized' % netD)
return init_net(net, init_type, init_gain, gpu_ids)
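# Illustrative call (hypothetical argument values): define_D(input_nc=6, ndf=64,
# netD='basic', norm='instance') builds the default 3-layer PatchGAN classifier;
# for a conditional setup, input_nc is typically the sum of condition and image channels.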
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_mode = gan_mode
if gan_mode == 'lsgan':
self.loss = nn.MSELoss()
elif gan_mode == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif gan_mode in ['wgangp']:
self.loss = None
else:
raise NotImplementedError('gan mode %s not implemented' % gan_mode)
def get_target_tensor(self, prediction, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(prediction)
def __call__(self, prediction, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
            prediction (tensor) - - typically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
if self.gan_mode in ['lsgan', 'vanilla']:
target_tensor = self.get_target_tensor(prediction, target_is_real)
loss = self.loss(prediction, target_tensor)
elif self.gan_mode == 'wgangp':
if target_is_real:
loss = -prediction.mean()
else:
loss = prediction.mean()
return loss
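# Typical usage sketch (illustrative only; the names below are not from this file):
#   criterion = GANLoss('lsgan').to(device)
#   loss_D_real = criterion(netD(real_images), True)
#   loss_D_fake = criterion(netD(fake_images.detach()), False)
# The target tensor is created internally with the same shape as the prediction.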
class UnetGANLoss(nn.Module):
"""Define different GAN objectives.
The GANLoss class abstracts away the need to create the target label tensor
that has the same size as the input.
"""
def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0):
""" Initialize the GANLoss class.
Parameters:
gan_mode (str) - - the type of GAN objective. It currently supports vanilla, lsgan, and wgangp.
target_real_label (bool) - - label for a real image
target_fake_label (bool) - - label of a fake image
Note: Do not use sigmoid as the last layer of Discriminator.
        LSGAN needs no sigmoid. Vanilla GANs will handle it with BCEWithLogitsLoss.
"""
super(UnetGANLoss, self).__init__()
self.register_buffer('real_label_1', torch.tensor(target_real_label))
self.register_buffer('real_label_2', torch.tensor(np.ones((1,256,256))))
self.register_buffer('fake_label_1', torch.tensor(target_fake_label))
self.register_buffer('fake_label_2', torch.tensor(np.zeros((1,256,256))))
self.loss_1 = nn.BCEWithLogitsLoss()
self.loss_2 = nn.BCEWithLogitsLoss()
def get_target_tensor(self, prediction_1, prediction_2, target_is_real):
"""Create label tensors with the same size as the input.
Parameters:
            prediction (tensor) - - typically the prediction from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
A label tensor filled with ground truth label, and with the size of the input
"""
if target_is_real:
target_tensor_1 = self.real_label_1
target_tensor_2 = self.real_label_2
else:
target_tensor_1 = self.fake_label_1
target_tensor_2 = self.fake_label_2
return target_tensor_1.expand_as(prediction_1), target_tensor_2.expand_as(prediction_2)
def __call__(self, prediction_1, prediction_2, target_is_real):
"""Calculate loss given Discriminator's output and grount truth labels.
Parameters:
prediction (tensor) - - tpyically the prediction output from a discriminator
target_is_real (bool) - - if the ground truth label is for real images or fake images
Returns:
the calculated loss.
"""
target_tensor_1, target_tensor_2 = self.get_target_tensor(prediction_1, prediction_2, target_is_real)
loss_1 = self.loss_1(prediction_1, target_tensor_1)
loss_2 = self.loss_2(prediction_2, target_tensor_2)
loss = loss_1.mean()+loss_2.mean()
return loss
def cal_gradient_penalty(netD, real_data, fake_data, device, type='mixed', constant=1.0, lambda_gp=10.0):
"""Calculate the gradient penalty loss, used in WGAN-GP paper https://arxiv.org/abs/1704.00028
Arguments:
netD (network) -- discriminator network
real_data (tensor array) -- real images
fake_data (tensor array) -- generated images from the generator
device (str) -- GPU / CPU: from torch.device('cuda:{}'.format(self.gpu_ids[0])) if self.gpu_ids else torch.device('cpu')
type (str) -- if we mix real and fake data or not [real | fake | mixed].
        constant (float)            -- the constant used in the formula (||gradient||_2 - constant)^2
lambda_gp (float) -- weight for this loss
Returns the gradient penalty loss
"""
if lambda_gp > 0.0:
if type == 'real': # either use real images, fake images, or a linear interpolation of two.
interpolatesv = real_data
elif type == 'fake':
interpolatesv = fake_data
elif type == 'mixed':
alpha = torch.rand(real_data.shape[0], 1, device=device)
alpha = alpha.expand(real_data.shape[0], real_data.nelement() // real_data.shape[0]).contiguous().view(*real_data.shape)
interpolatesv = alpha * real_data + ((1 - alpha) * fake_data)
else:
raise NotImplementedError('{} not implemented'.format(type))
interpolatesv.requires_grad_(True)
disc_interpolates = netD(interpolatesv)
gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolatesv,
grad_outputs=torch.ones(disc_interpolates.size()).to(device),
create_graph=True, retain_graph=True, only_inputs=True)
gradients = gradients[0].view(real_data.size(0), -1) # flat the data
gradient_penalty = (((gradients + 1e-16).norm(2, dim=1) - constant) ** 2).mean() * lambda_gp # added eps
return gradient_penalty, gradients
else:
return 0.0, None
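# Illustrative sketch (assumption, not from the original training code): combining the
# gradient penalty with a WGAN-GP discriminator loss. `netD`, `real`, `fake`, and `device`
# are hypothetical placeholders for the discriminator, image batches, and torch device.
def _example_wgangp_d_loss(netD, real, fake, device):
    loss_real = -netD(real).mean()          # real samples should score high
    loss_fake = netD(fake.detach()).mean()  # fake samples should score low
    gp, _ = cal_gradient_penalty(netD, real, fake.detach(), device, type='mixed')
    return loss_real + loss_fake + gp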
class ResnetGenerator(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGenerator, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1,
bias=use_bias),
norm_layer(int(ngf * mult / 2)),
nn.ReLU(True)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
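# Minimal usage sketch (assumption): the 9-block configuration commonly used for 256x256
# images; the output keeps the input's spatial size and lies in [-1, 1] via the final Tanh.
def _example_resnet_generator():
    net = ResnetGenerator(input_nc=3, output_nc=3, ngf=64, n_blocks=9)
    x = torch.randn(1, 3, 256, 256)
    return net(x)  # shape (1, 3, 256, 256)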
class ResnetGeneratorUp(nn.Module):
"""Resnet-based generator that consists of Resnet blocks between a few downsampling/upsampling operations.
We adapt Torch code and idea from Justin Johnson's neural style transfer project(https://github.com/jcjohnson/fast-neural-style)
"""
def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=6, padding_type='reflect'):
"""Construct a Resnet-based generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers
n_blocks (int) -- the number of ResNet blocks
padding_type (str) -- the name of padding layer in conv layers: reflect | replicate | zero
"""
assert(n_blocks >= 0)
super(ResnetGeneratorUp, self).__init__()
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
model = [nn.ReflectionPad2d(3),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 2
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2),
nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.Upsample(scale_factor = 2, mode='nearest'),
nn.ReflectionPad2d(1),
nn.Conv2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=1, padding=0),]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0)]
model += [nn.Tanh()]
self.model = nn.Sequential(*model)
def forward(self, input):
"""Standard forward"""
return self.model(input)
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class UnetGenerator(nn.Module):
"""Create a Unet-based generator"""
def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet generator
Parameters:
input_nc (int) -- the number of channels in input images
output_nc (int) -- the number of channels in output images
            num_downs (int)  -- the number of downsamplings in UNet. For example, if |num_downs| == 7,
                                an image of size 128x128 will become of size 1x1 at the bottleneck
ngf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
We construct the U-Net from the innermost layer to the outermost layer.
It is a recursive process.
"""
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, norm_layer=norm_layer, innermost=True) # add the innermost layer
for i in range(num_downs - 5): # add intermediate layers with ngf * 8 filters
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer, use_dropout=use_dropout)
# gradually reduce the number of filters from ngf * 8 to ngf
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block, norm_layer=norm_layer)
self.model = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True, norm_layer=norm_layer) # add the outermost layer
def forward(self, input):
"""Standard forward"""
return self.model(input)
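# Minimal usage sketch (assumption): num_downs controls how far the U-Net downsamples;
# with num_downs=8 a 256x256 input is reduced to 1x1 at the bottleneck and restored on the way up.
def _example_unet_generator():
    net = UnetGenerator(input_nc=3, output_nc=3, num_downs=8, ngf=64)
    x = torch.randn(1, 3, 256, 256)
    return net(x)  # shape (1, 3, 256, 256)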
class UnetSkipConnectionBlock(nn.Module):
"""Defines the Unet submodule with skip connection.
X -------------------identity----------------------
|-- downsampling -- |submodule| -- upsampling --|
"""
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False, norm_layer=nn.BatchNorm2d, use_dropout=False):
"""Construct a Unet submodule with skip connections.
Parameters:
outer_nc (int) -- the number of filters in the outer conv layer
inner_nc (int) -- the number of filters in the inner conv layer
input_nc (int) -- the number of channels in input images/features
submodule (UnetSkipConnectionBlock) -- previously defined submodules
outermost (bool) -- if this module is the outermost module
innermost (bool) -- if this module is the innermost module
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
"""
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if type(norm_layer) == functools.partial:
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=use_bias)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = norm_layer(inner_nc)
uprelu = nn.ReLU(True)
upnorm = norm_layer(outer_nc)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=use_bias)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
if use_dropout:
model = down + [submodule] + up + [nn.Dropout(0.5)]
else:
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else: # add skip connections
return torch.cat([x, self.model(x)], 1)
#%%% Unet from DeepMact
class ConvBnRelu2d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, output_padding=1, dilation=1, stride=1, groups=1, is_bn=True, is_relu=True, is_decoder=False):
super(ConvBnRelu2d, self).__init__()
if is_decoder:
self.transpConv = torch.nn.ConvTranspose2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, output_padding=output_padding, stride=stride, dilation=dilation, groups=groups, bias=False)
self.conv = None
else:
self.transpConv = None
self.conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, padding=padding, stride=stride, dilation=dilation, groups=groups, bias=False)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=1e-4)
self.relu = torch.nn.ReLU(inplace=True)
if is_bn is False: self.bn = None
if is_relu is False: self.relu = None
def forward(self, x):
if self.conv is None:
x = self.transpConv(x)
elif self.transpConv is None:
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.relu is not None:
x = self.relu(x)
return x
class StackEncoder(torch.nn.Module):
def __init__(self, x_channels, y_channels, kernel_size=3, stride=1):
super(StackEncoder, self).__init__()
padding = (kernel_size - 1) // 2
self.encode = torch.nn.Sequential(
ConvBnRelu2d(x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
)
def forward(self, x):
y = self.encode(x)
y_small = torch.nn.functional.max_pool2d(y, kernel_size=2, stride=2)
return y, y_small
class StackDecoder(torch.nn.Module):
def __init__(self, x_big_channels, x_channels, y_channels, kernel_size=3, stride=1):
super(StackDecoder, self).__init__()
padding = (kernel_size - 1) // 2
self.decode = torch.nn.Sequential(
ConvBnRelu2d(x_big_channels + x_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
ConvBnRelu2d(y_channels, y_channels, kernel_size=kernel_size, padding=padding, dilation=1, stride=stride, groups=1),
)
def forward(self, x_big, x):
N, C, H, W = x_big.size()
        y = torch.nn.functional.interpolate(x, size=(H, W), mode='bilinear', align_corners=True)  # interpolate replaces the deprecated upsample
y = torch.cat([y, x_big], 1)
y = self.decode(y)
return y
# 768
class UNet768(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop y to the (h, w) of x and concat them.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self, x):
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
#print('2',out.shape)
out = self.up4(down4, out)
#print('3',out.shape)
out = self.up3(down3, out)
#print('4',out.shape)
out = self.up2(down2, out)
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
#print('6',out.shape)
out = self.final_out(self.classify(out))
        out = torch.reshape(out, (-1, self.output_nc, x.shape[2], x.shape[3]))
return out
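# Minimal usage sketch (assumption): the DeepMACT-style UNet768. The num_downs and ngf
# arguments are only kept for interface compatibility and are not used internally.
def _example_unet768():
    net = UNet768(input_nc=1, output_nc=1, num_downs=None, ngf=None)
    x = torch.randn(1, 1, 256, 256)
    return net(x)  # reshaped to (-1, output_nc, 256, 256), Tanh range [-1, 1]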
#%%Unet_spade_768_300
#%%sigm
class UNet768Sigm(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768Sigm, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(input_nc, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Sigmoid()
def _crop_concat(self, upsampled, bypass):
"""
Crop y to the (h, w) of x and concat them.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self, x):
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;print('down1',down1.size()) #256
down2, out = self.down2(out) # ;print('down2',down2.size()) #128
down3, out = self.down3(out) # ;print('down3',down3.size()) #64
down4, out = self.down4(out) # ;print('down4',down4.size()) #32
down5, out = self.down5(out) # ;print('down5',down5.size()) #16
down6, out = self.down6(out) # ;print('down6',down6.size()) #8
pass # ;print('out ',out.size())
out = self.center(out)
out = self.up6(down6, out)
out = self.up5(down5, out)
out = self.up4(down4, out)
out = self.up3(down3, out)
out = self.up2(down2, out)
out = self.up1(down1, out)
# 1024
out = self.final_out(self.classify(out))
        out = torch.reshape(out, (1, self.output_nc, 256, 256))
return out
class NLayerDiscriminator(nn.Module):
"""Defines a PatchGAN discriminator"""
def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d):
"""Construct a PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
n_layers (int) -- the number of conv layers in the discriminator
norm_layer -- normalization layer
"""
super(NLayerDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
kw = 4
padw = 1
sequence = [nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw), nn.LeakyReLU(0.2, True)]
nf_mult = 1
nf_mult_prev = 1
for n in range(1, n_layers): # gradually increase the number of filters
nf_mult_prev = nf_mult
nf_mult = min(2 ** n, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
nf_mult_prev = nf_mult
nf_mult = min(2 ** n_layers, 8)
sequence += [
nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
norm_layer(ndf * nf_mult),
nn.LeakyReLU(0.2, True)
]
sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)] # output 1 channel prediction map
self.model = nn.Sequential(*sequence)
def forward(self, input):
"""Standard forward."""
return self.model(input)
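# Minimal usage sketch (assumption): with the default n_layers=3, the PatchGAN maps a
# 256x256 input (here a 6-channel pair of images concatenated along channels) to a
# 30x30 map of logits, one per overlapping image patch.
def _example_patchgan():
    netD = NLayerDiscriminator(input_nc=6, ndf=64, n_layers=3)
    pair = torch.randn(1, 6, 256, 256)
    return netD(pair)  # shape (1, 1, 30, 30)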
class PixelDiscriminator(nn.Module):
"""Defines a 1x1 PatchGAN discriminator (pixelGAN)"""
def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
"""Construct a 1x1 PatchGAN discriminator
Parameters:
input_nc (int) -- the number of channels in input images
ndf (int) -- the number of filters in the last conv layer
norm_layer -- normalization layer
"""
super(PixelDiscriminator, self).__init__()
if type(norm_layer) == functools.partial: # no need to use bias as BatchNorm2d has affine parameters
use_bias = norm_layer.func == nn.InstanceNorm2d
else:
use_bias = norm_layer == nn.InstanceNorm2d
self.net = [
nn.Conv2d(input_nc, ndf, kernel_size=1, stride=1, padding=0),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf, ndf * 2, kernel_size=1, stride=1, padding=0, bias=use_bias),
norm_layer(ndf * 2),
nn.LeakyReLU(0.2, True),
nn.Conv2d(ndf * 2, 1, kernel_size=1, stride=1, padding=0, bias=use_bias)]
self.net = nn.Sequential(*self.net)
def forward(self, input):
"""Standard forward."""
return self.net(input)
#%% Unet as Discriminator (DiffAugment helpers)
def DiffAugment(x, types=[]):
for p in types:
for f in AUGMENT_FNS[p]:
x = f(x)
return x.contiguous(memory_format = torch.contiguous_format)
def rand_brightness(x):
x = x + (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) - 0.5)
return x
def rand_saturation(x):
x_mean = x.mean(dim=1, keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) * 2) + x_mean
return x
def rand_contrast(x):
x_mean = x.mean(dim=[1, 2, 3], keepdim=True)
x = (x - x_mean) * (torch.rand(x.size(0), 1, 1, 1, dtype=x.dtype, device=x.device) + 0.5) + x_mean
return x
def rand_translation(x, ratio=0.125):
shift_x, shift_y = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
translation_x = torch.randint(-shift_x, shift_x + 1, size=[x.size(0), 1, 1], device=x.device)
translation_y = torch.randint(-shift_y, shift_y + 1, size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(x.size(2), dtype=torch.long, device=x.device),
torch.arange(x.size(3), dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + translation_x + 1, 0, x.size(2) + 1)
grid_y = torch.clamp(grid_y + translation_y + 1, 0, x.size(3) + 1)
x_pad = F.pad(x, [1, 1, 1, 1, 0, 0, 0, 0])
x = x_pad.permute(0, 2, 3, 1).contiguous()[grid_batch, grid_x, grid_y].permute(0, 3, 1, 2).contiguous(memory_format = torch.contiguous_format)
return x
def rand_cutout(x, ratio=0.5):
cutout_size = int(x.size(2) * ratio + 0.5), int(x.size(3) * ratio + 0.5)
offset_x = torch.randint(0, x.size(2) + (1 - cutout_size[0] % 2), size=[x.size(0), 1, 1], device=x.device)
offset_y = torch.randint(0, x.size(3) + (1 - cutout_size[1] % 2), size=[x.size(0), 1, 1], device=x.device)
grid_batch, grid_x, grid_y = torch.meshgrid(
torch.arange(x.size(0), dtype=torch.long, device=x.device),
torch.arange(cutout_size[0], dtype=torch.long, device=x.device),
torch.arange(cutout_size[1], dtype=torch.long, device=x.device),
)
grid_x = torch.clamp(grid_x + offset_x - cutout_size[0] // 2, min=0, max=x.size(2) - 1)
grid_y = torch.clamp(grid_y + offset_y - cutout_size[1] // 2, min=0, max=x.size(3) - 1)
mask = torch.ones(x.size(0), x.size(2), x.size(3), dtype=x.dtype, device=x.device)
mask[grid_batch, grid_x, grid_y] = 0
x = x * mask.unsqueeze(1)
return x
AUGMENT_FNS = {
'color': [rand_brightness, rand_saturation, rand_contrast],
'translation': [rand_translation],
'cutout': [rand_cutout],
}
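# Illustrative sketch (assumption): DiffAugment is applied to a batch right before it is fed
# to the discriminator; because all augmentations are differentiable, the same call can be
# used on real and generated batches without blocking generator gradients.
def _example_diffaugment(images):
    return DiffAugment(images, types=['color', 'translation', 'cutout'])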
def random_float(lo, hi):
return lo + (hi - lo) * random()
def random_crop_and_resize(tensor, scale):
b, c, h, _ = tensor.shape
new_width = int(h * scale)
delta = h - new_width
h_delta = int(random() * delta)
w_delta = int(random() * delta)
cropped = tensor[:, :, h_delta:(h_delta + new_width), w_delta:(w_delta + new_width)].clone()
return F.interpolate(cropped, size=(h, h), mode='bilinear')
def random_hflip(tensor, prob):
if prob > random():
return tensor
return torch.flip(tensor, dims=(3,))
class AugWrapper(nn.Module):
def __init__(self, D, image_size, types):
super().__init__()
self.D = D
self.types = types
def forward(self, images, prob = 0., detach = False):
if random() < prob:
images = random_hflip(images, prob=0.5)
images = DiffAugment(images, types=self.types)
if detach:
images.detach_()
return self.D(images), images
def leaky_relu(p=0.2):
return nn.LeakyReLU(p)
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return self.fn(x) + x
class Flatten(nn.Module):
def __init__(self, index):
super().__init__()
self.index = index
def forward(self, x):
return x.flatten(self.index)
class Rezero(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
self.g = nn.Parameter(torch.zeros(1))
def forward(self, x):
return self.fn(x) * self.g
def double_conv(chan_in, chan_out):
return nn.Sequential(
nn.Conv2d(chan_in, chan_out, 3, padding=1),
leaky_relu(),
nn.Conv2d(chan_out, chan_out, 3, padding=1),
leaky_relu()
)
class DownBlock(nn.Module):
def __init__(self, input_channels, filters, downsample=True):
super().__init__()
self.conv_res = nn.Conv2d(input_channels, filters, 1, stride = (2 if downsample else 1))
self.net = double_conv(input_channels, filters)
self.down = nn.Conv2d(filters, filters, 3, padding = 1, stride = 2) if downsample else None
def forward(self, x):
res = self.conv_res(x)
x = self.net(x)
unet_res = x
if self.down is not None:
x = self.down(x)
x = x + res
return x, unet_res
# one layer of self-attention and feedforward, for images
attn_and_ff = lambda chan: nn.Sequential(*[
Residual(Rezero(ImageLinearAttention(chan, norm_queries = True))),
Residual(Rezero(nn.Sequential(nn.Conv2d(chan, chan * 2, 1), leaky_relu(), nn.Conv2d(chan * 2, chan, 1))))
])
class UpBlock(nn.Module):
def __init__(self, input_channels, filters):
super().__init__()
self.conv_res = nn.ConvTranspose2d(input_channels // 2, filters, 1, stride = 2)
self.net = double_conv(input_channels, filters)
self.up = nn.Upsample(scale_factor = 2, mode='bilinear', align_corners=False)
self.input_channels = input_channels
self.filters = filters
def forward(self, x, res):
*_, h, w = x.shape
conv_res = self.conv_res(x, output_size = (h * 2, w * 2))
x = self.up(x)
x = torch.cat((x, res), dim=1)
x = self.net(x)
x = x + conv_res
return x
class UnetDiscriminator(nn.Module):
def __init__(self, image_size=256, network_capacity = 16, transparent = False, fmap_max = 256):
super().__init__()
num_layers = int(log2(image_size) - 3)
        num_init_filters = 2  # if not transparent else 4
blocks = []
filters = [num_init_filters] + [(network_capacity) * (2 ** i) for i in range(num_layers + 1)]
set_fmap_max = partial(min, fmap_max)
filters = list(map(set_fmap_max, filters))
filters[-1] = filters[-2]
chan_in_out = list(zip(filters[:-1], filters[1:]))
chan_in_out = list(map(list, chan_in_out))
print('Channels',chan_in_out)
down_blocks = []
attn_blocks = []
for ind, (in_chan, out_chan) in enumerate(chan_in_out):
num_layer = ind + 1
is_not_last = ind != (len(chan_in_out) - 1)
block = DownBlock(in_chan, out_chan, downsample = is_not_last)
down_blocks.append(block)
attn_fn = attn_and_ff(out_chan)
attn_blocks.append(attn_fn)
self.down_blocks = nn.ModuleList(down_blocks)
self.attn_blocks = nn.ModuleList(attn_blocks)
last_chan = filters[-1]
self.to_logit = nn.Sequential(
leaky_relu(),
nn.AvgPool2d(image_size // (2 ** num_layers)),
Flatten(1),
nn.Linear(last_chan, 1)
)
self.conv = double_conv(last_chan, last_chan)
dec_chan_in_out = chan_in_out[:-1][::-1]
self.up_blocks = nn.ModuleList(list(map(lambda c: UpBlock(c[1] * 2, c[0]), dec_chan_in_out)))
self.conv_out = nn.Conv2d(2, 1, 1)
def forward(self, x):
#print('Input shape:', x.shape)
b, *_ = x.shape
residuals = []
i=0
for (down_block, attn_block) in zip(self.down_blocks, self.attn_blocks):
#print('Step', i, x.shape)
i=i+1
x, unet_res = down_block(x)
residuals.append(unet_res)
if attn_block is not None:
x = attn_block(x)
x = self.conv(x) + x
enc_out = self.to_logit(x)
for (up_block, res) in zip(self.up_blocks, residuals[:-1][::-1]):
#print('in up blocK', x.shape)
x = up_block(x, res)
dec_out = self.conv_out(x)
return enc_out.squeeze(), dec_out
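# Minimal usage sketch (assumption): the U-Net discriminator returns both a global encoder
# logit and a per-pixel decoder logit map, matching the pair of outputs UnetGANLoss above
# consumes. The hard-coded num_init_filters means it expects 2-channel inputs.
def _example_unet_discriminator():
    netD = UnetDiscriminator(image_size=256, network_capacity=16, fmap_max=256)
    x = torch.randn(2, 2, 256, 256)
    enc_out, dec_out = netD(x)
    return enc_out.shape, dec_out.shape  # approximately (2,) and (2, 1, 256, 256)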
#%% SPADE RESNET
class SPADEGenerator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADEGenerator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 64
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 256
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = self.opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
#print('0,', x.shape)
x = self.head_0(x, seg)
#print('1,', x.shape)
x = self.up(x)
#print('2', x.shape)
x = self.G_middle_0(x, seg)
#print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
#print('4,', x.shape)
#x = self.G_middle_1(x, seg)
output_5 = x
#print('5,', x.shape)
x = self.up(x)
output_6 = x
#print('6,', x.shape)
x = self.up_0(x, seg)
#print('7,', x.shape)
x = self.up(x)
#print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
#print('9,', x.shape)
x = self.up(x)
#print('10,', x.shape)
x = self.up_2(x, seg)
#print('11,', x.shape)
output_11 = x
x = self.up(x)
# print('12,', x.shape)
x = self.up_3(x, seg)
#print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
#print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
# print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
#print('16,', x.shape)
return output_5,output_6,output_9,output_11,output_15
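# Note (assumption): unlike a standard SPADE generator that returns a single image, this
# variant returns five intermediate feature maps taken after selected upsampling/SPADE
# blocks; they are meant to be consumed by the UNet768PIXSPADE decoder defined further below.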
#%% spade8
class SPADE8Generator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADE8Generator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 8
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 256
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
sw = self.opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
#print('0,', x.shape)
x = self.head_0(x, seg)
#print('1,', x.shape)
x = self.up(x)
#print('2', x.shape)
x = self.G_middle_0(x, seg)
#print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
#print('4,', x.shape)
x = self.G_middle_1(x, seg)
output_5 = x
#print('5,', x.shape)
x = self.up(x)
output_6 = x
#print('6,', x.shape)
x = self.up_0(x, seg)
#print('7,', x.shape)
x = self.up(x)
#print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
#print('9,', x.shape)
x = self.up(x)
#print('10,', x.shape)
x = self.up_2(x, seg)
#print('11,', x.shape)
output_11 = x
        # this block (up to the 'til here' marker) can be removed
x = self.up(x)
#print('12,', x.shape)
x = self.up_3(x, seg)
#print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
#print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
#print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
#print('16,', x.shape)
        # til here
return output_5,output_6,output_9,output_11,output_15
#%%
class SPADE6Generator(BaseNetwork):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False,opt=None):
super(SPADE6Generator, self).__init__()
self.opt = opt
self.opt.num_upsampling_layers = 'normal'
self.opt.norm_G = 'spectralspadesyncbatch3x3'
self.opt.ngf = 6
self.opt.semantic_nc = 2
self.opt.use_vae = False
self.opt.crop_size = 300
self.opt.normG = 'spectralinstance'
self.opt.aspect_ratio = 1.0
nf = self.opt.ngf
opt = self.opt
self.sw, self.sh = self.compute_latent_vector_size(opt)
self.fc = nn.Conv2d(self.opt.semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, opt)
self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, opt)
self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, opt)
self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, opt)
self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, opt)
final_nc = nf
if opt.num_upsampling_layers == 'most':
self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, opt)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, 3, 3, padding=1)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, opt):
if opt.num_upsampling_layers == 'normal':
num_up_layers = 5
elif opt.num_upsampling_layers == 'more':
num_up_layers = 6
elif opt.num_upsampling_layers == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
opt.num_upsampling_layers)
        sw = 10  # self.opt.crop_size // (2**num_up_layers)
sh = round(sw / opt.aspect_ratio)
return sw, sh
def forward(self, input, z=None):
seg = input
if self.opt.use_vae:
# we sample z from unit normal and reshape the tensor
if z is None:
z = torch.randn(input.size(0), self.opt.z_dim,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.opt.ngf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
print('0,', x.shape)
x = self.head_0(x, seg)
print('1,', x.shape)
x = self.up(x)
print('2', x.shape)
x = self.G_middle_0(x, seg)
print('3,', x.shape)
if self.opt.num_upsampling_layers == 'more' or \
self.opt.num_upsampling_layers == 'most':
x = self.up(x)
print('4,', x.shape)
x = self.G_middle_1(x, seg)
output_5 = x
print('5,', x.shape)
x = self.up(x)
output_6 = x
print('6,', x.shape)
x = self.up_0(x, seg)
print('7,', x.shape)
x = self.up(x)
print('8,', x.shape)
x = self.up_1(x, seg)
output_9 = x
print('9,', x.shape)
x = self.up(x)
print('10,', x.shape)
x = self.up_2(x, seg)
print('11,', x.shape)
output_11 = x
x = self.up(x)
print('12,', x.shape)
x = self.up_3(x, seg)
print('13,', x.shape)
if self.opt.num_upsampling_layers == 'most':
x = self.up(x)
x = self.up_4(x, seg)
print('14,', x.shape)
x = self.conv_img(F.leaky_relu(x, 2e-1))
print('15,', x.shape)
output_15 = x
#x = F.tanh(x)
print('16,', x.shape)
return output_5,output_6,output_9,output_11,output_15
#%% For the PIX2SPADE
class UNet768PIXSPADE(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768PIXSPADE, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
print('UNET 768 SPADE')
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256+1024, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128+1024, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64+256, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24+128, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24+3, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop y to the (h, w) of x and concat them.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self,x, input_to_net):
#print(input_to_net.shape)
output_5,output_6,output_9,output_11,output_15 = input_to_net
#print(x.shape)
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
out = torch.cat((out,output_5 ),1 )
#print('2',out.shape)
out = self.up4(down4, out)
out = torch.cat((out,output_6 ),1 )
#print('3',out.shape)
out = self.up3(down3, out)
out = torch.cat((out,output_9 ),1 )
#print('4',out.shape)
out = self.up2(down2, out)
out = torch.cat((out,output_11 ),1 )
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
out = torch.cat((out,output_15 ),1 )
#print('6',out.shape)
out = self.final_out(self.classify(out))
        out = torch.reshape(out, (-1, self.output_nc, 256, 256))
return out
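# Illustrative call pattern (assumption): the five SPADE feature maps are produced first and
# then fused into this U-Net's decoder, e.g.
#   spade_feats = spade_generator(segmap)            # (output_5, output_6, output_9, output_11, output_15)
#   fake = pix_spade_unet(gray_input, spade_feats)   # hypothetical variable names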
#%%Unet for spade8
class UNet768PIXSPADE8SM(torch.nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf, norm_layer=nn.BatchNorm2d, use_dropout=False):
super(UNet768PIXSPADE8SM, self).__init__()
# def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=nn.BatchNorm2d, use_dropout=False):
# C, H, W = in_shape
# assert(C==3)
print('UNET 768 SPADE')
self.output_nc = output_nc
# 1024
self.down1 = StackEncoder(1, 24, kernel_size=3) # Channels: 1 in, 24 out; Image size: 300 in, 150 out
self.down2 = StackEncoder(24, 64, kernel_size=3) # Channels: 24 in, 64 out; Image size: 150 in, 75 out
self.down3 = StackEncoder(64, 128, kernel_size=3) # Channels: 64 in, 128 out; Image size: 75 in, 38 out
self.down4 = StackEncoder(128, 256, kernel_size=3) # Channels: 128 in, 256 out; Image size: 38 in, 19 out
self.down5 = StackEncoder(256, 512, kernel_size=3) # Channels: 256 in, 512 out; Image size: 19 in, 10 out
self.down6 = StackEncoder(512, 768, kernel_size=3) # Channels: 512 in, 768 out; Image size: 10 in, 5 out
self.center = torch.nn.Sequential(
ConvBnRelu2d(768, 768, kernel_size=3, padding=1, stride=1), # Channels: 768 in, 768 out; Image size: 5 in, 5 out
)
# x_big_channels, x_channels, y_channels
self.up6 = StackDecoder(768, 768, 512, kernel_size=3) # Channels: 768+768 = 1536 in, 512 out; Image size: 5 in, 10 out
self.up5 = StackDecoder(512, 512, 256, kernel_size=3) # Channels: 512+512 = 1024 in, 256 out; Image size: 10 in, 19 out
self.up4 = StackDecoder(256+128, 256, 128, kernel_size=3) # Channels: 256+256 = 512 in, 128 out; Image size: 19 in, 38 out
self.up3 = StackDecoder(128+128, 128, 64, kernel_size=3) # Channels: 128+128 = 256 in, 64 out; Image size: 38 in, 75 out
self.up2 = StackDecoder(64+32, 64, 24, kernel_size=3) # Channels: 64+64 = 128 in, 24 out; Image size: 75 in, 150 out
self.up1 = StackDecoder(24+16, 24, 24, kernel_size=3) # Channels: 24+24 = 48 in, 24 out; Image size: 150 in, 300 out
self.classify = torch.nn.Conv2d(24, output_nc, kernel_size=1, padding=0, stride=1, bias=True) # Channels: 24 in, 1 out; Image size: 300 in, 300 out
self.final_out = torch.nn.Tanh()
def _crop_concat(self, upsampled, bypass):
"""
Crop y to the (h, w) of x and concat them.
Used for the expansive path.
Returns:
The concatenated tensor
"""
c = (bypass.size()[2] - upsampled.size()[2]) // 2
bypass = torch.nn.functional.pad(bypass, (-c, -c, -c, -c))
return torch.cat((upsampled, bypass), 1)
def forward(self,x, input_to_net):
#print(input_to_net.shape)
output_5,output_6,output_9,output_11,output_15 = input_to_net
#print(x.shape)
out = x # ;print('x ',x.size())
#
down1, out = self.down1(out) ##;
#print('down1',down1.shape) #256
down2, out = self.down2(out) # ;
#print('down2',down2.shape) #128
down3, out = self.down3(out) # ;
#print('down3',down3.shape) #64
down4, out = self.down4(out) # ;
#print('down4',down4.shape) #32
down5, out = self.down5(out) # ;
#print('down5',down5.shape) #16
down6, out = self.down6(out) # ;
#print('down6',down6.shape) #8
pass # ;
#print('out ',out.shape)
out = self.center(out)
#print('0',out.shape)
out = self.up6(down6, out)
#print('1',out.shape)
out = self.up5(down5, out)
out = torch.cat((out,output_5 ),1 )
#print('2',out.shape)
out = self.up4(down4, out)
out = torch.cat((out,output_6 ),1 )
#print('3',out.shape)
out = self.up3(down3, out)
out = torch.cat((out,output_9 ),1 )
#print('4',out.shape)
out = self.up2(down2, out)
out = torch.cat((out,output_11 ),1 )
#print('5',out.shape)
out = self.up1(down1, out)
# 1024
#out = torch.cat((out,output_15 ),1 )
#print('6',out.shape)
out = self.final_out(self.classify(out))
        out = torch.reshape(out, (-1, self.output_nc, 256, 256))
return out
test_pointer.py
import unittest
import jref.pointer as error
from jref.pointer import Pointer
class TestPointer(unittest.TestCase):
def setUp(self):
self.sentinel = object()
def check_pointer_is_sentinel(self, pointer, document):
self.check_pointer_equal(document, pointer, self.sentinel)
def check_pointer_equal(self, document, pointer, value):
self.assertEqual(Pointer.resolve_in(pointer, document), value)
# test that starting slash in non-empty pointer is optional
if (len(pointer) > 1
and pointer[0] == '/'):
self.assertEqual(Pointer.resolve_in(pointer[1:], document), value)
def test_pointer_resolve_in_can_be_called_as_an_instance_method(self):
self.assertEqual(
Pointer('key').resolve_in({'key': self.sentinel}), self.sentinel)
        self.assertEqual(
            Pointer('key').resolve_in(document={'key': self.sentinel}),
            self.sentinel)
    def test_pointer_resolve_in_can_be_called_as_a_static_method(self):
self.assertEqual(
Pointer.resolve_in('key', {'key': self.sentinel}), self.sentinel)
self.assertEqual(
Pointer.resolve_in('key', document={'key': self.sentinel}),
self.sentinel)
def test_an_empty_pointer_resolves_to_the_document(self):
self.check_pointer_is_sentinel('', document=self.sentinel)
def test_empty_root_resolves_to_empty_key(self):
self.check_pointer_is_sentinel('/', document={'': self.sentinel})
def test_it_can_access_a_map_item_by_key(self):
doc = { 'key': self.sentinel }
self.check_pointer_is_sentinel('/key', doc)
def test_it_can_access_nested_map_items_by_key(self):
doc = { 'nested': { 'key': self.sentinel } }
self.check_pointer_is_sentinel('/nested/key', doc)
def test_it_can_access_array_element_by_index(self):
doc = [ 1, 2, self.sentinel, 4, 5 ]
self.check_pointer_is_sentinel('/2', doc)
def test_it_handles_complex_nesting(self):
doc1 = {
'a': [
1, 2, {
'c': [ 3, 4 ],
'd': 5,
},
],
'b': {
'f': [ 6, 7, 8 ],
},
}
self.check_pointer_equal(doc1, '/a/0', 1)
self.check_pointer_equal(doc1, '/a/1', 2)
self.check_pointer_equal(doc1, '/a/2/c/0', 3)
self.check_pointer_equal(doc1, '/a/2/c/1', 4)
self.check_pointer_equal(doc1, '/a/2/d', 5)
self.check_pointer_equal(doc1, '/b/f/0', 6)
self.check_pointer_equal(doc1, '/b/f/1', 7)
self.check_pointer_equal(doc1, '/b/f/2', 8)
doc2 = [
1, 2, {
'a': 3,
'b': {
'c': 4,
'd': [ 5 ],
},
},
]
self.check_pointer_equal(doc2, '/0', 1)
self.check_pointer_equal(doc2, '/1', 2)
self.check_pointer_equal(doc2, '/2/a', 3)
self.check_pointer_equal(doc2, '/2/b/c', 4)
self.check_pointer_equal(doc2, '/2/b/d/0', 5)
def test_it_supports_numerical_keys(self):
self.check_pointer_is_sentinel('/0', document={'0': self.sentinel})
self.check_pointer_is_sentinel('/1', document={'1': self.sentinel})
self.check_pointer_is_sentinel('/999', document={'999': self.sentinel})
def test_it_supports_dash_as_a_map_key(self):
self.check_pointer_is_sentinel('/-', document={'-': self.sentinel})
def test_it_raises_an_error_for_dash_as_an_array_index(self):
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('/-', document=[])
with self.assertRaises(error.DashArrayIndexNotSupported):
Pointer.resolve_in('-', document=[])
def test_it_raises_an_error_for_array_index_out_of_range(self):
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('/5', document=[])
with self.assertRaises(error.IndexOutOfRange):
Pointer.resolve_in('5', document=[])
def test_it_raises_an_error_for_non_numeric_array_index(self):
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('/key', document=[])
with self.assertRaises(error.InvalidArrayIndex):
Pointer.resolve_in('key', document=[])
def test_it_raises_an_error_if_key_not_in_document(self):
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('/key', document={})
with self.assertRaises(error.MemberNotDefined):
Pointer.resolve_in('key', document={})
def test_it_recognizes_tilde_escapes(self):
doc = {
'a~b': 1,
'ab~': 2,
'~ab': 3,
'a/b': 4,
'ab/': 5,
'/ab': 6,
'~/~': 7,
'/~/': 8,
'~0': 9,
'~1': 10,
}
self.check_pointer_equal(doc, '/a~0b', 1)
self.check_pointer_equal(doc, '/ab~0', 2)
self.check_pointer_equal(doc, '/~0ab', 3)
self.check_pointer_equal(doc, '/a~1b', 4)
self.check_pointer_equal(doc, '/ab~1', 5)
self.check_pointer_equal(doc, '/~1ab', 6)
self.check_pointer_equal(doc, '/~0~1~0', 7)
self.check_pointer_equal(doc, '/~1~0~1', 8)
self.check_pointer_equal(doc, '/~00', 9)
self.check_pointer_equal(doc, '/~01', 10)
def test_it_raises_an_error_on_unrecognized_escape_sequences(self):
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('/~2', document={})
with self.assertRaises(error.UnrecognizedEscapeSequence):
Pointer.resolve_in('~2', document={})
def test_it_raises_an_error_on_unescaped_tilde(self):
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('/~', document={})
with self.assertRaises(error.UnescapedTilde):
Pointer.resolve_in('~', document={})
def test_it_raises_an_error_if_unable_to_resolve_token(self):
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('/key', document=object())
with self.assertRaises(error.UnreferenceableValue):
Pointer.resolve_in('key', document=object())
def test_it_offers_support_for_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
value = LazyValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
self.assertEqual(Pointer.resolve_in('/c', value), 3)
def test_it_offers_support_for_recursive_lazy_loaded_values(self):
class LazyValue:
def __lazy_eval__(self):
return {'a': 1, 'b': 2, 'c': 3}
class EvenLazierValue:
def __lazy_eval__(self):
return LazyValue()
value = EvenLazierValue()
self.assertEqual(Pointer.resolve_in('/a', value), 1)
self.assertEqual(Pointer.resolve_in('/b', value), 2)
        self.assertEqual(Pointer.resolve_in('/c', value), 3)
onepb_test.go
// Code generated by protoc-gen-gogo.
// source: combos/unsafemarshaler/one.proto
// DO NOT EDIT!
/*
Package one is a generated protocol buffer package.
It is generated from these files:
combos/unsafemarshaler/one.proto
It has these top-level messages:
Subby
SampleOneOf
*/
package one
import testing "testing"
import math_rand "math/rand"
import time "time"
import unsafe "unsafe"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
import fmt "fmt"
import go_parser "go/parser"
import proto "github.com/gogo/protobuf/proto"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
func TestSubbyProto(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Subby{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestSubbyMarshalTo(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Subby{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestSampleOneOfProto(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &SampleOneOf{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}
func TestSampleOneOfMarshalTo(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &SampleOneOf{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestSubbyJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Subby{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestSampleOneOfJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &SampleOneOf{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestSubbyProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &Subby{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestSubbyProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &Subby{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestSampleOneOfProtoText(t *testing.T) |
func TestSampleOneOfProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &SampleOneOf{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}
func TestOneDescription(t *testing.T) {
OneDescription()
}
func TestSubbyVerboseEqual(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSubby(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &Subby{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestSampleOneOfVerboseEqual(t *testing.T) {
var bigendian uint32 = 0x01020304
if *(*byte)(unsafe.Pointer(&bigendian)) == 1 {
t.Skip("unsafe does not work on big endian architectures")
}
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSampleOneOf(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
panic(err)
}
msg := &SampleOneOf{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
panic(err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("%#v !VerboseEqual %#v, since %v", msg, p, err)
}
}
func TestSubbyGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSubby(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestSampleOneOfGoString(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSampleOneOf(popr, false)
s1 := p.GoString()
s2 := fmt.Sprintf("%#v", p)
if s1 != s2 {
t.Fatalf("GoString want %v got %v", s1, s2)
}
_, err := go_parser.ParseExpr(s1)
if err != nil {
panic(err)
}
}
func TestSubbySize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSubby(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func TestSampleOneOfSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}
func TestSubbyStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSubby(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
func TestSampleOneOfStringer(t *testing.T) {
popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano()))
p := NewPopulatedSampleOneOf(popr, false)
s1 := p.String()
s2 := fmt.Sprintf("%v", p)
if s1 != s2 {
t.Fatalf("String want %v got %v", s1, s2)
}
}
//These tests are generated by github.com/gogo/protobuf/plugin/testgen
| {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedSampleOneOf(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &SampleOneOf{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if err := p.VerboseEqual(msg); err != nil {
t.Fatalf("seed = %d, %#v !VerboseProto %#v, since %v", seed, msg, p, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
} |
619.go | package main
|
func main() {
fmt.Println("teach robotframework")
} | import "fmt" |
kraken_types.go | package kraken
import "github.com/thrasher-/gocryptotrader/currency"
// TimeResponse type
type TimeResponse struct {
Unixtime int64 `json:"unixtime"`
Rfc1123 string `json:"rfc1123"`
}
// Asset holds asset information
type Asset struct {
Altname string `json:"altname"`
AclassBase string `json:"aclass_base"`
Decimals int `json:"decimals"`
DisplayDecimals int `json:"display_decimals"`
}
// AssetPairs holds asset pair information
type AssetPairs struct {
Altname string `json:"altname"`
AclassBase string `json:"aclass_base"`
Base string `json:"base"`
AclassQuote string `json:"aclass_quote"`
Quote string `json:"quote"`
Lot string `json:"lot"`
PairDecimals int `json:"pair_decimals"`
LotDecimals int `json:"lot_decimals"`
LotMultiplier int `json:"lot_multiplier"`
LeverageBuy []int `json:"leverage_buy"`
LeverageSell []int `json:"leverage_sell"`
Fees [][]float64 `json:"fees"`
FeesMaker [][]float64 `json:"fees_maker"`
FeeVolumeCurrency string `json:"fee_volume_currency"`
MarginCall int `json:"margin_call"`
MarginStop int `json:"margin_stop"`
}
// Ticker is a standard ticker type
type Ticker struct {
Ask float64
Bid float64
Last float64
Volume float64
VWAP float64
Trades int64
Low float64
High float64
Open float64
}
// Tickers stores a map of tickers
type Tickers map[string]Ticker
// TickerResponse holds ticker information before it's put into the Ticker struct
type TickerResponse struct {
Ask []string `json:"a"`
Bid []string `json:"b"`
Last []string `json:"c"`
Volume []string `json:"v"`
VWAP []string `json:"p"`
Trades []int64 `json:"t"`
Low []string `json:"l"`
High []string `json:"h"`
Open string `json:"o"`
}
// OpenHighLowClose contains ticker event information
type OpenHighLowClose struct {
Time float64
Open float64
High float64
Low float64
Close float64
Vwap float64
Volume float64
Count float64
}
// RecentTrades holds recent trade data
type RecentTrades struct {
Price float64
Volume float64
Time float64
BuyOrSell string
MarketOrLimit string
Miscellaneous interface{}
}
// OrderbookBase stores the orderbook price and amount data
type OrderbookBase struct {
Price float64
Amount float64
}
// Orderbook stores the bids and asks orderbook data
type Orderbook struct {
Bids []OrderbookBase
Asks []OrderbookBase
}
// Spread holds the spread between trades
type Spread struct {
Time float64
Bid float64
Ask float64
}
// TradeBalanceOptions type
type TradeBalanceOptions struct {
Aclass string
Asset string
}
// TradeBalanceInfo type
type TradeBalanceInfo struct {
EquivalentBalance float64 `json:"eb,string"` // combined balance of all currencies
TradeBalance float64 `json:"tb,string"` // combined balance of all equity currencies
MarginAmount float64 `json:"m,string"` // margin amount of open positions
Net float64 `json:"n,string"` // unrealized net profit/loss of open positions
Equity float64 `json:"e,string"` // trade balance + unrealized net profit/loss
FreeMargin float64 `json:"mf,string"` // equity - initial margin (maximum margin available to open new positions)
MarginLevel float64 `json:"ml,string"` // (equity / initial margin) * 100
}
// OrderInfo type
type OrderInfo struct {
RefID string `json:"refid"`
UserRef int32 `json:"userref"`
Status string `json:"status"`
OpenTm float64 `json:"opentm"`
StartTm float64 `json:"starttm"`
ExpireTm float64 `json:"expiretm"`
Descr struct {
Pair string `json:"pair"`
Type string `json:"type"`
OrderType string `json:"ordertype"`
Price float64 `json:"price,string"`
Price2 float64 `json:"price2,string"`
Leverage string `json:"leverage"`
Order string `json:"order"`
Close string `json:"close"`
} `json:"descr"`
Vol float64 `json:"vol,string"`
VolExec float64 `json:"vol_exec,string"`
Cost float64 `json:"cost,string"`
Fee float64 `json:"fee,string"`
Price float64 `json:"price,string"`
StopPrice float64 `json:"stopprice,string"`
LimitPrice float64 `json:"limitprice,string"`
Misc string `json:"misc"`
Oflags string `json:"oflags"`
Trades []string `json:"trades"`
}
// OpenOrders type
type OpenOrders struct {
Open map[string]OrderInfo `json:"open"`
Count int64 `json:"count"`
}
// ClosedOrders type
type ClosedOrders struct {
Closed map[string]OrderInfo `json:"closed"`
Count int64 `json:"count"`
}
// GetClosedOrdersOptions type
type GetClosedOrdersOptions struct {
Trades bool
UserRef int32
Start string
End string
Ofs int64
CloseTime string
}
// OrderInfoOptions type
type OrderInfoOptions struct {
Trades bool
UserRef int32
}
// GetTradesHistoryOptions type
type GetTradesHistoryOptions struct {
Type string
Trades bool
Start string
End string
Ofs int64
}
// TradesHistory type
type TradesHistory struct {
Trades map[string]TradeInfo `json:"trades"`
Count int64 `json:"count"`
}
// TradeInfo type
type TradeInfo struct {
OrderTxID string `json:"ordertxid"`
Pair string `json:"pair"`
Time float64 `json:"time"`
Type string `json:"type"`
OrderType string `json:"ordertype"`
Price float64 `json:"price,string"`
Cost float64 `json:"cost,string"`
Fee float64 `json:"fee,string"`
Vol float64 `json:"vol,string"`
Margin float64 `json:"margin,string"`
Misc string `json:"misc"`
PosTxID string `json:"postxid"`
Cprice float64 `json:"cprice,string"`
Cfee float64 `json:"cfee,string"`
Cvol float64 `json:"cvol,string"`
Cmargin float64 `json:"cmargin,string"`
Trades []string `json:"trades"`
PosStatus string `json:"posstatus"`
}
// Position holds the opened position
type Position struct {
Ordertxid string `json:"ordertxid"`
Pair string `json:"pair"`
Time float64 `json:"time"`
Type string `json:"type"`
OrderType string `json:"ordertype"`
Cost float64 `json:"cost,string"`
Fee float64 `json:"fee,string"`
Vol float64 `json:"vol,string"`
VolClosed float64 `json:"vol_closed,string"`
Margin float64 `json:"margin,string"`
Rollovertm int64 `json:"rollovertm,string"`
Misc string `json:"misc"`
Oflags string `json:"oflags"`
PosStatus string `json:"posstatus"`
Net string `json:"net"`
Terms string `json:"terms"`
}
// GetLedgersOptions type
type GetLedgersOptions struct {
Aclass string
Asset string
Type string
Start string
End string
Ofs int64
}
// Ledgers type
type Ledgers struct {
Ledger map[string]LedgerInfo `json:"ledger"`
Count int64 `json:"count"`
}
// LedgerInfo type
type LedgerInfo struct {
Refid string `json:"refid"`
Time float64 `json:"time"`
Type string `json:"type"`
Aclass string `json:"aclass"`
Asset string `json:"asset"`
Amount float64 `json:"amount,string"`
Fee float64 `json:"fee,string"`
Balance float64 `json:"balance,string"`
}
// TradeVolumeResponse type
type TradeVolumeResponse struct {
Currency string `json:"currency"`
Volume float64 `json:"volume,string"`
Fees map[string]TradeVolumeFee `json:"fees"`
FeesMaker map[string]TradeVolumeFee `json:"fees_maker"`
}
// TradeVolumeFee type
type TradeVolumeFee struct {
Fee float64 `json:"fee,string"`
MinFee float64 `json:"minfee,string"`
MaxFee float64 `json:"maxfee,string"`
NextFee float64 `json:"nextfee,string"`
NextVolume float64 `json:"nextvolume,string"`
TierVolume float64 `json:"tiervolume,string"`
}
// AddOrderResponse type
type AddOrderResponse struct {
Description OrderDescription `json:"descr"`
TransactionIds []string `json:"txid"`
}
// WithdrawInformation is used to check withdrawal fees
type WithdrawInformation struct {
Method string `json:"method"`
Limit float64 `json:"limit,string"`
Fee float64 `json:"fee,string"`
}
// DepositMethods is used to check deposit fees
type DepositMethods struct {
Method string `json:"method"`
Limit interface{} `json:"limit"` // If no limit amount, this comes back as boolean
Fee float64 `json:"fee,string"`
AddressSetupFee float64 `json:"address-setup-fee,string"`
}
// OrderDescription represents an orders description
type OrderDescription struct {
Close string `json:"close"`
Order string `json:"order"`
}
// AddOrderOptions represents the AddOrder options
type AddOrderOptions struct {
UserRef int32
Oflags string
StartTm string
ExpireTm string
CloseOrderType string
ClosePrice float64
ClosePrice2 float64
Validate bool
}
// CancelOrderResponse type
type CancelOrderResponse struct {
Count int64 `json:"count"`
Pending interface{} `json:"pending"`
}
// DepositFees is the list of predefined deposit fees
// Prone to change
var DepositFees = map[currency.Code]float64{
currency.XTZ: 0.05,
}
// WithdrawalFees is the large list of predefined withdrawal fees
// Prone to change
var WithdrawalFees = map[currency.Code]float64{
currency.ZUSD: 5,
currency.ZEUR: 5,
currency.USD: 5,
currency.EUR: 5,
currency.REP: 0.01,
currency.XXBT: 0.0005,
currency.BTC: 0.0005,
currency.XBT: 0.0005,
currency.BCH: 0.0001,
currency.ADA: 0.3,
currency.DASH: 0.005,
currency.XDG: 2,
currency.EOS: 0.05,
currency.ETH: 0.005,
currency.ETC: 0.005,
currency.GNO: 0.005,
currency.ICN: 0.2,
currency.LTC: 0.001,
currency.MLN: 0.003,
currency.XMR: 0.05,
currency.QTUM: 0.01,
currency.XRP: 0.02,
currency.XLM: 0.00002,
currency.USDT: 5,
currency.XTZ: 0.05,
currency.ZEC: 0.0001,
}
// DepositAddress defines a deposit address
type DepositAddress struct {
Address string `json:"address"`
ExpireTime int64 `json:"expiretm,string"`
New bool `json:"new"`
}
// WithdrawStatusResponse defines a withdrawal status response
type WithdrawStatusResponse struct {
Method string `json:"method"`
Aclass string `json:"aclass"`
Asset string `json:"asset"`
Refid string `json:"refid"`
TxID string `json:"txid"`
Info string `json:"info"`
Amount float64 `json:"amount,string"`
Fee float64 `json:"fee,string"`
Time float64 `json:"time"`
Status string `json:"status"`
}
// WebsocketSubscriptionEventRequest handles WS subscription events
type WebsocketSubscriptionEventRequest struct {
Event string `json:"event"` // subscribe
RequestID int64 `json:"reqid,omitempty"` // Optional, client originated ID reflected in response message.
Pairs []string `json:"pair"` // Array of currency pairs (pair1,pair2,pair3).
Subscription WebsocketSubscriptionData `json:"subscription,omitempty"`
}
// WebsocketUnsubscribeByChannelIDEventRequest handles WS unsubscribe events
type WebsocketUnsubscribeByChannelIDEventRequest struct {
Event string `json:"event"` // unsubscribe
RequestID int64 `json:"reqid,omitempty"` // Optional, client originated ID reflected in response message.
Pairs []string `json:"pair,omitempty"` // Array of currency pairs (pair1,pair2,pair3).
ChannelID int64 `json:"channelID,omitempty"`
}
// WebsocketSubscriptionData contains details on WS channel
type WebsocketSubscriptionData struct {
Name string `json:"name,omitempty"` // ticker|ohlc|trade|book|spread|*, * for all (ohlc interval value is 1 if all channels subscribed)
Interval int64 `json:"interval,omitempty"` // Optional - Time interval associated with ohlc subscription in minutes. Default 1. Valid Interval values: 1|5|15|30|60|240|1440|10080|21600
Depth int64 `json:"depth,omitempty"` // Optional - depth associated with book subscription in number of levels each side, default 10. Valid Options are: 10, 25, 100, 500, 1000
} |
// WebsocketEventResponse holds all data response types
type WebsocketEventResponse struct {
Event string `json:"event"`
Status string `json:"status"`
Pair currency.Pair `json:"pair,omitempty"`
RequestID int64 `json:"reqid,omitempty"` // Optional, client originated ID reflected in response message.
Subscription WebsocketSubscriptionResponseData `json:"subscription,omitempty"`
ChannelName string `json:"channelName,omitempty"`
WebsocketSubscriptionEventResponse
WebsocketStatusResponse
WebsocketErrorResponse
}
// WebsocketSubscriptionEventResponse defines a websocket socket event response
type WebsocketSubscriptionEventResponse struct {
ChannelID int64 `json:"channelID"`
}
// WebsocketSubscriptionResponseData defines a websocket subscription response
type WebsocketSubscriptionResponseData struct {
Name string `json:"name"`
}
// WebsocketStatusResponse defines a websocket status response
type WebsocketStatusResponse struct {
ConnectionID float64 `json:"connectionID"`
Version string `json:"version"`
}
// WebsocketDataResponse defines a websocket data type
type WebsocketDataResponse []interface{}
// WebsocketErrorResponse defines a websocket error response
type WebsocketErrorResponse struct {
ErrorMessage string `json:"errorMessage"`
}
// WebsocketChannelData holds relevant data for channels to identify what we're doing
type WebsocketChannelData struct {
Subscription string
Pair currency.Pair
ChannelID int64
} | |
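The WebsocketSubscriptionData comments above spell out the valid channel names, OHLC intervals, and book depths. As a rough illustration only (not part of kraken_types.go), the sketch below builds a subscription request from those types and serialises it with encoding/json; the pair names, request ID, and function name are made up, and the code assumes it compiles inside the kraken package with "encoding/json" imported.
// Hypothetical usage sketch; assumes the kraken package types above are in scope.
func buildOHLCSubscription() ([]byte, error) {
	req := WebsocketSubscriptionEventRequest{
		Event:     "subscribe",
		RequestID: 42,                             // illustrative client-originated ID
		Pairs:     []string{"XBT/USD", "ETH/USD"}, // illustrative pairs
		Subscription: WebsocketSubscriptionData{
			Name:     "ohlc",
			Interval: 5, // minutes; valid values are listed in the field comment above
		},
	}
	return json.Marshal(req) // requires "encoding/json" in the importing file
}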
testsuite.rs | // Copyright (c) The Dijets Core Contributors
// SPDX-License-Identifier: Apache-2.0
use anyhow::Result;
use dijets_types::account_address::AccountAddress;
use functional_tests::{
compiler::{Compiler, ScriptOrModule},
testsuite,
};
use ir_to_bytecode::{
compiler::{compile_module, compile_script},
parser::parse_script_or_module,
};
use move_binary_format::CompiledModule;
use move_core_types::language_storage::ModuleId;
use move_ir_types::ast;
use move_symbol_pool::Symbol;
use std::{collections::HashMap, path::Path};
struct IRCompiler {
deps: HashMap<ModuleId, CompiledModule>,
}
impl IRCompiler {
fn new(dijets_framework_modules: Vec<CompiledModule>) -> Self {
let deps = dijets_framework_modules
.into_iter()
.map(|m| (m.self_id(), m))
.collect();
IRCompiler { deps }
}
}
impl Compiler for IRCompiler {
/// Compile a transaction script or module.
fn compile<Logger: FnMut(String)>(
&mut self,
mut log: Logger,
address: AccountAddress,
input: &str,
) -> Result<ScriptOrModule> {
Ok(
match parse_script_or_module(Symbol::from("unused_file_name"), input)? {
ast::ScriptOrModule::Script(parsed_script) => {
log(format!("{}", &parsed_script));
ScriptOrModule::Script(
None,
compile_script(Some(address), parsed_script, self.deps.values())?.0,
)
}
ast::ScriptOrModule::Module(parsed_module) => {
log(format!("{}", &parsed_module));
let module = compile_module(address, parsed_module, self.deps.values())?.0;
self.deps.insert(module.self_id(), module.clone());
ScriptOrModule::Module(module)
}
},
)
}
fn use_compiled_genesis(&self) -> bool {
true
}
}
fn run_test(path: &Path) -> datatest_stable::Result<()> {
testsuite::functional_tests(
IRCompiler::new(dijets_framework_releases::current_modules().to_vec()),
path,
)
} |
datatest_stable::harness!(run_test, "tests", r".*\.mvir"); | |
remote.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import 'vs/css!./media/remoteViewlet';
import * as nls from 'vs/nls';
import * as dom from 'vs/base/browser/dom';
import { URI } from 'vs/base/common/uri';
import { IWorkbenchLayoutService } from 'vs/workbench/services/layout/browser/layoutService';
import { ITelemetryService } from 'vs/platform/telemetry/common/telemetry';
import { IWorkspaceContextService } from 'vs/platform/workspace/common/workspace';
import { IStorageService } from 'vs/platform/storage/common/storage';
import { IConfigurationService } from 'vs/platform/configuration/common/configuration';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { IThemeService } from 'vs/platform/theme/common/themeService';
import { IContextMenuService } from 'vs/platform/contextview/browser/contextView';
import { IExtensionService } from 'vs/workbench/services/extensions/common/extensions';
import { FilterViewPaneContainer } from 'vs/workbench/browser/parts/views/viewsViewlet';
import { VIEWLET_ID } from 'vs/workbench/contrib/remote/common/remote.contribution';
import { IContextKeyService } from 'vs/platform/contextkey/common/contextkey';
import { IViewDescriptor, IViewsRegistry, Extensions, ViewContainerLocation, IViewContainersRegistry, IViewDescriptorService } from 'vs/workbench/common/views';
import { Registry } from 'vs/platform/registry/common/platform';
import { IExtensionDescription } from 'vs/platform/extensions/common/extensions';
import { IOpenerService } from 'vs/platform/opener/common/opener';
import { IQuickInputService } from 'vs/platform/quickinput/common/quickInput';
import { ICommandService } from 'vs/platform/commands/common/commands';
import { ShowViewletAction } from 'vs/workbench/browser/viewlet';
import { IViewletService } from 'vs/workbench/services/viewlet/browser/viewlet';
import { IEditorGroupsService } from 'vs/workbench/services/editor/common/editorGroupsService';
import { IWorkbenchActionRegistry, Extensions as WorkbenchActionExtensions } from 'vs/workbench/common/actions';
import { SyncActionDescriptor } from 'vs/platform/actions/common/actions';
import { IProgress, IProgressStep, IProgressService, ProgressLocation } from 'vs/platform/progress/common/progress';
import { IWorkbenchContribution, IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions } from 'vs/workbench/common/contributions';
import { IRemoteAgentService } from 'vs/workbench/services/remote/common/remoteAgentService';
import { IDialogService } from 'vs/platform/dialogs/common/dialogs';
import { ReconnectionWaitEvent, PersistentConnectionEventType } from 'vs/platform/remote/common/remoteAgentConnection';
import Severity from 'vs/base/common/severity';
import { ReloadWindowAction } from 'vs/workbench/browser/actions/windowActions';
import { IDisposable } from 'vs/base/common/lifecycle';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { SwitchRemoteViewItem, SwitchRemoteAction } from 'vs/workbench/contrib/remote/browser/explorerViewItems';
import { Action, IActionViewItem, IAction } from 'vs/base/common/actions';
import { isStringArray } from 'vs/base/common/types';
import { IRemoteExplorerService } from 'vs/workbench/services/remote/common/remoteExplorerService';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
import { startsWith } from 'vs/base/common/strings';
import { TunnelPanelDescriptor, TunnelViewModel, forwardedPortsViewEnabled } from 'vs/workbench/contrib/remote/browser/tunnelView';
import { IAddedViewDescriptorRef } from 'vs/workbench/browser/parts/views/views';
import { ViewPane, IViewPaneOptions } from 'vs/workbench/browser/parts/views/viewPaneContainer';
import { IListVirtualDelegate } from 'vs/base/browser/ui/list/list';
import { ITreeRenderer, ITreeNode, IAsyncDataSource } from 'vs/base/browser/ui/tree/tree';
import { WorkbenchAsyncDataTree, ResourceNavigator } from 'vs/platform/list/browser/listService';
import { IKeybindingService } from 'vs/platform/keybinding/common/keybinding';
import { Event } from 'vs/base/common/event';
import { ExtensionsRegistry, IExtensionPointUser } from 'vs/workbench/services/extensions/common/extensionsRegistry';
import { SyncDescriptor } from 'vs/platform/instantiation/common/descriptors';
import { RemoteWindowActiveIndicator } from 'vs/workbench/contrib/remote/browser/remoteIndicator';
import { inQuickPickContextKeyValue } from 'vs/workbench/browser/quickaccess';
export interface HelpInformation {
extensionDescription: IExtensionDescription;
getStarted?: string;
documentation?: string;
feedback?: string;
issues?: string;
remoteName?: string[] | string;
}
const remoteHelpExtPoint = ExtensionsRegistry.registerExtensionPoint<HelpInformation>({
extensionPoint: 'remoteHelp',
jsonSchema: {
description: nls.localize('RemoteHelpInformationExtPoint', 'Contributes help information for Remote'),
type: 'object',
properties: {
'getStarted': {
description: nls.localize('RemoteHelpInformationExtPoint.getStarted', "The url to your project's Getting Started page"),
type: 'string'
},
'documentation': {
description: nls.localize('RemoteHelpInformationExtPoint.documentation', "The url to your project's documentation page"),
type: 'string'
},
'feedback': {
description: nls.localize('RemoteHelpInformationExtPoint.feedback', "The url to your project's feedback reporter"),
type: 'string'
},
'issues': {
description: nls.localize('RemoteHelpInformationExtPoint.issues', "The url to your project's issues list"),
type: 'string'
}
}
}
});
interface IViewModel {
helpInformation: HelpInformation[];
}
class HelpTreeVirtualDelegate implements IListVirtualDelegate<IHelpItem> {
getHeight(element: IHelpItem): number {
return 22;
}
getTemplateId(element: IHelpItem): string {
return 'HelpItemTemplate';
}
}
interface IHelpItemTemplateData {
parent: HTMLElement;
icon: HTMLElement;
}
class HelpTreeRenderer implements ITreeRenderer<HelpModel | IHelpItem, IHelpItem, IHelpItemTemplateData> {
templateId: string = 'HelpItemTemplate';
renderTemplate(container: HTMLElement): IHelpItemTemplateData {
dom.addClass(container, 'remote-help-tree-node-item');
const icon = dom.append(container, dom.$('.remote-help-tree-node-item-icon'));
const data = <IHelpItemTemplateData>Object.create(null);
data.parent = container;
data.icon = icon;
return data;
}
renderElement(element: ITreeNode<IHelpItem, IHelpItem>, index: number, templateData: IHelpItemTemplateData, height: number | undefined): void {
const container = templateData.parent;
dom.append(container, templateData.icon);
dom.addClasses(templateData.icon, ...element.element.iconClasses);
const labelContainer = dom.append(container, dom.$('.help-item-label'));
labelContainer.innerText = element.element.label;
}
disposeTemplate(templateData: IHelpItemTemplateData): void {
}
}
class | implements IAsyncDataSource<any, any> {
hasChildren(element: any) {
return element instanceof HelpModel;
}
getChildren(element: any) {
if (element instanceof HelpModel && element.items) {
return element.items;
}
return [];
}
}
interface IHelpItem {
key: string;
iconClasses: string[];
label: string;
handleClick(): Promise<void>;
}
class HelpModel {
items: IHelpItem[] | undefined;
constructor(
viewModel: IViewModel,
openerService: IOpenerService,
quickInputService: IQuickInputService,
commandService: ICommandService,
remoteExplorerService: IRemoteExplorerService,
environmentService: IWorkbenchEnvironmentService
) {
let helpItems: IHelpItem[] = [];
const getStarted = viewModel.helpInformation.filter(info => info.getStarted);
if (getStarted.length) {
helpItems.push(new HelpItem(
'star',
nls.localize('remote.help.getStarted', "Get Started"),
getStarted.map((info: HelpInformation) => ({
extensionDescription: info.extensionDescription,
url: info.getStarted!,
remoteAuthority: (typeof info.remoteName === 'string') ? [info.remoteName] : info.remoteName
})),
quickInputService,
environmentService,
openerService,
remoteExplorerService
));
}
const documentation = viewModel.helpInformation.filter(info => info.documentation);
if (documentation.length) {
helpItems.push(new HelpItem(
'book',
nls.localize('remote.help.documentation', "Read Documentation"),
documentation.map((info: HelpInformation) => ({
extensionDescription: info.extensionDescription,
url: info.documentation!,
remoteAuthority: (typeof info.remoteName === 'string') ? [info.remoteName] : info.remoteName
})),
quickInputService,
environmentService,
openerService,
remoteExplorerService
));
}
const feedback = viewModel.helpInformation.filter(info => info.feedback);
if (feedback.length) {
helpItems.push(new HelpItem(
'twitter',
nls.localize('remote.help.feedback', "Provide Feedback"),
feedback.map((info: HelpInformation) => ({
extensionDescription: info.extensionDescription,
url: info.feedback!,
remoteAuthority: (typeof info.remoteName === 'string') ? [info.remoteName] : info.remoteName
})),
quickInputService,
environmentService,
openerService,
remoteExplorerService
));
}
const issues = viewModel.helpInformation.filter(info => info.issues);
if (issues.length) {
helpItems.push(new HelpItem(
'issues',
nls.localize('remote.help.issues', "Review Issues"),
issues.map((info: HelpInformation) => ({
extensionDescription: info.extensionDescription,
url: info.issues!,
remoteAuthority: (typeof info.remoteName === 'string') ? [info.remoteName] : info.remoteName
})),
quickInputService,
environmentService,
openerService,
remoteExplorerService
));
}
if (helpItems.length) {
helpItems.push(new IssueReporterItem(
'comment',
nls.localize('remote.help.report', "Report Issue"),
viewModel.helpInformation.map(info => ({
extensionDescription: info.extensionDescription,
remoteAuthority: (typeof info.remoteName === 'string') ? [info.remoteName] : info.remoteName
})),
quickInputService,
environmentService,
commandService,
remoteExplorerService
));
}
if (helpItems.length) {
this.items = helpItems;
}
}
}
abstract class HelpItemBase implements IHelpItem {
public iconClasses: string[] = [];
constructor(
public key: string,
public label: string,
public values: { extensionDescription: IExtensionDescription, url?: string, remoteAuthority: string[] | undefined }[],
private quickInputService: IQuickInputService,
private environmentService: IWorkbenchEnvironmentService,
private remoteExplorerService: IRemoteExplorerService
) {
this.iconClasses.push(`codicon-${key}`);
this.iconClasses.push('remote-help-tree-node-item-icon');
this.iconClasses.push('codicon');
}
async handleClick() {
const remoteAuthority = this.environmentService.configuration.remoteAuthority;
if (!remoteAuthority) {
return;
}
for (let i = 0; i < this.remoteExplorerService.targetType.length; i++) {
if (startsWith(remoteAuthority, this.remoteExplorerService.targetType[i])) {
for (let value of this.values) {
if (value.remoteAuthority) {
for (let authority of value.remoteAuthority) {
if (startsWith(remoteAuthority, authority)) {
await this.takeAction(value.extensionDescription, value.url);
return;
}
}
}
}
}
}
if (this.values.length > 1) {
let actions = this.values.map(value => {
return {
label: value.extensionDescription.displayName || value.extensionDescription.identifier.value,
description: value.url,
extensionDescription: value.extensionDescription
};
});
const action = await this.quickInputService.pick(actions, { placeHolder: nls.localize('pickRemoteExtension', "Select url to open") });
if (action) {
await this.takeAction(action.extensionDescription, action.description);
}
} else {
await this.takeAction(this.values[0].extensionDescription, this.values[0].url);
}
}
protected abstract takeAction(extensionDescription: IExtensionDescription, url?: string): Promise<void>;
}
class HelpItem extends HelpItemBase {
constructor(
key: string,
label: string,
values: { extensionDescription: IExtensionDescription; url: string, remoteAuthority: string[] | undefined }[],
quickInputService: IQuickInputService,
environmentService: IWorkbenchEnvironmentService,
private openerService: IOpenerService,
remoteExplorerService: IRemoteExplorerService
) {
super(key, label, values, quickInputService, environmentService, remoteExplorerService);
}
protected async takeAction(extensionDescription: IExtensionDescription, url: string): Promise<void> {
await this.openerService.open(URI.parse(url));
}
}
class IssueReporterItem extends HelpItemBase {
constructor(
key: string,
label: string,
values: { extensionDescription: IExtensionDescription; remoteAuthority: string[] | undefined }[],
quickInputService: IQuickInputService,
environmentService: IWorkbenchEnvironmentService,
private commandService: ICommandService,
remoteExplorerService: IRemoteExplorerService
) {
super(key, label, values, quickInputService, environmentService, remoteExplorerService);
}
protected async takeAction(extensionDescription: IExtensionDescription): Promise<void> {
await this.commandService.executeCommand('workbench.action.openIssueReporter', [extensionDescription.identifier.value]);
}
}
class HelpPanel extends ViewPane {
static readonly ID = '~remote.helpPanel';
static readonly TITLE = nls.localize('remote.help', "Help and feedback");
private tree!: WorkbenchAsyncDataTree<any, any, any>;
constructor(
protected viewModel: IViewModel,
options: IViewPaneOptions,
@IKeybindingService protected keybindingService: IKeybindingService,
@IContextMenuService protected contextMenuService: IContextMenuService,
@IContextKeyService protected contextKeyService: IContextKeyService,
@IConfigurationService protected configurationService: IConfigurationService,
@IInstantiationService protected readonly instantiationService: IInstantiationService,
@IViewDescriptorService viewDescriptorService: IViewDescriptorService,
@IOpenerService openerService: IOpenerService,
@IQuickInputService protected quickInputService: IQuickInputService,
@ICommandService protected commandService: ICommandService,
@IRemoteExplorerService protected readonly remoteExplorerService: IRemoteExplorerService,
@IWorkbenchEnvironmentService protected readonly workbenchEnvironmentService: IWorkbenchEnvironmentService,
@IThemeService themeService: IThemeService,
@ITelemetryService telemetryService: ITelemetryService,
) {
super(options, keybindingService, contextMenuService, configurationService, contextKeyService, viewDescriptorService, instantiationService, openerService, themeService, telemetryService);
}
protected renderBody(container: HTMLElement): void {
super.renderBody(container);
dom.addClass(container, 'remote-help');
const treeContainer = document.createElement('div');
dom.addClass(treeContainer, 'remote-help-content');
container.appendChild(treeContainer);
this.tree = this.instantiationService.createInstance(WorkbenchAsyncDataTree,
'RemoteHelp',
treeContainer,
new HelpTreeVirtualDelegate(),
[new HelpTreeRenderer()],
new HelpDataSource(),
{
keyboardSupport: true,
accessibilityProvider: {
getAriaLabel: (item: HelpItemBase) => {
return item.label;
}
}
}
);
const model = new HelpModel(this.viewModel, this.openerService, this.quickInputService, this.commandService, this.remoteExplorerService, this.workbenchEnvironmentService);
this.tree.setInput(model);
const helpItemNavigator = this._register(ResourceNavigator.createTreeResourceNavigator(this.tree, { openOnFocus: false, openOnSelection: false }));
this._register(Event.debounce(helpItemNavigator.onDidOpenResource, (last, event) => event, 75, true)(e => {
e.element.handleClick();
}));
}
protected layoutBody(height: number, width: number): void {
this.tree.layout(height, width);
}
}
class HelpPanelDescriptor implements IViewDescriptor {
readonly id = HelpPanel.ID;
readonly name = HelpPanel.TITLE;
readonly ctorDescriptor: SyncDescriptor<HelpPanel>;
readonly canToggleVisibility = true;
readonly hideByDefault = false;
readonly workspace = true;
readonly group = 'help@50';
constructor(viewModel: IViewModel) {
this.ctorDescriptor = new SyncDescriptor(HelpPanel, [viewModel]);
}
}
export class RemoteViewPaneContainer extends FilterViewPaneContainer implements IViewModel {
private helpPanelDescriptor = new HelpPanelDescriptor(this);
helpInformation: HelpInformation[] = [];
private actions: IAction[] | undefined;
private tunnelPanelDescriptor: TunnelPanelDescriptor | undefined;
constructor(
@IWorkbenchLayoutService layoutService: IWorkbenchLayoutService,
@ITelemetryService telemetryService: ITelemetryService,
@IWorkspaceContextService contextService: IWorkspaceContextService,
@IStorageService storageService: IStorageService,
@IConfigurationService configurationService: IConfigurationService,
@IInstantiationService instantiationService: IInstantiationService,
@IThemeService themeService: IThemeService,
@IContextMenuService contextMenuService: IContextMenuService,
@IExtensionService extensionService: IExtensionService,
@IRemoteExplorerService private readonly remoteExplorerService: IRemoteExplorerService,
@IWorkbenchEnvironmentService private readonly environmentService: IWorkbenchEnvironmentService,
@IContextKeyService private readonly contextKeyService: IContextKeyService,
@IViewDescriptorService viewDescriptorService: IViewDescriptorService,
) {
super(VIEWLET_ID, remoteExplorerService.onDidChangeTargetType, configurationService, layoutService, telemetryService, storageService, instantiationService, themeService, contextMenuService, extensionService, contextService, viewDescriptorService);
this.addConstantViewDescriptors([this.helpPanelDescriptor]);
remoteHelpExtPoint.setHandler((extensions) => {
let helpInformation: HelpInformation[] = [];
for (let extension of extensions) {
this._handleRemoteInfoExtensionPoint(extension, helpInformation);
}
this.helpInformation = helpInformation;
const viewsRegistry = Registry.as<IViewsRegistry>(Extensions.ViewsRegistry);
if (this.helpInformation.length) {
viewsRegistry.registerViews([this.helpPanelDescriptor], this.viewContainer);
} else {
viewsRegistry.deregisterViews([this.helpPanelDescriptor], this.viewContainer);
}
});
}
private _handleRemoteInfoExtensionPoint(extension: IExtensionPointUser<HelpInformation>, helpInformation: HelpInformation[]) {
if (!extension.description.enableProposedApi) {
return;
}
if (!extension.value.documentation && !extension.value.feedback && !extension.value.getStarted && !extension.value.issues) {
return;
}
helpInformation.push({
extensionDescription: extension.description,
getStarted: extension.value.getStarted,
documentation: extension.value.documentation,
feedback: extension.value.feedback,
issues: extension.value.issues,
remoteName: extension.value.remoteName
});
}
protected getFilterOn(viewDescriptor: IViewDescriptor): string | undefined {
return isStringArray(viewDescriptor.remoteAuthority) ? viewDescriptor.remoteAuthority[0] : viewDescriptor.remoteAuthority;
}
public getActionViewItem(action: Action): IActionViewItem | undefined {
if (action.id === SwitchRemoteAction.ID) {
return this.instantiationService.createInstance(SwitchRemoteViewItem, action, SwitchRemoteViewItem.createOptionItems(Registry.as<IViewsRegistry>(Extensions.ViewsRegistry).getViews(this.viewContainer), this.contextKeyService));
}
return super.getActionViewItem(action);
}
public getActions(): IAction[] {
if (!this.actions) {
this.actions = [
this.instantiationService.createInstance(SwitchRemoteAction, SwitchRemoteAction.ID, SwitchRemoteAction.LABEL)
];
this.actions.forEach(a => {
this._register(a);
});
}
return this.actions;
}
getTitle(): string {
const title = nls.localize('remote.explorer', "Remote Explorer");
return title;
}
onDidAddViewDescriptors(added: IAddedViewDescriptorRef[]): ViewPane[] {
// Call to super MUST be first, since registering the additional view will cause this to be called again.
const panels: ViewPane[] = super.onDidAddViewDescriptors(added);
// This context key is set to false in the constructor, but is expected to be changed by resolver extensions to enable the forwarded ports view.
const viewEnabled: boolean = !!forwardedPortsViewEnabled.getValue(this.contextKeyService);
if (this.environmentService.configuration.remoteAuthority && !this.tunnelPanelDescriptor && viewEnabled) {
this.tunnelPanelDescriptor = new TunnelPanelDescriptor(new TunnelViewModel(this.remoteExplorerService), this.environmentService);
const viewsRegistry = Registry.as<IViewsRegistry>(Extensions.ViewsRegistry);
viewsRegistry.registerViews([this.tunnelPanelDescriptor!], this.viewContainer);
}
return panels;
}
}
Registry.as<IViewContainersRegistry>(Extensions.ViewContainersRegistry).registerViewContainer(
{
id: VIEWLET_ID,
name: nls.localize('remote.explorer', "Remote Explorer"),
ctorDescriptor: new SyncDescriptor(RemoteViewPaneContainer),
hideIfEmpty: true,
viewOrderDelegate: {
getOrder: (group?: string) => {
if (!group) {
return;
}
let matches = /^targets@(\d+)$/.exec(group);
if (matches) {
return -1000;
}
matches = /^details(@(\d+))?$/.exec(group);
if (matches) {
return -500;
}
matches = /^help(@(\d+))?$/.exec(group);
if (matches) {
return -10;
}
return;
}
},
icon: 'codicon-remote-explorer',
order: 4
}, ViewContainerLocation.Sidebar);
class OpenRemoteViewletAction extends ShowViewletAction {
static readonly ID = VIEWLET_ID;
static readonly LABEL = nls.localize('toggleRemoteViewlet', "Show Remote Explorer");
constructor(id: string, label: string, @IViewletService viewletService: IViewletService, @IEditorGroupsService editorGroupService: IEditorGroupsService, @IWorkbenchLayoutService layoutService: IWorkbenchLayoutService) {
super(id, label, VIEWLET_ID, viewletService, editorGroupService, layoutService);
}
}
// Register Action to Open Viewlet
Registry.as<IWorkbenchActionRegistry>(WorkbenchActionExtensions.WorkbenchActions).registerWorkbenchAction(
SyncActionDescriptor.create(OpenRemoteViewletAction, VIEWLET_ID, nls.localize('toggleRemoteViewlet', "Show Remote Explorer"), {
primary: 0
}),
'View: Show Remote Explorer',
nls.localize('view', "View")
);
class VisibleProgress {
private _isDisposed: boolean;
private _lastReport: string | null;
private _currentProgressPromiseResolve: (() => void) | null;
private _currentProgress: IProgress<IProgressStep> | null;
private _currentTimer: ReconnectionTimer2 | null;
public get lastReport(): string | null {
return this._lastReport;
}
constructor(progressService: IProgressService, location: ProgressLocation, initialReport: string | null, buttons: string[], onDidCancel: (choice: number | undefined, lastReport: string | null) => void) {
this._isDisposed = false;
this._lastReport = initialReport;
this._currentProgressPromiseResolve = null;
this._currentProgress = null;
this._currentTimer = null;
const promise = new Promise<void>((resolve) => this._currentProgressPromiseResolve = resolve);
progressService.withProgress(
{ location: location, buttons: buttons },
(progress) => { if (!this._isDisposed) { this._currentProgress = progress; } return promise; },
(choice) => onDidCancel(choice, this._lastReport)
);
if (this._lastReport) {
this.report();
}
}
public dispose(): void {
this._isDisposed = true;
if (this._currentProgressPromiseResolve) {
this._currentProgressPromiseResolve();
this._currentProgressPromiseResolve = null;
}
this._currentProgress = null;
if (this._currentTimer) {
this._currentTimer.dispose();
this._currentTimer = null;
}
}
public report(message?: string) {
if (message) {
this._lastReport = message;
}
if (this._lastReport && this._currentProgress) {
this._currentProgress.report({ message: this._lastReport });
}
}
public startTimer(completionTime: number): void {
this.stopTimer();
this._currentTimer = new ReconnectionTimer2(this, completionTime);
}
public stopTimer(): void {
if (this._currentTimer) {
this._currentTimer.dispose();
this._currentTimer = null;
}
}
}
class ReconnectionTimer2 implements IDisposable {
private readonly _parent: VisibleProgress;
private readonly _completionTime: number;
private readonly _token: any;
constructor(parent: VisibleProgress, completionTime: number) {
this._parent = parent;
this._completionTime = completionTime;
this._token = setInterval(() => this._render(), 1000);
this._render();
}
public dispose(): void {
clearInterval(this._token);
}
private _render() {
const remainingTimeMs = this._completionTime - Date.now();
if (remainingTimeMs < 0) {
return;
}
const remainingTime = Math.ceil(remainingTimeMs / 1000);
if (remainingTime === 1) {
this._parent.report(nls.localize('reconnectionWaitOne', "Attempting to reconnect in {0} second...", remainingTime));
} else {
this._parent.report(nls.localize('reconnectionWaitMany', "Attempting to reconnect in {0} seconds...", remainingTime));
}
}
}
class RemoteAgentConnectionStatusListener implements IWorkbenchContribution {
constructor(
@IRemoteAgentService remoteAgentService: IRemoteAgentService,
@IProgressService progressService: IProgressService,
@IDialogService dialogService: IDialogService,
@ICommandService commandService: ICommandService,
@IContextKeyService contextKeyService: IContextKeyService
) {
const connection = remoteAgentService.getConnection();
if (connection) {
let visibleProgress: VisibleProgress | null = null;
let lastLocation: ProgressLocation.Dialog | ProgressLocation.Notification | null = null;
let reconnectWaitEvent: ReconnectionWaitEvent | null = null;
let disposableListener: IDisposable | null = null;
function showProgress(location: ProgressLocation.Dialog | ProgressLocation.Notification, buttons: { label: string, callback: () => void }[], initialReport: string | null = null): VisibleProgress {
if (visibleProgress) {
visibleProgress.dispose();
visibleProgress = null;
}
lastLocation = location;
return new VisibleProgress(
progressService, location, initialReport, buttons.map(button => button.label),
(choice, lastReport) => {
// Handle choice from dialog
if (typeof choice !== 'undefined' && buttons[choice]) {
buttons[choice].callback();
} else {
if (location === ProgressLocation.Dialog) {
visibleProgress = showProgress(ProgressLocation.Notification, buttons, lastReport);
} else {
hideProgress();
}
}
}
);
}
function hideProgress() {
if (visibleProgress) {
visibleProgress.dispose();
visibleProgress = null;
}
}
const reconnectButton = {
label: nls.localize('reconnectNow', "Reconnect Now"),
callback: () => {
if (reconnectWaitEvent) {
reconnectWaitEvent.skipWait();
}
}
};
const reloadButton = {
label: nls.localize('reloadWindow', "Reload Window"),
callback: () => {
commandService.executeCommand(ReloadWindowAction.ID);
}
};
connection.onDidStateChange((e) => {
if (visibleProgress) {
visibleProgress.stopTimer();
}
if (disposableListener) {
disposableListener.dispose();
disposableListener = null;
}
switch (e.type) {
case PersistentConnectionEventType.ConnectionLost:
if (!visibleProgress) {
visibleProgress = showProgress(ProgressLocation.Dialog, [reconnectButton, reloadButton]);
}
visibleProgress.report(nls.localize('connectionLost', "Connection Lost"));
break;
case PersistentConnectionEventType.ReconnectionWait:
reconnectWaitEvent = e;
visibleProgress = showProgress(lastLocation || ProgressLocation.Notification, [reconnectButton, reloadButton]);
visibleProgress.startTimer(Date.now() + 1000 * e.durationSeconds);
break;
case PersistentConnectionEventType.ReconnectionRunning:
visibleProgress = showProgress(lastLocation || ProgressLocation.Notification, [reloadButton]);
visibleProgress.report(nls.localize('reconnectionRunning', "Attempting to reconnect..."));
// Register to listen for quick input is opened
disposableListener = contextKeyService.onDidChangeContext((contextKeyChangeEvent) => {
const reconnectInteraction = new Set<string>([inQuickPickContextKeyValue]);
if (contextKeyChangeEvent.affectsSome(reconnectInteraction)) {
// Need to move from dialog if being shown and user needs to type in a prompt
if (lastLocation === ProgressLocation.Dialog && visibleProgress !== null) {
visibleProgress = showProgress(ProgressLocation.Notification, [reloadButton], visibleProgress.lastReport);
}
}
});
break;
case PersistentConnectionEventType.ReconnectionPermanentFailure:
hideProgress();
dialogService.show(Severity.Error, nls.localize('reconnectionPermanentFailure', "Cannot reconnect. Please reload the window."), [nls.localize('reloadWindow', "Reload Window"), nls.localize('cancel', "Cancel")], { cancelId: 1 }).then(result => {
// Reload the window
if (result.choice === 0) {
commandService.executeCommand(ReloadWindowAction.ID);
}
});
break;
case PersistentConnectionEventType.ConnectionGain:
hideProgress();
break;
}
});
}
}
}
const workbenchContributionsRegistry = Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench);
workbenchContributionsRegistry.registerWorkbenchContribution(RemoteAgentConnectionStatusListener, LifecyclePhase.Eventually);
workbenchContributionsRegistry.registerWorkbenchContribution(RemoteWindowActiveIndicator, LifecyclePhase.Starting);
| HelpDataSource |
test_pathutils.py | import pytest
from blacksheep.common.files.pathsutils import (
get_file_extension_from_name,
get_mime_type_from_name,
)
@pytest.mark.parametrize(
"full_path,expected_result",
[
("hello.txt", ".txt"),
(".gitignore", ".gitignore"),
("ØØ Void.album", ".album"),
("", ""),
],
)
def test_get_file_extension_from_name(full_path, expected_result):
as |
@pytest.mark.parametrize(
"full_path,expected_result",
[
("example.ogg", "audio/ogg"),
("example.jpg", "image/jpeg"),
("example.jpeg", "image/jpeg"),
("example.png", "image/png"),
("example.js", "application/javascript"),
("example.json", "application/json"),
("example.woff2", "font/woff2"),
("hello.txt", "text/plain"),
(".gitignore", "application/octet-stream"),
("ØØ Void.album", "application/octet-stream"),
("", "application/octet-stream"),
],
)
def test_get_mime_type(full_path, expected_result):
assert get_mime_type_from_name(full_path) == expected_result
| sert get_file_extension_from_name(full_path) == expected_result
|
code_test.go | package leetcode
import (
"reflect"
"testing"
)
func TestThreeSum(t *testing.T) {
var tests = []struct {
nums []int
solution [][]int
}{
{
[]int{0, 0, 0, 0},
[][]int{
{0, 0, 0},
},
},
{
[]int{-1, 0, 1, 2, -1, -4},
[][]int{
{-1, -1, 2},
{-1, 0, 1},
},
},
{
[]int{-4, 1, 3, 3, 3, 1},
[][]int{
{-4, 1, 3},
},
},
}
for _, tt := range tests {
solution := threeSum(tt.nums)
if reflect.DeepEqual(solution, tt.solution) == false { | t.Errorf("threeSum(%v) return %v, want %v", tt.nums, solution, tt.solution)
}
}
} | |
class_y_t_music_uploader_1_1_providers_1_1_request_models_1_1_search_result_context_1_1_musicres092063fc46031c768065e3a0a4887fe5.js | var class_y_t_music_uploader_1_1_providers_1_1_request_models_1_1_search_result_context_1_1_musicres092063fc46031c768065e3a0a4887fe5 =
[ | [ "displayPriority", "d0/d41/class_y_t_music_uploader_1_1_providers_1_1_request_models_1_1_search_result_context_1_1_musicres092063fc46031c768065e3a0a4887fe5.html#a2c3d6218f8248d8c22ca74df5b6d15c9", null ],
[ "text", "d0/d41/class_y_t_music_uploader_1_1_providers_1_1_request_models_1_1_search_result_context_1_1_musicres092063fc46031c768065e3a0a4887fe5.html#af2e600efe26f751da6cbec95db72cca2", null ]
]; |
|
image.go | /* -------------------------------------------------------------------------- */
/* Copyright 2002-2019, OpenNebula Project, OpenNebula Systems */
/* */
/* Licensed under the Apache License, Version 2.0 (the "License"); you may */
/* not use this file except in compliance with the License. You may obtain */
/* a copy of the License at */
/* */
/* http://www.apache.org/licenses/LICENSE-2.0 */
/* */
/* Unless required by applicable law or agreed to in writing, software */
/* distributed under the License is distributed on an "AS IS" BASIS, */
/* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. */
/* See the License for the specific language governing permissions and */
/* limitations under the License. */
/*--------------------------------------------------------------------------- */
package image
import (
"encoding/xml"
"fmt"
"github.com/OpenNebula/one/src/oca/go/src/goca/schemas/shared"
)
// Pool represents an OpenNebula Image pool
type Pool struct {
XMLName xml.Name `xml:"IMAGE_POOL"`
Images []Image `xml:"IMAGE"`
}
// Image represents an OpenNebula Image
type Image struct {
XMLName xml.Name `xml:"IMAGE"`
ID int `xml:"ID,omitempty"`
UID int `xml:"UID,omitempty"`
GID int `xml:"GID,omitempty"`
UName string `xml:"UNAME,omitempty"`
GName string `xml:"GNAME,omitempty"`
Name string `xml:"NAME"`
LockInfos *shared.Lock `xml:"LOCK,omitempty"`
Permissions *shared.Permissions `xml:"PERMISSIONS,omitempty"`
Type *int `xml:"TYPE,omitempty"`
DiskType *int `xml:"DISK_TYPE,omitempty"`
Persistent *int `xml:"PERSISTENT,omitempty"`
RegTime int `xml:"REGTIME,omitempty"`
Source string `xml:"SOURCE,omitempty"`
Path string `xml:"PATH,omitempty"`
FsType string `xml:"FSTYPE,omitempty"`
Size int `xml:"SIZE,omitempty"`
StateRaw int `xml:"STATE,omitempty"`
RunningVMs int `xml:"RUNNING_VMS,omitempty"`
CloningOps int `xml:"CLONING_OPS,omitempty"`
CloningID int `xml:"CLONING_ID,omitempty"`
TargetSnapshot int `xml:"TARGET_SNAPSHOT,omitempty"`
DatastoreID *int `xml:"DATASTORE_ID,omitempty"`
Datastore string `xml:"DATASTORE,omitempty"`
VMs shared.EntitiesID `xml:"VMS,omitempty"`
Clones shared.EntitiesID `xml:"CLONES,omitempty"`
AppClones shared.EntitiesID `xml:"APP_CLONES,omitempty"`
Snapshots shared.DiskSnapshot `xml:"SNAPSHOTS,omitempty"`
Template Template `xml:"TEMPLATE"`
}
// State is the state of the Image
type State int
const (
// Init image is being initialized
Init State = iota
// Ready image is ready to be used
Ready
// Used image is in use
Used
// Disabled image is in disabled
Disabled
// Locked image is locked
Locked
// Error image is in error state
Error
// Clone image is in clone state
Clone
// Delete image is in delete state
Delete
// UsedPers image is in use and persistent
UsedPers
// LockUsed image is in locked state (non-persistent)
LockUsed
// LockUsedPers image is in locked state (persistent)
LockUsedPers
)
func (s State) isValid() bool {
if s >= Init && s <= LockUsedPers {
return true
}
return false
}
// String returns the string version of the State
func (s State) String() string {
return [...]string{
"INIT",
"READY",
"USED",
"DISABLED",
"LOCKED",
"ERROR",
"CLONE",
"DELETE",
"USED_PERS",
"LOCKED_USED",
"LOCKED_USED_PERS",
}[s]
}
// State looks up the state of the image and returns the State
func (image *Image) State() (State, error) {
state := State(image.StateRaw)
if !state.isValid() {
return -1, fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw)
}
return state, nil
}
// StateString returns the state in string format
func (image *Image) StateString() (string, error) {
state := State(image.StateRaw)
if !state.isValid() {
return "", fmt.Errorf("Image State: this state value is not currently handled: %d\n", image.StateRaw)
}
return state.String(), nil
}
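// Editor's note: a minimal usage sketch, not part of the original OpenNebula
// source. It assumes an *Image value (here `img`) already populated from the
// API and only exercises the State and StateString helpers defined above.
func exampleImageState(img *Image) {
	if st, err := img.State(); err == nil && st == Ready {
		fmt.Println("image is ready")
	}
	if s, err := img.StateString(); err != nil {
		fmt.Println("unhandled state:", err)
	} else {
		fmt.Println("state:", s)
	}
}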
load_test.py | #!/bin/env python3
# Steps required to use
# install required libraries
# (root)# dnf install python3-ldap3
#
# Create python virtual environment directory
# (user)$ python3 -m venv ./venv3
#
# Enable virtual environment
# (user)$ source ./venv3/bin/activate
#
# Update pip and then install the needed libraries
# (user-venv3)$ pip install --upgrade pip
# (user-venv3)$ pip install python-freeipa
# (user-venv3)$ pip install ldap3
#
# Execute Script:
# (user-venv3)$ ./load_test.py -h
# -- not required, saved as a note
# dnf install python3-requests-kerberos python3-requests-gssapi
import sys
import time
from datetime import datetime
import re
import argparse
import logging
#from linetimer import CodeTimer
import itertools
import pprint
import subprocess
import socket
import dns.resolver
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# from ldap3 import Server, Connection, ALL, MODIFY_ADD
import ldap3
from python_freeipa import ClientMeta
# import requests
#from requests_kerberos import HTTPKerberosAuth
# generate a 4 digit randomizer from the current time
# randomizer = int(time.time()) % 10000
randomizer = datetime.now().strftime("%d%H%M")
start_timestr = datetime.now().strftime("%Y%m%d %H:%M")
start_time = time.time()
uid_template = "tuser{}_{{seq}}".format(randomizer)
pp=pprint.PrettyPrinter(indent=2)
class LogFilter(object):
def __init__(self,level,type='ge'):
self.__level = level
self.__type = type
def filter(self, logRecord):
if self.__type == 'ge':
return logRecord.levelno >= self.__level
elif self.__type == 'eq':
return logRecord.levelno == self.__level
else:
return logRecord.levelno <= self.__level
class MyLogger(logging.getLoggerClass()):
_PERF = 21
def __init__(self, name, **kwargs ):
super().__init__(name, **kwargs)
logging.addLevelName(self._PERF, 'PERF')
def perf(self, message, *args, **kwargs):
if self.isEnabledFor(self._PERF):
self._log(self._PERF, message, args, **kwargs)
logging.setLoggerClass(MyLogger)
logger = logging.getLogger('IDM_user_load_tester')
logger.setLevel(logging.INFO)
_stout_handler = logging.StreamHandler()
_stout_handler.setLevel(logging.INFO)
logger.addHandler(_stout_handler)
def iter_timer(iterable, step=10, label=""):
start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
pos = 0
# step_count=len(iterable)//step
for item in iterable:
pos = pos + 1
if pos != 0 and pos % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(pos,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
def loop_timer(count,step=10,label=""):
start = time.time()
last_t = start
loop_tag = "loop {}{}{{}}".format(label, " "*bool(label))
logger.perf(loop_tag.format("start"))
for item in range(count):
if item != 0 and item % step == 0:
logger.perf("{}: {:4.3f} {:4.3f}".format(item,time.time() - start, time.time() - last_t))
last_t = time.time()
yield item
logger.perf("{}: {:4.3f} {:4.3f}".format(count,time.time() - start, time.time() - last_t))
logger.perf(loop_tag.format("end"))
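# Editor's sketch, not part of the original script: loop_timer yields 0..count-1
# while logging PERF-level timing lines every `step` iterations, so an existing
# `for i in range(n):` loop only needs its range() swapped out.
def _example_loop_timer_usage():
    return [i for i in loop_timer(20, step=5, label="demo")]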
# creates a generator to iterate through a list in chunks
# returns an iterator chunk of the iterable of up to the given size.
def chunker(iterable, size):
it = iter(iterable)
while True:
chunk = tuple(itertools.islice(it,size))
if not chunk:
return
yield chunk
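# Editor's sketch, not part of the original script: chunker batches any iterable
# into tuples of at most `size` items, e.g. 10 items with a chunk size of 4
# yield tuples of lengths 4, 4 and 2.
def _example_chunker_usage():
    return [len(batch) for batch in chunker(range(10), 4)]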
def dump_ldap_stats(reset=True):
logger.debug(ldap_conn.usage)
if reset:
ldap_conn.usage.reset()
def generate_user(seq_num, ldif_out=False, dc_dn=None):
#create a list/dict of user entries to use for passing to a function
user = {}
user["a_uid"] = uid_template.format(seq=seq_num)
user["o_givenname"] = str(seq_num)
user["o_sn"] = "tuser_{}".format(randomizer)
user["o_cn"] = "{} {}".format(user["o_givenname"], user["o_sn"])
user["o_preferredlanguage"]='EN'
user["o_employeetype"]="Created via load_test.py. Run started at: {}".format(start_timestr)
# if the user is to be used for LDIF, strip the first two prepended chars
if ldif_out:
clean_rex = r"^._"
keylist = list(user.keys())
user['attributes']={}
for key in keylist:
new_key = re.sub(clean_rex,'',key)
user['attributes'][new_key]=user[key]
del user[key]
if dc_dn is not None:
user['dn']="uid={},cn=staged users,cn=accounts,cn=provisioning,{}".format(user['attributes']['uid'],dc_dn)
user['object_class']=['top','inetorgperson']
return user
def add_users_api(total):
users=[]
for i in loop_timer(args.count,args.count//10,label="user_add_api"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
if args.stage:
user_out = client.stageuser_add(**user)
else:
user_out = client.user_add(**user)
logger.debug(user_out)
return users
def add_users_stage(total):
users=[]
if args.ldap_stage:
for i in loop_timer(args.count,args.count//10,label="user_add_stage_ldap"):
user = generate_user(i, ldif_out=True, dc_dn=dom_dn)
users.append(user['attributes']['uid'])
user_dn=user['dn']
del user['dn']
ldap_conn.add(user_dn,**user)
else:
for i in loop_timer(args.count,args.count//10,label="user_add_stage"):
user = generate_user(i)
users.append(user["a_uid"])
logger.debug(user)
user_out = client.stageuser_add(**user)
logger.debug(user_out)
for i in iter_timer(users,args.count//10,label="user_activate"):
activate_out = client.stageuser_activate(i)
logger.debug(activate_out)
return users
def get_users(template):
logger.perf("Checking for user template '{}'".format(template))
if client.user_find(template,o_sizelimit=1)['count'] > 0:
users = [ user['uid'][0] for user in client.user_find(template,o_sizelimit=0,o_timelimit=0)['result']]
logger.perf("Found {} users".format(len(users)))
else:
logger.perf("Unable to find user template")
exit(1)
return users
def get_users_ldap(template):
logger.perf("Checking for user template '{}'".format(template))
results = client.user_find(template,o_sizelimit=1)
if results['count'] > 0:
result=results['result'][0]
uid = result['uid'][0]
user_dn=result['dn']
base_dn = re.sub("uid={},".format(uid),'',user_dn)
entry_gen = ldap_conn.extend.standard.paged_search(search_base = base_dn,
search_filter = "(uid={}*)".format(template),
search_scope = ldap3.SUBTREE,
attributes = '*',
paged_size=1000,
generator=True)
total = 0
users=[]
for entry in entry_gen:
# print(entry)
total += 1
if total % 10000 == 0:
logger.perf("Loaded {} users".format(total))
dump_ldap_stats()
# extract user uid. For some reason uid is a list, we only need the first
users.append(entry['attributes']['uid'][0])
if args.user_limit>-1 and total >= args.user_limit:
break
logger.perf("Loaded {} users".format(len(users)))
dump_ldap_stats()
else:
logger.perf("Unable to find user template")
exit(1)
return users
def create_group_add_users_api(i,users):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc)
if result["value"]==group_name:
logger.info("Success")
logger.debug(result)
logger.perf("Group: {}".format(group_name))
logger.info("Adding {} users".format(len(users)))
result = client.group_add_member(group_name, o_user=users)
logger.info("Done")
logger.debug(result)
def create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=-1):
group_name = "group{}_{}".format(randomizer,i)
group_desc = "Test group vor load_test.py. Run started at: {}".format(start_timestr)
logger.info("Creating group: {}".format(group_name))
result = client.group_add(group_name, o_description=group_desc,o_raw=True)
group_dn=result['result']['dn']
logger.debug(result)
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_ADD, chunk)
def remove_group_users_ldap(users, ldap_conn, base_user_dn, group_name, group_dn, chunk=-1):
logger.info("Group to delete: {}".format(group_dn))
start = time.time()
mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap3.MODIFY_DELETE, chunk)
logger.perf("Removing users from group took: {:4.3f}".format(time.time() - start))
result = client.group_show(group_name)
logger.info("Group show: {}".format(result))
logger.info("Delete group from IDM: {}".format(group_dn))
start = time.time()
result = client.group_del(group_name)
logger.perf("Delete group using API took: {:4.3f}".format(time.time() - start))
logger.info("Group del resul: {}".format(result))
def ldap_modify_retry(*fargs, **kwargs):
for retry_num in range(args.max_retries+1):
try:
return(ldap_conn.modify(*fargs,**kwargs))
except Exception as e:
logger.perf("Exception Occured")
logger.perf("'{}'".format(e))
logger.perf("{} retries left".format(args.max_retries-retry_num))
ldap_conn.unbind()
ldap_conn.bind()
logger.info("LDAP Connection rebound")
def mod_group_users_ldap(users, ldap_conn, base_user_dn, group_dn, ldap_mod_op, chunk=-1):
if chunk==-1:
chunk=len(users)
user_dn_list = [base_user_dn.format(user) for user in users]
for user_dn_chunk in chunker(user_dn_list,chunk):
# print(user_dn_chunk)
logger.perf("Chunk ({})".format(len(user_dn_chunk)))
logger.debug("Showing fist 20 of user_dn_chunk: {}".format(user_dn_chunk[:20]))
# result = ldap_conn.modify(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]})
result = ldap_modify_retry(group_dn,{"member":[(ldap_mod_op, user_dn_chunk)]})
dump_ldap_stats()
logger.debug("LDAP Modify result: {}".format(result))
if args.rebind:
logger.perf("rebinding LDAP connection")
ldap_conn.unbind()
ldap_conn.bind()
if args.delay>0:
logger.perf("Sleeping {} seconds".format(args.delay))
time.sleep(args.delay)
def check_dns_record(server, domain, record):
resolver = dns.resolver.Resolver()
resolver.nameservers=[socket.gethostbyname(server)]
try:
rdata = resolver.query(record + "." + domain)
logger.perf("Server [{}] answered with [{}]".format(server, rdata[0].address))
return 1
except dns.resolver.NXDOMAIN:
logger.perf("Record [{}] doesn't exist on server [{}]".format(record + "." + domain, server))
return 0
parser = argparse.ArgumentParser(description="Generate load test data for IdM",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', dest='verbosity', action='count', default=0,
help="Increase Verbosity, default is errors only. Only effective up to 3 levels.")
parser.add_argument('-c', type=int, dest='count',
help="Total count of users to add")
parser.add_argument('-g', dest='group_count', default=1, type=int,
help="Number of groups to create")
parser.add_argument('-S', dest='server', type=str,
help="Server to connect to")
parser.add_argument('-U', dest='user', type=str,
help="User account to use for connect")
parser.add_argument('-P', dest='password', type=str,
help="Password for connection")
parser.add_argument('--stage', dest='stage', action='store_true', default=False,
help="Create user in stage not active")
parser.add_argument('--stage-ldap', dest='ldap_stage', default=False, action='store_true',
help='Create stage users via ldap not API')
parser.add_argument('--ldap-group', dest='ldap_group', default=False, action='store_true',
help="Add users to group using LDAP directly")
parser.add_argument('--ldap-group-remove', dest='ldap_group_del', type=str,
help="Remove users from group using LDAP directly")
parser.add_argument('-C', dest='chunk', type=int, default=-1,
help="Chunk size for batching user adds to groups, -1 means all users given in count")
parser.add_argument('-r', dest='reuse_template', type=str,
help="Reuse existing users for group add using given user naming template")
parser.add_argument('-D', dest='delay',type=int, default=0,
help="Delay N seconds between chunks")
parser.add_argument('--rebind', dest='rebind',default=False,action='store_true',
help="Perform a unmind/bind operation between ldap operations.")
parser.add_argument('-l', dest='user_limit', type=int, default=-1,
help="Limit the number of users returned by reuse")
parser.add_argument('--max-retries',dest='max_retries', type=int, default=0,
help="Maximum number of retries for a failed chunk operation")
parser.add_argument('--check-repl', dest='check_repl',default=False,action='store_true',
help="Check when replication is finished by adding a DNS record")
args=parser.parse_args()
# setting up logger here to prevent log files being generated when showing help
perf_logfile = "perf_{}".format(randomizer)
_perf_handler = logging.FileHandler(perf_logfile)
_perf_formatter = logging.Formatter("%(asctime)s; %(message)s")
_perf_handler.setFormatter(_perf_formatter)
_perf_handler.addFilter(LogFilter(MyLogger._PERF,type='eq'))
logger.addHandler(_perf_handler)
if args.verbosity:
# logging.WARNING (30) is the base; each -v lowers the threshold by 10.
level=30-(args.verbosity*10)
if level<0:
level=0
logger.setLevel(level)
levels={ 5: "CRITICAL",
4: "ERROR",
3: "WARNING",
2: "INFO",
1: "DEBUG",
0: "ALL" }
if level!=30:
log_file = "log_{}".format(randomizer)
_file_handler = logging.FileHandler(log_file)
_file_formatter = logging.Formatter('%(asctime)s %(levelname)s :: %(message)s')
_file_handler.setFormatter(_file_formatter)
_file_handler.addFilter(LogFilter(level))
logger.addHandler(_file_handler)
logger.info("Logging to file '{}'".format(log_file))
logger.info("Debug level: {0} ({1})".format(levels[level // 10],level))
# client = ClientMeta('ipaserver0.example.com',False)
# client.login('admin', 'admin123')
# kerberos seems broken using OS rpms on RHEL 8
#client.login_kerberos()
# user = client.user_add('test4', 'John', 'Doe', 'John Doe', o_preferredlanguage='EN')
# Output some data to the user about the script options passed in
# Not working as expected when git not found
try:
commit_info = str(subprocess.check_output(['git', 'log', '-n', '1', '--pretty=tformat:"%ci %H"']),"utf-8").strip()
logger.perf("Commit Info: {}".format(commit_info))
except:
logger.perf("No git info found")
pass
logger.perf("Start Time: {}".format(start_timestr))
logger.perf("User count: {} Group count: {}".format(args.count,args.group_count))
logger.perf("Server: {}".format(args.server))
logger.perf("Perf Log file: {}".format(perf_logfile))
if args.stage:
if args.ldap_stage:
logger.perf("Creating Stage users via ldap")
else:
logger.perf("Creating Stage users via API")
else:
logger.perf("Creating active users via API")
if args.ldap_group:
logger.perf("Adding users to groups via LDAP")
if args.chunk>-1:
logger.perf(" Using a chunk size of {}".format(args.chunk))
else:
logger.perf("Adding users to groups via API")
if args.reuse_template:
logger.perf("Reusing users starting with: '{}'".format(args.reuse_template))
if args.user_limit>-1:
logger.perf(" Limiting reuse to first {} users found".format(args.user_limit))
logger.debug(args)
logger.perf('----')
# end start header
client = ClientMeta(args.server,False)
client.login(args.user, args.password)
dnszone = client.dnszone_find(o_forward_only=True)['result'][0]
servers = dnszone['nsrecord']
domain = dnszone['idnsname'][0]['__dns_name__']
logger.info("Found servers: {} for domain: [{}]".format(servers, domain))
if args.ldap_group or args.ldap_stage:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
dom_dn = re.search("(dc=.*)",user_dn, re.IGNORECASE).group(1)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
if args.reuse_template:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={},".format(args.user),'',user_dn)
logger.debug("base_user_dn: {}".format(base_user_dn))
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True, collect_usage=True)
users=get_users_ldap(args.reuse_template)
else:
logger.info("Creating {} users".format(args.count))
logger.info("template: {}".format(uid_template))
logger.info("Checking for existing templated users")
user_check=client.user_find(uid_template.format(seq=0))
if user_check["count"]>0:
sec_to_wait = 61 - datetime.now().second
logger.error("Existing users found please wait {} seconds".format(sec_to_wait))
exit(1)
else:
logger.info("Proceeding")
if args.stage:
users = add_users_stage(args.count)
else:
users = add_users_api(args.count)
if args.ldap_group:
# print(ldap_server.info)
# for i in iter_timer(range(args.group_count),step=1,label="group_add_user_ldap"):
# create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
for i in loop_timer(args.group_count,1,label="group_add_user_ldap"):
create_group_add_users_ldap(i,users,ldap_conn,base_user_dn,chunk=args.chunk)
elif args.ldap_group_del is not None:
user_dn=client.user_show(args.user,o_all=True)['result']['dn']
group_dn=client.group_show(args.ldap_group_del,o_all=True)['result']['dn']
base_user_dn = re.sub("^uid={}".format(args.user),'uid={}',user_dn)
ldap_server = ldap3.Server(args.server, get_info=ldap3.ALL)
ldap_conn = ldap3.Connection(ldap_server,user=user_dn, password=args.password, auto_bind=True)
remove_group_users_ldap(users, ldap_conn, base_user_dn, args.ldap_group_del, group_dn, chunk=args.chunk)
else:
for i in loop_timer(args.group_count,1,label="group_add_user_api"):
create_group_add_users_api(i,users)
logger.perf('----')
logger.perf("End Time: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time: {:.3f}sec".format(run_time))
logger.perf("Total Run time: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60))
if args.check_repl:
record = "trecord{}".format(randomizer)
client.dnsrecord_add(a_dnszoneidnsname=domain, a_idnsname=record, o_a_part_ip_address='1.1.1.1')
check_result = 0
itr_ctr = 0
while check_result < len(servers) and itr_ctr < 600:
time.sleep(1)
check_result = 0
logger.perf("---- Iteration [{}] ----".format(itr_ctr))
for server in servers:
check_result += check_dns_record(server, domain, record)
itr_ctr += 1
logger.perf('----')
logger.perf("End Time with replication: {}".format(datetime.now().strftime("%Y%m%d %H:%M")))
run_time=time.time() - start_time
logger.perf("Total Run Time with replication: {:.3f}sec".format(run_time))
logger.perf("Total Run time with replication: {:d}min {:.3f}sec".format(int(run_time//60),run_time%60)) | |
options.go | package mock
import (
"github.com/divisionone/go-micro/client"
)
// Response sets the response methods for a service
func Response(service string, response []MockResponse) client.Option {
return func(o *client.Options) {
r, ok := fromContext(o.Context)
if !ok {
r = make(map[string][]MockResponse)
}
r[service] = response
o.Context = newContext(o.Context, r)
}
}
oc5rclr.rs | #[doc = "Register `OC5RCLR` reader"]
pub struct R(crate::R<OC5RCLR_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<OC5RCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::convert::From<crate::R<OC5RCLR_SPEC>> for R {
fn from(reader: crate::R<OC5RCLR_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `OC5RCLR` writer"]
pub struct W(crate::W<OC5RCLR_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<OC5RCLR_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<OC5RCLR_SPEC>> for W {
fn from(writer: crate::W<OC5RCLR_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `OC5R` reader - "]
pub struct OC5R_R(crate::FieldReader<u32, u32>);
impl OC5R_R {
pub(crate) fn new(bits: u32) -> Self {
OC5R_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for OC5R_R {
type Target = crate::FieldReader<u32, u32>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `OC5R` writer - "]
pub struct OC5R_W<'a> {
w: &'a mut W,
}
impl<'a> OC5R_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u32) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
self.w
}
}
impl R {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn | (&self) -> OC5R_R {
OC5R_R::new((self.bits & 0xffff_ffff) as u32)
}
}
impl W {
#[doc = "Bits 0:31"]
#[inline(always)]
pub fn oc5r(&mut self) -> OC5R_W {
OC5R_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "OC5RCLR register\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [oc5rclr](index.html) module"]
pub struct OC5RCLR_SPEC;
impl crate::RegisterSpec for OC5RCLR_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [oc5rclr::R](R) reader structure"]
impl crate::Readable for OC5RCLR_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [oc5rclr::W](W) writer structure"]
impl crate::Writable for OC5RCLR_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets OC5RCLR to value 0"]
impl crate::Resettable for OC5RCLR_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
application.rs | // Take a look at the license at the top of the repository in the LICENSE file.
// rustdoc-stripper-ignore-next
//! Traits intended for subclassing [`Application`](crate::Application).
use gio::subclass::prelude::*;
use glib::translate::*;
use glib::Cast;
use crate::{Application, Window};
pub trait GtkApplicationImpl: ObjectImpl + GtkApplicationImplExt + ApplicationImpl {
fn window_added(&self, application: &Self::Type, window: &Window) {
self.parent_window_added(application, window)
}
fn window_removed(&self, application: &Self::Type, window: &Window) {
self.parent_window_removed(application, window)
}
}
pub trait GtkApplicationImplExt: ObjectSubclass {
fn parent_window_added(&self, application: &Self::Type, window: &Window);
fn parent_window_removed(&self, application: &Self::Type, window: &Window);
}
impl<T: GtkApplicationImpl> GtkApplicationImplExt for T {
fn parent_window_added(&self, application: &Self::Type, window: &Window) {
unsafe {
let data = T::type_data();
let parent_class = data.as_ref().parent_class() as *mut ffi::GtkApplicationClass;
if let Some(f) = (*parent_class).window_added {
f(
application
.unsafe_cast_ref::<Application>()
.to_glib_none()
.0,
window.to_glib_none().0,
)
}
}
}
fn parent_window_removed(&self, application: &Self::Type, window: &Window) { | f(
application
.unsafe_cast_ref::<Application>()
.to_glib_none()
.0,
window.to_glib_none().0,
)
}
}
}
}
unsafe impl<T: GtkApplicationImpl> IsSubclassable<T> for Application {
fn class_init(class: &mut ::glib::Class<Self>) {
Self::parent_class_init::<T>(class);
let klass = class.as_mut();
klass.window_added = Some(application_window_added::<T>);
klass.window_removed = Some(application_window_removed::<T>);
// Chain our startup handler in here
let parent_klass = &mut class.as_mut().parent_class;
parent_klass.startup = Some(application_startup::<T>);
}
}
unsafe extern "C" fn application_startup<T: ObjectSubclass>(ptr: *mut gio::ffi::GApplication)
where
T: GtkApplicationImpl,
{
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<gio::Application> = from_glib_borrow(ptr);
imp.startup(wrap.unsafe_cast_ref());
crate::rt::set_initialized();
}
unsafe extern "C" fn application_window_added<T: GtkApplicationImpl>(
ptr: *mut ffi::GtkApplication,
wptr: *mut ffi::GtkWindow,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<Application> = from_glib_borrow(ptr);
imp.window_added(wrap.unsafe_cast_ref(), &from_glib_borrow(wptr))
}
unsafe extern "C" fn application_window_removed<T: GtkApplicationImpl>(
ptr: *mut ffi::GtkApplication,
wptr: *mut ffi::GtkWindow,
) {
let instance = &*(ptr as *mut T::Instance);
let imp = instance.impl_();
let wrap: Borrowed<Application> = from_glib_borrow(ptr);
imp.window_removed(wrap.unsafe_cast_ref(), &from_glib_borrow(wptr))
}
fabcar.go | /*
Copyright 2020 IBM All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package main
import (
"errors"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"github.com/hyperledger/fabric-sdk-go/pkg/core/config"
"github.com/hyperledger/fabric-sdk-go/pkg/gateway"
)
func main() {
wallet, err := gateway.NewFileSystemWallet("wallet")
if err != nil {
fmt.Printf("Failed to create wallet: %s\n", err)
os.Exit(1)
}
if !wallet.Exists("appUser") {
err = populateWallet(wallet)
if err != nil {
fmt.Printf("Failed to populate wallet contents: %s\n", err)
os.Exit(1)
}
}
ccpPath := filepath.Join(
"..",
"..",
"test-network",
"organizations",
"peerOrganizations",
"org1.example.com",
"connection-org1.yaml",
)
gw, err := gateway.Connect(
gateway.WithConfig(config.FromFile(filepath.Clean(ccpPath))),
gateway.WithIdentity(wallet, "appUser"),
)
if err != nil {
fmt.Printf("Failed to connect to gateway: %s\n", err)
os.Exit(1)
}
defer gw.Close()
network, err := gw.GetNetwork("mychannel")
if err != nil {
fmt.Printf("Failed to get network: %s\n", err)
os.Exit(1)
}
contract := network.GetContract("fabcar")
result, err := contract.EvaluateTransaction("queryAllCars")
if err != nil {
fmt.Printf("Failed to evaluate transaction: %s\n", err)
os.Exit(1)
}
fmt.Println(string(result))
result, err = contract.SubmitTransaction("createCar", "CAR10", "VW", "Polo", "Grey", "Mary")
if err != nil {
fmt.Printf("Failed to submit transaction: %s\n", err)
os.Exit(1)
}
fmt.Println(string(result))
result, err = contract.EvaluateTransaction("queryCar", "CAR10")
if err != nil {
fmt.Printf("Failed to evaluate transaction: %s\n", err)
os.Exit(1)
}
fmt.Println(string(result))
_, err = contract.SubmitTransaction("changeCarOwner", "CAR10", "Archie")
if err != nil {
fmt.Printf("Failed to submit transaction: %s\n", err)
os.Exit(1)
}
result, err = contract.EvaluateTransaction("queryCar", "CAR10")
if err != nil {
fmt.Printf("Failed to evaluate transaction: %s\n", err)
os.Exit(1)
}
fmt.Println(string(result))
}
func populateWallet(wallet *gateway.Wallet) error {
credPath := filepath.Join(
"..",
"..",
"test-network",
"organizations",
"peerOrganizations",
"org1.example.com",
"users",
"[email protected]",
"msp",
)
certPath := filepath.Join(credPath, "signcerts", "cert.pem")
// read the certificate pem
cert, err := ioutil.ReadFile(filepath.Clean(certPath))
if err != nil {
return err
}
keyDir := filepath.Join(credPath, "keystore")
// there's a single file in this dir containing the private key
files, err := ioutil.ReadDir(keyDir)
if err != nil {
return err
}
if len(files) != 1 {
return errors.New("keystore folder should have contain one file")
}
keyPath := filepath.Join(keyDir, files[0].Name())
key, err := ioutil.ReadFile(filepath.Clean(keyPath))
if err != nil {
return err
}
identity := gateway.NewX509Identity("Org1MSP", string(cert), string(key))
err = wallet.Put("appUser", identity)
if err != nil {
return err
}
return nil
}
probe.rs | use legion::prelude::*;
use nalgebra_glm::Vec3;
use crate::{
graphics::resources::{ProbeFormat, ProbeQuality},
scene::components,
Application,
};
pub fn create(
app: &mut Application,
position: Vec3,
quality: ProbeQuality,
format: ProbeFormat,
) -> Entity {
let probe_id = {
app.probe_manager
.create(Vec3::zeros(), &app.resources, quality, format)
};
let probe_component = components::Probe { id: probe_id };
let mut transform = components::Transform::new(app);
transform.position = position;
app.current_scene
.world
.insert((), vec![(probe_component, transform)])[0]
}
error.rs | // Copyright 2018 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
use anyhow::{format_err, Error};
use fidl_fuchsia_auth::Status;
use fidl_fuchsia_identity_external::Error as ExternalApiError;
use thiserror::Error;
use token_cache::AuthCacheError;
use token_store::AuthDbError;
/// An extension trait to simplify conversion of results based on general errors to
/// TokenManagerErrors.
pub trait ResultExt<T, E> {
/// Wraps the error in a non-fatal `TokenManagerError` with the supplied `Status`.
fn token_manager_status(self, status: Status) -> Result<T, TokenManagerError>;
}
impl<T, E> ResultExt<T, E> for Result<T, E>
where
E: Into<Error> + Send + Sync + Sized,
{
fn token_manager_status(self, status: Status) -> Result<T, TokenManagerError> {
self.map_err(|err| TokenManagerError::new(status).with_cause(err))
}
}
/// An Error type for problems encountered in the token manager. Each error contains the
/// `fuchsia.auth.Status` that should be reported back to the client and an indication of whether
/// it is fatal.
#[derive(Debug, Error)]
#[error("TokenManager error, returning {:?}. ({:?})", status, cause)]
pub struct TokenManagerError {
/// The most appropriate `fuchsia.auth.Status` to describe this problem.
pub status: Status,
/// Whether this error should be considered fatal, i.e. whether it should terminate processing
/// of all requests on the current channel.
pub fatal: bool,
/// The cause of this error, if available.
pub cause: Option<Error>,
}
impl TokenManagerError {
/// Constructs a new non-fatal error based on the supplied `Status`.
pub fn new(status: Status) -> Self {
TokenManagerError { status, fatal: false, cause: None }
}
/// Sets a cause on the current error.
pub fn with_cause<T: Into<Error>>(mut self, cause: T) -> Self {
self.cause = Some(cause.into());
self
}
}
impl From<Status> for TokenManagerError {
fn from(status: Status) -> Self {
TokenManagerError::new(status)
}
}
impl From<AuthDbError> for TokenManagerError {
fn from(auth_db_error: AuthDbError) -> Self {
let (status, fatal) = match &auth_db_error {
AuthDbError::InvalidArguments => (Status::InvalidRequest, true),
AuthDbError::DbInvalid => (Status::InternalError, true),
AuthDbError::CredentialNotFound => (Status::UserNotFound, false),
AuthDbError::SerializationError => (Status::InternalError, false),
_ => (Status::UnknownError, false),
};
TokenManagerError { status, fatal, cause: Some(Error::from(auth_db_error)) }
}
}
impl From<AuthCacheError> for TokenManagerError {
fn from(auth_cache_error: AuthCacheError) -> Self {
TokenManagerError {
status: match auth_cache_error {
AuthCacheError::InvalidArguments => Status::InvalidRequest,
AuthCacheError::KeyNotFound => Status::UserNotFound,
},
// No cache failures are persistent and hence none are fatal.
fatal: false,
cause: Some(Error::from(auth_cache_error)),
}
}
}
impl From<ExternalApiError> for TokenManagerError {
fn from(external_api_error: ExternalApiError) -> Self {
TokenManagerError {
status: match external_api_error {
ExternalApiError::Unknown => Status::UnknownError,
ExternalApiError::Internal => Status::InternalError,
ExternalApiError::Config => Status::AuthProviderServiceUnavailable,
ExternalApiError::UnsupportedOperation => Status::UnknownError,
ExternalApiError::InvalidRequest => Status::InvalidRequest,
ExternalApiError::Resource => Status::IoError,
ExternalApiError::Network => Status::NetworkError,
ExternalApiError::Server => Status::AuthProviderServerError,
ExternalApiError::InvalidToken => Status::ReauthRequired,
ExternalApiError::InsufficientToken => Status::ReauthRequired,
ExternalApiError::Aborted => Status::UserCancelled,
},
fatal: false,
cause: Some(format_err!("Auth provider error: {:?}", external_api_error)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use anyhow::format_err;
const TEST_STATUS: Status = Status::UnknownError;
fn create_test_error() -> Error {
format_err!("Test error")
}
#[test]
fn test_new() {
let cause = format_err!("Example cause");
let error = TokenManagerError::new(TEST_STATUS).with_cause(cause);
assert_eq!(error.status, TEST_STATUS);
assert!(!error.fatal);
assert!(error.cause.is_some());
}
#[test]
fn test_from_status() {
let error: TokenManagerError = TEST_STATUS.into();
assert_eq!(error.status, TEST_STATUS);
assert!(!error.fatal);
assert!(error.cause.is_none());
}
#[test]
fn test_token_manager_status() {
let test_result: Result<(), Error> = Err(create_test_error());
let wrapped_result = test_result.token_manager_status(TEST_STATUS);
assert_eq!(wrapped_result.as_ref().unwrap_err().status, TEST_STATUS);
assert_eq!(
format!("{:?}", wrapped_result.unwrap_err().cause.unwrap()),
format!("{:?}", create_test_error())
);
}
#[test]
fn test_from_auth_db_error() {
let err = TokenManagerError::from(AuthDbError::CredentialNotFound);
let err_fatal = TokenManagerError::from(AuthDbError::DbInvalid);
assert_eq!(
(format!("{:?}", err.cause.as_ref().unwrap()), err.fatal, err.status),
("credential not found".to_string(), false, Status::UserNotFound)
);
assert_eq!(
(format!("{:?}", err_fatal.cause.as_ref().unwrap()), err_fatal.fatal, err_fatal.status),
("database contents could not be parsed".to_string(), true, Status::InternalError)
);
}
#[test]
fn test_from_auth_cache_error() {
let err = TokenManagerError::from(AuthCacheError::InvalidArguments);
assert_eq!(
(format!("{:?}", err.cause.as_ref().unwrap()), err.fatal, err.status),
("invalid argument".to_string(), false, Status::InvalidRequest)
);
}
}
lib.rs | #![warn(rust_2018_idioms, clippy::dbg_macro, clippy::print_stdout)]
/*!
The proc-macros for [phper](https://crates.io/crates/phper).
## License
[Unlicense](https://github.com/jmjoy/phper/blob/master/LICENSE).
*/
// TODO Write a bridge macro for easy usage about register functions and classes, like `cxx`.
mod alloc;
mod derives;
mod globals;
mod inner;
mod log;
mod utils;
use proc_macro::TokenStream;
use syn::{parse_macro_input, DeriveInput};
/// C style string end with '\0'.
///
/// # Examples
///
/// ```no_test
/// use std::ffi::CStr;
///
/// assert_eq!(c_str!("foo"), unsafe {
/// CStr::from_ptr("foo\0".as_ptr().cast())
/// });
/// ```
#[proc_macro]
pub fn c_str(input: TokenStream) -> TokenStream {
utils::c_str(input)
}
/// C style string end with '\0'.
///
/// # Examples
///
/// ```no_test
/// assert_eq!(c_str_ptr!("foo"), "foo\0".as_ptr().cast());
/// ```
#[proc_macro]
pub fn c_str_ptr(input: TokenStream) -> TokenStream {
utils::c_str_ptr(input)
}
/// PHP module entry, wrap the `phper::modules::Module` write operation.
///
/// # Examples
///
/// ```no_test
/// use phper::{php_get_module, modules::Module};
///
/// #[php_get_module]
/// pub fn get_module() -> Module {
/// let mut module = Module::new(
/// env!("CARGO_PKG_NAME"),
/// env!("CARGO_PKG_VERSION"),
/// env!("CARGO_PKG_AUTHORS"),
/// );
///
/// // ...
///
/// module
/// }
///
/// ```
#[proc_macro_attribute]
pub fn php_get_module(attr: TokenStream, input: TokenStream) -> TokenStream {
inner::php_get_module(attr, input)
}
/// Auto derive for `phper::errors::Throwable`.
///
/// # Examples
///
/// ```no_test
/// #[derive(thiserror::Error, phper::Throwable, Debug)]
/// #[throwable_class("Exception")]
/// pub enum Error {
/// #[error(transparent)]
/// Io(#[from] std::io::Error),
///
/// #[error(transparent)]
/// #[throwable(transparent)]
/// My(#[from] MyError),
/// }
/// ```
///
/// TODO Support attribute `throwable` with `class`, `code` and `message`, integration tests.
#[proc_macro_derive(Throwable, attributes(throwable, throwable_class))]
pub fn derive_throwable(input: TokenStream) -> TokenStream {
let input = parse_macro_input!(input as DeriveInput);
derives::derive_throwable(input).unwrap_or_else(|e| e.into_compile_error().into())
}
secrets_transformation_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package master
import (
"crypto/aes"
"crypto/cipher"
"encoding/base64"
"fmt"
"testing"
apiserverconfigv1 "k8s.io/apiserver/pkg/apis/config/v1"
"k8s.io/apiserver/pkg/storage/value"
aestransformer "k8s.io/apiserver/pkg/storage/value/encrypt/aes"
)
const (
aesGCMPrefix = "k8s:enc:aesgcm:v1:key1:"
aesCBCPrefix = "k8s:enc:aescbc:v1:key1:"
aesGCMConfigYAML = `
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
- resources:
- secrets
providers:
- aesgcm:
keys:
- name: key1
secret: c2VjcmV0IGlzIHNlY3VyZQ==
`
aesCBCConfigYAML = `
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
- resources:
- secrets
providers:
- aescbc:
keys:
- name: key1
secret: c2VjcmV0IGlzIHNlY3VyZQ==
`
identityConfigYAML = `
kind: EncryptionConfiguration
apiVersion: apiserver.config.k8s.io/v1
resources:
- resources:
- secrets
providers:
- identity: {}
`
)
// TestSecretsShouldBeTransformed is an integration test between KubeAPI and etcd that checks:
// 1. Secrets are encrypted on write
// 2. Secrets are decrypted on read
// when EncryptionConfiguration is passed to KubeAPI server.
func TestSecretsShouldBeTransformed(t *testing.T) {
var testCases = []struct {
transformerConfigContent string
transformerPrefix string
unSealFunc unSealSecret
}{
{aesGCMConfigYAML, aesGCMPrefix, unSealWithGCMTransformer},
{aesCBCConfigYAML, aesCBCPrefix, unSealWithCBCTransformer},
// TODO: add secretbox
}
for _, tt := range testCases {
test, err := newTransformTest(t, tt.transformerConfigContent)
if err != nil {
test.cleanUp()
t.Errorf("failed to setup test for envelop %s, error was %v", tt.transformerPrefix, err)
continue
}
test.secret, err = test.createSecret(testSecret, testNamespace)
if err != nil {
t.Fatalf("Failed to create test secret, error: %v", err)
}
test.run(tt.unSealFunc, tt.transformerPrefix)
test.cleanUp()
}
}
// Baseline (no enveloping) - use to contrast with enveloping benchmarks.
func BenchmarkBase(b *testing.B) {
runBenchmark(b, "")
}
// Identity transformer is a NOOP (crypto-wise) - use to contrast with AESGCM and AESCBC benchmark results.
func BenchmarkIdentityWrite(b *testing.B) {
runBenchmark(b, identityConfigYAML)
}
func BenchmarkAESGCMEnvelopeWrite(b *testing.B) {
runBenchmark(b, aesGCMConfigYAML)
}
func BenchmarkAESCBCEnvelopeWrite(b *testing.B) {
runBenchmark(b, aesCBCConfigYAML)
}
func runBenchmark(b *testing.B, transformerConfig string) {
b.StopTimer()
test, err := newTransformTest(b, transformerConfig)
defer test.cleanUp()
if err != nil {
b.Fatalf("failed to setup benchmark for config %s, error was %v", transformerConfig, err)
}
b.StartTimer()
test.benchmark(b)
b.StopTimer()
test.printMetrics()
}
func unSealWithGCMTransformer(cipherText []byte, ctx value.Context,
transformerConfig apiserverconfigv1.ProviderConfiguration) ([]byte, error) {
block, err := newAESCipher(transformerConfig.AESGCM.Keys[0].Secret)
if err != nil {
return nil, fmt.Errorf("failed to create block cipher: %v", err)
}
gcmTransformer := aestransformer.NewGCMTransformer(block)
clearText, _, err := gcmTransformer.TransformFromStorage(cipherText, ctx)
if err != nil {
return nil, fmt.Errorf("failed to decrypt secret: %v", err)
}
return clearText, nil
}
func unSealWithCBCTransformer(cipherText []byte, ctx value.Context,
transformerConfig apiserverconfigv1.ProviderConfiguration) ([]byte, error) {
block, err := newAESCipher(transformerConfig.AESCBC.Keys[0].Secret)
if err != nil {
return nil, err
}
cbcTransformer := aestransformer.NewCBCTransformer(block)
clearText, _, err := cbcTransformer.TransformFromStorage(cipherText, ctx)
if err != nil {
return nil, fmt.Errorf("failed to decypt secret: %v", err)
}
return clearText, nil
}
func newAESCipher(key string) (cipher.Block, error) {
k, err := base64.StdEncoding.DecodeString(key)
if err != nil {
return nil, fmt.Errorf("failed to decode config secret: %v", err)
}
block, err := aes.NewCipher(k)
if err != nil {
return nil, fmt.Errorf("failed to create AES cipher: %v", err)
}
return block, nil
}
info_linux_basic.go | //go:build linux && (386 || arm64 || amd64)
// +build linux
// +build 386 arm64 amd64
package system
// ////////////////////////////////////////////////////////////////////////////////// //
// //
// Copyright (c) 2021 ESSENTIAL KAOS //
// Apache License, Version 2.0 <https://www.apache.org/licenses/LICENSE-2.0> //
// //
// ////////////////////////////////////////////////////////////////////////////////// //
// byteSliceToString converts a byte slice to a string
func byteSliceToString(s [65]int8) string {
result := ""
for _, r := range s {
if r == 0 {
break
}
result += string(rune(r))
}
return result
}
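// Editor's sketch, not part of the original file: byteSliceToString is written
// for the fixed-size, NUL-terminated name fields used by uname-style structs;
// a hand-built array behaves the same way.
func exampleByteSliceToString() string {
	var name [65]int8
	for i, c := range "linux" {
		name[i] = int8(c)
	}
	return byteSliceToString(name) // "linux"
}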
GetJobUnlockCodeInput.ts | import { NodeHttpOptions as __HttpOptions__ } from "@aws-sdk/types";
import * as __aws_sdk_types from "@aws-sdk/types";
/**
* GetJobUnlockCodeInput shape
*/
export interface GetJobUnlockCodeInput {
/**
* <p>The ID for the job that you want to get the <code>UnlockCode</code> value for, for example <code>JID123e4567-e89b-12d3-a456-426655440000</code>.</p>
*/
JobId: string;
/**
* The maximum number of times this operation should be retried. If set, this value will override the `maxRetries` configuration set on the client for this command.
*/
$maxRetries?: number;
/**
* An object that may be queried to determine if the underlying operation has been aborted.
*
* @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
*/
$abortSignal?: __aws_sdk_types.AbortSignal;
/**
* Per-request HTTP configuration options. If set, any options specified will override the corresponding HTTP option set on the client for this command.
*/
$httpOptions?: __HttpOptions__;
}
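// Editor's sketch, not part of the generated shape: a minimal input object.
// The JobId value below is illustrative only.
const exampleGetJobUnlockCodeInput: GetJobUnlockCodeInput = {
  JobId: "JID123e4567-e89b-12d3-a456-426655440000",
  $maxRetries: 3,
};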
mygoogle.py | import myrequests
class GoogleError(myrequests.RequestError):
pass
def google_geocode(address, components='locality:riga|country:LV', language='ru', key=''):
response = myrequests.requests.get(
f'https://maps.googleapis.com/maps/api/geocode/json?address={address}&components={components}&language={language}&key={key}')
if not response.ok:
raise GoogleError(response.reason)
else:
body = response.json()
if 'status' in body:
if body['status'] in ['OK', 'ZERO_RESULTS']:
return body['results']
else:
raise GoogleError(body['status'], body['error_message'])
streptomycesflavidovirens.py | """
This file offers the methods to automatically retrieve the graph Streptomyces flavidovirens.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:51:05.041684
The undirected graph Streptomyces flavidovirens has 6208 nodes and 745893
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03871 and has 31 connected components, where the component
with most nodes has 6140 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 201, the mean node degree is 240.30,
and the node degree mode is 2. The top 5 most central nodes are 1123319.AUBE01000020_gene3023
(degree 2822), 1123319.AUBE01000003_gene803 (degree 1858), 1123319.AUBE01000022_gene2882
(degree 1842), 1123319.AUBE01000016_gene5937 (degree 1794) and 1123319.AUBE01000016_gene5980
(degree 1776).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesFlavidovirens
# Then load the graph
graph = StreptomycesFlavidovirens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def StreptomycesFlavidovirens(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Streptomyces flavidovirens graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of Streptomyces flavidovirens graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:51:05.041684
The undirected graph Streptomyces flavidovirens has 6208 nodes and 745893
weighted edges, of which none are self-loops. The graph is dense as it
has a density of 0.03871 and has 31 connected components, where the component
with most nodes has 6140 nodes and the component with the least nodes has
2 nodes. The graph median node degree is 201, the mean node degree is 240.30,
and the node degree mode is 2. The top 5 most central nodes are 1123319.AUBE01000020_gene3023
(degree 2822), 1123319.AUBE01000003_gene803 (degree 1858), 1123319.AUBE01000022_gene2882
(degree 1842), 1123319.AUBE01000016_gene5937 (degree 1794) and 1123319.AUBE01000016_gene5980
(degree 1776).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import StreptomycesFlavidovirens
# Then load the graph
graph = StreptomycesFlavidovirens()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="StreptomycesFlavidovirens",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
activate_amplifier.py | #!/usr/bin/python3
import sys
from signal import pause
import RPi.GPIO as GPIO
# script to activate and deactivate an amplifier, power led, etc. using a GPIO
# pin on power up / down
# see for an example implementation with a PAM8403 digital amplifier
# (PAM pin 12 connected to GPIO 26)
# https://github.com/MiczFlor/RPi-Jukebox-RFID/wiki/Hardware-Hack-PAM8403-Poweroff
# change this value based on which GPIO port the amplifier or other devices are connected to
# Flexible Pinout
AMP_GPIO = 26
# Classic Pinout
# AMP_GPIO = 23
# setup RPi lib to control output pin
# we do not cleanup the GPIO because we want the pin low = off after program exit
# the resulting warning can be ignored
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(AMP_GPIO, GPIO.OUT)
def set_amplifier(status):
if status:
print("Setting amplifier: ON")
GPIO.output(AMP_GPIO, GPIO.HIGH)
else:
print("Setting amplifier: OFF")
GPIO.output(AMP_GPIO, GPIO.LOW)
if __name__ == "__main__":
try:
set_amplifier(True)
pause()
except KeyboardInterrupt:
# turn the relay off
set_amplifier(False)
print("\nExiting amplifier control\n")
# exit the application
sys.exit(0)
test_todo.py | import unittest
import requests_mock
from canvasapi import Canvas
from canvasapi.todo import Todo
from tests import settings
@requests_mock.Mocker()
class TestTodo(unittest.TestCase):
def setUp(self):
self.canvas = Canvas(settings.BASE_URL, settings.API_KEY)
self.todo = Todo(
self.canvas._Canvas__requester,
{
"type": "grading",
"assignment": {},
"ignore": ".. url ..",
"ignore_permanently": ".. url ..",
"html_url": ".. url ..",
"needs_grading_count": 3,
"context_type": "course",
"course_id": 1,
"group_id": None,
},
)
def test_str(self, m):
test_str = str(self.todo)
self.assertIsInstance(test_str, str)
self.assertEqual(test_str, "Todo Item (grading)")
|
0205. isIsomorphic.py | # pigeonhole
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
return len(set(zip(s, t))) == len(set(s)) == len(set(t))
# two dict
class Solution:
def isIsomorphic(self, s: str, t: str) -> bool:
dx, dy = {}, {}
for x, y in zip(s, t):
if (x in dx and dx[x] != y) or (y in dy and dy[y] != x):
return False
dx[x], dy[y] = y, x
return True
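# Editor's sketch, not part of the original solutions: the same checks hold for
# both versions above; "egg"/"add" is isomorphic, "foo"/"bar" is not.
def _example_is_isomorphic():
    s = Solution()
    return s.isIsomorphic("egg", "add"), s.isIsomorphic("foo", "bar")  # (True, False)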
repeated_util.py | """Helper functions for dealing with repeated fields.
It comes up in a few places that we need to flatten or unflatten repeated
columns when using them in conjunction with other repeated or scalar fields.
These functions allow us to flatten into non-repeated columns to apply various
operations and then unflatten back into repeated columns afterwards.
"""
from __future__ import absolute_import
from tinyquery import tq_modes
def rebuild_column_values(repetitions, values, result):
"""Rebuild a repeated column from flattened results.
Args:
repetitions: a list of how many repeated values go in a row for
each of the rows to process.
values: a list of all the values that need to be packed into lists
result: a (partial) result list to which the rows will be appended.
Returns:
a list of lists of values representing len(repetitions) rows, each
of which with a number of values corresponding to that row's
entry in repetitions
"""
if len(repetitions) == 0:
return result
curr_repetition = repetitions[0]
# For rows with no values, we supplied a None, so we need to pop
# off one value no matter what. If that value is None, we go back
# to an empty list, otherwise we put the value in a list.
curr_values = normalize_repeated_null(values[:max(curr_repetition, 1)])
return rebuild_column_values(
repetitions[1:],
values[max(curr_repetition, 1):],
result + [curr_values])
def normalize_column_to_length(col, desired_count):
"""Given the value(s) for a column, normalize to a desired length.
If `col` is a scalar, it's duplicated in a list the desired number of
times. If `col` is a list, it must have 0, 1, or the desired number of
elements, in which cases `None` or the single element is duplicated, or
the original list is returned.
"""
desired_count = max(desired_count, 1)
if isinstance(col, list) and len(col) == desired_count:
return col
elif isinstance(col, list):
assert len(col) in (0, 1), (
'Unexpectedly got a row with the incorrect number of '
'repeated values.')
return (col or [None]) * desired_count
else:
return [col] * desired_count
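# Editor's sketch, not part of the original module: a scalar is duplicated, an
# empty repeated value becomes a list of Nones, and a full-length list is
# returned unchanged.
def _example_normalize_column_to_length():
    assert normalize_column_to_length(7, 3) == [7, 7, 7]
    assert normalize_column_to_length([], 3) == [None, None, None]
    assert normalize_column_to_length([1, 2, 3], 3) == [1, 2, 3]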
def flatten_column_values(repeated_column_indices, column_values):
"""Take a list of columns and flatten them.
We need to accomplish three things during the flattening:
1. Flatten out any repeated fields.
2. Keep track of how many repeated values were in each row so that we
can go back
3. If there are other columns, duplicate their values so that we have
the same number of entries in all columns after flattening.
Args:
repeated_column_indices: the indices of the columns that
are repeated; if there's more than one repeated column, this
function assumes that we've already checked that the lengths of
these columns will match up, or that they have 0 or 1 element.
column_values: a list containing a list for each column's values.
Returns:
(repetition_counts, flattened_columns): a tuple
repetition_counts: a list containing one number per row,
representing the number of repeated values in that row
flattened_columns: a list containing one list for each column's
values. The list for each column will not contain nested
lists.
"""
# wrapping in list for python 3 support
rows = list(zip(*column_values))
repetition_counts = [
max(max(len(row[idx]) for idx in repeated_column_indices), 1)
for row in rows
]
rows_with_repetition_normalized = [
[
normalize_column_to_length(col, count)
for col in row
]
for row, count in zip(rows, repetition_counts)
]
normalized_columns = zip(*rows_with_repetition_normalized)
flattened_columns = [
[val for arr in col for val in arr]
for col in normalized_columns]
return (repetition_counts, flattened_columns)
def columns_have_allowed_repetition_counts(ref_col, col):
"""Determine if we could select col along with ref_col.
We assume ref_col is repeated. In tinyquery this is allowable if any of
the following is true:
- col is not repeated
- col is repeated but every row has only 0 or 1 element
- col is repeated but every row with more than 1 element matches the number
of elements in ref_col
"""
if col.mode != tq_modes.REPEATED:
return True
ref_counts = [len(val) for val in ref_col.values]
counts = [len(val) for val in col.values]
return all(
rc == c or c in (0, 1) or rc in (0, 1)
for rc, c in zip(ref_counts, counts))
def normalize_repeated_null(value):
"""Normalze the way we represent null in repeated fields.
There's 3 equivalent options: `None`, [], and `[None]`. We chose [] to be
the standard for repeated fields, so this turns any of these into [].
"""
if value is None or value == [None]:
return []
return value
model.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// <p>An arbitary key/value pair used to add searchable metadata to secure tunnel
/// resources.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Tag {
/// <p>The key of the tag.</p>
pub key: std::option::Option<std::string::String>,
/// <p>The value of the tag.</p>
pub value: std::option::Option<std::string::String>,
}
impl Tag {
/// <p>The key of the tag.</p>
pub fn key(&self) -> std::option::Option<&str> {
self.key.as_deref()
}
/// <p>The value of the tag.</p>
pub fn value(&self) -> std::option::Option<&str> {
self.value.as_deref()
}
}
impl std::fmt::Debug for Tag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Tag");
formatter.field("key", &self.key);
formatter.field("value", &self.value);
formatter.finish()
}
}
/// See [`Tag`](crate::model::Tag)
pub mod tag {
/// A builder for [`Tag`](crate::model::Tag)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) key: std::option::Option<std::string::String>,
pub(crate) value: std::option::Option<std::string::String>,
}
impl Builder {
/// <p>The key of the tag.</p>
pub fn key(mut self, input: impl Into<std::string::String>) -> Self {
self.key = Some(input.into());
self
}
/// <p>The key of the tag.</p>
pub fn set_key(mut self, input: std::option::Option<std::string::String>) -> Self {
self.key = input;
self
}
/// <p>The value of the tag.</p>
pub fn value(mut self, input: impl Into<std::string::String>) -> Self {
self.value = Some(input.into());
self
}
/// <p>The value of the tag.</p>
pub fn set_value(mut self, input: std::option::Option<std::string::String>) -> Self {
self.value = input;
self
}
/// Consumes the builder and constructs a [`Tag`](crate::model::Tag)
pub fn build(self) -> crate::model::Tag {
crate::model::Tag {
key: self.key,
value: self.value,
}
}
}
}
impl Tag {
/// Creates a new builder-style object to manufacture [`Tag`](crate::model::Tag)
pub fn builder() -> crate::model::tag::Builder {
crate::model::tag::Builder::default()
}
}
/// <p>Tunnel timeout configuration.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TimeoutConfig {
/// <p>The maximum amount of time (in minutes) a tunnel can remain open. If not specified,
/// maxLifetimeTimeoutMinutes defaults to 720 minutes. Valid values are from 1 minute to 12
/// hours (720 minutes) </p>
pub max_lifetime_timeout_minutes: std::option::Option<i32>,
}
impl TimeoutConfig {
/// <p>The maximum amount of time (in minutes) a tunnel can remain open. If not specified,
/// maxLifetimeTimeoutMinutes defaults to 720 minutes. Valid values are from 1 minute to 12
/// hours (720 minutes) </p>
pub fn max_lifetime_timeout_minutes(&self) -> std::option::Option<i32> {
self.max_lifetime_timeout_minutes
}
}
impl std::fmt::Debug for TimeoutConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TimeoutConfig");
formatter.field(
"max_lifetime_timeout_minutes",
&self.max_lifetime_timeout_minutes,
);
formatter.finish()
}
}
/// See [`TimeoutConfig`](crate::model::TimeoutConfig)
pub mod timeout_config {
/// A builder for [`TimeoutConfig`](crate::model::TimeoutConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) max_lifetime_timeout_minutes: std::option::Option<i32>,
}
impl Builder {
/// <p>The maximum amount of time (in minutes) a tunnel can remain open. If not specified,
/// maxLifetimeTimeoutMinutes defaults to 720 minutes. Valid values are from 1 minute to 12
/// hours (720 minutes) </p>
pub fn max_lifetime_timeout_minutes(mut self, input: i32) -> Self {
self.max_lifetime_timeout_minutes = Some(input);
self
}
/// <p>The maximum amount of time (in minutes) a tunnel can remain open. If not specified,
/// maxLifetimeTimeoutMinutes defaults to 720 minutes. Valid values are from 1 minute to 12
/// hours (720 minutes) </p>
pub fn set_max_lifetime_timeout_minutes(mut self, input: std::option::Option<i32>) -> Self {
self.max_lifetime_timeout_minutes = input;
self
}
/// Consumes the builder and constructs a [`TimeoutConfig`](crate::model::TimeoutConfig)
pub fn build(self) -> crate::model::TimeoutConfig {
crate::model::TimeoutConfig {
max_lifetime_timeout_minutes: self.max_lifetime_timeout_minutes,
}
}
}
}
impl TimeoutConfig {
/// Creates a new builder-style object to manufacture [`TimeoutConfig`](crate::model::TimeoutConfig)
pub fn builder() -> crate::model::timeout_config::Builder {
crate::model::timeout_config::Builder::default()
}
}
/// <p>The destination configuration.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct DestinationConfig {
/// <p>The name of the IoT thing to which you want to connect.</p>
pub thing_name: std::option::Option<std::string::String>,
    /// <p>A list of service names that identify the target application. The AWS IoT client running on the destination device reads
/// this value and uses it to look up a port or an IP address and a port. The AWS IoT client
/// instantiates the local proxy which uses this information to connect to the destination
/// application.</p>
pub services: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl DestinationConfig {
/// <p>The name of the IoT thing to which you want to connect.</p>
pub fn thing_name(&self) -> std::option::Option<&str> {
self.thing_name.as_deref()
}
    /// <p>A list of service names that identify the target application. The AWS IoT client running on the destination device reads
/// this value and uses it to look up a port or an IP address and a port. The AWS IoT client
/// instantiates the local proxy which uses this information to connect to the destination
/// application.</p>
pub fn services(&self) -> std::option::Option<&[std::string::String]> {
self.services.as_deref()
}
}
impl std::fmt::Debug for DestinationConfig {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("DestinationConfig");
formatter.field("thing_name", &self.thing_name);
formatter.field("services", &self.services);
formatter.finish()
}
}
/// See [`DestinationConfig`](crate::model::DestinationConfig)
pub mod destination_config {
/// A builder for [`DestinationConfig`](crate::model::DestinationConfig)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) thing_name: std::option::Option<std::string::String>,
pub(crate) services: std::option::Option<std::vec::Vec<std::string::String>>,
}
impl Builder {
/// <p>The name of the IoT thing to which you want to connect.</p>
pub fn thing_name(mut self, input: impl Into<std::string::String>) -> Self {
self.thing_name = Some(input.into());
self
}
/// <p>The name of the IoT thing to which you want to connect.</p>
pub fn set_thing_name(mut self, input: std::option::Option<std::string::String>) -> Self {
self.thing_name = input;
self
}
/// Appends an item to `services`.
///
/// To override the contents of this collection use [`set_services`](Self::set_services).
///
        /// <p>A list of service names that identify the target application. The AWS IoT client running on the destination device reads
/// this value and uses it to look up a port or an IP address and a port. The AWS IoT client
/// instantiates the local proxy which uses this information to connect to the destination
/// application.</p>
pub fn services(mut self, input: impl Into<std::string::String>) -> Self {
let mut v = self.services.unwrap_or_default();
v.push(input.into());
self.services = Some(v);
self
}
        /// <p>A list of service names that identify the target application. The AWS IoT client running on the destination device reads
/// this value and uses it to look up a port or an IP address and a port. The AWS IoT client
/// instantiates the local proxy which uses this information to connect to the destination
/// application.</p>
pub fn set_services(
mut self,
input: std::option::Option<std::vec::Vec<std::string::String>>,
) -> Self {
self.services = input;
self
}
/// Consumes the builder and constructs a [`DestinationConfig`](crate::model::DestinationConfig)
pub fn build(self) -> crate::model::DestinationConfig {
crate::model::DestinationConfig {
thing_name: self.thing_name,
services: self.services,
}
}
}
}
impl DestinationConfig {
/// Creates a new builder-style object to manufacture [`DestinationConfig`](crate::model::DestinationConfig)
pub fn builder() -> crate::model::destination_config::Builder {
crate::model::destination_config::Builder::default()
}
}
/// <p>Information about the tunnel.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct TunnelSummary {
/// <p>The unique alpha-numeric identifier for the tunnel.</p>
pub tunnel_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name of the tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub tunnel_arn: std::option::Option<std::string::String>,
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub status: std::option::Option<crate::model::TunnelStatus>,
/// <p>A description of the tunnel.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The time the tunnel was created.</p>
pub created_at: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The time the tunnel was last updated.</p>
pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl TunnelSummary {
/// <p>The unique alpha-numeric identifier for the tunnel.</p>
pub fn tunnel_id(&self) -> std::option::Option<&str> {
self.tunnel_id.as_deref()
}
/// <p>The Amazon Resource Name of the tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn tunnel_arn(&self) -> std::option::Option<&str> {
self.tunnel_arn.as_deref()
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn status(&self) -> std::option::Option<&crate::model::TunnelStatus> {
self.status.as_ref()
}
/// <p>A description of the tunnel.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>The time the tunnel was created.</p>
pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.created_at.as_ref()
}
/// <p>The time the tunnel was last updated.</p>
pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_updated_at.as_ref()
}
}
impl std::fmt::Debug for TunnelSummary {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("TunnelSummary");
formatter.field("tunnel_id", &self.tunnel_id);
formatter.field("tunnel_arn", &self.tunnel_arn);
formatter.field("status", &self.status);
formatter.field("description", &self.description);
formatter.field("created_at", &self.created_at);
formatter.field("last_updated_at", &self.last_updated_at);
formatter.finish()
}
}
/// See [`TunnelSummary`](crate::model::TunnelSummary)
pub mod tunnel_summary {
/// A builder for [`TunnelSummary`](crate::model::TunnelSummary)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) tunnel_id: std::option::Option<std::string::String>,
pub(crate) tunnel_arn: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::TunnelStatus>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The unique alpha-numeric identifier for the tunnel.</p>
pub fn tunnel_id(mut self, input: impl Into<std::string::String>) -> Self {
self.tunnel_id = Some(input.into());
self
}
/// <p>The unique alpha-numeric identifier for the tunnel.</p>
pub fn set_tunnel_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.tunnel_id = input;
self
}
/// <p>The Amazon Resource Name of the tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn tunnel_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.tunnel_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name of the tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn set_tunnel_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.tunnel_arn = input;
self
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn status(mut self, input: crate::model::TunnelStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::TunnelStatus>,
) -> Self {
self.status = input;
self
}
/// <p>A description of the tunnel.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>A description of the tunnel.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The time the tunnel was created.</p>
pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
self.created_at = Some(input);
self
}
/// <p>The time the tunnel was created.</p>
pub fn set_created_at(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.created_at = input;
self
}
/// <p>The time the tunnel was last updated.</p>
pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_updated_at = Some(input);
self
}
/// <p>The time the tunnel was last updated.</p>
pub fn set_last_updated_at(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_updated_at = input;
self
}
/// Consumes the builder and constructs a [`TunnelSummary`](crate::model::TunnelSummary)
pub fn build(self) -> crate::model::TunnelSummary {
crate::model::TunnelSummary {
tunnel_id: self.tunnel_id,
tunnel_arn: self.tunnel_arn,
status: self.status,
description: self.description,
created_at: self.created_at,
last_updated_at: self.last_updated_at,
}
}
}
}
impl TunnelSummary {
/// Creates a new builder-style object to manufacture [`TunnelSummary`](crate::model::TunnelSummary)
pub fn builder() -> crate::model::tunnel_summary::Builder {
crate::model::tunnel_summary::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum TunnelStatus {
#[allow(missing_docs)] // documentation missing in model
Closed,
#[allow(missing_docs)] // documentation missing in model
Open,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for TunnelStatus {
fn from(s: &str) -> Self {
match s {
"CLOSED" => TunnelStatus::Closed,
"OPEN" => TunnelStatus::Open,
other => TunnelStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for TunnelStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(TunnelStatus::from(s))
}
}
impl TunnelStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
TunnelStatus::Closed => "CLOSED",
TunnelStatus::Open => "OPEN",
TunnelStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CLOSED", "OPEN"]
}
}
impl AsRef<str> for TunnelStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
/// <p>A connection between a source computer and a destination device.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct Tunnel {
/// <p>A unique alpha-numeric ID that identifies a tunnel.</p>
pub tunnel_id: std::option::Option<std::string::String>,
/// <p>The Amazon Resource Name (ARN) of a tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub tunnel_arn: std::option::Option<std::string::String>,
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub status: std::option::Option<crate::model::TunnelStatus>,
/// <p>The connection state of the source application.</p>
pub source_connection_state: std::option::Option<crate::model::ConnectionState>,
/// <p>The connection state of the destination application.</p>
pub destination_connection_state: std::option::Option<crate::model::ConnectionState>,
/// <p>A description of the tunnel.</p>
pub description: std::option::Option<std::string::String>,
/// <p>The destination configuration that specifies the thing name of the destination
/// device and a service name that the local proxy uses to connect to the destination
/// application.</p>
pub destination_config: std::option::Option<crate::model::DestinationConfig>,
/// <p>Timeout configuration for the tunnel.</p>
pub timeout_config: std::option::Option<crate::model::TimeoutConfig>,
/// <p>A list of tag metadata associated with the secure tunnel.</p>
pub tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
/// <p>The time when the tunnel was created.</p>
pub created_at: std::option::Option<aws_smithy_types::DateTime>,
/// <p>The last time the tunnel was updated.</p>
pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl Tunnel {
/// <p>A unique alpha-numeric ID that identifies a tunnel.</p>
pub fn tunnel_id(&self) -> std::option::Option<&str> {
self.tunnel_id.as_deref()
}
/// <p>The Amazon Resource Name (ARN) of a tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn tunnel_arn(&self) -> std::option::Option<&str> {
self.tunnel_arn.as_deref()
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn status(&self) -> std::option::Option<&crate::model::TunnelStatus> {
self.status.as_ref()
}
/// <p>The connection state of the source application.</p>
pub fn source_connection_state(&self) -> std::option::Option<&crate::model::ConnectionState> {
self.source_connection_state.as_ref()
}
/// <p>The connection state of the destination application.</p>
pub fn destination_connection_state(
&self,
) -> std::option::Option<&crate::model::ConnectionState> {
self.destination_connection_state.as_ref()
}
/// <p>A description of the tunnel.</p>
pub fn description(&self) -> std::option::Option<&str> {
self.description.as_deref()
}
/// <p>The destination configuration that specifies the thing name of the destination
/// device and a service name that the local proxy uses to connect to the destination
/// application.</p>
pub fn destination_config(&self) -> std::option::Option<&crate::model::DestinationConfig> {
self.destination_config.as_ref()
}
/// <p>Timeout configuration for the tunnel.</p>
pub fn timeout_config(&self) -> std::option::Option<&crate::model::TimeoutConfig> {
self.timeout_config.as_ref()
}
/// <p>A list of tag metadata associated with the secure tunnel.</p>
pub fn tags(&self) -> std::option::Option<&[crate::model::Tag]> {
self.tags.as_deref()
}
/// <p>The time when the tunnel was created.</p>
pub fn created_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.created_at.as_ref()
}
/// <p>The last time the tunnel was updated.</p>
pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_updated_at.as_ref()
}
}
impl std::fmt::Debug for Tunnel {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("Tunnel");
formatter.field("tunnel_id", &self.tunnel_id);
formatter.field("tunnel_arn", &self.tunnel_arn);
formatter.field("status", &self.status);
formatter.field("source_connection_state", &self.source_connection_state);
formatter.field(
"destination_connection_state",
&self.destination_connection_state,
);
formatter.field("description", &self.description);
formatter.field("destination_config", &self.destination_config);
formatter.field("timeout_config", &self.timeout_config);
formatter.field("tags", &self.tags);
formatter.field("created_at", &self.created_at);
formatter.field("last_updated_at", &self.last_updated_at);
formatter.finish()
}
}
/// See [`Tunnel`](crate::model::Tunnel)
pub mod tunnel {
/// A builder for [`Tunnel`](crate::model::Tunnel)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
    pub struct Builder {
pub(crate) tunnel_id: std::option::Option<std::string::String>,
pub(crate) tunnel_arn: std::option::Option<std::string::String>,
pub(crate) status: std::option::Option<crate::model::TunnelStatus>,
pub(crate) source_connection_state: std::option::Option<crate::model::ConnectionState>,
pub(crate) destination_connection_state: std::option::Option<crate::model::ConnectionState>,
pub(crate) description: std::option::Option<std::string::String>,
pub(crate) destination_config: std::option::Option<crate::model::DestinationConfig>,
pub(crate) timeout_config: std::option::Option<crate::model::TimeoutConfig>,
pub(crate) tags: std::option::Option<std::vec::Vec<crate::model::Tag>>,
pub(crate) created_at: std::option::Option<aws_smithy_types::DateTime>,
pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>A unique alpha-numeric ID that identifies a tunnel.</p>
pub fn tunnel_id(mut self, input: impl Into<std::string::String>) -> Self {
self.tunnel_id = Some(input.into());
self
}
/// <p>A unique alpha-numeric ID that identifies a tunnel.</p>
pub fn set_tunnel_id(mut self, input: std::option::Option<std::string::String>) -> Self {
self.tunnel_id = input;
self
}
/// <p>The Amazon Resource Name (ARN) of a tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn tunnel_arn(mut self, input: impl Into<std::string::String>) -> Self {
self.tunnel_arn = Some(input.into());
self
}
/// <p>The Amazon Resource Name (ARN) of a tunnel. The tunnel ARN format is
/// <code>arn:aws:tunnel:<region>:<account-id>:tunnel/<tunnel-id></code>
/// </p>
pub fn set_tunnel_arn(mut self, input: std::option::Option<std::string::String>) -> Self {
self.tunnel_arn = input;
self
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn status(mut self, input: crate::model::TunnelStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The status of a tunnel. Valid values are: Open and Closed.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::TunnelStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The connection state of the source application.</p>
pub fn source_connection_state(mut self, input: crate::model::ConnectionState) -> Self {
self.source_connection_state = Some(input);
self
}
/// <p>The connection state of the source application.</p>
pub fn set_source_connection_state(
mut self,
input: std::option::Option<crate::model::ConnectionState>,
) -> Self {
self.source_connection_state = input;
self
}
/// <p>The connection state of the destination application.</p>
pub fn destination_connection_state(
mut self,
input: crate::model::ConnectionState,
) -> Self {
self.destination_connection_state = Some(input);
self
}
/// <p>The connection state of the destination application.</p>
pub fn set_destination_connection_state(
mut self,
input: std::option::Option<crate::model::ConnectionState>,
) -> Self {
self.destination_connection_state = input;
self
}
/// <p>A description of the tunnel.</p>
pub fn description(mut self, input: impl Into<std::string::String>) -> Self {
self.description = Some(input.into());
self
}
/// <p>A description of the tunnel.</p>
pub fn set_description(mut self, input: std::option::Option<std::string::String>) -> Self {
self.description = input;
self
}
/// <p>The destination configuration that specifies the thing name of the destination
/// device and a service name that the local proxy uses to connect to the destination
/// application.</p>
pub fn destination_config(mut self, input: crate::model::DestinationConfig) -> Self {
self.destination_config = Some(input);
self
}
/// <p>The destination configuration that specifies the thing name of the destination
/// device and a service name that the local proxy uses to connect to the destination
/// application.</p>
pub fn set_destination_config(
mut self,
input: std::option::Option<crate::model::DestinationConfig>,
) -> Self {
self.destination_config = input;
self
}
/// <p>Timeout configuration for the tunnel.</p>
pub fn timeout_config(mut self, input: crate::model::TimeoutConfig) -> Self {
self.timeout_config = Some(input);
self
}
/// <p>Timeout configuration for the tunnel.</p>
pub fn set_timeout_config(
mut self,
input: std::option::Option<crate::model::TimeoutConfig>,
) -> Self {
self.timeout_config = input;
self
}
/// Appends an item to `tags`.
///
/// To override the contents of this collection use [`set_tags`](Self::set_tags).
///
/// <p>A list of tag metadata associated with the secure tunnel.</p>
pub fn tags(mut self, input: impl Into<crate::model::Tag>) -> Self {
let mut v = self.tags.unwrap_or_default();
v.push(input.into());
self.tags = Some(v);
self
}
/// <p>A list of tag metadata associated with the secure tunnel.</p>
pub fn set_tags(
mut self,
input: std::option::Option<std::vec::Vec<crate::model::Tag>>,
) -> Self {
self.tags = input;
self
}
/// <p>The time when the tunnel was created.</p>
pub fn created_at(mut self, input: aws_smithy_types::DateTime) -> Self {
self.created_at = Some(input);
self
}
/// <p>The time when the tunnel was created.</p>
pub fn set_created_at(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.created_at = input;
self
}
/// <p>The last time the tunnel was updated.</p>
pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_updated_at = Some(input);
self
}
/// <p>The last time the tunnel was updated.</p>
pub fn set_last_updated_at(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_updated_at = input;
self
}
/// Consumes the builder and constructs a [`Tunnel`](crate::model::Tunnel)
pub fn build(self) -> crate::model::Tunnel {
crate::model::Tunnel {
tunnel_id: self.tunnel_id,
tunnel_arn: self.tunnel_arn,
status: self.status,
source_connection_state: self.source_connection_state,
destination_connection_state: self.destination_connection_state,
description: self.description,
destination_config: self.destination_config,
timeout_config: self.timeout_config,
tags: self.tags,
created_at: self.created_at,
last_updated_at: self.last_updated_at,
}
}
}
}
impl Tunnel {
/// Creates a new builder-style object to manufacture [`Tunnel`](crate::model::Tunnel)
pub fn builder() -> crate::model::tunnel::Builder {
crate::model::tunnel::Builder::default()
}
}
/// <p>The state of a connection.</p>
#[non_exhaustive]
#[derive(std::clone::Clone, std::cmp::PartialEq)]
pub struct ConnectionState {
/// <p>The connection status of the tunnel. Valid values are <code>CONNECTED</code> and
/// <code>DISCONNECTED</code>.</p>
pub status: std::option::Option<crate::model::ConnectionStatus>,
/// <p>The last time the connection status was updated.</p>
pub last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl ConnectionState {
/// <p>The connection status of the tunnel. Valid values are <code>CONNECTED</code> and
/// <code>DISCONNECTED</code>.</p>
pub fn status(&self) -> std::option::Option<&crate::model::ConnectionStatus> {
self.status.as_ref()
}
/// <p>The last time the connection status was updated.</p>
pub fn last_updated_at(&self) -> std::option::Option<&aws_smithy_types::DateTime> {
self.last_updated_at.as_ref()
}
}
impl std::fmt::Debug for ConnectionState {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut formatter = f.debug_struct("ConnectionState");
formatter.field("status", &self.status);
formatter.field("last_updated_at", &self.last_updated_at);
formatter.finish()
}
}
/// See [`ConnectionState`](crate::model::ConnectionState)
pub mod connection_state {
/// A builder for [`ConnectionState`](crate::model::ConnectionState)
#[non_exhaustive]
#[derive(std::default::Default, std::clone::Clone, std::cmp::PartialEq, std::fmt::Debug)]
pub struct Builder {
pub(crate) status: std::option::Option<crate::model::ConnectionStatus>,
pub(crate) last_updated_at: std::option::Option<aws_smithy_types::DateTime>,
}
impl Builder {
/// <p>The connection status of the tunnel. Valid values are <code>CONNECTED</code> and
/// <code>DISCONNECTED</code>.</p>
pub fn status(mut self, input: crate::model::ConnectionStatus) -> Self {
self.status = Some(input);
self
}
/// <p>The connection status of the tunnel. Valid values are <code>CONNECTED</code> and
/// <code>DISCONNECTED</code>.</p>
pub fn set_status(
mut self,
input: std::option::Option<crate::model::ConnectionStatus>,
) -> Self {
self.status = input;
self
}
/// <p>The last time the connection status was updated.</p>
pub fn last_updated_at(mut self, input: aws_smithy_types::DateTime) -> Self {
self.last_updated_at = Some(input);
self
}
/// <p>The last time the connection status was updated.</p>
pub fn set_last_updated_at(
mut self,
input: std::option::Option<aws_smithy_types::DateTime>,
) -> Self {
self.last_updated_at = input;
self
}
/// Consumes the builder and constructs a [`ConnectionState`](crate::model::ConnectionState)
pub fn build(self) -> crate::model::ConnectionState {
crate::model::ConnectionState {
status: self.status,
last_updated_at: self.last_updated_at,
}
}
}
}
impl ConnectionState {
/// Creates a new builder-style object to manufacture [`ConnectionState`](crate::model::ConnectionState)
pub fn builder() -> crate::model::connection_state::Builder {
crate::model::connection_state::Builder::default()
}
}
#[allow(missing_docs)] // documentation missing in model
#[non_exhaustive]
#[derive(
std::clone::Clone,
std::cmp::Eq,
std::cmp::Ord,
std::cmp::PartialEq,
std::cmp::PartialOrd,
std::fmt::Debug,
std::hash::Hash,
)]
pub enum ConnectionStatus {
#[allow(missing_docs)] // documentation missing in model
Connected,
#[allow(missing_docs)] // documentation missing in model
Disconnected,
/// Unknown contains new variants that have been added since this code was generated.
Unknown(String),
}
impl std::convert::From<&str> for ConnectionStatus {
fn from(s: &str) -> Self {
match s {
"CONNECTED" => ConnectionStatus::Connected,
"DISCONNECTED" => ConnectionStatus::Disconnected,
other => ConnectionStatus::Unknown(other.to_owned()),
}
}
}
impl std::str::FromStr for ConnectionStatus {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
Ok(ConnectionStatus::from(s))
}
}
impl ConnectionStatus {
/// Returns the `&str` value of the enum member.
pub fn as_str(&self) -> &str {
match self {
ConnectionStatus::Connected => "CONNECTED",
ConnectionStatus::Disconnected => "DISCONNECTED",
ConnectionStatus::Unknown(s) => s.as_ref(),
}
}
/// Returns all the `&str` values of the enum members.
pub fn values() -> &'static [&'static str] {
&["CONNECTED", "DISCONNECTED"]
}
}
impl AsRef<str> for ConnectionStatus {
fn as_ref(&self) -> &str {
self.as_str()
}
}
template_pyqt5.py
# Form implementation generated from reading ui file 'pyqtgraph/console/template.ui'
#
# Created by: PyQt5 UI code generator 5.5.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(739, 497)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(Form)
        self.splitter.setOrientation(QtCore.Qt.Vertical)
        self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.output = QtWidgets.QPlainTextEdit(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Monospace")
self.output.setFont(font)
self.output.setReadOnly(True)
self.output.setObjectName("output")
self.verticalLayout.addWidget(self.output)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.input = CmdInput(self.layoutWidget)
self.input.setObjectName("input")
self.horizontalLayout.addWidget(self.input)
self.historyBtn = QtWidgets.QPushButton(self.layoutWidget)
self.historyBtn.setCheckable(True)
self.historyBtn.setObjectName("historyBtn")
self.horizontalLayout.addWidget(self.historyBtn)
self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget)
self.exceptionBtn.setCheckable(True)
self.exceptionBtn.setObjectName("exceptionBtn")
self.horizontalLayout.addWidget(self.exceptionBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.historyList = QtWidgets.QListWidget(self.splitter)
font = QtGui.QFont()
font.setFamily("Monospace")
self.historyList.setFont(font)
self.historyList.setObjectName("historyList")
self.exceptionGroup = QtWidgets.QGroupBox(self.splitter)
self.exceptionGroup.setObjectName("exceptionGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setHorizontalSpacing(2)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.clearExceptionBtn.setEnabled(False)
self.clearExceptionBtn.setObjectName("clearExceptionBtn")
self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchAllExceptionsBtn.setCheckable(True)
self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchNextExceptionBtn.setCheckable(True)
self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.onlyUncaughtCheck.setChecked(True)
self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup)
self.exceptionStackList.setAlternatingRowColors(True)
self.exceptionStackList.setObjectName("exceptionStackList")
self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.runSelectedFrameCheck.setChecked(True)
self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup)
self.exceptionInfoLabel.setWordWrap(True)
self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
self.label = QtWidgets.QLabel(self.exceptionGroup)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
self.filterText = QtWidgets.QLineEdit(self.exceptionGroup)
self.filterText.setObjectName("filterText")
self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Console"))
self.historyBtn.setText(_translate("Form", "History.."))
self.exceptionBtn.setText(_translate("Form", "Exceptions.."))
self.exceptionGroup.setTitle(_translate("Form", "Exception Handling"))
self.clearExceptionBtn.setText(_translate("Form", "Clear Stack"))
self.catchAllExceptionsBtn.setText(_translate("Form", "Show All Exceptions"))
self.catchNextExceptionBtn.setText(_translate("Form", "Show Next Exception"))
self.onlyUncaughtCheck.setText(_translate("Form", "Only Uncaught Exceptions"))
self.runSelectedFrameCheck.setText(_translate("Form", "Run commands in selected stack frame"))
self.exceptionInfoLabel.setText(_translate("Form", "Stack Trace"))
self.label.setText(_translate("Form", "Filter (regex):"))
from .CmdInput import CmdInput
resource_collector.go
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e_node
import (
"bytes"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
cadvisorclient "github.com/google/cadvisor/client/v2"
cadvisorapiv2 "github.com/google/cadvisor/info/v2"
"github.com/opencontainers/runc/libcontainer/cgroups"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/gomega"
)
const (
// resource monitoring
cadvisorImageName = "google/cadvisor:latest"
cadvisorPodName = "cadvisor"
cadvisorPort = 8090
// housekeeping interval of Cadvisor (second)
houseKeepingInterval = 1
)
var (
systemContainers map[string]string
)
type ResourceCollector struct {
client *cadvisorclient.Client
request *cadvisorapiv2.RequestOptions
pollingInterval time.Duration
buffers map[string][]*framework.ContainerResourceUsage
lock sync.RWMutex
stopCh chan struct{}
}
// NewResourceCollector creates a resource collector object which collects
// resource usage periodically from Cadvisor
func NewResourceCollector(interval time.Duration) *ResourceCollector {
buffers := make(map[string][]*framework.ContainerResourceUsage)
return &ResourceCollector{
pollingInterval: interval,
buffers: buffers,
}
}
// Start starts the resource collector. It connects to the standalone Cadvisor
// pod and then repeatedly runs collectStats.
func (r *ResourceCollector) Start() {
// Get the cgroup container names for kubelet and docker
	kubeletContainer, err := getContainerNameForProcess(kubeletProcessName, "")
	if err != nil {
		framework.Failf("Failed to get kubelet container name in test-e2e-node resource collector: %v", err)
	}
	dockerContainer, err := getContainerNameForProcess(dockerProcessName, dockerPidFile)
	if err != nil {
		framework.Failf("Failed to get docker container name in test-e2e-node resource collector: %v", err)
	}
	systemContainers = map[string]string{
		stats.SystemContainerKubelet: kubeletContainer,
		stats.SystemContainerRuntime: dockerContainer,
	}
wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
var err error
r.client, err = cadvisorclient.NewClient(fmt.Sprintf("http://localhost:%d/", cadvisorPort))
if err == nil {
return true, nil
}
return false, err
})
Expect(r.client).NotTo(BeNil(), "cadvisor client not ready")
r.request = &cadvisorapiv2.RequestOptions{IdType: "name", Count: 1, Recursive: false}
r.stopCh = make(chan struct{})
oldStatsMap := make(map[string]*cadvisorapiv2.ContainerStats)
go wait.Until(func() { r.collectStats(oldStatsMap) }, r.pollingInterval, r.stopCh)
}
// Stop stops the resource collector from collecting stats. It does not clear the buffer.
func (r *ResourceCollector) Stop() {
close(r.stopCh)
}
// Reset clears the stats buffer of resource collector.
func (r *ResourceCollector) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
for _, name := range systemContainers {
r.buffers[name] = []*framework.ContainerResourceUsage{}
}
}
// GetCPUSummary gets CPU usage in percentile.
func (r *ResourceCollector) GetCPUSummary() framework.ContainersCPUSummary {
result := make(framework.ContainersCPUSummary)
for key, name := range systemContainers {
data := r.GetBasicCPUStats(name)
result[key] = data
}
return result
}
// LogLatest logs the latest resource usage.
func (r *ResourceCollector) LogLatest() {
summary, err := r.GetLatest()
if err != nil {
framework.Logf("%v", err)
}
framework.Logf("%s", formatResourceUsageStats(summary))
}
// collectStats collects resource usage from Cadvisor.
func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.ContainerStats) {
for _, name := range systemContainers {
ret, err := r.client.Stats(name, r.request)
if err != nil {
framework.Logf("Error getting container stats, err: %v", err)
return
}
cStats, ok := ret[name]
if !ok {
framework.Logf("Missing info/stats for container %q", name)
return
}
newStats := cStats.Stats[0]
if oldStats, ok := oldStatsMap[name]; ok && oldStats.Timestamp.Before(newStats.Timestamp) {
if oldStats.Timestamp.Equal(newStats.Timestamp) {
continue
}
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, newStats))
}
oldStatsMap[name] = newStats
}
}
// computeContainerResourceUsage computes resource usage based on new data sample.
func computeContainerResourceUsage(name string, oldStats, newStats *cadvisorapiv2.ContainerStats) *framework.ContainerResourceUsage {
return &framework.ContainerResourceUsage{
Name: name,
Timestamp: newStats.Timestamp,
CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
MemoryUsageInBytes: newStats.Memory.Usage,
MemoryWorkingSetInBytes: newStats.Memory.WorkingSet,
MemoryRSSInBytes: newStats.Memory.RSS,
CPUInterval: newStats.Timestamp.Sub(oldStats.Timestamp),
}
}
// GetLatest gets the latest resource usage from stats buffer.
func (r *ResourceCollector) GetLatest() (framework.ResourceUsagePerContainer, error) {
r.lock.RLock()
defer r.lock.RUnlock()
stats := make(framework.ResourceUsagePerContainer)
for key, name := range systemContainers {
contStats, ok := r.buffers[name]
if !ok || len(contStats) == 0 {
return nil, fmt.Errorf("Resource usage of %s:%s is not ready yet", key, name)
}
stats[key] = contStats[len(contStats)-1]
}
return stats, nil
}
type resourceUsageByCPU []*framework.ContainerResourceUsage
func (r resourceUsageByCPU) Len() int { return len(r) }
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
// The percentiles to report.
var percentiles = [...]float64{0.50, 0.90, 0.95, 0.99, 1.00}
// GetBasicCPUStats returns the percentiles of the CPU usage in cores for
// containerName. This method examines all data currently in the buffer.
func (r *ResourceCollector) GetBasicCPUStats(containerName string) map[float64]float64 {
r.lock.RLock()
defer r.lock.RUnlock()
result := make(map[float64]float64, len(percentiles))
	// We must make a copy of the array; otherwise the time series order would be changed.
usages := make([]*framework.ContainerResourceUsage, 0)
for _, usage := range r.buffers[containerName] {
usages = append(usages, usage)
}
sort.Sort(resourceUsageByCPU(usages))
for _, q := range percentiles {
index := int(float64(len(usages))*q) - 1
if index < 0 {
// We don't have enough data.
result[q] = 0
continue
}
result[q] = usages[index].CPUUsageInCores
}
return result
}
func formatResourceUsageStats(containerStats framework.ResourceUsagePerContainer) string {
// Example output:
//
// Resource usage for node "e2e-test-foo-minion-abcde":
// container cpu(cores) memory(MB)
// "/" 0.363 2942.09
// "/docker-daemon" 0.088 521.80
// "/kubelet" 0.086 424.37
// "/system" 0.007 119.88
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
for name, s := range containerStats {
fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, s.CPUUsageInCores, float64(s.MemoryWorkingSetInBytes)/(1024*1024), float64(s.MemoryRSSInBytes)/(1024*1024))
}
w.Flush()
return fmt.Sprintf("Resource usage:\n%s", buf.String())
}
func formatCPUSummary(summary framework.ContainersCPUSummary) string {
// Example output for a node (the percentiles may differ):
// CPU usage of containers on node "e2e-test-foo-minion-0vj7":
// container 5th% 50th% 90th% 95th%
// "/" 0.051 0.159 0.387 0.455
// "/runtime 0.000 0.000 0.146 0.166
// "/kubelet" 0.036 0.053 0.091 0.154
// "/misc" 0.001 0.001 0.001 0.002
var summaryStrings []string
var header []string
header = append(header, "container")
for _, p := range percentiles {
header = append(header, fmt.Sprintf("%.0fth%%", p*100))
}
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
for _, containerName := range framework.TargetContainers() {
var s []string
s = append(s, fmt.Sprintf("%q", containerName))
data, ok := summary[containerName]
for _, p := range percentiles {
value := "N/A"
if ok {
value = fmt.Sprintf("%.3f", data[p])
}
s = append(s, value)
}
fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
}
w.Flush()
summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers:\n%s", buf.String()))
return strings.Join(summaryStrings, "\n")
}
// getCadvisorPod returns a standalone cadvisor pod spec used for fine-grained resource monitoring.
func getCadvisorPod() *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: cadvisorPodName,
},
Spec: api.PodSpec{
// It uses a host port for the tests to collect data.
// Currently we can not use port mapping in test-e2e-node.
SecurityContext: &api.PodSecurityContext{
HostNetwork: true,
},
Containers: []api.Container{
{
Image: cadvisorImageName,
Name: cadvisorPodName,
Ports: []api.ContainerPort{
{
Name: "http",
HostPort: cadvisorPort,
ContainerPort: cadvisorPort,
Protocol: api.ProtocolTCP,
},
},
VolumeMounts: []api.VolumeMount{
{
Name: "sys",
ReadOnly: true,
MountPath: "/sys",
},
{
Name: "var-run",
ReadOnly: false,
MountPath: "/var/run",
},
{
Name: "docker",
ReadOnly: true,
MountPath: "/var/lib/docker/",
},
{
Name: "rootfs",
ReadOnly: true,
MountPath: "/rootfs",
},
},
Args: []string{
"--profiling",
fmt.Sprintf("--housekeeping_interval=%ds", houseKeepingInterval),
fmt.Sprintf("--port=%d", cadvisorPort),
},
},
},
Volumes: []api.Volume{
{
Name: "rootfs",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/"}},
},
{
Name: "var-run",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/run"}},
},
{
Name: "sys",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/sys"}},
},
{
Name: "docker",
VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/var/lib/docker"}},
},
},
},
}
}
// deletePodsSync deletes a list of pods and blocks until they disappear.
func deletePodsSync(f *framework.Framework, pods []*api.Pod) {
var wg sync.WaitGroup
for _, pod := range pods {
wg.Add(1)
go func(pod *api.Pod) {
defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, api.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred())
Expect(framework.WaitForPodToDisappear(f.Client, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())
}(pod)
}
wg.Wait()
return
}
// newTestPods creates a list of pods (specification) for test.
func newTestPods(numPods int, imageName, podType string) []*api.Pod {
var pods []*api.Pod
for i := 0; i < numPods; i++ {
podName := "test-" + string(uuid.NewUUID())
labels := map[string]string{
"type": podType,
"name": podName,
}
pods = append(pods,
&api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
Labels: labels,
},
Spec: api.PodSpec{
// Restart policy is always (default).
Containers: []api.Container{
{
Image: imageName,
Name: podName,
},
},
},
})
}
return pods
}
// Time series of resource usage
type ResourceSeries struct {
Timestamp []int64 `json:"ts"`
CPUUsageInMilliCores []int64 `json:"cpu"`
MemoryRSSInMegaBytes []int64 `json:"memory"`
Units map[string]string `json:"unit"`
}
// GetResourceTimeSeries gets the time series of resource usage for each container.
func (r *ResourceCollector) GetResourceTimeSeries() map[string]*ResourceSeries {
resourceSeries := make(map[string]*ResourceSeries)
for key, name := range systemContainers {
newSeries := &ResourceSeries{Units: map[string]string{
"cpu": "mCPU",
"memory": "MB",
}}
resourceSeries[key] = newSeries
for _, usage := range r.buffers[name] {
newSeries.Timestamp = append(newSeries.Timestamp, usage.Timestamp.UnixNano())
newSeries.CPUUsageInMilliCores = append(newSeries.CPUUsageInMilliCores, int64(usage.CPUUsageInCores*1000))
newSeries.MemoryRSSInMegaBytes = append(newSeries.MemoryRSSInMegaBytes, int64(float64(usage.MemoryUsageInBytes)/(1024*1024)))
}
}
return resourceSeries
}
// Code for getting the container name of docker, copied from pkg/kubelet/cm/container_manager_linux.go
// since these helpers are not exposed there.
const (
kubeletProcessName = "kubelet"
dockerProcessName = "docker"
dockerPidFile = "/var/run/docker.pid"
containerdProcessName = "docker-containerd"
containerdPidFile = "/run/docker/libcontainerd/docker-containerd.pid"
)
func getContainerNameForProcess(name, pidFile string) (string, error) {
pids, err := getPidsForProcess(name, pidFile)
if err != nil {
return "", fmt.Errorf("failed to detect process id for %q - %v", name, err)
}
if len(pids) == 0 {
return "", nil
}
cont, err := getContainer(pids[0])
if err != nil {
return "", err
}
return cont, nil
}
func getPidFromPidFile(pidFile string) (int, error) {
file, err := os.Open(pidFile)
if err != nil {
return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
}
defer file.Close()
data, err := ioutil.ReadAll(file)
if err != nil {
return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
}
pid, err := strconv.Atoi(string(data))
if err != nil {
return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
}
return pid, nil
}
func getPidsForProcess(name, pidFile string) ([]int, error) {
if len(pidFile) > 0 {
if pid, err := getPidFromPidFile(pidFile); err == nil {
return []int{pid}, nil
} else {
// log the error and fall back to pidof
runtime.HandleError(err)
}
}
out, err := exec.Command("pidof", name).Output()
if err != nil {
return []int{}, fmt.Errorf("failed to find pid of %q: %v", name, err)
}
// The output of pidof is a list of pids.
pids := []int{}
for _, pidStr := range strings.Split(strings.TrimSpace(string(out)), " ") {
pid, err := strconv.Atoi(pidStr)
if err != nil {
continue
}
pids = append(pids, pid)
}
return pids, nil
}
// getContainer returns the cgroup associated with the specified pid.
// It enforces a unified hierarchy for memory and cpu cgroups.
// On systemd environments, it uses the name=systemd cgroup for the specified pid.
func getContainer(pid int) (string, error) {
cgs, err := cgroups.ParseCgroupFile(fmt.Sprintf("/proc/%d/cgroup", pid))
if err != nil {
return "", err
}
cpu, found := cgs["cpu"]
if !found {
return "", cgroups.NewNotFoundError("cpu")
}
memory, found := cgs["memory"]
if !found {
return "", cgroups.NewNotFoundError("memory")
}
// since we use this container for accounting, we need to ensure its a unified hierarchy.
if cpu != memory {
return "", fmt.Errorf("cpu and memory cgroup hierarchy not unified. cpu: %s, memory: %s", cpu, memory)
}
// on systemd, every pid is in a unified cgroup hierarchy (name=systemd as seen in systemd-cgls)
// cpu and memory accounting is off by default, users may choose to enable it per unit or globally.
// users could enable CPU and memory accounting globally via /etc/systemd/system.conf (DefaultCPUAccounting=true DefaultMemoryAccounting=true).
// users could also enable CPU and memory accounting per unit via CPUAccounting=true and MemoryAccounting=true
// we only warn if accounting is not enabled for CPU or memory so as to not break local development flows where kubelet is launched in a terminal.
// for example, the cgroup for the user session will be something like /user.slice/user-X.slice/session-X.scope, but the cpu and memory
// cgroup will be the closest ancestor where accounting is performed (most likely /) on systems that launch docker containers.
// as a result, on those systems, you will not get cpu or memory accounting statistics for kubelet.
// in addition, you would not get memory or cpu accounting for the runtime unless accounting was enabled on its unit (or globally).
if systemd, found := cgs["name=systemd"]; found {
if systemd != cpu {
log.Printf("CPUAccounting not enabled for pid: %d", pid)
}
if systemd != memory {
log.Printf("MemoryAccounting not enabled for pid: %d", pid)
}
return systemd, nil
}
return cpu, nil
}
|
word_algebra.py
import re
from .delta import Inf, d_expr_dimension
from .linear import Linear
from .lyndon import to_lyndon_basis
from .util import get_one_item
def word_expr_weight(expr):
return len(get_one_item(expr.items())[0])
def word_expr_max_char(expr):
return max([max(word) for word, _ in expr.items()])
def words_with_n_distinct_chars(expr, min_distinct):
return expr.filtered_obj(lambda word: len(set(word)) >= min_distinct)
# Replaces each letter c with index_map[c]
def word_substitute(
word, # Tuple[int]
index_map, # int -> int
):
return tuple([index_map.get(c, c) for c in word])
# For each word, replaces each letter c with index_map[c]
def word_expr_substitute(
        expr,       # Linear[word], word is Tuple[int]
        index_map,  # int -> int
    ):
ret = Linear()
for word, coeff in expr.items():
word_new = word_substitute(word, index_map)
if not Inf in word_new:
ret += Linear({word_new: coeff})
return ret
def _word_to_template_impl(word, index_map):
next_index = 0 if len(index_map) == 0 else max(index_map.values()) + 1
for c in word:
if not c in index_map:
index_map[c] = next_index
next_index += 1
return word_substitute(word, index_map)
# Converts word to a standard form modulo substitutions
def word_to_template(word):
return _word_to_template_impl(word, {})
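# Illustrative sketch (added for clarity; not part of the original module):
# words that differ only by a letter substitution share one template.
def _example_word_templates():
    assert word_to_template((3, 1, 3, 2)) == (0, 1, 0, 2)
    assert word_to_template((5, 7, 5, 9)) == (0, 1, 0, 2)  # same template as above
    assert word_substitute((3, 1, 3, 2), {3: 5, 1: 7, 2: 9}) == (5, 7, 5, 9)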
def word_expr_to_template(expr, index_map=None):
if index_map is None:
index_map = {}
    return expr.mapped_obj(lambda w: _word_to_template_impl(w, index_map))
raw_mutex.rs
// Copyright 2016 Amanieu d'Antras
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
use crate::{deadlock, util};
#[cfg(has_sized_atomics)]
use core::sync::atomic::AtomicU8;
#[cfg(not(has_sized_atomics))]
use core::sync::atomic::AtomicUsize as AtomicU8;
use core::{sync::atomic::Ordering, time::Duration};
use lock_api::{GuardNoSend, RawMutex as RawMutexTrait, RawMutexFair, RawMutexTimed};
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
use std::time::Instant;
#[cfg(has_sized_atomics)]
type U8 = u8;
#[cfg(not(has_sized_atomics))]
type U8 = usize;
// UnparkToken used to indicate that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
pub(crate) const TOKEN_NORMAL: UnparkToken = UnparkToken(0);
// UnparkToken used to indicate that the mutex is being handed off to the target
// thread directly without unlocking it.
pub(crate) const TOKEN_HANDOFF: UnparkToken = UnparkToken(1);
const LOCKED_BIT: U8 = 1;
const PARKED_BIT: U8 = 2;
/// Raw mutex type backed by the parking lot.
pub struct RawMutex {
state: AtomicU8,
}
unsafe impl RawMutexTrait for RawMutex {
const INIT: RawMutex = RawMutex {
state: AtomicU8::new(0),
};
type GuardMarker = GuardNoSend;
#[inline]
fn lock(&self) {
if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_err()
{
self.lock_slow(None);
}
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
#[inline]
fn try_lock(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT != 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
return true;
}
Err(x) => state = x,
}
}
}
#[inline]
fn unlock(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(false);
}
}
unsafe impl RawMutexFair for RawMutex {
#[inline]
fn unlock_fair(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
if self
.state
.compare_exchange(LOCKED_BIT, 0, Ordering::Release, Ordering::Relaxed)
.is_ok()
{
return;
}
self.unlock_slow(true);
} |
#[inline]
fn bump(&self) {
if self.state.load(Ordering::Relaxed) & PARKED_BIT != 0 {
self.bump_slow();
}
}
}
unsafe impl RawMutexTimed for RawMutex {
type Duration = Duration;
type Instant = Instant;
#[inline]
fn try_lock_until(&self, timeout: Instant) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(Some(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
#[inline]
fn try_lock_for(&self, timeout: Duration) -> bool {
let result = if self
.state
.compare_exchange_weak(0, LOCKED_BIT, Ordering::Acquire, Ordering::Relaxed)
.is_ok()
{
true
} else {
self.lock_slow(util::to_deadline(timeout))
};
if result {
unsafe { deadlock::acquire_resource(self as *const _ as usize) };
}
result
}
}
impl RawMutex {
// Used by Condvar when requeuing threads to us, must be called while
// holding the queue lock.
#[inline]
pub(crate) fn mark_parked_if_locked(&self) -> bool {
let mut state = self.state.load(Ordering::Relaxed);
loop {
if state & LOCKED_BIT == 0 {
return false;
}
match self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
}
}
// Used by Condvar when requeuing threads to us, must be called while
// holding the queue lock.
#[inline]
pub(crate) fn mark_parked(&self) {
self.state.fetch_or(PARKED_BIT, Ordering::Relaxed);
}
#[cold]
fn lock_slow(&self, timeout: Option<Instant>) -> bool {
let mut spinwait = SpinWait::new();
let mut state = self.state.load(Ordering::Relaxed);
loop {
// Grab the lock if it isn't locked, even if there is a queue on it
if state & LOCKED_BIT == 0 {
match self.state.compare_exchange_weak(
state,
state | LOCKED_BIT,
Ordering::Acquire,
Ordering::Relaxed,
) {
Ok(_) => return true,
Err(x) => state = x,
}
continue;
}
// If there is no queue, try spinning a few times
if state & PARKED_BIT == 0 && spinwait.spin() {
state = self.state.load(Ordering::Relaxed);
continue;
}
// Set the parked bit
if state & PARKED_BIT == 0 {
if let Err(x) = self.state.compare_exchange_weak(
state,
state | PARKED_BIT,
Ordering::Relaxed,
Ordering::Relaxed,
) {
state = x;
continue;
}
}
// Park our thread until we are woken up by an unlock
unsafe {
let addr = self as *const _ as usize;
let validate = || self.state.load(Ordering::Relaxed) == LOCKED_BIT | PARKED_BIT;
let before_sleep = || {};
let timed_out = |_, was_last_thread| {
// Clear the parked bit if we were the last parked thread
if was_last_thread {
self.state.fetch_and(!PARKED_BIT, Ordering::Relaxed);
}
};
match parking_lot_core::park(
addr,
validate,
before_sleep,
timed_out,
DEFAULT_PARK_TOKEN,
timeout,
) {
// The thread that unparked us passed the lock on to us
// directly without unlocking it.
ParkResult::Unparked(TOKEN_HANDOFF) => return true,
// We were unparked normally, try acquiring the lock again
ParkResult::Unparked(_) => (),
// The validation function failed, try locking again
ParkResult::Invalid => (),
// Timeout expired
ParkResult::TimedOut => return false,
}
}
// Loop back and try locking again
spinwait.reset();
state = self.state.load(Ordering::Relaxed);
}
}
#[cold]
fn unlock_slow(&self, force_fair: bool) {
// Unpark one thread and leave the parked bit set if there might
// still be parked threads on this address.
unsafe {
let addr = self as *const _ as usize;
let callback = |result: UnparkResult| {
// If we are using a fair unlock then we should keep the
// mutex locked and hand it off to the unparked thread.
if result.unparked_threads != 0 && (force_fair || result.be_fair) {
// Clear the parked bit if there are no more parked
// threads.
if !result.have_more_threads {
self.state.store(LOCKED_BIT, Ordering::Relaxed);
}
return TOKEN_HANDOFF;
}
// Clear the locked bit, and the parked bit as well if there
// are no more parked threads.
if result.have_more_threads {
self.state.store(PARKED_BIT, Ordering::Release);
} else {
self.state.store(0, Ordering::Release);
}
TOKEN_NORMAL
};
parking_lot_core::unpark_one(addr, callback);
}
}
#[cold]
fn bump_slow(&self) {
unsafe { deadlock::release_resource(self as *const _ as usize) };
self.unlock_slow(true);
self.lock();
}
} | |
ChevronRightOutline.d.ts | import * as React from 'react';
import { StyledIconProps } from '../../StyledIconBase';
export declare const ChevronRightOutline: React.ForwardRefExoticComponent<Pick<StyledIconProps, "string" | "max" | "accumulate" | "origin" | "end" | "hanging" | "alphabetic" | "ideographic" | "media" | "style" | "title" | "clipPath" | "filter" | "mask" | "result" | "local" | "color" | "clip" | "size" | "fill" | "stroke" | "mathematical" | "additive" | "key" | "children" | "cursor" | "direction" | "display" | "fontFamily" | "fontSize" | "fontSizeAdjust" | "fontStretch" | "fontStyle" | "fontVariant" | "fontWeight" | "height" | "imageRendering" | "letterSpacing" | "opacity" | "order" | "overflow" | "paintOrder" | "pointerEvents" | "rotate" | "scale" | "textRendering" | "transform" | "unicodeBidi" | "visibility" | "width" | "wordSpacing" | "writingMode" | "offset" | "textDecoration" | "alignmentBaseline" | "baselineShift" | "clipRule" | "colorInterpolation" | "colorRendering" | "dominantBaseline" | "fillOpacity" | "fillRule" | "floodColor" | "floodOpacity" | "glyphOrientationVertical" | "lightingColor" | "markerEnd" | "markerMid" | "markerStart" | "shapeRendering" | "stopColor" | "stopOpacity" | "strokeDasharray" | "strokeDashoffset" | "strokeLinecap" | "strokeLinejoin" | "strokeMiterlimit" | "strokeOpacity" | "strokeWidth" | "textAnchor" | "vectorEffect" | "className" | "id" | "lang" | "method" | "min" | "name" | "target" | "type" | "role" | "tabIndex" | "accentHeight" | "allowReorder" | "amplitude" | "arabicForm" | "ascent" | "attributeName" | "attributeType" | "autoReverse" | "azimuth" | "baseFrequency" | "baseProfile" | "bbox" | "begin" | "bias" | "by" | "calcMode" | "capHeight" | "clipPathUnits" | "colorInterpolationFilters" | "colorProfile" | "contentScriptType" | "contentStyleType" | "cx" | "cy" | "d" | "decelerate" | "descent" | "diffuseConstant" | "divisor" | "dur" | "dx" | "dy" | "edgeMode" | "elevation" | "enableBackground" | "exponent" | "externalResourcesRequired" | "filterRes" | "filterUnits" | "focusable" | "format" | "from" | "fx" | "fy" | "g1" | "g2" | "glyphName" | "glyphOrientationHorizontal" | "glyphRef" | "gradientTransform" | "gradientUnits" | "horizAdvX" | "horizOriginX" | "href" | "in2" | "in" | "intercept" | "k1" | "k2" | "k3" | "k4" | "k" | "kernelMatrix" | "kernelUnitLength" | "kerning" | "keyPoints" | "keySplines" | "keyTimes" | "lengthAdjust" | "limitingConeAngle" | "markerHeight" | "markerUnits" | "markerWidth" | "maskContentUnits" | "maskUnits" | "mode" | "numOctaves" | "operator" | "orient" | "orientation" | "overlinePosition" | "overlineThickness" | "panose1" | "pathLength" | "patternContentUnits" | "patternTransform" | "patternUnits" | "points" | "pointsAtX" | "pointsAtY" | "pointsAtZ" | "preserveAlpha" | "preserveAspectRatio" | "primitiveUnits" | "r" | "radius" | "refX" | "refY" | "renderingIntent" | "repeatCount" | "repeatDur" | "requiredExtensions" | "requiredFeatures" | "restart" | "rx" | "ry" | "seed" | "slope" | "spacing" | "specularConstant" | "specularExponent" | "speed" | "spreadMethod" | "startOffset" | "stdDeviation" | "stemh" | "stemv" | "stitchTiles" | "strikethroughPosition" | "strikethroughThickness" | "surfaceScale" | "systemLanguage" | "tableValues" | "targetX" | "targetY" | "textLength" | "to" | "u1" | "u2" | "underlinePosition" | "underlineThickness" | "unicode" | "unicodeRange" | "unitsPerEm" | "vAlphabetic" | "values" | "version" | "vertAdvY" | "vertOriginX" | "vertOriginY" | "vHanging" | "vIdeographic" | "viewBox" | "viewTarget" | "vMathematical" | "widths" | "x1" | "x2" | "x" | "xChannelSelector" | "xHeight" | "xlinkActuate" | 
"xlinkArcrole" | "xlinkHref" | "xlinkRole" | "xlinkShow" | "xlinkTitle" | "xlinkType" | "xmlBase" | "xmlLang" | "xmlns" | "xmlnsXlink" | "xmlSpace" | "y1" | "y2" | "y" | "yChannelSelector" | "z" | "zoomAndPan" | "aria-activedescendant" | "aria-atomic" | "aria-autocomplete" | "aria-busy" | "aria-checked" | "aria-colcount" | "aria-colindex" | "aria-colspan" | "aria-controls" | "aria-current" | "aria-describedby" | "aria-details" | "aria-disabled" | "aria-dropeffect" | "aria-errormessage" | "aria-expanded" | "aria-flowto" | "aria-grabbed" | "aria-haspopup" | "aria-hidden" | "aria-invalid" | "aria-keyshortcuts" | "aria-label" | "aria-labelledby" | "aria-level" | "aria-live" | "aria-modal" | "aria-multiline" | "aria-multiselectable" | "aria-orientation" | "aria-owns" | "aria-placeholder" | "aria-posinset" | "aria-pressed" | "aria-readonly" | "aria-relevant" | "aria-required" | "aria-roledescription" | "aria-rowcount" | "aria-rowindex" | "aria-rowspan" | "aria-selected" | "aria-setsize" | "aria-sort" | "aria-valuemax" | "aria-valuemin" | "aria-valuenow" | "aria-valuetext" | "dangerouslySetInnerHTML" | "onCopy" | "onCopyCapture" | "onCut" | "onCutCapture" | "onPaste" | "onPasteCapture" | "onCompositionEnd" | "onCompositionEndCapture" | "onCompositionStart" | "onCompositionStartCapture" | "onCompositionUpdate" | "onCompositionUpdateCapture" | "onFocus" | "onFocusCapture" | "onBlur" | "onBlurCapture" | "onChange" | "onChangeCapture" | "onBeforeInput" | "onBeforeInputCapture" | "onInput" | "onInputCapture" | "onReset" | "onResetCapture" | "onSubmit" | "onSubmitCapture" | "onInvalid" | "onInvalidCapture" | "onLoad" | "onLoadCapture" | "onError" | "onErrorCapture" | "onKeyDown" | "onKeyDownCapture" | "onKeyPress" | "onKeyPressCapture" | "onKeyUp" | "onKeyUpCapture" | "onAbort" | "onAbortCapture" | "onCanPlay" | "onCanPlayCapture" | "onCanPlayThrough" | "onCanPlayThroughCapture" | "onDurationChange" | "onDurationChangeCapture" | "onEmptied" | "onEmptiedCapture" | "onEncrypted" | "onEncryptedCapture" | "onEnded" | "onEndedCapture" | "onLoadedData" | "onLoadedDataCapture" | "onLoadedMetadata" | "onLoadedMetadataCapture" | "onLoadStart" | "onLoadStartCapture" | "onPause" | "onPauseCapture" | "onPlay" | "onPlayCapture" | "onPlaying" | "onPlayingCapture" | "onProgress" | "onProgressCapture" | "onRateChange" | "onRateChangeCapture" | "onSeeked" | "onSeekedCapture" | "onSeeking" | "onSeekingCapture" | "onStalled" | "onStalledCapture" | "onSuspend" | "onSuspendCapture" | "onTimeUpdate" | "onTimeUpdateCapture" | "onVolumeChange" | "onVolumeChangeCapture" | "onWaiting" | "onWaitingCapture" | "onAuxClick" | "onAuxClickCapture" | "onClick" | "onClickCapture" | "onContextMenu" | "onContextMenuCapture" | "onDoubleClick" | "onDoubleClickCapture" | "onDrag" | "onDragCapture" | "onDragEnd" | "onDragEndCapture" | "onDragEnter" | "onDragEnterCapture" | "onDragExit" | "onDragExitCapture" | "onDragLeave" | "onDragLeaveCapture" | "onDragOver" | "onDragOverCapture" | "onDragStart" | "onDragStartCapture" | "onDrop" | "onDropCapture" | "onMouseDown" | "onMouseDownCapture" | "onMouseEnter" | "onMouseLeave" | "onMouseMove" | "onMouseMoveCapture" | "onMouseOut" | "onMouseOutCapture" | "onMouseOver" | "onMouseOverCapture" | "onMouseUp" | "onMouseUpCapture" | "onSelect" | "onSelectCapture" | "onTouchCancel" | "onTouchCancelCapture" | "onTouchEnd" | "onTouchEndCapture" | "onTouchMove" | "onTouchMoveCapture" | "onTouchStart" | "onTouchStartCapture" | "onPointerDown" | "onPointerDownCapture" | "onPointerMove" | "onPointerMoveCapture" 
| "onPointerUp" | "onPointerUpCapture" | "onPointerCancel" | "onPointerCancelCapture" | "onPointerEnter" | "onPointerEnterCapture" | "onPointerLeave" | "onPointerLeaveCapture" | "onPointerOver" | "onPointerOverCapture" | "onPointerOut" | "onPointerOutCapture" | "onGotPointerCapture" | "onGotPointerCaptureCapture" | "onLostPointerCapture" | "onLostPointerCaptureCapture" | "onScroll" | "onScrollCapture" | "onWheel" | "onWheelCapture" | "onAnimationStart" | "onAnimationStartCapture" | "onAnimationEnd" | "onAnimationEndCapture" | "onAnimationIteration" | "onAnimationIterationCapture" | "onTransitionEnd" | "onTransitionEndCapture"> & React.RefAttributes<SVGSVGElement>>;
export declare const ChevronRightOutlineDimensions: {
height: number;
width: number; | }; |
|
0005_orderitem_completed.py | # Generated by Django 3.1.7 on 2021-04-27 11:52
from django.db import migrations, models
class | (migrations.Migration):
dependencies = [
('checkout', '0004_checkout_completed'),
]
operations = [
migrations.AddField(
model_name='orderitem',
name='completed',
field=models.BooleanField(default=False),
),
]
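For context, a minimal sketch of what the model looks like once this migration is applied (the other OrderItem fields are assumptions and are omitted; only the field added here is shown):

from django.db import models

class OrderItem(models.Model):
    # ...existing OrderItem fields omitted...
    completed = models.BooleanField(default=False)  # added by migration 0005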
| Migration |
sql_query_client.go | package sql_query
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/runtime"
strfmt "github.com/go-openapi/strfmt"
)
// New creates a new sql query API client.
func New(transport runtime.ClientTransport, formats strfmt.Registry) *Client |
/*
Client for sql query API
*/
type Client struct {
transport runtime.ClientTransport
formats strfmt.Registry
}
/*
CreateSqlQuery creates a SQL Runner query
Create a SQL Runner query.
*/
func (a *Client) CreateSqlQuery(params *CreateSqlQueryParams) (*CreateSqlQueryOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewCreateSqlQueryParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "create_sql_query",
Method: "POST",
PathPattern: "/sql_queries",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"https"},
Params: params,
Reader: &CreateSqlQueryReader{formats: a.formats},
})
if err != nil {
return nil, err
}
return result.(*CreateSqlQueryOK), nil
}
/*
SqlQuery gets a SQL Runner query
Get a SQL Runner query.
*/
func (a *Client) SqlQuery(params *SqlQueryParams) (*SqlQueryOK, error) {
// TODO: Validate the params before sending
if params == nil {
params = NewSqlQueryParams()
}
result, err := a.transport.Submit(&runtime.ClientOperation{
ID: "sql_query",
Method: "GET",
PathPattern: "/sql_queries/{slug}",
ProducesMediaTypes: []string{"application/json"},
ConsumesMediaTypes: []string{"application/json"},
Schemes: []string{"https"},
Params: params,
Reader: &SqlQueryReader{formats: a.formats},
})
if err != nil {
return nil, err
}
return result.(*SqlQueryOK), nil
}
// SetTransport changes the transport on the client
func (a *Client) SetTransport(transport runtime.ClientTransport) {
a.transport = transport
}
| {
return &Client{transport: transport, formats: formats}
} |
AddDocumentForm.stories.tsx | import React from 'react';
import AddDocumentForm from './AddDocumentForm';
export default { | title: 'Components/AddDocumentForm',
};
export const Default = () => <AddDocumentForm text="Example text" />; | component: AddDocumentForm, |