file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
mount.go | package main
import (
"bufio"
"os"
"strings"
)
/* GetSystemMounts returns a map of system mount points to block devices */
func GetSystemMounts() map[string]string | {
// open /proc/mounts for reading
fd, err := os.Open("/proc/mounts")
if err != nil {
return nil
}
defer fd.Close()
// parse mounts file and return result
scanner := bufio.NewScanner(fd)
result := make(map[string]string)
for scanner.Scan() {
items := strings.Split(scanner.Text(), " ")
result[items[1]] = items[0]
}
return result
} |
|
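As a usage sketch for the `GetSystemMounts` row above (illustrative only: it assumes the helper lives in the same `main` package, that `fmt` is added to the imports, and that the host exposes a `/` mount):

```go
// printRootDevice is a hypothetical helper showing how the returned
// map is keyed: mount point -> block device.
func printRootDevice() {
	mounts := GetSystemMounts()
	if mounts == nil {
		// GetSystemMounts returns nil when /proc/mounts cannot be
		// opened (e.g. on non-Linux systems).
		fmt.Println("could not read /proc/mounts")
		return
	}
	if dev, ok := mounts["/"]; ok {
		fmt.Printf("/ is mounted from %s\n", dev)
	}
}
```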
trigger_dag.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Triggering DAG runs APIs."""
import json
from datetime import datetime
from typing import List, Optional, Union
from airflow.exceptions import DagNotFound, DagRunAlreadyExists
from airflow.models import DagBag, DagModel, DagRun
from airflow.utils import timezone
from airflow.utils.state import State
from airflow.utils.types import DagRunType
def _trigger_dag(
dag_id: str,
dag_bag: DagBag,
dag_run: DagModel,
run_id: Optional[str],
conf: Optional[Union[dict, str]],
execution_date: Optional[datetime],
replace_microseconds: bool,
) -> List[DagRun]: # pylint: disable=too-many-arguments
|
def trigger_dag(
dag_id: str,
run_id: Optional[str] = None,
conf: Optional[Union[dict, str]] = None,
execution_date: Optional[datetime] = None,
replace_microseconds: bool = True,
) -> Optional[DagRun]:
"""Triggers execution of DAG specified by dag_id
:param dag_id: DAG ID
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: the first dag run triggered, even if more than one dag run was triggered, or None
"""
dag_model = DagModel.get_current(dag_id)
if dag_model is None:
raise DagNotFound("Dag id {} not found in DagModel".format(dag_id))
def read_store_serialized_dags():
from airflow.configuration import conf
return conf.getboolean('core', 'store_serialized_dags')
dagbag = DagBag(
dag_folder=dag_model.fileloc,
store_serialized_dags=read_store_serialized_dags()
)
dag_run = DagRun()
triggers = _trigger_dag(
dag_id=dag_id,
dag_run=dag_run,
dag_bag=dagbag,
run_id=run_id,
conf=conf,
execution_date=execution_date,
replace_microseconds=replace_microseconds,
)
return triggers[0] if triggers else None
| """Triggers DAG run.
:param dag_id: DAG ID
:param dag_bag: DAG Bag model
:param dag_run: DAG Run model
:param run_id: ID of the dag_run
:param conf: configuration
:param execution_date: date of execution
:param replace_microseconds: whether microseconds should be zeroed
:return: list of triggered dags
"""
dag = dag_bag.get_dag(dag_id) # prefetch dag if it is stored serialized
if dag_id not in dag_bag.dags:
raise DagNotFound("Dag id {} not found".format(dag_id))
execution_date = execution_date if execution_date else timezone.utcnow()
if not timezone.is_localized(execution_date):
raise ValueError("The execution_date should be localized")
if replace_microseconds:
execution_date = execution_date.replace(microsecond=0)
if dag.default_args and 'start_date' in dag.default_args:
min_dag_start_date = dag.default_args["start_date"]
if min_dag_start_date and execution_date < min_dag_start_date:
raise ValueError(
"The execution_date [{0}] should be >= start_date [{1}] from DAG's default_args".format(
execution_date.isoformat(),
min_dag_start_date.isoformat()))
if not run_id:
run_id = "{}{}".format(DagRunType.MANUAL.value, execution_date.isoformat())
dag_run_id = dag_run.find(dag_id=dag_id, run_id=run_id)
if dag_run_id:
raise DagRunAlreadyExists("Run id {} already exists for dag id {}".format(
run_id,
dag_id
))
run_conf = None
if conf:
if isinstance(conf, dict):
run_conf = conf
else:
run_conf = json.loads(conf)
triggers = []
dags_to_trigger = [dag]
while dags_to_trigger:
dag = dags_to_trigger.pop()
trigger = dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True,
)
triggers.append(trigger)
if dag.subdags:
dags_to_trigger.extend(dag.subdags)
return triggers |
test_modes.py | import unittest
import obdlib.obd.modes as modes
class | (unittest.TestCase):
def test_init(self):
m = modes.Modes(1)
self.assertIsInstance(m.modes, dict)
suite = unittest.TestLoader().loadTestsFromTestCase(TestModes)
unittest.TextTestRunner(verbosity=2).run(suite)
| TestModes |
query_test.go | /*
Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mysqlconn
import (
"fmt"
"reflect"
"strings"
"sync"
"testing"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"github.com/youtube/vitess/go/sqldb"
"github.com/youtube/vitess/go/sqltypes"
querypb "github.com/youtube/vitess/go/vt/proto/query"
)
func TestComInitDB(t *testing.T) {
listener, sConn, cConn := createSocketPair(t)
defer func() {
listener.Close()
sConn.Close()
cConn.Close()
}()
// Write ComInitDB packet, read it, compare.
if err := cConn.writeComInitDB("my_db"); err != nil {
t.Fatalf("writeComInitDB failed: %v", err)
}
data, err := sConn.ReadPacket()
if err != nil || len(data) == 0 || data[0] != ComInitDB {
t.Fatalf("sConn.ReadPacket - ComInitDB failed: %v %v", data, err)
}
db := sConn.parseComInitDB(data)
if db != "my_db" {
t.Errorf("parseComInitDB returned unexpected data: %v", db)
}
}
func TestQueries(t *testing.T) {
listener, sConn, cConn := createSocketPair(t)
defer func() {
listener.Close()
sConn.Close()
cConn.Close()
}()
// Smallest result
checkQuery(t, "tiny", sConn, cConn, &sqltypes.Result{})
// Typical Insert result
checkQuery(t, "insert", sConn, cConn, &sqltypes.Result{
RowsAffected: 0x8010203040506070,
InsertID: 0x0102030405060708,
})
// Typical Select with TYPE_AND_NAME.
// One value is also NULL.
checkQuery(t, "type and name", sConn, cConn, &sqltypes.Result{
Fields: []*querypb.Field{
{
Name: "id",
Type: querypb.Type_INT32,
},
{
Name: "name",
Type: querypb.Type_VARCHAR,
},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")),
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")),
},
{
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("20")),
sqltypes.NULL,
},
},
RowsAffected: 2,
})
// Typical Select with TYPE_AND_NAME.
// All types are represented.
// One row has all NULL values.
checkQuery(t, "all types", sConn, cConn, &sqltypes.Result{
Fields: []*querypb.Field{
{Name: "Type_INT8 ", Type: querypb.Type_INT8},
{Name: "Type_UINT8 ", Type: querypb.Type_UINT8},
{Name: "Type_INT16 ", Type: querypb.Type_INT16},
{Name: "Type_UINT16 ", Type: querypb.Type_UINT16},
{Name: "Type_INT24 ", Type: querypb.Type_INT24},
{Name: "Type_UINT24 ", Type: querypb.Type_UINT24},
{Name: "Type_INT32 ", Type: querypb.Type_INT32},
{Name: "Type_UINT32 ", Type: querypb.Type_UINT32},
{Name: "Type_INT64 ", Type: querypb.Type_INT64},
{Name: "Type_UINT64 ", Type: querypb.Type_UINT64},
{Name: "Type_FLOAT32 ", Type: querypb.Type_FLOAT32},
{Name: "Type_FLOAT64 ", Type: querypb.Type_FLOAT64},
{Name: "Type_TIMESTAMP", Type: querypb.Type_TIMESTAMP},
{Name: "Type_DATE ", Type: querypb.Type_DATE},
{Name: "Type_TIME ", Type: querypb.Type_TIME},
{Name: "Type_DATETIME ", Type: querypb.Type_DATETIME},
{Name: "Type_YEAR ", Type: querypb.Type_YEAR},
{Name: "Type_DECIMAL ", Type: querypb.Type_DECIMAL},
{Name: "Type_TEXT ", Type: querypb.Type_TEXT},
{Name: "Type_BLOB ", Type: querypb.Type_BLOB},
{Name: "Type_VARCHAR ", Type: querypb.Type_VARCHAR},
{Name: "Type_VARBINARY", Type: querypb.Type_VARBINARY},
{Name: "Type_CHAR ", Type: querypb.Type_CHAR},
{Name: "Type_BINARY ", Type: querypb.Type_BINARY},
{Name: "Type_BIT ", Type: querypb.Type_BIT},
{Name: "Type_ENUM ", Type: querypb.Type_ENUM},
{Name: "Type_SET ", Type: querypb.Type_SET},
// Skip TUPLE, not possible in Result.
{Name: "Type_GEOMETRY ", Type: querypb.Type_GEOMETRY},
{Name: "Type_JSON ", Type: querypb.Type_JSON},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_INT8, []byte("Type_INT8")),
sqltypes.MakeTrusted(querypb.Type_UINT8, []byte("Type_UINT8")),
sqltypes.MakeTrusted(querypb.Type_INT16, []byte("Type_INT16")),
sqltypes.MakeTrusted(querypb.Type_UINT16, []byte("Type_UINT16")),
sqltypes.MakeTrusted(querypb.Type_INT24, []byte("Type_INT24")),
sqltypes.MakeTrusted(querypb.Type_UINT24, []byte("Type_UINT24")),
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("Type_INT32")),
sqltypes.MakeTrusted(querypb.Type_UINT32, []byte("Type_UINT32")),
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("Type_INT64")),
sqltypes.MakeTrusted(querypb.Type_UINT64, []byte("Type_UINT64")),
sqltypes.MakeTrusted(querypb.Type_FLOAT32, []byte("Type_FLOAT32")),
sqltypes.MakeTrusted(querypb.Type_FLOAT64, []byte("Type_FLOAT64")),
sqltypes.MakeTrusted(querypb.Type_TIMESTAMP, []byte("Type_TIMESTAMP")),
sqltypes.MakeTrusted(querypb.Type_DATE, []byte("Type_DATE")),
sqltypes.MakeTrusted(querypb.Type_TIME, []byte("Type_TIME")),
sqltypes.MakeTrusted(querypb.Type_DATETIME, []byte("Type_DATETIME")),
sqltypes.MakeTrusted(querypb.Type_YEAR, []byte("Type_YEAR")),
sqltypes.MakeTrusted(querypb.Type_DECIMAL, []byte("Type_DECIMAL")),
sqltypes.MakeTrusted(querypb.Type_TEXT, []byte("Type_TEXT")),
sqltypes.MakeTrusted(querypb.Type_BLOB, []byte("Type_BLOB")),
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("Type_VARCHAR")),
sqltypes.MakeTrusted(querypb.Type_VARBINARY, []byte("Type_VARBINARY")),
sqltypes.MakeTrusted(querypb.Type_CHAR, []byte("Type_CHAR")),
sqltypes.MakeTrusted(querypb.Type_BINARY, []byte("Type_BINARY")),
sqltypes.MakeTrusted(querypb.Type_BIT, []byte("Type_BIT")),
sqltypes.MakeTrusted(querypb.Type_ENUM, []byte("Type_ENUM")),
sqltypes.MakeTrusted(querypb.Type_SET, []byte("Type_SET")),
sqltypes.MakeTrusted(querypb.Type_GEOMETRY, []byte("Type_GEOMETRY")),
sqltypes.MakeTrusted(querypb.Type_JSON, []byte("Type_JSON")),
},
{
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
sqltypes.NULL,
},
},
RowsAffected: 2,
})
// Typical Select with TYPE_AND_NAME.
// The first value of the first column is an empty string, so it's encoded with length 0.
checkQuery(t, "first empty string", sConn, cConn, &sqltypes.Result{
Fields: []*querypb.Field{
{
Name: "name",
Type: querypb.Type_VARCHAR,
},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("")),
},
{
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")),
},
},
RowsAffected: 2,
})
// Typical Select with TYPE_ONLY.
checkQuery(t, "type only", sConn, cConn, &sqltypes.Result{
Fields: []*querypb.Field{
{
Type: querypb.Type_INT64,
},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("10")),
},
{
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("20")),
},
},
RowsAffected: 2,
})
// Typical Select with ALL.
checkQuery(t, "complete", sConn, cConn, &sqltypes.Result{
Fields: []*querypb.Field{
{
Type: querypb.Type_INT64,
Name: "cool column name",
Table: "table name",
OrgTable: "org table",
Database: "fine db",
OrgName: "crazy org",
ColumnLength: 0x80020304,
Charset: 0x1234,
Decimals: 36,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG |
querypb.MySqlFlag_PRI_KEY_FLAG |
querypb.MySqlFlag_PART_KEY_FLAG |
querypb.MySqlFlag_NUM_FLAG),
},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("10")),
},
{
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("20")),
},
{
sqltypes.MakeTrusted(querypb.Type_INT64, []byte("30")),
},
},
RowsAffected: 3,
})
}
func checkQuery(t *testing.T, query string, sConn, cConn *Conn, result *sqltypes.Result) |
func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result *sqltypes.Result, wantfields, allRows bool) {
if sConn.Capabilities&CapabilityClientDeprecateEOF > 0 {
query += " NOEOF"
} else {
query += " EOF"
}
if wantfields {
query += " FIELDS"
} else {
query += " NOFIELDS"
}
if allRows {
query += " ALL"
} else {
query += " PARTIAL"
}
// Use a goroutine to run ExecuteFetch.
wg := sync.WaitGroup{}
wg.Add(1)
go func() {
defer wg.Done()
// Test ExecuteFetch.
maxrows := 10000
if !allRows {
// Asking for just one row max. The results that have more will fail.
maxrows = 1
}
got, err := cConn.ExecuteFetch(query, maxrows, wantfields)
if !allRows && len(result.Rows) > 1 {
if err == nil {
t.Errorf("ExecuteFetch should have failed but got: %v", got)
}
return
}
if err != nil {
t.Fatalf("executeFetch failed: %v", err)
}
expected := *result
if !wantfields {
expected.Fields = nil
}
if !got.Equal(&expected) {
for i, f := range got.Fields {
if i < len(expected.Fields) && !proto.Equal(f, expected.Fields[i]) {
t.Logf("Got field(%v) = %v", i, f)
t.Logf("Expected field(%v) = %v", i, expected.Fields[i])
}
}
t.Fatalf("ExecuteFetch(wantfields=%v) returned:\n%v\nBut was expecting:\n%v", wantfields, got, expected)
}
// Test ExecuteStreamFetch, build a Result.
expected = *result
if err := cConn.ExecuteStreamFetch(query); err != nil {
t.Fatalf("ExecuteStreamFetch(%v) failed: %v", query, err)
}
got = &sqltypes.Result{}
got.RowsAffected = result.RowsAffected
got.InsertID = result.InsertID
got.Fields, err = cConn.Fields()
if err != nil {
t.Fatalf("Fields(%v) failed: %v", query, err)
}
if len(got.Fields) == 0 {
got.Fields = nil
}
for {
row, err := cConn.FetchNext()
if err != nil {
t.Fatalf("FetchNext(%v) failed: %v", query, err)
}
if row == nil {
// Done.
break
}
got.Rows = append(got.Rows, row)
}
cConn.CloseResult()
if !got.Equal(&expected) {
for i, f := range got.Fields {
if i < len(expected.Fields) && !proto.Equal(f, expected.Fields[i]) {
t.Logf("========== Got field(%v) = %v", i, f)
t.Logf("========== Expected field(%v) = %v", i, expected.Fields[i])
}
}
for i, row := range got.Rows {
if i < len(expected.Rows) && !reflect.DeepEqual(row, expected.Rows[i]) {
t.Logf("========== Got row(%v) = %v", i, RowString(row))
t.Logf("========== Expected row(%v) = %v", i, RowString(expected.Rows[i]))
}
}
t.Errorf("\nExecuteStreamFetch(%v) returned:\n%+v\nBut was expecting:\n%+v\n", query, got, &expected)
}
}()
// The other side gets the request, and sends the result.
// Twice, once for ExecuteFetch, once for ExecuteStreamFetch.
count := 2
if !allRows && len(result.Rows) > 1 {
// short-circuit one test: the goroutine returned early and didn't
// run the streaming query.
count--
}
for i := 0; i < count; i++ {
comQuery, err := sConn.ReadPacket()
if err != nil {
t.Fatalf("server cannot read query: %v", err)
}
if comQuery[0] != ComQuery {
t.Fatalf("server got bad packet: %v", comQuery)
}
got := sConn.parseComQuery(comQuery)
if got != query {
t.Errorf("server got query '%v' but expected '%v'", got, query)
}
if err := sConn.writeResult(result); err != nil {
t.Errorf("Error writing result to client: %v", err)
}
sConn.sequence = 0
}
wg.Wait()
}
func testQueriesWithRealDatabase(t *testing.T, params *sqldb.ConnParams) {
ctx := context.Background()
conn, err := Connect(ctx, params)
if err != nil {
t.Fatal(err)
}
// Try a simple error case.
_, err = conn.ExecuteFetch("select * from aa", 1000, true)
if err == nil || !strings.Contains(err.Error(), "Table 'vttest.aa' doesn't exist") {
t.Fatalf("expected error but got: %v", err)
}
// Try a simple DDL.
result, err := conn.ExecuteFetch("create table a(id int, name varchar(128), primary key(id))", 0, false)
if err != nil {
t.Fatalf("create table failed: %v", err)
}
if result.RowsAffected != 0 {
t.Errorf("create table returned RowsAffected %v, was expecting 0", result.RowsAffected)
}
// Try a simple insert.
result, err = conn.ExecuteFetch("insert into a(id, name) values(10, 'nice name')", 1000, true)
if err != nil {
t.Fatalf("insert failed: %v", err)
}
if result.RowsAffected != 1 || len(result.Rows) != 0 {
t.Errorf("unexpected result for insert: %v", result)
}
// And re-read what we inserted.
result, err = conn.ExecuteFetch("select * from a", 1000, true)
if err != nil {
t.Fatalf("insert failed: %v", err)
}
expectedResult := &sqltypes.Result{
Fields: []*querypb.Field{
{
Name: "id",
Type: querypb.Type_INT32,
Table: "a",
OrgTable: "a",
Database: "vttest",
OrgName: "id",
ColumnLength: 11,
Charset: CharacterSetBinary,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG |
querypb.MySqlFlag_PRI_KEY_FLAG |
querypb.MySqlFlag_PART_KEY_FLAG |
querypb.MySqlFlag_NUM_FLAG),
},
{
Name: "name",
Type: querypb.Type_VARCHAR,
Table: "a",
OrgTable: "a",
Database: "vttest",
OrgName: "name",
ColumnLength: 384,
Charset: CharacterSetUtf8,
},
},
Rows: [][]sqltypes.Value{
{
sqltypes.MakeTrusted(querypb.Type_INT32, []byte("10")),
sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("nice name")),
},
},
RowsAffected: 1,
}
if !result.Equal(expectedResult) {
// MySQL 5.7 is adding the NO_DEFAULT_VALUE_FLAG to Flags.
expectedResult.Fields[0].Flags |= uint32(querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)
if !result.Equal(expectedResult) {
t.Errorf("unexpected result for select, got:\n%v\nexpected:\n%v\n", result, expectedResult)
}
}
// Insert a few rows.
for i := 0; i < 100; i++ {
result, err := conn.ExecuteFetch(fmt.Sprintf("insert into a(id, name) values(%v, 'nice name %v')", 1000+i, i), 1000, true)
if err != nil {
t.Fatalf("ExecuteFetch(%v) failed: %v", i, err)
}
if result.RowsAffected != 1 {
t.Errorf("insert into returned RowsAffected %v, was expecting 1", result.RowsAffected)
}
}
// And use a streaming query to read them back.
// Do it twice to make sure state is reset properly.
readRowsUsingStream(t, conn, 101)
readRowsUsingStream(t, conn, 101)
// And drop the table.
result, err = conn.ExecuteFetch("drop table a", 0, false)
if err != nil {
t.Fatalf("drop table failed: %v", err)
}
if result.RowsAffected != 0 {
t.Errorf("insert into returned RowsAffected %v, was expecting 0", result.RowsAffected)
}
}
func readRowsUsingStream(t *testing.T, conn *Conn, expectedCount int) {
// Start the streaming query.
if err := conn.ExecuteStreamFetch("select * from a"); err != nil {
t.Fatalf("ExecuteStreamFetch failed: %v", err)
}
// Check the fields.
expectedFields := []*querypb.Field{
{
Name: "id",
Type: querypb.Type_INT32,
Table: "a",
OrgTable: "a",
Database: "vttest",
OrgName: "id",
ColumnLength: 11,
Charset: CharacterSetBinary,
Flags: uint32(querypb.MySqlFlag_NOT_NULL_FLAG |
querypb.MySqlFlag_PRI_KEY_FLAG |
querypb.MySqlFlag_PART_KEY_FLAG |
querypb.MySqlFlag_NUM_FLAG),
},
{
Name: "name",
Type: querypb.Type_VARCHAR,
Table: "a",
OrgTable: "a",
Database: "vttest",
OrgName: "name",
ColumnLength: 384,
Charset: CharacterSetUtf8,
},
}
fields, err := conn.Fields()
if err != nil {
t.Fatalf("Fields failed: %v", err)
}
if !sqltypes.FieldsEqual(fields, expectedFields) {
// MySQL 5.7 is adding the NO_DEFAULT_VALUE_FLAG to Flags.
expectedFields[0].Flags |= uint32(querypb.MySqlFlag_NO_DEFAULT_VALUE_FLAG)
if !sqltypes.FieldsEqual(fields, expectedFields) {
t.Fatalf("fields are not right, got:\n%v\nexpected:\n%v", fields, expectedFields)
}
}
// Read the rows.
count := 0
for {
row, err := conn.FetchNext()
if err != nil {
t.Fatalf("FetchNext failed: %v", err)
}
if row == nil {
// We're done.
break
}
if len(row) != 2 {
t.Fatalf("Unexpected row found: %v", row)
}
count++
}
if count != expectedCount {
t.Errorf("Got unexpected count %v for query, was expecting %v", count, expectedCount)
}
conn.CloseResult()
}
| {
// The protocol depends on the CapabilityClientDeprecateEOF flag.
// So we want to test both cases.
sConn.Capabilities = 0
cConn.Capabilities = 0
checkQueryInternal(t, query, sConn, cConn, result, true /* wantfields */, true /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, false /* wantfields */, true /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, true /* wantfields */, false /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, false /* wantfields */, false /* allRows */)
sConn.Capabilities = CapabilityClientDeprecateEOF
cConn.Capabilities = CapabilityClientDeprecateEOF
checkQueryInternal(t, query, sConn, cConn, result, true /* wantfields */, true /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, false /* wantfields */, true /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, true /* wantfields */, false /* allRows */)
checkQueryInternal(t, query, sConn, cConn, result, false /* wantfields */, false /* allRows */)
} |
tree.go | package util
import "fmt"
// Slice2Tree converts slice data into tree-structured data
func Slice2Tree(sliceDatas []map[string]interface{}, idField, pidField string) []map[string]interface{} {
var r []map[string]interface{}
index := make(map[string]interface{})
for _, val := range sliceDatas {
id := fmt.Sprint(val[idField])
index[id] = val
}
for _, val := range sliceDatas {
pid := fmt.Sprint(val[pidField])
if _, ok := index[pid]; !ok || pid == "" {
r = append(r, val)
} else {
pval := index[pid].(map[string]interface{})
if _, ok := pval["children"]; !ok {
var n []map[string]interface{}
n = append(n, val)
pval["children"] = &n
} else {
nodes := pval["children"].(*[]map[string]interface{})
*nodes = append(*nodes, val)
}
}
}
return r
}
// ConvertToViewTree converts tree data into view-tree data
func ConvertToViewTree(treeDatas []map[string]interface{}, labelField, valueField, keyField string) []map[string]interface{} {
for _, node := range treeDatas {
node["title"] = node[labelField]
node["value"] = node[valueField]
node["key"] = node[keyField] | child, ok := node["children"]
if ok {
node["children"] = ConvertToViewTree(*child.(*[]map[string]interface{}), labelField, valueField, keyField)
}
}
return treeDatas
} | |
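A brief usage sketch of the `Slice2Tree`/`ConvertToViewTree` pair above (a minimal sketch under assumptions: the import path is made up, and the `id`/`pid`/`name` field names are the caller's choice):

```go
package main

import (
	"fmt"

	"example.com/project/util" // hypothetical import path for the package above
)

func main() {
	// A flat slice where each row references its parent via "pid";
	// a row whose pid is empty or unknown becomes a root.
	rows := []map[string]interface{}{
		{"id": "1", "pid": "", "name": "root"},
		{"id": "2", "pid": "1", "name": "child"},
	}
	tree := util.Slice2Tree(rows, "id", "pid")
	// tree holds one root; its "children" key is a
	// *[]map[string]interface{} containing the "child" row.
	fmt.Println(len(tree)) // 1
	view := util.ConvertToViewTree(tree, "name", "id", "id")
	fmt.Println(view[0]["title"]) // "root"
}
```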
issue_test.ts | import {
assert,
assertStrictEquals,
} from "https://deno.land/[email protected]/testing/asserts.ts";
import { serve, Server } from "https://deno.land/[email protected]/http/server.ts";
import { delay } from "https://deno.land/[email protected]/async/delay.ts";
import { MultipartReader } from "https://deno.land/[email protected]/mime/multipart.ts";
let server1: Server | undefined;
let handlers: Promise<void | string>[] = [];
// deno-lint-ignore no-explicit-any
function getHeaderValueParams(value: any) {
const params = new Map();
// Built imperatively to work around a Map constructor parameter type mismatch
value
.split(";")
.slice(1)
// deno-lint-ignore no-explicit-any
.map((s: any) => s.trim().split("="))
// deno-lint-ignore no-explicit-any
.filter((arr: any) => arr.length > 1)
// deno-lint-ignore ban-ts-comment
// @ts-ignore
.map(([k, v]) => [k, v.replace(/^"([^"]*)"$/, "$1")]) | return params;
}
async function handleServer1() {
await delay(100);
server1 = serve({ hostname: "0.0.0.0", port: 3000 });
for await (const request of server1) {
const contentTypeHeader = request.headers.get("content-type");
const params = getHeaderValueParams(contentTypeHeader);
const reader = new MultipartReader(request.body, params.get("boundary"));
const form = await reader.readForm();
const data = [];
for (const entry of form.entries()) {
data.push(entry as string[]);
}
const bodyContent = new URLSearchParams(data)
.toString();
await request.respond({ status: 200, body: bodyContent });
}
}
const serverOneUrl = "http://localhost:3000";
console.log("requesting any url path echos formdata of request");
async function closeServers() {
try {
// send a dummy request after close so the server loop exits
server1 && server1.close();
handlers.push(
fetch(serverOneUrl).then((r) => r.text()).catch((err) => {}),
);
await Promise.all(handlers);
handlers = [];
server1 = undefined;
} catch {
//
}
}
Deno.test("URLSearchParams accepts string[][]", () => {
const result = new URLSearchParams([
["foo", "bar"],
["foo", "baz"],
]).toString();
assertStrictEquals(result, "foo=bar&foo=baz");
});
Deno.test("multipart should read arrays properly", async () => {
try {
handlers.push(handleServer1());
const formData = new FormData();
formData.append("foo", "bar");
formData.append("foo", "baz");
const response = await fetch(serverOneUrl, {
body: formData,
}).then((r) => r.text());
assertStrictEquals(response, "foo=bar&foo=baz");
} finally {
await closeServers();
}
}); | // deno-lint-ignore ban-ts-comment
// @ts-ignore
.forEach(([k, v]) => params.set(k, v)); |
mock_sender.rs | //! An [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient).
use {
crate::{
client_error::Result,
rpc_config::RpcBlockProductionConfig,
rpc_request::RpcRequest,
rpc_response::{
Response, RpcAccountBalance, RpcBlockProduction, RpcBlockProductionRange, RpcBlockhash,
RpcConfirmedTransactionStatusWithSignature, RpcContactInfo, RpcFees, RpcIdentity,
RpcInflationGovernor, RpcInflationRate, RpcInflationReward, RpcKeyedAccount,
RpcPerfSample, RpcResponseContext, RpcSimulateTransactionResult, RpcSnapshotSlotInfo,
RpcStakeActivation, RpcSupply, RpcVersionInfo, RpcVoteAccountInfo,
RpcVoteAccountStatus, StakeActivationState,
},
rpc_sender::*,
},
serde_json::{json, Number, Value},
solana_account_decoder::{UiAccount, UiAccountEncoding},
solana_sdk::{
account::Account,
clock::{Slot, UnixTimestamp},
epoch_info::EpochInfo,
fee_calculator::{FeeCalculator, FeeRateGovernor},
instruction::InstructionError,
message::MessageHeader,
pubkey::Pubkey,
signature::Signature,
sysvar::epoch_schedule::EpochSchedule,
transaction::{self, Transaction, TransactionError},
},
solana_transaction_status::{
EncodedConfirmedBlock, EncodedConfirmedTransaction, EncodedTransaction,
EncodedTransactionWithStatusMeta, Rewards, TransactionConfirmationStatus,
TransactionStatus, UiCompiledInstruction, UiMessage, UiRawMessage, UiTransaction,
UiTransactionEncoding, UiTransactionStatusMeta,
},
solana_version::Version,
std::{collections::HashMap, net::SocketAddr, str::FromStr, sync::RwLock},
};
pub const PUBKEY: &str = "7RoSF9fUmdphVCpabEoefH81WwrW7orsWonXWqTXkKV8";
pub const SIGNATURE: &str =
"43yNSFC6fYTuPgTNFFhF4axw7AfWxB2BPdurme8yrsWEYwm8299xh8n6TAHjGymiSub1XtyxTNyd9GBfY2hxoBw8";
pub type Mocks = HashMap<RpcRequest, Value>;
pub struct MockSender {
mocks: RwLock<Mocks>,
url: String,
}
/// An [`RpcSender`] used for unit testing [`RpcClient`](crate::rpc_client::RpcClient).
///
/// This is primarily for internal use.
///
/// Unless directed otherwise, it will generally return a reasonable default
/// response, at least for [`RpcRequest`] values for which responses have been
/// implemented.
///
/// The behavior can be customized in two ways:
///
/// 1) The `url` constructor argument is not actually a URL, but a simple string
/// directive that changes `MockSender`'s behavior in specific scenarios.
///
/// If `url` is "fails" then any call to `send` will return `Ok(Value::Null)`.
///
/// It is customary to set the `url` to "succeeds" for mocks that should
/// return successfully, though this value is not actually interpreted.
///
/// Other possible values of `url` are specific to different `RpcRequest`
/// values. Read the implementation for specifics.
///
/// 2) Custom responses can be configured by providing [`Mocks`] to the
/// [`MockSender::new_with_mocks`] constructor. This type is a [`HashMap`]
/// from [`RpcRequest`] to a JSON [`Value`] response. Any entries in this map
/// override the default behavior for the given request.
impl MockSender {
pub fn new<U: ToString>(url: U) -> Self {
Self::new_with_mocks(url, Mocks::default())
}
pub fn new_with_mocks<U: ToString>(url: U, mocks: Mocks) -> Self |
}
impl RpcSender for MockSender {
fn get_transport_stats(&self) -> RpcTransportStats {
RpcTransportStats::default()
}
fn send(&self, request: RpcRequest, params: serde_json::Value) -> Result<serde_json::Value> {
if let Some(value) = self.mocks.write().unwrap().remove(&request) {
return Ok(value);
}
if self.url == "fails" {
return Ok(Value::Null);
}
let method = &request.build_request_json(42, params.clone())["method"];
let val = match method.as_str().unwrap() {
"getAccountInfo" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Null,
})?,
"getBalance" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: Value::Number(Number::from(50)),
})?,
"getRecentBlockhash" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: (
Value::String(PUBKEY.to_string()),
serde_json::to_value(FeeCalculator::default()).unwrap(),
),
})?,
"getEpochInfo" => serde_json::to_value(EpochInfo {
epoch: 1,
slot_index: 2,
slots_in_epoch: 32,
absolute_slot: 34,
block_height: 34,
transaction_count: Some(123),
})?,
"getFeeCalculatorForBlockhash" => {
let value = if self.url == "blockhash_expired" {
Value::Null
} else {
serde_json::to_value(Some(FeeCalculator::default())).unwrap()
};
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value,
})?
}
"getFeeRateGovernor" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: serde_json::to_value(FeeRateGovernor::default()).unwrap(),
})?,
"getFees" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: serde_json::to_value(RpcFees {
blockhash: PUBKEY.to_string(),
fee_calculator: FeeCalculator::default(),
last_valid_slot: 42,
last_valid_block_height: 42,
})
.unwrap(),
})?,
"getSignatureStatuses" => {
let status: transaction::Result<()> = if self.url == "account_in_use" {
Err(TransactionError::AccountInUse)
} else if self.url == "instruction_error" {
Err(TransactionError::InstructionError(
0,
InstructionError::UninitializedAccount,
))
} else {
Ok(())
};
let status = if self.url == "sig_not_found" {
None
} else {
let err = status.clone().err();
Some(TransactionStatus {
status,
slot: 1,
confirmations: None,
err,
confirmation_status: Some(TransactionConfirmationStatus::Finalized),
})
};
let statuses: Vec<Option<TransactionStatus>> = params.as_array().unwrap()[0]
.as_array()
.unwrap()
.iter()
.map(|_| status.clone())
.collect();
serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: statuses,
})?
}
"getTransaction" => serde_json::to_value(EncodedConfirmedTransaction {
slot: 2,
transaction: EncodedTransactionWithStatusMeta {
transaction: EncodedTransaction::Json(
UiTransaction {
signatures: vec!["3AsdoALgZFuq2oUVWrDYhg2pNeaLJKPLf8hU2mQ6U8qJxeJ6hsrPVpMn9ma39DtfYCrDQSvngWRP8NnTpEhezJpE".to_string()],
message: UiMessage::Raw(
UiRawMessage {
header: MessageHeader {
num_required_signatures: 1,
num_readonly_signed_accounts: 0,
num_readonly_unsigned_accounts: 1,
},
account_keys: vec![
"C6eBmAXKg6JhJWkajGa5YRGUfG4YKXwbxF5Ufv7PtExZ".to_string(),
"2Gd5eoR5J4BV89uXbtunpbNhjmw3wa1NbRHxTHzDzZLX".to_string(),
"11111111111111111111111111111111".to_string(),
],
recent_blockhash: "D37n3BSG71oUWcWjbZ37jZP7UfsxG2QMKeuALJ1PYvM6".to_string(),
instructions: vec![UiCompiledInstruction {
program_id_index: 2,
accounts: vec![0, 1],
data: "3Bxs49DitAvXtoDR".to_string(),
}],
})
}),
meta: Some(UiTransactionStatusMeta {
err: None,
status: Ok(()),
fee: 0,
pre_balances: vec![499999999999999950, 50, 1],
post_balances: vec![499999999999999950, 50, 1],
inner_instructions: None,
log_messages: None,
pre_token_balances: None,
post_token_balances: None,
rewards: None,
}),
},
block_time: Some(1628633791),
})?,
"getTransactionCount" => json![1234],
"getSlot" => json![0],
"getMaxShredInsertSlot" => json![0],
"requestAirdrop" => Value::String(Signature::new(&[8; 64]).to_string()),
"getSnapshotSlot" => Value::Number(Number::from(0)),
"getHighestSnapshotSlot" => json!(RpcSnapshotSlotInfo {
full: 100,
incremental: Some(110),
}),
"getBlockHeight" => Value::Number(Number::from(1234)),
"getSlotLeaders" => json!([PUBKEY]),
"getBlockProduction" => {
if params.is_null() {
json!(Response {
context: RpcResponseContext { slot: 1 },
value: RpcBlockProduction {
by_identity: HashMap::new(),
range: RpcBlockProductionRange {
first_slot: 1,
last_slot: 2,
},
},
})
} else {
let config: Vec<RpcBlockProductionConfig> =
serde_json::from_value(params).unwrap();
let config = config[0].clone();
let mut by_identity = HashMap::new();
by_identity.insert(config.identity.unwrap(), (1, 123));
let config_range = config.range.unwrap_or_default();
json!(Response {
context: RpcResponseContext { slot: 1 },
value: RpcBlockProduction {
by_identity,
range: RpcBlockProductionRange {
first_slot: config_range.first_slot,
last_slot: {
if let Some(last_slot) = config_range.last_slot {
last_slot
} else {
2
}
},
},
},
})
}
}
"getStakeActivation" => json!(RpcStakeActivation {
state: StakeActivationState::Activating,
active: 123,
inactive: 12,
}),
"getSupply" => json!(Response {
context: RpcResponseContext { slot: 1 },
value: RpcSupply {
total: 100000000,
circulating: 50000,
non_circulating: 20000,
non_circulating_accounts: vec![PUBKEY.to_string()],
},
}),
"getLargestAccounts" => {
let rpc_account_balance = RpcAccountBalance {
address: PUBKEY.to_string(),
lamports: 10000,
};
json!(Response {
context: RpcResponseContext { slot: 1 },
value: vec![rpc_account_balance],
})
}
"getVoteAccounts" => {
json!(RpcVoteAccountStatus {
current: vec![],
delinquent: vec![RpcVoteAccountInfo {
vote_pubkey: PUBKEY.to_string(),
node_pubkey: PUBKEY.to_string(),
activated_stake: 0,
commission: 0,
epoch_vote_account: false,
epoch_credits: vec![],
last_vote: 0,
root_slot: Slot::default(),
}],
})
}
"sendTransaction" => {
let signature = if self.url == "malicious" {
Signature::new(&[8; 64]).to_string()
} else {
let tx_str = params.as_array().unwrap()[0].as_str().unwrap().to_string();
let data = base64::decode(tx_str).unwrap();
let tx: Transaction = bincode::deserialize(&data).unwrap();
tx.signatures[0].to_string()
};
Value::String(signature)
}
"simulateTransaction" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: RpcSimulateTransactionResult {
err: None,
logs: None,
accounts: None,
units_consumed: None,
},
})?,
"getMinimumBalanceForRentExemption" => json![20],
"getVersion" => {
let version = Version::default();
json!(RpcVersionInfo {
solana_core: version.to_string(),
feature_set: Some(version.feature_set),
})
}
"getLatestBlockhash" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: RpcBlockhash {
blockhash: PUBKEY.to_string(),
last_valid_block_height: 1234,
},
})?,
"getFeeForMessage" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: json!(Some(0)),
})?,
"getClusterNodes" => serde_json::to_value(vec![RpcContactInfo {
pubkey: PUBKEY.to_string(),
gossip: Some(SocketAddr::from(([10, 239, 6, 48], 8899))),
tpu: Some(SocketAddr::from(([10, 239, 6, 48], 8856))),
rpc: Some(SocketAddr::from(([10, 239, 6, 48], 8899))),
version: Some("1.0.0 c375ce1f".to_string()),
feature_set: None,
shred_version: None,
}])?,
"getBlock" => serde_json::to_value(EncodedConfirmedBlock {
previous_blockhash: "mfcyqEXB3DnHXki6KjjmZck6YjmZLvpAByy2fj4nh6B".to_string(),
blockhash: "3Eq21vXNB5s86c62bVuUfTeaMif1N2kUqRPBmGRJhyTA".to_string(),
parent_slot: 429,
transactions: vec![EncodedTransactionWithStatusMeta {
transaction: EncodedTransaction::Binary(
"ju9xZWuDBX4pRxX2oZkTjxU5jB4SSTgEGhX8bQ8PURNzyzqKMPPpNvWihx8zUe\
FfrbVNoAaEsNKZvGzAnTDy5bhNT9kt6KFCTBixpvrLCzg4M5UdFUQYrn1gdgjX\
pLHxcaShD81xBNaFDgnA2nkkdHnKtZt4hVSfKAmw3VRZbjrZ7L2fKZBx21CwsG\
hD6onjM2M3qZW5C8J6d1pj41MxKmZgPBSha3MyKkNLkAGFASK"
.to_string(),
UiTransactionEncoding::Base58,
),
meta: None,
}],
rewards: Rewards::new(),
block_time: None,
block_height: Some(428),
})?,
"getBlocks" => serde_json::to_value(vec![1, 2, 3])?,
"getBlocksWithLimit" => serde_json::to_value(vec![1, 2, 3])?,
"getSignaturesForAddress" => {
serde_json::to_value(vec![RpcConfirmedTransactionStatusWithSignature {
signature: SIGNATURE.to_string(),
slot: 123,
err: None,
memo: None,
block_time: None,
confirmation_status: Some(TransactionConfirmationStatus::Finalized),
}])?
}
"getBlockTime" => serde_json::to_value(UnixTimestamp::default())?,
"getEpochSchedule" => serde_json::to_value(EpochSchedule::default())?,
"getRecentPerformanceSamples" => serde_json::to_value(vec![RpcPerfSample {
slot: 347873,
num_transactions: 125,
num_slots: 123,
sample_period_secs: 60,
}])?,
"getIdentity" => serde_json::to_value(RpcIdentity {
identity: PUBKEY.to_string(),
})?,
"getInflationGovernor" => serde_json::to_value(
RpcInflationGovernor {
initial: 0.08,
terminal: 0.015,
taper: 0.15,
foundation: 0.05,
foundation_term: 7.0,
})?,
"getInflationRate" => serde_json::to_value(
RpcInflationRate {
total: 0.08,
validator: 0.076,
foundation: 0.004,
epoch: 0,
})?,
"getInflationReward" => serde_json::to_value(vec![
Some(RpcInflationReward {
epoch: 2,
effective_slot: 224,
amount: 2500,
post_balance: 499999442500,
commission: None,
})])?,
"minimumLedgerSlot" => json![123],
"getMaxRetransmitSlot" => json![123],
"getMultipleAccounts" => serde_json::to_value(Response {
context: RpcResponseContext { slot: 1 },
value: vec![Value::Null, Value::Null]
})?,
"getProgramAccounts" => {
let pubkey = Pubkey::from_str(&PUBKEY.to_string()).unwrap();
let account = Account {
lamports: 1_000_000,
data: vec![],
owner: pubkey,
executable: false,
rent_epoch: 0,
};
serde_json::to_value(vec![
RpcKeyedAccount {
pubkey: PUBKEY.to_string(),
account: UiAccount::encode(
&pubkey,
&account,
UiAccountEncoding::Base64,
None,
None,
)
}
])?
},
_ => Value::Null,
};
Ok(val)
}
}
| {
Self {
url: url.to_string(),
mocks: RwLock::new(mocks),
}
} |
efficientnet_b5_fpn_bn_scratch_400_6x.py | from symbol.builder import FasterRcnn as Detector
from symbol.builder import add_anchor_to_arg
from models.efficientnet.builder import EfficientNetB5FPN as Backbone
from models.FPN.builder import FPNNeck as Neck
from models.FPN.builder import FPNRpnHead as RpnHead
from models.FPN.builder import FPNRoiAlign as RoiExtractor
from models.FPN.builder import FPNBbox2fcHead as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 8 if is_train else 1
fp16 = True
loader_worker = 8
class KvstoreParam:
kvstore = "nccl"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
normalizer = normalizer_factory(type="localbn", ndev=len(KvstoreParam.gpus))
# normalizer = normalizer_factory(type="gn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image
nnvm_proposal = True
nnvm_rpn_target = False
class anchor_generate:
scale = (4,)
ratio = (0.5, 1.0, 2.0)
stride = (4, 8, 16, 32, 64)
image_anchor = 256
max_side = 700
class anchor_assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
image_anchor = 256
pos_fraction = 0.5
class head:
conv_channel = 256
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 2000 if is_train else 1000
post_nms_top_n = 2000 if is_train else 1000
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = False
image_roi = 512
fg_fraction = 0.25
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 81
class_agnostic = False
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 512
batch_image = General.batch_image
class regress_target:
class_agnostic = False
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = (4, 8, 16, 32)
roi_canonical_scale = 224
roi_canonical_level = 4
class DatasetParam:
if is_train:
image_set = ("coco_train2014", "coco_valminusminival2014")
total_image = 82783 + 35504
else:
image_set = ("coco_minival2014", )
total_image = 5000
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head)
test_sym = detector.get_test_symbol(backbone, neck, rpn_head, roi_extractor, bbox_head)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = True
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = None
epoch = 0
fixed_param = []
def process_weight(sym, arg, aux):
for stride in RpnParam.anchor_generate.stride:
add_anchor_to_arg(
sym, arg, aux, RpnParam.anchor_generate.max_side,
stride, RpnParam.anchor_generate.scale,
RpnParam.anchor_generate.ratio)
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 1e-4
clip_gradient = None
class schedule:
mult = 6
begin_epoch = 0
end_epoch = 6 * mult
if mult <= 2:
lr_iter = [60000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
80000 * mult * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
else:
# follow the setting in Rethinking ImageNet Pre-training
# reduce the lr in the last 60k and 20k iterations
lr_iter = [(DatasetParam.total_image * 2 // 16 * end_epoch - 60000) * 16 //
(len(KvstoreParam.gpus) * KvstoreParam.batch_image),
(DatasetParam.total_image * 2 // 16 * end_epoch - 20000) * 16 //
(len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0
iter = 500
class TestParam:
min_det_score = 0.05
max_det_per_image = 100
process_roidb = lambda x: x
process_output = lambda x, y: x
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/annotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
# data processing
class | :
short = 400
long = 600
class PadParam:
short = 400
long = 600
max_num_gt = 100
class AnchorTarget2DParam:
def __init__(self):
self.generate = self._generate()
class _generate:
def __init__(self):
self.stride = (4, 8, 16, 32, 64)
self.short = (100, 50, 25, 13, 7)
self.long = (150, 75, 38, 19, 10)
scales = (4,)  # one-element tuple; (4) would be a plain int
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.FPN.input import PyramidAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data"]
label_name = ["gt_bbox", "im_info"]
if not RpnParam.nnvm_rpn_target:
transform.append(PyramidAnchorTarget2D(AnchorTarget2DParam()))
label_name += ["rpn_cls_label", "rpn_reg_target", "rpn_reg_weight"]
else:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "im_id", "rec_id"]
label_name = []
import core.detection_metric as metric
rpn_acc_metric = metric.AccWithIgnore(
"RpnAcc",
["rpn_cls_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
rpn_l1_metric = metric.L1(
"RpnL1",
["rpn_reg_loss_output", "rpn_cls_label_blockgrad_output"],
[]
)
# for bbox, the label is generated in the network, so it is an output
box_acc_metric = metric.AccWithIgnore(
"RcnnAcc",
["bbox_cls_loss_output", "bbox_label_blockgrad_output"],
[]
)
box_l1_metric = metric.L1(
"RcnnL1",
["bbox_reg_loss_output", "bbox_label_blockgrad_output"],
[]
)
metric_list = [rpn_acc_metric, rpn_l1_metric, box_acc_metric, box_l1_metric]
return General, KvstoreParam, RpnParam, RoiParam, BboxParam, DatasetParam, \
ModelParam, OptimizeParam, TestParam, \
transform, data_name, label_name, metric_list
| ResizeParam |
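For reference, the linear learning-rate scaling in `OptimizeParam.optimizer` above evaluates, under this config's own training defaults (8 GPUs and `batch_image = 8`), to

$$\mathrm{lr} = \frac{0.01}{8} \times \underbrace{8}_{\text{GPUs}} \times \underbrace{8}_{\text{batch\_image}} = 0.08$$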
date_time.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib;
use glib::translate::*;
use glib::GString;
use gst_sys;
glib_wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct DateTime(Shared<gst_sys::GstDateTime>);
match fn {
ref => |ptr| gst_sys::gst_date_time_ref(ptr),
unref => |ptr| gst_sys::gst_date_time_unref(ptr),
get_type => || gst_sys::gst_date_time_get_type(),
}
}
impl DateTime {
pub fn new(
tzoffset: f32,
year: i32,
month: i32,
day: i32,
hour: i32,
minute: i32,
seconds: f64,
) -> DateTime {
assert_initialized_main_thread!();
unsafe {
from_glib_full(gst_sys::gst_date_time_new(
tzoffset, year, month, day, hour, minute, seconds,
))
}
}
pub fn new_from_g_date_time(dt: &glib::DateTime) -> Option<DateTime> {
assert_initialized_main_thread!();
unsafe {
from_glib_full(gst_sys::gst_date_time_new_from_g_date_time(
dt.to_glib_full(),
))
}
}
pub fn new_from_iso8601_string(string: &str) -> Option<DateTime> {
assert_initialized_main_thread!();
unsafe {
from_glib_full(gst_sys::gst_date_time_new_from_iso8601_string(
string.to_glib_none().0,
))
}
}
pub fn new_from_unix_epoch_local_time(secs: i64) -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_from_unix_epoch_local_time(secs)) }
}
pub fn new_from_unix_epoch_utc(secs: i64) -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_from_unix_epoch_utc(secs)) }
}
pub fn new_local_time(
year: i32,
month: i32,
day: i32,
hour: i32,
minute: i32,
seconds: f64,
) -> DateTime {
assert_initialized_main_thread!();
unsafe {
from_glib_full(gst_sys::gst_date_time_new_local_time(
year, month, day, hour, minute, seconds,
))
}
}
pub fn new_now_local_time() -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_now_local_time()) }
}
pub fn new_now_utc() -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_now_utc()) }
}
pub fn new_y(year: i32) -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_y(year)) }
}
pub fn new_ym(year: i32, month: i32) -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_ym(year, month)) }
}
pub fn new_ymd(year: i32, month: i32, day: i32) -> DateTime {
assert_initialized_main_thread!();
unsafe { from_glib_full(gst_sys::gst_date_time_new_ymd(year, month, day)) }
}
pub fn get_day(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_day(self.to_glib_none().0) }
}
pub fn get_hour(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_hour(self.to_glib_none().0) }
}
pub fn get_microsecond(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_microsecond(self.to_glib_none().0) }
}
pub fn get_minute(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_minute(self.to_glib_none().0) }
}
pub fn get_month(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_month(self.to_glib_none().0) }
}
pub fn get_second(&self) -> i32 {
unsafe { gst_sys::gst_date_time_get_second(self.to_glib_none().0) }
}
pub fn get_time_zone_offset(&self) -> f32 {
unsafe { gst_sys::gst_date_time_get_time_zone_offset(self.to_glib_none().0) }
}
pub fn get_year(&self) -> i32 |
pub fn has_day(&self) -> bool {
unsafe { from_glib(gst_sys::gst_date_time_has_day(self.to_glib_none().0)) }
}
pub fn has_month(&self) -> bool {
unsafe { from_glib(gst_sys::gst_date_time_has_month(self.to_glib_none().0)) }
}
pub fn has_second(&self) -> bool {
unsafe { from_glib(gst_sys::gst_date_time_has_second(self.to_glib_none().0)) }
}
pub fn has_time(&self) -> bool {
unsafe { from_glib(gst_sys::gst_date_time_has_time(self.to_glib_none().0)) }
}
pub fn has_year(&self) -> bool {
unsafe { from_glib(gst_sys::gst_date_time_has_year(self.to_glib_none().0)) }
}
pub fn to_g_date_time(&self) -> Option<glib::DateTime> {
unsafe { from_glib_full(gst_sys::gst_date_time_to_g_date_time(self.to_glib_none().0)) }
}
pub fn to_iso8601_string(&self) -> Option<GString> {
unsafe {
from_glib_full(gst_sys::gst_date_time_to_iso8601_string(
self.to_glib_none().0,
))
}
}
}
unsafe impl Send for DateTime {}
unsafe impl Sync for DateTime {}
| {
unsafe { gst_sys::gst_date_time_get_year(self.to_glib_none().0) }
} |
control_flow.rs | use petgraph::graph;
use petgraph::visit::EdgeRef;
use petgraph::Direction;
use crate::ast;
use crate::span::Span;
use super::error::{AnalysisError, ControlFlowError};
use super::expr_flow;
use super::semantic_data::LoopId;
use super::type_cons::TypeCons;
use super::abstract_type::AbstractType;
use super::type_checker::TypingContext;
use super::typed_ast;
use super::analysis_context::{
AnalysisUniverse,
AnalysisContext,
GlobalData,
LocalData,
ReservedAnonymousFn
};
use super::control_data::*;
use super::anon_storage::AnonStorage;
type InternalLoopData = Option<(graph::NodeIndex, graph::NodeIndex, LoopId)>;
macro_rules! node_w {
($CFG: expr, $node: expr) => {
$CFG.graph().node_weight($node).unwrap()
};
}
macro_rules! neighbors {
($CFG: expr, $node: expr) => {
$CFG.graph().neighbors_directed($node, Direction::Outgoing)
};
}
macro_rules! append_node {
($CFG: expr, $head: expr, $previous: expr, $to_insert: expr) => {
append_node!($CFG, $head, $previous, $to_insert, Edge::Normal)
};
($CFG: expr, $head: expr, $previous: expr, $to_insert: expr, $edge: expr) => {
let temp = $CFG.graph.add_node($to_insert);
if let Some(previous_node) = $previous {
$CFG.graph.add_edge(previous_node, temp, $edge);
}
if $head.is_none() {
$head = Some(temp);
}
$previous = Some(temp);
};
}
macro_rules! append_node_index {
($CFG: expr, $head: expr, $previous: expr, $to_insert: expr) => {
append_node_index!($CFG, $head, $previous, $to_insert, Edge::Normal)
};
($CFG: expr, $head: expr, $previous: expr, $to_insert: expr, $edge: expr) => {
if let Some(previous_node) = $previous {
$CFG.graph.add_edge(previous_node, $to_insert, $edge);
}
if $head.is_none() {
$head = Some($to_insert);
}
$previous = Some($to_insert);
};
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct BranchData {
head: Option<graph::NodeIndex>,
foot: Option<graph::NodeIndex>,
}
#[derive(Clone, Debug)]
pub struct CFG {
graph: graph::Graph<Node, Edge>,
start: graph::NodeIndex,
end: graph::NodeIndex,
}
impl CFG {
pub fn graph(&self) -> &graph::Graph<Node, Edge> {
&self.graph
}
pub fn start(&self) -> graph::NodeIndex {
self.start
}
pub fn after_start(&self) -> graph::NodeIndex {
self.next(self.start)
}
pub fn end(&self) -> graph::NodeIndex {
self.end
}
///
/// Convenience function to get the next node in a linear sequence. If the current node has
/// multiple outgoing edges (such as Node::Return, Node::Break, and
/// Node::Continue) or none (Node::End), this panics.
///
pub fn next(&self, id: graph::NodeIndex) -> graph::NodeIndex {
let mut neighbors =
self.graph.neighbors_directed(id, Direction::Outgoing);
if neighbors.clone().count() != 1 {
panic!("CFG::next() only works when a Node has 1 neighbor");
} else {
neighbors.next().unwrap()
}
}
pub fn previous(&self, id: graph::NodeIndex) -> graph::NodeIndex {
let mut neighbors = self.neighbors_in(id);
if neighbors.clone().count() != 1 {
panic!("CFG::previous() only works when a Node has 1 neighbor");
} else {
neighbors.next().unwrap()
}
}
pub fn before_branch_merge(
&self,
id: graph::NodeIndex,
) -> Vec<graph::NodeIndex> {
match *self.node_weight(id) {
Node::BranchMerge(_) => self.neighbors_in(id).collect(),
ref n @ _ => panic!(
"CFG::before_branch_merge() only works with Node::BranchMerge. Found {:?}",
n
),
}
}
pub fn after_loop_foot(&self, id: graph::NodeIndex) -> graph::NodeIndex {
let loop_id;
match *node_w!(self, id) {
Node::LoopFoot(ref data) => loop_id = data.loop_id,
_ => panic!("Should only be given a Node::LoopFoot"),
}
let neighbors = neighbors!(self, id);
let neighbor_count = neighbors.clone().count();
if neighbor_count != 2 {
panic!("Loop foot should always be pointing to LoopHead and the next Node. Need two directed neighbors, found {}", neighbor_count);
}
for n in neighbors {
match *node_w!(self, n) {
Node::LoopHead(ref data, _) => {
if loop_id != data.loop_id {
return n;
}
}
_ => return n,
}
}
unreachable!();
}
///
/// Expects id of LoopHead or BranchSplit
/// Returns (TRUE, FALSE) branch heads.
///
pub fn after_conditional(
&self,
id: graph::NodeIndex,
) -> (graph::NodeIndex, graph::NodeIndex) {
match *node_w!(self, id) {
Node::LoopHead(..) => (),
Node::BranchSplit(..) => (),
_ => panic!("Should only be given a Node::Condition"),
}
let edges = self.graph.edges_directed(id, Direction::Outgoing);
assert_eq!(edges.clone().count(), 2);
let mut true_branch = None;
let mut false_branch = None;
for e in edges {
match *e.weight() {
Edge::True => true_branch = Some(e.target()),
Edge::False => false_branch = Some(e.target()),
ref e @ _ => panic!(
"Unexpected edge {:?} coming out of a condition node.",
e
),
}
}
(true_branch.unwrap(), false_branch.unwrap())
}
pub fn after_return(&self, id: graph::NodeIndex) -> graph::NodeIndex {
match *node_w!(self, id) {
Node::Return(..) => (),
_ => panic!("Should only be given a Node::Return"),
}
let mut neighbors = neighbors!(self, id);
let neighbor_count = neighbors.clone().count();
if neighbor_count == 2 {
let mut found_first_end = false;
for n in neighbors {
match *node_w!(self, n) {
Node::End => {
if found_first_end {
return n;
} else {
found_first_end = true;
}
}
_ => return n,
}
}
} else if neighbor_count == 1 {
return neighbors.next().unwrap();
} else {
panic!("Node::Return points to {} neighbors. Nodes should never point towards more than 2 neighbors but at least 1 (except Node::End).", neighbor_count);
}
unreachable!();
}
pub fn after_continue(&self, id: graph::NodeIndex) -> graph::NodeIndex {
match *node_w!(self, id) {
Node::Continue(_) => (),
_ => panic!("Should only be given a Node::Continue"),
}
let mut neighbors = neighbors!(self, id);
let neighbor_count = neighbors.clone().count();
if neighbor_count == 2 {
let mut found_first = false;
for n in neighbors {
match *node_w!(self, n) {
Node::LoopHead(..) => {
if found_first {
return n;
} else {
found_first = true;
}
}
_ => return n,
}
}
} else if neighbor_count == 1 {
return neighbors.next().unwrap();
} else {
panic!("Node::Continue points to {} neighbors. Nodes should never point towards more than 2 neighbors but at least 1 (except Node::End).", neighbor_count);
}
unreachable!();
}
pub fn after_break(&self, id: graph::NodeIndex) -> graph::NodeIndex {
match *node_w!(self, id) {
Node::Break(_) => (),
_ => panic!("Should only be given a Node::Break"),
}
let mut neighbors = neighbors!(self, id);
let neighbor_count = neighbors.clone().count();
if neighbor_count == 2 {
let mut found_first = false;
for n in neighbors {
match *node_w!(self, n) {
Node::LoopFoot(_) => {
if found_first {
return n;
} else {
found_first = true;
}
}
_ => return n,
}
}
} else if neighbor_count == 1 {
return neighbors.next().unwrap();
} else {
panic!("Node::Break points to {} neighbors. Nodes should never point towards more than 2 neighbors but at least 1 (except Node::End).", neighbor_count);
}
unreachable!();
}
pub fn node_weight(&self, node: graph::NodeIndex) -> &Node {
self.graph.node_weight(node).unwrap()
}
pub fn node_weight_mut(&mut self, node: graph::NodeIndex) -> &mut Node {
self.graph.node_weight_mut(node).unwrap()
}
pub fn neighbors_out(
&self,
node: graph::NodeIndex,
) -> graph::Neighbors<Edge> {
self.graph.neighbors_directed(node, Direction::Outgoing)
}
pub fn neighbors_in(
&self,
node: graph::NodeIndex,
) -> graph::Neighbors<Edge> {
self.graph.neighbors_directed(node, Direction::Incoming)
}
#[allow(unused_assignments)]
///
/// Generate the control flow graph.
/// Only performs continue/break statement checking (necessary for CFG generation).
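/// The emitted graph always has the shape
///   Start -> EnterScope -> <body> [-> implicit Return] -> ExitScope -> End,
/// where the implicit Return is appended only for Unit-returning functions.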
///
pub fn generate(
universe: &AnalysisUniverse,
global_data: &mut GlobalData,
local_data: &mut LocalData,
body: ast::AstNode<ast::Block>,
fn_type: &TypeCons,
_analysis_context: &AnalysisContext,
) -> Result<(AnonStorage<ReservedAnonymousFn>, Self), AnalysisError> {
let mut cfg = {
let mut graph = graph::Graph::new();
let start = graph.add_node(Node::Start);
let end = graph.add_node(Node::End);
CFG {
graph: graph,
start: start,
end: end,
}
};
// Start with Node::Start
let (body, _) = body.to_data();
let instructions = body.0;
// TODO: Return anonymous functions
let mut anonymous_fns = AnonStorage::new();
let function_body = cfg.generate_scoped_block(
universe,
global_data,
local_data,
&mut anonymous_fns,
instructions.into_iter(),
None,
)?;
let mut previous = Some(cfg.start);
let mut head = previous;
append_node!(cfg, head, previous, Node::EnterScope);
// Append the function body.
if let Some(branch_head) = function_body.head {
cfg.graph
.add_edge(previous.unwrap(), branch_head, Edge::Normal);
previous = Some(function_body.foot.unwrap());
}
if let TypeCons::Function {
ref return_type, ..
} = fn_type
{
let outer_scope = universe.std_scope();
let outer_context = TypingContext::empty();
// TODO: Should this be the function scope/context?
let return_type = return_type.substitute(
universe,
&outer_scope,
&outer_context,
)?;
if let AbstractType::Unit(_) = return_type {
append_node!(
cfg,
head,
previous,
Node::Return(ReturnData {
expr: None,
span: Span::dummy(),
})
);
}
}
append_node!(cfg, head, previous, Node::ExitScope);
append_node_index!(cfg, head, previous, cfg.end);
Ok((anonymous_fns, cfg))
}
///
/// Generates a block of code encapsulated by ScopeEnter and ScopeExit.
///
/// Expression statements (assignment, local declaration), expressions ('1 + 2;') are placed
/// into a basic block. Whenever a non-basic block structure is encountered, the current basic
/// block is appended to the graph.
///
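/// For example, `let a = 1; let b = 2; if c { ... }` produces a single
/// Node::Block holding both declarations, followed by the branch structure
/// generated for the `if`.
///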
fn generate_scoped_block<'a, 'b, T>(
&'a mut self,
universe: &'b AnalysisUniverse,
global_data: &'b mut GlobalData,
local_data: &'b mut LocalData,
anonymous_fns: &mut AnonStorage<ReservedAnonymousFn>,
mut instructions: T,
loop_data: InternalLoopData,
) -> Result<BranchData, ControlFlowError>
where
T: Iterator<Item = ast::Stmt>,
{
use crate::ast::*;
let mut previous: Option<graph::NodeIndex> = None;
let mut head: Option<graph::NodeIndex> = None;
let mut current_block = BasicBlock::new();
while let Some(next) = instructions.next() {
match next {
// Added to current basic block
Stmt::Expr(expr) => {
let (ast_expr, span) = expr.to_data();
let (mut anon, expr) =
expr_flow::flatten(global_data, local_data, ast_expr);
anonymous_fns.append(&mut anon);
current_block.append(BlockNode::Expr(ExprData {
expr: expr,
span: span,
}));
}
Stmt::ExprStmt(expr_stmt) => {
let (expr_stmt, expr_stmt_span) = expr_stmt.to_data();
match expr_stmt {
// Append assignment node to current basic block
ExprStmt::Assignment(assignment) => {
let (mut anon_fn, assignment) = typed_ast::Assignment::new(
global_data,
local_data,
assignment,
);
anonymous_fns.append(&mut anon_fn);
current_block.append(BlockNode::Assignment(
AssignmentData {
assignment: assignment,
span: expr_stmt_span,
},
));
}
// Append local variable declaration node to current basic block
ExprStmt::LocalVarDecl(decl) => {
let (mut anon_fn, decl) = typed_ast::LocalVarDecl::new(
global_data,
local_data,
decl,
expr_stmt_span.clone(),
);
anonymous_fns.append(&mut anon_fn);
current_block.append(BlockNode::LocalVarDecl(
LocalVarDeclData {
decl: decl,
span: expr_stmt_span,
},
));
}
// Append return node to current basic block
ExprStmt::Return(span, expr) => {
if current_block.is_empty() == false {
append_node!(
self,
head,
previous,
Node::Block(current_block)
);
current_block = BasicBlock::new();
}
let expr = expr
.map(|expr| {
let (mut anon_fn, expr) =
expr_flow::flatten(global_data, local_data, expr);
anonymous_fns.append(&mut anon_fn);
expr
});
append_node!(
self,
head,
previous,
Node::Return(ReturnData {
expr: expr,
span: span,
})
);
}
// Append break node to current basic block
ExprStmt::Break(span) => match loop_data {
Some((_, _foot, loop_id)) => {
if current_block.is_empty() == false {
append_node!(
self,
head,
previous,
Node::Block(current_block)
);
current_block = BasicBlock::new();
}
append_node!(
self,
head,
previous,
Node::Break(LoopData {
loop_id: loop_id,
span: span,
})
);
}
None => {
return Err(ControlFlowError::BadBreak(span))
}
},
// Append continue node to current basic block
ExprStmt::Continue(span) => match loop_data {
Some((_loop_head, _, loop_id)) => {
if current_block.is_empty() == false {
append_node!(
self,
head,
previous,
Node::Block(current_block)
);
current_block = BasicBlock::new();
}
append_node!(
self,
head,
previous,
Node::Continue(LoopData {
loop_id: loop_id,
span: span,
})
);
}
None => {
return Err(ControlFlowError::BadContinue(span))
}
},
ExprStmt::While(while_data) => {
// Generate a fragment of a CFG beginning with LoopHead and ending with
// LoopFoot.
// LoopHead connects to the loop body via a TRUE edge (or to the LoopFoot
// in the case of an empty body)
// LoopHead connects to the LoopFoot via a FALSE edge
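// For a non-empty body the emitted fragment therefore looks like:
//   LoopHead -True->  EnterScope -> body -> ExitScope -> LoopFoot
//   LoopHead -False-> LoopFoot
//   LoopFoot -BackEdge-> LoopHead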
// Append current basic block if not empty
if current_block.is_empty() == false {
append_node!(
self,
head,
previous,
Node::Block(current_block)
);
current_block = BasicBlock::new();
}
let (block, _) = while_data.block.to_data();
let loop_id = local_data.new_loop_id();
let expr_data = {
let (conditional, con_span) =
while_data.conditional.to_data();
let (mut anon_fn, expr) =
expr_flow::flatten(global_data, local_data, conditional);
anonymous_fns.append(&mut anon_fn);
ExprData {
expr: expr,
span: con_span,
}
};
let loop_data = LoopData {
loop_id: loop_id,
span: expr_stmt_span.clone(),
};
let loop_head = self.graph.add_node(
Node::LoopHead(loop_data.clone(), expr_data),
);
let loop_foot =
self.graph.add_node(Node::LoopFoot(loop_data));
// Connect loop foot to loop head with a backedge
self.graph.add_edge(
loop_foot,
loop_head,
Edge::BackEdge,
);
// Append the loop head to the graph
append_node_index!(self, head, previous, loop_head);
let instructions = block.0;
let loop_body = self.generate_scoped_block(
universe,
global_data,
local_data,
anonymous_fns,
instructions.into_iter(),
Some((loop_head, loop_foot, loop_id)),
)?;
// Connect the condition node to the loop foot by the FALSE path
self.graph.add_edge(
loop_head,
loop_foot,
Edge::False,
);
if let Some(loop_body_head) = loop_body.head {
let scope_enter =
self.graph.add_node(Node::EnterScope);
let scope_exit =
self.graph.add_node(Node::ExitScope);
// Connect the scope enter/exit to the loop body by the TRUE path
self.graph.add_edge(
loop_head,
scope_enter,
Edge::True,
);
// Connect the loop body to the scope enter and exit
self.graph.add_edge(
scope_enter,
loop_body_head,
Edge::Normal,
);
self.graph.add_edge(
loop_body.foot.unwrap(),
scope_exit,
Edge::Normal,
);
// Connect scope exit to loop foot
self.graph.add_edge(
scope_exit,
loop_foot,
Edge::Normal,
);
} else {
// Empty loop body
// Connect the condition node to the loop foot by the TRUE path
self.graph.add_edge(
loop_head,
loop_foot,
Edge::True,
);
}
previous = Some(loop_foot);
}
ExprStmt::If(if_data) => {
// If statements are broken down into "stacked branches"
// 1) Each BranchSplit represents a conditional split
// 2) Each True path is ended by a BranchMerge
// 3) The next branch's BranchSplit is connected to the previous BranchSplit via the
// False path
// 4) The default branch's head connects directly to the previous
// BranchSplit via the False path
// 5) The end of the current False path connects to the previous' BranchMerge
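// For example (scope nodes elided), `if c1 { A } elif c2 { B } else { C }`
// lowers to:
//   BranchSplit(c1) -True->  A -> BranchMerge(1)
//   BranchSplit(c1) -False-> BranchSplit(c2)
//   BranchSplit(c2) -True->  B -> BranchMerge(2) -> BranchMerge(1)
//   BranchSplit(c2) -False-> C -> BranchMerge(2)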
// Generates a scoped fragment of the CFG suitable for easy branching.
// If it is a conditional branch, generate the branch with a BranchSplit
// and BranchMerge at the heads.
// If it is the default branch (i.e. no condition), do not generate a
// BranchSplit or BranchMerge (keep ScopeEnter, ScopeExit)
fn generate_branch(
cfg: &mut CFG,
universe: &AnalysisUniverse,
global_data: &mut GlobalData,
local_data: &mut LocalData,
anonymous_fns: &mut AnonStorage<ReservedAnonymousFn>,
body: AstNode<Block>,
condition: Option<AstNode<Expr>>,
loop_data: InternalLoopData,
) -> Result<BranchData, ControlFlowError>
{
let (block, _) = body.to_data();
let instructions = block.0;
// Generate the branch subgraph
let branch_graph = cfg.generate_scoped_block(
universe,
global_data,
local_data,
anonymous_fns,
instructions.into_iter(),
loop_data,
)?;
let scope_enter =
cfg.graph.add_node(Node::EnterScope);
let scope_exit =
cfg.graph.add_node(Node::ExitScope);
match (branch_graph.head, branch_graph.foot) {
(Some(head), Some(foot)) => {
cfg.graph.add_edge(
scope_enter,
head,
Edge::Normal,
);
cfg.graph.add_edge(
foot,
scope_exit,
Edge::Normal,
);
}
(Some(head), None) => {
cfg.graph.add_edge(
scope_enter,
head,
Edge::Normal,
);
cfg.graph.add_edge(
head,
scope_exit,
Edge::Normal,
);
}
(None, None) => {
// Empty block
// Currently guaranteeing generate_branch() always returns
// head = Some, foot = Some
cfg.graph.add_edge(
scope_enter,
scope_exit,
Edge::Normal,
);
}
(None, Some(_)) => unreachable!(),
}
// Generate the BranchSplit and BranchMerge
// Make those the new head and foot of the body, respectively
match condition {
Some(ast_condition) => {
let branch_id =
local_data.new_branching_id();
let (conditional, con_span) =
ast_condition.to_data();
let (mut anon, expr) = expr_flow::flatten(
global_data,
local_data,
conditional,
);
anonymous_fns.append(&mut anon);
let expr_data = ExprData {
expr: expr,
span: con_span,
};
let branching_data = BranchingData {
branch_id: branch_id,
};
let split_node = cfg.graph.add_node(
Node::BranchSplit(
branching_data.clone(),
expr_data,
),
);
let merge_node = cfg.graph.add_node(
Node::BranchMerge(
branching_data.clone(),
),
);
cfg.graph.add_edge(
split_node,
scope_enter,
Edge::True,
);
cfg.graph.add_edge(
scope_exit,
merge_node,
Edge::Normal,
);
Ok(BranchData {
head: Some(split_node),
foot: Some(merge_node),
})
}
None => Ok(BranchData {
head: Some(scope_enter),
foot: Some(scope_exit),
}),
}
}
// Append current basic block if not empty
if current_block.is_empty() == false {
append_node!(
self,
head,
previous,
Node::Block(current_block)
);
current_block = BasicBlock::new();
}
let mut branches = if_data.branches.into_iter();
// Generate the first branch
let first_branch = branches.next().unwrap();
let first_branch = generate_branch(
self,
universe,
global_data,
local_data,
anonymous_fns,
first_branch.block,
Some(first_branch.conditional),
loop_data,
)?;
// Append the first branch to the overall CFG
append_node_index!(
self,
head,
previous,
first_branch.head
.expect("generate_branch() head should always be Some"),
Edge::Normal
);
let first_branch_foot = first_branch.foot.expect(
"generate_branch() foot should always be Some",
);
let mut previous_branch: BranchData = first_branch;
// Stack the branches
for branch in branches {
let branch = generate_branch(
self,
universe,
global_data,
local_data,
anonymous_fns,
branch.block,
Some(branch.conditional),
loop_data,
)?;
let branch_head = branch.head
.expect("generate_branch() head should always be Some");
let branch_foot = branch.foot
.expect("generate_branch() foot should always be Some");
let previous_head = previous_branch.head
.expect("generate_branch() head should always be Some");
let previous_foot = previous_branch.foot
.expect("generate_branch() foot should always be Some");
// Connect false edge of previous BranchSplit to current
// BranchSplit
self.graph.add_edge(
previous_head,
branch_head,
Edge::False,
);
// Connect current BranchMerge to previous BranchMerge ("stacking")
self.graph.add_edge(
branch_foot,
previous_foot,
Edge::Normal,
);
previous_branch = BranchData {
head: Some(branch_head),
foot: Some(branch_foot),
};
}
let previous_head = previous_branch.head.expect(
"generate_branch() head should always be Some",
);
let previous_foot = previous_branch.foot.expect(
"generate_branch() foot should always be Some",
);
// No more conditional branches.
// Check for the "else" branch.
match if_data.default_block {
Some(block) => {
// Found an "else" branch
let else_branch = generate_branch(
self,
universe,
global_data,
local_data,
anonymous_fns,
block,
None,
loop_data,
)?;
let else_head = else_branch.head
.expect("generate_branch() head should always be Some");
let else_foot = else_branch.foot
.expect("generate_branch() foot should always be Some");
// Connect false edge of previous BranchMerge to head of
// the else branch
self.graph.add_edge(
previous_head,
else_head,
Edge::False,
);
self.graph.add_edge(
else_foot,
previous_foot,
Edge::Normal,
);
}
None => {
// No default branch ("else"). Connect the previous BranchSplit
// to the previous BranchMerge with a FALSE edge
self.graph.add_edge(
previous_head,
previous_foot,
Edge::False,
);
}
}
// All other nodes are added after any branching.
previous = Some(first_branch_foot);
}
}
}
}
}
if current_block.is_empty() == false {
append_node!(self, head, previous, Node::Block(current_block));
}
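// head/foot remain None only when the block produced no nodes at all
// (i.e. an empty statement list).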
Ok(BranchData {
head: head,
foot: previous,
})
}
}
#[cfg(test)]
#[cfg_attr(rustfmt, rustfmt_skip)]
mod tests {
use super::*;
use super::super::type_checker::TypingContext;
use super::super::analysis_helpers::*;
use crate::parser::*;
use crate::parser::parser::*;
use crate::module::ModuleSource;
use crate::span::Span;
use petgraph::dot::{Config, Dot};
use petgraph::Direction;
use super::super::semantic_data::TypeId;
use super::super::analysis_context::AnalysisUniverse;
use super::super::type_cons::*;
macro_rules! edges {
($CFG: expr, $node: expr) => {
$CFG.graph.edges_directed($node, Direction::Outgoing)
}
}
macro_rules! node_w {
($CFG: expr, $node: expr) => {
$CFG.graph.node_weight($node).unwrap()
}
}
fn expected_app(tc: TypeId) -> AbstractType {
AbstractType::App {
data: Span::dummy(),
type_cons: tc,
args: Vec::new(),
}
}
fn fn_type_cons(params: Vec<AbstractType>, return_type: AbstractType) -> TypeCons {
let tc = TypeCons::Function {
parameters: params,
return_type: return_type,
type_params: TypeParams::empty(),
};
tc
}
#[test]
fn linear_cfg_generation() {
let input = "fn test(arg: int) {
let a: int = 2;
let b: int = 3;
}";
let mut global_data = GlobalData::new();
let mut local_data = LocalData::new();
let source = ModuleSource::Anonymous(None);
let mut input = buffer_input(&source, input);
let mut universe = AnalysisUniverse::std(&mut global_data);
let fn_type = fn_type_cons(vec![expected_app(universe.int())], expected_app(universe.unit()));
let fn_def = testfn_decl(&mut input).unwrap();
let analysis_context =
generate_fn_analysis_data(
&universe,
&mut global_data,
&mut local_data,
&universe.std_scope(),
&TypingContext::empty(),
&fn_type,
&fn_def
).unwrap();
let (_, cfg) = CFG::generate(
&universe,
&mut global_data,
&mut local_data,
fn_def.body.clone(),
&fn_type,
&analysis_context)
.unwrap();
println!("{:?}", Dot::with_config(&cfg.graph, &[Config::EdgeNoLabel]));
{
irmatch!(*cfg.graph.node_weight(cfg.start).unwrap(); Node::Start => ());
irmatch!(*cfg.graph.node_weight(cfg.end).unwrap(); Node::End => ());
// start -> enter_scope -> block -> implicit return -> exit_scope -> end
assert_eq!(cfg.graph.node_count(), 6);
let mut start_neighbors = neighbors!(cfg, cfg.start);
let enter = start_neighbors.next().unwrap();
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected to find Node::EnterScope. Found {:?}", n),
}
let block_1 = enter_neighbors.next().unwrap();
let mut block_1_neighbors = neighbors!(cfg, block_1);
match *node_w!(cfg, block_1) {
Node::Block(_) => (),
ref n @ _ => panic!("Expected to find Node::LocalVarDecl. Found {:?}", n),
}
let ret = block_1_neighbors.next().unwrap();
let mut ret_neighbors = neighbors!(cfg, ret);
match *node_w!(cfg, ret) {
Node::Return(..) => (),
ref n @ _ => panic!("Expected to find Node::Return. Found {:?}", n),
}
let exit = ret_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected to find Node::ExitScope. Found {:?}", n),
}
let end = exit_neighbors.next().unwrap();
let end_neighbors = neighbors!(cfg, end);
assert_eq!(end_neighbors.count(), 0);
match *node_w!(cfg, end) {
Node::End => (),
ref n @ _ => panic!("Expected to find Node::End. Found {:?}", n),
}
}
}
#[test]
fn branching_cfg_generation() {
let input = "fn test(arg: int) {
if (test) {
let c: int = 4;
}
}";
let mut global_data = GlobalData::new();
let mut local_data = LocalData::new();
let source = ModuleSource::Anonymous(None);
let mut input = buffer_input(&source, input);
let mut universe = AnalysisUniverse::std(&mut global_data);
let fn_type = fn_type_cons(vec![expected_app(universe.int())], expected_app(universe.unit()));
let fn_def = testfn_decl(&mut input).unwrap();
let analysis_context =
generate_fn_analysis_data(
&universe,
&mut global_data,
&mut local_data,
&universe.std_scope(),
&TypingContext::empty(),
&fn_type,
&fn_def
).unwrap();
let (_, cfg) = CFG::generate(
&universe,
&mut global_data,
&mut local_data,
fn_def.body.clone(),
&fn_type,
&analysis_context,
)
.unwrap();
println!("{:?}", Dot::with_config(&cfg.graph, &[Config::EdgeNoLabel]));
{
irmatch!(*cfg.graph.node_weight(cfg.start).unwrap(); Node::Start => ());
irmatch!(*cfg.graph.node_weight(cfg.end).unwrap(); Node::End => ());
// start -> enter_scope -> branch_split
// -[true]> {
// -> enter_scope
// -> block
// -> exit_scope
// } -> >>___ branch_merge ->
// -[false]> >>
// implicit return -> exit_scope -> end
assert_eq!(cfg.graph.node_count(), 10);
let mut start_neighbors = neighbors!(cfg, cfg.start);
let enter = start_neighbors.next().unwrap();
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected to find Node::EnterScope. Found {:?}", n),
}
let mut merge = None;
let branch_split = enter_neighbors.next().expect("Looking for BranchSplit");
match *node_w!(cfg, branch_split) {
Node::BranchSplit(..) => (),
ref n @ _ => panic!("Expected BranchSplit node. Found {:?}", n),
}
// Check condition node
let condition = branch_split;
let condition_node = cfg.graph.node_weight(condition).unwrap();
{
if let Node::BranchSplit(..) = *condition_node {
let edges = cfg.graph.edges_directed(condition, Direction::Outgoing);
assert_eq!(edges.clone().count(), 2);
let mut found_true_edge = false;
let mut found_false_edge = false;
// Look for True False edges and verify
for edge in edges {
if let Edge::True = *edge.weight() {
let target = edge.target();
match *cfg.graph.node_weight(target).unwrap() {
Node::EnterScope => {
let mut neighbors =
cfg.graph.neighbors_directed(target, Direction::Outgoing);
let decl = neighbors.next().unwrap();
match *cfg.graph.node_weight(decl).unwrap() {
Node::Block(_) => (),
ref n @ _ => panic!(
"Expected to find Node::LocalVarDecl. Found {:?}",
n
),
}
let mut neighbors =
cfg.graph.neighbors_directed(decl, Direction::Outgoing);
let exit_scope = neighbors.next().unwrap();
match *cfg.graph.node_weight(exit_scope).unwrap() {
Node::ExitScope => (),
ref n @ _ => panic!(
"Expected to find Node::ExitScope. Found {:?}",
n
),
}
}
ref n @ _ => {
panic!("Expected to find Node::EnterScope. Found {:?}", n)
}
}
found_true_edge = true;
} else if let Edge::False = *edge.weight() {
let target = edge.target();
if let Node::BranchMerge(_) = *cfg.graph.node_weight(target).unwrap() {
merge = Some(target);
}
found_false_edge = true;
}
}
assert!(found_true_edge);
assert!(found_false_edge);
} else {
panic!("Not a condition node");
}
}
let merge = merge.unwrap();
let return_n = cfg.graph.neighbors(merge).next().unwrap();
let mut return_neighbors = neighbors!(cfg, return_n);
match *node_w!(cfg, return_n) {
Node::Return(..) => (),
ref n @ _ => panic!("Expected to find Node::Return. Found {:?}", n),
}
let exit = return_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected to find Node::ExitScope. Found {:?}", n),
}
let end = exit_neighbors.next().unwrap();
let end_neighbors = neighbors!(cfg, end);
match *node_w!(cfg, end) {
Node::End => {
assert_eq!(end_neighbors.count(), 0);
}
ref n @ _ => panic!("Expected to find Node::ExitScope. Found {:?}", n),
}
}
}
#[test]
fn complex_branching_cfg_generation() {
let input = "fn test(arg: int) {
if (false) {
let c: int = 4;
} elif (true) {
} else {
}
}";
let mut global_data = GlobalData::new();
let mut local_data = LocalData::new();
let source = ModuleSource::Anonymous(None);
let mut input = buffer_input(&source, input);
let mut universe = AnalysisUniverse::std(&mut global_data);
let fn_type = fn_type_cons(vec![expected_app(universe.int())], expected_app(universe.unit()));
let fn_def = testfn_decl(&mut input).unwrap();
let analysis_context =
generate_fn_analysis_data(&universe,
&mut global_data,
&mut local_data,
&universe.std_scope(),
&TypingContext::empty(),
&fn_type,
&fn_def
).unwrap();
let (_, cfg) = CFG::generate(
&universe,
&mut global_data,
&mut local_data,
fn_def.body.clone(),
&fn_type,
&analysis_context,
)
.unwrap();
println!("{:?}", Dot::with_config(&cfg.graph, &[Config::EdgeNoLabel]));
{
// start -> enter_scope
// branch_split(A)
// -[true]> {
// enter_scope ->
// local_var_decl ->
// exit_scope
// }
//
// -[false]> {
// branch_split(C)
// -[true]> {
// scope_enter ->
// scope_exit
// }
//
// -[false]> {
// scope_enter ->
// scope_exit
// }
//
// branch_merge(C) ->
// }
// branch_merge(A) -> implicit_return -> exit_scope ->
// end
assert_eq!(cfg.graph.node_count(), 16);
let mut start_neighbors = neighbors!(cfg, cfg.start);
assert_eq!(start_neighbors.clone().count(), 1);
let enter = start_neighbors.next().unwrap();
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected to find Node::Enter. Found {:?}", n),
}
let branch_split_A = enter_neighbors.next().unwrap();
let _branch_split_neighbors_A = neighbors!(cfg, branch_split_A);
match *node_w!(cfg, branch_split_A) {
Node::BranchSplit(..) => (), // Success
ref n @ _ => panic!("Expected a condition node. Found {:?}", n),
}
let branch_split_A_edges = edges!(cfg, branch_split_A);
let mut branch_split_c = None;
let mut branch_split_A_true = None;
assert_eq!(branch_split_A_edges.clone().count(), 2);
for edge in branch_split_A_edges {
match *edge.weight() {
Edge::True => branch_split_A_true = Some(edge.target()),
Edge::False => branch_split_c = Some(edge.target()),
ref e @ _ => panic!("Expected true or false edge. Found {:?}", e),
}
}
// branch split a TRUE branch
let enter =
branch_split_A_true.expect("Missing true edge connecting to variable declaration");
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected Node::EnterScope. Found {:?}", n),
}
let var_decl = enter_neighbors.next().unwrap();
let mut var_decl_neighbors = neighbors!(cfg, var_decl);
assert_eq!(var_decl_neighbors.clone().count(), 1);
match *node_w!(cfg, var_decl) {
Node::Block(_) => (),
ref n @ _ => panic!("Expected local variable declartion. Found {:?}", n),
}
let exit = var_decl_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected Node::ExitScope. Found {:?}", n),
}
let merge = exit_neighbors.next().unwrap();
match *node_w!(cfg, merge) {
Node::BranchMerge(_) => (),
ref n @ _ => panic!("Expected Node::BranchMerge. Found {:?}", n),
}
// condition b FALSE branch (branch_split_c)
let branch_split_c = branch_split_c.expect("Missing false edge connecting to branch split C");
let branch_split_c_edges = edges!(cfg, branch_split_c);
let mut truth_target = None;
let mut false_target = None;
assert_eq!(branch_split_c_edges.clone().count(), 2);
for edge in branch_split_c_edges {
match *edge.weight() {
Edge::True => truth_target = Some(edge.target()),
Edge::False => false_target = Some(edge.target()),
ref e @ _ => panic!("Expected true or false edge. Found {:?}", e),
}
}
{
let enter =
truth_target.expect("Missing true edge connecting to empty block");
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected Node::EnterScope. Found {:?}", n),
}
let exit = enter_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected Node::ExitScope. Found {:?}", n),
}
let merge = exit_neighbors.next().unwrap();
match *node_w!(cfg, merge) {
Node::BranchMerge(_) => (),
ref n @ _ => panic!("Expected Node::BranchMerge. Found {:?}", n),
}
}
let merge_c = {
let enter =
false_target.expect("Missing true edge connecting to empty block");
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected Node::EnterScope. Found {:?}", n),
}
let exit = enter_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected Node::ExitScope. Found {:?}", n),
}
let merge = exit_neighbors.next().unwrap();
match *node_w!(cfg, merge) {
Node::BranchMerge(_) => (),
ref n @ _ => panic!("Expected Node::BranchMerge. Found {:?}", n),
}
merge
};
let mut merge_c_neighbors = neighbors!(cfg, merge_c);
let merge_a = merge_c_neighbors.next().unwrap();
let mut merge_a_neighbors = neighbors!(cfg, merge_a);
match *node_w!(cfg, merge_a) {
Node::BranchMerge(_) => (),
ref n @ _ => panic!("Expected BranchMerge. Found {:?}", n),
}
let implicit_return = merge_a_neighbors.next().unwrap();
let mut implicit_return_neighbors = neighbors!(cfg, implicit_return);
assert_eq!(implicit_return_neighbors.clone().count(), 1);
match *node_w!(cfg, implicit_return) {
Node::Return(..) => (),
ref n @ _ => panic!("Expected return node. Found {:?}", n),
}
let exit = implicit_return_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected to find Node::Exit. Found {:?}", n),
}
let end = exit_neighbors.next().unwrap();
irmatch!(*node_w!(cfg, end); Node::End => ());
}
}
#[test]
fn while_loop_generation() {
let input = "fn test(arg: int) {
while (true) {
}
}";
let mut global_data = GlobalData::new();
let mut local_data = LocalData::new();
let source = ModuleSource::Anonymous(None);
let mut input = buffer_input(&source, input);
let mut universe = AnalysisUniverse::std(&mut global_data);
let fn_type = fn_type_cons(vec![expected_app(universe.int())], expected_app(universe.unit()));
let fn_def = testfn_decl(&mut input).unwrap();
let analysis_context =
generate_fn_analysis_data(
&universe,
&mut global_data,
&mut local_data,
&universe.std_scope(),
&TypingContext::empty(),
&fn_type,
&fn_def
).unwrap();
let (_, cfg) = CFG::generate(
&universe,
&mut global_data,
&mut local_data,
fn_def.body.clone(),
&fn_type,
&analysis_context
)
.unwrap();
println!("{:?}", Dot::with_config(&cfg.graph, &[Config::EdgeNoLabel]));
// start -> enter_scope -> loop_head(A)
// -[true]> loop_foot(A)
// -[false]> loop_foot(A)
// loop_foot(A) -> implicit_return -> exit_scope -> end
// loop_head(A) << loop_foot(A)
//
assert_eq!(cfg.graph.node_count(), 7);
let mut start_neighbors = neighbors!(cfg, cfg.start);
assert_eq!(start_neighbors.clone().count(), 1);
let enter = start_neighbors.next().unwrap();
let mut enter_neighbors = neighbors!(cfg, enter);
match *node_w!(cfg, enter) {
Node::EnterScope => (),
ref n @ _ => panic!("Expected to find Node::Enter. Found {:?}", n),
}
let loop_id;
let loop_head = enter_neighbors.next().unwrap();
match *node_w!(cfg, loop_head) {
Node::LoopHead(ref loop_data, _) => loop_id = loop_data.loop_id,
ref n @ _ => panic!("Expected to find Node::LoopHead. Found {:?}", n),
}
let head_neighbors = neighbors!(cfg, loop_head);
assert_eq!(head_neighbors.clone().count(), 2);
let head_edges = edges!(cfg, loop_head);
assert_eq!(head_edges.clone().count(), 2);
let mut truth_target = None;
let mut false_target = None;
for edge in head_edges {
match *edge.weight() {
Edge::True => truth_target = Some(edge.target()),
Edge::False => false_target = Some(edge.target()),
ref e @ _ => panic!("Expected true or false edge. Found {:?}", e),
}
}
let truth_target = truth_target.unwrap();
let false_target = false_target.unwrap();
match *node_w!(cfg, truth_target) {
Node::LoopFoot(ref loop_data) => assert_eq!(loop_data.loop_id, loop_id),
ref n @ _ => panic!("Expected to find Node::LoopFoot. Found {:?}", n),
}
let mut foot_neighbors = neighbors!(cfg, truth_target);
assert_eq!(truth_target, false_target);
assert_eq!(foot_neighbors.clone().count(), 2);
let implicit_return = foot_neighbors.next().unwrap();
let mut return_neighbors = neighbors!(cfg, implicit_return);
assert_eq!(return_neighbors.clone().count(), 1);
match *node_w!(cfg, implicit_return) {
Node::Return(..) => (),
ref n @ _ => panic!("Expected return node. Found {:?}", n),
}
let exit = return_neighbors.next().unwrap();
let mut exit_neighbors = neighbors!(cfg, exit);
match *node_w!(cfg, exit) {
Node::ExitScope => (),
ref n @ _ => panic!("Expected to find Node::ExitScope. Found {:?}", n),
}
let end = exit_neighbors.next().unwrap();
let end_neighbors = neighbors!(cfg, end);
assert_eq!(end_neighbors.count(), 0);
match *node_w!(cfg, end) {
Node::End => (),
ref n @ _ => panic!("Expected to find Node::End. Found {:?}", n),
}
}
}
lv_test.go
package lv
import (
"testing"
"time"
"github.com/nicola-spb/locales"
"github.com/nicola-spb/locales/currency"
)
func TestLocale(t *testing.T) {
trans := New()
expected := "lv"
if trans.Locale() != expected {
t.Errorf("Expected '%s' Got '%s'", expected, trans.Locale())
}
}
func TestPluralsRange(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsRange()
// expected := 1
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsOrdinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleTwo,
// },
// {
// expected: locales.PluralRuleFew,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsOrdinal()
// expected := 4
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestPluralsCardinal(t *testing.T) {
trans := New()
tests := []struct {
expected locales.PluralRule
}{
// {
// expected: locales.PluralRuleOne,
// },
// {
// expected: locales.PluralRuleOther,
// },
}
rules := trans.PluralsCardinal()
// expected := 2
// if len(rules) != expected {
// t.Errorf("Expected '%d' Got '%d'", expected, len(rules))
// }
for _, tt := range tests {
r := locales.PluralRuleUnknown
for i := 0; i < len(rules); i++ {
if rules[i] == tt.expected {
r = rules[i]
break
}
}
if r == locales.PluralRuleUnknown {
t.Errorf("Expected '%s' Got '%s'", tt.expected, r)
}
}
}
func TestRangePlurals(t *testing.T) {
trans := New()
tests := []struct {
num1 float64
v1 uint64
num2 float64
v2 uint64
expected locales.PluralRule
}{
// {
// num1: 1,
// v1: 1,
// num2: 2,
// v2: 2,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.RangePluralRule(tt.num1, tt.v1, tt.num2, tt.v2)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestOrdinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 2,
// v: 0,
// expected: locales.PluralRuleTwo,
// },
// {
// num: 3,
// v: 0,
// expected: locales.PluralRuleFew,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.OrdinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestCardinalPlurals(t *testing.T) {
trans := New()
tests := []struct {
num float64
v uint64
expected locales.PluralRule
}{
// {
// num: 1,
// v: 0,
// expected: locales.PluralRuleOne,
// },
// {
// num: 4,
// v: 0,
// expected: locales.PluralRuleOther,
// },
}
for _, tt := range tests {
rule := trans.CardinalPluralRule(tt.num, tt.v)
if rule != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, rule)
}
}
}
func TestDaysAbbreviated(t *testing.T) {
trans := New()
days := trans.WeekdaysAbbreviated()
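// Round-trip check: WeekdayAbbreviated must agree with the WeekdaysAbbreviated
// slice for every weekday index before the fixture checks below.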
for i, day := range days {
s := trans.WeekdayAbbreviated(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sun",
// },
// {
// idx: 1,
// expected: "Mon",
// },
// {
// idx: 2,
// expected: "Tue",
// },
// {
// idx: 3,
// expected: "Wed",
// },
// {
// idx: 4,
// expected: "Thu",
// },
// {
// idx: 5,
// expected: "Fri",
// },
// {
// idx: 6,
// expected: "Sat",
// },
}
for _, tt := range tests {
s := trans.WeekdayAbbreviated(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysNarrow(t *testing.T) {
trans := New()
days := trans.WeekdaysNarrow()
for i, day := range days {
s := trans.WeekdayNarrow(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", string(day), s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "S",
// },
// {
// idx: 1,
// expected: "M",
// },
// {
// idx: 2,
// expected: "T",
// },
// {
// idx: 3,
// expected: "W",
// },
// {
// idx: 4,
// expected: "T",
// },
// {
// idx: 5,
// expected: "F",
// },
// {
// idx: 6,
// expected: "S",
// },
}
for _, tt := range tests {
s := trans.WeekdayNarrow(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysShort(t *testing.T) {
trans := New()
days := trans.WeekdaysShort()
for i, day := range days {
s := trans.WeekdayShort(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Su",
// },
// {
// idx: 1,
// expected: "Mo",
// },
// {
// idx: 2,
// expected: "Tu",
// },
// {
// idx: 3,
// expected: "We",
// },
// {
// idx: 4,
// expected: "Th",
// },
// {
// idx: 5,
// expected: "Fr",
// },
// {
// idx: 6,
// expected: "Sa",
// },
}
for _, tt := range tests {
s := trans.WeekdayShort(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestDaysWide(t *testing.T) {
trans := New()
days := trans.WeekdaysWide()
for i, day := range days {
s := trans.WeekdayWide(time.Weekday(i))
if s != day {
t.Errorf("Expected '%s' Got '%s'", day, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 0,
// expected: "Sunday",
// },
// {
// idx: 1,
// expected: "Monday",
// },
// {
// idx: 2,
// expected: "Tuesday",
// },
// {
// idx: 3,
// expected: "Wednesday",
// },
// {
// idx: 4,
// expected: "Thursday",
// },
// {
// idx: 5,
// expected: "Friday",
// },
// {
// idx: 6,
// expected: "Saturday",
// },
}
for _, tt := range tests {
s := trans.WeekdayWide(time.Weekday(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsAbbreviated(t *testing.T) {
trans := New()
months := trans.MonthsAbbreviated()
for i, month := range months {
s := trans.MonthAbbreviated(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "Jan",
// },
// {
// idx: 2,
// expected: "Feb",
// },
// {
// idx: 3,
// expected: "Mar",
// },
// {
// idx: 4,
// expected: "Apr",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "Jun",
// },
// {
// idx: 7,
// expected: "Jul",
// },
// {
// idx: 8,
// expected: "Aug",
// },
// {
// idx: 9,
// expected: "Sep",
// },
// {
// idx: 10,
// expected: "Oct",
// },
// {
// idx: 11,
// expected: "Nov",
// },
// {
// idx: 12,
// expected: "Dec",
// },
}
for _, tt := range tests {
s := trans.MonthAbbreviated(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsNarrow(t *testing.T) {
trans := New()
months := trans.MonthsNarrow()
for i, month := range months {
s := trans.MonthNarrow(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "J",
// },
// {
// idx: 2,
// expected: "F",
// },
// {
// idx: 3,
// expected: "M",
// },
// {
// idx: 4,
// expected: "A",
// },
// {
// idx: 5,
// expected: "M",
// },
// {
// idx: 6,
// expected: "J",
// },
// {
// idx: 7,
// expected: "J",
// },
// {
// idx: 8,
// expected: "A",
// },
// {
// idx: 9,
// expected: "S",
// },
// {
// idx: 10,
// expected: "O",
// },
// {
// idx: 11,
// expected: "N",
// },
// {
// idx: 12,
// expected: "D",
// },
}
for _, tt := range tests {
s := trans.MonthNarrow(time.Month(tt.idx))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestMonthsWide(t *testing.T) {
trans := New()
months := trans.MonthsWide()
for i, month := range months {
s := trans.MonthWide(time.Month(i + 1))
if s != month {
t.Errorf("Expected '%s' Got '%s'", month, s)
}
}
tests := []struct {
idx int
expected string
}{
// {
// idx: 1,
// expected: "January",
// },
// {
// idx: 2,
// expected: "February",
// },
// {
// idx: 3,
// expected: "March",
// },
// {
// idx: 4,
// expected: "April",
// },
// {
// idx: 5,
// expected: "May",
// },
// {
// idx: 6,
// expected: "June",
// },
// {
// idx: 7,
// expected: "July",
// },
// {
// idx: 8,
// expected: "August",
// },
// {
// idx: 9,
// expected: "September",
// },
// {
// idx: 10,
// expected: "October",
// },
// {
// idx: 11,
// expected: "November",
// },
// {
// idx: 12,
// expected: "December",
// },
}
for _, tt := range tests {
s := string(trans.MonthWide(time.Month(tt.idx)))
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeFull(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
// fixed := time.FixedZone("OTHER", -4)
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am Eastern Standard Time",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, fixed),
// expected: "8:05:01 pm OTHER",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeLong(t *testing.T) {
// loc, err := time.LoadLocation("America/Toronto")
// if err != nil {
// t.Errorf("Expected '<nil>' Got '%s'", err)
// }
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, loc),
// expected: "9:05:01 am EST",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, loc),
// expected: "8:05:01 pm EST",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05:01 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05:01 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtTimeShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 5, 1, 0, time.UTC),
// expected: "9:05 am",
// },
// {
// t: time.Date(2016, 02, 03, 20, 5, 1, 0, time.UTC),
// expected: "8:05 pm",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtTimeShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateFull(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Wednesday, February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateFull(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateLong(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "February 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateLong(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateMedium(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "Feb 3, 2016",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateMedium(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtDateShort(t *testing.T) {
tests := []struct {
t time.Time
expected string
}{
// {
// t: time.Date(2016, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/16",
// },
// {
// t: time.Date(-500, 02, 03, 9, 0, 1, 0, time.UTC),
// expected: "2/3/500",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtDateShort(tt.t)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtNumber(t *testing.T) {
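// v is the number of decimal places to render, per the commented fixtures below.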
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// expected: "1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// expected: "1,123,456.6",
// },
// {
// num: 221123456.5643,
// v: 3,
// expected: "221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// expected: "-221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
// {
// num: -0,
// v: 2,
// expected: "0.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtNumber(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtCurrency(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "-$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "-CAD 221,123,456.564",
// },
// {
// num: 0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtCurrency(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtAccounting(t *testing.T) {
tests := []struct {
num float64
v uint64
currency currency.Type
expected string
}{
// {
// num: 1123456.5643,
// v: 2,
// currency: currency.USD,
// expected: "$1,123,456.56",
// },
// {
// num: 1123456.5643,
// v: 1,
// currency: currency.USD,
// expected: "$1,123,456.60",
// },
// {
// num: 221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "$221,123,456.564",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.USD,
// expected: "($221,123,456.564)",
// },
// {
// num: -221123456.5643,
// v: 3,
// currency: currency.CAD,
// expected: "(CAD 221,123,456.564)",
// },
// {
// num: -0,
// v: 2,
// currency: currency.USD,
// expected: "$0.00",
// },
// {
// num: -0,
// v: 2,
// currency: currency.CAD,
// expected: "CAD 0.00",
// },
// {
// num: 1.23,
// v: 0,
// currency: currency.USD,
// expected: "$1.00",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtAccounting(tt.num, tt.v, tt.currency)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
func TestFmtPercent(t *testing.T) {
tests := []struct {
num float64
v uint64
expected string
}{
// {
// num: 15,
// v: 0,
// expected: "15%",
// },
// {
// num: 15,
// v: 2,
// expected: "15.00%",
// },
// {
// num: 434.45,
// v: 0,
// expected: "434%",
// },
// {
// num: 34.4,
// v: 2,
// expected: "34.40%",
// },
// {
// num: -34,
// v: 0,
// expected: "-34%",
// },
}
trans := New()
for _, tt := range tests {
s := trans.FmtPercent(tt.num, tt.v)
if s != tt.expected {
t.Errorf("Expected '%s' Got '%s'", tt.expected, s)
}
}
}
cats.controller.ts
import { Controller, Post, Body, Get, Query, Param, Put, Delete } from '@nestjs/common';
import { CreateCatDto, ListAllEntities, UpdateCatDto } from 'src/interface/cats.interface';
import { CatsService } from './cats.service';
@Controller('cats')
export class CatsController {
constructor(private readonly catsService: CatsService) {}
@Post()
create(@Body() createCatDto: CreateCatDto) {
return 'This action adds a new cat';
}
@Get()
async findAll(@Query() query: ListAllEntities) {
return this.catsService.findAllCats();
// Unreachable after the return above; kept from the original scaffold:
// return `This action returns all cats (limit: ${query.limit} items)`;
}
@Get(':id')
findOne(@Param('id') id: string) {
return `This action returns a #${id} cat`;
}
@Put(':id')
update(@Param('id') id: string, @Body() updateCatDto: UpdateCatDto) {
return `This action updates a #${id} cat`;
}
@Delete(':id')
remove(@Param('id') id: string) {
return `This action removes a #${id} cat`;
}
}
calendarEvent.ts
export default interface CalendarEvent {
professor: string,
startDate: string,
startTime: string,
location: string,
summary: string,
uid: string,
}
transparent_render_phase.rs
use rafx::nodes::RenderPhase;
use rafx::nodes::{RenderPhaseIndex, SubmitNode};
rafx::declare_render_phase!(
TransparentRenderPhase,
TRANSPARENT_RENDER_PHASE_INDEX,
transparent_render_phase_sort_submit_nodes
);
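// The macro call above registers TransparentRenderPhase under
// TRANSPARENT_RENDER_PHASE_INDEX and wires in the sort function below; the
// function name must match the macro's third argument.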
#[profiling::function]
fn transparent_render_phase_sort_submit_nodes(
mut submit_nodes: Vec<SubmitNode>
) -> Vec<SubmitNode> {
// Sort by distance from camera back to front
log::trace!(
"Sort phase {}",
TransparentRenderPhase::render_phase_debug_name()
);
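// Comparing b against a sorts by descending distance, so the furthest
// submit nodes come first.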
submit_nodes.sort_unstable_by(|a, b| b.distance().partial_cmp(&a.distance()).unwrap());
submit_nodes
}
series_test.go
// Copyright 2018-20 PJ Engineering and Business Solutions Pty. Ltd. All rights reserved.
package dataframe
import (
"context"
"fmt"
"strings"
"testing"
"time"
"cloud.google.com/go/civil"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
)
func TestSeriesRename(t *testing.T) {
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}),
NewSeriesInt64("test", &SeriesInit{1, 0}),
NewSeriesString("test", &SeriesInit{1, 0}),
NewSeriesTime("test", &SeriesInit{1, 0}),
NewSeriesMixed("test", &SeriesInit{1, 0}),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}),
}
for i := range init {
s := init[i]
// Rename series
s.Rename("test2")
if s.Name() != "test2" {
t.Errorf("wrong name")
}
}
}
func TestSeriesType(t *testing.T) {
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}),
NewSeriesInt64("test", &SeriesInit{1, 0}),
NewSeriesString("test", &SeriesInit{1, 0}),
NewSeriesTime("test", &SeriesInit{1, 0}),
NewSeriesMixed("test", &SeriesInit{1, 0}),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{1, 0}),
}
expected := []string{
"float64",
"int64",
"string",
"time",
"mixed",
"civil.Date",
}
for i := range init {
s := init[i]
if s.Type() != expected[i] {
t.Errorf("wrong type: expected: %v actual: %v", expected[i], s.Type())
}
}
}
func TestSeriesNRows(t *testing.T) {
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, nil, 2.0, 3.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, nil, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", nil, "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, time.Now(), nil, time.Now(), time.Now()),
NewSeriesMixed("test", &SeriesInit{1, 0}, 1, nil, 2, 3),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, nil, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}),
}
expected := []int{
4,
4,
4,
4,
4,
4,
}
for i := range init {
s := init[i]
if s.NRows() != expected[i] {
t.Errorf("wrong val: expected: %v actual: %v", expected[i], s.NRows())
}
}
}
func TestSeriesOperations(t *testing.T) {
// Create new series
init := []Series{
NewSeriesFloat64("test", nil),
NewSeriesInt64("test", nil),
NewSeriesString("test", nil),
NewSeriesTime("test", nil),
NewSeriesMixed("test", nil),
NewSeriesGeneric("test", civil.Date{}, nil),
}
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Append and Prepend value
appendVals := []interface{}{
1.0, 2.0, 3.0, 4.0,
1, 2, 3, 4,
"1", "2", "3", "4",
tRef, tRef.Add(24 * time.Hour), tRef.Add(2 * 24 * time.Hour), tRef.Add(3 * 24 * time.Hour),
1, 2, 3, 4,
civil.Date{2018, time.May, 1}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 3}, civil.Date{2018, time.May, 4},
}
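// Each series consumes four consecutive values from appendVals (the 4*i
// indexing below): two appended, one prepended, one inserted at the tail.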
for i := range init {
s := init[i]
s.Append(appendVals[4*i+0])
s.Append(appendVals[4*i+1])
s.Prepend(appendVals[4*i+2])
s.Insert(s.NRows(), appendVals[4*i+3])
}
// Remove middle value
for i := range init {
s := init[i]
s.Remove(1)
}
// Test values
expectedValues := [][]interface{}{
{3.0, 2.0, 4.0},
{3, 2, 4},
{"3", "2", "4"},
{tRef.Add(2 * 24 * time.Hour), tRef.Add(24 * time.Hour), tRef.Add(3 * 24 * time.Hour)},
{3, 2, 4},
{civil.Date{2018, time.May, 3}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 4}},
}
for i := range init {
s := init[i]
exVals := expectedValues[i]
for row := 0; row < len(exVals); row++ {
rowVal := s.ValueString(row)
exp := exVals[row]
if rowVal != fmt.Sprintf("%v", exp) {
t.Errorf("wrong val: expected: %v actual: %v", exp, rowVal)
}
}
}
}
func TestSeriesUpdate(t *testing.T) {
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, 2.0, 3.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour)),
NewSeriesMixed("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 1}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 3}),
}
// Update values
for i := range init {
s := init[i]
switch s.Type() {
case "float64":
s.Update(0, 99.0)
case "int64":
s.Update(0, 99)
case "string":
s.Update(0, "99")
case "time":
s.Update(0, tRef.Add(99*24*time.Hour))
case "mixed":
s.Update(0, 99)
case "civil.Date":
s.Update(0, civil.Date{2018, time.May, 99})
}
}
expectedValues := [][]interface{}{
{99.0, 2.0, 3.0},
{99, 2, 3},
{"99", "2", "3"},
{tRef.Add(99 * 24 * time.Hour), tRef.Add(24 * time.Hour), tRef.Add(2 * 24 * time.Hour)},
{99, 2, 3},
{civil.Date{2018, time.May, 99}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 3}},
}
for i := range init {
s := init[i]
exVals := expectedValues[i]
for row := 0; row < len(exVals); row++ {
rowVal := s.ValueString(row)
exp := exVals[row]
if rowVal != fmt.Sprintf("%v", exp) {
t.Errorf("wrong val: expected: %v actual: %v", exp, rowVal)
}
}
}
}
func TestSeriesSwap(t *testing.T) {
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, 2.0, 3.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour)),
NewSeriesMixed("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}),
}
expectedValues := [][]interface{}{
{3.0, 2.0, 1.0},
{3, 2, 1},
{"3", "2", "1"},
{tRef.Add(2 * 24 * time.Hour), tRef.Add(24 * time.Hour), tRef},
{3, 2, 1},
{civil.Date{2018, time.May, 3}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 1}},
}
for i := range init {
s := init[i]
s.Lock()
s.Swap(0, 2, DontLock)
s.Unlock()
exVals := expectedValues[i]
for row := 0; row < len(exVals); row++ {
rowVal := s.ValueString(row)
exp := exVals[row]
if rowVal != fmt.Sprintf("%v", exp) {
t.Errorf("wrong val: expected: %v actual: %v", exp, rowVal)
}
}
}
}
func TestSeriesSort(t *testing.T) {
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, nil, 1.0, 2.0, 3.0, nil),
NewSeriesInt64("test", &SeriesInit{1, 0}, nil, 1, 2, 3, nil),
NewSeriesString("test", &SeriesInit{1, 0}, nil, "1", "2", "3", nil),
NewSeriesTime("test", &SeriesInit{1, 0}, nil, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour), nil),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, nil, civil.Date{2018, time.May, 01}, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}, nil),
// NewSeriesMixed("test", &SeriesInit{1, 0}, nil, 1, 2, 3, nil),
}
// Set IsLessThanFunc(a, b interface{}) bool
(init[4].(*SeriesGeneric)).SetIsLessThanFunc(nil)
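// Setting nil first presumably exercises the comparator-reset path before
// installing the real civil.Date comparator below.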
(init[4].(*SeriesGeneric)).SetIsLessThanFunc(func(a, b interface{}) bool {
g1 := a.(civil.Date)
g2 := b.(civil.Date)
return g1.Before(g2)
})
// (init[5].(*SeriesMixed)).SetIsLessThanFunc(func(a, b interface{}) bool {
// return b.(int) > a.(int)
// })
// Sort values
for i := range init {
s := init[i]
s.Sort(context.Background(), SortOptions{Desc: true})
}
expectedValues := [][]interface{}{
{3.0, 2.0, 1.0, "NaN", "NaN"},
{3, 2, 1, "NaN", "NaN"},
{"3", "2", "1", "NaN", "NaN"},
{tRef.Add(2 * 24 * time.Hour), tRef.Add(24 * time.Hour), tRef, "NaN", "NaN"},
{civil.Date{2018, time.May, 3}, civil.Date{2018, time.May, 2}, civil.Date{2018, time.May, 1}, "NaN", "NaN"},
// {3, 2, 1, "NaN", "NaN"},
}
for i := range init {
s := init[i]
exVals := expectedValues[i]
for row := 0; row < len(exVals); row++ {
rowVal := s.ValueString(row)
exp := exVals[row]
if rowVal != fmt.Sprintf("%v", exp) {
t.Errorf("wrong val: expected: %v actual: %v", exp, rowVal)
}
}
}
}
type Tabler interface {
Table(r ...Range) string
String() string
}
func TestSeriesTable(t *testing.T) {
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, 2.0, 3.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour)),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}),
}
expected := []string{
`+-----+---------+
| | TEST |
+-----+---------+
| 0: | 1 |
| 1: | 2 |
| 2: | 3 |
+-----+---------+
| 3X1 | FLOAT64 |
+-----+---------+`,
`+-----+-------+
| | TEST |
+-----+-------+
| 0: | 1 |
| 1: | 2 |
| 2: | 3 |
+-----+-------+
| 3X1 | INT64 |
+-----+-------+`,
`+-----+--------+
| | TEST |
+-----+--------+
| 0: | 1 |
| 1: | 2 |
| 2: | 3 |
+-----+--------+
| 3X1 | STRING |
+-----+--------+`,
`+-----+-------------------------------+
| | TEST |
+-----+-------------------------------+
| 0: | 2017-01-01 05:30:12 +0000 UTC |
| 1: | 2017-01-02 05:30:12 +0000 UTC |
| 2: | 2017-01-03 05:30:12 +0000 UTC |
+-----+-------------------------------+
| 3X1 | TIME |
+-----+-------------------------------+`,
`+-----+------------+
| | TEST |
+-----+------------+
| 0: | 2018-05-01 |
| 1: | 2018-05-02 |
| 2: | 2018-05-03 |
+-----+------------+
| 3X1 | CIVIL DATE |
+-----+------------+`,
}
for i := range init {
s := init[i]
if v, ok := s.(Tabler); ok {
if strings.TrimSpace(v.Table()) != strings.TrimSpace(expected[i]) {
t.Errorf("wrong val: expected: %v actual: %v", expected[i], v.Table())
}
}
}
}
func TestSeriesString(t *testing.T) {
tRef := time.Date(2017, 1, 1, 5, 30, 12, 0, time.UTC)
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, 2.0, 3.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour)),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}),
| NewSeriesTime("test", &SeriesInit{1, 0}, tRef, tRef.Add(24*time.Hour), tRef.Add(2*24*time.Hour), tRef.Add(3*24*time.Hour), tRef.Add(4*24*time.Hour), tRef.Add(5*24*time.Hour), tRef.Add(6*24*time.Hour)),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}, civil.Date{2018, time.May, 04}, civil.Date{2018, time.May, 05}, civil.Date{2018, time.May, 06}, civil.Date{2018, time.May, 07}),
}
expected := []string{`[ 1 2 3 ]`,
`[ 1 2 3 ]`,
`[ 1 2 3 ]`,
`[ 2017-01-01 05:30:12 +0000 UTC 2017-01-02 05:30:12 +0000 UTC 2017-01-03 05:30:12 +0000 UTC ]`,
`[ 2018-05-01 2018-05-02 2018-05-03 ]`,
`[ 1 2 3 ... 5 6 7 ]`,
`[ 1 2 3 ... 5 6 7 ]`,
`[ 1 2 3 ... 5 6 7 ]`,
`[ 2017-01-01 05:30:12 +0000 UTC 2017-01-02 05:30:12 +0000 UTC 2017-01-03 05:30:12 +0000 UTC ... 2017-01-05 05:30:12 +0000 UTC 2017-01-06 05:30:12 +0000 UTC 2017-01-07 05:30:12 +0000 UTC ]`,
`[ 2018-05-01 2018-05-02 2018-05-03 ... 2018-05-05 2018-05-06 2018-05-07 ]`,
}
for i := range init {
s := init[i]
if v, ok := s.(Tabler); ok {
if strings.TrimSpace(v.String()) != strings.TrimSpace(expected[i]) {
t.Errorf("wrong val: expected: %v actual: %v", expected[i], v.String())
}
}
}
}
func TestSeriesCopy(t *testing.T) {
// Create new series
init := []Series{
NewSeriesFloat64("test", &SeriesInit{1, 0}, 1, nil, 2, 3),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, nil, 2, 3),
NewSeriesString("test", &SeriesInit{1, 0}, "1", nil, "2", "3"),
NewSeriesTime("test", &SeriesInit{1, 0}, time.Now(), nil, time.Now(), time.Now()),
NewSeriesMixed("test", &SeriesInit{1, 0}, 1, nil, 2, 3),
NewSeriesGeneric("test", civil.Date{}, &SeriesInit{0, 1}, civil.Date{2018, time.May, 01}, nil, civil.Date{2018, time.May, 02}, civil.Date{2018, time.May, 03}),
}
for i := range init {
s := init[i]
cp := s.Copy()
if !cmp.Equal(s, cp, cmpopts.EquateNaNs(), cmpopts.IgnoreUnexported(SeriesFloat64{}, SeriesInt64{}, SeriesString{}, SeriesTime{}, SeriesMixed{}, SeriesGeneric{})) {
t.Errorf("wrong val: expected: %v actual: %v", s, cp)
}
}
}
func TestToSeriesString(t *testing.T) {
ctx := context.Background()
sm := NewSeriesMixed("test", &SeriesInit{1, 0}, 1, nil, 2, 3)
ss, err := sm.ToSeriesString(ctx, false)
if err != nil {
t.Errorf("error encountered: %s\n", err)
}
// convert SeriesString back to SeriesMixed
_, err = ss.ToSeriesMixed(ctx, false)
if err != nil {
t.Errorf("error encountered: %s\n", err)
}
} | NewSeriesFloat64("test", &SeriesInit{1, 0}, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0),
NewSeriesInt64("test", &SeriesInit{1, 0}, 1, 2, 3, 4, 5, 6, 7),
NewSeriesString("test", &SeriesInit{1, 0}, "1", "2", "3", "4", "5", "6", "7"), |
benchmark_test.go | package prometheus
import (
"github.com/timescale/promscale/pkg/prompb"
"github.com/timescale/tsbs/cmd/tsbs_load_prometheus/adapter/noop"
"net/http"
"net/http/httptest"
"net/url"
"sync"
"testing"
)
func | (t *testing.T) {
adapter := noop.Adapter{}
server := httptest.NewServer(http.HandlerFunc(adapter.Handler))
serverURL, err := url.Parse(server.URL)
if err != nil {
t.Fatal(err)
}
pb := Benchmark{
adapterWriteUrl: serverURL.String(),
batchPool: &sync.Pool{},
}
pp := pb.GetProcessor().(*Processor)
batch := &Batch{series: []prompb.TimeSeries{{}}}
samples, _ := pp.ProcessBatch(batch, true)
if samples != 1 {
t.Error("wrong number of samples")
}
if adapter.SampleCounter != samples {
t.Error("wrong number of samples processed")
}
}
| TestPrometheusLoader |
user.service.ts | import {Injectable} from "@angular/core";
import {Observable} from "rxjs";
import {Course} from "../model/course.model";
import {Http} from "@angular/http";
import {User} from "../model/user.model";
@Injectable()
export class | {
authorsCandidatesList: User[] = [];
constructor(private http: Http) {
let candidateOne = new User("Vasily", "Pupkin");
candidateOne.id = 1;
this.authorsCandidatesList.push(candidateOne);
let candidateTwo = new User("Petr", "Sakhorov");
candidateTwo.id = 2;
this.authorsCandidatesList.push(candidateTwo);
let candidateThree = new User("Danila", "Gvozdev");
candidateThree.id = 3;
this.authorsCandidatesList.push(candidateThree);
};
getAuthorCandidates(): Observable<User[]> {
let self = this;
return Observable.create(observer => {
observer.next(self.authorsCandidatesList);
})
}
}
| UserService |
hashd.rs | // Copyright (c) Facebook, Inc. and its affiliates.
use anyhow::{bail, Result};
use log::{debug, info, warn};
use std::collections::HashSet;
use std::io;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
use util::*;
use rd_agent_intf::{HashdCmd, HashdKnobs, HashdReport, Slice, HASHD_A_SVC_NAME, HASHD_B_SVC_NAME};
use rd_hashd_intf;
use super::Config;
use super::HashdSel;
pub fn hashd_path_args(cfg: &Config, sel: HashdSel) -> Vec<String> {
let paths = &cfg.hashd_paths[sel as usize];
let mut args = vec![
paths.bin.clone(),
"--args".into(),
paths.args.clone(),
"--params".into(),
paths.params.clone(),
"--report".into(),
paths.report.clone(),
"--testfiles".into(),
paths.tf.clone(),
"--log-dir".into(),
paths.log_dir.clone(),
"--interval".into(),
"1".into(),
];
if cfg.verbosity > 0 {
args.push("-".to_string() + &"v".repeat(cfg.verbosity as usize));
}
args
}
pub struct Hashd {
name: String,
params_path: String,
report_path: String,
path_args: Vec<String>,
lat_target_pct: f64,
rps_max: u32,
file_max_ratio: f64,
svc: Option<TransientService>,
started_at: Option<SystemTime>,
}
impl Hashd {
fn start(&mut self, mem_size: u64) -> Result<()> {
let mut args = self.path_args.clone();
args.push("--size".into());
args.push(format!("{}", mem_size));
args.push("--file-max".into());
args.push(format!("{}", self.file_max_ratio));
debug!("args: {:#?}", &args);
let mut svc = TransientService::new_sys(self.name.clone(), args, Vec::new(), Some(0o002))?;
svc.set_slice(Slice::Work.name()).start()?;
self.svc = Some(svc);
self.started_at = Some(SystemTime::now());
Ok(())
}
fn update_params(&mut self, knobs: &HashdKnobs, cmd: &HashdCmd, frac: f64) -> Result<()> {
self.lat_target_pct = cmd.lat_target_pct;
self.rps_max = ((knobs.rps_max as f64 * frac).round() as u32).max(1);
let rps_target = ((self.rps_max as f64 * cmd.rps_target_ratio).round() as u32).max(1);
let mem_frac = match cmd.mem_ratio {
Some(v) => v,
None => knobs.mem_frac,
};
let file_addr_stdev = match cmd.file_addr_stdev {
Some(v) => v,
None => rd_hashd_intf::Params::default().file_addr_stdev_ratio,
};
let anon_addr_stdev = match cmd.anon_addr_stdev {
Some(v) => v,
None => rd_hashd_intf::Params::default().anon_addr_stdev_ratio,
};
let mut params = match rd_hashd_intf::Params::load(&self.params_path) {
Ok(v) => v,
Err(e) => {
info!(
"hashd: Failed to load {:?} ({:?}), using default",
&self.params_path, &e
);
rd_hashd_intf::Params::default()
}
};
let mut changed = false;
if params.file_size_mean != knobs.hash_size {
params.file_size_mean = knobs.hash_size;
changed = true;
}
if params.lat_target_pct != self.lat_target_pct {
params.lat_target_pct = self.lat_target_pct;
changed = true;
}
if params.lat_target != cmd.lat_target {
params.lat_target = cmd.lat_target;
changed = true;
}
if params.rps_max != self.rps_max {
params.rps_max = self.rps_max;
changed = true;
}
if params.rps_target != rps_target {
params.rps_target = rps_target;
changed = true;
}
if params.mem_frac != mem_frac {
params.mem_frac = mem_frac;
changed = true;
}
if params.chunk_pages != knobs.chunk_pages {
params.chunk_pages = knobs.chunk_pages;
changed = true;
}
if params.file_addr_stdev_ratio != file_addr_stdev {
params.file_addr_stdev_ratio = file_addr_stdev;
changed = true;
}
if params.anon_addr_stdev_ratio != anon_addr_stdev {
params.anon_addr_stdev_ratio = anon_addr_stdev;
changed = true;
}
if params.file_frac != cmd.file_ratio {
params.file_frac = cmd.file_ratio;
changed = true;
}
if params.log_bps != cmd.log_bps {
params.log_bps = cmd.log_bps;
changed = true;
}
if params.fake_cpu_load != knobs.fake_cpu_load {
params.fake_cpu_load = knobs.fake_cpu_load;
changed = true;
}
if changed {
info!(
"hashd: Updating {:?} to lat={:.2}ms@{:.2}% rps={:.2} mem={:.2}% log={:.2}Mbps frac={:.2}",
AsRef::<Path>::as_ref(&self.params_path)
.parent()
.unwrap()
.file_name()
.unwrap(),
cmd.lat_target * TO_MSEC,
cmd.lat_target_pct * TO_PCT, | );
params.save(&self.params_path)?;
}
Ok(())
}
fn update_resctl(&mut self, mem_low: u64, frac: f64) -> Result<()> {
let mut svc = self.svc.as_mut().unwrap();
svc.unit.resctl = systemd::UnitResCtl {
cpu_weight: Some((100.0 * frac).ceil() as u64),
io_weight: Some((100.0 * frac).ceil() as u64),
mem_low: Some((mem_low as f64 * frac).ceil() as u64),
..Default::default()
};
svc.unit.apply()
}
fn report(&mut self, expiration: SystemTime) -> Result<HashdReport> {
let expiration = match self.started_at {
Some(at) if at > expiration => at,
_ => expiration,
};
let svc_r = match &mut self.svc {
Some(svc) => super::svc_refresh_and_report(&mut svc.unit)?,
None => Default::default(),
};
let hashd_r = match rd_hashd_intf::Report::load(&self.report_path) {
Ok(rep) => {
if rep.timestamp.timestamp_millis() as u128
>= expiration.duration_since(UNIX_EPOCH).unwrap().as_millis()
{
rep
} else {
rd_hashd_intf::Report {
// retain fields which don't need explicit expiration
mem_probe_size: rep.mem_probe_size,
mem_probe_at: rep.mem_probe_at,
..Default::default()
}
}
}
Err(e) => match e.downcast_ref::<io::Error>() {
Some(ie) if ie.raw_os_error() == Some(libc::ENOENT) => Default::default(),
_ => bail!("hashd: Failed to read {:?} ({:?})", &self.report_path, &e),
},
};
Ok(HashdReport {
svc: svc_r,
phase: hashd_r.phase,
load: (hashd_r.hasher.rps / self.rps_max as f64).min(1.0),
rps: hashd_r.hasher.rps,
lat_pct: self.lat_target_pct,
lat: hashd_r.hasher.lat,
nr_in_flight: hashd_r.hasher.nr_in_flight,
nr_done: hashd_r.hasher.nr_done,
nr_workers: hashd_r.hasher.nr_workers,
nr_idle_workers: hashd_r.hasher.nr_idle_workers,
mem_probe_size: hashd_r.mem_probe_size,
mem_probe_at: hashd_r.mem_probe_at,
})
}
}
pub struct HashdSet {
hashd: [Hashd; 2],
}
impl HashdSet {
pub fn new(cfg: &Config) -> Self {
Self {
hashd: [
Hashd {
name: HASHD_A_SVC_NAME.into(),
params_path: cfg.hashd_paths(HashdSel::A).params.clone(),
report_path: cfg.hashd_paths(HashdSel::A).report.clone(),
path_args: hashd_path_args(cfg, HashdSel::A),
lat_target_pct: rd_hashd_intf::Params::default().lat_target_pct,
rps_max: 1,
file_max_ratio: rd_hashd_intf::Args::default().file_max_frac,
svc: None,
started_at: None,
},
Hashd {
name: HASHD_B_SVC_NAME.into(),
params_path: cfg.hashd_paths(HashdSel::B).params.clone(),
report_path: cfg.hashd_paths(HashdSel::B).report.clone(),
path_args: hashd_path_args(cfg, HashdSel::B),
lat_target_pct: rd_hashd_intf::Params::default().lat_target_pct,
rps_max: 1,
file_max_ratio: rd_hashd_intf::Args::default().file_max_frac,
svc: None,
started_at: None,
},
],
}
}
fn weights_to_fracs(cmd: &[HashdCmd; 2]) -> [f64; 2] {
match (cmd[0].active, cmd[1].active) {
(false, false) => return [0.0, 0.0],
(true, false) => return [1.0, 0.0],
(false, true) => return [0.0, 1.0],
(true, true) => {}
}
let sum = cmd[0].weight + cmd[1].weight;
if sum <= 0.0 {
warn!(
"hashd: Invalid weights ({}, {}), using (0.5, 0.5)",
cmd[0].weight, cmd[1].weight
);
return [0.5, 0.5];
}
let (w0, w1) = (cmd[0].weight / sum, cmd[1].weight / sum);
if w0 < 0.1 {
[0.1, 0.9]
} else if w1 < 0.1 {
[0.9, 0.1]
} else {
[w0, w1]
}
}
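// A quick worked example of the weighting above (hypothetical weights): with
// both instances active and weights (1.0, 19.0), the normalized fractions are
// (0.05, 0.95); the 0.1 floor then yields (0.1, 0.9), so neither hashd
// instance is completely starved of CPU/IO/memory weight.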
pub fn apply(&mut self, cmd: &[HashdCmd; 2], knobs: &HashdKnobs, mem_low: u64) -> Result<()> {
let fracs = Self::weights_to_fracs(cmd);
debug!("hashd: fracs={:?}", &fracs);
// handle the goners first
for i in 0..2 {
if !cmd[i].active && self.hashd[i].svc.is_some() {
self.hashd[i].svc = None;
self.hashd[i].started_at = None;
}
}
// adjust the args
for i in 0..2 {
if self.hashd[i].svc.is_some() && cmd[i].file_max_ratio != self.hashd[i].file_max_ratio
{
info!(
"hashd: file_max_ratio updated for active hashd {}, need a restart",
i
);
}
self.hashd[i].file_max_ratio = cmd[i].file_max_ratio;
}
// adjust the params files
for i in 0..2 {
if fracs[i] != 0.0 {
self.hashd[i].update_params(knobs, &cmd[i], fracs[i])?;
}
}
// start missing ones
for i in 0..2 {
if cmd[i].active && self.hashd[i].svc.is_none() {
self.hashd[i].start(knobs.mem_size)?;
}
}
// update resctl params
for i in 0..2 {
if self.hashd[i].svc.is_some() {
debug!("hashd: updating resctl on {:?}", &self.hashd[i].name);
self.hashd[i].update_resctl(mem_low, fracs[i])?;
}
}
Ok(())
}
pub fn mark_bench_start(&mut self) {
self.hashd[0].started_at = Some(SystemTime::now());
}
pub fn stop(&mut self) {
for i in 0..2 {
if self.hashd[i].svc.is_some() {
self.hashd[i].svc = None;
self.hashd[i].started_at = None;
}
}
}
pub fn all_svcs(&self) -> HashSet<(String, String)> {
let mut svcs = HashSet::<(String, String)>::new();
if self.hashd[0].svc.is_some() {
svcs.insert((
HASHD_A_SVC_NAME.to_owned(),
format!("{}/{}", Slice::Work.cgrp(), HASHD_A_SVC_NAME),
));
}
if self.hashd[1].svc.is_some() {
svcs.insert((
HASHD_B_SVC_NAME.to_owned(),
format!("{}/{}", Slice::Work.cgrp(), HASHD_B_SVC_NAME),
));
}
svcs
}
pub fn report(&mut self, expiration: SystemTime) -> Result<[HashdReport; 2]> {
Ok([
self.hashd[0].report(expiration)?,
self.hashd[1].report(expiration)?,
])
}
} | rps_target,
mem_frac * TO_PCT,
to_mb(cmd.log_bps),
frac |
package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class | (PythonPackage):
"""Glue (Grid LSC User Environment) is a suite of python modules and
programs to allow users to run LSC codes on the grid. It also provides
certain metadata services, such as the LSC segment database."""
homepage = "https://www.lsc-group.phys.uwm.edu/daswg/projects/glue.html"
url = "https://pypi.io/packages/source/l/lscsoft-glue/lscsoft-glue-2.0.0.tar.gz"
version('2.0.0', sha256='9bdfaebe4c921d83d1e3d1ca24379a644665e9d7530e7070665f387767c66923')
depends_on('py-setuptools', type='build')
depends_on('py-six', type=('build', 'run'))
depends_on('py-pyopenssl', type=('build', 'run'))
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-ligo-segments', type=('build', 'run'))
| PyLscsoftGlue |
apps.py | from django.apps import AppConfig | name = 'app_licences' |
class AppLicencesConfig(AppConfig): |
cpxrbm.py | import sys
# Find jVMC package
#sys.path.append("/Users/akhter/githesis-/jvmc/vmc_jax")
sys.path.append("/Users/akhter/thesis/vmc_jax")
import jax
from jax.config import config
config.update("jax_enable_x64", True)
import jax.random as random
import jax.numpy as jnp
import numpy as np
from jax.tree_util import tree_flatten, tree_unflatten
import jVMC
import tensornetwork as tn
tn.set_default_backend("jax")
import functools
from typing import Any, Callable, Sequence, Optional
import flax
from flax import linen as nn
from flax import optim
from jax import lax
from functools import partial
import jVMC.nets.initializers as init
import jVMC.global_defs as global_defs
import time
# DMRG energies produced with the TeNPy library https://github.com/tenpy/tenpy
#DMRG_energies = {"10": -1.0545844370449059, "20": -1.0900383739, "100": -1.1194665474274852}
L = 16 # system size
g = -0.7 # strength of external field
# Set up hamiltonian for open boundary conditions
hamiltonian = jVMC.operator.BranchFreeOperator()
for l in range(L - 1):
hamiltonian.add(jVMC.operator.scal_opstr(-1., (jVMC.operator.Sz(l), jVMC.operator.Sz(l + 1))))
hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(l), )))
hamiltonian.add(jVMC.operator.scal_opstr(g, (jVMC.operator.Sx(L - 1), )))
def svd(dp, shape, rank=L):
|
def simulate(rng, iterations, rank, t_step):
net = net_init
psi = jVMC.vqs.NQS(net, seed=rng) # Variational wave function
# Set up sampler
#tic = time.perf_counter()
sampler = jVMC.sampler.MCSampler(psi, (L,), random.PRNGKey(4321), updateProposer=jVMC.sampler.propose_spin_flip_Z2,
numChains=100, sweepSteps=L,
numSamples=30000, thermalizationSweeps=25)
#toc = time.perf_counter()
#print(" == Total time for sampling step: %fs\n" % (toc - tic))
# Set up TDVP
tdvpEquation = jVMC.util.tdvp.TDVP(sampler, rhsPrefactor=1.,
svdTol=1e-8, diagonalShift=10, makeReal='real')
stepper = jVMC.util.stepper.Euler(timeStep=t_step) # ODE integrator
res = []
for n in range(iterations):
dp, _ = stepper.step(0, tdvpEquation, psi.get_parameters(), hamiltonian=hamiltonian, psi=psi, numSamples=None)
print("dp_inserted", dp)
dp = svd(dp, (4,4,2,2), rank=rank)
dp = jnp.concatenate([p.ravel() for p in tree_flatten(dp)[0]])
dp = jnp.concatenate([dp.real, dp.imag])
print("dp_returned", dp)
psi.set_parameters(dp)
print(n, jax.numpy.real(tdvpEquation.ElocMean0) / L, tdvpEquation.ElocVar0 / L)
res.append([jax.numpy.real(tdvpEquation.ElocMean0) / L])
np.savetxt('dp', dp)
return np.array(res)
#iterations = 2500
#rng_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
iterations = 2
rng_list = [0, 1]
time_step = 12e-2
h = L
net_init = jVMC.nets.CpxRBM(numHidden = h, bias = False)
#rank_list = jnp.arange(L/2, L+1)
rank_list = [8,9]
results = []
for j,rng in enumerate(rng_list):
E_0_aarray = np.zeros((iterations, len(rng_list)))#an empty two dimensional array corresponding to the D and "rng".
for r in rank_list:
#print("rng:", rng)
res = simulate(rng, iterations, rank=r, t_step = time_step)
E_0 = res + 1.0660513358196495#this energy is for 16 spins
#adding the energy values obtained to the first entry of the row
#print("length", len(E_0))
E_0_aarray[:, j] = E_0[:, 0]
#print("final_energy:", E_0[-1])
results.append(E_0_aarray)
#print("E_array", E_0_aarray)
np.savetxt('cpxrbm_16_h16_sr_12t', np.array(results), header='Data for CpxRBM with h = 16 for 1 initializations')
| """Takes in the concatenated matrix and spits out the copressed one"""
#getting the real and the complex parts of the matrix
real_matrix = jnp.reshape(dp[:L*h], (L,h))
complex_matrix = jnp.reshape(dp[L*h:], (L,h))
print("real_matrix", real_matrix, "complex_matrix:", complex_matrix)
#creating the W matrix from the real and the complex parts
matrix = jax.lax.complex(real_matrix, complex_matrix)
print("matrix:", matrix)
#Now that we have the matrix we can svd it and reject some of the singular values.
tensor1 = jnp.reshape(matrix, shape)
print("tensor1_shape and atype:", tensor1.shape, type(tensor1))
#reshaping the matrix in a tensor of given shape e.g. a four legged tensor
node = tn.Node(tensor1)
#now we perform the svd of the node keeping the left two and the right two legs as they are
u, vh, _ = tn.split_node(node, left_edges=[node[0], node[1]], right_edges=[node[2], node[3]], max_singular_values=rank)
print("shape of u:", u.shape, "shape of vh:", vh.shape)
node_contracted = (u @ vh).tensor
matrix_returned = jnp.reshape(node_contracted, (matrix.shape))
print("shape of matrix_returned:", matrix_returned.shape)
return matrix_returned |
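# A minimal usage sketch of the compression step above (hypothetical numbers;
# it assumes the globals L and h match the flattened parameter layout):
#   dp_flat = jnp.concatenate([W.real.ravel(), W.imag.ravel()])  # W is (L, h)
#   W_low_rank = svd(dp_flat, (4, 4, 2, 2), rank=8)
# The returned matrix keeps the original (L, h) shape but retains at most 8
# singular values across the chosen bipartition of tensor legs.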
app.component.ts | /*
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { Component } from '@angular/core';
import { Observable } from 'rxjs';
import { LoginFacade } from './modules/login/login.facade';
@Component({
selector: 'app-root', | styleUrls: ['./app.component.scss'],
providers: [ LoginFacade ]
})
export class AppComponent {
ready : Observable<number>
constructor(private loginFacade : LoginFacade) {
this.ready = loginFacade.ready
}
} | templateUrl: './app.component.html', |
reducer_active_book.js | // State argument is not application state,
// only the state this reducer is responsible for | export default function(state = null, action) {
switch(action.type) {
case 'BOOK_SELECTED':
return action.payload;
}
return state;
} | |
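// For example, dispatching the (hypothetical) action
//   { type: 'BOOK_SELECTED', payload: { title: 'Dune' } }
// makes this slice of state the selected book; any other action type falls
// through the switch and returns the existing state unchanged.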
regexes.const.ts | // link regex
export const IMG_REGEX = /(https?:\/\/.*\.(?:tiff?|jpe?g|gif|png|svg|ico|heic|webp))(.*)/gim
export const IPFS_REGEX = /^https?:\/\/[^/]+\/(ip[fn]s)\/([^/?#]+)/gim
export const POST_REGEX = /^https?:\/\/(.*)\/(.*)\/(@[\w.\d-]+)\/(.*)/i
export const CCC_REGEX = /^https?:\/\/(.*)\/ccc\/([\w.\d-]+)\/(.*)/i
export const MENTION_REGEX = /^https?:\/\/(.*)\/(@[\w.\d-]+)$/i
export const TOPIC_REGEX = /^https?:\/\/(.*)\/(trending|hot|created|promoted|muted|payout)\/(.*)$/i
export const INTERNAL_MENTION_REGEX = /^\/@[\w.\d-]+$/i
export const INTERNAL_TOPIC_REGEX = /^\/(trending|hot|created|promoted|muted|payout)\/(.*)$/i
export const INTERNAL_POST_TAG_REGEX = /\/(.*)\/(@[\w.\d-]+)\/(.*)/i
export const INTERNAL_POST_REGEX = /^\/(@[\w.\d-]+)\/(.*)$/i
export const CUSTOM_COMMUNITY_REGEX = /^https?:\/\/(.*)\/c\/(hive-\d+)(.*)/i
export const YOUTUBE_REGEX = /(?:https?:\/\/)?(?:www\.)?(?:youtube\.com\/watch\?v=|youtu\.be\/)([^& \n<]+)(?:[^ \n<]+)?/g
export const YOUTUBE_EMBED_REGEX = /^(https?:)?\/\/www.youtube.com\/embed\/.*/i
export const VIMEO_REGEX = /(https?:\/\/)?(www\.)?(?:vimeo)\.com.*(?:videos|video|channels|)\/([\d]+)/i
export const VIMEO_EMBED_REGEX = /https:\/\/player\.vimeo\.com\/video\/([0-9]+)/
export const BITCHUTE_REGEX = /^(?:https?:\/\/)?(?:www\.)?bitchute.com\/(?:video|embed)\/([a-z0-9]+)/i
export const D_TUBE_REGEX = /(https?:\/\/d.tube.#!\/v\/)(\w+)\/(\w+)/g
export const D_TUBE_REGEX2 = /(https?:\/\/d.tube\/v\/)(\w+)\/(\w+)/g | export const D_TUBE_EMBED_REGEX = /^https:\/\/emb.d.tube\/.*/i
export const TWITCH_REGEX = /https?:\/\/(?:www.)?twitch.tv\/(?:(videos)\/)?([a-zA-Z0-9][\w]{3,24})/i
export const DAPPLR_REGEX = /^(https?:)?\/\/[a-z]*\.dapplr.in\/file\/dapplr-videos\/.*/i
export const TRUVVL_REGEX = /^https?:\/\/embed.truvvl.com\/(@[\w.\d-]+)\/(.*)/i
export const LBRY_REGEX = /^(https?:)?\/\/lbry.tv\/\$\/embed\/.*/i
export const ODYSEE_REGEX = /^(https?:)?\/\/odysee.com\/\$\/embed\/.*/i
export const ARCH_REGEX = /^(https?:)?\/\/archive.org\/embed\/.*/i
export const SPEAK_REGEX = /(?:https?:\/\/(?:3speak.([a-z]+)\/watch\?v=)|(?:3speak.([a-z]+)\/embed\?v=))([A-Za-z0-9\_\-\.\/]+)(&.*)?/i
export const SPEAK_EMBED_REGEX = /^(https?:)?\/\/3speak.([a-z]+)\/embed\?.*/i
export const TWITTER_REGEX = /(?:https?:\/\/(?:(?:twitter\.com\/(.*?)\/status\/(.*))))/gi
export const SPOTIFY_REGEX = /^https:\/\/open\.spotify\.com\/playlist\/(.*)?$/gi
export const RUMBLE_REGEX = /^https:\/\/rumble.com\/embed\/([a-zA-Z0-9-]+)\/\?pub=4/
export const BRIGHTEON_REGEX = /^https?:\/\/(www\.)?brighteon\.com\/(?:embed\/)?(.*[0-9].*)/i
export const VIMM_EMBED_REGEX = /^https:\/\/www.vimm.tv\/.*/i
export const SPOTIFY_EMBED_REGEX = /^https:\/\/open\.spotify\.com\/(embed|embed-podcast)\/(playlist|show|episode|track|album)\/(.*)/i
export const SOUNDCLOUD_EMBED_REGEX = /^https:\/\/w.soundcloud.com\/player\/.*/i
export const TWITCH_EMBED_REGEX = /^(https?:)?\/\/player.twitch.tv\/.*/i
export const BRAND_NEW_TUBE_REGEX = /^https:\/\/brandnewtube\.com\/embed\/[a-z0-9]+$/i | |
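// A quick sanity check of one pattern above (hypothetical URL):
//   'https://youtu.be/dQw4w9WgXcQ'.match(YOUTUBE_REGEX)
// matches, with the video id captured by the first group.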
api_tool-dbdump.go | package pwapi
| )
func (svc *service) ToolDBDump(context.Context, *Void) (*pwdb.Dump, error) {
return pwdb.GetDump(svc.db)
} | import (
"context"
"pathwar.land/pathwar/v2/go/pkg/pwdb" |
uniswapFixture.d.ts | import { providers } from "ethers";
import { Address } from "../types";
import { Account } from "../test/types";
import { StakingRewards, Uni, UniswapTimelock, UniswapGovernorAlpha, UniswapV2Factory, UniswapV2Pair, UniswapV2Router02 } from "../contracts/uniswap";
export declare class UniswapFixture { | private _provider;
private _ownerSigner;
owner: Account;
uni: Uni;
uniswapGovernorAlpha: UniswapGovernorAlpha;
uniswapTimelock: UniswapTimelock;
factory: UniswapV2Factory;
pair: UniswapV2Pair;
router: UniswapV2Router02;
wethDaiPool: UniswapV2Pair;
wethDaiStakingRewards: StakingRewards;
wethWbtcPool: UniswapV2Pair;
wethWbtcStakingRewards: StakingRewards;
uniWethPool: UniswapV2Pair;
constructor(provider: providers.Web3Provider | providers.JsonRpcProvider, ownerAddress: Address);
initialize(_owner: Account, _weth: Address, _wbtc: Address, _dai: Address): Promise<void>;
createNewStakingPair(_tokenOne: Address, _tokenTwo: Address): Promise<[UniswapV2Pair, StakingRewards]>;
createNewPair(_tokenOne: Address, _tokenTwo: Address): Promise<UniswapV2Pair>;
getTokenOrder(_tokenOne: Address, _tokenTwo: Address): [Address, Address];
getForkedUniswapRouter(): UniswapV2Router02;
getForkedSushiswapRouter(): UniswapV2Router02;
} | private _deployer; |
conelp.py | # The small linear cone program of section 8.1 (Linear cone programs).
from cvxopt import matrix, solvers | [-14., 2., 7., -13., -18., 3., 0., 0., -1., 0., 3.,
13., -6., 13., 12., -10., -6., -10., -28.],
[ 5., 0., -15., 12., -6., 17., 0., 0., 0., -1., 9.,
6., -6., 6., -7., -7., -6., -7., -11.]])
h = matrix( [ -3., 5., 12., -2., -14., -13., 10., 0., 0., 0., 68.,
-30., -19., -30., 99., 23., -19., 23., 10.] )
dims = {'l': 2, 'q': [4, 4], 's': [3]}
sol = solvers.conelp(c, G, h, dims)
print("\nStatus: " + sol['status'])
print("\nx = \n")
print(sol['x'])
print("\nz = \n")
print(sol['z']) |
c = matrix([-6., -4., -5.])
G = matrix([[ 16., 7., 24., -8., 8., -1., 0., -1., 0., 0., 7.,
-5., 1., -5., 1., -7., 1., -7., -4.], |
chembl_upload.py | """
Chembl uploader
"""
# pylint: disable=E0401, E0611
import os
import glob
import pymongo
import biothings.hub.dataload.storage as storage
from biothings.hub.dataload.uploader import ParallelizedSourceUploader
from hub.dataload.uploader import BaseDrugUploader
from hub.datatransform.keylookup import MyChemKeyLookup
from .chembl_parser import load_data
SRC_META = {
"url": 'https://www.ebi.ac.uk/chembl/',
"license_url" : "https://www.ebi.ac.uk/about/terms-of-use",
"license_url_short" : "http://bit.ly/2KAUCAm"
}
class ChemblUploader(BaseDrugUploader, ParallelizedSourceUploader):
"""
ChemblUploader - upload the Chembl data source
"""
name = "chembl"
storage_class = storage.RootKeyMergerStorage
__metadata__ = {"src_meta" : SRC_META}
MOLECULE_PATTERN = "molecule.*.json"
keylookup = MyChemKeyLookup(
[("inchikey", "chembl.inchi_key"),
("inchi", "chembl.inchi"),
("chembl", "chembl.molecule_chembl_id"),
("chebi", "chembl.chebi_par_id"),
("drugcentral", "chembl.xrefs.drugcentral.id"),
("drugname", "chembl.pref_name")],
# TODO: handle duplicate keys from pubchem
# - we use RootKeyMergerStorage, but the num. duplicates
# - is too high (>10000)
# ("pubchem", "chembl.xrefs.pubchem.sid"),
copy_from_doc=True)
def jobs(self):
"""
this will generate arguments for the self.load_data() method, allowing parallelization
"""
json_files = glob.glob(os.path.join(self.data_folder, self.__class__.MOLECULE_PATTERN))
return [(f,) for f in json_files]
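# For example (hypothetical listing): a data folder holding molecule.0.json
# and molecule.1.json yields [('molecule.0.json',), ('molecule.1.json',)],
# so each file becomes one parallelized load_data() job.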
def load_data(self, data_folder):
|
def post_update_data(self, *args, **kwargs):
"""create indexes following an update"""
# pylint: disable=W0613
"""
for idxname in ["chembl.chebi_par_id", "chembl.inchi", "chembl.molecule_chembl_id"]:
self.logger.info("Indexing '%s'" % idxname)
# background=true or it'll lock the whole database...
self.collection.create_index(idxname, background=True)
"""
for idxname in ["chembl.chebi_par_id", "chembl.molecule_chembl_id"]:
self.logger.info("Indexing '%s'" % idxname)
# background=true or it'll lock the whole database...
self.collection.create_index(idxname, background=True)
@classmethod
def get_mapping(cls):
"""return mapping data"""
mapping = {
"chembl": {
"properties": {
"biotherapeutic": {
"properties": {
"helm_notation": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"description": {
"type": "text"
},
"biocomponents": {
"properties": {
"organism": {
"type": "text"
},
"tax_id": {
"type": "integer"
},
"sequence": {
"type": "text"
},
"component_id": {
"type": "integer"
},
"description": {
"type": "text"
},
"component_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
'copy_to': ['all'],
}
}
},
"therapeutic_flag": {
"type": "boolean"
},
"usan_stem": {
"type": "text"
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_properties": {
"properties": {
"heavy_atoms": {
"type": "integer"
},
"acd_most_bpka": {
"type": "float"
},
"mw_freebase": {
"type": "float"
},
"num_ro5_violations": {
"type": "integer"
},
"molecular_species": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"qed_weighted": {
"type": "float"
},
"ro3_pass": {
"type": "boolean"
},
"full_mwt": {
"type": "float"
},
"num_lipinski_ro5_violations": {
"type": "integer"
},
"rtb": {
"type": "integer"
},
"psa": {
"type": "float"
},
"alogp": {
"type": "float"
},
"hbd": {
"type": "integer"
},
"acd_most_apka": {
"type": "float"
},
"hbd_lipinski": {
"type": "integer"
},
"acd_logp": {
"type": "float"
},
"full_molformula": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"aromatic_rings": {
"type": "integer"
},
"hba_lipinski": {
"type": "integer"
},
"mw_monoisotopic": {
"type": "float"
},
"hba": {
"type": "integer"
},
"acd_logd": {
"type": "float"
}
}
},
"helm_notation": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"max_phase": {
"type": "integer"
},
"inorganic_flag": {
"type": "integer"
},
"usan_stem_definition": {
"type": "text"
},
"dosed_ingredient": {
"type": "boolean"
},
"chebi_par_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"withdrawn_reason": {
"type": "text"
},
"molecule_hierarchy": {
"properties": {
"parent_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_chembl_id": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"prodrug": {
"type": "integer"
},
"withdrawn_flag": {
"type": "boolean"
},
"usan_year": {
"type": "integer"
},
"parenteral": {
"type": "boolean"
},
"black_box_warning": {
"type": "integer"
},
"polymer_flag": {
"type": "boolean"
},
"molecule_synonyms": {
"properties": {
"molecule_synonym": {
"type": "text"
},
"synonyms": {
"type": "text"
},
"syn_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"atc_classifications": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"molecule_type": {
"type": "text"
},
"first_in_class": {
"type": "integer"
},
"inchi": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"structure_type": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"withdrawn_class": {
"type": "text"
},
"inchi_key": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"topical": {
"type": "boolean"
},
"oral": {
"type": "boolean"
},
"xrefs": {
"properties": {
"drugcentral": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
}
}
},
"tg-gates": {
"properties": {
"id": {
"type": "integer"
},
"name": {
"type": "text"
}
}
},
"wikipedia": {
"properties": {
"url_stub": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
}
}
},
"dailymed": {
"properties": {
"name": {
"type": "text"
}
}
},
"pubchem": {
"properties": {
"sid": {
"type": "integer"
}
}
}
}
},
"chirality": {
"type": "integer"
},
"usan_substem": {
"type": "text"
},
"indication_class": {
"type": "text"
},
"withdrawn_country": {
"type": "text"
},
"withdrawn_year": {
"type": "integer"
},
"availability_type": {
"type": "integer"
},
"smiles": {
"normalizer": "keyword_lowercase_normalizer",
"type": "keyword",
},
"natural_product": {
"type": "integer"
},
"pref_name": {
"type": "text",
"copy_to": ["all"]
},
"first_approval": {
"type": "integer"
}
}
}
}
return mapping
| """load data from an input file"""
self.logger.info("Load data from '%s'" % data_folder)
return self.keylookup(load_data, debug=True)(data_folder) |
issue-24353.rs | // run-pass | #![allow(unreachable_code)]
fn main() {
return ();
let x = ();
x
} | |
Profile.js | import React, { Component } from 'react';
import { profileRequest } from '../services/api'
class Profile extends Component {
constructor(props) {
super(props);
this.state = {
email: ""
}
}
componentDidMount() { | this.setState({email: res.email})
}
})
}
render() {
return (
<div id="profile">
<h1>{this.state.email}'s Profile</h1>
</div>
);
}
}
export default Profile; | profileRequest()
.then(res => {
if (!res.error) { |
Embeddable.ts | import type { Constructor, Dictionary } from '../typings';
import { MetadataStorage } from '../metadata';
export function | (options: EmbeddableOptions = {}) {
return function <T>(target: T & Dictionary) {
const meta = MetadataStorage.getMetadataFromDecorator(target);
meta.class = target as unknown as Constructor<T>;
meta.name = target.name;
meta.embeddable = true;
Object.assign(meta, options);
return target;
};
}
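// Usage sketch (hypothetical class): applying the decorator registers the
// class as an embeddable in MetadataStorage, e.g.
//   @Embeddable()
//   class Address { street!: string; city!: string; }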
export type EmbeddableOptions = {
discriminatorColumn?: string;
discriminatorMap?: Dictionary<string>;
discriminatorValue?: number | string;
abstract?: boolean;
};
| Embeddable |
mpsc.rs | #![feature(test)]
#![cfg_attr(test, deny(warnings))]
extern crate futures;
extern crate test;
extern crate tokio_sync;
mod tokio {
use futures::{future, Async, Future, Sink, Stream};
use std::thread;
use test::{self, Bencher};
use tokio_sync::mpsc::*;
#[bench]
fn bounded_new(b: &mut Bencher) {
b.iter(|| {
let _ = test::black_box(&channel::<i32>(1_000));
})
}
#[bench]
fn unbounded_new(b: &mut Bencher) {
b.iter(|| {
let _ = test::black_box(&unbounded_channel::<i32>());
})
}
#[bench]
fn send_one_message(b: &mut Bencher) {
b.iter(|| {
let (mut tx, mut rx) = channel(1_000);
// Send
tx.try_send(1).unwrap();
// Receive
assert_eq!(Async::Ready(Some(1)), rx.poll().unwrap());
})
}
#[bench]
fn bounded_rx_not_ready(b: &mut Bencher) {
let (_tx, mut rx) = channel::<i32>(1_000);
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn bounded_tx_poll_ready(b: &mut Bencher) {
let (mut tx, _rx) = channel::<i32>(1);
b.iter(|| {
future::lazy(|| {
assert!(tx.poll_ready().unwrap().is_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn bounded_tx_poll_not_ready(b: &mut Bencher) {
let (mut tx, _rx) = channel::<i32>(1);
tx.try_send(1).unwrap();
b.iter(|| {
future::lazy(|| {
assert!(tx.poll_ready().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn unbounded_rx_not_ready(b: &mut Bencher) {
let (_tx, mut rx) = unbounded_channel::<i32>();
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn unbounded_rx_not_ready_x5(b: &mut Bencher) {
let (_tx, mut rx) = unbounded_channel::<i32>();
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn bounded_uncontended_1(b: &mut Bencher) {
b.iter(|| {
let (mut tx, mut rx) = channel(1_000);
for i in 0..1000 {
tx.try_send(i).unwrap();
// No need to create a task, because poll is not going to park.
assert_eq!(Async::Ready(Some(i)), rx.poll().unwrap());
}
})
}
#[bench]
fn bounded_uncontended_2(b: &mut Bencher) {
b.iter(|| {
let (mut tx, mut rx) = channel(1000);
for i in 0..1000 {
tx.try_send(i).unwrap();
}
for i in 0..1000 {
// No need to create a task, because poll is not going to park.
assert_eq!(Async::Ready(Some(i)), rx.poll().unwrap());
}
})
}
#[bench]
fn | (b: &mut Bencher) {
let mut threads = vec![];
let mut txs = vec![];
for _ in 0..4 {
let (tx, rx) = ::std::sync::mpsc::channel::<Sender<i32>>();
txs.push(tx);
threads.push(thread::spawn(move || {
for mut tx in rx.iter() {
for i in 0..1_000 {
tx.try_send(i).unwrap();
}
}
}));
}
b.iter(|| {
// TODO make unbounded
let (tx, rx) = channel::<i32>(1_000_000);
for th in &txs {
th.send(tx.clone()).unwrap();
}
drop(tx);
let rx = rx.wait().take(4 * 1_000);
for v in rx {
let _ = test::black_box(v);
}
});
drop(txs);
for th in threads {
th.join().unwrap();
}
}
#[bench]
fn contended_bounded_tx(b: &mut Bencher) {
const THREADS: usize = 4;
const ITERS: usize = 100;
let mut threads = vec![];
let mut txs = vec![];
for _ in 0..THREADS {
let (tx, rx) = ::std::sync::mpsc::channel::<Sender<i32>>();
txs.push(tx);
threads.push(thread::spawn(move || {
for tx in rx.iter() {
let mut tx = tx.wait();
for i in 0..ITERS {
tx.send(i as i32).unwrap();
}
}
}));
}
b.iter(|| {
let (tx, rx) = channel::<i32>(1);
for th in &txs {
th.send(tx.clone()).unwrap();
}
drop(tx);
let rx = rx.wait().take(THREADS * ITERS);
for v in rx {
let _ = test::black_box(v);
}
});
drop(txs);
for th in threads {
th.join().unwrap();
}
}
}
mod legacy {
use futures::sync::mpsc::*;
use futures::{future, Async, Future, Sink, Stream};
use std::thread;
use test::{self, Bencher};
#[bench]
fn bounded_new(b: &mut Bencher) {
b.iter(|| {
let _ = test::black_box(&channel::<i32>(1_000));
})
}
#[bench]
fn unbounded_new(b: &mut Bencher) {
b.iter(|| {
let _ = test::black_box(&unbounded::<i32>());
})
}
#[bench]
fn send_one_message(b: &mut Bencher) {
b.iter(|| {
let (mut tx, mut rx) = channel(1_000);
// Send
tx.try_send(1).unwrap();
// Receive
assert_eq!(Ok(Async::Ready(Some(1))), rx.poll());
})
}
#[bench]
fn bounded_rx_not_ready(b: &mut Bencher) {
let (_tx, mut rx) = channel::<i32>(1_000);
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn bounded_tx_poll_ready(b: &mut Bencher) {
let (mut tx, _rx) = channel::<i32>(0);
b.iter(|| {
future::lazy(|| {
assert!(tx.poll_ready().unwrap().is_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn bounded_tx_poll_not_ready(b: &mut Bencher) {
let (mut tx, _rx) = channel::<i32>(0);
tx.try_send(1).unwrap();
b.iter(|| {
future::lazy(|| {
assert!(tx.poll_ready().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn unbounded_rx_not_ready(b: &mut Bencher) {
let (_tx, mut rx) = unbounded::<i32>();
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn unbounded_rx_not_ready_x5(b: &mut Bencher) {
let (_tx, mut rx) = unbounded::<i32>();
b.iter(|| {
future::lazy(|| {
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
assert!(rx.poll().unwrap().is_not_ready());
Ok::<_, ()>(())
})
.wait()
.unwrap();
})
}
#[bench]
fn unbounded_uncontended_1(b: &mut Bencher) {
b.iter(|| {
let (tx, mut rx) = unbounded();
for i in 0..1000 {
UnboundedSender::unbounded_send(&tx, i).expect("send");
// No need to create a task, because poll is not going to park.
assert_eq!(Ok(Async::Ready(Some(i))), rx.poll());
}
})
}
#[bench]
fn unbounded_uncontended_2(b: &mut Bencher) {
b.iter(|| {
let (tx, mut rx) = unbounded();
for i in 0..1000 {
UnboundedSender::unbounded_send(&tx, i).expect("send");
}
for i in 0..1000 {
// No need to create a task, because poll is not going to park.
assert_eq!(Ok(Async::Ready(Some(i))), rx.poll());
}
})
}
#[bench]
fn multi_thread_unbounded_tx(b: &mut Bencher) {
let mut threads = vec![];
let mut txs = vec![];
for _ in 0..4 {
let (tx, rx) = ::std::sync::mpsc::channel::<Sender<i32>>();
txs.push(tx);
threads.push(thread::spawn(move || {
for mut tx in rx.iter() {
for i in 0..1_000 {
tx.try_send(i).unwrap();
}
}
}));
}
b.iter(|| {
let (tx, rx) = channel::<i32>(1_000_000);
for th in &txs {
th.send(tx.clone()).unwrap();
}
drop(tx);
let rx = rx.wait().take(4 * 1_000);
for v in rx {
let _ = test::black_box(v);
}
});
drop(txs);
for th in threads {
th.join().unwrap();
}
}
#[bench]
fn contended_bounded_tx(b: &mut Bencher) {
const THREADS: usize = 4;
const ITERS: usize = 100;
let mut threads = vec![];
let mut txs = vec![];
for _ in 0..THREADS {
let (tx, rx) = ::std::sync::mpsc::channel::<Sender<i32>>();
txs.push(tx);
threads.push(thread::spawn(move || {
for tx in rx.iter() {
let mut tx = tx.wait();
for i in 0..ITERS {
tx.send(i as i32).unwrap();
}
}
}));
}
b.iter(|| {
let (tx, rx) = channel::<i32>(1);
for th in &txs {
th.send(tx.clone()).unwrap();
}
drop(tx);
let rx = rx.wait().take(THREADS * ITERS);
for v in rx {
let _ = test::black_box(v);
}
});
drop(txs);
for th in threads {
th.join().unwrap();
}
}
}
| contended_unbounded_tx |
YuHunModule.py | # -*- coding: utf-8 -*-
import datetime
import logging
import os
import random
import time
from tkinter import END
import cv2
import numpy
import numpy as np
import pyautogui
from PIL import ImageGrab
from matplotlib import pyplot as plt
pyautogui.FAILSAFE = False
logging.basicConfig(format="%(asctime)s :%(levelname)s:%(message)s", datefmt="%d-%M-%Y %H:%M:%S", level=logging.DEBUG)
# Initialize the SIFT detector
SIFT = cv2.xfeatures2d.SIFT_create()
def ComputeScreenShot(screenShot):
"""
High screen resolutions make feature extraction expensive, so it is computed once here and reused
:return:
"""
kp2, des2 = SIFT.detectAndCompute(screenShot, None)
return kp2, des2
def GetLocation(target, kp2, des2):
"""
Locate the target image within the screenshot
:param target: template image to search for
:param kp2, des2: precomputed SIFT keypoints and descriptors of the screenshot
:return: (x, y) coordinates in the OpenCV coordinate system, or None
"""
MIN_MATCH_COUNT = 10
img1 = target # cv2.cvtColor(target,cv2.COLOR_BGR2GRAY) # query image
# img2 = screenShot
# img2 = cv2.cvtColor(screenShot, cv2.COLOR_BGR2GRAY) # train image
# img2 = cv2.resize(img2, dsize=None, fx=0.5, fy=0.5, interpolation=cv2.INTER_NEAREST)
# Find the keypoints and descriptors with SIFT
kp1, des1 = SIFT.detectAndCompute(img1, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=4)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for m, n in matches:
if m.distance < 0.7 * n.distance:
good.append(m)
if len(good) > MIN_MATCH_COUNT:
src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
matchesMask = mask.ravel().tolist()
h, w = img1.shape
pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
if M is not None:
dst = cv2.perspectiveTransform(pts, M)
arr = np.int32(dst) #
midPosArr = arr[0] + (arr[2] - arr[0]) // 2
midPos = (midPosArr[0][0], midPosArr[0][1])
# show=cv2.circle(img2,midPos,30,(255,255,255),thickness=5)
# cv2.imshow('s',show)
# cv2.waitKey()
# cv2.destroyAllWindows()
return midPos
else:
return None
else:
return None
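# Usage sketch (hypothetical template file name):
#   screen = GetScreenShot()
#   kp2, des2 = ComputeScreenShot(screen)
#   target = cv2.imread('img/tiaozhan.jpg', 0)
#   pos = GetLocation(target, kp2, des2)  # (x, y) in OpenCV coords, or None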
def CheatPos(originPos, factor=5):
"""
Apply a random offset to the original click position to reduce the risk of a ban
:param originPos: original coordinates
:return:
"""
x, y = random.randint(-factor, factor), random.randint(-factor, factor)
newPos = (originPos[0] + x, originPos[1] + y)
return newPos
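# For example, CheatPos((100, 200)) returns a point in the 11x11 box centred
# on (100, 200), e.g. (97, 204) -- typically a different one on every call.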
def Click(targetPosition):
"""
点击屏幕上的某个点
:param targetPosition:
:return:
"""
if targetPosition is None:
print('Target not detected')
else:
pyautogui.moveTo(targetPosition, duration=0.20)
pyautogui.click()
time.sleep(random.randint(500, 1000) / 1000)
# time.sleep(random.randint(100, 150) / 1000)
def loadImgs():
"""
Load all target images that need to be detected
:return:
"""
obj = {}
path = os.getcwd() + '/img'
file_list = os.listdir(path)
for file in file_list:
name = file.split('.')[0]
file_path = path + '/' + file
a = cv2.imread(file_path, 0)
obj[name] = a
return obj
def GetScreenShot():
"""
Grab a screenshot of the full screen
:return:
"""
screen = ImageGrab.grab()
# screen.save('screen.jpg')
# screen = cv2.imread('screen.jpg')
screen = cv2.cvtColor(numpy.asarray(screen), cv2.COLOR_RGB2BGR)
logging.info('Screenshot captured')
return screen
class YuHun():
def __init__(self):
self._flag = False
self.NeedCloseGame = False
self.NeedCloseSystem = False
def Run(self, LogUI, NeedCloseGame, NeedCloseSystem):
imgs = loadImgs()
LogUI.insert(END,
time.strftime('%Y-%m-%d %H:%M:%S ',
time.localtime(time.time())) + 'Challenge started\n')
Count = 1
while not self._flag:
logging.debug('Starting challenge')
screen = GetScreenShot()
WindowShape = screen.shape
result = []
# To speed things up, extract the screenshot's features once so they are not recomputed for every target
kp2, des2 = ComputeScreenShot(screen)
for i in ['tili60', 'tili80', 'auto', 'jieshou2', 'jieshou1', 'end1', 'end2', 'reject', 'queding',
'tiaozhan']:
obj = imgs[i]
# begin = time.clock()
pos = GetLocation(obj, kp2, des2)
# logging.debug('detecting settlement-screen target image')
# print(time.clock()-begin)
if pos is not None:
if i == 'tili60' or i == 'tili80':
print('window.py', NeedCloseSystem)
if self.NeedCloseSystem:
print('log')
os.system('shutdown -s -t 60')
return
if not self.NeedCloseGame:
# The game has to be closed manually
LogUI.insert(END,
time.strftime('%Y-%m-%d %H:%M:%S ',
time.localtime(time.time())) + 'Out of stamina; close the buff or the game manually\n')
return
# Kill the game process
hasProcess = True
while hasProcess:
if 'onmyoji' in os.popen('tasklist /FI "IMAGENAME eq onmyoji.exe"').read():
os.system('TASKKILL /F /IM onmyoji.exe')
hasProcess = True
else:
hasProcess = False
# Worker thread is done; return
return
elif i == 'end1':
time.sleep(random.randint(300, 800) / 1000)
pos = CheatPos(pos, 50)
elif i == 'end2':
newPos = (pos[0] + 80, pos[1] + 80)
pos = CheatPos(newPos, 5)
elif i == 'tiaozhan':
LogUI.insert(END,
time.strftime('%Y-%m-%d %H:%M:%S ',
time.localtime(time.time())) + 'Round ' + str(Count) + ' started\n')
Count += 1
elif i == 'reject':
pos = CheatPos(pos, 3)
else:
pos = CheatPos(pos, 10)
result.append(pos)
LogUI.see(END)
else:
result.append(None)
# Check the detection results
for i in result:
if i is not None:
print(WindowShape[1] * 0.06)
print(WindowShape[0] * 0.96)
if i[0] < WindowShape[1] * 0.06 or i[1] > WindowShape[0] * 0.96:
continue
else:
Click(i)
if len(LogUI.get('1.0', 'end-1c')) > 6000:
LogUI.delete(1.0, END) # clear with delete | LogUI.insert(END, ' Log cleared\n')
LogUI.see(END)
def Terminate(self):
self._flag = True
if __name__ == '__main__':
pass | |
main.py | # Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from frontend.tokenizer import *
from frontend.parser import *
from frontend.preprocessor import Preprocessor, PreprocessorException
from frontend.semantic_check import P4SemanticChecker
from frontend.dumper import P4HlirDumper
from frontend.ast import P4Program
from collections import OrderedDict
import hlir.p4 as p4
import itertools
import logging
import json
import pkg_resources
logger = logging.getLogger(__name__)
class HLIR():
def __init__(self, *args):
self.source_files = [] + list(args)
self.source_txt = []
self.preprocessor_args = []
self.primitives = []
self.p4_objects = []
self.p4_actions = OrderedDict()
self.p4_control_flows = OrderedDict()
self.p4_headers = OrderedDict()
self.p4_header_instances = OrderedDict()
self.p4_fields = OrderedDict()
self.p4_field_lists = OrderedDict()
self.p4_field_list_calculations = OrderedDict()
self.p4_parser_exceptions = OrderedDict()
self.p4_parse_value_sets = OrderedDict()
self.p4_parse_states = OrderedDict()
self.p4_counters = OrderedDict()
self.p4_meters = OrderedDict()
self.p4_registers = OrderedDict()
self.p4_nodes = OrderedDict()
self.p4_tables = OrderedDict()
self.p4_action_profiles = OrderedDict()
self.p4_action_selectors = OrderedDict()
self.p4_conditional_nodes = OrderedDict()
self.calculated_fields = []
self.p4_ingress_ptr = {}
self.p4_egress_ptr = None
self.primitives = json.loads(pkg_resources.resource_string('p4_hlir.frontend', 'primitives.json'))
def version(self):
return pkg_resources.require("p4-hlir")[0].version
def add_src_files(self, *args):
self.source_files += args
def add_preprocessor_args (self, *args):
self.preprocessor_args += args
def add_src_txt(self, *args):
self.source_txt += args
def add_primitives (self, primitives_dict):
|
def build(self, optimize=True, analyze=True, dump_preprocessed=False):
if len(self.source_files) == 0:
print "no source file to process"
return False
# Preprocess all program text
preprocessed_sources = []
try:
preprocessor = Preprocessor()
preprocessor.args += self.preprocessor_args
for p4_source in self.source_files:
absolute_source = os.path.join(os.getcwd(), p4_source)
if not self._check_source_path(absolute_source):
print "Source file '" + p4_source + "' could not be opened or does not exist."
return False
preprocessed_sources.append(preprocessor.preprocess_file(
absolute_source,
dest='%s.i'%p4_source if dump_preprocessed else None
))
for p4_txt in self.source_txt:
preprocessed_sources.append(preprocessor.preprocess_str(
p4_txt,
dest=None
))
except PreprocessorException as e:
print str(e)
return False
# Parse preprocessed text
all_p4_objects = []
for preprocessed_source in preprocessed_sources:
p4_objects, errors_cnt = P4Parser().parse(preprocessed_source)
if errors_cnt > 0:
print errors_cnt, "errors during parsing"
print "Interrupting compilation"
return False
all_p4_objects += p4_objects
print "parsing successful"
p4_program = P4Program("", -1, all_p4_objects)
# Semantic checking, round 1
sc = P4SemanticChecker()
errors_cnt = sc.semantic_check(p4_program, self.primitives)
if errors_cnt > 0:
print errors_cnt, "errors during semantic checking"
print "Interrupting compilation"
return False
else:
print "semantic checking successful"
# Dump AST to HLIR objects
d = P4HlirDumper()
d.dump_to_p4(self, p4_program, self.primitives)
# Semantic checking, round 2
# TODO: merge these two rounds and try to separate name resolution from
# higher level semantic checks
try:
p4.p4_validate(self)
except p4.p4_compiler_msg as e:
print e
return False
# Perform target-agnostic optimizations
if optimize:
p4.optimize_table_graph(self)
# Analyze program and annotate objects with derived information
if analyze:
p4.p4_dependencies(self)
p4.p4_field_access(self)
return True
def _check_source_path(self, source):
return os.path.isfile(source)
def HLIR_from_txt (program_str, **kwargs):
h = HLIR()
h.add_src_txt(program_str)
if h.build(**kwargs):
return h
else:
return None
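# Usage sketch (hypothetical P4 source text):
#   h = HLIR_from_txt(open('switch.p4').read())
#   if h is not None:
#       print h.p4_tables.keys()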
| self.primitives.update(primitives_dict) |
resource_server_route_match.go | package caddy
import (
"github.com/conradludgate/terraform-provider-caddy/caddyapi"
"github.com/conradludgate/tfutils"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func mapListStringSetFunc(v interface{}) int {
m := v.(map[string]interface{})
return schema.HashString(m["name"])
}
// MapListString is a schema that represents map[string][]string
var MapListString = tfutils.SchemaMap{
"name": tfutils.String().Required(true),
"values": tfutils.String().List().Required(true),
}.IntoSet().SetFunc(mapListStringSetFunc)
func IntoMapListString(m map[string][]string) *schema.Set {
s := schema.NewSet(mapListStringSetFunc, nil)
for k, v := range m {
s.Add(map[string]interface{}{
"name": k,
"values": v,
})
}
return s
}
// ParseMapListString converts the data from a MapListString schema to a map[string][]string type
func ParseMapListString(d *MapData, key string) map[string][]string {
sets := GetObjectSet(d, key)
var values map[string][]string
if len(sets) > 0 {
values = make(map[string][]string, len(sets))
for _, d := range sets {
values[GetString(&d, "name")] = GetStringList(&d, "values")
}
}
return values
}
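// For example, a (hypothetical) matcher block with name = "Content-Type" and
// values = ["application/json"] parses to
// map[string][]string{"Content-Type": {"application/json"}};
// IntoMapListString converts such a map back into the equivalent set.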
type ServerRouteMatcher struct {
not bool
}
func (s ServerRouteMatcher) Schema() tfutils.SchemaMap {
sm := tfutils.SchemaMap{
"host": tfutils.String().List().Optional(true),
"path": tfutils.String().List().Optional(true),
"method": tfutils.String().List().Optional(true),
"header": MapListString.Optional(true),
"query": MapListString.Optional(true),
}
if !s.not {
sm["not"] = tfutils.ListOf(ServerRouteMatcher{true}).Optional(true)
}
return sm
}
func ServerRouteMatcherFrom(d *MapData) caddyapi.Match {
match := caddyapi.Match{
Host: GetStringList(d, "host"),
Path: GetStringList(d, "path"),
Method: GetStringList(d, "method"),
Header: ParseMapListString(d, "header"),
Query: ParseMapListString(d, "query"),
}
if nots := GetObjectList(d, "not"); len(nots) > 0 {
match.Not = ServerRouteMatchersFrom(nots)
}
return match
}
func ServerRouteMatchersFrom(d []MapData) []caddyapi.Match {
matchers := make([]caddyapi.Match, 0, len(d))
for _, d := range d {
matchers = append(matchers, ServerRouteMatcherFrom(&d))
}
return matchers
}
func ServerRouteMatcherInto(match caddyapi.Match) map[string]interface{} {
return map[string]interface{}{
"host": match.Host,
"path": match.Path,
"method": match.Method,
"header": IntoMapListString(match.Header),
"query": IntoMapListString(match.Query),
}
}
func ServerRouteMatchersInto(matchers []caddyapi.Match) []map[string]interface{} | {
d := make([]map[string]interface{}, 0, len(matchers))
for _, match := range matchers {
d = append(d, ServerRouteMatcherInto(match))
}
return d
} |
|
pdns.go | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pdns
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
"math"
"net"
"net/http"
"sort"
"strings"
"time"
pgo "github.com/ffledgling/pdns-go"
log "github.com/sirupsen/logrus"
"github.com/yangkailc/bigtree-dns/endpoint"
"github.com/yangkailc/bigtree-dns/pkg/tlsutils"
"github.com/yangkailc/bigtree-dns/plan"
"github.com/yangkailc/bigtree-dns/provider"
)
type pdnsChangeType string
const (
apiBase = "/api/v1"
// Unless we use something like pdnsproxy (discontinued upstream), this value will _always_ be localhost
defaultServerID = "localhost"
defaultTTL = 300
// PdnsDelete and PdnsReplace are effectively an enum for "pgo.RrSet.changetype"
// TODO: Can we somehow get this from the pgo swagger client library itself?
// PdnsDelete : PowerDNS changetype used for deleting rrsets
// ref: https://doc.powerdns.com/authoritative/http-api/zone.html#rrset (see "changetype")
PdnsDelete pdnsChangeType = "DELETE"
// PdnsReplace : PowerDNS changetype for creating, updating and patching rrsets
PdnsReplace pdnsChangeType = "REPLACE"
// Number of times to retry failed PDNS requests
retryLimit = 3
	// base delay between retries; the wait doubles on each successive attempt
retryAfterTime = 250 * time.Millisecond
)
// PDNSConfig is comprised of the fields necessary to create a new PDNSProvider
type PDNSConfig struct {
DomainFilter endpoint.DomainFilter
DryRun bool
Server string
APIKey string
TLSConfig TLSConfig
}
// TLSConfig is comprised of the TLS-related fields necessary to create a new PDNSProvider
type TLSConfig struct {
TLSEnabled bool
CAFilePath string
ClientCertFilePath string
ClientCertKeyFilePath string
}
func (tlsConfig *TLSConfig) setHTTPClient(pdnsClientConfig *pgo.Configuration) error {
if !tlsConfig.TLSEnabled {
log.Debug("Skipping TLS for PDNS Provider.")
return nil
}
log.Debug("Configuring TLS for PDNS Provider.")
if tlsConfig.CAFilePath == "" {
return errors.New("certificate authority file path must be specified if TLS is enabled")
}
tlsClientConfig, err := tlsutils.NewTLSConfig(tlsConfig.ClientCertFilePath, tlsConfig.ClientCertKeyFilePath, tlsConfig.CAFilePath, "", false, tls.VersionTLS12)
if err != nil {
return err
}
// Timeouts taken from net.http.DefaultTransport
transporter := &http.Transport{
Proxy: http.ProxyFromEnvironment, | DialContext: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
DualStack: true,
}).DialContext,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
TLSClientConfig: tlsClientConfig,
}
pdnsClientConfig.HTTPClient = &http.Client{
Transport: transporter,
}
return nil
}
// Function for debug printing
func stringifyHTTPResponseBody(r *http.Response) (body string) {
if r == nil {
return ""
}
buf := new(bytes.Buffer)
buf.ReadFrom(r.Body)
body = buf.String()
return body
}
// PDNSAPIProvider : Interface used and extended by the PDNSAPIClient struct as
// well as mock APIClients used in testing
type PDNSAPIProvider interface {
ListZones() ([]pgo.Zone, *http.Response, error)
PartitionZones(zones []pgo.Zone) ([]pgo.Zone, []pgo.Zone)
ListZone(zoneID string) (pgo.Zone, *http.Response, error)
PatchZone(zoneID string, zoneStruct pgo.Zone) (*http.Response, error)
}
// PDNSAPIClient : Struct that encapsulates all the PowerDNS specific implementation details
type PDNSAPIClient struct {
dryRun bool
authCtx context.Context
client *pgo.APIClient
domainFilter endpoint.DomainFilter
}
// ListZones : Method returns all enabled zones from PowerDNS
// ref: https://doc.powerdns.com/authoritative/http-api/zone.html#get--servers-server_id-zones
func (c *PDNSAPIClient) ListZones() (zones []pgo.Zone, resp *http.Response, err error) {
for i := 0; i < retryLimit; i++ {
zones, resp, err = c.client.ZonesApi.ListZones(c.authCtx, defaultServerID)
if err != nil {
log.Debugf("Unable to fetch zones %v", err)
log.Debugf("Retrying ListZones() ... %d", i)
time.Sleep(retryAfterTime * (1 << uint(i)))
continue
}
return zones, resp, err
}
log.Errorf("Unable to fetch zones. %v", err)
return zones, resp, err
}
// PartitionZones : Method returns a slice of zones that adhere to the domain filter and a slice of ones that do not
func (c *PDNSAPIClient) PartitionZones(zones []pgo.Zone) (filteredZones []pgo.Zone, residualZones []pgo.Zone) {
if c.domainFilter.IsConfigured() {
for _, zone := range zones {
if c.domainFilter.Match(zone.Name) {
filteredZones = append(filteredZones, zone)
} else {
residualZones = append(residualZones, zone)
}
}
} else {
filteredZones = zones
}
return filteredZones, residualZones
}
// ListZone : Method returns the details of a specific zone from PowerDNS
// ref: https://doc.powerdns.com/authoritative/http-api/zone.html#get--servers-server_id-zones-zone_id
func (c *PDNSAPIClient) ListZone(zoneID string) (zone pgo.Zone, resp *http.Response, err error) {
for i := 0; i < retryLimit; i++ {
zone, resp, err = c.client.ZonesApi.ListZone(c.authCtx, defaultServerID, zoneID)
if err != nil {
log.Debugf("Unable to fetch zone %v", err)
log.Debugf("Retrying ListZone() ... %d", i)
time.Sleep(retryAfterTime * (1 << uint(i)))
continue
}
return zone, resp, err
}
log.Errorf("Unable to list zone. %v", err)
return zone, resp, err
}
// PatchZone : Method used to update the contents of a particular zone from PowerDNS
// ref: https://doc.powerdns.com/authoritative/http-api/zone.html#patch--servers-server_id-zones-zone_id
func (c *PDNSAPIClient) PatchZone(zoneID string, zoneStruct pgo.Zone) (resp *http.Response, err error) {
for i := 0; i < retryLimit; i++ {
resp, err = c.client.ZonesApi.PatchZone(c.authCtx, defaultServerID, zoneID, zoneStruct)
if err != nil {
log.Debugf("Unable to patch zone %v", err)
log.Debugf("Retrying PatchZone() ... %d", i)
time.Sleep(retryAfterTime * (1 << uint(i)))
continue
}
return resp, err
}
log.Errorf("Unable to patch zone. %v", err)
return resp, err
}
// PDNSProvider is an implementation of the Provider interface for PowerDNS
type PDNSProvider struct {
provider.BaseProvider
client PDNSAPIProvider
}
// NewPDNSProvider initializes a new PowerDNS based Provider.
func NewPDNSProvider(ctx context.Context, config PDNSConfig) (*PDNSProvider, error) {
// Do some input validation
if config.APIKey == "" {
return nil, errors.New("missing API Key for PDNS. Specify using --pdns-api-key=")
}
// We do not support dry running, exit safely instead of surprising the user
// TODO: Add Dry Run support
if config.DryRun {
return nil, errors.New("PDNS Provider does not currently support dry-run")
}
if config.Server == "localhost" {
log.Warnf("PDNS Server is set to localhost, this may not be what you want. Specify using --pdns-server=")
}
pdnsClientConfig := pgo.NewConfiguration()
pdnsClientConfig.BasePath = config.Server + apiBase
if err := config.TLSConfig.setHTTPClient(pdnsClientConfig); err != nil {
return nil, err
}
provider := &PDNSProvider{
client: &PDNSAPIClient{
dryRun: config.DryRun,
authCtx: context.WithValue(ctx, pgo.ContextAPIKey, pgo.APIKey{Key: config.APIKey}),
client: pgo.NewAPIClient(pdnsClientConfig),
domainFilter: config.DomainFilter,
},
}
return provider, nil
}
func (p *PDNSProvider) convertRRSetToEndpoints(rr pgo.RrSet) (endpoints []*endpoint.Endpoint, _ error) {
endpoints = []*endpoint.Endpoint{}
for _, record := range rr.Records {
// If a record is "Disabled", it's not supposed to be "visible"
if !record.Disabled {
endpoints = append(endpoints, endpoint.NewEndpointWithTTL(rr.Name, rr.Type_, endpoint.TTL(rr.Ttl), record.Content))
}
}
return endpoints, nil
}
// ConvertEndpointsToZones marshals endpoints into pdns compatible Zone structs
func (p *PDNSProvider) ConvertEndpointsToZones(eps []*endpoint.Endpoint, changetype pdnsChangeType) (zonelist []pgo.Zone, _ error) {
zonelist = []pgo.Zone{}
endpoints := make([]*endpoint.Endpoint, len(eps))
copy(endpoints, eps)
// Sort the endpoints array so we have deterministic inserts
sort.SliceStable(endpoints,
func(i, j int) bool {
// We only care about sorting endpoints with the same dnsname
if endpoints[i].DNSName == endpoints[j].DNSName {
return endpoints[i].RecordType < endpoints[j].RecordType
}
return endpoints[i].DNSName < endpoints[j].DNSName
})
zones, _, err := p.client.ListZones()
if err != nil {
return nil, err
}
filteredZones, residualZones := p.client.PartitionZones(zones)
// Sort the zone by length of the name in descending order, we use this
// property later to ensure we add a record to the longest matching zone
sort.SliceStable(filteredZones, func(i, j int) bool { return len(filteredZones[i].Name) > len(filteredZones[j].Name) })
// NOTE: Complexity of this loop is O(FilteredZones*Endpoints).
// A possibly faster implementation would be a search of the reversed
// DNSName in a trie of Zone names, which should be O(Endpoints), but at this point it's not
// necessary.
for _, zone := range filteredZones {
zone.Rrsets = []pgo.RrSet{}
for i := 0; i < len(endpoints); {
ep := endpoints[i]
dnsname := provider.EnsureTrailingDot(ep.DNSName)
if dnsname == zone.Name || strings.HasSuffix(dnsname, "."+zone.Name) {
// The assumption here is that there will only ever be one target
// per (ep.DNSName, ep.RecordType) tuple, which holds true for
// external-dns v5.0.0-alpha onwards
records := []pgo.Record{}
for _, t := range ep.Targets {
if ep.RecordType == "CNAME" {
t = provider.EnsureTrailingDot(t)
}
records = append(records, pgo.Record{Content: t})
}
rrset := pgo.RrSet{
Name: dnsname,
Type_: ep.RecordType,
Records: records,
Changetype: string(changetype),
}
// DELETEs explicitly forbid a TTL, therefore only PATCHes need the TTL
if changetype == PdnsReplace {
if int64(ep.RecordTTL) > int64(math.MaxInt32) {
return nil, errors.New("value of record TTL overflows, limited to int32")
}
if ep.RecordTTL == 0 {
// No TTL was specified for the record, we use the default
rrset.Ttl = int32(defaultTTL)
} else {
rrset.Ttl = int32(ep.RecordTTL)
}
}
zone.Rrsets = append(zone.Rrsets, rrset)
// "pop" endpoint if it's matched
endpoints = append(endpoints[0:i], endpoints[i+1:]...)
} else {
// If we didn't pop anything, we move to the next item in the list
i++
}
}
if len(zone.Rrsets) > 0 {
zonelist = append(zonelist, zone)
}
}
	// Unlike filteredZones, residualZones is not sorted by name length,
	// since we only care to remove endpoints that do not match the domain filter
for _, zone := range residualZones {
for i := 0; i < len(endpoints); {
ep := endpoints[i]
dnsname := provider.EnsureTrailingDot(ep.DNSName)
if dnsname == zone.Name || strings.HasSuffix(dnsname, "."+zone.Name) {
// "pop" endpoint if it's matched to a residual zone... essentially a no-op
log.Debugf("Ignoring Endpoint because it was matched to a zone that was not specified within Domain Filter(s): %s", dnsname)
endpoints = append(endpoints[0:i], endpoints[i+1:]...)
} else {
i++
}
}
}
// If we still have some endpoints left, it means we couldn't find a matching zone (filtered or residual) for them
// We warn instead of hard fail here because we don't want a misconfig to cause everything to go down
if len(endpoints) > 0 {
log.Warnf("No matching zones were found for the following endpoints: %+v", endpoints)
}
log.Debugf("Zone List generated from Endpoints: %+v", zonelist)
return zonelist, nil
}
// mutateRecords takes a list of endpoints and creates, replaces or deletes them based on the changetype
func (p *PDNSProvider) mutateRecords(endpoints []*endpoint.Endpoint, changetype pdnsChangeType) error {
zonelist, err := p.ConvertEndpointsToZones(endpoints, changetype)
if err != nil {
return err
}
for _, zone := range zonelist {
jso, err := json.Marshal(zone)
if err != nil {
log.Errorf("JSON Marshal for zone struct failed!")
} else {
log.Debugf("Struct for PatchZone:\n%s", string(jso))
}
resp, err := p.client.PatchZone(zone.Id, zone)
if err != nil {
log.Debugf("PDNS API response: %s", stringifyHTTPResponseBody(resp))
return err
}
}
return nil
}
// Records returns all DNS records controlled by the configured PDNS server (for all zones)
func (p *PDNSProvider) Records(ctx context.Context) (endpoints []*endpoint.Endpoint, _ error) {
zones, _, err := p.client.ListZones()
if err != nil {
return nil, err
}
filteredZones, _ := p.client.PartitionZones(zones)
for _, zone := range filteredZones {
z, _, err := p.client.ListZone(zone.Id)
if err != nil {
log.Warnf("Unable to fetch Records")
return nil, err
}
for _, rr := range z.Rrsets {
e, err := p.convertRRSetToEndpoints(rr)
if err != nil {
return nil, err
}
endpoints = append(endpoints, e...)
}
}
log.Debugf("Records fetched:\n%+v", endpoints)
return endpoints, nil
}
// ApplyChanges takes a list of changes (endpoints) and updates the PDNS server
// by sending the correct HTTP PATCH requests to a matching zone
func (p *PDNSProvider) ApplyChanges(ctx context.Context, changes *plan.Changes) error {
startTime := time.Now()
// Create
for _, change := range changes.Create {
log.Debugf("CREATE: %+v", change)
}
// We only attempt to mutate records if there are any to mutate. A
// call to mutate records with an empty list of endpoints is still a
// valid call and a no-op, but we might as well not make the call to
// prevent unnecessary logging
if len(changes.Create) > 0 {
// "Replacing" non-existent records creates them
err := p.mutateRecords(changes.Create, PdnsReplace)
if err != nil {
return err
}
}
// Update
for _, change := range changes.UpdateOld {
// Since PDNS "Patches", we don't need to specify the "old"
// record. The Update New change type will automatically take
// care of replacing the old RRSet with the new one We simply
// leave this logging here for information
log.Debugf("UPDATE-OLD (ignored): %+v", change)
}
for _, change := range changes.UpdateNew {
log.Debugf("UPDATE-NEW: %+v", change)
}
if len(changes.UpdateNew) > 0 {
err := p.mutateRecords(changes.UpdateNew, PdnsReplace)
if err != nil {
return err
}
}
// Delete
for _, change := range changes.Delete {
log.Debugf("DELETE: %+v", change)
}
if len(changes.Delete) > 0 {
err := p.mutateRecords(changes.Delete, PdnsDelete)
if err != nil {
return err
}
}
log.Debugf("Changes pushed out to PowerDNS in %s\n", time.Since(startTime))
return nil
} | |
post_fids.py | #|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of posting Market Price JSON data using Websockets """
import sys
import time
import getopt
import socket
import json
import websocket
import threading
import os
from threading import Thread, Event
# Global Default Variables
hostname = '127.0.0.1'
port = '15000'
user = 'root'
app_id = '256'
position = socket.gethostbyname(socket.gethostname())
service = 'DIST_CACHE'
ric = 'TEST.RIC'
# Global Variables
next_post_time = 0
web_socket_app = None
web_socket_open = False
post_id = 1
obj = None
def process_message(ws, message_json):
""" Parse at high level and output JSON of message """
message_type = message_json['Type']
if message_type == "Refresh":
if 'Domain' in message_json:
message_domain = message_json['Domain']
if message_domain == "Login":
process_login_response(ws, message_json)
elif message_type == "Ping":
pong_json = { 'Type':'Pong' }
ws.send(json.dumps(pong_json))
print("SENT:")
print(json.dumps(pong_json, sort_keys=True, indent=2, separators=(',', ':')))
# If our Login stream is now open, we can start sending posts.
global next_post_time
    if ('ID' in message_json and message_json['ID'] == 1 and next_post_time == 0 and
            ('State' not in message_json or
             (message_json['State']['Stream'] == "Open" and message_json['State']['Data'] == "Ok"))):
next_post_time = time.time() + 3
def process_login_response(ws, message_json):
""" Send post message """
send_market_price_post(ws)
def | (ws):
    """ Send a post message containing market-price content for the configured RIC """
    global post_id
if post_id==1:
msg_type = 'Refresh'
else:
msg_type = 'Update'
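    # The first post carries a full Refresh image to create the item;
    # subsequent posts send incremental Updates.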
mp_post_json = {
'ID': 1,
'Type':'Post',
'Domain':'MarketPrice',
'Key': {
'Name': ric,
'Service': service
},
'Ack':True,
'PostID':post_id,
'PostUserInfo': {
'Address':position, # Use IP address as the Post User Address.
'UserID':os.getpid() # Use process ID as the Post User Id.
},
'Message': {
'ID': 0,
'Type':msg_type,
'Domain':'MarketPrice',
"Solicited": (post_id>1),
'Fields': obj
}
}
ws.send(json.dumps(mp_post_json))
print("SENT:")
print(json.dumps(mp_post_json, sort_keys=True, indent=2, separators=(',', ':')))
post_id += 1
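    # Bump every field so the next post carries fresh values: floats grow by
    # 0.1 (rounded to two decimal places), integers by 1.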
for field in obj:
if type(obj[field]) is float:
obj[field]=round(obj[field]+0.1,2)
elif type(obj[field]) is int:
obj[field]+=1
def send_login_request(ws):
""" Generate a login request from command line data (or defaults) and send """
login_json = {
'ID': 1,
'Domain': 'Login',
'Key': {
'Name': '',
'Elements': {
'ApplicationId': '',
'Position': ''
}
}
}
login_json['Key']['Name'] = user
login_json['Key']['Elements']['ApplicationId'] = app_id
login_json['Key']['Elements']['Position'] = position
ws.send(json.dumps(login_json))
print("SENT:")
print(json.dumps(login_json, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
""" Called when message received, parse message into JSON for processing """
print("RECEIVED: ")
message_json = json.loads(message)
print(json.dumps(message_json, sort_keys=True, indent=2, separators=(',', ':')))
for singleMsg in message_json:
process_message(ws, singleMsg)
def on_error(ws, error):
""" Called when websocket error has occurred """
print(error)
def on_close(ws):
""" Called when websocket is closed """
global web_socket_open
print("WebSocket Closed")
web_socket_open = False
def on_open(ws):
""" Called when handshake is complete and websocket is open, send login """
print("WebSocket successfully connected!")
global web_socket_open
web_socket_open = True
send_login_request(ws)
if __name__ == "__main__":
# Get command line parameters
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["help", "host=", "port=", "app_id=", "user=", "position=", "ric=", "service="])
except getopt.GetoptError:
        print('Usage: post_fids.py [--host hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--ric ric_code] [--service service] [--help]')
sys.exit(2)
    for opt, arg in opts:
        if opt == "--help":
            print('Usage: post_fids.py [--host hostname] [--port port] [--app_id app_id] [--user user] [--position position] [--ric ric_code] [--service service] [--help]')
            sys.exit(0)
        elif opt == "--host":
            hostname = arg
        elif opt == "--port":
            port = arg
        elif opt == "--app_id":
            app_id = arg
        elif opt == "--user":
            user = arg
        elif opt == "--position":
            position = arg
        elif opt == "--service":
            service = arg
        elif opt == "--ric":
            ric = arg
try:
with open('fields.json', 'r') as myfile:
data=myfile.read()
except FileNotFoundError as fnf_error:
print(fnf_error)
sys.exit(2)
obj = json.loads(data)
# Start websocket handshake
ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
print("Connecting to WebSocket " + ws_address + " ...")
web_socket_app = websocket.WebSocketApp(ws_address, header=['User-Agent: Python'],
on_message=on_message,
on_error=on_error,
on_close=on_close,
subprotocols=['tr_json2'])
web_socket_app.on_open = on_open
# Event loop
wst = threading.Thread(target=web_socket_app.run_forever)
wst.start()
try:
while True:
time.sleep(1)
if next_post_time != 0 and time.time() > next_post_time:
send_market_price_post(web_socket_app)
next_post_time = time.time() + 3
except KeyboardInterrupt:
web_socket_app.close()
| send_market_price_post |
GL_TRANSFORMER.py | # GridPot code
# switch object class for integrating with a GridLAB-D simulation instance
# Author: sk4ld
import logging
import urllib2
logger = logging.getLogger(__name__)
from GL_obj import GL_obj
# base object class for integrating with a GridLAB-D simulation instance
class GL_TRANSFORMER(GL_obj):
def init_params(self):
# Here we define what we want to poll for this object.
# We dont necessarily want to have a setter for each one of these
# Nor do we necessarily have to display each of these to the HMI
self.params["status"] = ""
self.params["phases"] = ""
self.params["from"] = ""
self.params["to"] = ""
self.params["ambient_temperature"] = ""
self.params["winding_hot_spot_temperature"] = "" # Not sure exactly what this is
self.params["configuration "] = "" # This one is a config file, is complicated to update/set
    # Overridden HTTP display for the Conpot built-in HTTP HMI
def http_display(self):
ht_format = "<table border=0>\n"
ht_format += "<tr>\n"
ht_format += " <td>"+ self.obj_name +"</td>\n" | ht_format += " <td></td>\n"
ht_format += "</tr>\n"
for x in ('status', 'phases', 'from', 'to', 'ambient_temperature'):
ht_format += "<tr>\n"
ht_format += " <td>" + x + "</td>\n"
ht_format += " <td>" + self.params[x] + "</td>\n"
ht_format += "<tr>\n"
return ht_format | |
modify_registration_details_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package application
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/equinor/radix-cicd-canary/generated-client/models"
)
// ModifyRegistrationDetailsReader is a Reader for the ModifyRegistrationDetails structure.
type ModifyRegistrationDetailsReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *ModifyRegistrationDetailsReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewModifyRegistrationDetailsOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewModifyRegistrationDetailsBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 401:
result := NewModifyRegistrationDetailsUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 404:
result := NewModifyRegistrationDetailsNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 409:
result := NewModifyRegistrationDetailsConflict()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
// NewModifyRegistrationDetailsOK creates a ModifyRegistrationDetailsOK with default headers values
func NewModifyRegistrationDetailsOK() *ModifyRegistrationDetailsOK {
return &ModifyRegistrationDetailsOK{}
}
/* ModifyRegistrationDetailsOK describes a response with status code 200, with default header values.
Successful at modifying registration details
*/
type ModifyRegistrationDetailsOK struct {
Payload *models.ApplicationRegistration
}
func (o *ModifyRegistrationDetailsOK) Error() string {
return fmt.Sprintf("[PATCH /applications/{appName}][%d] modifyRegistrationDetailsOK %+v", 200, o.Payload)
}
func (o *ModifyRegistrationDetailsOK) GetPayload() *models.ApplicationRegistration {
return o.Payload
}
func (o *ModifyRegistrationDetailsOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ApplicationRegistration)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewModifyRegistrationDetailsBadRequest creates a ModifyRegistrationDetailsBadRequest with default headers values
func NewModifyRegistrationDetailsBadRequest() *ModifyRegistrationDetailsBadRequest {
return &ModifyRegistrationDetailsBadRequest{}
}
/* ModifyRegistrationDetailsBadRequest describes a response with status code 400, with default header values.
Invalid application
*/
type ModifyRegistrationDetailsBadRequest struct {
}
func (o *ModifyRegistrationDetailsBadRequest) Error() string {
return fmt.Sprintf("[PATCH /applications/{appName}][%d] modifyRegistrationDetailsBadRequest ", 400)
}
func (o *ModifyRegistrationDetailsBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewModifyRegistrationDetailsUnauthorized creates a ModifyRegistrationDetailsUnauthorized with default headers values
func NewModifyRegistrationDetailsUnauthorized() *ModifyRegistrationDetailsUnauthorized {
return &ModifyRegistrationDetailsUnauthorized{}
}
/* ModifyRegistrationDetailsUnauthorized describes a response with status code 401, with default header values.
Unauthorized
*/
type ModifyRegistrationDetailsUnauthorized struct {
}
func (o *ModifyRegistrationDetailsUnauthorized) Error() string {
return fmt.Sprintf("[PATCH /applications/{appName}][%d] modifyRegistrationDetailsUnauthorized ", 401)
}
func (o *ModifyRegistrationDetailsUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewModifyRegistrationDetailsNotFound creates a ModifyRegistrationDetailsNotFound with default headers values
func NewModifyRegistrationDetailsNotFound() *ModifyRegistrationDetailsNotFound {
return &ModifyRegistrationDetailsNotFound{}
}
/* ModifyRegistrationDetailsNotFound describes a response with status code 404, with default header values.
Not found
*/
type ModifyRegistrationDetailsNotFound struct {
}
func (o *ModifyRegistrationDetailsNotFound) Error() string {
return fmt.Sprintf("[PATCH /applications/{appName}][%d] modifyRegistrationDetailsNotFound ", 404)
}
func (o *ModifyRegistrationDetailsNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewModifyRegistrationDetailsConflict creates a ModifyRegistrationDetailsConflict with default headers values
func NewModifyRegistrationDetailsConflict() *ModifyRegistrationDetailsConflict |
/* ModifyRegistrationDetailsConflict describes a response with status code 409, with default header values.
Conflict
*/
type ModifyRegistrationDetailsConflict struct {
}
func (o *ModifyRegistrationDetailsConflict) Error() string {
return fmt.Sprintf("[PATCH /applications/{appName}][%d] modifyRegistrationDetailsConflict ", 409)
}
func (o *ModifyRegistrationDetailsConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
| {
return &ModifyRegistrationDetailsConflict{}
} |
lib.rs | //! The `logger` module configures `env_logger`
use {
lazy_static::lazy_static,
std::sync::{Arc, RwLock},
};
lazy_static! {
static ref LOGGER: Arc<RwLock<env_logger::Logger>> =
Arc::new(RwLock::new(env_logger::Logger::from_default_env()));
}
struct LoggerShim {}
impl log::Log for LoggerShim {
fn enabled(&self, metadata: &log::Metadata) -> bool {
LOGGER.read().unwrap().enabled(metadata)
}
fn log(&self, record: &log::Record) {
LOGGER.read().unwrap().log(record);
}
fn flush(&self) {}
}
fn replace_logger(logger: env_logger::Logger) {
log::set_max_level(logger.filter());
*LOGGER.write().unwrap() = logger;
let _ = log::set_boxed_logger(Box::new(LoggerShim {}));
}
// Configures logging with a specific filter, overriding RUST_LOG. The _RUST_LOG
// environment variable is read instead, so if it is set it takes precedence over
// the given filter.
// May be called at any time to re-configure the log filter.
pub fn setup_with(filter: &str) {
let logger =
env_logger::Builder::from_env(env_logger::Env::new().filter_or("_RUST_LOG", filter))
.format_timestamp_nanos()
.build();
replace_logger(logger);
}
// Configures logging with a default filter if RUST_LOG is not set
pub fn setup_with_default(filter: &str) {
let logger = env_logger::Builder::from_env(env_logger::Env::new().default_filter_or(filter))
.format_timestamp_nanos()
.build();
replace_logger(logger);
}
// Configures logging with the default filter "error" if RUST_LOG is not set
pub fn setup() {
setup_with_default("error");
}
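// A usage sketch (call these through whichever path this module is exposed at;
// `my_crate` below is a hypothetical target name):
//
//     setup();                           // filter from RUST_LOG, else "error"
//     setup_with_default("info");        // filter from RUST_LOG, else "info"
//     setup_with("warn,my_crate=trace"); // filter from _RUST_LOG if set, else as given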
// Configures file logging with a default filter if RUST_LOG is not set
//
// NOTE: This does not work at the moment, pending the resolution of https://github.com/env-logger-rs/env_logger/issues/208
pub fn setup_file_with_default(logfile: &str, filter: &str) | {
use std::fs::OpenOptions;
let file = OpenOptions::new()
.write(true)
.create(true)
.append(true)
.open(logfile)
.unwrap();
let logger = env_logger::Builder::from_env(env_logger::Env::new().default_filter_or(filter))
.format_timestamp_nanos()
.target(env_logger::Target::Pipe(Box::new(file)))
.build();
replace_logger(logger);
} |
|
CSharpTemplate.py |
import antlr4
from csharp.CSharp4Lexer import CSharp4Lexer
import re
def parseCSharp(code):
|
if __name__ == '__main__':
print parseCSharp("public Boolean SomeValue { get { return someValue; } set { someValue = value; } }")
print parseCSharp("Console.WriteLine('cat'); int mouse = 5; int cat = 0.4; int cow = 'c'; int moo = \"mouse\"; ")
print parseCSharp("int i = 4; // i is assigned the literal value of '4' \n int j = i // j is assigned the value of i. Since i is a variable, //it can change and is not a 'literal'")
try:
print parseCSharp('string `fixed = Regex.Replace(input, "\s*()","$1");');
except:
print "Error"
| code = code.replace('\\n', '\n')
parsedVersion = []
stream = antlr4.InputStream(code)
lexer = CSharp4Lexer(stream)
toks = antlr4.CommonTokenStream(lexer)
toks.fetch(500)
identifiers = {}
identCount = 0
for token in toks.tokens:
if token.type == 109:
parsedVersion += ["CODE_INTEGER"]
elif token.type == 111:
parsedVersion += ["CODE_REAL"]
elif token.type == 112:
parsedVersion += ["CODE_CHAR"]
elif token.type == 113:
parsedVersion += ["CODE_STRING"]
elif token.type == 9 or token.type == 7 or token.type == 6: # whitespace and comments and newline
pass
else:
parsedVersion += [str(token.text)]
return parsedVersion |
array.rs | use std::sync::Arc;
use chrono_tz::Tz;
use crate::{
binary::{Encoder, ReadEx},
errors::Result,
types::{
column::{column_data::{BoxColumnData, ArcColumnData}, list::List, ArcColumnWrapper, ColumnData},
SqlType, Value, ValueRef,
},
};
pub(crate) struct ArrayColumnData {
pub(crate) inner: ArcColumnData,
pub(crate) offsets: List<u64>,
}
impl ArrayColumnData {
pub(crate) fn load<R: ReadEx>(
reader: &mut R,
type_name: &str,
rows: usize,
tz: Tz,
) -> Result<Self> {
let mut offsets = List::with_capacity(rows);
offsets.resize(rows, 0_u64);
reader.read_bytes(offsets.as_mut())?;
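        // Offsets are cumulative: offsets[i] is one past the end of row i in
        // the flattened inner column, so the last offset gives the total
        // number of inner values to read.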
let size = match rows {
0 => 0,
_ => offsets.at(rows - 1) as usize,
};
let inner = <dyn ColumnData>::load_data::<ArcColumnWrapper, _>(reader, type_name, size, tz)?;
Ok(ArrayColumnData { inner, offsets })
}
}
impl ColumnData for ArrayColumnData {
fn sql_type(&self) -> SqlType {
let inner_type = self.inner.sql_type();
SqlType::Array(inner_type.into())
}
fn save(&self, encoder: &mut Encoder, start: usize, end: usize) {
let mut offset = 0_u64;
for i in start..end {
offset = self.offsets.at(i);
encoder.write(offset);
}
self.inner.save(encoder, 0, offset as usize);
}
fn len(&self) -> usize {
self.offsets.len()
}
fn push(&mut self, value: Value) {
if let Value::Array(_, vs) = value {
let offsets_len = self.offsets.len();
let prev = if offsets_len == 0 {
0_usize
} else {
self.offsets.at(offsets_len - 1) as usize
};
let inner_column = Arc::get_mut(&mut self.inner).unwrap();
self.offsets.push((prev + vs.len()) as u64);
for v in vs.iter() {
inner_column.push(v.clone());
}
} else {
panic!("value should be an array")
}
}
fn at(&self, index: usize) -> ValueRef {
let sql_type = self.inner.sql_type();
let start = if index > 0 {
self.offsets.at(index - 1) as usize
} else {
0_usize
};
let end = self.offsets.at(index) as usize;
        let mut vs = Vec::with_capacity(end - start);
for i in start..end {
let v = self.inner.at(i);
vs.push(v);
}
ValueRef::Array(sql_type.into(), Arc::new(vs))
}
fn clone_instance(&self) -> BoxColumnData |
unsafe fn get_internal(&self, pointers: &[*mut *const u8], level: u8) -> Result<()> {
if level == self.sql_type().level() {
*pointers[0] = self.offsets.as_ptr() as *const u8;
*(pointers[1] as *mut usize) = self.offsets.len();
Ok(())
} else {
self.inner.get_internal(pointers, level)
}
}
fn cast_to(&self, _this: &ArcColumnData, target: &SqlType) -> Option<ArcColumnData> {
if let SqlType::Array(inner_target) = target {
if let Some(inner) = self.inner.cast_to(&self.inner, inner_target) {
return Some(Arc::new(ArrayColumnData {
inner,
offsets: self.offsets.clone()
}))
}
}
None
}
}
#[cfg(test)]
mod test {
use std::io::Cursor;
use super::*;
use crate::{Block, types::Simple};
#[test]
fn test_write_and_read() {
let block = Block::<Simple>::new().column(
"vals",
vec![vec![7_u32, 8], vec![9, 1, 2], vec![3, 4, 5, 6]],
);
let mut encoder = Encoder::new();
block.write(&mut encoder, false);
let mut reader = Cursor::new(encoder.get_buffer_ref());
let rblock = Block::load(&mut reader, Tz::Zulu, false).unwrap();
assert_eq!(block, rblock);
}
}
| {
Box::new(Self {
inner: self.inner.clone(),
offsets: self.offsets.clone(),
})
} |
main.ts | import store from './state'
import buttonMap from './buttonMap'
import calc, { isNumeric } from './core'
import { last } from 'lodash' |
const keyMap = new Map<string, Function>()
const keypad = document.querySelector<HTMLElement>('.keypad')!
const operationChain = document.querySelector<HTMLInputElement>('.operation-chain')!
const result = document.querySelector<HTMLElement>('.result')!
function changeDisplayNumber() {
const { chain, isFinalResult } = store.getState()
operationChain.value = chain.join(' ')
if (isFinalResult) operationChain.focus()
operationChain.classList.toggle('isFinalResult', isFinalResult)
result.innerText = chain.length >= 3 && isNumeric(last(chain)!)
? `= ${calc(chain)}`
: ''
}
changeDisplayNumber()
store.subscribe(changeDisplayNumber)
// Add buttons to interface
buttonMap.forEach(({ value, action, key, ...otherProps }) => {
const button = document.createElement('button')
button.innerHTML = value
if (typeof action !== 'undefined') {
button.addEventListener('click', () => {
store.dispatch(action)
})
}
for (const prop in otherProps) {
const value = otherProps[prop]
if (prop === 'classname') button.className = value;
else if (prop === 'title') {
button.setAttribute('title', value);
button.setAttribute('aria-label', value);
}
else button.setAttribute(prop, value);
}
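  // Bind the configured keyboard key to the button's click handler so pointer
  // and keyboard input share a single dispatch path.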
if (key) {
const callback = () => button.click()
keyMap.set(key, callback)
}
keypad.append(button)
})
// Listen for keyboard keys
window.addEventListener('keydown', (event) => {
const keyCode = event.key
if (keyMap.has(keyCode)) {
keyMap.get(keyCode)()
}
}) | |
33.go | package leetcode
func search(nums []int, target int) int {
if len(nums) == 0 {
return -1
}
p := findPivot(nums)
if ret := binarySearch(nums, 0, p-1, target); ret != -1 {
return ret
} | }
return -1
}
func findPivot(nums []int) int {
lo, hi := 0, len(nums)-1
for lo < hi {
mid := (lo + hi) / 2
		if nums[mid] < nums[hi] { // mid..hi is in ascending order, so the pivot cannot lie to the right of mid
hi = mid
		} else { // mid..hi is out of order, so the pivot must lie in (mid, hi]
lo = mid + 1
}
}
return hi
}
func binarySearch(nums []int, lo, hi, target int) int {
for lo <= hi {
mid := (lo + hi) / 2
if nums[mid] == target {
return mid
} else if nums[mid] < target {
lo = mid + 1
} else {
hi = mid - 1
}
}
return -1
} | if ret := binarySearch(nums, p, len(nums)-1, target); ret != -1 {
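// Worked example: search([]int{4, 5, 6, 7, 0, 1, 2}, 0) finds the pivot at
// index 4 (the smallest element), misses in the left half [4 5 6 7], and the
// binary search over the right half [0 1 2] returns index 4.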
return ret |
ModificationReason4.go | package iso20022
// Modification reasons.
type ModificationReason4 struct {
// Specifies the reason why the transaction is modified.
Code *ModificationReason4Choice `xml:"Cd"`
// Provides additional reason information that cannot be provided in a structured field.
AdditionalReasonInformation *Max210Text `xml:"AddtlRsnInf,omitempty"`
}
func (m *ModificationReason4) AddCode() *ModificationReason4Choice {
m.Code = new(ModificationReason4Choice)
return m.Code | }
func (m *ModificationReason4) SetAdditionalReasonInformation(value string) {
m.AdditionalReasonInformation = (*Max210Text)(&value)
} | |
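// A usage sketch (hypothetical values):
//
//	reason := new(ModificationReason4)
//	reason.AddCode() // returns the new ModificationReason4Choice for further population
//	reason.SetAdditionalReasonInformation("free-form reason text")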
test_tables.py | """Test the miniencoding lib
A decent testing approach is to test the round trip with a random, valid
string of bytes. By taking this approach, the same error/bug would have to be
present in both the 'from' and 'to' functions, which, whilst possible, is unlikely.
"""
# pylint: disable=invalid-name
import random
import string
from miniencoding.tables import *
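# A sketch of the randomised approach described in the module docstring: draw
# random characters that are known to be valid for a charset (here the DEC
# SIXBIT repertoire exercised by the fixed-string test below, which is an
# assumption taken from that test) and assert that the round trip preserves
# them. The fixed seed keeps the test reproducible.
DEC_SIXBIT_CHARS = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_"


def test_DEC_SIXBIT_random_roundtrip():
    """ Test DEC_SIXBIT round trip with a random valid string """
    rng = random.Random(0)
    testString = "".join(rng.choice(DEC_SIXBIT_CHARS) for _ in range(256))
    assert toUnicode(DEC_SIXBIT, toCharset(DEC_SIXBIT, testString)) == testString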
def test_CDC1604_MAGTAPE_len():
""" Test CDC1604_MAGTAPE length """
assert len(CDC1604_MAGTAPE) == 64
def test_CDC1604_MAGTAPE():
""" Test CDC1604_MAGTAPE round trip """
testString = "?1234567890#@??? /STUVWXYZ?,%???-JKLMNOPQR0$*???&ABCDEFGHI0.¤???"
assert toUnicode(CDC1604_MAGTAPE, toCharset(CDC1604_MAGTAPE,
testString)) == testString
def test_CDC1604_PUNCHCARD_len():
""" Test CDC1604_PUNCHCARD length """
assert len(CDC1604_PUNCHCARD) == 64
def test_CDC1604_PUNCHCARD():
""" Test CDC1604_PUNCHCARD round trip """
testString = "?1234567890=-??? /STUVWXYZ?,(???—JKLMNOPQR0$*???+ABCDEFGHI0.)???"
assert toUnicode(CDC1604_PUNCHCARD, toCharset(CDC1604_PUNCHCARD,
testString)) == testString
def test_CDC1612_len():
""" Test CDC1612 length """
assert len(CDC1612) == 64
def test_CDC1612():
""" Test CDC1612 round trip """
testString = ":1234567890=≠≤![ /STUVWXYZ],(→≡~—JKLMNOPQR%$*↑↓>+ABCDEFGHI<.)≥?;"
assert toUnicode(CDC1612, toCharset(CDC1612, testString)) == testString
def test_DEC_SIXBIT_len():
""" Test DEC_SIXBIT length """
assert len(DEC_SIXBIT) == 64
def test_DEC_SIXBIT():
""" Test DEC_SIXBIT round trip """
testString = " !\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_"
assert toUnicode(DEC_SIXBIT, toCharset(DEC_SIXBIT, testString)) == testString
def test_EMCA1_len():
""" Test EMCA1 length """
assert len(EMCA1) == 64
def test_EMCA1():
""" Test EMCA1 round trip """
testString = " \t\n\v\f\r\x0e\x0f()*+,-./0123456789:;<=>?\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]\x1b\x7f"
assert toUnicode(EMCA1, toCharset(EMCA1, testString)) == testString
def test_ICL_len():
""" Test ICL length """
assert len(ICL) == 64
def test_ICL():
""" Test ICL round trip """
testString = "0123456789:;<=>? !\"#£%&'()*+,-./@ABCDEFGHIJKLMNOPQRSTUVWXYZ[$]↑←"
assert toUnicode(ICL, toCharset(ICL, testString)) == testString
def test_SIXBIT_len():
""" Test SIXBIT length """
assert len(SIXBIT) == 64
def test_SIXBIT():
""" Test SIXBIT round trip """
testString = "@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_ !\"#$%&'()*+,-./0123456789:;<=>?"
assert toUnicode(SIXBIT, toCharset(SIXBIT, testString)) == testString
def test_GOST_len():
""" Test GOST length """
assert len(GOST) == 64
def test_GOST():
""" Test GOST round trip """
testString = "0123456789+-/,. ⏨↑()×=;[]*‘’≠<>:АБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЫЬЭЮЯ\x7f"
assert toUnicode(GOST, toCharset(GOST, testString)) == testString
def test_GSM7_len():
""" Test GSM7 length """
assert len(GSM7) == 128
def test_GSM7():
""" Test GSM7 round trip """
testString = "@£$¥èéùìòÇ\nØø\rÅåΔ_ΦΓΛΩΠΨΣΘΞ\x07ÆæßÉ !\"#¤%&'()*+,-./0123456789:;<=>?¡ABCDEFGHIJKLMNOPQRSTUVWXYZÄÖÑܧ¿abcdefghijklmnopqrstuvwxyzäöñüà"
assert toUnicode(GSM7, toCharset(GSM7, testString)) == testString
def test_ASCII7_len():
""" Test ASCII7 length """
assert len(ASCII7) == 128
def test_ASCII7():
""" Test ASCII7 round trip """
testString = bytes(range(0, 128)).decode("utf-8")
assert toUnicode(ASCII7, toCharset(ASCII7, testString)) == testString
def test_IBM48_len():
""" Test IBM48 length """
assert len(IBM48) == 64
def test_IBM48():
""" Test IBM48 round trip """
testString = " 1234567890#@????/STUVWXYZ?,%???-JKLMNOPQR?$*???&ABCDEFGHI?.⌑???"
assert toUnicode(IBM48, toCharset(IBM48, testString)) == testString
def test_IBM704_len():
""" Test IBM704 length """
assert len(IBM704) == 64
def test_IBM704():
""" Test IBM704 round trip """
testString = "0123456789?#@???&ABCDEFGHI?.⌑???-JKLMNOPQR?$*??? /STUVWXYZ?,%???"
assert toUnicode(IBM704, toCharset(IBM704, testString)) == testString
def test_IBM7090_len():
""" Test IBM7090 length """
assert len(IBM7090) == 64
def test_IBM7090():
""" Test IBM7090 round trip """
testString = "0123456789?=\"???&ABCDEFGHI0.)???-JKLMNOPQR0$*??? /STUVWXYZ±,(???"
assert toUnicode(IBM7090, toCharset(IBM7090, testString)) == testString
def test_IBM1401_len():
""" Test IBM1401 length """
assert len(IBM1401) == 64
def test_IBM1401():
""" Test IBM1401 round trip """
testString = " 1234567890#@:>√¢/STUVWXYZ‡,%='\"-JKLMNOPQR!$*);Δ&ABCDEFGHI?.⌑(<⯒"
assert toUnicode(IBM1401, toCharset(IBM1401, testString)) == testString
def test_GBCD_len():
""" Test GBCD length """
assert len(GBCD) == 64
def test_GBCD():
""" Test GBCD round trip """
tes | 23456789[#@:>? ABCDEFGHI&.](<\\^JKLMNOPQR-$*);'+/STUVWXYZ_,%=\"!"
assert toUnicode(GBCD, toCharset(GBCD, testString)) == testString
def test_BURROUGHS_B5500_len():
""" Test BURROUGHS_B5500 length """
assert len(BURROUGHS_B5500) == 64
def test_BURROUGHS_B5500():
""" Test BURROUGHS_B5500 round trip """
testString = "0123456789#@?:>≥+ABCDEFGHI.[&(<←×JKLMNOPQR$*-);≤ /STUVWXYZ,%≠=]\""
assert toUnicode(BURROUGHS_B5500, toCharset(BURROUGHS_B5500, testString)) == testString
def test_CP353_len():
""" Test CP353 length """
assert len(CP353) == 64
def test_CP353():
""" Test CP353 round trip """
testString = " 1234567890#@:>√␢/STUVWXYZ‡,%γ\\⧻-JKLMNOPQR!#*];Δ&ABCDEFGHI?.⌑[<⯒"
assert toUnicode(CP353, toCharset(CP353, testString)) == testString
def test_CP355_len():
""" Test CP355 length """
assert len(CP355) == 64
def test_CP355():
""" Test CP355 round trip """
testString = " 1234567890#????@/STUVWXYZ‡,?γ??-JKLMNOPQR<$????&ABCDEFGHI).????"
assert toUnicode(CP355, toCharset(CP355, testString)) == testString
def test_CP357_len():
""" Test CP357 length """
assert len(CP357) == 64
def test_CP357():
""" Test CP357 round trip """
testString = " 1234567890=????'/STUVWXYZ‡,????-JKLMNOPQR!$????+ABCDEFGHI?.????"
assert toUnicode(CP357, toCharset(CP357, testString)) == testString
def test_CP358_len():
""" Test CP358 length """
assert len(CP358) == 64
def test_CP358():
""" Test CP358 round trip """
testString = " 1234567890'????!/STUVWXYZ‡,????-JKLMNOPQR<;????=ABCDEFGHI>.????"
assert toUnicode(CP358, toCharset(CP358, testString)) == testString
def test_CP359_len():
""" Test CP359 length """
assert len(CP359) == 64
def test_CP359():
""" Test CP359 round trip """
testString = " 1234567890#????@/STUVWXYZ?,????-JKLMNOPQR?$????&ABCDEFGHI?.????"
assert toUnicode(CP359, toCharset(CP359, testString)) == testString
| tString = "01 |
tests.rs | use crate::sync::atomic::{AtomicBool, Ordering};
use crate::sync::mpsc::channel;
use crate::sync::{Arc, Condvar, Mutex};
use crate::thread;
use crate::time::Duration;
#[test]
fn smoke() {
let c = Condvar::new();
c.notify_one();
c.notify_all();
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn notify_one() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
let c = Arc::new(Condvar::new());
let c2 = c.clone();
let g = m.lock().unwrap();
let _t = thread::spawn(move || {
let _g = m2.lock().unwrap();
c2.notify_one();
});
let g = c.wait(g).unwrap();
drop(g);
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn notify_all() {
const N: usize = 10;
let data = Arc::new((Mutex::new(0), Condvar::new()));
let (tx, rx) = channel();
for _ in 0..N {
let data = data.clone();
let tx = tx.clone();
thread::spawn(move || {
let &(ref lock, ref cond) = &*data;
let mut cnt = lock.lock().unwrap();
*cnt += 1;
if *cnt == N {
tx.send(()).unwrap();
}
while *cnt != 0 {
cnt = cond.wait(cnt).unwrap();
}
tx.send(()).unwrap();
});
}
drop(tx);
let &(ref lock, ref cond) = &*data;
rx.recv().unwrap();
let mut cnt = lock.lock().unwrap();
*cnt = 0;
cond.notify_all();
drop(cnt);
for _ in 0..N {
rx.recv().unwrap();
}
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_while() {
let pair = Arc::new((Mutex::new(false), Condvar::new()));
let pair2 = pair.clone();
// Inside of our lock, spawn a new thread, and then wait for it to start.
thread::spawn(move || {
let &(ref lock, ref cvar) = &*pair2;
let mut started = lock.lock().unwrap();
*started = true;
// We notify the condvar that the value has changed.
cvar.notify_one();
});
// Wait for the thread to start up.
let &(ref lock, ref cvar) = &*pair;
let guard = cvar.wait_while(lock.lock().unwrap(), |started| !*started);
assert!(*guard.unwrap());
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_wait() {
let m = Arc::new(Mutex::new(()));
let c = Arc::new(Condvar::new());
loop {
let g = m.lock().unwrap();
let (_g, no_timeout) = c.wait_timeout(g, Duration::from_millis(1)).unwrap();
// spurious wakeups mean this isn't necessarily true
// so execute test again, if not timeout
if !no_timeout.timed_out() {
continue;
}
break;
}
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_while_wait() |
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_while_instant_satisfy() {
let m = Arc::new(Mutex::new(()));
let c = Arc::new(Condvar::new());
let g = m.lock().unwrap();
let (_g, wait) = c.wait_timeout_while(g, Duration::from_millis(0), |_| false).unwrap();
// ensure it didn't time-out even if we were not given any time.
assert!(!wait.timed_out());
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_while_wake() {
let pair = Arc::new((Mutex::new(false), Condvar::new()));
let pair_copy = pair.clone();
let &(ref m, ref c) = &*pair;
let g = m.lock().unwrap();
let _t = thread::spawn(move || {
let &(ref lock, ref cvar) = &*pair_copy;
let mut started = lock.lock().unwrap();
thread::sleep(Duration::from_millis(1));
*started = true;
cvar.notify_one();
});
let (g2, wait) = c
.wait_timeout_while(g, Duration::from_millis(u64::MAX), |&mut notified| !notified)
.unwrap();
    // ensure it didn't time out: the helper thread sets the flag and notifies us.
assert!(!wait.timed_out());
assert!(*g2);
}
#[test]
#[cfg_attr(target_os = "emscripten", ignore)]
fn wait_timeout_wake() {
let m = Arc::new(Mutex::new(()));
let c = Arc::new(Condvar::new());
loop {
let g = m.lock().unwrap();
let c2 = c.clone();
let m2 = m.clone();
let notified = Arc::new(AtomicBool::new(false));
let notified_copy = notified.clone();
let t = thread::spawn(move || {
let _g = m2.lock().unwrap();
thread::sleep(Duration::from_millis(1));
notified_copy.store(true, Ordering::SeqCst);
c2.notify_one();
});
let (g, timeout_res) = c.wait_timeout(g, Duration::from_millis(u64::MAX)).unwrap();
assert!(!timeout_res.timed_out());
// spurious wakeups mean this isn't necessarily true
// so execute test again, if not notified
if !notified.load(Ordering::SeqCst) {
t.join().unwrap();
continue;
}
drop(g);
t.join().unwrap();
break;
}
}
#[test]
#[should_panic]
#[cfg(all(unix, not(target_os = "linux"), not(target_os = "android")))]
fn two_mutexes() {
let m = Arc::new(Mutex::new(()));
let m2 = m.clone();
let c = Arc::new(Condvar::new());
let c2 = c.clone();
let mut g = m.lock().unwrap();
let _t = thread::spawn(move || {
let _g = m2.lock().unwrap();
c2.notify_one();
});
g = c.wait(g).unwrap();
drop(g);
let m = Mutex::new(());
let _ = c.wait(m.lock().unwrap()).unwrap();
}
| {
let m = Arc::new(Mutex::new(()));
let c = Arc::new(Condvar::new());
let g = m.lock().unwrap();
let (_g, wait) = c.wait_timeout_while(g, Duration::from_millis(1), |_| true).unwrap();
// no spurious wakeups. ensure it timed-out
assert!(wait.timed_out());
} |
verify.go | // Copyright 2020 The OPA Authors. All rights reserved.
// Use of this source code is governed by an Apache2
// license that can be found in the LICENSE file.
// Package bundle provide helpers that assist in the bundle signature verification process
package bundle
import (
"bytes"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/open-policy-agent/opa/internal/jwx/jwa"
"github.com/open-policy-agent/opa/internal/jwx/jws"
"github.com/open-policy-agent/opa/internal/jwx/jws/verify"
"github.com/open-policy-agent/opa/util"
"github.com/pkg/errors"
)
const defaultVerifierID = "_default"
var verifiers map[string]Verifier
// Verifier is the interface expected for implementations that verify bundle signatures.
type Verifier interface {
VerifyBundleSignature(SignaturesConfig, *VerificationConfig) (map[string]FileInfo, error)
}
// VerifyBundleSignature will retrieve the Verifier implementation based
// on the Plugin specified in SignaturesConfig, and call its implementation
// of VerifyBundleSignature. VerifyBundleSignature verifies the bundle signature
// using the given public keys or secret. If a signature is verified, it keeps
// track of the files specified in the JWT payload
func VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
	// the default implementation does not return a nil map, so don't
	// do it here either
files := make(map[string]FileInfo)
var plugin string
// for backwards compatibility, check if there is no plugin specified, and use default
if sc.Plugin == "" {
plugin = defaultVerifierID
} else {
plugin = sc.Plugin
}
verifier, err := GetVerifier(plugin)
if err != nil {
return files, err
}
return verifier.VerifyBundleSignature(sc, bvc)
}
// DefaultVerifier is the default bundle verification implementation. It verifies bundles by checking
// the JWT signature using a locally-accessible public key.
type DefaultVerifier struct{}
// VerifyBundleSignature verifies the bundle signature using the given public keys or secret.
// If a signature is verified, it keeps track of the files specified in the JWT payload
func (*DefaultVerifier) VerifyBundleSignature(sc SignaturesConfig, bvc *VerificationConfig) (map[string]FileInfo, error) {
files := make(map[string]FileInfo)
if len(sc.Signatures) == 0 {
return files, fmt.Errorf(".signatures.json: missing JWT (expected exactly one)")
}
if len(sc.Signatures) > 1 {
return files, fmt.Errorf(".signatures.json: multiple JWTs not supported (expected exactly one)")
}
for _, token := range sc.Signatures {
payload, err := verifyJWTSignature(token, bvc)
if err != nil {
return files, err
}
for _, file := range payload.Files {
files[file.Name] = file
}
}
return files, nil
}
func verifyJWTSignature(token string, bvc *VerificationConfig) (*DecodedSignature, error) {
// decode JWT to check if the header specifies the key to use and/or if claims have the scope.
parts, err := jws.SplitCompact(token)
if err != nil {
return nil, err
}
var decodedHeader []byte
if decodedHeader, err = base64.RawURLEncoding.DecodeString(parts[0]); err != nil {
return nil, errors.Wrap(err, "failed to base64 decode JWT headers")
}
var hdr jws.StandardHeaders
if err := json.Unmarshal(decodedHeader, &hdr); err != nil {
return nil, errors.Wrap(err, "failed to parse JWT headers")
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, err
}
var ds DecodedSignature
if err := json.Unmarshal(payload, &ds); err != nil {
return nil, err
}
// check for the id of the key to use for JWT signature verification
// first in the OPA config. If not found, then check the JWT kid.
keyID := bvc.KeyID
if keyID == "" {
keyID = hdr.KeyID
}
if keyID == "" {
// If header has no key id, check the deprecated key claim.
keyID = ds.KeyID
}
if keyID == "" {
return nil, fmt.Errorf("verification key ID is empty")
}
// now that we have the keyID, fetch the actual key
keyConfig, err := bvc.GetPublicKey(keyID)
if err != nil {
return nil, err
}
// verify JWT signature
alg := jwa.SignatureAlgorithm(keyConfig.Algorithm)
key, err := verify.GetSigningKey(keyConfig.Key, alg)
if err != nil {
return nil, err
}
_, err = jws.Verify([]byte(token), alg, key)
if err != nil {
return nil, err
}
// verify the scope
scope := bvc.Scope
if scope == "" {
scope = keyConfig.Scope
}
if ds.Scope != scope {
return nil, fmt.Errorf("scope mismatch")
}
return &ds, nil
}
// VerifyBundleFile verifies the hash of a file in the bundle matches to that provided in the bundle's signature
func VerifyBundleFile(path string, data bytes.Buffer, files map[string]FileInfo) error {
var file FileInfo
var ok bool
if file, ok = files[path]; !ok {
return fmt.Errorf("file %v not included in bundle signature", path)
}
if file.Algorithm == "" {
return fmt.Errorf("no hashing algorithm provided for file %v", path)
}
hash, err := NewSignatureHasher(HashingAlgorithm(file.Algorithm))
if err != nil {
return err
}
// hash the file content
// For unstructured files, hash the byte stream of the file
// For structured files, read the byte stream and parse into a JSON structure;
// then recursively order the fields of all objects alphabetically and then apply
// the hash function to result to compute the hash. This ensures that the digital signature is
// independent of whitespace and other non-semantic JSON features.
var value interface{}
if IsStructuredDoc(path) | else {
value = data.Bytes()
}
bs, err := hash.HashFile(value)
if err != nil {
return err
}
// compare file hash with same file in the JWT payloads
fb, err := hex.DecodeString(file.Hash)
if err != nil {
return err
}
if !bytes.Equal(fb, bs) {
return fmt.Errorf("%v: digest mismatch (want: %x, got: %x)", path, fb, bs)
}
delete(files, path)
return nil
}
// GetVerifier returns the Verifier registered under the given id
func GetVerifier(id string) (Verifier, error) {
verifier, ok := verifiers[id]
if !ok {
return nil, fmt.Errorf("no verifier exists under id %s", id)
}
return verifier, nil
}
// RegisterVerifier registers a Verifier under the given id
func RegisterVerifier(id string, v Verifier) error {
if id == defaultVerifierID {
return fmt.Errorf("verifier id %s is reserved, use a different id", id)
}
verifiers[id] = v
return nil
}
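// A usage sketch (MyVerifier is a hypothetical Verifier implementation):
//
//	if err := RegisterVerifier("my-plugin", &MyVerifier{}); err != nil {
//		// the id was reserved; choose another one
//	}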
func init() {
verifiers = map[string]Verifier{
defaultVerifierID: &DefaultVerifier{},
}
}
| {
err := util.Unmarshal(data.Bytes(), &value)
if err != nil {
return err
}
} |
server.go | package main
import (
"context"
"github.com/xwzy/grpc-example/api"
"google.golang.org/grpc"
"log"
"net"
)
const port = ":50051"
type server struct {
hello.UnimplementedHelloServer
}
func (s *server) GetGreeting(ctx context.Context, in *hello.HelloRequest) (*hello.HelloReply, error) {
log.Printf("Received: %v", in.GetName())
return &hello.HelloReply{Message: "Hello " + in.GetName()}, nil
}
func | () {
listen, err := net.Listen("tcp", port)
if err != nil {
log.Fatalf("failed to listen: %v", err)
}
s := grpc.NewServer()
hello.RegisterHelloServer(s, &server{})
if err := s.Serve(listen); err != nil {
log.Fatalf("failed to serve: %v", err)
}
}
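// A client-side sketch (assumes the generated hello package above and a
// server already listening on port 50051):
//
//	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
//	if err != nil {
//		log.Fatalf("failed to dial: %v", err)
//	}
//	defer conn.Close()
//	c := hello.NewHelloClient(conn)
//	r, err := c.GetGreeting(context.Background(), &hello.HelloRequest{Name: "world"})
//	if err != nil {
//		log.Fatalf("request failed: %v", err)
//	}
//	log.Println(r.GetMessage())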
| main |
routes.py | import urllib
from io import BytesIO
import requests
from flask import (Blueprint, current_app, jsonify, make_response,
render_template, request)
from .helpers import prepare_image_for_json
bp = Blueprint('routes', __name__, url_prefix='')
@bp.route('/', methods=['GET'])
def home():
return render_template('home.html')
@bp.route('/inpaint', methods=['GET', 'POST'])
def inpaint():
|
@bp.route('/cut', methods=['GET', 'POST'])
def cut():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/cut'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('cut.html')
@bp.route('/mask', methods=['GET', 'POST'])
def mask():
if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/mask'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('mask.html')
| if request.method == 'POST':
prepared_image = prepare_image_for_json(request.files['image'])
json = {'image': prepared_image}
url = current_app.config.get('INPAINT_API_URL') + 'api/inpaint'
api_response = requests.post(
url, json=json, timeout=60)
return make_response(jsonify(api_response.json()), 200)
elif request.method == 'GET':
return render_template('inpaint.html') |
export_utils.py | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""Utils exporting data from AFF4 to the rest of the world."""
import os
import Queue
import stat
import time
import logging
from grr.lib import aff4
from grr.lib import rdfvalue
from grr.lib import serialize
from grr.lib import threadpool
from grr.lib import type_info
from grr.lib import utils
from grr.lib.aff4_objects import aff4_grr
BUFFER_SIZE = 16 * 1024 * 1024
def GetAllClients(token=None):
"""Return a list of all client urns."""
results = []
for urn in aff4.FACTORY.Open(aff4.ROOT_URN, token=token).ListChildren():
try:
results.append(rdfvalue.ClientURN(urn))
except type_info.TypeValueError:
pass
return results
class IterateAllClientUrns(object):
"""Class to iterate over all URNs."""
THREAD_POOL_NAME = "ClientUrnIter"
QUEUE_TIMEOUT = 30
def __init__(self, func=None, max_threads=10, token=None):
"""Iterate over all clients in a threadpool.
Args:
func: A function to call with each client urn.
max_threads: Number of threads to use.
token: Auth token.
Raises:
RuntimeError: If function not specified.
"""
self.thread_pool = threadpool.ThreadPool.Factory(self.THREAD_POOL_NAME,
max_threads)
self.thread_pool.Start()
self.token = token
self.func = func
self.broken_subjects = [] # Entries that are broken or fail to run.
self.out_queue = Queue.Queue()
def GetInput(self):
"""Yield client urns."""
clients = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(clients))
return clients
def Run(self):
"""Run the iteration."""
count = 0
for count, input_data in enumerate(self.GetInput()):
if count % 2000 == 0:
logging.debug("%d processed.", count)
args = (input_data, self.out_queue, self.token)
self.thread_pool.AddTask(target=self.IterFunction, args=args,
name=self.THREAD_POOL_NAME)
while count >= 0:
try:
# We only use the timeout to wait if we got to the end of the Queue but
# didn't process everything yet.
out = self.out_queue.get(timeout=self.QUEUE_TIMEOUT, block=True)
if out:
yield out
count -= 1
except Queue.Empty:
break
# Join and stop to clean up the threadpool.
self.thread_pool.Stop()
def IterFunction(self, *args):
"""Function to run on each input. This can be overridden."""
self.func(*args)
class IterateAllClients(IterateAllClientUrns):
|
def DownloadFile(file_obj, target_path, buffer_size=BUFFER_SIZE):
"""Download an aff4 file to the local filesystem overwriting it if it exists.
Args:
file_obj: An aff4 object that supports the file interface (Read, Seek)
target_path: Full path of file to write to.
buffer_size: Read in chunks this size.
"""
logging.info(u"Downloading: %s to: %s", file_obj.urn, target_path)
  target_file = open(target_path, "wb")
file_obj.Seek(0)
count = 0
data_buffer = file_obj.Read(buffer_size)
while data_buffer:
target_file.write(data_buffer)
data_buffer = file_obj.Read(buffer_size)
count += 1
if not count % 3:
logging.debug(u"Downloading: %s: %s done", file_obj.urn,
utils.FormatNumberAsString(count * buffer_size))
target_file.close()
def RecursiveDownload(dir_obj, target_dir, max_depth=10, depth=1,
overwrite=False, max_threads=10):
"""Recursively downloads a file entry to the target path.
Args:
dir_obj: An aff4 object that contains children.
target_dir: Full path of the directory to write to.
max_depth: Depth to download to. 1 means just the directory itself.
depth: Current depth of recursion.
overwrite: Should we overwrite files that exist.
max_threads: Use this many threads to do the downloads.
"""
if (not isinstance(dir_obj, aff4.AFF4Volume) or
isinstance(dir_obj, aff4.HashImage)):
return
# Reuse the same threadpool as we call recursively.
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
for sub_file_entry in dir_obj.OpenChildren():
path_elements = [target_dir]
sub_target_dir = u"/".join(path_elements)
try:
# Any file-like object with data in AFF4 should inherit AFF4Stream.
if isinstance(sub_file_entry, aff4.AFF4Stream):
args = (sub_file_entry.urn, sub_target_dir, sub_file_entry.token,
overwrite)
thread_pool.AddTask(target=CopyAFF4ToLocal, args=args,
name="Downloader")
elif "Container" in sub_file_entry.behaviours:
if depth >= max_depth: # Don't go any deeper.
continue
try:
os.makedirs(sub_target_dir)
except OSError:
pass
RecursiveDownload(sub_file_entry, sub_target_dir, overwrite=overwrite,
depth=depth + 1)
except IOError:
logging.exception("Unable to download %s", sub_file_entry.urn)
finally:
sub_file_entry.Close()
# Join and stop the threadpool.
if depth <= 1:
thread_pool.Stop()
def DownloadCollection(coll_path, target_path, token=None, overwrite=False,
dump_client_info=False, flatten=False,
max_threads=10):
"""Iterate through a Collection object downloading all files.
Args:
coll_path: Path to an AFF4 collection.
target_path: Base directory to write to.
token: Token for access.
overwrite: If True, overwrite existing files.
dump_client_info: If True, this will detect client paths, and dump a yaml
version of the client object to the root path. This is useful for seeing
the hostname/users of the machine the client id refers to.
flatten: If True, produce a "files" flat folder with links to all the found
files.
max_threads: Use this many threads to do the downloads.
"""
completed_clients = set()
try:
coll = aff4.FACTORY.Open(coll_path, aff4_type="RDFValueCollection",
token=token)
except IOError:
logging.error("%s is not a valid collection. Typo? "
"Are you sure something was written to it?", coll_path)
return
thread_pool = threadpool.ThreadPool.Factory("Downloader", max_threads)
thread_pool.Start()
logging.info("Expecting to download %s files", coll.size)
# Collections can include anything they want, but we only handle RDFURN and
# StatEntry entries in this function.
for grr_message in coll:
source = None
# If a raw message, work out the type.
if isinstance(grr_message, rdfvalue.GrrMessage):
source = grr_message.source
grr_message = grr_message.payload
# Collections can contain AFF4ObjectSummary objects which encapsulate
# RDFURNs and StatEntrys.
if isinstance(grr_message, rdfvalue.AFF4ObjectSummary):
urn = grr_message.urn
elif isinstance(grr_message, rdfvalue.RDFURN):
urn = grr_message
elif isinstance(grr_message, rdfvalue.StatEntry):
urn = rdfvalue.RDFURN(grr_message.aff4path)
elif isinstance(grr_message, rdfvalue.FileFinderResult):
urn = rdfvalue.RDFURN(grr_message.stat_entry.aff4path)
elif isinstance(grr_message, rdfvalue.RDFBytes):
try:
os.makedirs(target_path)
except OSError:
pass
try:
# We just dump out bytes and carry on.
client_id = source.Split()[0]
with open(os.path.join(target_path, client_id), "wb") as fd:
fd.write(str(grr_message))
except AttributeError:
pass
continue
else:
continue
# Handle dumping client info, but only once per client.
client_id = urn.Split()[0]
re_match = aff4.AFF4Object.VFSGRRClient.CLIENT_ID_RE.match(client_id)
if dump_client_info and re_match and client_id not in completed_clients:
args = (rdfvalue.ClientURN(client_id), target_path, token, overwrite)
thread_pool.AddTask(target=DumpClientYaml, args=args,
name="ClientYamlDownloader")
completed_clients.add(client_id)
# Now queue downloading the actual files.
args = (urn, target_path, token, overwrite)
if flatten:
target = CopyAndSymlinkAFF4ToLocal
else:
target = CopyAFF4ToLocal
thread_pool.AddTask(target=target, args=args, name="Downloader")
# Join and stop the threadpool.
thread_pool.Stop()
def CopyAFF4ToLocal(aff4_urn, target_dir, token=None, overwrite=False):
"""Copy an AFF4 object that supports a read interface to local filesystem.
Args:
aff4_urn: URN of thing to copy.
target_dir: Directory to copy the file to.
token: Auth token.
overwrite: If True overwrite the file if it exists.
Returns:
If aff4_urn points to a file, returns path to the downloaded file.
Otherwise returns None.
By default file will only be overwritten if file size differs.
"""
try:
fd = aff4.FACTORY.Open(aff4_urn, token=token)
filepath = os.path.join(target_dir, fd.urn.Path()[1:])
# If urn points to a directory, just create it.
if isinstance(fd, aff4.VFSDirectory):
try:
os.makedirs(filepath)
except OSError:
pass
return None
# If urn points to a file, download it.
elif isinstance(fd, aff4.AFF4Stream):
if not os.path.isfile(filepath):
try:
# Ensure directory exists.
os.makedirs(os.path.dirname(filepath))
except OSError:
pass
DownloadFile(fd, filepath)
elif (os.stat(filepath)[stat.ST_SIZE] != fd.Get(fd.Schema.SIZE) or
overwrite):
# We should overwrite because user said, or file sizes differ.
DownloadFile(fd, filepath)
else:
logging.info("File %s exists, skipping", filepath)
return filepath
else:
raise RuntimeError("Opened urn is neither a downloaded file nor a "
"directory: %s" % aff4_urn)
except IOError as e:
logging.exception("Failed to read %s due to %s", aff4_urn, e)
raise
def CopyAndSymlinkAFF4ToLocal(aff4_urn, target_dir, token=None,
overwrite=False):
path = CopyAFF4ToLocal(aff4_urn, target_dir, token=token,
overwrite=overwrite)
if path:
files_output_dir = os.path.join(target_dir, "files")
try:
os.makedirs(files_output_dir)
except OSError:
pass
unique_name = "_".join(aff4_urn.Split())
symlink_path = os.path.join(files_output_dir, unique_name)
try:
os.symlink(path, symlink_path)
except OSError:
logging.exception("Can't create symlink to a file: %s -> %s",
symlink_path, path)
def DumpClientYaml(client_urn, target_dir, token=None, overwrite=False):
"""Dump a yaml file containing client info."""
fd = aff4.FACTORY.Open(client_urn, "VFSGRRClient", token=token)
dirpath = os.path.join(target_dir, fd.urn.Split()[0])
try:
# Due to threading this can actually be created by another thread.
os.makedirs(dirpath)
except OSError:
pass
filepath = os.path.join(dirpath, "client_info.yaml")
if not os.path.isfile(filepath) or overwrite:
with open(filepath, "w") as out_file:
out_file.write(serialize.YamlDumper(fd))
| """Class to iterate over all GRR Client objects."""
def __init__(self, max_age, client_chunksize=25, **kwargs):
"""Iterate over all clients in a threadpool.
Args:
max_age: Maximum age in seconds of clients to check.
      client_chunksize: Number of client objects to open per batch.
**kwargs: Arguments passed to init.
"""
super(IterateAllClients, self).__init__(**kwargs)
self.client_chunksize = client_chunksize
self.max_age = max_age
def GetInput(self):
"""Yield client urns."""
client_list = GetAllClients(token=self.token)
logging.debug("Got %d clients", len(client_list))
for client_group in utils.Grouper(client_list, self.client_chunksize):
for fd in aff4.FACTORY.MultiOpen(client_group, mode="r",
aff4_type="VFSGRRClient",
token=self.token):
if isinstance(fd, aff4_grr.VFSGRRClient):
# Skip if older than max_age
oldest_time = (time.time() - self.max_age) * 1e6
          if fd.Get(aff4_grr.VFSGRRClient.SchemaCls.PING) >= oldest_time:
yield fd |
utils.ts | export const DEFAULT_PATH = Object.freeze('/'); |
||
OsmTileLayer.js | import BaseTileLayer from './BaseTileLayer';
import params from './../param';
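// Tile layer backed by OpenStreetMap servers; the style name selects a URL
// template from the bundled params.Osm table (defaulting to 'Normal').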
class | extends BaseTileLayer {
constructor(id, options = {}) {
const style = options.style || 'Normal';
options.urlTemplate = params.Osm[style].url;
super(id, options);
}
}
export default OsmTileLayer;
| OsmTileLayer |
generic.py | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import re
import sys
import yaml
import traceback
from twccli.twcc.session import Session2
from twccli.twcc.util import isNone, isDebug, timezone2local, send_ga
from twccli.twcc.clidriver import ServiceOperation
from twccli.twccli import logger
# change to new-style-class https://goo.gl/AYgxqp
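# Base class for TWCC REST services: subclasses describe an endpoint by
# setting _func_, url_dic and data_dic, then call list()/queryById()/delete().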
class GenericService(object):
def __init__(self, api_key=None, cluster_tag=None, skip_session=False):
# current working information
self._csite_ = "__UNDEF__"
self._func_ = self.__class__.__name__
self._res_type_ = "json"
self._debug_ = isDebug()
self._api_key_ = Session2._getApiKey(api_key)
self._user_agent = Session2._getUserAgent()
self.twcc = ServiceOperation(api_key=api_key)
self.twcc._debug = isDebug()
self.cluster_tag = cluster_tag
if isNone(self.cluster_tag):
self.cluster_tag = "CNTR"
if not skip_session:
self.twcc_session = Session2()
self._project_code = self.twcc_session.getDefaultProject()
self.project_ids = self.twcc_session.twcc_proj_id
self._project_id = self.twcc_session.twcc_proj_id[self.cluster_tag]
        # set default project id
self._csite_ = Session2._getClusterName(self.cluster_tag)
# map to url
self.url_dic = None
# map to data entries
self.data_dic = None
# map to get's parameter, aka ?project=898
self.ext_get = None
self.res_type = 'json'
self.res_type_valid = self.twcc.res_type_valid
self.http_verb = 'get'
self.http_verb_valid = self.twcc.http_verb_valid
def _chkSite_(self):
if isNone(self._csite_):
raise ValueError("No site value.")
        elif self._csite_ not in self.getSites():
raise ValueError(
"Site value is not valid. {0}".format(self._csite_))
else:
return True
def getSites(self):
exclu = ['admin', 'harbor', 'goc',
'test_sit', 'nchc-ad', 'haproxy_stats']
return [x for x in self.twcc._session_.clusters if not x in exclu]
def _isAlive(self):
return self.twcc.try_alive()
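    # Scrape function names from the current call stack and report them to
    # Google Analytics as a breadcrumb, along with CLI and Python versions.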
def _send_ga(self, event_name, t_url=None):
twcc_file_session = Session2._getSessionFile()
        with open(twcc_file_session, "r") as session_file:
            sessConf = yaml.load(session_file.read(), Loader=yaml.SafeLoader)
        if sessConf is not None and 'ga_cid' in sessConf['_meta']:
func_call_stack = []
for trace_line in traceback.format_stack():
funcs = re.findall(r'in ([_A-Za-z]+)', trace_line)
if funcs:
func_call_stack.extend(funcs)
            ua = '' if self._user_agent is None else self._user_agent
country = sessConf['_meta']['ga_country'] if 'ga_country' in sessConf['_meta'] else ''
func_list = ','.join(func_call_stack)[','.join(
func_call_stack).rindex('invoke'):].split(',')[1:-3]
ga_params = {'geoid': country, 'ua': ua, "version": sessConf['_meta']['cli_version'], "func": '-'.join(
func_list), "p_version": sys.version.split(' ')[0]}
if event_name == 'do_api':
ga_params = {'func': ','.join(func_list), 'url': t_url, 'geoid': country, 'ua': ua,
"version": sessConf['_meta']['cli_version'], "func": '-'.join(func_list), "p_version": sys.version.split(' ')[0]}
send_ga(event_name, sessConf['_meta']['ga_cid'], ga_params)
def _do_api(self):
if self._debug_:
logger_info = {'csite': self._csite_,
'func': self._func_, 'res_type': self.res_type}
if not isNone(self.url_dic):
logger_info.update({'url_dic': self.url_dic})
if not isNone(self.data_dic):
logger_info.update({'data_dic': self.data_dic})
logger.info(logger_info)
res, t_url = self.twcc.doAPI(
site_sn=self._csite_,
api_key=self._api_key_,
user_agent=self._user_agent,
func=self._func_.lower(),
url_dict=self.url_dic if not isNone(self.url_dic) else None,
data_dict=self.data_dic if not isNone(self.data_dic) else None,
http=self.http_verb,
url_ext_get=self.ext_get,
res_type=self.res_type)
if self._debug_:
logger.info({'res': res})
self._send_ga('do_api', t_url=t_url)
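        # Normalize any create_time fields returned by the API to local time.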
        if isinstance(res, list):
for eachone in res:
if 'create_time' in eachone:
eachone['create_time'] = timezone2local(
eachone['create_time']).strftime("%Y-%m-%d %H:%M:%S")
        elif isinstance(res, dict):
if 'create_time' in res:
res['create_time'] = timezone2local(
res['create_time']).strftime("%Y-%m-%d %H:%M:%S")
if 'message' in res and 'request is unauthorized' in res['message']:
raise ValueError("API Key is not validated.")
return res
def create(self, mid):
pass
def list(self):
|
def queryById(self, mid):
self.url_dic = {self._func_: mid}
self.http_verb = 'get'
res = self._do_api()
self.url_dic = None
return res
@property
def project_id(self):
return self._project_id
@project_id.setter
def project_id(self, proj_id):
self._project_id = proj_id
def delete(self, mid):
self.http_verb = "delete"
self.url_dic = {self._func_: mid}
res = self._do_api()
return res
def __log(self, mstr):
if self._debug_:
print("DEBUG in [{}]: {}".format(self.__class__.__name__, mstr))
class CpuService(GenericService):
def __init__(self):
GenericService.__init__(self, cluster_tag="VCS")
def getQuota(self, isAll=False):
if isAll:
self._func_ = "projects"
self.url_dic = {"projects": "%s/user_quotas" % (self._project_id)}
else:
self._func_ = "project_quotas"
self.url_dic = {"project_quotas": ""}
self.ext_get = {"project": self._project_id}
return self.list()
class GpuService(GenericService):
def __init__(self):
GenericService.__init__(self)
self.cluster_tag = "CNTR"
self._csite_ = Session2._getClusterName(self.cluster_tag)
def getQuota(self, isAll=False):
if isAll:
self._func_ = "projects"
self.url_dic = {"projects": "%s/user_quotas" % (self._project_id)}
else:
self._func_ = "project_quotas"
self.url_dic = {"project_quotas": ""}
self.ext_get = {"project": self._project_id}
return self.list()
| self.http_verb = 'get'
self.res_type = 'json'
return self._do_api() |
commands.go | package commands
import (
"fmt"
"github.com/mgutz/ansi"
"github.com/trntv/sshed/host"
"github.com/trntv/sshed/keychain"
"github.com/trntv/sshed/ssh"
"github.com/urfave/cli"
"gopkg.in/AlecAivazis/survey.v1"
"io/ioutil"
"os"
"os/exec"
"os/user"
"sort"
"strings"
)
type Commands struct {
bin string
}
type options struct {
verbose bool
}
func | (app *cli.App) {
commands := &Commands{}
beforeFunc := app.Before
app.Before = func(context *cli.Context) error {
err := beforeFunc(context)
if err != nil {
return err
}
commands.bin = context.String("bin")
		if !keychain.Bootstrapped {
fmt.Println("Creating keychain...")
var encrypt bool
			err = survey.AskOne(&survey.Confirm{
				Message: "Protect keychain with password?",
				Default: false,
			}, &encrypt, nil)
			if err != nil {
				return err
			}
			if encrypt {
key := commands.askPassword()
err = keychain.EncryptDatabase(key)
if err != nil {
return err
}
}
return nil
}
		if keychain.Encrypted {
key := commands.askPassword()
keychain.Password = key
}
return nil
}
app.Commands = []cli.Command{
commands.newShowCommand(),
commands.newListCommand(),
commands.newAddCommand(),
commands.newRemoveCommand(),
commands.newToCommand(),
commands.newAtCommand(),
commands.newEncryptCommand(),
commands.newConfigCommand(),
}
}
func (cmds *Commands) completeWithServers() {
hosts := ssh.Config.GetAll()
for key := range hosts {
fmt.Println(key)
}
}
func (cmds *Commands) askPassword() string {
key := ""
prompt := &survey.Password{
Message: "Please type your password:",
}
survey.AskOne(prompt, &key, nil)
return key
}
func (cmds *Commands) askServerKey() (string, error) {
var key string
options := make([]string, 0)
srvs := ssh.Config.GetAll()
for key := range srvs {
options = append(options, key)
}
sort.Strings(options)
prompt := &survey.Select{
Message: "Choose server:",
Options: options,
PageSize: 16,
}
err := survey.AskOne(prompt, &key, survey.Required)
return key, err
}
func (cmds *Commands) askServersKeys() ([]string, error) {
var keys []string
options := make([]string, 0)
srvs := ssh.Config.GetAll()
for _, h := range srvs {
options = append(options, h.Key)
}
sort.Strings(options)
prompt := &survey.MultiSelect{
Message: "Choose servers:",
Options: options,
PageSize: 16,
}
err := survey.AskOne(prompt, &keys, survey.Required)
return keys, err
}
func (cmds *Commands) createCommand(c *cli.Context, srv *host.Host, options *options, command string) (cmd *exec.Cmd, err error) {
var username string
if srv.User == "" {
u, err := user.Current()
if err != nil {
return nil, err
}
username = u.Username
} else {
username = srv.User
}
var args = make([]string, 0)
if srv.Password() != "" {
args = []string{
"sshpass",
fmt.Sprintf("-p %s", srv.Password()),
}
}
args = append(args, cmds.bin)
args = append(args, fmt.Sprintf("-F %s", ssh.Config.Path))
if pk := srv.PrivateKey(); pk != "" {
		tf, err := ioutil.TempFile("", "")
		if err != nil {
			return nil, err
		}
		defer os.Remove(tf.Name())
		defer tf.Close()
_, err = tf.Write([]byte(pk))
if err != nil {
return nil, err
}
err = tf.Chmod(os.FileMode(0600))
if err != nil {
return nil, err
}
srv.IdentityFile = tf.Name()
}
if srv.User != "" {
args = append(args, fmt.Sprintf("%s@%s", username, srv.Hostname))
} else {
args = append(args, fmt.Sprintf("%s", srv.Hostname))
}
if srv.Port != "" {
args = append(args, fmt.Sprintf("-p %s", srv.Port))
}
if srv.IdentityFile != "" {
args = append(args, fmt.Sprintf("-i %s", srv.IdentityFile))
}
	if options.verbose {
args = append(args, "-v")
}
if command != "" {
args = append(args, command)
}
	if options.verbose {
fmt.Printf("%s: %s\r\n", ansi.Color("Executing", "green"), strings.Join(args, " "))
}
cmd = exec.Command("sh", "-c", strings.Join(args, " "))
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
return cmd, err
}
| RegisterCommands |
test.py | import logging
import time
import pytest
from helpers.cluster import ClickHouseCluster
logging.getLogger().setLevel(logging.INFO)
logging.getLogger().addHandler(logging.StreamHandler())
@pytest.fixture(scope="module")
def cluster():
try:
cluster = ClickHouseCluster(__file__)
cluster.add_instance("node1", main_configs=["configs/config.d/s3.xml"], macros={'replica': '1'},
with_minio=True,
with_zookeeper=True)
cluster.add_instance("node2", main_configs=["configs/config.d/s3.xml"], macros={'replica': '2'},
with_minio=True,
with_zookeeper=True)
logging.info("Starting cluster...")
cluster.start()
logging.info("Cluster started")
yield cluster
finally:
cluster.shutdown()
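# With S3 zero-copy replication both replicas reference the same S3 objects,
# so counting bucket objects of at least `size` bytes is the invariant the
# tests below assert on.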
def get_large_objects_count(cluster, size=100):
minio = cluster.minio_client
counter = 0
for obj in minio.list_objects(cluster.minio_bucket, 'data/'):
if obj.size >= size:
counter = counter + 1
return counter
def wait_for_large_objects_count(cluster, expected, size=100, timeout=30):
while timeout > 0:
if get_large_objects_count(cluster, size) == expected:
return
timeout -= 1
time.sleep(1)
assert get_large_objects_count(cluster, size) == expected
@pytest.mark.parametrize(
"policy", ["s3"]
)
def test_s3_zero_copy_replication(cluster, policy):
node1 = cluster.instances["node1"]
node2 = cluster.instances["node2"]
node1.query(
"""
CREATE TABLE s3_test ON CLUSTER test_cluster (id UInt32, value String)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/s3_test', '{}')
ORDER BY id
SETTINGS storage_policy='{}'
"""
.format('{replica}', policy)
)
node1.query("INSERT INTO s3_test VALUES (0,'data'),(1,'data')")
time.sleep(1)
assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data')"
# Based on version 20.x - should be only one file with size 100+ (checksums.txt), used by both nodes
assert get_large_objects_count(cluster) == 1
node2.query("INSERT INTO s3_test VALUES (2,'data'),(3,'data')")
time.sleep(1)
assert node2.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
assert node1.query("SELECT * FROM s3_test order by id FORMAT Values") == "(0,'data'),(1,'data'),(2,'data'),(3,'data')"
# Based on version 20.x - two parts
wait_for_large_objects_count(cluster, 2)
node1.query("OPTIMIZE TABLE s3_test")
# Based on version 20.x - after merge, two old parts and one merged
wait_for_large_objects_count(cluster, 3)
# Based on version 20.x - after cleanup - only one merged part
wait_for_large_objects_count(cluster, 1, timeout=60)
node1.query("DROP TABLE IF EXISTS s3_test NO DELAY")
node2.query("DROP TABLE IF EXISTS s3_test NO DELAY")
def test_s3_zero_copy_on_hybrid_storage(cluster):
| node1 = cluster.instances["node1"]
node2 = cluster.instances["node2"]
node1.query(
"""
CREATE TABLE hybrid_test ON CLUSTER test_cluster (id UInt32, value String)
ENGINE=ReplicatedMergeTree('/clickhouse/tables/hybrid_test', '{}')
ORDER BY id
SETTINGS storage_policy='hybrid'
"""
.format('{replica}')
)
node1.query("INSERT INTO hybrid_test VALUES (0,'data'),(1,'data')")
time.sleep(1)
assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
node1.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'")
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','default')"
# Total objects in S3
s3_objects = get_large_objects_count(cluster, 0)
node2.query("ALTER TABLE hybrid_test MOVE PARTITION ID 'all' TO DISK 's31'")
assert node1.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
assert node2.query("SELECT partition_id,disk_name FROM system.parts WHERE table='hybrid_test' FORMAT Values") == "('all','s31')"
    # Check that moving the partition on node2 adds no new objects in S3
wait_for_large_objects_count(cluster, s3_objects, size=0)
assert node1.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
assert node2.query("SELECT * FROM hybrid_test ORDER BY id FORMAT Values") == "(0,'data'),(1,'data')"
node1.query("DROP TABLE IF EXISTS hybrid_test NO DELAY")
node2.query("DROP TABLE IF EXISTS hybrid_test NO DELAY") |
|
middleware.go | package middleware
import (
"context"
"html/template"
"net/http"
"github.com/unrolled/render"
)
const (
CtxKey = "render"
)
var ren *render.Render
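// ren is built once in init below and shared; Middleware attaches it to each
// request's context under CtxKey so downstream handlers can retrieve it.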
func init() |
func Middleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
next.ServeHTTP(w, r.WithContext(context.WithValue(r.Context(), CtxKey, ren)))
})
}
| {
ren = render.New(render.Options{
		Funcs: []template.FuncMap{
{
"safeHtml": func(text string) template.HTML { return template.HTML(text) },
},
},
})
} |
keyword-override.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms. | let override = (); //~ ERROR `override` is a reserved keyword
} |
fn main() { |
main.py | #GUI classes for the application
from kivy.app import App
from kivy.lang import Builder
from kivy.core.window import Window
from kivy.uix.screenmanager import ScreenManager, Screen, FadeTransition
from kivy.uix.popup import Popup
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.spinner import Spinner
from kivy.uix.textinput import TextInput
from kivy.properties import ObjectProperty, BooleanProperty
from kivy.uix.recycleview import RecycleView
from kivy.uix.recycleboxlayout import RecycleBoxLayout
from kivy.uix.recycleview.views import RecycleDataViewBehavior
from kivy.uix.behaviors import FocusBehavior
from kivy.uix.recycleview.layout import LayoutSelectionBehavior
#Window.size = (1200, 800)
#FUNCTION classes for the application
from app_functions import AmpFunctions, RoomDesign
from app_constants import AppConstants
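# RecycleView row widget: tapping a row pops up the full details of the
# machine it represents.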
class SelectableLabel(RecycleDataViewBehavior, Label):
index = None
selected = BooleanProperty(False)
selectable = BooleanProperty(True)
def refresh_view_attrs(self, rv, index, data):
        ''' Catch and handle the view changes '''
        self.index = index
        self.selected = True
        return super(SelectableLabel, self).refresh_view_attrs(
            rv, index, data)
def on_touch_down(self, touch):
''' Add selection on touch down '''
if super(SelectableLabel, self).on_touch_down(touch):
return True
if self.collide_point(*touch.pos) and self.selectable:
return self.parent.select_with_touch(self.index, touch)
def apply_selection(self, rv, index, is_selected):
        ''' Respond to the selection of items in the view. '''
        action = CAESD()
self.selected = is_selected
if is_selected:
machine_data = """
Machine Section: %s
Machine Name: %s
Machine Load: %s
Machine Current: %sA
Machine Current(fx): %sA
Machine Cable Size: %smm2
Machine Breaker Size: %sA
Machine Cable Type: Armoured PVC Insulated Single Core Cable
Machine Breaker Type: %s
""" % (str(rv.data[index]['machine_section']),
str(rv.data[index]['machine_name']),
str(rv.data[index]['machine_load']),
str(rv.data[index]['machine_amp']),
str(rv.data[index]['machine_amp_gd']),
str(rv.data[index]['cable_size']),
str(rv.data[index]['breaker_size']),
str(rv.data[index]['breaker_type']))
action.popDisplays('Machine Details', machine_data)
class SelectableRecycleBoxLayout(FocusBehavior, LayoutSelectionBehavior,
RecycleBoxLayout):
''' Adds selection and focus behaviour to the view. '''
#Screens
class LaunchPage(Screen):
pass
class CctvPage(Screen):
dropManufacturer = ObjectProperty()
dropModel = ObjectProperty()
dropSensor = ObjectProperty()
distFromCamera = ObjectProperty()
sceneWidth = ObjectProperty()
sceneHeight = ObjectProperty()
sceneArea = ObjectProperty()
focalLength = ObjectProperty()
datastore = {
'Manu_Model_pairs': [],
'Manufacturer': '',
'Model': '',
'Sensor': '',
'Distance': '',
'Width': '',
'Height': '',
'Focal': '',
'Area': ''
}
def selectedManufacturer(self):
self.datastore['Manufacturer'] = self.dropManufacturer.text
self.datastore['Manu_Model_pairs'] = AppConstants().manufacturerModels(self.dropManufacturer.text)
self.dropModel.values = [i for i in self.datastore['Manu_Model_pairs'].keys()]
pass
def selectedModel(self):
if self.dropModel.text != 'Model':
self.datastore['Model'] = self.dropModel.text
self.datastore['Sensor'] = self.datastore['Manu_Model_pairs'][self.dropModel.text]
self.dropSensor.text = 'Sensor format: '+ self.datastore['Sensor']+'"'
self.sensor_values = AppConstants().sensorsValues(self.datastore['Sensor'])
def checkManufacturerModelSelected(self):
if self.dropManufacturer.text != "" and self.dropModel.text != 'Model':
return True
def clearValues(self):
if self.sceneWidth.text == '':
self.sceneHeight.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
elif self.sceneHeight.text == '':
self.sceneWidth.text = ''
self.focalLength.text = ''
self.sceneArea.text = ''
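    # Similar-triangles lens relation used below:
    # focal_length = sensor_dimension * distance / scene_dimension,
    # with scene width and height tied together by the sensor's aspect ratio.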
def calculateSceneDimensions(self, dimension, value):
app = CAESD()
if value != '':
if self.checkManufacturerModelSelected():
if self.distFromCamera.focus:
self.datastore['Distance'] = self.distFromCamera.text
if self.sceneWidth.text == '' or self.sceneHeight.text == '':
pass
else:
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneWidth.focus:
self.datastore['Height'] = ''
self.datastore['Width'] = self.sceneWidth.text
self.sceneHeight.text = str(round((float(self.sceneWidth.text)*float(self.sensor_values[1]))/float(self.sensor_values[0]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[0])*float(self.distFromCamera.text))/float(self.sceneWidth.text), 1))
self.sceneArea.text = str(round(float(self.sceneWidth.text)*float(self.sceneHeight.text), 2))
elif self.sceneHeight.focus:
self.datastore['Width'] = ''
self.datastore['Height'] = self.sceneHeight.text
self.sceneWidth.text = str(round((float(self.sceneHeight.text)*float(self.sensor_values[0]))/float(self.sensor_values[1]), 1))
if self.distFromCamera.text != '':
self.focalLength.text = str(round((float(self.sensor_values[1])*float(self.distFromCamera.text))/float(self.sceneHeight.text), 1))
self.sceneArea.text = str(round(float(self.sceneHeight.text)*float(self.sceneWidth.text), 2))
else:
pass
else:
errorMessage = 'Please select the Model'
app.popDisplays('Application Error', errorMessage)
else:
if self.distFromCamera.text == '':
self.focalLength.text = ''
self.clearValues()
else:
self.clearValues()
class EarthingPage(Screen):
pass
class PowerPage_one(Screen):
numMachines = ObjectProperty()
numSections = ObjectProperty()
    normalVoltage = ObjectProperty()
    utilityVoltage = ObjectProperty()
    growthFactor = ObjectProperty()
    deratingFactor = ObjectProperty()
    loadingFactor = ObjectProperty()
    dispPowerOneError = ObjectProperty()
    buttAddMachines = ObjectProperty()
def calculatePowerInputs(self, machines, sections):
if machines:
if sections:
self.buttAddMachines.disabled = False
PowerPage_two().powerdataApp(machines, sections, self.normalVoltage.text, self.utilityVoltage.text, self.growthFactor.text, self.deratingFactor.text)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Sections', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerOneError, t='Please Indicate Number of Machines', i=True)
class PowerPage_two(Screen):
machineOutOfNum = ObjectProperty()
machineNameName = ObjectProperty()
machineNameInput = ObjectProperty()
    machineLoad = ObjectProperty()
machineFactor = ObjectProperty()
dropSelectMachineSection = ObjectProperty()
dispPowerTwoScreen = ObjectProperty()
buttAddMachines = ObjectProperty()
buttAllMachines = ObjectProperty()
dropViewMachineSection = ObjectProperty()
dispMachineListHeader = ObjectProperty()
dispMachineScreen = ObjectProperty()
num_of_machines_and_sections = []
storageMachineData = []
def addMachineParameters(self, machine_name, load, section_selected):
if machine_name:
if load:
if section_selected != 'Select Machine Section':
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='', i=True)
self.buttAllMachines.disabled = False
self.dropViewMachineSection.disabled = False
self.dispMachineListHeader.disabled = False
if int(self.getCurMachineNumber()) == int(self.num_of_machines_and_sections[0]):
self.machineListLabels()
                        self.displayPowerViewboard()
self.buttAddMachines.disabled = True
self.dropSelectMachineSection.disabled = True
out_message = "Complete!!! "+str(int(self.getCurMachineNumber()))+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message)
else:
self.machineListLabels()
                        self.displayPowerViewboard()
self.machineNameName.text = "Name for Machine "+str(int(self.getCurMachineNumber())+1)
self.machineNameInput.text = "Machine "+str(int(self.getCurMachineNumber()))
out_message =str(int(self.getCurMachineNumber())-1)+" out of "+str(self.num_of_machines_and_sections[0])+" machines added!"
CAESD().displayInLabelMessage(self.machineOutOfNum, t=out_message, c=[0,0,0,1])
self.machineLoad.text = ''
self.dropSelectMachineSection.text = 'Select Machine Section'
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Select A Machine Section', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Load', i=True)
else:
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t='Please Indicate Machine Name', i=True)
def powerdataApp(self, machines, sections, a, b, c, d):
self.num_of_machines_and_sections.append(machines)
self.num_of_machines_and_sections.append(sections)
self.num_of_machines_and_sections.append([a,b,c,d])
def getCurMachineNumber(self):
return self.machineNameName.text.split(' ')[3]
def selectMachineSection(self):
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(self.num_of_machines_and_sections[1])+1):
values.append('Section '+str(section_alt[i-1]))
self.dropSelectMachineSection.values = values
self.dropViewMachineSection.values = values
#self.buttMachineSection.values = values
def machineListLabels(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
appCons = AppConstants()
self.storageMachineData.insert(0, { 'machine_section': str(self.dropSelectMachineSection.text),
'machine_name': str(self.machineNameInput.text),
'machine_load': str(self.machineLoad.text),
'machine_amp': str(ampCal.ampWithoutFutureExpansion()),
'machine_amp_gd': str(ampCal.ampWithFutureExpansion()),
'breaker_size': str(appCons.breakerSize(ampCal.ampWithFutureExpansion())),
'cable_size': str(appCons.cableSize(ampCal.ampWithoutFutureExpansion())),
'breaker_type': str(appCons.breakerType(appCons.breakerSize(ampCal.ampWithFutureExpansion())))})
self.dispMachineScreen.data = self.storageMachineData
def machineSectionLabels(self, sections, data):
self.dispMachineSection.data = []
values = []
section_alt = [chr(i) for i in range(65,91)]
for i in range(1, int(sections)+1):
values.append('Section '+str(section_alt[i-1]))
values.reverse()
for sect in values:
section_data = []
for row in data:
if row['machine_section'] == sect:
section_data.append(row)
formatted_data = ['Machine | Load | Amp |\n']+[i['machine_name']+' | '+i['machine_load']+'kVa | '+i['machine_amp']+'A | \n' for i in section_data]
#section_header = 'Machine Name | Machine Load |\n'
#formatted_data(section_header)
self.dispMachineSection.data.insert(0, {'machine_section_name': str(sect), 'machine_section_data': str(''.join(formatted_data))})
def displayPowerViewboard(self):
ampCal = AmpFunctions(float(self.machineLoad.text),
float(self.num_of_machines_and_sections[2][0]),
float(self.num_of_machines_and_sections[2][2]),
float(self.num_of_machines_and_sections[2][3]))
#Determine the total current
all_currents = []
for i in self.dispMachineScreen.data:
all_currents.append(float(i['machine_amp']))
t_current = round(sum(all_currents), 2)
#Determine the transformer capacity
p_current = (float(self.num_of_machines_and_sections[2][0]) * t_current)/float(self.num_of_machines_and_sections[2][1])
t_capacity = round((ampCal.phaseRoot() * float(self.num_of_machines_and_sections[2][1]) * p_current * 1)/1000, 2)
power_viewboard_message = """
POWER VIEWBOARD
Total Current from Machines: %sA
Change Over Switch Capacity: 2500A
Transformer Capacity: %skVA
Generator Capacity: %skVA
""" % (t_current, t_capacity, t_capacity)
self.dispPowerTwoScreen.text = power_viewboard_message
def displayPanelBoard(self, data_key):
if data_key == 'All Machines':
self.dispMachineScreen.data = self.storageMachineData
#self.sectionViewboard.text = ''
else:
section_data = []
self.dispMachineScreen.data = []
for row in self.storageMachineData:
if row['machine_section'] == data_key:
section_data.append(row)
else:
self.dispMachineScreen.data = []
self.dispMachineScreen.data = section_data
if self.dispMachineScreen.data == []:
out_message = 'NO MACHINE ADDED YET FOR '+data_key.upper()
CAESD().displayInLabelMessage(self.dispPowerTwoScreen, t=out_message, c=[0,0,0,1])
else:
tot_load = 0
tot_amp = 0
tot_amp_gd = 0
tot_breaker_size = 0
#tot_cable_size = 0
for i in self.dispMachineScreen.data:
tot_load += float(i['machine_load'])
tot_amp += float(i['machine_amp'])
tot_amp_gd += float(i['machine_amp_gd'])
tot_breaker_size += float(i['breaker_size'])
#tot_cable_size += float(i['cable_size'])
data_summary = """
SUMMARY FOR %s
Number of Machines: %s
Total Load: %skVA
Total Current: %sA
Total Current(fx): %sA
Total Breaker Size: %sA
""" % (data_key.upper(), len(self.dispMachineScreen.data), tot_load, round(tot_amp, 2), round(tot_amp_gd, 2), round(tot_breaker_size, 2))
self.dispPowerTwoScreen.text = data_summary
class IlluminationPage(Screen):
|
#Main Screen Manager
class CAESDApp(ScreenManager):
pass
main_kv = Builder.load_file("main.kv")
class CAESD(App):
def build(self):
self.title = 'Computer Aided Electrical Services Design'
self.background_color = 0,0,0,1
return main_kv
def displayInLabelMessage(self, obj, **kwargs):
obj.color = 1, 0, 0, 1
obj.italic = False
if kwargs == {}:
#Default error message
obj.text = 'Attention: Application Message'
else:
for i in kwargs.keys():
if i == 'text' or i == 't':
obj.text = kwargs[i]
elif i == 'color' or i == 'c':
obj.color = kwargs[i]
elif i == 'italic' or i == 'i':
obj.italic = kwargs[i]
def popDisplays(self, title, message, hint=(.7, .45)):
Popup(title=title, title_color=[1,1,1,1],
content=Label(text=message),
size_hint=hint,
separator_color=[1,1,0,.6]).open()
if __name__ == '__main__':
CAESD().run()
| lengthOfRoom = ObjectProperty()
breadthOfRoom = ObjectProperty()
workingHeight = ObjectProperty()
wattMSq = ObjectProperty()
lampL = ObjectProperty()
numL = ObjectProperty()
mainFac = ObjectProperty()
dispIllumination = ObjectProperty()
dispLampDistributions = ObjectProperty()
def calculateLampsNeeded(self, length, breadth, w_height, watt_m_sq, lamp_l, no_lumin, main_fac):
app = CAESD()
if length and breadth and watt_m_sq and lamp_l:
if lamp_l != 'Lamp lumen':
if main_fac != 'Maintenance factor':
Ll = AppConstants().lampLumen(str(self.lampL.text))
room = RoomDesign(float(self.lengthOfRoom.text),
float(self.breadthOfRoom.text),
float(self.workingHeight.text),
float(self.wattMSq.text),
float(Ll),
float(self.numL.text),
float(self.mainFac.text))
message_illumination = """
Room Index Calculated at: %s \r
Total Number of lamps needed: %s
""" % (str(room.roomIndex()), str(room.roomLamps()))
lamp_dis = """
POSSIBLE COMBINATIONS OF LAMPS\r
%s
""" % str(room.possibleLampConfigurations())
app.displayInLabelMessage(self.dispIllumination, t=message_illumination, c=[0,0,0,1])
app.displayInLabelMessage(self.dispLampDistributions, t=lamp_dis, c=[0,0,0,1])
else:
app.displayInLabelMessage(self.dispIllumination, t='Please select the maintenance factor', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Please choose the lamp lumen', i=True)
else:
app.displayInLabelMessage(self.dispIllumination, t='Missing Parameter/Input', i=True) |
listIotHubResourceKeys.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package v20180122
import (
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
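// Invokes the azure-nextgen devices API to list the shared access policies
// (keys) of an IoT hub in the given resource group.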
func | (ctx *pulumi.Context, args *ListIotHubResourceKeysArgs, opts ...pulumi.InvokeOption) (*ListIotHubResourceKeysResult, error) {
var rv ListIotHubResourceKeysResult
err := ctx.Invoke("azure-nextgen:devices/v20180122:listIotHubResourceKeys", args, &rv, opts...)
if err != nil {
return nil, err
}
return &rv, nil
}
type ListIotHubResourceKeysArgs struct {
// The name of the resource group that contains the IoT hub.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the IoT hub.
ResourceName string `pulumi:"resourceName"`
}
// The list of shared access policies with a next link.
type ListIotHubResourceKeysResult struct {
// The next link.
NextLink string `pulumi:"nextLink"`
// The list of shared access policies.
Value []SharedAccessSignatureAuthorizationRuleResponse `pulumi:"value"`
}
| ListIotHubResourceKeys |
secp256k1.rs | /*
* Copyright 2017 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
extern crate secp256k1;
extern crate crypto;
use self::crypto::digest::Digest;
use self::crypto::sha2::Sha256;
use super::PrivateKey;
use super::PublicKey;
use super::Context;
use super::Error;
use super::pem_loader::load_pem_key;
impl From<secp256k1::Error> for Error {
fn | (e: secp256k1::Error) -> Self {
Error::SigningError(Box::new(e))
}
}
pub struct Secp256k1PrivateKey {
private: Vec<u8>
}
impl Secp256k1PrivateKey {
pub fn from_hex(s: &str) -> Result<Self, Error> {
hex_str_to_bytes(s).map(|key_bytes| Secp256k1PrivateKey{ private: key_bytes })
}
pub fn from_pem(s: &str) -> Result<Self, Error> {
let (priv_key_str, _) = load_pem_key(s, "")?;
Self::from_hex(&priv_key_str)
}
pub fn from_pem_with_password(s: &str, pw: &str) -> Result<Self, Error> {
let (priv_key_str, _) = load_pem_key(s, pw)?;
Self::from_hex(&priv_key_str)
}
}
impl PrivateKey for Secp256k1PrivateKey {
fn get_algorithm_name(&self) -> &str {
"secp256k1"
}
fn as_hex(&self) -> String {
bytes_to_hex_str(&self.private)
}
fn as_slice(&self) -> &[u8] {
return &self.private;
}
}
pub struct Secp256k1PublicKey {
public: Vec<u8>
}
impl Secp256k1PublicKey {
pub fn from_hex(s: &str) -> Result<Self, Error> {
hex_str_to_bytes(s).map(|key_bytes| Secp256k1PublicKey{ public: key_bytes })
}
}
impl PublicKey for Secp256k1PublicKey {
fn get_algorithm_name(&self) -> &str {
"secp256k1"
}
fn as_hex(&self) -> String {
bytes_to_hex_str(&self.public)
}
fn as_slice(&self) -> &[u8] {
return &self.public;
}
}
pub struct Secp256k1Context {
context: secp256k1::Secp256k1
}
impl Secp256k1Context {
pub fn new() -> Self {
Secp256k1Context{
context: secp256k1::Secp256k1::new()
}
}
}
impl Context for Secp256k1Context {
fn get_algorithm_name(&self) -> &str {
"secp256k1"
}
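    // Sign the SHA-256 digest of the message; the signature is returned as
    // the hex encoding of the 64-byte compact serialization.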
fn sign(&self, message: &[u8], key: &PrivateKey) -> Result<String, Error> {
let mut sha = Sha256::new();
sha.input(message);
        let hash: &mut [u8] = &mut [0; 32];
sha.result(hash);
let sk = secp256k1::key::SecretKey::from_slice(&self.context, key.as_slice())?;
let sig = self.context.sign(&secp256k1::Message::from_slice(hash)?, &sk)?;
let compact = sig.serialize_compact(&self.context);
Ok(compact.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<_>>()
.join(""))
}
fn verify(&self, signature: &str, message: &[u8], key: &PublicKey) -> Result<bool, Error> {
let mut sha = Sha256::new();
sha.input(message);
        let hash: &mut [u8] = &mut [0; 32];
sha.result(hash);
let result = self.context.verify(
&secp256k1::Message::from_slice(hash)?,
&secp256k1::Signature::from_compact(&self.context, &hex_str_to_bytes(&signature)?)?,
&secp256k1::key::PublicKey::from_slice(&self.context, key.as_slice())?);
match result {
Ok(()) => Ok(true),
Err(secp256k1::Error::IncorrectSignature) => Ok(false),
Err(err) => Err(Error::from(err))
}
}
fn get_public_key(&self, private_key: &PrivateKey) -> Result<Box<PublicKey>, Error> {
let sk = secp256k1::key::SecretKey::from_slice(&self.context, private_key.as_slice())?;
let result = Secp256k1PublicKey::from_hex(
bytes_to_hex_str(
&secp256k1::key::PublicKey::from_secret_key(
&self.context, &sk)?.serialize_vec(&self.context, true)).as_str());
match result {
Err(err) => Err(err),
Ok(pk) => Ok(Box::new(pk))
}
}
}
fn hex_str_to_bytes(s: &str) -> Result<Vec<u8>, Error> {
    if s.len() % 2 != 0 {
        return Err(Error::ParseError("odd number of hex digits".to_string()));
    }
    for (i, ch) in s.chars().enumerate() {
if !ch.is_digit(16) {
return Err(Error::ParseError(format!("invalid character position {}", i)));
}
}
let input: Vec<_> = s.chars().collect();
let decoded: Vec<u8> = input.chunks(2).map(|chunk| {
((chunk[0].to_digit(16).unwrap() << 4) |
(chunk[1].to_digit(16).unwrap())) as u8
}).collect();
return Ok(decoded);
}
fn bytes_to_hex_str(b: &[u8]) -> String {
b.iter()
.map(|b| format!("{:02x}", b))
.collect::<Vec<_>>()
.join("")
}
#[cfg(test)]
mod secp256k1_test {
use super::Secp256k1PrivateKey;
use super::Secp256k1PublicKey;
use super::super::CryptoFactory;
use super::super::PrivateKey;
use super::super::PublicKey;
use super::super::create_context;
static KEY1_PRIV_HEX: &'static str = "2f1e7b7a130d7ba9da0068b3bb0ba1d79e7e77110302c9f746c3c2a63fe40088";
static KEY1_PUB_HEX: &'static str = "026a2c795a9776f75464aa3bda3534c3154a6e91b357b1181d3f515110f84b67c5";
static KEY2_PRIV_HEX: &'static str = "51b845c2cdde22fe646148f0b51eaf5feec8c82ee921d5e0cbe7619f3bb9c62d";
static KEY2_PUB_HEX: &'static str = "039c20a66b4ec7995391dbec1d8bb0e2c6e6fd63cd259ed5b877cb4ea98858cf6d";
static MSG1: &'static str = "test";
static MSG1_KEY1_SIG: &'static str = "5195115d9be2547b720ee74c23dd841842875db6eae1f5da8605b050a49e702b4aa83be72ab7e3cb20f17c657011b49f4c8632be2745ba4de79e6aa05da57b35";
static MSG2: &'static str = "test2";
static MSG2_KEY2_SIG: &'static str = "d589c7b1fa5f8a4c5a389de80ae9582c2f7f2a5e21bab5450b670214e5b1c1235e9eb8102fd0ca690a8b42e2c406a682bd57f6daf6e142e5fa4b2c26ef40a490";
#[test]
fn hex_key() {
let priv_key = Secp256k1PrivateKey::from_hex(KEY1_PRIV_HEX).unwrap();
assert_eq!(priv_key.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key.as_hex(), KEY1_PRIV_HEX);
let pub_key = Secp256k1PublicKey::from_hex(KEY1_PUB_HEX).unwrap();
assert_eq!(pub_key.get_algorithm_name(), "secp256k1");
assert_eq!(pub_key.as_hex(), KEY1_PUB_HEX);
}
#[test]
fn priv_to_public_key() {
let context = create_context("secp256k1").unwrap();
assert_eq!(context.get_algorithm_name(), "secp256k1");
let priv_key1 = Secp256k1PrivateKey::from_hex(KEY1_PRIV_HEX).unwrap();
assert_eq!(priv_key1.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key1.as_hex(), KEY1_PRIV_HEX);
let public_key1 = context.get_public_key(&priv_key1).unwrap();
assert_eq!(public_key1.as_hex(), KEY1_PUB_HEX);
let priv_key2 = Secp256k1PrivateKey::from_hex(KEY2_PRIV_HEX).unwrap();
assert_eq!(priv_key2.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key2.as_hex(), KEY2_PRIV_HEX);
let public_key2 = context.get_public_key(&priv_key2).unwrap();
assert_eq!(public_key2.as_hex(), KEY2_PUB_HEX);
}
#[test]
fn check_invalid_digit() {
let mut priv_chars: Vec<char> = KEY1_PRIV_HEX.chars().collect();
priv_chars[3] = 'i';
let priv_result = Secp256k1PrivateKey::from_hex(
priv_chars.into_iter().collect::<String>().as_str());
assert!(priv_result.is_err());
let mut pub_chars: Vec<char> = KEY1_PUB_HEX.chars().collect();
pub_chars[3] = 'i';
let result = Secp256k1PublicKey::from_hex(
pub_chars.into_iter().collect::<String>().as_str());
assert!(result.is_err());
}
#[test]
fn single_key_signing() {
let context = create_context("secp256k1").unwrap();
assert_eq!(context.get_algorithm_name(), "secp256k1");
let factory = CryptoFactory::new(&*context);
assert_eq!(factory.get_context().get_algorithm_name(), "secp256k1");
let priv_key = Secp256k1PrivateKey::from_hex(KEY1_PRIV_HEX).unwrap();
assert_eq!(priv_key.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key.as_hex(), KEY1_PRIV_HEX);
let signer = factory.new_signer(&priv_key);
let signature = signer.sign(&String::from(MSG1).into_bytes()).unwrap();
assert_eq!(signature, MSG1_KEY1_SIG);
}
#[test]
fn many_key_signing() {
let context = create_context("secp256k1").unwrap();
assert_eq!(context.get_algorithm_name(), "secp256k1");
let priv_key1 = Secp256k1PrivateKey::from_hex(KEY1_PRIV_HEX).unwrap();
assert_eq!(priv_key1.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key1.as_hex(), KEY1_PRIV_HEX);
let priv_key2 = Secp256k1PrivateKey::from_hex(KEY2_PRIV_HEX).unwrap();
assert_eq!(priv_key2.get_algorithm_name(), "secp256k1");
assert_eq!(priv_key2.as_hex(), KEY2_PRIV_HEX);
let signature = context.sign(
&String::from(MSG1).into_bytes(),
&priv_key1).unwrap();
assert_eq!(signature, MSG1_KEY1_SIG);
let signature = context.sign(
&String::from(MSG2).into_bytes(),
&priv_key2).unwrap();
assert_eq!(signature, MSG2_KEY2_SIG);
}
#[test]
fn verification() {
let context = create_context("secp256k1").unwrap();
assert_eq!(context.get_algorithm_name(), "secp256k1");
let pub_key1 = Secp256k1PublicKey::from_hex(KEY1_PUB_HEX).unwrap();
assert_eq!(pub_key1.get_algorithm_name(), "secp256k1");
assert_eq!(pub_key1.as_hex(), KEY1_PUB_HEX);
let result = context.verify(MSG1_KEY1_SIG,
&String::from(MSG1).into_bytes(),
&pub_key1);
assert_eq!(result.unwrap(), true);
}
#[test]
fn verification_error() {
let context = create_context("secp256k1").unwrap();
assert_eq!(context.get_algorithm_name(), "secp256k1");
let pub_key1 = Secp256k1PublicKey::from_hex(KEY1_PUB_HEX).unwrap();
assert_eq!(pub_key1.get_algorithm_name(), "secp256k1");
assert_eq!(pub_key1.as_hex(), KEY1_PUB_HEX);
// This signature doesn't match for MSG1/KEY1
let result = context.verify(MSG2_KEY2_SIG,
&String::from(MSG1).into_bytes(),
&pub_key1);
assert_eq!(result.unwrap(), false);
}
}
| from |
index.ts | import { PostModeler } from "./Post";
import { UserModeler } from "./User";
| export { PostModeler, UserModeler }; |
|
extensionManagement.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { localize } from 'vs/nls';
import { Event } from 'vs/base/common/event';
import { IPager } from 'vs/base/common/paging';
import { createDecorator } from 'vs/platform/instantiation/common/instantiation';
import { URI } from 'vs/base/common/uri';
import { CancellationToken } from 'vs/base/common/cancellation';
import { IExtensionManifest, IExtension, ExtensionType } from 'vs/platform/extensions/common/extensions';
import { FileAccess } from 'vs/base/common/network';
export const EXTENSION_IDENTIFIER_PATTERN = '^([a-z0-9A-Z][a-z0-9-A-Z]*)\\.([a-z0-9A-Z][a-z0-9-A-Z]*)$';
export const EXTENSION_IDENTIFIER_REGEX = new RegExp(EXTENSION_IDENTIFIER_PATTERN);
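// Extension identifiers take the form '<publisher>.<name>'; the regex above
// validates both parts.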
export const WEB_EXTENSION_TAG = '__web_extension';
export interface IGalleryExtensionProperties {
dependencies?: string[];
extensionPack?: string[];
engine?: string;
localizedLanguages?: string[];
}
export interface IGalleryExtensionAsset {
uri: string;
fallbackUri: string;
}
export interface IGalleryExtensionAssets {
manifest: IGalleryExtensionAsset | null;
readme: IGalleryExtensionAsset | null;
changelog: IGalleryExtensionAsset | null;
license: IGalleryExtensionAsset | null;
repository: IGalleryExtensionAsset | null;
download: IGalleryExtensionAsset;
icon: IGalleryExtensionAsset;
coreTranslations: [string, IGalleryExtensionAsset][];
}
export function | (thing: any): thing is IExtensionIdentifier {
return thing
&& typeof thing === 'object'
&& typeof thing.id === 'string'
&& (!thing.uuid || typeof thing.uuid === 'string');
}
/* __GDPR__FRAGMENT__
"ExtensionIdentifier" : {
"id" : { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"uuid": { "classification": "SystemMetaData", "purpose": "FeatureInsight" }
}
*/
export interface IExtensionIdentifier {
id: string;
uuid?: string;
}
export interface IExtensionIdentifierWithVersion extends IExtensionIdentifier {
id: string;
uuid?: string;
version: string;
}
export interface IGalleryExtensionIdentifier extends IExtensionIdentifier {
uuid: string;
}
export interface IGalleryExtensionVersion {
version: string;
date: string;
}
export interface IGalleryExtension {
name: string;
identifier: IGalleryExtensionIdentifier;
version: string;
date: string;
displayName: string;
publisherId: string;
publisher: string;
publisherDisplayName: string;
description: string;
installCount: number;
rating: number;
ratingCount: number;
categories: readonly string[];
tags: readonly string[];
releaseDate: number;
lastUpdated: number;
assetUri: URI;
assetTypes: string[];
assets: IGalleryExtensionAssets;
properties: IGalleryExtensionProperties;
telemetryData: any;
preview: boolean;
webExtension: boolean;
}
export interface IGalleryMetadata {
id: string;
publisherId: string;
publisherDisplayName: string;
}
export interface ILocalExtension extends IExtension {
isMachineScoped: boolean;
publisherId: string | null;
publisherDisplayName: string | null;
installedTimestamp?: number;
}
export const enum SortBy {
NoneOrRelevance = 0,
LastUpdatedDate = 1,
Title = 2,
PublisherName = 3,
InstallCount = 4,
PublishedDate = 5,
AverageRating = 6,
WeightedRating = 12
}
export const enum SortOrder {
Default = 0,
Ascending = 1,
Descending = 2
}
export interface IQueryOptions {
text?: string;
ids?: string[];
names?: string[];
pageSize?: number;
sortBy?: SortBy;
sortOrder?: SortOrder;
source?: string;
}
export const enum StatisticType {
Uninstall = 'uninstall'
}
export interface IReportedExtension {
id: IExtensionIdentifier;
malicious: boolean;
}
export const enum InstallOperation {
None = 0,
Install,
Update
}
export interface ITranslation {
contents: { [key: string]: {} };
}
export const IExtensionGalleryService = createDecorator<IExtensionGalleryService>('extensionGalleryService');
export interface IExtensionGalleryService {
readonly _serviceBrand: undefined;
isEnabled(): boolean;
query(token: CancellationToken): Promise<IPager<IGalleryExtension>>;
query(options: IQueryOptions, token: CancellationToken): Promise<IPager<IGalleryExtension>>;
getExtensions(ids: string[], token: CancellationToken): Promise<IGalleryExtension[]>;
download(extension: IGalleryExtension, location: URI, operation: InstallOperation): Promise<void>;
reportStatistic(publisher: string, name: string, version: string, type: StatisticType): Promise<void>;
getReadme(extension: IGalleryExtension, token: CancellationToken): Promise<string>;
getManifest(extension: IGalleryExtension, token: CancellationToken): Promise<IExtensionManifest | null>;
getChangelog(extension: IGalleryExtension, token: CancellationToken): Promise<string>;
getCoreTranslation(extension: IGalleryExtension, languageId: string): Promise<ITranslation | null>;
getAllVersions(extension: IGalleryExtension, compatible: boolean): Promise<IGalleryExtensionVersion[]>;
getExtensionsReport(): Promise<IReportedExtension[]>;
getCompatibleExtension(extension: IGalleryExtension): Promise<IGalleryExtension | null>;
getCompatibleExtension(id: IExtensionIdentifier, version?: string): Promise<IGalleryExtension | null>;
}
export interface InstallExtensionEvent {
identifier: IExtensionIdentifier;
source: URI | IGalleryExtension;
}
export interface InstallExtensionResult {
readonly identifier: IExtensionIdentifier;
readonly operation: InstallOperation;
readonly source?: URI | IGalleryExtension;
readonly local?: ILocalExtension;
}
export interface DidUninstallExtensionEvent {
identifier: IExtensionIdentifier;
error?: string;
}
export const INSTALL_ERROR_NOT_SUPPORTED = 'notsupported';
export const INSTALL_ERROR_MALICIOUS = 'malicious';
export const INSTALL_ERROR_INCOMPATIBLE = 'incompatible';
export class ExtensionManagementError extends Error {
constructor(message: string, readonly code: string) {
super(message);
this.name = code;
}
}
export type InstallOptions = { isBuiltin?: boolean, isMachineScoped?: boolean, donotIncludePackAndDependencies?: boolean };
export type InstallVSIXOptions = InstallOptions & { installOnlyNewlyAddedFromExtensionPack?: boolean };
export type UninstallOptions = { donotIncludePack?: boolean, donotCheckDependents?: boolean };
export interface IExtensionManagementParticipant {
postInstall(local: ILocalExtension, source: URI | IGalleryExtension, options: InstallOptions | InstallVSIXOptions, token: CancellationToken): Promise<void>;
postUninstall(local: ILocalExtension, options: UninstallOptions, token: CancellationToken): Promise<void>;
}
export const IExtensionManagementService = createDecorator<IExtensionManagementService>('extensionManagementService');
export interface IExtensionManagementService {
readonly _serviceBrand: undefined;
onInstallExtension: Event<InstallExtensionEvent>;
onDidInstallExtensions: Event<readonly InstallExtensionResult[]>;
onUninstallExtension: Event<IExtensionIdentifier>;
onDidUninstallExtension: Event<DidUninstallExtensionEvent>;
zip(extension: ILocalExtension): Promise<URI>;
unzip(zipLocation: URI): Promise<IExtensionIdentifier>;
getManifest(vsix: URI): Promise<IExtensionManifest>;
install(vsix: URI, options?: InstallVSIXOptions): Promise<ILocalExtension>;
canInstall(extension: IGalleryExtension): Promise<boolean>;
installFromGallery(extension: IGalleryExtension, options?: InstallOptions): Promise<ILocalExtension>;
uninstall(extension: ILocalExtension, options?: UninstallOptions): Promise<void>;
reinstallFromGallery(extension: ILocalExtension): Promise<void>;
getInstalled(type?: ExtensionType): Promise<ILocalExtension[]>;
getExtensionsReport(): Promise<IReportedExtension[]>;
updateMetadata(local: ILocalExtension, metadata: IGalleryMetadata): Promise<ILocalExtension>;
updateExtensionScope(local: ILocalExtension, isMachineScoped: boolean): Promise<ILocalExtension>;
registerParticipant(participant: IExtensionManagementParticipant): void;
}
export const DISABLED_EXTENSIONS_STORAGE_PATH = 'extensionsIdentifiers/disabled';
export const ENABLED_EXTENSIONS_STORAGE_PATH = 'extensionsIdentifiers/enabled';
export const IGlobalExtensionEnablementService = createDecorator<IGlobalExtensionEnablementService>('IGlobalExtensionEnablementService');
export interface IGlobalExtensionEnablementService {
readonly _serviceBrand: undefined;
readonly onDidChangeEnablement: Event<{ readonly extensions: IExtensionIdentifier[], readonly source?: string }>;
getDisabledExtensions(): IExtensionIdentifier[];
enableExtension(extension: IExtensionIdentifier, source?: string): Promise<boolean>;
disableExtension(extension: IExtensionIdentifier, source?: string): Promise<boolean>;
}
export type IConfigBasedExtensionTip = {
readonly extensionId: string,
readonly extensionName: string,
readonly isExtensionPack: boolean,
readonly configName: string,
readonly important: boolean,
};
export type IExecutableBasedExtensionTip = {
readonly extensionId: string,
readonly extensionName: string,
readonly isExtensionPack: boolean,
readonly exeName: string,
readonly exeFriendlyName: string,
readonly windowsPath?: string,
};
export type IWorkspaceTips = { readonly remoteSet: string[]; readonly recommendations: string[]; };
export const IExtensionTipsService = createDecorator<IExtensionTipsService>('IExtensionTipsService');
export interface IExtensionTipsService {
readonly _serviceBrand: undefined;
getConfigBasedTips(folder: URI): Promise<IConfigBasedExtensionTip[]>;
getImportantExecutableBasedTips(): Promise<IExecutableBasedExtensionTip[]>;
getOtherExecutableBasedTips(): Promise<IExecutableBasedExtensionTip[]>;
getAllWorkspacesTips(): Promise<IWorkspaceTips[]>;
}
export const DefaultIconPath = FileAccess.asBrowserUri('./media/defaultIcon.png', require).toString(true);
export const ExtensionsLabel = localize('extensions', "Extensions");
export const ExtensionsLocalizedLabel = { value: ExtensionsLabel, original: 'Extensions' };
export const ExtensionsChannelId = 'extensions';
export const PreferencesLabel = localize('preferences', "Preferences");
export const PreferencesLocalizedLabel = { value: PreferencesLabel, original: 'Preferences' };
export interface CLIOutput {
log(s: string): void;
error(s: string): void;
}
export const IExtensionManagementCLIService = createDecorator<IExtensionManagementCLIService>('IExtensionManagementCLIService');
export interface IExtensionManagementCLIService {
readonly _serviceBrand: undefined;
listExtensions(showVersions: boolean, category?: string, output?: CLIOutput): Promise<void>;
installExtensions(extensions: (string | URI)[], builtinExtensionIds: string[], isMachineScoped: boolean, force: boolean, output?: CLIOutput): Promise<void>;
uninstallExtensions(extensions: (string | URI)[], force: boolean, output?: CLIOutput): Promise<void>;
locateExtension(extensions: string[], output?: CLIOutput): Promise<void>;
}
| isIExtensionIdentifier |
face_eval.py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import paddle.fluid as fluid
import numpy as np
from PIL import Image
from collections import OrderedDict
import ppdet.utils.checkpoint as checkpoint
from ppdet.utils.cli import ArgsParser
from ppdet.utils.check import check_gpu
from ppdet.utils.widerface_eval_utils import get_shrink, bbox_vote, \
save_widerface_bboxes, save_fddb_bboxes, to_chw_bgr
from ppdet.core.workspace import load_config, merge_config, create
from ppdet.modeling.model_input import create_feed
import logging | logger = logging.getLogger(__name__)
def face_img_process(image,
mean=[104., 117., 123.],
std=[127.502231, 127.502231, 127.502231]):
img = np.array(image)
img = to_chw_bgr(img)
img = img.astype('float32')
img -= np.array(mean)[:, np.newaxis, np.newaxis].astype('float32')
img /= np.array(std)[:, np.newaxis, np.newaxis].astype('float32')
img = [img]
img = np.array(img)
return img
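# Illustrative usage sketch (not part of the original file): face_img_process
# turns an RGB PIL image into a normalized, batched CHW BGR array. For a
# hypothetical 640x480 input:
#
#   image = Image.open('face.jpg').convert('RGB')  # 'face.jpg' is a placeholder
#   batch = face_img_process(image)                # batch.shape == (1, 3, 480, 640)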
def face_eval_run(exe,
compile_program,
fetches,
img_root_dir,
gt_file,
pred_dir='output/pred',
eval_mode='widerface',
multi_scale=False):
# load ground truth files
with open(gt_file, 'r') as f:
gt_lines = f.readlines()
imid2path = []
pos_gt = 0
while pos_gt < len(gt_lines):
name_gt = gt_lines[pos_gt].strip('\n\t').split()[0]
imid2path.append(name_gt)
pos_gt += 1
n_gt = int(gt_lines[pos_gt].strip('\n\t').split()[0])
pos_gt += 1 + n_gt
logger.info('The ground truth file load {} images'.format(len(imid2path)))
dets_dist = OrderedDict()
for iter_id, im_path in enumerate(imid2path):
image_path = os.path.join(img_root_dir, im_path)
if eval_mode == 'fddb':
image_path += '.jpg'
image = Image.open(image_path).convert('RGB')
if multi_scale:
shrink, max_shrink = get_shrink(image.size[1], image.size[0])
det0 = detect_face(exe, compile_program, fetches, image, shrink)
det1 = flip_test(exe, compile_program, fetches, image, shrink)
[det2, det3] = multi_scale_test(exe, compile_program, fetches, image,
max_shrink)
det4 = multi_scale_test_pyramid(exe, compile_program, fetches, image,
max_shrink)
det = np.row_stack((det0, det1, det2, det3, det4))
dets = bbox_vote(det)
else:
dets = detect_face(exe, compile_program, fetches, image, 1)
if eval_mode == 'widerface':
save_widerface_bboxes(image_path, dets, pred_dir)
else:
dets_dist[im_path] = dets
if iter_id % 100 == 0:
logger.info('Test iter {}'.format(iter_id))
if eval_mode == 'fddb':
save_fddb_bboxes(dets_dist, pred_dir)
logger.info("Finish evaluation.")
def detect_face(exe, compile_program, fetches, image, shrink):
image_shape = [3, image.size[1], image.size[0]]
if shrink != 1:
h, w = int(image_shape[1] * shrink), int(image_shape[2] * shrink)
image = image.resize((w, h), Image.ANTIALIAS)
image_shape = [3, h, w]
img = face_img_process(image)
detection, = exe.run(compile_program,
feed={'image': img},
fetch_list=[fetches['bbox']],
return_numpy=False)
detection = np.array(detection)
# layout: xmin, ymin, xmax, ymax, score
if np.prod(detection.shape) == 1:
logger.info("No face detected")
return np.array([[0, 0, 0, 0, 0]])
det_conf = detection[:, 1]
det_xmin = image_shape[2] * detection[:, 2] / shrink
det_ymin = image_shape[1] * detection[:, 3] / shrink
det_xmax = image_shape[2] * detection[:, 4] / shrink
det_ymax = image_shape[1] * detection[:, 5] / shrink
det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))
return det
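# Note on the scaling above (editorial comment, not original): detection[:, 2:6]
# holds coordinates normalized to the resized image, and image_shape is the
# resized (shrunk) shape, so multiplying by the resized extent and dividing by
# shrink recovers boxes at the original resolution (w * x / shrink == W * x).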
def flip_test(exe, compile_program, fetches, image, shrink):
img = image.transpose(Image.FLIP_LEFT_RIGHT)
det_f = detect_face(exe, compile_program, fetches, img, shrink)
det_t = np.zeros(det_f.shape)
# image.size: [width, height]
det_t[:, 0] = image.size[0] - det_f[:, 2]
det_t[:, 1] = det_f[:, 1]
det_t[:, 2] = image.size[0] - det_f[:, 0]
det_t[:, 3] = det_f[:, 3]
det_t[:, 4] = det_f[:, 4]
return det_t
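# Example of the mirroring above (editorial comment, not original): for an
# image of width W, a box (x1, y1, x2, y2) found in the horizontally flipped
# image maps back to (W - x2, y1, W - x1, y2); y coordinates and the score
# are unchanged.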
def multi_scale_test(exe, compile_program, fetches, image, max_shrink):
# Shrunk images are only used to detect big faces
st = 0.5 if max_shrink >= 0.75 else 0.5 * max_shrink
det_s = detect_face(exe, compile_program, fetches, image, st)
index = np.where(
np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1)
> 30)[0]
det_s = det_s[index, :]
# Enlarge the image once
bt = min(2, max_shrink) if max_shrink > 1 else (st + max_shrink) / 2
det_b = detect_face(exe, compile_program, fetches, image, bt)
# Keep doubling the scale to detect small faces
if max_shrink > 2:
bt *= 2
while bt < max_shrink:
det_b = np.row_stack((det_b, detect_face(exe, compile_program,
fetches, image, bt)))
bt *= 2
det_b = np.row_stack((det_b, detect_face(exe, compile_program, fetches,
image, max_shrink)))
# Enlarged images are only used to detect small faces.
if bt > 1:
index = np.where(
np.minimum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
det_b = det_b[index, :]
# Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1,
det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
det_b = det_b[index, :]
return det_s, det_b
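# Scale schedule of multi_scale_test in short (editorial summary): one shrunk
# pass at st keeps only large faces (> 30 px), then enlarged passes at
# bt, 2*bt, 4*bt, ... up to max_shrink keep only small faces (< 100 px), so
# each scale contributes the face sizes it detects best.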
def multi_scale_test_pyramid(exe, compile_program, fetches, image, max_shrink):
# Use image pyramids to detect faces
det_b = detect_face(exe, compile_program, fetches, image, 0.25)
index = np.where(
np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1)
> 30)[0]
det_b = det_b[index, :]
st = [0.75, 1.25, 1.5, 1.75]
for i in range(len(st)):
if st[i] <= max_shrink:
det_temp = detect_face(exe, compile_program, fetches, image, st[i])
# Enlarged images are only used to detect small faces.
if st[i] > 1:
index = np.where(
np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]
det_temp = det_temp[index, :]
# Shrunken images are only used to detect big faces.
else:
index = np.where(
np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,
det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]
det_temp = det_temp[index, :]
det_b = np.row_stack((det_b, det_temp))
return det_b
def main():
"""
Main evaluate function
"""
cfg = load_config(FLAGS.config)
if 'architecture' in cfg:
main_arch = cfg.architecture
else:
raise ValueError("'architecture' not specified in config file.")
merge_config(FLAGS.opt)
# check if set use_gpu=True in paddlepaddle cpu version
check_gpu(cfg.use_gpu)
if 'eval_feed' not in cfg:
eval_feed = create(main_arch + 'EvalFeed')
else:
eval_feed = create(cfg.eval_feed)
# define executor
place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
# build program
model = create(main_arch)
startup_prog = fluid.Program()
eval_prog = fluid.Program()
with fluid.program_guard(eval_prog, startup_prog):
with fluid.unique_name.guard():
_, feed_vars = create_feed(eval_feed, use_pyreader=False)
fetches = model.eval(feed_vars)
eval_prog = eval_prog.clone(True)
# load model
exe.run(startup_prog)
if 'weights' in cfg:
checkpoint.load_params(exe, eval_prog, cfg.weights)
assert cfg.metric in ['WIDERFACE'], \
"unknown metric type {}".format(cfg.metric)
annotation_file = getattr(eval_feed.dataset, 'annotation', None)
dataset_dir = FLAGS.dataset_dir if FLAGS.dataset_dir else \
getattr(eval_feed.dataset, 'dataset_dir', None)
img_root_dir = dataset_dir
if FLAGS.eval_mode == "widerface":
image_dir = getattr(eval_feed.dataset, 'image_dir', None)
img_root_dir = os.path.join(dataset_dir, image_dir)
gt_file = os.path.join(dataset_dir, annotation_file)
pred_dir = FLAGS.output_eval if FLAGS.output_eval else 'output/pred'
face_eval_run(
exe,
eval_prog,
fetches,
img_root_dir,
gt_file,
pred_dir=pred_dir,
eval_mode=FLAGS.eval_mode,
multi_scale=FLAGS.multi_scale)
if __name__ == '__main__':
parser = ArgsParser()
parser.add_argument(
"-d",
"--dataset_dir",
default=None,
type=str,
help="Dataset path, same as DataFeed.dataset.dataset_dir")
parser.add_argument(
"-f",
"--output_eval",
default=None,
type=str,
help="Evaluation file directory, default is current directory.")
parser.add_argument(
"-e",
"--eval_mode",
default="widerface",
type=str,
help="Evaluation mode, include `widerface` and `fddb`, default is `widerface`."
)
parser.add_argument(
"--multi_scale",
action='store_true',
default=False,
help="If True it will select `multi_scale` evaluation. Default is `False`, it will select `single-scale` evaluation.")
FLAGS = parser.parse_args()
main() | FORMAT = '%(asctime)s-%(levelname)s: %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT) |
AvgCubicWeightCalculator_test.go | package main
import (
"math"
"testing"
)
func TestAvgCubicWeight(t *testing.T) |
func TestCubicWeight(t *testing.T) {
var cubicWeightConversionFactor float64
cubicWeightConversionFactor = 250
F64_TOLERANCE := 0.000001
val1, err := cubicWeight(&ProductSize{1.0, 1.0, 1.0}, cubicWeightConversionFactor)
if err != nil {
t.Errorf(`cubicWeight(&ProductSize{1.0, 1.0, 1.0}, %f) returned error %v`, cubicWeightConversionFactor, err)
}
if math.Abs(val1 - 0.00025) > F64_TOLERANCE {
t.Errorf(`cubicWeight(&ProductSize{1.0, 1.0, 1.0}, %f) - 0.00025 > %f (got %f)`, cubicWeightConversionFactor, F64_TOLERANCE, val1)
}
_, err = cubicWeight(&ProductSize{-20.5, 30.0, 40.0}, cubicWeightConversionFactor)
if err == nil {
t.Errorf(`cubicWeight(&ProductSize{-20.5, 30.0, 40.0}, %f) did not return an error`, cubicWeightConversionFactor)
}
_, err = cubicWeight(&ProductSize{0, 0, 0}, cubicWeightConversionFactor)
if err == nil {
t.Errorf(`cubicWeight(&ProductSize{0, 0, 0}, %f) did not return an error`, cubicWeightConversionFactor)
}
val2, err := cubicWeight(&ProductSize{40, 20, 30}, cubicWeightConversionFactor)
if err != nil {
t.Errorf(`cubicWeight(&ProductSize{40, 20, 30}, %f) returned error %v`, cubicWeightConversionFactor, err)
}
if math.Abs(val2 - 6.0) > F64_TOLERANCE {
t.Errorf(`cubicWeight(&ProductSize{40, 20, 30}, %f) - 6.0 > %f (got %f)`, cubicWeightConversionFactor, F64_TOLERANCE, val2)
}
}
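// Worked example behind the expectations above (inferred from the tests, not
// from the implementation): with dimensions in centimetres,
// cubicWeight(40, 20, 30) = (0.40 * 0.20 * 0.30) m^3 * 250 kg/m^3 = 6.0 kg,
// and a 1x1x1 cm product gives 0.000001 m^3 * 250 = 0.00025 kg.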
| {
F64_TOLERANCE := 0.000001
// func avgCubicWeight(weightTotal, productTotal float64) float64
if avgCubicWeight(200, 0) != 200 {
t.Error(`avgCubicWeight(200, 0) != 200`)
}
if avgCubicWeight(0, 0) != 0 {
t.Error(`avgCubicWeight(0, 0) != 0`)
}
if avgCubicWeight(-1, -1) != -1 {
t.Error(`avgCubicWeight(-1, -1) != -1`)
}
val1 := avgCubicWeight(200, 2)
if val1 != 100 {
t.Errorf(`avgCubicWeight(200, 2) != 100 (got %f)`, val1)
}
val2 := avgCubicWeight(350, 35)
if val2 != 10 {
t.Errorf(`avgCubicWeight(350, 35) != 10 (got %f)`, val2)
}
val3 := avgCubicWeight(3216841, 354)
if math.Abs(val3-9087.121468927) > F64_TOLERANCE {
t.Errorf(`avgCubicWeight(3216841, 354) - 9087.121468927 > %f (got %f)`, F64_TOLERANCE, val3)
}
val4 := avgCubicWeight(321986454, 321825)
if math.Abs(val4-1000.501682591) > F64_TOLERANCE {
t.Errorf(`avgCubicWeight(321986454, 321825) - 1000.501682591 > %f (got %f)`, F64_TOLERANCE, val4)
}
} |
generate_device_config_responses.go | // Code generated by go-swagger; DO NOT EDIT.
package presentations
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/moikot/smartthings-go/models"
)
// GenerateDeviceConfigReader is a Reader for the GenerateDeviceConfig structure.
type GenerateDeviceConfigReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GenerateDeviceConfigReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGenerateDeviceConfigOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 400:
result := NewGenerateDeviceConfigBadRequest()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 401:
result := NewGenerateDeviceConfigUnauthorized()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 403:
result := NewGenerateDeviceConfigForbidden()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 429:
result := NewGenerateDeviceConfigTooManyRequests()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
result := NewGenerateDeviceConfigDefault(response.Code())
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
if response.Code()/100 == 2 {
return result, nil
}
return nil, result
}
}
// NewGenerateDeviceConfigOK creates a GenerateDeviceConfigOK with default headers values
func NewGenerateDeviceConfigOK() *GenerateDeviceConfigOK {
return &GenerateDeviceConfigOK{}
}
/*GenerateDeviceConfigOK handles this case with default header values.
The device configuration.
*/
type GenerateDeviceConfigOK struct {
Payload *models.CreateDeviceConfigRequest
}
func (o *GenerateDeviceConfigOK) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfigOK %+v", 200, o.Payload)
}
func (o *GenerateDeviceConfigOK) GetPayload() *models.CreateDeviceConfigRequest {
return o.Payload
}
func (o *GenerateDeviceConfigOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.CreateDeviceConfigRequest)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGenerateDeviceConfigBadRequest creates a GenerateDeviceConfigBadRequest with default headers values
func NewGenerateDeviceConfigBadRequest() *GenerateDeviceConfigBadRequest {
return &GenerateDeviceConfigBadRequest{}
}
/*GenerateDeviceConfigBadRequest handles this case with default header values.
Bad request
*/
type GenerateDeviceConfigBadRequest struct {
Payload *models.ErrorResponse
}
func (o *GenerateDeviceConfigBadRequest) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfigBadRequest %+v", 400, o.Payload)
}
|
func (o *GenerateDeviceConfigBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGenerateDeviceConfigUnauthorized creates a GenerateDeviceConfigUnauthorized with default headers values
func NewGenerateDeviceConfigUnauthorized() *GenerateDeviceConfigUnauthorized {
return &GenerateDeviceConfigUnauthorized{}
}
/*GenerateDeviceConfigUnauthorized handles this case with default header values.
Unauthorized
*/
type GenerateDeviceConfigUnauthorized struct {
}
func (o *GenerateDeviceConfigUnauthorized) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfigUnauthorized ", 401)
}
func (o *GenerateDeviceConfigUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewGenerateDeviceConfigForbidden creates a GenerateDeviceConfigForbidden with default headers values
func NewGenerateDeviceConfigForbidden() *GenerateDeviceConfigForbidden {
return &GenerateDeviceConfigForbidden{}
}
/*GenerateDeviceConfigForbidden handles this case with default header values.
Forbidden
*/
type GenerateDeviceConfigForbidden struct {
}
func (o *GenerateDeviceConfigForbidden) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfigForbidden ", 403)
}
func (o *GenerateDeviceConfigForbidden) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
return nil
}
// NewGenerateDeviceConfigTooManyRequests creates a GenerateDeviceConfigTooManyRequests with default headers values
func NewGenerateDeviceConfigTooManyRequests() *GenerateDeviceConfigTooManyRequests {
return &GenerateDeviceConfigTooManyRequests{}
}
/*GenerateDeviceConfigTooManyRequests handles this case with default header values.
Too many requests
*/
type GenerateDeviceConfigTooManyRequests struct {
Payload *models.ErrorResponse
}
func (o *GenerateDeviceConfigTooManyRequests) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfigTooManyRequests %+v", 429, o.Payload)
}
func (o *GenerateDeviceConfigTooManyRequests) GetPayload() *models.ErrorResponse {
return o.Payload
}
func (o *GenerateDeviceConfigTooManyRequests) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGenerateDeviceConfigDefault creates a GenerateDeviceConfigDefault with default headers values
func NewGenerateDeviceConfigDefault(code int) *GenerateDeviceConfigDefault {
return &GenerateDeviceConfigDefault{
_statusCode: code,
}
}
/*GenerateDeviceConfigDefault handles this case with default header values.
Unexpected error
*/
type GenerateDeviceConfigDefault struct {
_statusCode int
Payload *models.ErrorResponse
}
// Code gets the status code for the generate device config default response
func (o *GenerateDeviceConfigDefault) Code() int {
return o._statusCode
}
func (o *GenerateDeviceConfigDefault) Error() string {
return fmt.Sprintf("[GET /presentation/types/{typeIntegrationId}/deviceconfig][%d] generateDeviceConfig default %+v", o._statusCode, o.Payload)
}
func (o *GenerateDeviceConfigDefault) GetPayload() *models.ErrorResponse {
return o.Payload
}
func (o *GenerateDeviceConfigDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ErrorResponse)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
} | func (o *GenerateDeviceConfigBadRequest) GetPayload() *models.ErrorResponse {
return o.Payload
} |
models_cnr.py | from collections import namedtuple
from datetime import datetime
import cnr.semver
from cnr.exception import raise_package_not_found, raise_channel_not_found, CnrException
import features
import data.model
from app import app, storage, authentication, model_cache
from data import appr_model
from data import model as data_model
from data.cache import cache_key
from data.database import Repository, MediaType, db_transaction
from data.appr_model.models import NEW_MODELS
from endpoints.appr.models_interface import (
ApplicationManifest,
ApplicationRelease,
ApplicationSummaryView,
AppRegistryDataInterface,
BlobDescriptor,
ChannelView,
ChannelReleasesView,
)
from util.audit import track_and_log
from util.morecollections import AttrDict
from util.names import parse_robot_username
class ReadOnlyException(CnrException):
status_code = 405
errorcode = "read-only"
def _strip_sha256_header(digest):
if digest.startswith("sha256:"):
return digest.split("sha256:")[1]
return digest
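# Illustrative examples (not part of the original file):
#   _strip_sha256_header("sha256:abc123") == "abc123"
#   _strip_sha256_header("abc123") == "abc123"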
def _split_package_name(package):
"""
Returns the namespace and package-name.
"""
return package.split("/")
def _join_package_name(ns, name):
"""
Returns a app-name in the 'namespace/name' format.
"""
return "%s/%s" % (ns, name)
def _timestamp_to_iso(timestamp, in_ms=True):
if in_ms:
timestamp = timestamp // 1000
return datetime.fromtimestamp(timestamp).isoformat()
def _application(package):
ns, name = _split_package_name(package)
repo = data.model.repository.get_app_repository(ns, name)
if repo is None:
raise_package_not_found(package)
return repo
class CNRAppModel(AppRegistryDataInterface):
def __init__(self, models_ref, is_readonly):
self.models_ref = models_ref
self.is_readonly = is_readonly
def log_action(
self,
event_name,
namespace_name,
repo_name=None,
analytics_name=None,
analytics_sample=1,
metadata=None,
):
metadata = {} if metadata is None else metadata
repo = None
if repo_name is not None:
db_repo = data.model.repository.get_repository(
namespace_name, repo_name, kind_filter="application"
)
repo = AttrDict(
{
"id": db_repo.id,
"name": db_repo.name,
"namespace_name": db_repo.namespace_user.username,
"is_free_namespace": db_repo.namespace_user.stripe_id is None,
}
)
track_and_log(
event_name,
repo,
analytics_name=analytics_name,
analytics_sample=analytics_sample,
**metadata,
)
def list_applications(
self, namespace=None, media_type=None, search=None, username=None, with_channels=False
):
"""
Lists all repositories that contain applications, with optional filtering to a specific
namespace and to those viewable by a specific user.
"""
limit = app.config.get("APP_REGISTRY_RESULTS_LIMIT", 50)
namespace_whitelist = app.config.get("APP_REGISTRY_PACKAGE_LIST_CACHE_WHITELIST", [])
# NOTE: This caching only applies for the super-large and commonly requested results
# sets.
if (
namespace is not None
and namespace in namespace_whitelist
and media_type is None
and search is None
and username is None
and not with_channels
):
def _list_applications():
return [
found._asdict()
for found in self._list_applications(namespace=namespace, limit=limit)
]
apps_cache_key = cache_key.for_appr_applications_list(namespace, limit)
return [
ApplicationSummaryView(**found)
for found in model_cache.retrieve(apps_cache_key, _list_applications)
]
else:
return self._list_applications(
namespace, media_type, search, username, with_channels, limit=limit
)
def | (
self,
namespace=None,
media_type=None,
search=None,
username=None,
with_channels=False,
limit=None,
):
limit = limit or app.config.get("APP_REGISTRY_RESULTS_LIMIT", 50)
views = []
for repo in appr_model.package.list_packages_query(
self.models_ref, namespace, media_type, search, username=username, limit=limit
):
tag_set_prefetch = getattr(repo, self.models_ref.tag_set_prefetch_name)
releases = [t.name for t in tag_set_prefetch]
if not releases:
continue
available_releases = [
str(x) for x in sorted(cnr.semver.versions(releases, False), reverse=True)
]
channels = None
if with_channels:
channels = [
ChannelView(name=chan.name, current=chan.linked_tag.name)
for chan in appr_model.channel.get_repo_channels(repo, self.models_ref)
]
app_name = _join_package_name(repo.namespace_user.username, repo.name)
manifests = self.list_manifests(app_name, available_releases[0])
view = ApplicationSummaryView(
namespace=repo.namespace_user.username,
name=app_name,
visibility=data_model.repository.repository_visibility_name(repo),
default=available_releases[0],
channels=channels,
manifests=manifests,
releases=available_releases,
updated_at=_timestamp_to_iso(tag_set_prefetch[-1].lifetime_start),
created_at=_timestamp_to_iso(tag_set_prefetch[0].lifetime_start),
)
views.append(view)
return views
def application_is_public(self, package_name):
"""
Returns:
* True if the repository is public
"""
namespace, name = _split_package_name(package_name)
return data.model.repository.repository_is_public(namespace, name)
def create_application(self, package_name, visibility, owner):
"""
Create a new app repository; owner is the user who creates it.
"""
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
ns, name = _split_package_name(package_name)
data.model.repository.create_repository(ns, name, owner, visibility, "application")
def application_exists(self, package_name):
"""
Return True if the app repository exists.
"""
ns, name = _split_package_name(package_name)
return data.model.repository.get_repository(ns, name, kind_filter="application") is not None
def basic_search(self, query, username=None):
"""Returns an array of matching AppRepositories in the format: 'namespace/name'
Note:
* Only 'public' repositories are returned
Todo:
* Filter results to repositories readable by the user (including visibilities)
"""
limit = app.config.get("APP_REGISTRY_RESULTS_LIMIT", 50)
return [
_join_package_name(r.namespace_user.username, r.name)
for r in data.model.repository.get_app_search(
lookup=query, username=username, limit=limit
)
]
def list_releases(self, package_name, media_type=None):
"""Return the list of all releases of an Application
Example:
>>> get_app_releases('ant31/rocketchat')
['1.7.1', '1.7.0', '1.7.2']
Todo:
* Paginate
"""
return appr_model.release.get_releases(
_application(package_name), self.models_ref, media_type
)
def list_manifests(self, package_name, release=None):
"""
Returns the list of all manifests of an Application.
Todo:
* Paginate
"""
try:
repo = _application(package_name)
return list(appr_model.manifest.get_manifest_types(repo, self.models_ref, release))
except (Repository.DoesNotExist, self.models_ref.Tag.DoesNotExist):
raise_package_not_found(package_name, release)
def fetch_release(self, package_name, release, media_type):
"""
Retrieves an AppRelease from its repository-name and release-name.
"""
repo = _application(package_name)
try:
tag, manifest, blob = appr_model.release.get_app_release(
repo, release, media_type, self.models_ref
)
created_at = _timestamp_to_iso(tag.lifetime_start)
blob_descriptor = BlobDescriptor(
digest=_strip_sha256_header(blob.digest),
mediaType=blob.media_type.name,
size=blob.size,
urls=[],
)
app_manifest = ApplicationManifest(
digest=manifest.digest, mediaType=manifest.media_type.name, content=blob_descriptor
)
app_release = ApplicationRelease(
release=tag.name, created_at=created_at, name=package_name, manifest=app_manifest
)
return app_release
except (
self.models_ref.Tag.DoesNotExist,
self.models_ref.Manifest.DoesNotExist,
self.models_ref.Blob.DoesNotExist,
Repository.DoesNotExist,
MediaType.DoesNotExist,
):
raise_package_not_found(package_name, release, media_type)
def store_blob(self, cnrblob, content_media_type):
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
fp = cnrblob.packager.io_file
path = cnrblob.upload_url(cnrblob.digest)
locations = storage.preferred_locations
storage.stream_write(locations, path, fp, "application/x-gzip")
db_blob = appr_model.blob.get_or_create_blob(
cnrblob.digest, cnrblob.size, content_media_type, locations, self.models_ref
)
return BlobDescriptor(
mediaType=content_media_type,
digest=_strip_sha256_header(db_blob.digest),
size=db_blob.size,
urls=[],
)
def create_release(self, package, user, visibility, force=False):
"""
Add an app-release to a repository; package is an instance of data.cnr.package.Package.
"""
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
manifest = package.manifest()
ns, name = package.namespace, package.name
repo = data.model.repository.get_or_create_repository(
ns, name, user, visibility=visibility, repo_kind="application"
)
tag_name = package.release
appr_model.release.create_app_release(
repo,
tag_name,
package.manifest(),
manifest["content"]["digest"],
self.models_ref,
force,
)
def delete_release(self, package_name, release, media_type):
"""
Remove/Delete an app-release from an app-repository.
It does not delete the entire app-repository, only a single release.
"""
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
repo = _application(package_name)
try:
appr_model.release.delete_app_release(repo, release, media_type, self.models_ref)
except (
self.models_ref.Channel.DoesNotExist,
self.models_ref.Tag.DoesNotExist,
MediaType.DoesNotExist,
):
raise_package_not_found(package_name, release, media_type)
def release_exists(self, package, release):
"""
Return True if a release with that name already exists or has existed (including deleted ones)
"""
# TODO: Figure out why this isn't implemented.
def channel_exists(self, package_name, channel_name):
"""
Returns true if channel exists.
"""
repo = _application(package_name)
return appr_model.tag.tag_exists(repo, channel_name, self.models_ref, "channel")
def delete_channel(self, package_name, channel_name):
"""Delete an AppChannel
Note:
It doesn't delete the AppReleases
"""
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
repo = _application(package_name)
try:
appr_model.channel.delete_channel(repo, channel_name, self.models_ref)
except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
raise_channel_not_found(package_name, channel_name)
def list_channels(self, package_name):
"""
Returns all AppChannel for a package.
"""
repo = _application(package_name)
channels = appr_model.channel.get_repo_channels(repo, self.models_ref)
return [ChannelView(name=chan.name, current=chan.linked_tag.name) for chan in channels]
def fetch_channel(self, package_name, channel_name, with_releases=True):
"""
Returns an AppChannel.
"""
repo = _application(package_name)
try:
channel = appr_model.channel.get_channel(repo, channel_name, self.models_ref)
except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
raise_channel_not_found(package_name, channel_name)
if with_releases:
releases = appr_model.channel.get_channel_releases(repo, channel, self.models_ref)
chanview = ChannelReleasesView(
current=channel.linked_tag.name,
name=channel.name,
releases=[channel.linked_tag.name] + [c.name for c in releases],
)
else:
chanview = ChannelView(current=channel.linked_tag.name, name=channel.name)
return chanview
def list_release_channels(self, package_name, release, active=True):
repo = _application(package_name)
try:
channels = appr_model.channel.get_tag_channels(
repo, release, self.models_ref, active=active
)
return [ChannelView(name=c.name, current=release) for c in channels]
except (self.models_ref.Channel.DoesNotExist, self.models_ref.Tag.DoesNotExist):
raise_package_not_found(package_name, release)
def update_channel(self, package_name, channel_name, release):
"""Append a new release to the AppChannel
Returns:
A new AppChannel with the release
"""
if self.is_readonly:
raise ReadOnlyException("Currently in read-only mode")
repo = _application(package_name)
channel = appr_model.channel.create_or_update_channel(
repo, channel_name, release, self.models_ref
)
return ChannelView(current=channel.linked_tag.name, name=channel.name)
def get_blob_locations(self, digest):
return appr_model.blob.get_blob_locations(digest, self.models_ref)
# Phase 3: Read and write from new tables.
model = CNRAppModel(NEW_MODELS, features.READONLY_APP_REGISTRY)
| _list_applications |
keyed.rs | }
impl<'key, T: Keyed<'key>> Keyed<'key> for Rc<T> {
fn key(&self) -> &'key str {
self.as_ref().key()
}
}
pub(crate) fn serialize<'src, S, K>(keyed: &K, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
K: Keyed<'src>,
{
serializer.serialize_str(keyed.key())
}
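// Illustrative sketch (not from the original source): this helper has the
// shape serde expects for `serialize_with`, so a field holding a keyed value
// can be serialized as its key string alone, e.g. with a hypothetical type:
//
//   #[derive(Serialize)]
//   struct Entry<'src> {
//       #[serde(serialize_with = "crate::keyed::serialize")]
//       recipe: Recipe<'src>,
//   }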
pub(crate) fn serialize_option<'src, S, K>(
recipe: &Option<K>,
serializer: S,
) -> Result<S::Ok, S::Error>
where
S: Serializer,
K: Keyed<'src>,
{
match recipe {
None => serializer.serialize_none(),
Some(keyed) => serialize(keyed, serializer),
}
} | use crate::common::*;
pub(crate) trait Keyed<'key> {
fn key(&self) -> &'key str; |
|
comment.js | import CompostMixin from '../../../node_modules/@lamplightdev/compost/src/compost-mixin.js';
import globalStyles from '../utility/styles.js';
import './comments.js';
class Comment extends CompostMixin(HTMLElement) {
static get properties() {
return {
// comment data
data: {
type: Object,
value: null,
observer: 'observeData',
},
// are sub comments currently shown
showComments: {
type: Boolean,
value: false,
observer: 'observeShowComments',
},
// how deeply nested this comment is
depth: {
type: Number,
value: 0,
observer: 'observeDepth',
},
};
}
render() {
return `
<style>
${globalStyles}
:host {
contain: content;
display: block;
color: #333;
font-size: 0.9rem;
margin-bottom: 1rem;
border-bottom: 1px solid #ddd;
}
#info {
color: #666;
font-weight: bold;
margin-bottom: 1rem;
}
.hide {
display: none;
}
button {
margin-bottom: 0.5rem;
font-size: 1rem;
border: 0;
box-shadow: none;
background: transparent;
color: #333;
padding: 0;
cursor: pointer;
}
</style>
<div id="top">
<div id="info">
<a id="by" href=""></a> <span id="time"></span>
</div>
<div id="text"></div>
<button id="commentstoggle" on-click="toggleComments" hidden>[+]</button>
</div>
<x-comments id="comments" class="hide"></x-comments>
`;
}
// update comment data
observeData(oldValue, newValue) {
if (!newValue) return;
this.$id.by.href = `https://news.ycombinator.com/user?id=${newValue.user}`;
this.$id.by.textContent = newValue.user;
this.$id.time.textContent = newValue.time_ago;
this.$id.text.innerHTML = newValue.content;
if (newValue.comments.length) {
this.$id.commentstoggle.hidden = false;
} else {
this.$id.commentstoggle.hidden = true;
}
this.$id.comments.items = newValue.comments;
this.$id.comments.depth = this.depth;
}
observeDepth(oldValue, newValue) {
this.$id.comments.depth = newValue;
}
// update hide/show comments | } else {
this.$id.comments.classList.add('hide');
this.$id.commentstoggle.textContent = '[+]';
}
}
toggleComments() {
this.showComments = !this.showComments;
}
}
customElements.define('x-comment', Comment); | observeShowComments(oldValue, newValue) {
if (newValue) {
this.$id.comments.classList.remove('hide');
this.$id.commentstoggle.textContent = '[-]'; |
post.go | package api
import (
"errors"
"github.com/gin-gonic/gin"
"github.com/lotteryjs/ten-minutes-app/model"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/bson/primitive"
"net/http"
"strconv"
)
// The PostDatabase interface for encapsulating database access.
type PostDatabase interface {
GetPosts(paging *model.Paging) []*model.Post
GetPostByID(id primitive.ObjectID) *model.Post
CreatePost(post *model.Post) *model.Post
UpdatePost(post *model.Post) *model.Post
DeletePostByID(id primitive.ObjectID) error | CountPost(condition interface{}) string
}
// The PostAPI provides handlers for managing posts.
type PostAPI struct {
DB PostDatabase
}
// CreatePost creates a post.
func (a *PostAPI) CreatePost(ctx *gin.Context) {
var post = model.Post{}
if err := ctx.ShouldBind(&post); err == nil {
if result := a.DB.CreatePost(post.New()); result != nil {
ctx.JSON(201, result)
} else {
ctx.AbortWithError(500, errors.New("CreatePost error"))
}
} else {
ctx.AbortWithError(500, errors.New("ShouldBind error"))
}
}
// GetPosts returns all the posts
// _end=5&_order=DESC&_sort=id&_start=0 adapts to react-admin's query format
func (a *PostAPI) GetPosts(ctx *gin.Context) {
var (
start int64
end int64
sort string
order int
userID string
)
start, _ = strconv.ParseInt(ctx.DefaultQuery("_start", "0"), 10, 64)
end, _ = strconv.ParseInt(ctx.DefaultQuery("_end", "10"), 10, 64)
userID = ctx.DefaultQuery("userId", "")
sort = ctx.DefaultQuery("_sort", "_id")
order = 1
if sort == "id" {
sort = "_id"
}
if ctx.DefaultQuery("_order", "DESC") == "DESC" {
order = -1
}
condition := bson.D{}
if userID != "" {
conditionUserID, _ := primitive.ObjectIDFromHex(userID)
condition = bson.D{{
Key: "userId",
Value: conditionUserID,
}}
}
limit := end - start
posts := a.DB.GetPosts(
&model.Paging{
Skip: &start,
Limit: &limit,
SortKey: sort,
SortVal: order,
Condition: condition,
})
ctx.Header("X-Total-Count", a.DB.CountPost(nil))
ctx.JSON(200, posts)
}
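// Illustrative request mapping for GetPosts above (an example, not part of
// the original source): a react-admin list call such as
//
//   GET /posts?_start=0&_end=10&_sort=id&_order=DESC&userId=<hex-object-id>
//
// becomes skip=0, limit=10, sort key "_id" descending, filtered on userId,
// with the total count reported in the X-Total-Count response header.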
// GetPostByID returns the post by id
func (a *PostAPI) GetPostByID(ctx *gin.Context) {
withID(ctx, "id", func(id primitive.ObjectID) {
if post := a.DB.GetPostByID(id); post != nil {
ctx.JSON(200, post)
} else {
ctx.AbortWithError(404, errors.New("post does not exist"))
}
})
}
// DeletePostByID deletes the post by id
func (a *PostAPI) DeletePostByID(ctx *gin.Context) {
withID(ctx, "id", func(id primitive.ObjectID) {
if err := a.DB.DeletePostByID(id); err == nil {
ctx.JSON(200, http.StatusOK)
} else {
ctx.AbortWithError(404, errors.New("post does not exist"))
}
})
}
// UpdatePostByID updates the post by id
func (a *PostAPI) UpdatePostByID(ctx *gin.Context) {
withID(ctx, "id", func(id primitive.ObjectID) {
var post = model.Post{}
abort := errors.New("post does not exist")
if err := ctx.ShouldBind(&post); err == nil {
if result := a.DB.UpdatePost(&post); result != nil {
ctx.JSON(200, result)
} else {
ctx.AbortWithError(404, abort)
}
} else {
ctx.AbortWithError(404, abort)
}
})
} | |
Chef.py | # coding: utf-8
import time
import pickle
import socket
import random
import logging
import argparse
import threading
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S')
logger = logging.getLogger('Chef')
def contains_successor(identification, successor, node):
if identification < node <= successor:
return True
elif successor < identification and (node > identification or node < successor):
return True
return False
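# Illustrative behaviour (examples, not part of the original file): on a
# Chord-style identifier ring,
#   contains_successor(10, 20, 15) -> True    # 15 lies in (10, 20]
#   contains_successor(50, 5, 60)  -> True    # wrap-around: 60 in (50, 5]
#   contains_successor(50, 5, 3)   -> True    # wrap-around: 3 in (50, 5]
#   contains_successor(10, 20, 25) -> False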
class Chef(threading.Thread):
def __init__(self, port=5002, ide=2, ring_address=5000):
threading.Thread.__init__(self)
self.name = "CHEF"
self.id = ide
self.port = port
self.ring_address = ring_address
self.ring_completed = False
self.ring_ids_dict = {'RESTAURANT': None, 'WAITER': None, 'CHEF': self.id, 'CLERK': None}
if ring_address is None:
self.successor_id = self.id
self.successor_port = self.port
self.inside_ring = True
else:
self.successor_id = None
self.successor_port = None
self.inside_ring = False
self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.logger = logging.getLogger("Node {}".format(self.id))
def send(self, port, o):
p = pickle.dumps(o)
self.socket.sendto(p, ('localhost', port))
def recv(self):
try:
p, port = self.socket.recvfrom(1024)
except socket.timeout:
return None, None
else:
if len(p) == 0:
return None, port
else:
return p, port
def | (self, args):
self.logger.debug('Node join: %s', args)
port = args['addr']
identification = args['id']
if self.id == self.successor_id:
self.successor_id = identification
self.successor_port = port
args = {'successor_id': self.id, 'successor_port': self.port}
self.send(port, {'method': 'JOIN_REP', 'args': args})
elif contains_successor(self.id, self.successor_id, identification):
args = {'successor_id': self.successor_id, 'successor_port': self.successor_port}
self.successor_id = identification
self.successor_port = port
self.send(port, {'method': 'JOIN_REP', 'args': args})
else:
self.logger.debug('Find Successor(%d)', args['id'])
self.send(self.successor_port, {'method': 'JOIN_RING', 'args': args})
self.logger.info(self)
def node_discovery(self, args):
if self.ring_ids_dict['RESTAURANT'] is None and args['RESTAURANT'] is not None:
self.ring_ids_dict['RESTAURANT'] = args['RESTAURANT']
if self.ring_ids_dict['WAITER'] is None and args['WAITER'] is not None:
self.ring_ids_dict['WAITER'] = args['WAITER']
if self.ring_ids_dict['CLERK'] is None and args['CLERK'] is not None:
self.ring_ids_dict['CLERK'] = args['CLERK']
self.send(self.successor_port, {'method': 'NODE_DISCOVERY', 'args': self.ring_ids_dict})
def __str__(self):
return 'Node ID: {}; Ring Address: {}; Successor_id: {}; Successor_port: {};' \
.format(self.id, self.ring_address, self.successor_id, self.successor_port)
def __repr__(self):
return self.__str__()
def run(self):
self.socket.bind(('localhost', self.port))
while not self.inside_ring:
o = {'method': 'JOIN_RING', 'args': {'addr': self.port, 'id': self.id}}
self.send(self.ring_address, o)
p, addr = self.recv()
if p is not None:
o = pickle.loads(p)
self.logger.debug('O: %s', o)
if o['method'] == 'JOIN_REP':
args = o['args']
self.successor_id = args['successor_id']
self.successor_port = args['successor_port']
self.inside_ring = True
self.logger.info(self)
done = False
while not done:
p, addr = self.recv()
if p is not None:
o = pickle.loads(p)
self.logger.info('O: %s', o)
if o['method'] == 'JOIN_RING':
self.node_join(o['args'])
elif o['method'] == 'NODE_DISCOVERY':
self.node_discovery(o['args'])
elif o['method'] == 'START':
self.send(self.successor_port, {'method': 'COOK', 'args': o['args']})
elif o['method'] == 'COOKED':
self.send(self.successor_port, {'method': 'DONE', 'args': o['args']})
else:
self.send(self.successor_port, o)
| node_join |
vault.go | package models
import (
"context"
"database/sql"
"time"
"github.com/keydotcat/keycatd/util"
)
type Vault struct {
Id string `scaneo:"pk" json:"id"`
Team string `scaneo:"pk" json:"-"`
Version uint32 `json:"version"`
PublicKey []byte `json:"public_key"`
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
}
func createVault(tx *sql.Tx, id, team string, vkp VaultKeyPair) (*Vault, error) |
func (v *Vault) insert(tx *sql.Tx) error {
if err := v.validate(); err != nil {
return err
}
now := time.Now().UTC()
v.CreatedAt = now
v.UpdatedAt = now
v.Version = 1
_, err := v.dbInsert(tx)
switch {
case IsDuplicateErr(err):
return util.NewErrorFrom(ErrAlreadyExists)
case isErrOrPanic(err):
return err
}
return nil
}
func (v *Vault) update(tx *sql.Tx) error {
if err := v.validate(); err != nil {
return err
}
v.UpdatedAt = time.Now().UTC()
res, err := tx.Exec(`UPDATE "vault" SET "version" = "version" + 1, "updated_at" = $1 WHERE "team" = $2 AND "id" = $3`, v.UpdatedAt, v.Team, v.Id)
if err := treatUpdateErr(res, err); err != nil {
return err
}
r := tx.QueryRow(`SELECT "version" FROM "vault" WHERE "team" = $1 AND "id" = $2`, v.Team, v.Id)
err = r.Scan(&v.Version)
if isNotExistsErr(err) {
return util.NewErrorFrom(ErrDoesntExist)
}
if isErrOrPanic(err) {
return err
}
return nil
}
func (v Vault) validate() error {
errs := util.NewErrorFields().(*util.Error)
if len(v.Id) == 0 {
errs.SetFieldError("vault_id", "missing")
}
if len(v.Team) == 0 {
errs.SetFieldError("vault_team", "missing")
}
if len(v.PublicKey) != publicKeyPackSize {
errs.SetFieldError("vault_public_key", "invalid")
}
if v.Version == 0 {
errs.SetFieldError("version", "invalid")
}
return errs.SetErrorOrCamo(ErrAlreadyExists)
}
func (v Vault) AddUsers(ctx context.Context, userKeys map[string][]byte) error {
for _, k := range userKeys {
if _, err := verifyAndUnpack(v.PublicKey, k); err != nil {
return err
}
}
return doTx(ctx, func(tx *sql.Tx) error {
t := &Team{Id: v.Team}
users, err := t.getUsers(tx)
if err != nil {
return err
}
for uk := range userKeys {
found := false
for _, user := range users {
if user.Id == uk {
found = true
break
}
}
if !found {
return util.NewErrorFrom(ErrNotInTeam)
}
}
for u, k := range userKeys {
if err := v.addUser(tx, u, k); err != nil {
if IsDuplicateErr(err) {
return util.NewErrorFrom(ErrAlreadyExists)
}
return err
}
}
return nil
})
}
func (v Vault) GetUserIds(ctx context.Context) (uids []string, err error) {
return uids, doTx(ctx, func(tx *sql.Tx) error {
uids, err = v.getUserIds(tx)
return err
})
}
func (v Vault) getUserIds(tx *sql.Tx) ([]string, error) {
rows, err := tx.Query(`SELECT "user" FROM "vault_user" WHERE "vault_user"."vault" = $1 AND "vault_user"."team" = $2`, v.Id, v.Team)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
var users []string
var uid string
for rows.Next() {
err = rows.Scan(&uid)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
users = append(users, uid)
}
if err = rows.Err(); isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
return users, nil
}
func (v Vault) addUser(tx *sql.Tx, username string, key []byte) error {
if err := v.update(tx); err != nil {
return err
}
vu := &vaultUser{Team: v.Team, Vault: v.Id, User: username, Key: key}
return vu.insert(tx)
}
func (v Vault) RemoveUser(ctx context.Context, username string) error {
return doTx(ctx, func(tx *sql.Tx) error {
t := &Team{Id: v.Team}
tu, err := t.getUserAffiliation(tx, username)
if err != nil {
return err
}
if tu.Admin {
return util.NewErrorFrom(err)
}
return v.removeUser(tx, username)
})
}
func (v Vault) removeUser(tx *sql.Tx, username string) error {
vu := &vaultUser{Team: v.Team, Vault: v.Id, User: username}
return treatUpdateErr(vu.dbDelete(tx))
}
func (v *Vault) AddSecret(ctx context.Context, s *Secret) error {
var err error
for retry := 0; retry < 3; retry++ {
err = doTx(ctx, func(tx *sql.Tx) error {
return v.addSecret(tx, s)
})
if err == ErrAlreadyExists {
continue
}
break
}
return err
}
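// Editorial note on the retry loop above (an inference, not original
// commentary): ErrAlreadyExists here typically signals a duplicate-key
// conflict from a concurrent writer racing on the same secret version, so
// the whole transaction is retried up to three times as a lightweight
// optimistic-concurrency strategy instead of taking a lock.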
func (v *Vault) addSecret(tx *sql.Tx, s *Secret) error {
s.Team = v.Team
s.Vault = v.Id
if _, err := verifyAndUnpack(v.PublicKey, s.Data); err != nil {
return err
}
if err := v.update(tx); err != nil {
return err
}
s.VaultVersion = v.Version
return s.insert(tx)
}
func (v *Vault) AddSecretList(ctx context.Context, sl []*Secret) error {
for _, s := range sl {
s.Team = v.Team
s.Vault = v.Id
if _, err := verifyAndUnpack(v.PublicKey, s.Data); err != nil {
return err
}
}
var err error
for retry := 0; retry < 3; retry++ {
err = doTx(ctx, func(tx *sql.Tx) error {
for _, s := range sl {
if err := v.update(tx); err != nil {
return err
}
s.VaultVersion = v.Version
if err := s.insert(tx); err != nil {
return err
}
}
return nil
})
if err == ErrAlreadyExists {
continue
}
break
}
return err
}
func (v *Vault) UpdateSecret(ctx context.Context, s *Secret) error {
_, err := verifyAndUnpack(v.PublicKey, s.Data)
if err != nil {
return err
}
return doTx(ctx, func(tx *sql.Tx) error {
os, err := v.getSecret(tx, s.Id)
if err != nil {
return err
}
if err := v.update(tx); err != nil {
return err
}
s.Team = os.Team
s.Vault = os.Vault
s.Version = os.Version + 1
s.VaultVersion = v.Version
return s.update(tx)
})
}
func (v *Vault) DeleteSecret(ctx context.Context, sid string) error {
return doTx(ctx, func(tx *sql.Tx) error {
return v.deleteSecret(tx, sid)
})
}
func (v *Vault) deleteSecret(tx *sql.Tx, sid string) error {
if err := v.update(tx); err != nil {
return err
}
res, err := tx.Exec(`DELETE FROM "secret" WHERE "secret"."id" = $1`, sid)
return treatUpdateErr(res, err)
}
func (v Vault) GetSecrets(ctx context.Context) ([]*Secret, error) {
db := GetDB(ctx)
query := `
SELECT DISTINCT ON ("secret"."team", "secret"."vault", "secret"."id") ` + selectSecretFullFields + `
FROM "secret" WHERE "secret"."team" = $1 AND "secret"."vault" = $2
ORDER BY "secret"."team", "secret"."vault", "secret"."id", "secret"."version" DESC`
rows, err := db.Query(query, v.Team, v.Id)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
secrets, err := scanSecrets(rows)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
return secrets, nil
}
func (v Vault) GetSecretsAllVersions(ctx context.Context) ([]*Secret, error) {
db := GetDB(ctx)
query := `SELECT` + selectSecretFullFields + ` FROM "secret" WHERE "secret"."team" = $1 AND "secret"."vault" = $2`
rows, err := db.Query(query, v.Team, v.Id)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
secrets, err := scanSecrets(rows)
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
return secrets, nil
}
func (v Vault) GetSecret(ctx context.Context, sid string) (s *Secret, err error) {
return s, doTx(ctx, func(tx *sql.Tx) error {
s, err = v.getSecret(tx, sid)
return err
})
}
func (v Vault) getSecret(tx *sql.Tx, sid string) (*Secret, error) {
s := &Secret{Id: sid}
r := tx.QueryRow(`SELECT `+selectSecretFields+` FROM "secret" WHERE "secret"."team" = $1 AND "secret"."vault" = $2 AND "secret"."id" = $3 ORDER BY "secret"."version" DESC LIMIT 1`, v.Team, v.Id, sid)
err := s.dbScanRow(r)
if isNotExistsErr(err) {
return nil, util.NewErrorFrom(ErrDoesntExist)
}
if isErrOrPanic(err) {
return nil, util.NewErrorFrom(err)
}
return s, nil
}
| {
v := &Vault{Id: id, Team: team, Version: 1, PublicKey: vkp.PublicKey}
if err := v.insert(tx); err != nil {
return nil, err
}
for u, k := range vkp.Keys {
if err := v.addUser(tx, u, k); err != nil {
return nil, err
}
}
return v, nil
} |
header.component.ts | import { Component, OnInit, Output, EventEmitter } from '@angular/core';
import { TodoService } from 'src/app/services/todo-service';
import { Store } from '@ngrx/store';
import { User } from 'src/app/store/models/user.model';
import { GetUser } from 'src/app/store/actions/user.actions';
import { ToastService } from '../../services/todo-service.service';
@Component({
selector: 'app-header',
templateUrl: './header.component.html',
styleUrls: ['./header.component.css'],
})
export class HeaderComponent implements OnInit {
@Output() emitDeleteAll = new EventEmitter();
info: string = undefined;
constructor(
private todoService: TodoService,
private store: Store<{ user: User }>,
private toastService: ToastService
) {}
ngOnInit(): void {}
deleteAll() {
this.emitDeleteAll.emit();
}
login(name: string, pass: string) {
this.todoService.login(name, pass).then((data) => {
if (data.state == 'Success') {
this.toastService.show(
'Success, logged in!',
'Now make some todos...',
'success'
);
this.todoService
.getTodos()
.toPromise()
.then((user) => {
this.store.dispatch(GetUser({ payload: user }));
});
} else {
this.toastService.show(data.error.err, 'Sorry about that...', 'danger');
}
});
}
register(name: string, pass: string) {
this.todoService.register(name, pass).then((data) => {
if (data.state == 'Success') {
this.toastService.show( | 'Success, logged in!',
'Now make some todos...',
'success'
);
this.todoService
.getTodos()
.toPromise()
.then((user) => {
this.store.dispatch(GetUser({ payload: user }));
});
} else {
this.toastService.show(data.error.err, 'Sorry about that...', 'danger');
}
});
}
} | |
node_lifecycle_controller_test.go | /*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodelifecycle
import (
"context"
"strings"
"testing"
"time"
"k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
testcore "k8s.io/client-go/testing"
cloudprovider "k8s.io/cloud-provider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
"k8s.io/kubernetes/pkg/controller/testutil"
nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/pkg/scheduler/algorithm"
"k8s.io/kubernetes/pkg/util/node"
taintutils "k8s.io/kubernetes/pkg/util/taints"
)
const (
testNodeMonitorGracePeriod = 40 * time.Second
testNodeStartupGracePeriod = 60 * time.Second
testNodeMonitorPeriod = 5 * time.Second
testRateLimiterQPS = float32(10000)
testLargeClusterThreshold = 20
testUnhealthyThreshold = float32(0.55)
)
func alwaysReady() bool { return true }
type nodeLifecycleController struct {
*Controller
nodeInformer coreinformers.NodeInformer
daemonSetInformer extensionsinformers.DaemonSetInformer
}
// doEviction does the fake eviction and returns the status of eviction operation.
func (nc *nodeLifecycleController) doEviction(fakeNodeHandler *testutil.FakeNodeHandler) bool {
var podEvicted bool
zones := testutil.GetZones(fakeNodeHandler)
for _, zone := range zones {
nc.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
uid, _ := value.UID.(string)
nodeutil.DeletePods(fakeNodeHandler, nc.recorder, value.Value, uid, nc.daemonSetStore)
return true, 0
})
}
for _, action := range fakeNodeHandler.Actions() {
if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
podEvicted = true
return podEvicted
}
}
return podEvicted
}
func (nc *nodeLifecycleController) syncNodeStore(fakeNodeHandler *testutil.FakeNodeHandler) error {
nodes, err := fakeNodeHandler.List(metav1.ListOptions{})
if err != nil {
return err
}
newElems := make([]interface{}, 0, len(nodes.Items))
for i := range nodes.Items {
newElems = append(newElems, &nodes.Items[i])
}
return nc.nodeInformer.Informer().GetStore().Replace(newElems, "newRV")
}
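// newNodeLifecycleControllerFromClient builds a Controller wired to shared informers over the given client, with informer-synced checks stubbed to always ready.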
func newNodeLifecycleControllerFromClient(
cloud cloudprovider.Interface,
kubeClient clientset.Interface,
podEvictionTimeout time.Duration,
evictionLimiterQPS float32,
secondaryEvictionLimiterQPS float32,
largeClusterThreshold int32,
unhealthyZoneThreshold float32,
nodeMonitorGracePeriod time.Duration,
nodeStartupGracePeriod time.Duration,
nodeMonitorPeriod time.Duration,
useTaints bool,
) (*nodeLifecycleController, error) {
factory := informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
nodeInformer := factory.Core().V1().Nodes()
daemonSetInformer := factory.Extensions().V1beta1().DaemonSets()
nc, err := NewNodeLifecycleController(
factory.Core().V1().Pods(),
nodeInformer,
daemonSetInformer,
cloud,
kubeClient,
nodeMonitorPeriod,
nodeStartupGracePeriod,
nodeMonitorGracePeriod,
podEvictionTimeout,
evictionLimiterQPS,
secondaryEvictionLimiterQPS,
largeClusterThreshold,
unhealthyZoneThreshold,
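// the single useTaints test flag feeds all three taint-related switches here
// (presumably the taint manager, taint-based evictions, and taint-node-by-condition)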
useTaints,
useTaints,
useTaints,
)
if err != nil {
return nil, err
}
nc.podInformerSynced = alwaysReady
nc.nodeInformerSynced = alwaysReady
nc.daemonSetInformerSynced = alwaysReady
return &nodeLifecycleController{nc, nodeInformer, daemonSetInformer}, nil
}
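// TestMonitorNodeHealthEvictPods verifies that pods are evicted only when a node has been
// NotReady or Unknown for longer than the eviction timeout; recently created nodes and
// ds-managed pods are left alone.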
func TestMonitorNodeHealthEvictPods(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
labels := map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
}
// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
// we need a second healthy node in the tests. Because of how the tests are written, we need to update
// the status of this Node.
healthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status has just been updated and the node is still Ready.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
table := []struct {
fakeNodeHandler *testutil.FakeNodeHandler
daemonSets []extensions.DaemonSet
timeToPass time.Duration
newNodeStatus v1.NodeStatus
secondNodeNewStatus v1.NodeStatus
expectedEvictPods bool
description string
}{
// Node created recently, with no status (happens only at cluster startup).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 0,
newNodeStatus: v1.NodeStatus{},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: false,
description: "Node created recently, with no status.",
},
// Node created recently without FailureDomain labels (added back later), with no status (happens only at cluster startup).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 0,
newNodeStatus: v1.NodeStatus{},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: false,
description: "Node created recently without FailureDomain labels which is added back later, with no status (happens only at cluster startup).",
},
// Node created long time ago, and kubelet posted NotReady for a short period of time.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
// Node status has just been updated, and is NotReady for 10min.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: false,
description: "Node created long time ago, and kubelet posted NotReady for a short period of time.",
},
// Pod is ds-managed, and kubelet posted NotReady for a long period of time.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(
&v1.PodList{
Items: []v1.Pod{
{
ObjectMeta: metav1.ObjectMeta{
Name: "pod0",
Namespace: "default",
Labels: map[string]string{"daemon": "yes"},
},
Spec: v1.PodSpec{
NodeName: "node0",
},
},
},
},
),
},
daemonSets: []extensions.DaemonSet{
{
ObjectMeta: metav1.ObjectMeta{
Name: "ds0",
Namespace: "default",
},
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"daemon": "yes"},
},
},
},
},
timeToPass: time.Hour,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
// Node status has just been updated, and is NotReady for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: false,
description: "Pod is ds-managed, and kubelet posted NotReady for a long period of time.",
},
// Node created long time ago, and kubelet posted NotReady for a long period of time.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: time.Hour,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
// Node status has just been updated, and is NotReady for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 59, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: true,
description: "Node created long time ago, and kubelet posted NotReady for a long period of time.",
},
// Node created long time ago, node controller posted Unknown for a short period of time.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: evictionTimeout - testNodeMonitorGracePeriod,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
// Node status was updated by nodecontroller 10min ago
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: false,
description: "Node created long time ago, node controller posted Unknown for a short period of time.",
},
// Node created long time ago, node controller posted Unknown for a long period of time.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
daemonSets: nil,
timeToPass: 60 * time.Minute,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
// Node status was updated by nodecontroller 1hr ago
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedEvictPods: true,
description: "Node created long time ago, node controller posted Unknown for a long period of time.",
},
}
for _, item := range table {
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
item.fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
for _, ds := range item.daemonSets {
nodeController.daemonSetInformer.Informer().GetStore().Add(&ds)
}
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if item.timeToPass > 0 {
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus
}
if len(item.fakeNodeHandler.Existing[0].Labels) == 0 && len(item.fakeNodeHandler.Existing[1].Labels) == 0 {
item.fakeNodeHandler.Existing[0].Labels = labels
item.fakeNodeHandler.Existing[1].Labels = labels
}
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
zones := testutil.GetZones(item.fakeNodeHandler)
for _, zone := range zones {
if _, ok := nodeController.zonePodEvictor[zone]; ok {
nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
nodeUID, _ := value.UID.(string)
nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetInformer.Lister())
return true, 0
})
} else {
t.Fatalf("Zone %v was unitialized!", zone)
}
}
podEvicted := false
for _, action := range item.fakeNodeHandler.Actions() {
if action.GetVerb() == "delete" && action.GetResource().Resource == "pods" {
podEvicted = true
}
}
if item.expectedEvictPods != podEvicted {
t.Errorf("expected pod eviction: %+v, got %+v for %+v", item.expectedEvictPods,
podEvicted, item.description)
}
}
}
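// TestPodStatusChange verifies that pods evicted from an unreachable node have their
// status Reason set to NodeUnreachablePodReason.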
func TestPodStatusChange(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
// we need a second healthy node in the tests. Because of how the tests are written, we need to update
// the status of this Node.
healthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status has just been updated and the node is still Ready.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 9, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
// Node created long time ago, node controller posted Unknown for a long period of time.
table := []struct {
fakeNodeHandler *testutil.FakeNodeHandler
daemonSets []extensions.DaemonSet
timeToPass time.Duration
newNodeStatus v1.NodeStatus
secondNodeNewStatus v1.NodeStatus
expectedPodUpdate bool
expectedReason string
description string
}{
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
timeToPass: 60 * time.Minute,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
// Node status was updated by nodecontroller 1hr ago
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
secondNodeNewStatus: healthyNodeNewStatus,
expectedPodUpdate: true,
expectedReason: node.NodeUnreachablePodReason,
description: "Node created long time ago, node controller posted Unknown for a " +
"long period of time, the pod status must include reason for termination.",
},
}
for _, item := range table {
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
item.fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if item.timeToPass > 0 {
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
item.fakeNodeHandler.Existing[1].Status = item.secondNodeNewStatus
}
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
zones := testutil.GetZones(item.fakeNodeHandler)
for _, zone := range zones {
nodeController.zonePodEvictor[zone].Try(func(value scheduler.TimedValue) (bool, time.Duration) {
nodeUID, _ := value.UID.(string)
nodeutil.DeletePods(item.fakeNodeHandler, nodeController.recorder, value.Value, nodeUID, nodeController.daemonSetStore)
return true, 0
})
}
podReasonUpdate := false
for _, action := range item.fakeNodeHandler.Actions() {
if action.GetVerb() == "update" && action.GetResource().Resource == "pods" {
updateReason := action.(testcore.UpdateActionImpl).GetObject().(*v1.Pod).Status.Reason
podReasonUpdate = true
if updateReason != item.expectedReason {
t.Errorf("expected pod status reason: %+v, got %+v for %+v", item.expectedReason, updateReason, item.description)
}
}
}
if podReasonUpdate != item.expectedPodUpdate {
t.Errorf("expected pod update: %+v, got %+v for %+v", podReasonUpdate, item.expectedPodUpdate, item.description)
}
}
}
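// TestMonitorNodeHealthEvictPodsWithDisruption verifies per-zone disruption handling:
// eviction is suppressed while every zone is fully disrupted, and proceeds once at least
// one zone is healthy or a zone is only partially disrupted.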
func TestMonitorNodeHealthEvictPodsWithDisruption(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
timeToPass := 60 * time.Minute
// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
// we need a second healthy node in the tests. Because of how the tests are written, we need to update
// the status of this Node.
healthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 13, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
unhealthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
// Node status was updated by nodecontroller 1hr ago
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
table := []struct {
nodeList []*v1.Node
podList []v1.Pod
updatedNodeStatuses []v1.NodeStatus
expectedInitialStates map[string]ZoneState
expectedFollowingStates map[string]ZoneState
expectedEvictPods bool
description string
}{
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
// Only zone is down - eviction shouldn't take place
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{testutil.CreateZoneID("region1", "zone1"): stateFullDisruption},
expectedFollowingStates: map[string]ZoneState{testutil.CreateZoneID("region1", "zone1"): stateFullDisruption},
expectedEvictPods: false,
description: "Network Disruption: Only zone is down - eviction shouldn't take place.",
},
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
// Both zones down - eviction shouldn't take place
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region2",
kubeletapis.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region2", "zone2"): stateFullDisruption,
},
expectedFollowingStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region2", "zone2"): stateFullDisruption,
},
expectedEvictPods: false,
description: "Network Disruption: Both zones down - eviction shouldn't take place.",
},
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
// One zone is down - eviction should take place
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedFollowingStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedEvictPods: true,
description: "Network Disruption: One zone is down - eviction should take place.",
},
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period
// of time on the first Node; eviction should stop even though the -master Node is healthy.
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node-master",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
},
expectedFollowingStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
},
expectedEvictPods: false,
description: "NetworkDisruption: eviction should stop, only -master Node is healthy",
},
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
// Initially both zones down, one comes back - eviction should take place
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone2",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateFullDisruption,
},
expectedFollowingStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): stateFullDisruption,
testutil.CreateZoneID("region1", "zone2"): stateNormal,
},
expectedEvictPods: true,
description: "Initially both zones down, one comes back - eviction should take place",
},
// NetworkDisruption: Node created long time ago, node controller posted Unknown for a long period of time on both Nodes.
// Zone is partially disrupted - eviction should take place
{
nodeList: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node3",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
{
ObjectMeta: metav1.ObjectMeta{
Name: "node4",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
podList: []v1.Pod{*testutil.NewPod("pod0", "node0")},
updatedNodeStatuses: []v1.NodeStatus{
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
unhealthyNodeNewStatus,
healthyNodeNewStatus,
healthyNodeNewStatus,
},
expectedInitialStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): statePartialDisruption,
},
expectedFollowingStates: map[string]ZoneState{
testutil.CreateZoneID("region1", "zone1"): statePartialDisruption,
},
expectedEvictPods: true,
description: "Zone is partially disrupted - eviction should take place.",
},
}
for _, item := range table {
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: item.nodeList,
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: item.podList}),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.enterPartialDisruptionFunc = func(nodeNum int) float32 {
return testRateLimiterQPS
}
nodeController.recorder = testutil.NewFakeRecorder()
nodeController.enterFullDisruptionFunc = func(nodeNum int) float32 {
return testRateLimiterQPS
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("%v: unexpected error: %v", item.description, err)
}
for zone, state := range item.expectedInitialStates {
if state != nodeController.zoneStates[zone] {
t.Errorf("%v: Unexpected zone state: %v: %v instead %v", item.description, zone, nodeController.zoneStates[zone], state)
}
}
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(timeToPass)} }
for i := range item.updatedNodeStatuses {
fakeNodeHandler.Existing[i].Status = item.updatedNodeStatuses[i]
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("%v: unexpected error: %v", item.description, err)
}
for zone, state := range item.expectedFollowingStates {
if state != nodeController.zoneStates[zone] {
t.Errorf("%v: Unexpected zone state: %v: %v instead %v", item.description, zone, nodeController.zoneStates[zone], state)
}
}
var podEvicted bool
start := time.Now()
// Retry loop: the rate limiter may not have refilled for the Try function yet.
// It breaks once we observe the eviction status the test case expects, or once
// the intended result has not appeared after 1 minute.
for {
podEvicted = nodeController.doEviction(fakeNodeHandler)
if podEvicted == item.expectedEvictPods || time.Since(start) > 1*time.Minute {
break
}
}
if item.expectedEvictPods != podEvicted {
t.Errorf("%v: expected pod eviction: %+v, got %+v", item.description, item.expectedEvictPods, podEvicted)
}
}
}
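// pollEviction is a small convenience sketch (a hypothetical helper, not used by
// the tests in this file): it retries doEviction until the expected outcome is
// observed or the timeout expires, mirroring the retry loop in
// TestMonitorNodeHealthEvictPodsWithDisruption above.
func pollEviction(nc *nodeLifecycleController, h *testutil.FakeNodeHandler, want bool, timeout time.Duration) bool {
start := time.Now()
for {
// doEviction drains the zone eviction queues and reports whether a pod delete was seen.
got := nc.doEviction(h)
if got == want || time.Since(start) > timeout {
return got
}
}
}
// TestCloudProviderNodeShutdown verifies that the cloud-provider shutdown taint is
// added when the provider reports a node as shut down and removed once the node is
// Ready again.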
func TestCloudProviderNodeShutdown(t *testing.T) {
testCases := []struct {
testName string
node *v1.Node
shutdown bool
}{
{
testName: "node shutdowned add taint",
shutdown: true,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "node0",
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
{
testName: "node started after shutdown remove taint",
shutdown: false,
node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Spec: v1.NodeSpec{
ProviderID: "node0",
Taints: []v1.Taint{
{
Key: algorithm.TaintNodeShutdown,
Effect: v1.TaintEffectNoSchedule,
},
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
}
for _, tc := range testCases {
t.Run(tc.testName, func(t *testing.T) {
fnh := &testutil.FakeNodeHandler{
Existing: []*v1.Node{tc.node},
Clientset: fake.NewSimpleClientset(),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fnh,
10*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.recorder = testutil.NewFakeRecorder()
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return tc.shutdown, nil
}
if err := nodeController.syncNodeStore(fnh); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fnh.UpdatedNodes) != 1 {
t.Errorf("Node was not updated")
}
if tc.shutdown {
if len(fnh.UpdatedNodes[0].Spec.Taints) != 1 {
t.Errorf("Node Taint was not added")
}
if fnh.UpdatedNodes[0].Spec.Taints[0].Key != "node.cloudprovider.kubernetes.io/shutdown" {
t.Errorf("Node Taint key is not correct")
}
} else {
if len(fnh.UpdatedNodes[0].Spec.Taints) != 0 {
t.Errorf("Node Taint was not removed after node is back in ready state")
}
}
})
}
}
// TestCloudProviderNoRateLimit tests that monitorNodeHealth() immediately deletes
// the node and its pods when the kubelet has not reported in and the cloud provider
// says the node is gone.
func TestCloudProviderNoRateLimit(t *testing.T) {
fnh := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0"), *testutil.NewPod("pod1", "node0")}}),
DeleteWaitChan: make(chan struct{}),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fnh,
10*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.now = func() metav1.Time { return metav1.Date(2016, 1, 1, 12, 0, 0, 0, time.UTC) }
nodeController.recorder = testutil.NewFakeRecorder()
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil
}
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return false, nil
}
// monitorNodeHealth should allow this node to be immediately deleted
if err := nodeController.syncNodeStore(fnh); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
select {
case <-fnh.DeleteWaitChan:
case <-time.After(wait.ForeverTestTimeout):
t.Errorf("Timed out waiting %v for node to be deleted", wait.ForeverTestTimeout)
}
if len(fnh.DeletedNodes) != 1 || fnh.DeletedNodes[0].Name != "node0" {
t.Errorf("Node was not deleted")
}
if nodeOnQueue := nodeController.zonePodEvictor[""].Remove("node0"); nodeOnQueue {
t.Errorf("Node was queued for eviction. Should have been immediately deleted.")
}
}
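// TestMonitorNodeHealthUpdateStatus verifies that the controller posts Unknown conditions
// for nodes whose kubelet never reported or stopped reporting past the grace period, and
// leaves fresh or recently updated nodes alone.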
func TestMonitorNodeHealthUpdateStatus(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
table := []struct {
fakeNodeHandler *testutil.FakeNodeHandler
timeToPass time.Duration
newNodeStatus v1.NodeStatus
expectedEvictPods bool
expectedRequestCount int
expectedNodes []*v1.Node
}{
// Node created long time ago, without status:
// Expect Unknown status posted from node controller.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 2, // List+Update
expectedNodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: fakeNow,
},
},
},
},
},
},
// Node created recently, without status.
// Expect no action from node controller (within startup grace period).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,
},
// Node created long time ago, with status updated by kubelet exceeds grace period.
// Expect Unknown status posted from node controller.
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 3, // (List+)List+Update
timeToPass: time.Hour,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
expectedNodes: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
Reason: "NodeStatusUnknown",
Message: "Kubelet stopped posting node status.",
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
Reason: "NodeStatusUnknown",
Message: "Kubelet stopped posting node status.",
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), // should default to node creation time if condition was never updated
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), // should default to node creation time if condition was never updated
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: "Kubelet never posted node status.",
LastHeartbeatTime: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC), // should default to node creation time if condition was never updated
LastTransitionTime: metav1.Time{Time: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC).Add(time.Hour)},
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
},
// Node created long time ago, with status updated recently.
// Expect no action from node controller (within monitor grace period).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status has just been updated.
LastHeartbeatTime: fakeNow,
LastTransitionTime: fakeNow,
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedRequestCount: 1, // List
expectedNodes: nil,
},
}
for i, item := range table {
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
item.fakeNodeHandler,
5*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if item.timeToPass > 0 {
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
}
if item.expectedRequestCount != item.fakeNodeHandler.RequestCount {
t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
}
if len(item.fakeNodeHandler.UpdatedNodes) > 0 && !apiequality.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodes[0]))
}
if len(item.fakeNodeHandler.UpdatedNodeStatuses) > 0 && !apiequality.Semantic.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodeStatuses) {
t.Errorf("Case[%d] unexpected nodes: %s", i, diff.ObjectDiff(item.expectedNodes[0], item.fakeNodeHandler.UpdatedNodeStatuses[0]))
}
}
}
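// TestMonitorNodeHealthMarkPodsNotReady verifies that pod statuses are updated (marked
// not ready) once a node's Ready heartbeat is older than the monitor grace period.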
func TestMonitorNodeHealthMarkPodsNotReady(t *testing.T) {
fakeNow := metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
table := []struct {
fakeNodeHandler *testutil.FakeNodeHandler
timeToPass time.Duration
newNodeStatus v1.NodeStatus
expectedPodStatusUpdate bool
}{
// Node created recently, without status.
// Expect no action from node controller (within startup grace period).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: fakeNow,
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},
// Node created long time ago, with status updated recently.
// Expect no action from node controller (within monitor grace period).
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status has just been updated.
LastHeartbeatTime: fakeNow,
LastTransitionTime: fakeNow,
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
expectedPodStatusUpdate: false,
},
// Node created long time ago, with status updated by kubelet exceeds grace period.
// Expect pods status updated and Unknown node status posted from node controller
{
fakeNodeHandler: &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
},
timeToPass: 1 * time.Minute,
newNodeStatus: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
// Node status hasn't been updated for 1hr.
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
expectedPodStatusUpdate: true,
},
}
for i, item := range table {
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
item.fakeNodeHandler,
5*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("Case[%d] unexpected error: %v", i, err)
}
if item.timeToPass > 0 {
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(item.timeToPass)} }
item.fakeNodeHandler.Existing[0].Status = item.newNodeStatus
if err := nodeController.syncNodeStore(item.fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("Case[%d] unexpected error: %v", i, err)
}
}
podStatusUpdated := false
for _, action := range item.fakeNodeHandler.Actions() {
if action.GetVerb() == "update" && action.GetResource().Resource == "pods" && action.GetSubresource() == "status" {
podStatusUpdated = true
}
}
if podStatusUpdated != item.expectedPodStatusUpdate {
t.Errorf("Case[%d] expect pod status updated to be %v, but got %v", i, item.expectedPodStatusUpdate, podStatusUpdated)
}
}
}
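// TestSwapUnreachableNotReadyTaints verifies that the NoExecute unreachable taint is
// swapped for the not-ready taint when a node goes from Unknown to NotReady.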
func TestSwapUnreachableNotReadyTaints(t *testing.T) {
fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
// Because of the logic that prevents NC from evicting anything when all Nodes are NotReady
// we need a second healthy node in the tests. Because of how the tests are written, we need to update
// the status of this Node.
{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
timeToPass := evictionTimeout
newNodeStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
// Node status has just been updated, and is NotReady for 10min.
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 9, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
healthyNodeNewStatus := v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2017, 1, 1, 12, 10, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
}
originalTaint := UnreachableTaintTemplate
updatedTaint := NotReadyTaintTemplate
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
true)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doNoExecuteTaintingPass()
node0, err := fakeNodeHandler.Get("node0", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node0...")
return
}
node1, err := fakeNodeHandler.Get("node1", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node1...")
return
}
if originalTaint != nil && !taintutils.TaintExists(node0.Spec.Taints, originalTaint) {
t.Errorf("Can't find taint %v in %v", originalTaint, node0.Spec.Taints)
}
nodeController.now = func() metav1.Time { return metav1.Time{Time: fakeNow.Add(timeToPass)} }
node0.Status = newNodeStatus
node1.Status = healthyNodeNewStatus
_, err = fakeNodeHandler.UpdateStatus(node0)
if err != nil {
t.Errorf(err.Error())
return
}
_, err = fakeNodeHandler.UpdateStatus(node1)
if err != nil {
t.Errorf(err.Error())
return
}
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doNoExecuteTaintingPass()
node0, err = fakeNodeHandler.Get("node0", metav1.GetOptions{})
if err != nil {
t.Errorf("Can't get current node0...")
return
}
if updatedTaint != nil {
if !taintutils.TaintExists(node0.Spec.Taints, updatedTaint) {
t.Errorf("Can't find taint %v in %v", updatedTaint, node0.Spec.Taints)
}
}
}
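// TestTaintsNodeByCondition verifies that node conditions (NetworkUnavailable,
// OutOfDisk, Ready) are translated into the corresponding NoSchedule taints.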
func TestTaintsNodeByCondition(t *testing.T) {
fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
true)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
outOfDiskTaint := &v1.Taint{
Key: algorithm.TaintNodeOutOfDisk,
Effect: v1.TaintEffectNoSchedule,
}
networkUnavailableTaint := &v1.Taint{
Key: algorithm.TaintNodeNetworkUnavailable,
Effect: v1.TaintEffectNoSchedule,
}
notReadyTaint := &v1.Taint{
Key: algorithm.TaintNodeNotReady,
Effect: v1.TaintEffectNoSchedule,
}
unreachableTaint := &v1.Taint{
Key: algorithm.TaintNodeUnreachable,
Effect: v1.TaintEffectNoSchedule,
}
tests := []struct {
Name string
Node *v1.Node
ExpectedTaints []*v1.Taint
}{
{
Name: "NetworkUnavailable is true",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{networkUnavailableTaint},
},
{
Name: "NetworkUnavailable and OutOfDisk are true",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{networkUnavailableTaint, outOfDiskTaint},
},
{
Name: "NetworkUnavailable is true, OutOfDisk is unknown",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{networkUnavailableTaint},
},
{
Name: "Ready is false",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionFalse,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{notReadyTaint},
},
{
Name: "Ready is unknown",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
},
},
},
},
ExpectedTaints: []*v1.Taint{unreachableTaint},
},
}
for _, test := range tests {
fakeNodeHandler.Update(test.Node)
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doNoScheduleTaintingPass(test.Node.Name)
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
node0, err := nodeController.nodeLister.Get("node0")
if err != nil {
t.Errorf("Can't get current node0...")
return
}
if len(node0.Spec.Taints) != len(test.ExpectedTaints) {
t.Errorf("%s: Unexpected number of taints: expected %d, got %d",
test.Name, len(test.ExpectedTaints), len(node0.Spec.Taints))
}
for _, taint := range test.ExpectedTaints {
if !taintutils.TaintExists(node0.Spec.Taints, taint) {
t.Errorf("%s: Can't find taint %v in %v", test.Name, taint, node0.Spec.Taints)
}
}
}
}
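// TestNodeEventGeneration verifies that RegisteredNode and DeletingNode events
// are emitted with the node's UID when the node no longer exists in the cloud provider.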
func TestNodeEventGeneration(t *testing.T) {
fakeNow := metav1.Date(2016, 9, 10, 12, 0, 0, 0, time.UTC)
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
UID: "1234567890",
CreationTimestamp: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
LastTransitionTime: metav1.Date(2015, 8, 10, 0, 0, 0, 0, time.UTC),
},
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fakeNodeHandler,
5*time.Minute,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
false)
nodeController.cloud = &fakecloud.FakeCloud{}
nodeController.nodeExistsInCloudProvider = func(nodeName types.NodeName) (bool, error) {
return false, nil
}
nodeController.nodeShutdownInCloudProvider = func(ctx context.Context, node *v1.Node) (bool, error) {
return false, nil
}
nodeController.now = func() metav1.Time { return fakeNow }
fakeRecorder := testutil.NewFakeRecorder()
nodeController.recorder = fakeRecorder
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
if err := nodeController.monitorNodeHealth(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if len(fakeRecorder.Events) != 2 {
t.Fatalf("unexpected events, got %v, expected %v: %+v", len(fakeRecorder.Events), 2, fakeRecorder.Events)
}
if fakeRecorder.Events[0].Reason != "RegisteredNode" || fakeRecorder.Events[1].Reason != "DeletingNode" {
var reasons []string
for _, event := range fakeRecorder.Events {
reasons = append(reasons, event.Reason)
}
t.Fatalf("unexpected events generation: %v", strings.Join(reasons, ","))
}
for _, event := range fakeRecorder.Events {
involvedObject := event.InvolvedObject
actualUID := string(involvedObject.UID)
if actualUID != "1234567890" {
t.Fatalf("unexpected event uid: %v", actualUID)
}
}
}
// TestFixDeprecatedTaintKey verifies backwards compatibility after upgrading the alpha taint key to the GA taint key.
// TODO(resouer): this was introduced in 1.9 and should be removed in the future.
func TestFixDeprecatedTaintKey(t *testing.T) {
fakeNow := metav1.Date(2017, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
fakeNodeHandler := &testutil.FakeNodeHandler{
Existing: []*v1.Node{
{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
},
},
Clientset: fake.NewSimpleClientset(&v1.PodList{Items: []v1.Pod{*testutil.NewPod("pod0", "node0")}}),
}
nodeController, _ := newNodeLifecycleControllerFromClient(
nil,
fakeNodeHandler,
evictionTimeout,
testRateLimiterQPS,
testRateLimiterQPS,
testLargeClusterThreshold,
testUnhealthyThreshold,
testNodeMonitorGracePeriod,
testNodeStartupGracePeriod,
testNodeMonitorPeriod,
true)
nodeController.now = func() metav1.Time { return fakeNow }
nodeController.recorder = testutil.NewFakeRecorder()
deprecatedNotReadyTaint := &v1.Taint{
Key: algorithm.DeprecatedTaintNodeNotReady,
Effect: v1.TaintEffectNoExecute,
}
nodeNotReadyTaint := &v1.Taint{
Key: algorithm.TaintNodeNotReady,
Effect: v1.TaintEffectNoExecute,
}
deprecatedUnreachableTaint := &v1.Taint{
Key: algorithm.DeprecatedTaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
}
nodeUnreachableTaint := &v1.Taint{
Key: algorithm.TaintNodeUnreachable,
Effect: v1.TaintEffectNoExecute,
}
tests := []struct {
Name string
Node *v1.Node
ExpectedTaints []*v1.Taint
}{
{
Name: "Node with deprecated not-ready taint key",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
*deprecatedNotReadyTaint,
},
},
},
ExpectedTaints: []*v1.Taint{nodeNotReadyTaint},
},
{
Name: "Node with deprecated unreachable taint key",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
*deprecatedUnreachableTaint,
},
},
},
ExpectedTaints: []*v1.Taint{nodeUnreachableTaint},
},
{
Name: "Node with not-ready taint key",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
*nodeNotReadyTaint,
},
},
},
ExpectedTaints: []*v1.Taint{nodeNotReadyTaint},
},
{
Name: "Node with unreachable taint key",
Node: &v1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "node0",
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
Labels: map[string]string{
kubeletapis.LabelZoneRegion: "region1",
kubeletapis.LabelZoneFailureDomain: "zone1",
},
},
Spec: v1.NodeSpec{
Taints: []v1.Taint{
*nodeUnreachableTaint,
},
},
},
ExpectedTaints: []*v1.Taint{nodeUnreachableTaint},
},
}
for _, test := range tests {
fakeNodeHandler.Update(test.Node)
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
nodeController.doFixDeprecatedTaintKeyPass(test.Node)
if err := nodeController.syncNodeStore(fakeNodeHandler); err != nil {
t.Errorf("unexpected error: %v", err)
}
node, err := nodeController.nodeLister.Get(test.Node.GetName())
if err != nil {
t.Errorf("Can't get current node...")
return
}
if len(node.Spec.Taints) != len(test.ExpectedTaints) {
t.Errorf("%s: Unexpected number of taints: expected %d, got %d",
test.Name, len(test.ExpectedTaints), len(node.Spec.Taints))
}
for _, taint := range test.ExpectedTaints {
if !taintutils.TaintExists(node.Spec.Taints, taint) {
t.Errorf("%s: Can't find taint %v in %v", test.Name, taint, node.Spec.Taints)
}
}
}
}
| {
t.Errorf("unexpected error: %v", err)
} |
log.go | package log
import "log"
import "fmt"
var DebugMode bool
func Init() {
log.SetFlags(0)
log.SetPrefix("gendao: ") |
func Fatalln(v ...interface{}) {
log.Fatalln(v...)
}
func Infof(format string, args ...interface{}) {
log.Printf(fmt.Sprintf("(info) %v", format), args...)
}
func Debugf(format string, args ...interface{}) {
if DebugMode {
log.Printf(fmt.Sprintf("(debug) %v", format), args...)
}
}
func Debugln(v ...interface{}) {
if DebugMode {
log.Println(v...)
}
}
func Errorf(format string, args ...interface{}) {
log.Fatalf(fmt.Sprintf("(error) %v", format), args...)
} | } |
target.go | package main
import (
"io/ioutil"
"strings"
"golang.org/x/crypto/ssh"
"os/user"
"fmt"
)
/* {
* "username": "bob",
* "host": "myserver:22",
* "auth": {
* "method": "password" or "pki",
* "artifact": "<secret>" or "/path/to/private_key.pem"
* }
* }
*/
type targetConfig struct {
User string `json:"username"`
Host string `json:"host"`
Auth struct {
Method string `json:"method"`
Artifact string `json:"artifact"`
} `json:"auth"`
}
// Fix the configuration before handing it to clientConfig():
// - if ~ found in pki artifact, expand it to home directory
func (target *targetConfig) Preprocess() error {
// A ~ in the private key path? Try to expand it!
if target.Auth.Method == "pki" &&
strings.Contains(target.Auth.Artifact, "~") {
active, err := user.Current()
if err != nil {
return fmt.Errorf("failed getting current user while expanding home (~): %s", err.Error())
}
target.Auth.Artifact = strings.Replace(target.Auth.Artifact, "~", active.HomeDir, 1)
}
|
return nil
}
// Generate a password-auth'd ssh ClientConfig
func (target *targetConfig) password() (*ssh.ClientConfig, error) {
// Password might be "" so can't check len(artifact)
return &ssh.ClientConfig{
User: target.User,
Auth: []ssh.AuthMethod{
ssh.Password(target.Auth.Artifact),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
// Generate a PKI-auth'd ssh ClientConfig
func (target *targetConfig) pki() (*ssh.ClientConfig, error) {
pem, err := ioutil.ReadFile(target.Auth.Artifact)
if err != nil {
return nil, fmt.Errorf("failed reading key: %s", err.Error())
}
signer, err := ssh.ParsePrivateKey(pem)
if err != nil {
return nil, fmt.Errorf("failed parsing key: %s", err.Error())
}
return &ssh.ClientConfig{
User: target.User,
Auth: []ssh.AuthMethod{
ssh.PublicKeys(signer),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}, nil
}
// Figure out how to generate the ssh ClientConfig, or bail
func (target *targetConfig) ClientConfig() (*ssh.ClientConfig, error) {
if len(target.User) == 0 {
return nil, fmt.Errorf("target config requires a username")
}
// Only supports password and pki methods. Soon interactive as well?
switch target.Auth.Method {
case "password":
return target.password()
case "pki":
return target.pki()
default:
err := fmt.Errorf("unknown authentication method %s", target.Auth.Method)
return nil, err
}
} | |
structs2.rs | // structs2.rs
// Address all the TODOs to make the tests pass!
#[derive(Debug)]
struct Order {
name: String,
year: u32,
made_by_phone: bool,
made_by_mobile: bool,
made_by_email: bool,
item_number: u32,
count: u32,
}
fn | () -> Order {
Order {
name: String::from("Bob"),
year: 2019,
made_by_phone: false,
made_by_mobile: false,
made_by_email: true,
item_number: 123,
count: 0,
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn your_order() {
let order_template = create_order_template();
// TODO: Create your own order using the update syntax and template above!
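// `..order_template` fills the remaining fields from the template (struct update
// syntax); only Copy fields are taken here, so order_template stays usable below.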
let your_order = Order {
name: "Hacker in Rust".to_string(),
count: 1,
.. order_template
};
assert_eq!(your_order.name, "Hacker in Rust");
assert_eq!(your_order.year, order_template.year);
assert_eq!(your_order.made_by_phone, order_template.made_by_phone);
assert_eq!(your_order.made_by_mobile, order_template.made_by_mobile);
assert_eq!(your_order.made_by_email, order_template.made_by_email);
assert_eq!(your_order.item_number, order_template.item_number);
assert_eq!(your_order.count, 1);
}
}
| create_order_template |
server.go | package server
import (
"log"
"github.com/gin-gonic/gin"
"github.com/raptorbox/raptor-sdk-go/models"
"github.com/raptorbox/raptor-stream/api"
)
// Start runs an HTTP server on the given port
func Start(port string) error | {
r := gin.Default()
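// Bulk write: bind a JSON array of records for the device and persist it via api.Write.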
r.PUT("/:objectId", func(c *gin.Context) {
dev := models.NewDevice()
dev.ID = c.Param("objectId")
stream := models.NewStream(dev)
stream.Name = c.Param("streamId")
r := make([]*models.Record, 0)
err := c.BindJSON(&r)
if err != nil {
c.JSON(400, gin.H{
"message": err.Error(),
"code": 400,
})
return
}
err = api.Write(r)
if err != nil {
c.JSON(400, gin.H{
"message": err.Error(),
"code": 400,
})
return
}
c.Status(202)
})
r.PUT("/:objectId/:streamId", func(c *gin.Context) {
dev := models.NewDevice()
dev.ID = c.Param("objectId")
stream := models.NewStream(dev)
stream.Name = c.Param("stremId")
record := stream.CreateRecord()
r := make([]*models.Record, 0)
err := c.BindJSON(record)
if err != nil {
c.JSON(400, gin.H{
"message": err.Error(),
"code": 400,
})
} else {
r = append(r, record)
}
err = api.Save(r)
if err != nil {
c.JSON(400, gin.H{
"message": err.Error(),
"code": 400,
})
return
}
c.Status(202)
})
// drop data
r.DELETE("/:objectId", func(c *gin.Context) {
f := api.RecordQuery{
DeviceID: c.Param("objectId"),
}
err := api.Delete(f)
if err != nil {
c.JSON(err.Code, gin.H{
"message": err.Message,
"code": err.Code,
})
return
}
})
r.DELETE("/:objectId/:streamId", func(c *gin.Context) {
f := api.RecordQuery{
DeviceID: c.Param("objectId"),
StreamID: c.Param("streamId"),
}
err := api.Delete(f)
if err != nil {
c.JSON(err.Code, gin.H{
"message": err.Message,
"code": err.Code,
})
return
}
})
// list paged data
r.GET("/:objectId/:streamId", func(c *gin.Context) {
log.Fatalf("Not implemented GET /:objectId/:streamId")
})
// search data
r.POST("/:objectId/:streamId", func(c *gin.Context) {
log.Fatalf("Not implemented POST /:objectId/:streamId")
})
return r.Run(port)
} |
|
callback.ts | import uuid from 'uuid-random';
import { db } from '../../../../../db/postgres.js';
import { GuildCommandInteraction } from '../../../../events/interactionCreate.js';
import { replyError } from '../../../../lib/embeds.js';
import { ApplicationCommandCallback } from '../../../../slashCommandHandler.js';
import { deleteNote } from '../shared.js';
| if (!uuid.test(id)) return replyError(interaction, 'The specified UUID is invalid.');
const note = (await db.query(/*sql*/ `SELECT id, user_id, mod_id, content, context_url, timestamp, edited_mod_id, edited_timestamp FROM note WHERE id = $1;`, [id])).rows[0];
if (!note) return replyError(interaction, 'There is no note with the specified UUID.');
deleteNote(interaction, note);
},
}; | export const command: ApplicationCommandCallback = {
requiredPermissions: ['KICK_MEMBERS'],
callback: async (interaction: GuildCommandInteraction) => {
const id = interaction.options.getString('id', true); |
retry_policy.rs | //! Query retries configurations\
//! To decide when to retry a query the `Session` can use any object which implements
//! the `RetryPolicy` trait
use crate::frame::types::LegacyConsistency;
use crate::transport::errors::{DbError, QueryError, WriteType};
/// Information about a failed query
pub struct QueryInfo<'a> {
/// The error with which the query failed
pub error: &'a QueryError,
/// A query is idempotent if it can be applied multiple times without changing the result of the initial application\
/// If set to `true` we can be sure that it is idempotent\
/// If set to `false` it is unknown whether it is idempotent
pub is_idempotent: bool,
/// Consistency with which the query failed
pub consistency: LegacyConsistency,
}
#[derive(Debug, PartialEq, Eq)]
pub enum RetryDecision {
RetrySameNode,
RetryNextNode,
DontRetry,
}
/// Specifies a policy used to decide when to retry a query
pub trait RetryPolicy: Send + Sync {
/// Called for each new query, starts a session of deciding about retries
fn new_session(&self) -> Box<dyn RetrySession>;
/// Used to clone this RetryPolicy
fn clone_boxed(&self) -> Box<dyn RetryPolicy>;
}
impl Clone for Box<dyn RetryPolicy> {
fn clone(&self) -> Box<dyn RetryPolicy> {
self.clone_boxed()
}
}
/// Used throughout a single query to decide when to retry it
/// After this query is finished it is destroyed or reset
pub trait RetrySession: Send + Sync {
/// Called after the query failed - decide what to do next
fn decide_should_retry(&mut self, query_info: QueryInfo) -> RetryDecision;
/// Reset before using for a new query
fn reset(&mut self);
}
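// Usage sketch (assuming a `policy: Box<dyn RetryPolicy>` and a failed query's `info`):
//   let mut session = policy.new_session();
//   match session.decide_should_retry(info) {
//       RetryDecision::RetrySameNode => { /* resend to the same node */ }
//       RetryDecision::RetryNextNode => { /* resend to a different node */ }
//       RetryDecision::DontRetry => { /* propagate the error */ }
//   }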
/// Forwards all errors directly to the user, never retries
pub struct FallthroughRetryPolicy;
pub struct FallthroughRetrySession;
impl FallthroughRetryPolicy {
pub fn new() -> FallthroughRetryPolicy {
FallthroughRetryPolicy
}
}
impl Default for FallthroughRetryPolicy {
fn default() -> FallthroughRetryPolicy {
FallthroughRetryPolicy
}
}
impl RetryPolicy for FallthroughRetryPolicy {
fn new_session(&self) -> Box<dyn RetrySession> {
Box::new(FallthroughRetrySession)
}
fn clone_boxed(&self) -> Box<dyn RetryPolicy> {
Box::new(FallthroughRetryPolicy)
}
}
impl RetrySession for FallthroughRetrySession {
fn decide_should_retry(&mut self, _query_info: QueryInfo) -> RetryDecision |
fn reset(&mut self) {}
}
/// Default retry policy - retries when there is a high chance that a retry might help.\
/// Behaviour based on [DataStax Java Driver](https://docs.datastax.com/en/developer/java-driver/4.10/manual/core/retries/)
pub struct DefaultRetryPolicy;
impl DefaultRetryPolicy {
pub fn new() -> DefaultRetryPolicy {
DefaultRetryPolicy
}
}
impl Default for DefaultRetryPolicy {
fn default() -> DefaultRetryPolicy {
DefaultRetryPolicy::new()
}
}
impl RetryPolicy for DefaultRetryPolicy {
fn new_session(&self) -> Box<dyn RetrySession> {
Box::new(DefaultRetrySession::new())
}
fn clone_boxed(&self) -> Box<dyn RetryPolicy> {
Box::new(DefaultRetryPolicy)
}
}
pub struct DefaultRetrySession {
was_unavailable_retry: bool,
was_read_timeout_retry: bool,
was_write_timeout_retry: bool,
}
impl DefaultRetrySession {
pub fn new() -> DefaultRetrySession {
DefaultRetrySession {
was_unavailable_retry: false,
was_read_timeout_retry: false,
was_write_timeout_retry: false,
}
}
}
impl Default for DefaultRetrySession {
fn default() -> DefaultRetrySession {
DefaultRetrySession::new()
}
}
impl RetrySession for DefaultRetrySession {
fn decide_should_retry(&mut self, query_info: QueryInfo) -> RetryDecision {
match query_info.error {
// Basic errors - there are some problems on this node
// Retry on a different one if possible
QueryError::IoError(_)
| QueryError::DbError(DbError::Overloaded, _)
| QueryError::DbError(DbError::ServerError, _)
| QueryError::DbError(DbError::TruncateError, _) => {
if query_info.is_idempotent {
RetryDecision::RetryNextNode
} else {
RetryDecision::DontRetry
}
}
// Unavailable - the current node believes that not enough nodes
// are alive to satisfy specified consistency requirements.
// Maybe this node has network problems - try a different one.
// Perform at most one retry - it's unlikely that two nodes
// have network problems at the same time
QueryError::DbError(DbError::Unavailable { .. }, _) => {
if !self.was_unavailable_retry {
self.was_unavailable_retry = true;
RetryDecision::RetryNextNode
} else {
RetryDecision::DontRetry
}
}
// ReadTimeout - coordinator didn't receive enough replies in time.
// Retry at most once and only if there were actually enough replies
// to satisfy consistency but they were all just checksums (data_present == true).
// This happens when the coordinator picked replicas that were overloaded/dying.
// The retried request should get a useful response because by then the node
// will have detected that these replicas are dead.
QueryError::DbError(
DbError::ReadTimeout {
received,
required,
data_present,
..
},
_,
) => {
if !self.was_read_timeout_retry && received >= required && *data_present {
self.was_read_timeout_retry = true;
RetryDecision::RetrySameNode
} else {
RetryDecision::DontRetry
}
}
// Write timeout - coordinator didn't receive enough replies in time.
// Retry at most once and only for BatchLog write.
// Coordinator probably didn't detect the nodes as dead.
// By the time we retry they should be detected as dead.
QueryError::DbError(DbError::WriteTimeout { write_type, .. }, _) => {
if !self.was_write_timeout_retry
&& query_info.is_idempotent
&& *write_type == WriteType::BatchLog
{
self.was_write_timeout_retry = true;
RetryDecision::RetrySameNode
} else {
RetryDecision::DontRetry
}
}
// The node is still bootstrapping and can't execute the query; try another one
QueryError::DbError(DbError::IsBootstrapping, _) => RetryDecision::RetryNextNode,
// Connection to the contacted node is overloaded, try another one
QueryError::UnableToAllocStreamId => RetryDecision::RetryNextNode,
// In all other cases propagate the error to the user
_ => RetryDecision::DontRetry,
}
}
fn reset(&mut self) {
*self = DefaultRetrySession::new();
}
}
#[cfg(test)]
mod tests {
use super::{DefaultRetryPolicy, QueryInfo, RetryDecision, RetryPolicy};
use crate::frame::types::LegacyConsistency;
use crate::statement::Consistency;
use crate::transport::errors::{BadQuery, DbError, QueryError, WriteType};
use bytes::Bytes;
use std::io::ErrorKind;
use std::sync::Arc;
fn make_query_info(error: &QueryError, is_idempotent: bool) -> QueryInfo<'_> {
QueryInfo {
error,
is_idempotent,
consistency: LegacyConsistency::Regular(Consistency::One),
}
}
// Asserts that default policy never retries for this Error
fn default_policy_assert_never_retries(error: QueryError) {
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, false)),
RetryDecision::DontRetry
);
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, true)),
RetryDecision::DontRetry
);
}
#[test]
fn default_never_retries() {
let never_retried_dberrors = vec![
DbError::SyntaxError,
DbError::Invalid,
DbError::AlreadyExists {
keyspace: String::new(),
table: String::new(),
},
DbError::FunctionFailure {
keyspace: String::new(),
function: String::new(),
arg_types: vec![],
},
DbError::AuthenticationError,
DbError::Unauthorized,
DbError::ConfigError,
DbError::ReadFailure {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 2,
required: 1,
numfailures: 1,
data_present: false,
},
DbError::WriteFailure {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 1,
required: 2,
numfailures: 1,
write_type: WriteType::BatchLog,
},
DbError::Unprepared {
statement_id: Bytes::from_static(b"deadbeef"),
},
DbError::ProtocolError,
DbError::Other(0x124816),
];
for dberror in never_retried_dberrors {
default_policy_assert_never_retries(QueryError::DbError(dberror, String::new()));
}
default_policy_assert_never_retries(QueryError::BadQuery(BadQuery::ValueLenMismatch(1, 2)));
default_policy_assert_never_retries(QueryError::ProtocolError("test"));
}
// Asserts that for this error the policy retries on the next node for idempotent queries only
fn default_policy_assert_idempotent_next(error: QueryError) {
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, false)),
RetryDecision::DontRetry
);
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, true)),
RetryDecision::RetryNextNode
);
}
#[test]
fn default_idempotent_next_retries() {
let idempotent_next_errors = vec![
QueryError::DbError(DbError::Overloaded, String::new()),
QueryError::DbError(DbError::TruncateError, String::new()),
QueryError::DbError(DbError::ServerError, String::new()),
QueryError::IoError(Arc::new(std::io::Error::new(ErrorKind::Other, "test"))),
];
for error in idempotent_next_errors {
default_policy_assert_idempotent_next(error);
}
}
// Always retry on next node if current one is bootstrapping
#[test]
fn default_bootstrapping() {
let error = QueryError::DbError(DbError::IsBootstrapping, String::new());
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, false)),
RetryDecision::RetryNextNode
);
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&error, true)),
RetryDecision::RetryNextNode
);
}
// On Unavailable error we retry one time no matter the idempotence
#[test]
fn default_unavailable() {
let error = QueryError::DbError(
DbError::Unavailable {
consistency: LegacyConsistency::Regular(Consistency::Two),
required: 2,
alive: 1,
},
String::new(),
);
let mut policy_not_idempotent = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy_not_idempotent.decide_should_retry(make_query_info(&error, false)),
RetryDecision::RetryNextNode
);
assert_eq!(
policy_not_idempotent.decide_should_retry(make_query_info(&error, false)),
RetryDecision::DontRetry
);
let mut policy_idempotent = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy_idempotent.decide_should_retry(make_query_info(&error, true)),
RetryDecision::RetryNextNode
);
assert_eq!(
policy_idempotent.decide_should_retry(make_query_info(&error, true)),
RetryDecision::DontRetry
);
}
// On ReadTimeout we retry one time if there were enough responses and the data was present no matter the idempotence
#[test]
fn default_read_timeout() {
// Enough responses and data_present == true
let enough_responses_with_data = QueryError::DbError(
DbError::ReadTimeout {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 2,
required: 2,
data_present: true,
},
String::new(),
);
// Not idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_with_data, false)),
RetryDecision::RetrySameNode
);
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_with_data, false)),
RetryDecision::DontRetry
);
// Idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_with_data, true)),
RetryDecision::RetrySameNode
);
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_with_data, true)),
RetryDecision::DontRetry
);
// Enough responses but data_present == false
let enough_responses_no_data = QueryError::DbError(
DbError::ReadTimeout {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 2,
required: 2,
data_present: false,
},
String::new(),
);
// Not idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_no_data, false)),
RetryDecision::DontRetry
);
// Idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&enough_responses_no_data, true)),
RetryDecision::DontRetry
);
// Not enough responses, data_present == true
let not_enough_responses_with_data = QueryError::DbError(
DbError::ReadTimeout {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 1,
required: 2,
data_present: true,
},
String::new(),
);
// Not idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(¬_enough_responses_with_data, false)),
RetryDecision::DontRetry
);
// Idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(¬_enough_responses_with_data, true)),
RetryDecision::DontRetry
);
}
// WriteTimeout will retry once when the query is idempotent and write_type == BatchLog
#[test]
fn default_write_timeout() {
// WriteType == BatchLog
let good_write_type = QueryError::DbError(
DbError::WriteTimeout {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 1,
required: 2,
write_type: WriteType::BatchLog,
},
String::new(),
);
// Not idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&good_write_type, false)),
RetryDecision::DontRetry
);
// Idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&good_write_type, true)),
RetryDecision::RetrySameNode
);
assert_eq!(
policy.decide_should_retry(make_query_info(&good_write_type, true)),
RetryDecision::DontRetry
);
// WriteType != BatchLog
let bad_write_type = QueryError::DbError(
DbError::WriteTimeout {
consistency: LegacyConsistency::Regular(Consistency::Two),
received: 4,
required: 2,
write_type: WriteType::Simple,
},
String::new(),
);
// Not idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&bad_write_type, false)),
RetryDecision::DontRetry
);
// Idempotent
let mut policy = DefaultRetryPolicy::new().new_session();
assert_eq!(
policy.decide_should_retry(make_query_info(&bad_write_type, true)),
RetryDecision::DontRetry
);
}
}
| {
RetryDecision::DontRetry
} |
GridGenerator.js | import Hex from './models/Hex';
class GridGenerator {
static getGenerator(name) {
if (GridGenerator.hasOwnProperty(name))
return GridGenerator[name];
return null;
}
static parallelogram(q1, q2, r1, r2) {
let hexas = [];
for (let q = q1; q <= q2; q++) {
for (let r = r1; r <= r2; r++) {
hexas.push(new Hex(q, r, -q-r));
}
}
return hexas; |
static triangle(mapSize) {
let hexas = [];
for (let q = 0; q <= mapSize; q++) {
for (let r = 0; r <= mapSize - q; r++) {
hexas.push(new Hex(q, r, -q-r));
}
}
return hexas;
}
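// hexagon clamps r per column so that every hex satisfies
// max(|q|, |r|, |s|) <= mapRadius in cube coordinates (s = -q-r).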
static hexagon(mapRadius) {
let hexas = [];
for (let q = -mapRadius; q <= mapRadius; q++) {
let r1 = Math.max(-mapRadius, -q - mapRadius);
let r2 = Math.min(mapRadius, -q + mapRadius);
for (let r = r1; r <= r2; r++) {
hexas.push(new Hex(q, r, -q-r));
}
}
return hexas;
}
static rectangle(mapWidth, mapHeight) {
let hexas = [];
for (let r = 0; r < mapHeight; r++) {
let offset = Math.floor(r/2); // or r>>1
for (let q = -offset; q < mapWidth - offset; q++) {
hexas.push(new Hex(q, r, -q-r));
}
}
return hexas;
}
static orientedRectangle(mapWidth, mapHeight) {
let hexas = [];
for (let q = 0; q < mapWidth; q++) {
let offset = Math.floor(q/2); // or q>>1
for (let r = -offset; r < mapHeight - offset; r++) {
hexas.push(new Hex(q, r, -q-r));
}
}
return hexas;
}
}
export default GridGenerator; | } |
tests.py | from date_sniff.sniffer import DateSniffer
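# Tests for DateSniffer: sniff() maps each matching line of text to the list of
# day numbers found for the configured year/month (and optional keyword).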
def test_years_separation():
sniffer = DateSniffer(year=2019)
assert sniffer.sniff('2019') == {'2019': []}
assert sniffer.sniff('prefix 2019 and long text') == {'prefix 2019 and long text': []}
res = {'prefix 2019 and long text another 2019': []}
assert sniffer.sniff('prefix 2019 and long text another 2019') == res
assert sniffer.sniff('2019 two 2019') == {'2019 two 2019': []}
def test_month_search():
sniffer = DateSniffer(year=2019, month=1)
assert sniffer.sniff('prefix 2019') == {} | res = sniffer.sniff('EXPANSION PLAN Germany Finland Denmark 2019 Norway Egypt UAE France Spain 2021')
assert res == {}
res = sniffer.sniff('EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021')
assert res == {'EXPANSION PLAN Germany Finland March. 2019 Norway Egypt UAE France Spain 2021': []}
def test_find_isolated():
sniffer = DateSniffer(year=2019, month=3)
res = sniffer.find_isolated('10', '2019-03-04 101')
assert res == []
def test_keyword_search():
sniffer = DateSniffer(year=2019, month=1, keyword='test')
assert sniffer.sniff('prefix 2019-01-10') == {}
print(sniffer.sniff('prefix 2019-01-10 test'))
assert sniffer.sniff('prefix 2019-01-10 test') == {'prefix 2019-01-10 test': [10]}
def test_days():
sniffer = DateSniffer(year=2019, month=3)
res = sniffer.sniff('2019-03-04 101')
assert res == {'2019-03-04 101': [4]} | assert sniffer.sniff('prefix January 2019') == {'prefix January 2019': []}
assert sniffer.sniff('prefix 2019-01-10') == {'prefix 2019-01-10': [10]}
sniffer = DateSniffer(year=2019, month=3) |
web_hub.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
package app
import (
"fmt"
"hash/fnv"
"runtime"
"runtime/debug"
"strconv"
"strings"
"sync/atomic"
"time"
l4g "github.com/alecthomas/log4go"
"github.com/bolsunovskyi/mattermost-server/model"
"github.com/bolsunovskyi/mattermost-server/utils"
)
const (
BROADCAST_QUEUE_SIZE = 4096
DEADLOCK_TICKER = 15 * time.Second // check every 15 seconds
DEADLOCK_WARN = (BROADCAST_QUEUE_SIZE * 99) / 100 // number of buffered messages before printing stack trace
)
type Hub struct {
// connectionCount should be kept first.
// See https://github.com/mattermost/mattermost-server/pull/7281
connectionCount int64
app *App
connections []*WebConn
connectionIndex int
register chan *WebConn
unregister chan *WebConn
broadcast chan *model.WebSocketEvent
stop chan struct{}
didStop chan struct{}
invalidateUser chan string
ExplicitStop bool
goroutineId int
}
func (a *App) NewWebHub() *Hub {
return &Hub{
app: a,
register: make(chan *WebConn, 1),
unregister: make(chan *WebConn, 1),
connections: make([]*WebConn, 0, model.SESSION_CACHE_SIZE),
broadcast: make(chan *model.WebSocketEvent, BROADCAST_QUEUE_SIZE),
stop: make(chan struct{}),
didStop: make(chan struct{}),
invalidateUser: make(chan string),
ExplicitStop: false,
}
}
func (a *App) TotalWebsocketConnections() int {
count := int64(0)
for _, hub := range a.Hubs {
count = count + atomic.LoadInt64(&hub.connectionCount)
}
return int(count)
}
func (a *App) HubStart() {
// Total number of hubs is twice the number of CPUs.
numberOfHubs := runtime.NumCPU() * 2
l4g.Info(utils.T("api.web_hub.start.starting.debug"), numberOfHubs)
a.Hubs = make([]*Hub, numberOfHubs)
a.HubsStopCheckingForDeadlock = make(chan bool, 1)
for i := 0; i < len(a.Hubs); i++ {
a.Hubs[i] = a.NewWebHub()
a.Hubs[i].connectionIndex = i
a.Hubs[i].Start()
}
go func() {
ticker := time.NewTicker(DEADLOCK_TICKER)
defer func() {
ticker.Stop()
}()
for {
select {
case <-ticker.C:
for _, hub := range a.Hubs {
if len(hub.broadcast) >= DEADLOCK_WARN {
l4g.Error("Hub processing might be deadlock on hub %v goroutine %v with %v events in the buffer", hub.connectionIndex, hub.goroutineId, len(hub.broadcast))
buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
output := fmt.Sprintf("%s", buf)
splits := strings.Split(output, "goroutine ")
for _, part := range splits {
if strings.Contains(part, fmt.Sprintf("%v", hub.goroutineId)) {
l4g.Error("Trace for possible deadlock goroutine %v", part)
}
}
}
}
case <-a.HubsStopCheckingForDeadlock:
return
}
}
}()
}
func (a *App) HubStop() {
l4g.Info(utils.T("api.web_hub.start.stopping.debug"))
select {
case a.HubsStopCheckingForDeadlock <- true:
default:
l4g.Warn("We appear to have already sent the stop checking for deadlocks command")
}
for _, hub := range a.Hubs {
hub.Stop()
}
a.Hubs = []*Hub{}
}
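// GetHubForUserId hashes the user ID with FNV-1a so that all events for a given
// user are consistently routed to the same hub goroutine.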
func (a *App) GetHubForUserId(userId string) *Hub {
hash := fnv.New32a()
hash.Write([]byte(userId))
index := hash.Sum32() % uint32(len(a.Hubs))
return a.Hubs[index]
}
func (a *App) HubRegister(webConn *WebConn) {
a.GetHubForUserId(webConn.UserId).Register(webConn)
}
func (a *App) HubUnregister(webConn *WebConn) {
a.GetHubForUserId(webConn.UserId).Unregister(webConn)
}
func (a *App) Publish(message *model.WebSocketEvent) {
if metrics := a.Metrics; metrics != nil {
metrics.IncrementWebsocketEvent(message.Event)
}
a.PublishSkipClusterSend(message)
if a.Cluster != nil {
cm := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_PUBLISH,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: message.ToJson(),
}
if message.Event == model.WEBSOCKET_EVENT_POSTED ||
message.Event == model.WEBSOCKET_EVENT_POST_EDITED ||
message.Event == model.WEBSOCKET_EVENT_DIRECT_ADDED ||
message.Event == model.WEBSOCKET_EVENT_GROUP_ADDED ||
message.Event == model.WEBSOCKET_EVENT_ADDED_TO_TEAM {
cm.SendType = model.CLUSTER_SEND_RELIABLE
}
a.Cluster.SendClusterMessage(cm)
}
}
func (a *App) PublishSkipClusterSend(message *model.WebSocketEvent) {
for _, hub := range a.Hubs {
hub.Broadcast(message)
}
}
func (a *App) InvalidateCacheForChannel(channel *model.Channel) {
a.InvalidateCacheForChannelSkipClusterSend(channel.Id)
a.InvalidateCacheForChannelByNameSkipClusterSend(channel.TeamId, channel.Name)
if a.Cluster != nil { | SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: channel.Id,
}
a.Cluster.SendClusterMessage(msg)
nameMsg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_BY_NAME,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Props: make(map[string]string),
}
nameMsg.Props["name"] = channel.Name
if channel.TeamId == "" {
nameMsg.Props["id"] = "dm"
} else {
nameMsg.Props["id"] = channel.TeamId
}
a.Cluster.SendClusterMessage(nameMsg)
}
}
func (a *App) InvalidateCacheForChannelSkipClusterSend(channelId string) {
a.Srv.Store.Channel().InvalidateChannel(channelId)
}
func (a *App) InvalidateCacheForChannelMembers(channelId string) {
a.InvalidateCacheForChannelMembersSkipClusterSend(channelId)
if a.Cluster != nil {
msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: channelId,
}
a.Cluster.SendClusterMessage(msg)
}
}
func (a *App) InvalidateCacheForChannelMembersSkipClusterSend(channelId string) {
a.Srv.Store.User().InvalidateProfilesInChannelCache(channelId)
a.Srv.Store.Channel().InvalidateMemberCount(channelId)
}
func (a *App) InvalidateCacheForChannelMembersNotifyProps(channelId string) {
a.InvalidateCacheForChannelMembersNotifyPropsSkipClusterSend(channelId)
if a.Cluster != nil {
msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_MEMBERS_NOTIFY_PROPS,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: channelId,
}
a.Cluster.SendClusterMessage(msg)
}
}
func (a *App) InvalidateCacheForChannelMembersNotifyPropsSkipClusterSend(channelId string) {
a.Srv.Store.Channel().InvalidateCacheForChannelMembersNotifyProps(channelId)
}
func (a *App) InvalidateCacheForChannelByNameSkipClusterSend(teamId, name string) {
if teamId == "" {
teamId = "dm"
}
a.Srv.Store.Channel().InvalidateChannelByName(teamId, name)
}
func (a *App) InvalidateCacheForChannelPosts(channelId string) {
a.InvalidateCacheForChannelPostsSkipClusterSend(channelId)
if a.Cluster != nil {
msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL_POSTS,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: channelId,
}
a.Cluster.SendClusterMessage(msg)
}
}
func (a *App) InvalidateCacheForChannelPostsSkipClusterSend(channelId string) {
a.Srv.Store.Post().InvalidateLastPostTimeCache(channelId)
}
func (a *App) InvalidateCacheForUser(userId string) {
a.InvalidateCacheForUserSkipClusterSend(userId)
if a.Cluster != nil {
msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_USER,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: userId,
}
a.Cluster.SendClusterMessage(msg)
}
}
func (a *App) InvalidateCacheForUserSkipClusterSend(userId string) {
a.Srv.Store.Channel().InvalidateAllChannelMembersForUser(userId)
a.Srv.Store.User().InvalidateProfilesInChannelCacheByUser(userId)
a.Srv.Store.User().InvalidatProfileCacheForUser(userId)
if len(a.Hubs) != 0 {
a.GetHubForUserId(userId).InvalidateUser(userId)
}
}
func (a *App) InvalidateCacheForWebhook(webhookId string) {
a.InvalidateCacheForWebhookSkipClusterSend(webhookId)
if a.Cluster != nil {
msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_WEBHOOK,
SendType: model.CLUSTER_SEND_BEST_EFFORT,
Data: webhookId,
}
a.Cluster.SendClusterMessage(msg)
}
}
func (a *App) InvalidateCacheForWebhookSkipClusterSend(webhookId string) {
a.Srv.Store.Webhook().InvalidateWebhookCache(webhookId)
}
func (a *App) InvalidateWebConnSessionCacheForUser(userId string) {
if len(a.Hubs) != 0 {
a.GetHubForUserId(userId).InvalidateUser(userId)
}
}
func (h *Hub) Register(webConn *WebConn) {
h.register <- webConn
if webConn.IsAuthenticated() {
webConn.SendHello()
}
}
func (h *Hub) Unregister(webConn *WebConn) {
select {
case h.unregister <- webConn:
case <-h.stop:
}
}
func (h *Hub) Broadcast(message *model.WebSocketEvent) {
if message != nil {
h.broadcast <- message
}
}
func (h *Hub) InvalidateUser(userId string) {
h.invalidateUser <- userId
}
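// getGoroutineId parses the numeric ID from the "goroutine N [...]" header of the
// current stack trace; it returns -1 if parsing fails.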
func getGoroutineId() int {
var buf [64]byte
n := runtime.Stack(buf[:], false)
idField := strings.Fields(strings.TrimPrefix(string(buf[:n]), "goroutine "))[0]
id, err := strconv.Atoi(idField)
if err != nil {
id = -1
}
return id
}
func (h *Hub) Stop() {
close(h.stop)
<-h.didStop
}
func (h *Hub) Start() {
var doStart func()
var doRecoverableStart func()
var doRecover func()
doStart = func() {
h.goroutineId = getGoroutineId()
l4g.Debug("Hub for index %v is starting with goroutine %v", h.connectionIndex, h.goroutineId)
for {
select {
case webCon := <-h.register:
h.connections = append(h.connections, webCon)
atomic.StoreInt64(&h.connectionCount, int64(len(h.connections)))
case webCon := <-h.unregister:
userId := webCon.UserId
found := false
indexToDel := -1
for i, webConCandidate := range h.connections {
if webConCandidate == webCon {
indexToDel = i
continue
}
if userId == webConCandidate.UserId {
found = true
if indexToDel != -1 {
break
}
}
}
if indexToDel != -1 {
// Delete the webcon we are unregistering
h.connections[indexToDel] = h.connections[len(h.connections)-1]
h.connections = h.connections[:len(h.connections)-1]
}
if len(userId) == 0 {
continue
}
if !found {
h.app.Go(func() {
h.app.SetStatusOffline(userId, false)
})
}
case userId := <-h.invalidateUser:
for _, webCon := range h.connections {
if webCon.UserId == userId {
webCon.InvalidateCache()
}
}
case msg := <-h.broadcast:
for _, webCon := range h.connections {
if webCon.ShouldSendEvent(msg) {
select {
case webCon.Send <- msg:
default:
l4g.Error(fmt.Sprintf("webhub.broadcast: cannot send, closing websocket for userId=%v", webCon.UserId))
close(webCon.Send)
for i, webConCandidate := range h.connections {
if webConCandidate == webCon {
h.connections[i] = h.connections[len(h.connections)-1]
h.connections = h.connections[:len(h.connections)-1]
break
}
}
}
}
}
case <-h.stop:
userIds := make(map[string]bool)
for _, webCon := range h.connections {
userIds[webCon.UserId] = true
webCon.Close()
}
for userId := range userIds {
h.app.SetStatusOffline(userId, false)
}
h.connections = make([]*WebConn, 0, model.SESSION_CACHE_SIZE)
h.ExplicitStop = true
close(h.didStop)
return
}
}
}
doRecoverableStart = func() {
defer doRecover()
doStart()
}
doRecover = func() {
if !h.ExplicitStop {
if r := recover(); r != nil {
l4g.Error(fmt.Sprintf("Recovering from Hub panic. Panic was: %v", r))
} else {
l4g.Error("Webhub stopped unexpectedly. Recovering.")
}
l4g.Error(string(debug.Stack()))
go doRecoverableStart()
}
}
go doRecoverableStart()
} | msg := &model.ClusterMessage{
Event: model.CLUSTER_EVENT_INVALIDATE_CACHE_FOR_CHANNEL, |
middleware.go | // Copyright © 2019 The Things Network Foundation, The Things Industries B.V.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package web implements a middleware to handle HTTP errors.
package web
import (
"fmt"
"net/http"
"sort"
"github.com/getsentry/sentry-go"
"github.com/golang/gddo/httputil"
echo "github.com/labstack/echo/v4"
"go.thethings.network/lorawan-stack/v3/pkg/errors"
sentryerrors "go.thethings.network/lorawan-stack/v3/pkg/errors/sentry"
_ "go.thethings.network/lorawan-stack/v3/pkg/ttnpb" // imported for side-effect of correct TTN error rendering.
)
var globalRenderers = map[string]ErrorRenderer{}
// ErrorRenderer is an interface for rendering errors to HTTP responses.
type ErrorRenderer interface {
RenderError(c echo.Context, statusCode int, err error) error
}
// ErrorRendererFunc is a function signature that implements ErrorRenderer.
type ErrorRendererFunc func(c echo.Context, statusCode int, err error) error
// RenderError implements the ErrorRenderer interface.
func (f ErrorRendererFunc) RenderError(c echo.Context, statusCode int, err error) error {
return f(c, statusCode, err)
}
// RegisterRenderer registers a global error renderer.
func RegisterRenderer(contentType string, renderer ErrorRenderer) {
globalRenderers[contentType] = renderer
}
// ProcessError processes an HTTP error by converting it if appropriate, and
// determining the HTTP status code to return.
func ProcessError(in error) (statusCode int, err error) {
statusCode, err = http.StatusInternalServerError, in
if echoErr, ok := err.(*echo.HTTPError); ok { | if ttnErr, ok := errors.From(err); ok {
statusCode = errors.ToHTTPStatusCode(ttnErr)
return statusCode, ttnErr
}
ttnErr := errors.FromHTTPStatusCode(statusCode, "message")
return statusCode, ttnErr.WithCause(err).WithAttributes("message", err.Error())
}
// ErrorMiddleware returns an Echo middleware that catches errors in the chain,
// and renders them using the negotiated renderer. Global renderers can be registered
// with RegisterRenderer, and extra renderers can be passed to this function.
func ErrorMiddleware(extraRenderers map[string]ErrorRenderer) echo.MiddlewareFunc {
renderers := make(map[string]ErrorRenderer)
for contentType, renderer := range globalRenderers {
renderers[contentType] = renderer
}
for contentType, renderer := range extraRenderers {
renderers[contentType] = renderer
}
offers := make([]string, 0, len(renderers))
for k := range renderers {
offers = append(offers, k)
}
sort.Strings(offers) // Send offers in alphabetical order
return func(next echo.HandlerFunc) echo.HandlerFunc {
return func(c echo.Context) error {
err := next(c)
if err == nil {
return nil
}
statusCode, err := ProcessError(err)
if c.Response().Committed {
statusCode = c.Response().Status
}
if statusCode >= 500 {
errEvent := sentryerrors.NewEvent(err)
errEvent.Transaction = c.Path()
errEvent.Request = sentry.NewRequest(c.Request())
sentry.CaptureEvent(errEvent)
}
if c.Response().Committed {
return err
}
renderer := httputil.NegotiateContentType(c.Request(), offers, "application/json")
if renderer != "" {
return renderers[renderer].RenderError(c, statusCode, err)
}
return err
}
}
}
func init() {
RegisterRenderer("application/json", ErrorRendererFunc(func(c echo.Context, statusCode int, err error) error {
return c.JSON(statusCode, err)
}))
}
|
if echoErr.Code != 0 {
statusCode = echoErr.Code
}
if echoErr.Internal == nil {
ttnErr := errors.FromHTTPStatusCode(statusCode, "message")
return statusCode, ttnErr.WithAttributes("message", fmt.Sprint(echoErr.Message))
}
err = echoErr.Internal
}
|
ini.go | // These are some sample code for YAML,TOML,JSON,INI,HCL
package main
import (
"fmt"
"github.com/urionz/config"
"github.com/urionz/config/ini"
)
// go run ./examples/ini.go
func main() | {
config.WithOptions(config.ParseEnv)
// add Decoder and Encoder
config.AddDriver(ini.Driver)
// Or
// config.SetEncoder(config.Ini, ini.Encoder)
err := config.LoadFiles("testdata/ini_base.ini")
if err != nil {
panic(err)
}
fmt.Printf("config data: \n %#v\n", config.Data())
err = config.LoadFiles("testdata/ini_other.ini")
// config.LoadFiles("testdata/ini_base.ini", "testdata/ini_other.ini")
if err != nil {
panic(err)
}
fmt.Printf("config data: \n %#v\n", config.Data())
fmt.Print("get config example:\n")
name, ok := config.String("name")
fmt.Printf("- get string\n ok: %v, val: %v\n", ok, name)
// NOTICE: INI does not support arrays
map1, ok := config.StringMap("map1")
fmt.Printf("- get map\n ok: %v, val: %#v\n", ok, map1)
val0, ok := config.String("map1.key")
fmt.Printf("- get sub-value by path 'map.key'\n ok: %v, val: %v\n", ok, val0)
// can parse env name(ParseEnv: true)
fmt.Printf("get env 'envKey' val: %s\n", config.DefString("envKey", ""))
fmt.Printf("get env 'envKey1' val: %s\n", config.DefString("envKey1", ""))
// set value
config.Set("name", "new name")
name, ok = config.String("name")
fmt.Printf("- set string\n ok: %v, val: %v\n", ok, name)
} |
|
generic.py | """
Define the SeriesGroupBy and DataFrameGroupBy
classes that hold the groupby interfaces (and some implementations).
These are user facing as the result of the ``df.groupby(...)`` operations,
which here returns a DataFrameGroupBy object.
"""
from __future__ import annotations
from collections import abc
from functools import partial
from textwrap import dedent
from typing import (
Any,
Callable,
Hashable,
Iterable,
Mapping,
NamedTuple,
Sequence,
TypeVar,
Union,
cast,
)
import warnings
import numpy as np
from pandas._libs import (
Interval,
reduction as libreduction,
)
from pandas._typing import (
ArrayLike,
Manager,
Manager2D,
SingleManager,
)
from pandas.util._decorators import (
Appender,
Substitution,
doc,
)
from pandas.util._exceptions import find_stack_level
from pandas.core.dtypes.common import (
ensure_int64,
is_bool,
is_categorical_dtype,
is_dict_like,
is_integer_dtype,
is_interval_dtype,
is_scalar,
)
from pandas.core.dtypes.missing import (
isna,
notna,
)
from pandas.core import (
algorithms,
nanops,
)
from pandas.core.apply import (
GroupByApply,
maybe_mangle_lambdas,
reconstruct_func,
validate_func_kwargs,
)
from pandas.core.base import SpecificationError
import pandas.core.common as com
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame
from pandas.core.groupby import base
from pandas.core.groupby.groupby import (
GroupBy,
_agg_template,
_apply_docs,
_transform_template,
warn_dropping_nuisance_columns_deprecated,
)
from pandas.core.groupby.grouper import get_grouper
from pandas.core.indexes.api import (
Index,
MultiIndex,
all_indexes_same,
)
from pandas.core.series import Series
from pandas.core.shared_docs import _shared_docs
from pandas.core.util.numba_ import maybe_use_numba
from pandas.plotting import boxplot_frame_groupby
# TODO(typing) the return value on this callable should be any *scalar*.
AggScalar = Union[str, Callable[..., Any]]
# TODO: validate types on ScalarResult and move to _typing
# Blocked from using by https://github.com/python/mypy/issues/1484
# See note at _mangle_lambda_list
ScalarResult = TypeVar("ScalarResult")
class NamedAgg(NamedTuple):
column: Hashable
aggfunc: AggScalar
def generate_property(name: str, klass: type[DataFrame | Series]):
"""
Create a property for a GroupBy subclass to dispatch to DataFrame/Series.
Parameters
----------
name : str
klass : {DataFrame, Series}
Returns
-------
property
"""
def prop(self):
return self._make_wrapper(name)
parent_method = getattr(klass, name)
prop.__doc__ = parent_method.__doc__ or ""
prop.__name__ = name
return property(prop)
def pin_allowlisted_properties(
klass: type[DataFrame | Series], allowlist: frozenset[str]
):
"""
Create GroupBy member defs for DataFrame/Series names in an allowlist.
Parameters
----------
klass : DataFrame or Series class
class where members are defined.
allowlist : frozenset[str]
Set of names of klass methods to be constructed
Returns
-------
class decorator
Notes
-----
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
def pinner(cls):
for name in allowlist:
if hasattr(cls, name):
# don't override anything that was explicitly defined
# in the base class
continue
prop = generate_property(name, klass)
setattr(cls, name, prop)
return cls
return pinner
@pin_allowlisted_properties(Series, base.series_apply_allowlist)
class SeriesGroupBy(GroupBy[Series]):
_apply_allowlist = base.series_apply_allowlist
def _wrap_agged_manager(self, mgr: Manager) -> Series:
if mgr.ndim == 1:
mgr = cast(SingleManager, mgr)
single = mgr
else:
mgr = cast(Manager2D, mgr)
single = mgr.iget(0)
ser = self.obj._constructor(single, name=self.obj.name)
# NB: caller is responsible for setting ser.index
return ser
def _get_data_to_aggregate(self) -> SingleManager:
ser = self._obj_with_exclusions
single = ser._mgr
return single
def _iterate_slices(self) -> Iterable[Series]:
yield self._selected_obj
_agg_examples_doc = dedent(
"""
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
The output column names can be controlled by passing
the desired column names and aggregations as keyword arguments.
>>> s.groupby([1, 1, 2, 2]).agg(
... minimum='min',
... maximum='max',
... )
minimum maximum
1 1 2
2 3 4
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> s.groupby([1, 1, 2, 2]).agg(lambda x: x.astype(float).min())
1 1.0
2 3.0
dtype: float64
"""
)
@Appender(
_apply_docs["template"].format(
input="series", examples=_apply_docs["series_examples"]
)
)
def apply(self, func, *args, **kwargs) -> Series:
return super().apply(func, *args, **kwargs)
@doc(_agg_template, examples=_agg_examples_doc, klass="Series")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data.to_frame(), func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result.ravel(), index=index, name=data.name)
relabeling = func is None
columns = None
if relabeling:
columns, func = validate_func_kwargs(kwargs)
kwargs = {}
if isinstance(func, str):
return getattr(self, func)(*args, **kwargs)
elif isinstance(func, abc.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
func = maybe_mangle_lambdas(func)
ret = self._aggregate_multiple_funcs(func)
if relabeling:
# error: Incompatible types in assignment (expression has type
# "Optional[List[str]]", variable has type "Index")
ret.columns = columns # type: ignore[assignment]
return ret
else:
cyfunc = com.get_cython_func(func)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func, *args, **kwargs)
try:
return self._python_agg_general(func, *args, **kwargs)
except KeyError:
# TODO: KeyError is raised in _python_agg_general,
# see test_groupby.test_basic
result = self._aggregate_named(func, *args, **kwargs)
# result is a dict whose keys are the elements of result_index
index = self.grouper.result_index
return create_series_with_explicit_dtype(
result, index=index, dtype_if_empty=object
)
agg = aggregate
def _aggregate_multiple_funcs(self, arg) -> DataFrame:
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
raise SpecificationError("nested renamer is not supported")
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x for x in arg]
# indicated column order
columns = next(zip(*arg))
else:
# list of functions / function names
columns = []
for f in arg:
columns.append(com.get_callable_name(f) or f)
arg = zip(columns, arg)
results: dict[base.OutputKey, DataFrame | Series] = {}
for idx, (name, func) in enumerate(arg):
key = base.OutputKey(label=name, position=idx)
results[key] = self.aggregate(func)
if any(isinstance(x, DataFrame) for x in results.values()):
from pandas import concat
res_df = concat(
results.values(), axis=1, keys=[key.label for key in results.keys()]
)
return res_df
indexed_output = {key.position: val for key, val in results.items()}
output = self.obj._constructor_expanddim(indexed_output, index=None)
output.columns = Index(key.label for key in results)
output = self._reindex_output(output)
return output
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> Series:
"""
Wrap the dict result of a GroupBy aggregation into a Series.
"""
assert len(output) == 1
values = next(iter(output.values()))
result = self.obj._constructor(values)
result.name = self.obj.name
return result
def _wrap_applied_output(
self,
data: Series,
values: list[Any],
not_indexed_same: bool = False,
override_group_keys: bool = False,
) -> DataFrame | Series:
"""
Wrap the output of SeriesGroupBy.apply into the expected result.
Parameters
----------
data : Series
Input data for groupby operation.
values : List[Any]
Applied output for each group.
not_indexed_same : bool, default False
Whether the applied outputs are not indexed the same as the group axes.
Returns
-------
DataFrame or Series
"""
if len(values) == 0:
# GH #6265
return self.obj._constructor(
[],
name=self.obj.name,
index=self.grouper.result_index,
dtype=data.dtype,
)
assert values is not None
if isinstance(values[0], dict):
# GH #823 #24880
index = self.grouper.result_index
res_df = self.obj._constructor_expanddim(values, index=index)
res_df = self._reindex_output(res_df)
# if self.observed is False,
# keep all-NaN rows created while re-indexing
res_ser = res_df.stack(dropna=self.observed)
res_ser.name = self.obj.name
return res_ser
elif isinstance(values[0], (Series, DataFrame)):
result = self._concat_objects(
values,
not_indexed_same=not_indexed_same,
override_group_keys=override_group_keys,
)
result.name = self.obj.name
return result
else:
# GH #6265 #24880
result = self.obj._constructor(
data=values, index=self.grouper.result_index, name=self.obj.name
)
return self._reindex_output(result)
def _aggregate_named(self, func, *args, **kwargs):
# Note: this is very similar to _aggregate_series_pure_python,
# but that does not pin group.name
result = {}
initialized = False
for name, group in self:
object.__setattr__(group, "name", name)
output = func(group, *args, **kwargs)
output = libreduction.extract_result(output)
if not initialized:
# We only do this validation on the first iteration
libreduction.check_result_array(output, group.dtype)
initialized = True
result[name] = output
return result
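# A sketch of the shape of the result above (illustrative values only):
# for groups 'a' and 'b', calling _aggregate_named with np.mean would return
# something like {'a': 1.5, 'b': 2.0}, which the caller wraps into a Series.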
@Substitution(klass="Series")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _cython_transform(
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
):
assert axis == 0 # handled by caller
obj = self._selected_obj
try:
result = self.grouper._cython_operation(
"transform", obj._values, how, axis, **kwargs
)
except NotImplementedError as err:
raise TypeError(f"{how} is not supported for {obj.dtype} dtype") from err
return obj._constructor(result, index=self.obj.index, name=obj.name)
def _transform_general(self, func: Callable, *args, **kwargs) -> Series:
"""
Transform with a callable ``func``.
"""
assert callable(func)
klass = type(self.obj)
results = []
for name, group in self:
# this setattr is needed for test_transform_lambda_with_datetimetz
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
results.append(klass(res, index=group.index))
# check for empty "results" to avoid concat ValueError
if results:
from pandas.core.reshape.concat import concat
concatenated = concat(results)
result = self._set_result_index_ordered(concatenated)
else:
result = self.obj._constructor(dtype=np.float64)
result.name = self.obj.name
return result
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, str):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x) -> bool:
b = wrapper(x)
return b and notna(b)
try:
indices = [
self._get_index(name) for name, group in self if true_and_notna(group)
]
except (ValueError, TypeError) as err:
raise TypeError("the filter must return a boolean result") from err
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna: bool = True) -> Series:
"""
Return number of unique elements in the group.
Returns
-------
Series
Number of unique values within each group.
"""
ids, _, _ = self.grouper.group_info
val = self.obj._values
codes, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((codes, ids))
codes = codes[sorter]
ids = ids[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, codes[1:] != codes[:-1]]
# 1st item of each group is a new unique observation
mask = codes == -1
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype("int64", copy=False)
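# Worked example (hypothetical values) of the reduceat trick above: with
# sorted ids = [0, 0, 0, 1, 1] and codes = [2, 2, 5, 1, 1], we get
# idx == [0, 3] (group starts) and inc == [1, 0, 1, 1, 0] (new values),
# so np.add.reduceat(inc, idx) gives [2, 1] unique counts per group.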
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
result = self.obj._constructor(res, index=ri, name=self.obj.name)
return self._reindex_output(result, fill_value=0)
@doc(Series.describe)
def describe(self, **kwargs):
return super().describe(**kwargs)
def value_counts(
self,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
bins=None,
dropna: bool = True,
):
from pandas.core.reshape.merge import get_join_indexers
from pandas.core.reshape.tile import cut
ids, _, _ = self.grouper.group_info
val = self.obj._values
names = self.grouper.names + [self.obj.name]
if is_categorical_dtype(val.dtype) or (
bins is not None and not np.iterable(bins)
):
# scalar bins cannot be done at top level
# in a backward compatible way
# GH38672 relates to categorical dtype
ser = self.apply(
Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins,
)
ser.index.names = names
return ser
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
# error: "ndarray" has no attribute "cat"
lev = lab.cat.categories # type: ignore[attr-defined]
# error: No overload variant of "take" of "_ArrayOrScalarCommon" matches
# argument types "Any", "bool", "Union[Any, float]"
lab = lev.take( # type: ignore[call-overload]
# error: "ndarray" has no attribute "cat"
lab.cat.codes, # type: ignore[attr-defined]
allow_fill=True,
# error: Item "ndarray" of "Union[ndarray, Index]" has no attribute
# "_na_value"
fill_value=lev._na_value, # type: ignore[union-attr]
)
llab = lambda lab, inc: lab[inc]._multiindex.codes[-1]
if is_interval_dtype(lab.dtype):
# TODO: should we do this inside II?
lab_interval = cast(Interval, lab)
sorter = np.lexsort((lab_interval.left, lab_interval.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idchanges = 1 + np.nonzero(ids[1:] != ids[:-1])[0]
idx = np.r_[0, idchanges]
if not len(ids):
idx = idchanges
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
if not len(val):
inc = lchanges
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
codes = self.grouper.reconstructed_codes
codes = [rep(level_codes) for level_codes in codes] + [llab(lab, inc)]
# error: List item 0 has incompatible type "Union[ndarray[Any, Any], Index]";
# expected "Index"
levels = [ping.group_index for ping in self.grouper.groupings] + [
lev # type: ignore[list-item]
]
if dropna:
mask = codes[-1] != -1
if mask.all():
dropna = False
else:
out, codes = out[mask], [level_codes[mask] for level_codes in codes]
if normalize:
out = out.astype("float")
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, codes[-1] = out[sorter], codes[-1][sorter]
if bins is not None:
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype="bool")
for level_codes in codes[:-1]:
diff |= np.r_[True, level_codes[1:] != level_codes[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin), np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, codes[-1]]
_, idx = get_join_indexers(left, right, sort=False, how="left")
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
def build_codes(lev_codes: np.ndarray) -> np.ndarray:
return np.repeat(lev_codes[diff], nbin)
codes = [build_codes(lev_codes) for lev_codes in codes[:-1]]
codes.append(left[-1])
mi = MultiIndex(levels=levels, codes=codes, names=names, verify_integrity=False)
if is_integer_dtype(out.dtype):
out = ensure_int64(out)
return self.obj._constructor(out, index=mi, name=self.obj.name)
@doc(Series.nlargest)
def nlargest(self, n: int = 5, keep: str = "first"):
f = partial(Series.nlargest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
@doc(Series.nsmallest)
def nsmallest(self, n: int = 5, keep: str = "first"):
f = partial(Series.nsmallest, n=n, keep=keep)
data = self._obj_with_exclusions
# Don't change behavior if result index happens to be the same, i.e.
# already ordered and n >= all group sizes.
result = self._python_apply_general(f, data, not_indexed_same=True)
return result
@pin_allowlisted_properties(DataFrame, base.dataframe_apply_allowlist)
class DataFrameGroupBy(GroupBy[DataFrame]):
_apply_allowlist = base.dataframe_apply_allowlist
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame(
... {
... "A": [1, 1, 2, 2],
... "B": [1, 2, 3, 4],
... "C": [0.362838, 0.227877, 1.267767, -0.562860],
... }
... )
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590715
2 3 4 0.704907
To control the output names with different aggregations per column,
pandas supports "named aggregation"
>>> df.groupby("A").agg(
... b_min=pd.NamedAgg(column="B", aggfunc="min"),
... c_sum=pd.NamedAgg(column="C", aggfunc="sum"))
b_min c_sum
A
1 1 0.590715
2 3 0.704907
- The keywords are the *output* column names
- The values are tuples whose first element is the column to select
and the second element is the aggregation to apply to that column.
Pandas provides the ``pandas.NamedAgg`` namedtuple with the fields
``['column', 'aggfunc']`` to make it clearer what the arguments are.
As usual, the aggregation can be a callable or a string alias.
See :ref:`groupby.aggregate.named` for more.
.. versionchanged:: 1.3.0
The resulting dtype will reflect the return value of the aggregating function.
>>> df.groupby("A")[["B"]].agg(lambda x: x.astype(float).min())
B
A
1 1.0
2 3.0
"""
)
@doc(_agg_template, examples=_agg_examples_doc, klass="DataFrame")
def aggregate(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
if maybe_use_numba(engine):
with self._group_selection_context():
data = self._selected_obj
result = self._aggregate_with_numba(
data, func, *args, engine_kwargs=engine_kwargs, **kwargs
)
index = self.grouper.result_index
return self.obj._constructor(result, index=index, columns=data.columns)
relabeling, func, columns, order = reconstruct_func(func, **kwargs)
func = maybe_mangle_lambdas(func)
op = GroupByApply(self, func, args, kwargs)
result = op.agg()
if not is_dict_like(func) and result is not None:
return result
elif relabeling and result is not None:
# this should be the only (non-raising) case with relabeling
# used reordered index of columns
result = result.iloc[:, order]
result.columns = columns
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
# test_groupby_as_index_series_scalar gets here with 'not self.as_index'
return self._python_agg_general(func, *args, **kwargs)
elif args or kwargs:
# test_pass_args_kwargs gets here (with and without as_index)
# can't return early
result = self._aggregate_frame(func, *args, **kwargs)
elif self.axis == 1:
# _aggregate_multiple_funcs does not allow self.axis == 1
# Note: axis == 1 precludes 'not self.as_index', see __init__
result = self._aggregate_frame(func)
return result
else:
# try to treat as if we are passing a list
gba = GroupByApply(self, [func], args=(), kwargs={})
try:
result = gba.agg()
except ValueError as err:
if "no results" not in str(err):
# raised directly by _aggregate_multiple_funcs
raise
result = self._aggregate_frame(func)
else:
sobj = self._selected_obj
if isinstance(sobj, Series):
# GH#35246 test_groupby_as_index_select_column_sum_empty_df
result.columns = self._obj_with_exclusions.columns.copy()
else:
# Retain our column names
result.columns._set_names(
sobj.columns.names, level=list(range(sobj.columns.nlevels))
)
# select everything except for the last level, which is the one
# containing the name of the function(s), see GH#32040
result.columns = result.columns.droplevel(-1)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = Index(range(len(result)))
return result
agg = aggregate
def _iterate_slices(self) -> Iterable[Series]:
obj = self._selected_obj
if self.axis == 1:
obj = obj.T
if isinstance(obj, Series) and obj.name not in self.exclusions:
# Occurs when doing DataFrameGroupBy(...)["X"]
yield obj
else:
for label, values in obj.items():
if label in self.exclusions:
continue
yield values
def _aggregate_frame(self, func, *args, **kwargs) -> DataFrame:
if self.grouper.nkeys != 1:
raise AssertionError("Number of keys must be 1")
obj = self._obj_with_exclusions
result: dict[Hashable, NDFrame | np.ndarray] = {}
if self.axis == 0:
# test_pass_args_kwargs_duplicate_columns gets here with non-unique columns
for name, data in self:
fres = func(data, *args, **kwargs)
result[name] = fres
else:
# we get here in a number of test_multilevel tests
for name in self.indices:
grp_df = self.get_group(name, obj=obj)
fres = func(grp_df, *args, **kwargs)
result[name] = fres
result_index = self.grouper.result_index
other_ax = obj.axes[1 - self.axis]
out = self.obj._constructor(result, index=other_ax, columns=result_index)
if self.axis == 0:
out = out.T
return out
def _aggregate_item_by_item(self, func, *args, **kwargs) -> DataFrame:
# only for axis==0
# tests that get here with non-unique cols:
# test_resample_with_timedelta_yields_no_empty_groups,
# test_resample_apply_product
obj = self._obj_with_exclusions
result: dict[int, NDFrame] = {}
for i, (item, sgb) in enumerate(self._iterate_column_groupbys(obj)):
result[i] = sgb.aggregate(func, *args, **kwargs)
res_df = self.obj._constructor(result)
res_df.columns = obj.columns
return res_df
def _wrap_applied_output(
self,
data: DataFrame,
values: list,
not_indexed_same: bool = False,
override_group_keys: bool = False,
):
if len(values) == 0:
result = self.obj._constructor(
index=self.grouper.result_index, columns=data.columns
)
result = result.astype(data.dtypes, copy=False)
return result
# GH12824
first_not_none = next(com.not_none(*values), None)
if first_not_none is None:
# GH9684 - All values are None, return an empty frame.
return self.obj._constructor()
elif isinstance(first_not_none, DataFrame):
return self._concat_objects(
values,
not_indexed_same=not_indexed_same,
override_group_keys=override_group_keys,
)
key_index = self.grouper.result_index if self.as_index else None
if isinstance(first_not_none, (np.ndarray, Index)):
# GH#1738: values is list of arrays of unequal lengths
# fall through to the outer else clause
# TODO: sure this is right? we used to do this
# after raising AttributeError above
return self.obj._constructor_sliced(
values, index=key_index, name=self._selection
)
elif not isinstance(first_not_none, Series):
# values are not series or array-like but scalars
# self._selection not passed through to Series as the
# result should not take the name of original selection
# of columns
if self.as_index:
return self.obj._constructor_sliced(values, index=key_index)
else:
result = self.obj._constructor(values, columns=[self._selection])
self._insert_inaxis_grouper_inplace(result)
return result
else:
# values are Series
return self._wrap_applied_output_series(
values,
not_indexed_same,
first_not_none,
key_index,
override_group_keys,
)
def _wrap_applied_output_series(
self,
values: list[Series],
not_indexed_same: bool,
first_not_none,
key_index,
override_group_keys: bool,
) -> DataFrame | Series:
# this is to silence a DeprecationWarning
# TODO(2.0): Remove when default dtype of empty Series is object
kwargs = first_not_none._construct_axes_dict()
backup = create_series_with_explicit_dtype(dtype_if_empty=object, **kwargs)
values = [x if (x is not None) else backup for x in values]
all_indexed_same = all_indexes_same(x.index for x in values)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
applied_index = self._selected_obj._get_axis(self.axis)
singular_series = len(values) == 1 and applied_index.nlevels == 1
if singular_series:
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
values,
not_indexed_same=not_indexed_same,
override_group_keys=override_group_keys,
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
values,
not_indexed_same=True,
override_group_keys=override_group_keys,
)
# Combine values
# vstack+constructor is faster than concat and handles MI-columns
stacked_values = np.vstack([np.asarray(v) for v in values])
if self.axis == 0:
index = key_index
columns = first_not_none.index.copy()
if columns.name is None:
# GH6124 - propagate name of Series when it's consistent
names = {v.name for v in values}
if len(names) == 1:
columns.name = list(names)[0]
else:
index = first_not_none.index
columns = key_index
stacked_values = stacked_values.T
if stacked_values.dtype == object:
# We'll have the DataFrame constructor do inference
stacked_values = stacked_values.tolist()
result = self.obj._constructor(stacked_values, index=index, columns=columns)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
return self._reindex_output(result)
def | (
self, how: str, numeric_only: bool = True, axis: int = 0, **kwargs
) -> DataFrame:
assert axis == 0 # handled by caller
# TODO: no tests with self.ndim == 1 for DataFrameGroupBy
# With self.axis == 0, we have multi-block tests
# e.g. test_rank_min_int, test_cython_transform_frame
# test_transform_numeric_ret
# With self.axis == 1, _get_data_to_aggregate does a transpose
# so we always have a single block.
mgr: Manager2D = self._get_data_to_aggregate()
if numeric_only:
mgr = mgr.get_numeric_data(copy=False)
def arr_func(bvalues: ArrayLike) -> ArrayLike:
return self.grouper._cython_operation(
"transform", bvalues, how, 1, **kwargs
)
# We could use `mgr.apply` here and not have to set_axis, but
# we would have to do shape gymnastics for ArrayManager compat
res_mgr = mgr.grouped_reduce(arr_func, ignore_failures=True)
res_mgr.set_axis(1, mgr.axes[1])
if len(res_mgr) < len(mgr):
warn_dropping_nuisance_columns_deprecated(type(self), how)
res_df = self.obj._constructor(res_mgr)
if self.axis == 1:
res_df = res_df.T
return res_df
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
# Determine whether to use slow or fast path by evaluating on the first group.
# Need to handle the case of an empty generator and process the result so that
# it does not need to be computed again.
try:
name, group = next(gen)
except StopIteration:
pass
else:
object.__setattr__(group, "name", name)
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError as err:
msg = "transform must return a scalar value for each group"
raise ValueError(msg) from err
if group.size > 0:
res = _wrap_transform_general_frame(self.obj, group, res)
applied.append(res)
# Compute and process with the remaining groups
for name, group in gen:
if group.size == 0:
continue
object.__setattr__(group, "name", name)
res = path(group)
res = _wrap_transform_general_frame(self.obj, group, res)
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
other_axis = 1 if self.axis == 0 else 0 # switches between 0 & 1
concatenated = concat(applied, axis=self.axis, verify_integrity=False)
concatenated = concatenated.reindex(concat_index, axis=other_axis, copy=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass="DataFrame")
@Appender(_transform_template)
def transform(self, func, *args, engine=None, engine_kwargs=None, **kwargs):
return self._transform(
func, *args, engine=engine, engine_kwargs=engine_kwargs, **kwargs
)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, str):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis
)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis
)
return fast_path, slow_path
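# Illustrative example: for func='cumsum', fast_path(group) calls
# group.cumsum(*args, **kwargs) directly, while slow_path applies it
# column-wise via group.apply; _choose_path below keeps the fast path
# only when both produce equal results.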
def _choose_path(self, fast_path: Callable, slow_path: Callable, group: DataFrame):
path = slow_path
res = slow_path(group)
if self.ngroups == 1:
# no need to evaluate multiple paths when only
# a single group exists
return path, res
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
except AssertionError:
raise # pragma: no cover
except Exception:
# GH#29631 For user-defined function, we can't predict what may be
# raised; see test_transform.test_transform_fastpath_raises
return path, res
# verify fast path returns either:
# a DataFrame with columns equal to group.columns
# OR a Series with index equal to group.columns
if isinstance(res_fast, DataFrame):
if not res_fast.columns.equals(group.columns):
return path, res
elif isinstance(res_fast, Series):
if not res_fast.index.equals(group.columns):
return path, res
else:
return path, res
if res_fast.equals(res):
path = fast_path
return path, res
def _transform_item_by_item(self, obj: DataFrame, wrapper) -> DataFrame:
# iterate through columns, see test_transform_exclude_nuisance
# gets here with non-unique columns
output = {}
inds = []
for i, (colname, sgb) in enumerate(self._iterate_column_groupbys(obj)):
try:
output[i] = sgb.transform(wrapper)
except TypeError:
# e.g. trying to call nanmean with string values
warn_dropping_nuisance_columns_deprecated(type(self), "transform")
else:
inds.append(i)
if not output:
raise TypeError("Transform function invalid for data types")
columns = obj.columns.take(inds)
result = self.obj._constructor(output, index=obj.index)
result.columns = columns
return result
def filter(self, func, dropna=True, *args, **kwargs):
"""
Return a copy of a DataFrame excluding filtered elements.
Elements from groups are filtered if they do not satisfy the
boolean criterion specified by func.
Parameters
----------
func : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
If False, groups that evaluate False are filled with NaNs.
Returns
-------
filtered : DataFrame
Notes
-----
Each subframe is endowed with the attribute 'name' in case you need to know
which group you are working on.
Functions that mutate the passed object can produce unexpected
behavior or errors and are not supported. See :ref:`gotchas.udf-mutation`
for more details.
Examples
--------
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, "name", name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError(
f"filter function returned a {type(res).__name__}, "
"but expected a scalar bool"
)
return self._apply_filter(indices, dropna)
def __getitem__(self, key) -> DataFrameGroupBy | SeriesGroupBy:
if self.axis == 1:
# GH 37725
raise ValueError("Cannot subset columns when using axis=1")
# per GH 23566
if isinstance(key, tuple) and len(key) > 1:
# if len == 1, then it becomes a SeriesGroupBy and this is actually
# valid syntax, so don't raise warning
warnings.warn(
"Indexing with multiple keys (implicitly converted to a tuple "
"of keys) will be deprecated, use a list instead.",
FutureWarning,
stacklevel=find_stack_level(),
)
return super().__getitem__(key)
def _gotitem(self, key, ndim: int, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : {1, 2}
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(
subset,
self.grouper,
axis=self.axis,
level=self.level,
grouper=self.grouper,
exclusions=self.exclusions,
selection=key,
as_index=self.as_index,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
mutated=self.mutated,
dropna=self.dropna,
)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(
subset,
level=self.level,
grouper=self.grouper,
selection=key,
sort=self.sort,
group_keys=self.group_keys,
squeeze=self.squeeze,
observed=self.observed,
dropna=self.dropna,
)
raise AssertionError("invalid ndim for _gotitem")
def _get_data_to_aggregate(self) -> Manager2D:
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._mgr
else:
return obj._mgr
def _insert_inaxis_grouper_inplace(self, result: DataFrame) -> None:
# zip in reverse so we can always insert at loc 0
columns = result.columns
for name, lev, in_axis in zip(
reversed(self.grouper.names),
reversed(self.grouper.get_group_levels()),
reversed([grp.in_axis for grp in self.grouper.groupings]),
):
# GH #28549
# When using .apply(-), name will be in columns already
if in_axis and name not in columns:
result.insert(0, name, lev)
def _indexed_output_to_ndframe(
self, output: Mapping[base.OutputKey, ArrayLike]
) -> DataFrame:
"""
Wrap the dict result of a GroupBy aggregation into a DataFrame.
"""
indexed_output = {key.position: val for key, val in output.items()}
columns = Index([key.label for key in output])
columns._set_names(self._obj_with_exclusions._get_axis(1 - self.axis).names)
result = self.obj._constructor(indexed_output)
result.columns = columns
return result
def _wrap_agged_manager(self, mgr: Manager2D) -> DataFrame:
if not self.as_index:
# GH 41998 - empty mgr always gets index of length 0
rows = mgr.shape[1] if mgr.shape[0] > 0 else 0
index = Index(range(rows))
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr.set_axis(1, index)
result = self.obj._constructor(mgr)
if self.axis == 1:
result = result.T
# Note: we only need to pass datetime=True in order to get numeric
# values converted
return self._reindex_output(result)._convert(datetime=True)
def _iterate_column_groupbys(self, obj: DataFrame | Series):
for i, colname in enumerate(obj.columns):
yield colname, SeriesGroupBy(
obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions,
observed=self.observed,
)
def _apply_to_column_groupbys(self, func, obj: DataFrame | Series) -> DataFrame:
from pandas.core.reshape.concat import concat
columns = obj.columns
results = [
func(col_groupby) for _, col_groupby in self._iterate_column_groupbys(obj)
]
if not len(results):
# concat would raise
return DataFrame([], columns=columns, index=self.grouper.result_index)
else:
return concat(results, keys=columns, axis=1)
def nunique(self, dropna: bool = True) -> DataFrame:
"""
Return DataFrame with counts of unique elements in each position.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
value1 value2
id
egg 1 1
ham 1 2
spam 2 1
Check for rows with the same id but conflicting values:
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
if self.axis != 0:
# see test_groupby_crash_on_nunique
return self._python_agg_general(lambda sgb: sgb.nunique(dropna))
obj = self._obj_with_exclusions
results = self._apply_to_column_groupbys(
lambda sgb: sgb.nunique(dropna), obj=obj
)
if not self.as_index:
results.index = Index(range(len(results)))
self._insert_inaxis_grouper_inplace(results)
return results
@doc(
_shared_docs["idxmax"],
numeric_only_default="True for axis=0, False for axis=1",
)
def idxmax(self, axis=0, skipna: bool = True, numeric_only: bool | None = None):
axis = DataFrame._get_axis_number(axis)
if numeric_only is None:
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#38217
res = df._reduce(
nanops.nanargmax,
"argmax",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmax"
return self._python_apply_general(func, self._obj_with_exclusions)
@doc(
_shared_docs["idxmin"],
numeric_only_default="True for axis=0, False for axis=1",
)
def idxmin(self, axis=0, skipna: bool = True, numeric_only: bool | None = None):
axis = DataFrame._get_axis_number(axis)
if numeric_only is None:
numeric_only = None if axis == 0 else False
def func(df):
# NB: here we use numeric_only=None, in DataFrame it is False GH#46560
res = df._reduce(
nanops.nanargmin,
"argmin",
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
)
indices = res._values
index = df._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return df._constructor_sliced(result, index=res.index)
func.__name__ = "idxmin"
return self._python_apply_general(func, self._obj_with_exclusions)
boxplot = boxplot_frame_groupby
def value_counts(
self,
subset: Sequence[Hashable] | None = None,
normalize: bool = False,
sort: bool = True,
ascending: bool = False,
dropna: bool = True,
) -> DataFrame | Series:
"""
Return a Series or DataFrame containing counts of unique rows.
.. versionadded:: 1.4.0
Parameters
----------
subset : list-like, optional
Columns to use when counting unique combinations.
normalize : bool, default False
Return proportions rather than frequencies.
sort : bool, default True
Sort by frequencies.
ascending : bool, default False
Sort in ascending order.
dropna : bool, default True
Don’t include counts of rows that contain NA values.
Returns
-------
Series or DataFrame
Series if the groupby as_index is True, otherwise DataFrame.
See Also
--------
Series.value_counts: Equivalent method on Series.
DataFrame.value_counts: Equivalent method on DataFrame.
SeriesGroupBy.value_counts: Equivalent method on SeriesGroupBy.
Notes
-----
- If the groupby as_index is True then the returned Series will have a
MultiIndex with one level per input column.
- If the groupby as_index is False then the returned DataFrame will have an
additional column with the value_counts. The column is labelled 'count' or
'proportion', depending on the ``normalize`` parameter.
By default, rows that contain any NA values are omitted from
the result.
By default, the result will be in descending order so that the
first element of each group is the most frequently-occurring row.
Examples
--------
>>> df = pd.DataFrame({
... 'gender': ['male', 'male', 'female', 'male', 'female', 'male'],
... 'education': ['low', 'medium', 'high', 'low', 'high', 'low'],
... 'country': ['US', 'FR', 'US', 'FR', 'FR', 'FR']
... })
>>> df
gender education country
0 male low US
1 male medium FR
2 female high US
3 male low FR
4 female high FR
5 male low FR
>>> df.groupby('gender').value_counts()
gender education country
female high FR 1
US 1
male low FR 2
US 1
medium FR 1
dtype: int64
>>> df.groupby('gender').value_counts(ascending=True)
gender education country
female high FR 1
US 1
male low US 1
medium FR 1
low FR 2
dtype: int64
>>> df.groupby('gender').value_counts(normalize=True)
gender education country
female high FR 0.50
US 0.50
male low FR 0.50
US 0.25
medium FR 0.25
dtype: float64
>>> df.groupby('gender', as_index=False).value_counts()
gender education country count
0 female high FR 1
1 female high US 1
2 male low FR 2
3 male low US 1
4 male medium FR 1
>>> df.groupby('gender', as_index=False).value_counts(normalize=True)
gender education country proportion
0 female high FR 0.50
1 female high US 0.50
2 male low FR 0.50
3 male low US 0.25
4 male medium FR 0.25
"""
if self.axis == 1:
raise NotImplementedError(
"DataFrameGroupBy.value_counts only handles axis=0"
)
with self._group_selection_context():
df = self.obj
in_axis_names = {
grouping.name for grouping in self.grouper.groupings if grouping.in_axis
}
if isinstance(self._selected_obj, Series):
name = self._selected_obj.name
keys = [] if name in in_axis_names else [self._selected_obj]
else:
keys = [
# Can't use .values because the column label needs to be preserved
self._selected_obj.iloc[:, idx]
for idx, name in enumerate(self._selected_obj.columns)
if name not in in_axis_names
]
if subset is not None:
clashing = set(subset) & set(in_axis_names)
if clashing:
raise ValueError(
f"Keys {clashing} in subset cannot be in "
"the groupby column keys"
)
groupings = list(self.grouper.groupings)
for key in keys:
grouper, _, _ = get_grouper(
df,
key=key,
axis=self.axis,
sort=self.sort,
dropna=dropna,
)
groupings += list(grouper.groupings)
# Take the size of the overall columns
gb = df.groupby(
groupings,
sort=self.sort,
observed=self.observed,
dropna=self.dropna,
)
result_series = cast(Series, gb.size())
if normalize:
# Normalize the results by dividing by the original group sizes.
# We are guaranteed to have the first N levels be the
# user-requested grouping.
levels = list(
range(len(self.grouper.groupings), result_series.index.nlevels)
)
indexed_group_size = result_series.groupby(
result_series.index.droplevel(levels),
sort=self.sort,
observed=self.observed,
dropna=self.dropna,
).transform("sum")
result_series /= indexed_group_size
if sort:
# Sort the values and then resort by the main grouping
index_level = range(len(self.grouper.groupings))
result_series = result_series.sort_values(
ascending=ascending
).sort_index(level=index_level, sort_remaining=False)
result: Series | DataFrame
if self.as_index:
result = result_series
else:
# Convert to frame
name = "proportion" if normalize else "count"
index = result_series.index
columns = com.fill_missing_names(index.names)
if name in columns:
raise ValueError(
f"Column label '{name}' is duplicate of result column"
)
result_series.name = name
result_series.index = index.set_names(range(len(columns)))
result_frame = result_series.reset_index()
result_frame.columns = columns + [name]
result = result_frame
return result.__finalize__(self.obj, method="value_counts")
def _wrap_transform_general_frame(
obj: DataFrame, group: DataFrame, res: DataFrame | Series
) -> DataFrame:
from pandas import concat
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if res.index.is_(obj.index):
res_frame = concat([res] * len(group.columns), axis=1)
res_frame.columns = group.columns
res_frame.index = group.index
else:
res_frame = obj._constructor(
np.tile(res.values, (len(group.index), 1)),
columns=group.columns,
index=group.index,
)
assert isinstance(res_frame, DataFrame)
return res_frame
else:
return res
| _cython_transform |
auth.py | # -*- coding: utf-8 -*-
"""
auth.py - Creates an authenticated pygsheets object
""" | import pygsheets
import pathlib
from config import config # import config
# Grab the service account auth file location from the config
SA_auth_file = pathlib.Path(config['auth_file'])
# Create the pygsheets obj
client = pygsheets.authorize(service_file=SA_auth_file)
spreadsheet = client.open(config['spreadsheet_name'])
tracker_wks = spreadsheet.worksheet_by_title(config['tracker_worksheet_name'])
requests_wks = spreadsheet.worksheet_by_title(config['requests_worksheet_name'])
stats_wks = spreadsheet.worksheet_by_title(config['stats_worksheet_name']) |
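# Example usage elsewhere in the app (a sketch; cell addresses are placeholders):
#   value = tracker_wks.get_value('A1')
#   tracker_wks.update_value('A1', 'processed')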
# Libs |
util.go | // Package util contains methods used across all cli commands
// @todo: get rid of os.Exits and use errors instead
package util
import (
"encoding/json"
"fmt"
"net"
"os"
"strings"
ccli "github.com/micro/cli/v2"
"github.com/micro/micro/v2/internal/config"
"github.com/micro/micro/v2/internal/platform"
"github.com/micro/micro/v2/service/runtime/profile"
)
const (
// EnvLocal is a builtin environment, it means services launched
// with `micro run` will use default, zero dependency implementations for
// interfaces, like mdns for registry.
EnvLocal = "local"
// EnvServer is a builtin environment, it represents your local `micro server`
EnvServer = "server"
// EnvPlatform is a builtin environment, the One True Micro Live(tm) environment.
EnvPlatform = "platform"
)
const (
// localProxyAddress is the default proxy address for environment local
// the local env talks to no remote services, so there is no proxy to use
localProxyAddress = "none"
// serverProxyAddress is the default proxy address for environment server
serverProxyAddress = "127.0.0.1:8081"
// platformProxyAddress is the default proxy address for environment platform
platformProxyAddress = "proxy.micro.mu"
)
var defaultEnvs = map[string]Env{
EnvLocal: Env{
Name: EnvLocal,
ProxyAddress: localProxyAddress,
},
EnvServer: Env{
Name: EnvServer,
ProxyAddress: serverProxyAddress,
},
EnvPlatform: Env{
Name: EnvPlatform,
ProxyAddress: platformProxyAddress,
},
}
func isBuiltinService(command string) bool {
if command == "server" {
return true
}
for _, service := range platform.Services {
if command == service {
return true
}
}
return false
}
// SetupCommand includes things that should run for each command.
func SetupCommand(ctx *ccli.Context) {
if ctx.Args().Len() == 1 && isBuiltinService(ctx.Args().First()) {
return
}
if ctx.Args().Len() >= 1 && ctx.Args().First() == "env" {
return
}
toFlag := func(s string) string {
return strings.ToLower(strings.ReplaceAll(s, "MICRO_", ""))
}
setFlags := func(envars []string) {
for _, envar := range envars {
// setting both env and flags here
// as the proxy settings for example did not take effect
// with only flags
parts := strings.SplitN(envar, "=", 2) // keep '=' characters inside values intact
key := toFlag(parts[0])
os.Setenv(parts[0], parts[1])
ctx.Set(key, parts[1])
}
}
env := GetEnv(ctx)
// if we're running a local environment return here
if len(env.ProxyAddress) == 0 || env.Name == EnvLocal {
return
}
switch env.Name {
case EnvServer:
setFlags(profile.ServerCLI())
case EnvPlatform:
setFlags(profile.PlatformCLI())
default:
// default case for ad hoc envs, see comments above about tests
setFlags(profile.ServerCLI())
}
// Set the proxy
setFlags([]string{"MICRO_PROXY=" + env.ProxyAddress})
}
type Env struct {
Name string
ProxyAddress string
}
func AddEnv(env Env) {
envs := getEnvs()
envs[env.Name] = env
setEnvs(envs)
}
func getEnvs() map[string]Env {
envsJSON, err := config.Get("envs")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
envs := map[string]Env{}
if len(envsJSON) > 0 {
err := json.Unmarshal([]byte(envsJSON), &envs)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
for k, v := range defaultEnvs {
envs[k] = v
}
return envs
}
func setEnvs(envs map[string]Env) {
envsJSON, err := json.Marshal(envs)
if err != nil {
fmt.Println(err)
os.Exit(1)
}
err = config.Set(string(envsJSON), "envs")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
}
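// The value persisted under the "envs" key is plain JSON, for example
// (illustrative only):
//   {"staging":{"Name":"staging","ProxyAddress":"proxy.staging.example.com"}}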
// GetEnv returns the currently selected environment. The --env flag takes
// precedence over the value saved in config; when neither is set, local is used.
func GetEnv(ctx *ccli.Context) Env {
var envName string
if len(ctx.String("env")) > 0 {
envName = ctx.String("env")
} else {
env, err := config.Get("env")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if env == "" {
env = EnvLocal
}
envName = env
}
return GetEnvByName(envName)
}
func GetEnvByName(env string) Env {
envs := getEnvs()
envir, ok := envs[env]
if !ok {
fmt.Printf("Env %q not found. See `micro env` for available environments.\n", env)
os.Exit(1)
}
if len(envir.ProxyAddress) == 0 {
return envir
}
// default to :443
if _, port, _ := net.SplitHostPort(envir.ProxyAddress); len(port) == 0 {
envir.ProxyAddress = net.JoinHostPort(envir.ProxyAddress, "443")
}
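// e.g. a stored address of "proxy.micro.mu" becomes "proxy.micro.mu:443" here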
return envir
}
func GetEnvs() []Env {
envs := getEnvs()
ret := []Env{defaultEnvs[EnvLocal], defaultEnvs[EnvServer], defaultEnvs[EnvPlatform]}
nonDefaults := []Env{}
for _, env := range envs {
if _, isDefault := defaultEnvs[env.Name]; !isDefault {
nonDefaults = append(nonDefaults, env)
}
}
// @todo order nondefault envs alphabetically
ret = append(ret, nonDefaults...)
return ret
}
// SetEnv selects an environment to be used.
func | (envName string) {
envs := getEnvs()
_, ok := envs[envName]
if !ok {
fmt.Printf("Environment '%v' does not exist\n", envName)
os.Exit(1)
}
config.Set(envName, "env")
}
func IsLocal(ctx *ccli.Context) bool {
return GetEnv(ctx).Name == EnvLocal
}
func IsServer(ctx *ccli.Context) bool {
return GetEnv(ctx).Name == EnvServer
}
func IsPlatform(ctx *ccli.Context) bool {
return GetEnv(ctx).Name == EnvPlatform
}
| SetEnv |
e2e.test.js | const { chromium } = require('playwright-chromium');
const { expect } = require('chai');
const host = 'http://localhost:3000'; // Application host (NOT service host - that can be anything)
const DEBUG = true;
const mockData = require('./mock-data.json');
const endpoints = {
register: '/users/register',
login: '/users/login',
logout: '/users/logout',
memes: '/data/memes?sortBy=_createdOn%20desc',
create: '/data/memes',
details: '/data/memes/',
delete: '/data/memes/',
profile: '/data/memes?where=_ownerId%3D%220002%22&sortBy=_createdOn%20desc'
};
function json(data) {
return {
status: 200,
headers: {
'Access-Control-Allow-Origin': '*',
'Content-Type': 'application/json'
},
body: JSON.stringify(data)
};
}
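// Example (illustrative): the helper above lets a test stub any endpoint, e.g.
//   await context.route('**' + endpoints.login,
//       route => route.fulfill(json({ accessToken: 'AAAA' })));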
let browser;
let context;
let page;
describe('E2E tests', function () {
if (DEBUG) {
this.timeout(120000);
} else {
this.timeout(6000);
}
before(async () => {
if (DEBUG) {
browser = await chromium.launch({ headless: false, slowMo: 500 });
} else {
browser = await chromium.launch();
}
});
after(async () => {
await browser.close();
});
beforeEach(async () => {
context = await browser.newContext();
await context.route('**' + endpoints.memes, route => route.fulfill(json(mockData)));
await context.route('**' + endpoints.details + '*', route => route.fulfill(json(mockData[0])));
// Block external calls
await context.route(url => url.href.slice(0, host.length) != host, route => {
if (DEBUG) {
console.log('aborting', route.request().url());
}
route.abort();
});
page = await context.newPage();
});
afterEach(async () => {
await page.close();
await context.close();
});
describe('Authentication [ 20 Points ]', () => {
it('register does not work with empty fields [ 5 Points ]', async () => {
const endpoint = '**' + endpoints.register;
let called = false;
page.route(endpoint, route => called = true);
await page.goto(host);
await page.click('text=Register');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.click('[type="submit"]');
await page.waitForTimeout(300);
expect(called).to.be.false;
});
it('register makes correct API call [ 5 Points ]', async () => {
const endpoint = '**' + endpoints.register;
const username = 'Ivan';
const email = '[email protected]';
const password = '345321';
page.route(endpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('text=Register');
|
await page.fill('[name="username"]', username);
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.fill('[name="repeatPass"]', password);
await page.check('#male');
await page.waitForTimeout(300);
const [response] = await Promise.all([
page.waitForResponse(endpoint),
page.click('[type="submit"]')
]);
const postData = JSON.parse(response.request().postData());
expect(postData.email).to.equal(email);
expect(postData.password).to.equal(password);
});
it('login makes correct API call [ 5 Points ]', async () => {
const endpoint = '**' + endpoints.login;
const email = '[email protected]';
const password = '345321';
page.route(endpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
const [response] = await Promise.all([
page.waitForResponse(endpoint),
page.click('[type="submit"]')
]);
const postData = JSON.parse(response.request().postData());
expect(postData.email).to.equal(email);
expect(postData.password).to.equal(password);
});
it('logout makes correct API call [ 5 Points ]', async () => {
const loginEndpoint = '**' + endpoints.login;
const email = '[email protected]';
const password = '345321';
page.route(loginEndpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
await Promise.all([
page.waitForResponse(loginEndpoint),
page.click('[type="submit"]')
]);
const endpoint = '**' + endpoints.logout;
await page.waitForTimeout(300);
const [request] = await Promise.all([
page.waitForRequest(endpoint),
page.click('nav >> text="Logout"')
]);
const token = request.headers()['x-authorization'];
expect(request.method()).to.equal('GET');
expect(token).to.equal('AAAA');
});
});
describe('Navigation bar [ 5 Points ]', () => {
const email = '[email protected]';
const password = '345321';
it('logged user should see correct navigation [ 2.5 Points ]', async () => {
// Login user
const endpoint = '**' + endpoints.login;
page.route(endpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
await Promise.all([
page.waitForResponse(endpoint),
page.click('[type="submit"]')
]);
//Test for navigation
await page.waitForTimeout(300);
expect(await page.isVisible('nav >> text="All Memes"')).to.be.true;
expect(await page.isVisible('nav >> text="Create Meme"')).to.be.true;
expect(await page.isVisible('nav >> text="My Profile"')).to.be.true;
expect(await page.isVisible('nav >> text="Logout"')).to.be.true;
expect(await page.isVisible('nav >> text="Login"')).to.be.false;
expect(await page.isVisible('nav >> text="Register"')).to.be.false;
expect(await page.isVisible('nav >> text="Home Page"')).to.be.false;
});
it('guest user should see correct navigation [ 2.5 Points ]', async () => {
await page.goto(host);
await page.waitForTimeout(300);
expect(await page.isVisible('text="All Memes"')).to.be.true;
expect(await page.isVisible('text="Create Meme"')).to.be.false;
expect(await page.isVisible('text="My Profile"')).to.be.false;
expect(await page.isVisible('text="Logout"')).to.be.false;
expect(await page.isVisible('text="Login"')).to.be.true;
expect(await page.isVisible('text="Register"')).to.be.true;
expect(await page.isVisible('text="Home Page"')).to.be.true;
});
});
describe('Catalog [ 25 Points ]', () => {
it('loads static home page [ 5 Points ]', async () => {
await page.goto(host);
await page.waitForSelector('text=Welcome to Meme Lounge');
await page.waitForTimeout(300);
expect(await page.isVisible('text=Login to see our memes')).to.be.true;
expect(await page.isVisible('#button-div >> text=Login')).to.be.true;
expect(await page.isVisible('#button-div >> text=Register')).to.be.true;
});
it('show most recent memes [ 10 Points ]', async () => {
await page.goto(host);
await page.click('text=All Memes');
await page.waitForTimeout(300);
const titles = await page.$$eval('#memes .meme-title', t => t.map(s => s.textContent));
await page.waitForTimeout(300);
expect(titles.length).to.equal(6);
expect(titles[0]).to.contains('test');
expect(titles[1]).to.contains('meme 2');
expect(titles[2]).to.contains('test 3');
expect(titles[3]).to.contains('meme 4');
expect(titles[4]).to.contains('test 5');
});
it('show meme details [ 5 Points ]', async () => {
await page.goto(host);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mockData[3])));
await page.click('.meme:has-text("meme 4") >> text="Details"');
await page.waitForTimeout(300);
await page.waitForSelector('#meme-details > h1:has-text("meme 4")');
await page.waitForSelector('.meme-description >p:has-text("description 4")');
const title = await page.textContent('h1');
const desc = await page.textContent('.meme-description >p');
const img = await page.getAttribute('.meme-img >img', 'src');
await page.waitForTimeout(300);
expect(title).to.contains(mockData[3].title);
expect(desc).to.contains(mockData[3].description);
expect(img).to.contains(mockData[3].imageUrl);
});
it('guest does NOT see delete button [ 5 Points ]', async () => {
await page.goto(host);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.click('.meme:first-child >> text="Details"');
await page.waitForTimeout(300);
expect(await page.isVisible('text="Delete"')).to.be.false;
expect(await page.isVisible('text="Edit"')).to.be.false;
});
});
describe('CRUD [ 40 Points ]', () => {
const email = '[email protected]';
const password = '345321';
// Login user
beforeEach(async () => {
const loginEndpoint = '**' + endpoints.login;
await page.route(loginEndpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await Promise.all([
page.waitForResponse(loginEndpoint),
page.click('[type="submit"]')
]);
});
it('create does NOT work with empty fields [ 5 Points ]', async () => {
const endpoint = '**' + endpoints.create;
let called = false;
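// Track whether the app ever calls the create endpoint; submitting an empty form must not reach it.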
await page.waitForTimeout(300);
await page.click('text="Create Meme"');
await page.waitForSelector('form');
await page.route(endpoint, route => called = true);
await page.click('[type="submit"]');
await page.waitForTimeout(300);
expect(called).to.be.false;
});
it('create makes correct API call for logged in user [ 10 Points ]', async () => {
const endpoint = '**' + endpoints.create;
const mock = mockData[5];
await page.route(endpoint, route => route.fulfill(json(mock)));
await page.waitForTimeout(300);
await page.click('text=Create Meme');
await page.waitForSelector('form');
await page.fill('[name="title"]', mock.title);
await page.fill('[name="description"]', mock.description);
await page.fill('[name="imageUrl"]', mock.imageUrl);
await page.waitForTimeout(300);
const [response] = await Promise.all([
page.waitForResponse(endpoint),
page.click('[type="submit"]')
]);
const postData = JSON.parse(response.request().postData());
expect(postData.title).to.equal(mock.title);
expect(postData.description).to.equal(mock.description);
expect(postData.imageUrl).to.equal(mock.imageUrl);
});
it('non-author does NOT see delete and edit buttons [ 2.5 Points ]', async () => {
const mock = Object.assign({}, mockData[4], { _ownerId: '0002' }); // Replace mock with non-owned object
await page.goto(host);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mock)));
await page.click('.meme:has-text("meme 4") >> text="Details"');
await page.waitForTimeout(300);
await page.waitForSelector('h2:has-text("Meme Description")');
expect(await page.isVisible('text="Delete"')).to.be.false;
expect(await page.isVisible('text="Edit"')).to.be.false;
});
it('author sees delete and edit buttons [ 2.5 Points ]', async () => {
const mock = mockData[5];
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mock)));
await page.click('.meme:has-text("My New Meme") >> text="Details"');
await page.waitForTimeout(300);
await page.waitForSelector('#meme-details > h1:has-text("Meme Title: My New Meme")');
await page.waitForSelector('.meme-description >p:has-text("some description about this Meme")');
expect(await page.isVisible('text="Delete"')).to.be.true;
expect(await page.isEnabled('text="Delete"')).to.be.true;
expect(await page.isVisible('text="Edit"')).to.be.true;
expect(await page.isEnabled('text="Edit"')).to.be.true;
});
it('delete makes correct API call for logged in user [ 5 Points ]', async () => {
const mock = mockData[5];
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mock)));
await page.click('.meme:has-text("My New Meme") >> text="Details"');
await page.waitForSelector('#meme-details > h1:has-text("Meme Title: My New Meme")');
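// Auto-accept the confirmation dialog so the DELETE request can go through.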
page.on('dialog', dialog => dialog.accept());
await page.waitForTimeout(300);
const [request] = await Promise.all([
page.waitForRequest('**' + endpoints.delete + '74463e5b-b893-44e8-bd14-5fc8feeddb94'),
page.click('text="Delete"')
]);
expect(request.method()).to.equal('DELETE');
});
it('edit does NOT work with empty fields [ 5 Points ]', async () => {
const endpoint = endpoints.details;
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mockData[5])));
await page.click('.meme:has-text("test 5") >> text="Details"');
await page.waitForTimeout(300);
await page.click('text="Edit"');
await page.waitForTimeout(300);
let called = false;
await page.route(endpoint, route => called = true);
await page.fill('[name="title"]', '');
await page.fill('[name="description"]', '');
await page.fill('[name="imageUrl"]', '');
await page.click('[type="submit"]');
await page.waitForTimeout(300);
expect(called).to.be.false;
});
it('edit should populate form with correct data [ 5 Points ]', async () => {
const endpoint = endpoints.details;
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mockData[5])));
await page.click('.meme:has-text("test 5") >> text="Details"');
await page.waitForTimeout(300);
await page.click('text="Edit"');
await page.waitForTimeout(300);
const inputs = await page.$$eval('.container input', t => t.map(i => i.value));
const textArea = await page.$eval('.container textarea', i => i.value);
await page.waitForTimeout(300);
expect(inputs[0]).to.contains(mockData[5].title);
expect(inputs[1]).to.contains(mockData[5].imageUrl);
expect(textArea).to.contains(mockData[5].description);
});
it('edit makes correct API call for logged in user [ 5 Points ]', async () => {
const endpoint = endpoints.details;
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoint + '*', route => route.fulfill(json(mockData[5])));
await page.click('.meme:has-text("test 5") >> text="Details"');
await page.waitForTimeout(300);
await page.click('text="Edit"');
await page.waitForTimeout(300);
await page.fill('[name="title"]', mockData[0].title);
await page.fill('[name="description"]', mockData[0].description);
await page.fill('[name="imageUrl"]', mockData[0].imageUrl);
await page.waitForTimeout(300);
const [request] = await Promise.all([
page.waitForRequest('**' + endpoint + '74463e5b-b893-44e8-bd14-5fc8feeddb94'),
page.click('[type="submit"]')
]);
const postData = JSON.parse(request.postData());
expect(request.method()).to.equal('PUT');
expect(postData.title).to.contains(mockData[0].title);
expect(postData.description).to.contains(mockData[0].description);
expect(postData.imageUrl).to.equal(mockData[0].imageUrl);
});
});
describe('User Profile Page [ 10 Points ]', () => {
const email = '[email protected]';
const username = 'Merry';
const password = '123456';
const loginEndpoint = '**' + endpoints.login;
// Login user
beforeEach(async () => {
await page.route(loginEndpoint, route => route.fulfill(json({ _id: '0002', gender: 'female', username, email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForSelector('form');
await page.waitForTimeout(300);
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
await Promise.all([
page.waitForResponse(loginEndpoint),
page.click('[type="submit"]')
]);
});
it('check profile page information - with 0 memes [ 5 Points ]', async () => {
await page.route('**' + endpoints.profile, route => route.fulfill(json([])));
await page.waitForTimeout(300);
await page.click('text="My Profile"');
await page.waitForTimeout(300);
const values = await page.$$eval('.user-info p', p => p.map(p => p.textContent));
const img = await page.getAttribute('#user-avatar-url', 'src');
expect(values[0]).to.contains(username);
expect(values[1]).to.contains(email);
expect(values[2]).to.equal('My memes count: 0');
expect(img).to.contains('/images/female.png');
});
it('check profile page for "No memes in database." - with 0 memes [ 2.5 Points ]', async () => {
await page.waitForTimeout(300);
await page.route('**' + endpoints.profile, route => route.fulfill(json([])));
await page.click('text="My Profile"');
await page.waitForTimeout(300);
const userMemes = await page.textContent('.no-memes');
await page.waitForTimeout(300);
expect(userMemes).to.contains('No memes in database.');
});
it('check profile page information - with 2 memes [ 2.5 Points ]', async () => {
await page.route('**' + endpoints.profile, route => route.fulfill(json([mockData[0], mockData[1]])));
await page.waitForTimeout(300);
await page.click('text="My Profile"');
await page.waitForTimeout(300);
const memes = await page.$$eval('.user-meme-listings .user-meme', p => p.map(p => p.textContent));
await page.waitForTimeout(300);
expect(memes.length).to.equal(2);
expect(memes[0]).to.contains('test');
expect(memes[1]).to.contains('meme 2');
});
});
describe('BONUS: Notifications [ 5 Points ]', () => {
it('Login notification with invalid data', async () => {
const endpoint = '**' + endpoints.login;
let called = false;
await page.route(endpoint, route => called = true);
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
const preClickNotification = await page.isVisible('#errorBox');
expect(preClickNotification).to.be.false;
await page.click('[type="submit"]');
await page.waitForTimeout(300);
const notification = await page.isVisible('#errorBox');
expect(notification).to.be.true;
});
it('Register notification with invalid data', async () => {
const endpoint = '**' + endpoints.register;
let called = false;
await page.route(endpoint, route => called = true);
await page.goto(host);
await page.click('#button-div >> text="Register"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
const preClickNotification = await page.isVisible('#errorBox');
expect(preClickNotification).to.be.false;
await page.click('[type="submit"]');
await page.waitForTimeout(300);
const notification = await page.isVisible('#errorBox');
expect(notification).to.be.true;
});
it('Create notification with invalid data', async () => {
// Login user
const email = '[email protected]';
const password = '123456';
const loginEndpoint = '**' + endpoints.login;
await page.route(loginEndpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
await Promise.all([
page.waitForResponse(loginEndpoint),
page.click('[type="submit"]')
]);
//Test
await page.waitForTimeout(300);
const endpoint = '**' + endpoints.details;
let called = false;
await page.route(endpoint, route => called = true);
await page.click('nav >> text="Create Meme"');
await page.waitForTimeout(300);
const preClickNotification = await page.isVisible('#errorBox');
expect(preClickNotification).to.be.false;
await page.click('[type="submit"]');
await page.waitForTimeout(300);
const notification = await page.isVisible('#errorBox');
expect(notification).to.be.true;
});
it('Edit notification with invalid data', async () => {
// Login user
const email = '[email protected]';
const password = '123456';
const loginEndpoint = '**' + endpoints.login;
await page.route(loginEndpoint, route => route.fulfill(json({ _id: '0001', email, accessToken: 'AAAA' })));
await page.goto(host);
await page.click('#button-div >> text="Login"');
await page.waitForTimeout(300);
await page.waitForSelector('form');
await page.fill('[name="email"]', email);
await page.fill('[name="password"]', password);
await page.waitForTimeout(300);
await Promise.all([
page.waitForResponse(loginEndpoint),
page.click('[type="submit"]')
]);
//Test
const endpoint = endpoints.details;
await page.waitForTimeout(300);
await page.click('text=All Memes');
await page.waitForTimeout(300);
await page.route('**' + endpoints.details + '*', route => route.fulfill(json(mockData[5])));
await page.click('.meme:has-text("My New Meme") >> text="Details"');
await page.waitForTimeout(300);
await page.click('text="Edit"');
await page.waitForTimeout(300);
const preClickNotification = await page.isVisible('#errorBox');
expect(preClickNotification).to.be.false;
await page.fill('[name="title"]', '');
await page.fill('[name="description"]', '');
await page.fill('[name="imageUrl"]', '');
await page.waitForTimeout(300);
await page.click('[type="submit"]');
await page.waitForTimeout(300);
const notification = await page.isVisible('#errorBox');
expect(notification).to.be.true;
});
});
}); |
await page.waitForTimeout(300);
await page.waitForSelector('form');
|
pathHelper.test.ts | import * as pathHelper from '../src/inputProcessing/pathHelper';
import * as path from 'path';
import * as glob from 'glob';
import * as core from '@actions/core';
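// jest.mock is hoisted above the imports, so pathHelper picks up these patterns instead of the real inputs module.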
jest.mock('../src/inputProcessing/inputs', () => {
return {
includePathPatterns: ['policies1/**', 'policies2/**'],
excludePathPatterns: ['policies2/ignorePolicies/**'],
assignmentPatterns: ['assign.*.json']
}
});
describe('Testing all functions in pathHelper file', () => {
test('getAllPolicyDefinitionPaths() - get all directories in non-excluded paths with policy.json', () => {
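// Stub glob.sync so every include/exclude pattern resolves to a fixed file list.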
jest.spyOn(glob, 'sync').mockImplementation((pattern) => {
if (pattern == path.join('policies1', '**', 'policy.json')) return [
path.join('policies1', 'somePolicies', 'policy.json'),
path.join('policies1', 'policy.json'),
];
if (pattern == path.join('policies2', '**', 'policy.json')) return [
path.join('policies2', 'ignorePolicies', 'policy.json'),
path.join('policies2', 'somePolicies', 'policy.json')
];
if (pattern == path.join('policies2', 'ignorePolicies', '**', 'policy.json')) return [
path.join('policies2', 'ignorePolicies', 'policy.json')
];
return [];
});
jest.spyOn(core, 'debug').mockImplementation();
expect(pathHelper.getAllPolicyDefinitionPaths()).toMatchObject([
path.join('policies1', 'somePolicies'),
path.join('policies1'),
path.join('policies2', 'somePolicies')
]);
});
test('getAllInitiativesPaths() - get all directories in non-excluded paths with policyset.json', () => {
jest.spyOn(glob, 'sync').mockImplementation((pattern) => {
if (pattern == path.join('policies1', '**', 'policyset.json')) return [
path.join('policies1', 'somePolicies', 'policyset.json'),
path.join('policies1', 'policyset.json'),
];
if (pattern == path.join('policies2', '**', 'policyset.json')) return [
path.join('policies2', 'ignorePolicies', 'policyset.json'),
path.join('policies2', 'somePolicies', 'policyset.json')
];
if (pattern == path.join('policies2', 'ignorePolicies', '**', 'policyset.json')) return [
path.join('policies2', 'ignorePolicies', 'policyset.json') | expect(pathHelper.getAllInitiativesPaths()).toMatchObject([
path.join('policies1', 'somePolicies'),
path.join('policies1'),
path.join('policies2', 'somePolicies')
]);
});
test('getAllPolicyAssignmentPaths() - get all assignment files in input paths parameter with input pattern', () => {
jest.spyOn(glob, 'sync').mockImplementation((pattern) => {
if (pattern == path.join('policies1', '**', 'assign.*.json')) return [
path.join('policies1', 'somePolicies', 'assign.one.json'),
path.join('policies1', 'assign.two.json')
];
if (pattern == path.join('policies2', '**', 'assign.*.json')) return [
path.join('policies2', 'ignorePolicies', 'assign.three.json'),
path.join('policies2', 'somePolicies', 'assign.four.json')
];
if (pattern == path.join('policies2', 'ignorePolicies', '**', 'assign.*.json')) return [
path.join('policies2', 'ignorePolicies', 'assign.three.json')
];
return [];
});
jest.spyOn(core, 'debug').mockImplementation();
expect(pathHelper.getAllPolicyAssignmentPaths()).toMatchObject([
path.join('policies1', 'somePolicies', 'assign.one.json'),
path.join('policies1', 'assign.two.json'),
path.join('policies2', 'somePolicies', 'assign.four.json'),
]);
});
test('getAllAssignmentInPaths() - get all assignment files in given paths parameter with input pattern', () => {
jest.spyOn(glob, 'sync').mockImplementation((pattern) => {
if (pattern == path.join('policies2', 'ignorePolicies', '**', 'assign.*.json')) return [
path.join('policies2', 'ignorePolicies', 'assign.one.json')
];
return [];
});
jest.spyOn(core, 'debug').mockImplementation();
expect(pathHelper.getAllAssignmentInPaths(['policies2/ignorePolicies/**'])).toMatchObject([
path.join('policies2', 'ignorePolicies', 'assign.one.json')
]);
});
}); | ];
return [];
});
jest.spyOn(core, 'debug').mockImplementation();
|
app.py | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
import time
# pip install selenium==2.53.6
"""
If you want to run the script with your regular Firefox profile instead:
profile = webdriver.FirefoxProfile('/home/{your_username}/.mozilla/firefox/{your_default_profile}')
driver = webdriver.Firefox(profile)
"""
driver = webdriver.Chrome(executable_path=r'chromedriver.exe')
def wait(no):
|
def open_website():
""" Opens the website """
driver.get('https://10fastfingers.com/typing-test/english')
wait(5) # Due to slow network speed
def run_hack():
""" Implement the GOD speed hack """
open_website()
input_field = driver.find_element_by_id('inputfield')
try:
i = 0
while True:
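# Each word on the page carries a sequential "wordnr" attribute; type them in order until the lookup fails.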
elements = driver.find_element_by_xpath("//span[@wordnr='" + str(i) + "']")
print(elements.text)
input_field.send_keys(elements.text)
input_field.send_keys(" ")
i += 1
except NoSuchElementException:
print("Words completed")
def main():
""" Driver function """
run_hack()
if __name__ == '__main__':
main() | """ Waits for a particular time """
time.sleep(no) |
pollable_input_stream.rs | // Take a look at the license at the top of the repository in the LICENSE file.
use crate::prelude::*;
use crate::Cancellable;
use crate::PollableInputStream;
use futures_core::task::{Context, Poll};
use futures_io::AsyncRead;
use glib::object::{Cast, IsA};
use glib::translate::*;
use std::cell::RefCell;
use std::io;
use std::mem::transmute;
use std::ptr;
use futures_core::stream::Stream;
use std::pin::Pin;
pub trait PollableInputStreamExtManual: Sized {
#[doc(alias = "g_pollable_input_stream_create_source")]
fn create_source<F, C>(
&self,
cancellable: Option<&C>,
name: Option<&str>,
priority: glib::Priority,
func: F,
) -> glib::Source
where
F: FnMut(&Self) -> glib::Continue + 'static,
C: IsA<Cancellable>;
fn create_source_future<C: IsA<Cancellable>>(
&self,
cancellable: Option<&C>,
priority: glib::Priority,
) -> Pin<Box<dyn std::future::Future<Output = ()> + 'static>>;
fn create_source_stream<C: IsA<Cancellable>>(
&self,
cancellable: Option<&C>,
priority: glib::Priority,
) -> Pin<Box<dyn Stream<Item = ()> + 'static>>;
#[doc(alias = "g_pollable_input_stream_read_nonblocking")]
fn read_nonblocking<C: IsA<Cancellable>>(
&self,
buffer: &mut [u8],
cancellable: Option<&C>,
) -> Result<isize, glib::Error>;
fn into_async_read(self) -> Result<InputStreamAsyncRead<Self>, Self>
where
Self: IsA<PollableInputStream>,
{
if self.can_poll() {
Ok(InputStreamAsyncRead(self))
} else {
Err(self)
}
}
}
impl<O: IsA<PollableInputStream>> PollableInputStreamExtManual for O {
fn | <F, C>(
&self,
cancellable: Option<&C>,
name: Option<&str>,
priority: glib::Priority,
func: F,
) -> glib::Source
where
F: FnMut(&Self) -> glib::Continue + 'static,
C: IsA<Cancellable>,
{
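// The boxed closure travels through the C callback as a raw pointer:
// `trampoline` borrows it back on every dispatch, and `destroy_closure`
// reclaims the box when the source is destroyed.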
unsafe extern "C" fn trampoline<
O: IsA<PollableInputStream>,
F: FnMut(&O) -> glib::Continue + 'static,
>(
stream: *mut ffi::GPollableInputStream,
func: glib::ffi::gpointer,
) -> glib::ffi::gboolean {
let func: &RefCell<F> = &*(func as *const RefCell<F>);
let mut func = func.borrow_mut();
(&mut *func)(PollableInputStream::from_glib_borrow(stream).unsafe_cast_ref())
.into_glib()
}
unsafe extern "C" fn destroy_closure<O, F>(ptr: glib::ffi::gpointer) {
Box::<RefCell<F>>::from_raw(ptr as *mut _);
}
let cancellable = cancellable.map(|c| c.as_ref());
let gcancellable = cancellable.to_glib_none();
unsafe {
let source = ffi::g_pollable_input_stream_create_source(
self.as_ref().to_glib_none().0,
gcancellable.0,
);
let trampoline = trampoline::<Self, F> as glib::ffi::gpointer;
glib::ffi::g_source_set_callback(
source,
Some(transmute::<
_,
unsafe extern "C" fn(glib::ffi::gpointer) -> glib::ffi::gboolean,
>(trampoline)),
Box::into_raw(Box::new(RefCell::new(func))) as glib::ffi::gpointer,
Some(destroy_closure::<Self, F>),
);
glib::ffi::g_source_set_priority(source, priority.into_glib());
if let Some(name) = name {
glib::ffi::g_source_set_name(source, name.to_glib_none().0);
}
from_glib_full(source)
}
}
fn read_nonblocking<C: IsA<Cancellable>>(
&self,
buffer: &mut [u8],
cancellable: Option<&C>,
) -> Result<isize, glib::Error> {
let cancellable = cancellable.map(|c| c.as_ref());
let gcancellable = cancellable.to_glib_none();
let count = buffer.len();
unsafe {
let mut error = ptr::null_mut();
let ret = ffi::g_pollable_input_stream_read_nonblocking(
self.as_ref().to_glib_none().0,
buffer.to_glib_none().0,
count,
gcancellable.0,
&mut error,
);
if error.is_null() {
Ok(ret)
} else {
Err(from_glib_full(error))
}
}
}
fn create_source_future<C: IsA<Cancellable>>(
&self,
cancellable: Option<&C>,
priority: glib::Priority,
) -> Pin<Box<dyn std::future::Future<Output = ()> + 'static>> {
let cancellable: Option<Cancellable> = cancellable.map(|c| c.as_ref()).cloned();
let obj = self.clone();
Box::pin(glib::SourceFuture::new(move |send| {
let mut send = Some(send);
obj.create_source(cancellable.as_ref(), None, priority, move |_| {
let _ = send.take().unwrap().send(());
glib::Continue(false)
})
}))
}
fn create_source_stream<C: IsA<Cancellable>>(
&self,
cancellable: Option<&C>,
priority: glib::Priority,
) -> Pin<Box<dyn Stream<Item = ()> + 'static>> {
let cancellable: Option<Cancellable> = cancellable.map(|c| c.as_ref()).cloned();
let obj = self.clone();
Box::pin(glib::SourceStream::new(move |send| {
obj.create_source(cancellable.as_ref(), None, priority, move |_| {
if send.unbounded_send(()).is_err() {
glib::Continue(false)
} else {
glib::Continue(true)
}
})
}))
}
}
#[derive(Debug)]
pub struct InputStreamAsyncRead<T: IsA<PollableInputStream>>(T);
impl<T: IsA<PollableInputStream>> InputStreamAsyncRead<T> {
pub fn into_input_stream(self) -> T {
self.0
}
pub fn input_stream(&self) -> &T {
&self.0
}
}
impl<T: IsA<PollableInputStream>> AsyncRead for InputStreamAsyncRead<T> {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let stream = Pin::get_ref(self.as_ref());
let gio_result = stream
.0
.as_ref()
.read_nonblocking(buf, crate::Cancellable::NONE);
match gio_result {
Ok(size) => Poll::Ready(Ok(size as usize)),
Err(err) => {
let kind = err.kind::<crate::IOErrorEnum>().unwrap();
if kind == crate::IOErrorEnum::WouldBlock {
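// Not readable yet: attach a pollable source that wakes this task
// once data arrives, then report Pending.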
let mut waker = Some(cx.waker().clone());
let source = stream.0.as_ref().create_source(
crate::Cancellable::NONE,
None,
glib::PRIORITY_DEFAULT,
move |_| {
if let Some(waker) = waker.take() {
waker.wake();
}
glib::Continue(false)
},
);
let main_context = glib::MainContext::ref_thread_default();
source.attach(Some(&main_context));
Poll::Pending
} else {
Poll::Ready(Err(io::Error::new(io::ErrorKind::from(kind), err)))
}
}
}
}
}
| create_source |
translate_genes_ref.rs | use crate::align::params::AlignPairwiseParams;
use crate::gene::gene::GeneStrand;
use crate::io::gene_map::GeneMap;
use crate::io::nuc::Nuc;
use crate::translate::complement::reverse_complement_in_place;
use crate::translate::translate::translate;
use crate::translate::translate_genes::{Translation, TranslationMap};
use eyre::Report;
/// Translates genes in reference sequence
pub fn translate_genes_ref(
ref_seq: &[Nuc],
gene_map: &GeneMap,
params: &AlignPairwiseParams,
) -> Result<TranslationMap, Report> | {
gene_map
.iter()
.map(|(gene_name, gene)| -> Result<(String, Translation), Report> {
let mut gene_nuc_seq = ref_seq[gene.start..gene.end].to_vec();
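// Genes on the reverse strand are stored as forward-strand sequence,
// so reverse-complement the slice to obtain the coding strand.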
if gene.strand == GeneStrand::Reverse {
reverse_complement_in_place(&mut gene_nuc_seq);
}
let peptide = translate(&gene_nuc_seq, gene, params)?;
Ok((gene_name.clone(), peptide))
})
.collect::<Result<TranslationMap, Report>>()
} |
|
main_page.py | from selenium.webdriver.support.wait import WebDriverWait
class MainPage:
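"""Page object wrapping the LiteCart shop main page."""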
def __init__(self, driver):
|
def open(self):
self.driver.get("http://localhost/litecart")
return self
@property
def choose_item_on_main_page(self):
return self.driver.find_element_by_css_selector("div.content a.link") | self.driver = driver
self.wait = WebDriverWait(driver, 10) |
namespaceSerialCpp.py | #!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1554930760.750316
__CHEETAH_genTimestamp__ = 'Wed Apr 10 14:12:40 2019'
__CHEETAH_src__ = 'namespaceSerialCpp.tmpl'
__CHEETAH_srcLastModified__ = 'Wed Apr 10 11:25:47 2019'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %s. Templates compiled before version %s must be recompiled.'%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class namespaceSerialCpp(Template):
##################################################
## CHEETAH GENERATED METHODS
def __init__(self, *args, **KWs):
super(namespaceSerialCpp, self).__init__(*args, **KWs)
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
def | (self, trans=None):
## CHEETAH: main method generated for this template
if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
trans = self.transaction # is None unless self.awake() was called
if not trans:
trans = DummyTransaction()
_dummyTrans = True
else: _dummyTrans = False
write = trans.response().write
SL = self._CHEETAH__searchList
_filter = self._CHEETAH__currentFilter
########################################
## START - generated method body
if VFSL([locals()]+SL+[globals(), builtin],"namespace_list",True) != None: # generated from line 1, col 1
for n in VFSL([locals()]+SL+[globals(), builtin],"namespace_list",True): # generated from line 2, col 2
write(u'''namespace ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"n",True) # u'${n}' on line 3, col 11
if _v is not None: write(_filter(_v, rawExpr=u'${n}')) # from line 3, col 11.
write(u''' {
''')
for (memname,type,size,format,comment) in VFSL([locals()]+SL+[globals(), builtin],"mem_list",True): # generated from line 6, col 1
if VFSL([locals()]+SL+[globals(), builtin],"type",True) == "string": # generated from line 7, col 1
write(u'''
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 9, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 9, col 5.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 9, col 14
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 9, col 14.
write(u'''String::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 9, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 9, col 32.
write(u'''String(const char* src): StringBase() {
this->copyBuff(src,this->getCapacity());
}
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 13, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 13, col 5.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 13, col 14
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 13, col 14.
write(u'''String::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 13, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 13, col 32.
write(u'''String(const Fw::StringBase& src): StringBase() {
this->copyBuff(src.toChar(),this->getCapacity());
}
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 17, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 17, col 5.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 17, col 14
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 17, col 14.
write(u'''String::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 17, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 17, col 32.
write(u'''String(const ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 17, col 55
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 17, col 55.
write(u'''String& src): StringBase() {
this->copyBuff(src.toChar(),this->getCapacity());
}
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 21, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 21, col 5.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 21, col 14
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 21, col 14.
write(u'''String::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 21, col 32
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 21, col 32.
write(u'''String(void): StringBase() {
this->m_buf[0] = 0;
}
''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 25, col 5
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 25, col 5.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 25, col 14
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 25, col 14.
write(u'''String::~''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 25, col 33
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 25, col 33.
write(u'''String(void) {
}
bool ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 28, col 10
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 28, col 10.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 28, col 19
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 28, col 19.
write(u'''String::operator==(const ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 28, col 54
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 28, col 54.
write(u'''String& src) const {
return (0 == strncmp(this->m_buf,src.m_buf,sizeof(this->m_buf)));
}
NATIVE_UINT_TYPE ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 32, col 22
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 32, col 22.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 32, col 31
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 32, col 31.
write(u'''String::length(void) const {
return (NATIVE_UINT_TYPE)strnlen(this->m_buf,sizeof(this->m_buf));
}
const char* ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 36, col 17
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 36, col 17.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 36, col 26
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 36, col 26.
write(u'''String::toChar(void) const {
return this->m_buf;
}
void ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 40, col 10
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 40, col 10.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 40, col 19
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 40, col 19.
write(u'''String::copyBuff(const char* buff, NATIVE_UINT_TYPE size) {
FW_ASSERT(buff);
// check for self copy
if (buff != this->m_buf) {
(void)strncpy(this->m_buf,buff,size);
// NULL terminate
this->terminate(sizeof(this->m_buf));
}
}
Fw::SerializeStatus ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 50, col 25
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 50, col 25.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 50, col 34
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 50, col 34.
write(u'''String::serialize(Fw::SerializeBufferBase& buffer) const {
NATIVE_UINT_TYPE strSize = strnlen(this->m_buf,sizeof(this->m_buf));
// serialize string
return buffer.serialize((U8*)this->m_buf,strSize);
}
Fw::SerializeStatus ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 56, col 25
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 56, col 25.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 56, col 34
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 56, col 34.
write(u'''String::deserialize(Fw::SerializeBufferBase& buffer) {
NATIVE_UINT_TYPE maxSize = sizeof(this->m_buf);
// deserialize string
Fw::SerializeStatus stat = buffer.deserialize((U8*)this->m_buf,maxSize);
// make sure it is null-terminated
this->terminate(maxSize);
return stat;
}
NATIVE_UINT_TYPE ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 66, col 22
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 66, col 22.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 66, col 31
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 66, col 31.
write(u'''String::getCapacity(void) const {
return sizeof(this->m_buf);
}
void ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 70, col 10
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 70, col 10.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 70, col 19
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 70, col 19.
write(u'''String::terminate(NATIVE_UINT_TYPE size) {
// null terminate the string
this->m_buf[size < sizeof(this->m_buf)?size:sizeof(this->m_buf)-1] = 0;
}
const ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 75, col 11
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 75, col 11.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 75, col 20
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 75, col 20.
write(u'''String& ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 75, col 38
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 75, col 38.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 75, col 47
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 75, col 47.
write(u'''String::operator=(const ''')
_v = VFSL([locals()]+SL+[globals(), builtin],"name",True) # u'${name}' on line 75, col 81
if _v is not None: write(_filter(_v, rawExpr=u'${name}')) # from line 75, col 81.
write(u'''::''')
_v = VFSL([locals()]+SL+[globals(), builtin],"memname",True) # u'${memname}' on line 75, col 90
if _v is not None: write(_filter(_v, rawExpr=u'${memname}')) # from line 75, col 90.
write(u'''String& other) {
this->copyBuff(other.m_buf,this->getCapacity());
return *this;
}
''')
########################################
## END - generated method body
return _dummyTrans and trans.response().getvalue() or ""
##################################################
## CHEETAH GENERATED ATTRIBUTES
_CHEETAH__instanceInitialized = False
_CHEETAH_version = __CHEETAH_version__
_CHEETAH_versionTuple = __CHEETAH_versionTuple__
_CHEETAH_genTime = __CHEETAH_genTime__
_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
_CHEETAH_src = __CHEETAH_src__
_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
_mainCheetahMethod_for_namespaceSerialCpp= 'respond'
## END CLASS DEFINITION
if not hasattr(namespaceSerialCpp, '_initCheetahAttributes'):
templateAPIClass = getattr(namespaceSerialCpp, '_CHEETAH_templateClass', Template)
templateAPIClass._addCheetahPlumbingCodeToClass(namespaceSerialCpp)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=namespaceSerialCpp()).run()
| respond |
up_test.go | package acceptance_test
import (
"fmt"
"os/exec"
"time"
acceptance "github.com/cloudfoundry/bosh-bootloader/acceptance-tests"
"github.com/cloudfoundry/bosh-bootloader/acceptance-tests/actors"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("up", func() {
var (
bbl actors.BBL
boshcli actors.BOSHCLI
directorAddress string
directorUsername string
directorPassword string
caCertPath string
stateDir string
iaas string
iaasHelper actors.IAASLBHelper
)
BeforeEach(func() {
acceptance.SkipUnless("bbl-up")
configuration, err := acceptance.LoadConfig()
Expect(err).NotTo(HaveOccurred())
iaas = configuration.IAAS
iaasHelper = actors.NewIAASLBHelper(iaas, configuration)
stateDir = configuration.StateFileDir
bbl = actors.NewBBL(stateDir, pathToBBL, configuration, "up-env")
boshcli = actors.NewBOSHCLI()
})
AfterEach(func() {
By("destroying the director and the jumpbox", func() {
session := bbl.Down()
Eventually(session, 10*time.Minute).Should(gexec.Exit(0))
})
})
It("bbl's up a new bosh director and jumpbox", func() {
By("cleaning up any leftovers", func() {
session := bbl.CleanupLeftovers(bbl.PredefinedEnvID())
Eventually(session, 10*time.Minute).Should(gexec.Exit())
}) | args := []string{
"--name", bbl.PredefinedEnvID(),
}
args = append(args, iaasHelper.GetLBArgs()...)
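// Bring the environment up with the IaaS-specific load balancer flags; this is allowed up to an hour.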
session := bbl.Up(args...)
Eventually(session, 60*time.Minute).Should(gexec.Exit(0))
By("exporting bosh environment variables", func() {
bbl.ExportBoshAllProxy()
})
By("checking if the bosh director exists via the bosh cli", func() {
directorAddress = bbl.DirectorAddress()
directorUsername = bbl.DirectorUsername()
directorPassword = bbl.DirectorPassword()
caCertPath = bbl.SaveDirectorCA()
directorExists := func() bool {
exists, err := boshcli.DirectorExists(directorAddress, directorUsername, directorPassword, caCertPath)
if err != nil {
fmt.Println(string(err.(*exec.ExitError).Stderr))
}
return exists
}
Eventually(directorExists, "1m", "10s").Should(BeTrue())
})
By("verifying that vm extensions were added to the cloud config", func() {
cloudConfig, err := boshcli.CloudConfig(directorAddress, caCertPath, directorUsername, directorPassword)
Expect(err).NotTo(HaveOccurred())
vmExtensions := acceptance.VmExtensionNames(cloudConfig)
iaasHelper.VerifyCloudConfigExtensions(vmExtensions)
})
By("checking if bbl print-env prints the bosh environment variables", func() {
stdout := bbl.PrintEnv()
Expect(stdout).To(ContainSubstring("export BOSH_ENVIRONMENT="))
Expect(stdout).To(ContainSubstring("export BOSH_CLIENT="))
Expect(stdout).To(ContainSubstring("export BOSH_CLIENT_SECRET="))
Expect(stdout).To(ContainSubstring("export BOSH_CA_CERT="))
})
By("rotating the jumpbox's ssh key", func() {
sshKey := bbl.SSHKey()
Expect(sshKey).NotTo(BeEmpty())
session := bbl.Rotate()
Eventually(session, 40*time.Minute).Should(gexec.Exit(0))
rotatedKey := bbl.SSHKey()
Expect(rotatedKey).NotTo(BeEmpty())
Expect(rotatedKey).NotTo(Equal(sshKey))
})
By("checking bbl up is idempotent", func() {
session := bbl.Up()
Eventually(session, 40*time.Minute).Should(gexec.Exit(0))
})
By("confirming that the load balancers exist", func() {
iaasHelper.ConfirmLBsExist(bbl.PredefinedEnvID())
})
By("verifying the bbl lbs output", func() {
stdout := bbl.Lbs()
iaasHelper.VerifyBblLBOutput(stdout)
})
By("deleting lbs", func() {
session := bbl.Plan("--name", bbl.PredefinedEnvID())
Eventually(session, 1*time.Minute).Should(gexec.Exit(0))
session = bbl.Up()
Eventually(session, 60*time.Minute).Should(gexec.Exit(0))
})
By("confirming that the load balancers no longer exist", func() {
iaasHelper.ConfirmNoLBsExist(bbl.PredefinedEnvID())
})
})
}) | |
action_test.py | import json
import os
os.environ["system_file"] = "./tests/testing_data/system.yaml"
from typing import Any, Dict, List, Optional, Text
import pytest
import responses
from mongoengine import connect, disconnect
from rasa_sdk import Tracker
from rasa_sdk.executor import CollectingDispatcher
from kairon.action_server.data_objects import HttpActionRequestBody, HttpActionConfig, HttpActionLog
from kairon.action_server.actions import ActionUtility, HttpAction
from kairon.action_server.exception import HttpActionFailure
from kairon.utils import Utility
def pytest_configure():
return {
'db_url': None,
}
class TestActions:
@pytest.fixture(autouse=True)
def setup(self):
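# Load the test configuration and connect to the test MongoDB before each test.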
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_evironment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(host=db_url)
@pytest.fixture
def mock_get_http_action_exception(self, monkeypatch):
def _raise_excep(*args, **kwargs):
raise HttpActionFailure("No HTTP action found for bot and action")
monkeypatch.setattr(ActionUtility, "get_http_action_config", _raise_excep)
@responses.activate
def test_execute_http_request_get_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
# file deepcode ignore HardcodedNonCryptoSecret: Random string for testing
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_get_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
responses.add(
method=responses.GET,
url=http_url,
json={'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]},
status=200
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.GET)
assert response
assert response['data'] == 'test_data'
assert len(response['test_class']) == 2
assert response['test_class'][1]['key2'] == 'value2'
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_post_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_post_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data added successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.POST, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_put_with_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_put_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.PUT,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.PUT, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
@responses.activate
def test_execute_http_request_delete_with_request_body_auth_token(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[responses.json_params_matcher(request_params)]
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_with_auth_token_no_request_body(self):
http_url = 'http://localhost:8080/mock'
auth_token = "bearer jkhfhkujsfsfslfhjsfhkjsfhskhfksj"
resp_msg = "Data deleted successfully"
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
)
response = ActionUtility.execute_http_request(auth_token=auth_token, http_url=http_url,
request_method=responses.DELETE, request_body=None)
assert response
assert response == resp_msg
assert responses.calls[0].request.headers['Authorization'] == auth_token
@responses.activate
def test_execute_http_request_delete_no_auth_token(self):
http_url = 'http://localhost:8080/mock'
resp_msg = "Data updated successfully"
request_params = {'data': 'test_data', 'test_class': [{'key': 'value'}, {'key2': 'value2'}]}
responses.add(
method=responses.DELETE,
url=http_url,
body=resp_msg,
status=200,
match=[
responses.json_params_matcher(request_params)
]
)
response = ActionUtility.execute_http_request(auth_token=None, http_url=http_url,
request_method=responses.DELETE, request_body=request_params)
assert response
assert response == resp_msg
assert 'Authorization' not in responses.calls[0].request.headers
def test_get_http_action_config(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "http_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_config_deleted_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="",
action_name="test_get_http_action_config_deleted_action",
response="${RESPONSE}",
http_url="http://www.digite.com",
request_method="POST",
params_list=http_params,
bot="bot",
user="user",
status=False
).save().to_mongo().to_dict()
expected = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_get_http_action_config_deleted_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
actual = ActionUtility.get_http_action_config("bot", "test_get_http_action_config_deleted_action")
assert actual is not None
assert expected['auth_token'] == actual['auth_token']
assert expected['action_name'] == actual['action_name']
assert expected['response'] == actual['response']
assert expected['http_url'] == actual['http_url']
assert expected['request_method'] == actual['request_method']
assert expected['params_list'] is not None
assert expected['params_list'][0]['key'] == actual['params_list'][0]['key']
assert expected['params_list'][0]['value'] == actual['params_list'][0]['value']
assert expected['params_list'][0]['parameter_type'] == actual['params_list'][0]['parameter_type']
assert expected['params_list'][1]['key'] == actual['params_list'][1]['key']
assert expected['params_list'][1]['value'] == actual['params_list'][1]['value']
assert expected['params_list'][1]['parameter_type'] == actual['params_list'][1]['parameter_type']
assert actual['status']
def test_get_http_action_no_bot(self):
try:
ActionUtility.get_http_action_config(bot=None, action_name="http_action")
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_no_http_action(self):
try:
ActionUtility.get_http_action_config(bot="bot", action_name=None)
assert False
except HttpActionFailure as ex:
assert str(ex) == "Bot name and action name are required"
def test_get_http_action_invalid_bot(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot1", "http_action")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_invalid_http_action(self):
http_params = [HttpActionRequestBody(key="key1", value="value1", parameter_type="slot"),
HttpActionRequestBody(key="key2", value="value2")]
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_get_http_action_no_request_body(self):
http_params = []
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="json",
http_url="http://test.com",
request_method="GET",
params_list=http_params,
bot="bot",
user="user"
).save().to_mongo().to_dict()
try:
ActionUtility.get_http_action_config("bot", "http_action1")
assert False
except HttpActionFailure as ex:
assert str(ex).__contains__("No HTTP action found for bot")
def test_prepare_request(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "slot_name": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param2", value="slot_name", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
assert actual_request_body
assert actual_request_body['param1'] == 'value1'
assert actual_request_body['param2'] == 'param2value'
def test_prepare_request_empty_slot(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="param3", value="", parameter_type="slot")]
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert not request_params['param3']
def test_prepare_request_sender_id(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
http_action_config_params = [HttpActionRequestBody(key="param1", value="value1"),
HttpActionRequestBody(key="user_id", value="", parameter_type="sender_id")]
tracker = Tracker(sender_id="[email protected]", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
request_params = ActionUtility.prepare_request(tracker=tracker, http_action_config_params=http_action_config_params)
assert request_params['param1'] == "value1"
assert request_params['user_id'] == "[email protected]"
def test_prepare_request_no_request_params(self):
slots = {"bot": "demo_bot", "http_action_config": "http_action_name", "param2": "param2value"}
events: List[Dict] = None
http_action_config_params: List[HttpActionRequestBody] = None
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=None,
followup_action=None, active_loop=None, latest_action_name=None)
actual_request_body = ActionUtility.prepare_request(tracker=tracker,
http_action_config_params=http_action_config_params)
# deepcode ignore C1801: empty request body for http request with no request body params
assert len(actual_request_body) == 0
@pytest.mark.asyncio
async def test_name(self):
assert await HttpAction().name() == "kairon_http_action"
def test_is_empty(self):
assert ActionUtility.is_empty("")
assert ActionUtility.is_empty(" ")
assert ActionUtility.is_empty(None)
assert not ActionUtility.is_empty("None")
def test_prepare_response(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json1)
assert response == 'The value of 2 in red is []'
json2 = json.dumps({
"data": [
{"a": {
"b": {
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}}},
{"a": {
"b": {
"43": 5,
"c": [1, 2],
"d": ['buggy', 'bumpers'],
}}}
]
})
response = ActionUtility.prepare_response("The value of ${data.0.a} in ${data.0.a.b} is ${data.0.a.b.d}", json2)
assert response == 'The value of {"b": {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}} in {"43": 30, "c": [], "d": ["red", "buggy", "bumpers"]} is [\'red\', \'buggy\', \'bumpers\']'
def test_prepare_response_key_not_present(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_string_response(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("The value of red is 0", json1)
assert response == "The value of red is 0"
def test_prepare_response_string_empty_response_string(self):
json1 = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
response = ActionUtility.prepare_response("", json1)
assert response == '{"a": {"b": {"3": 2, "43": 30, "c": [], "d": ["red", "buggy", "bumpers"]}}}'
def test_prepare_response_string_empty_request_output(self):
json1 = json.dumps("{}")
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.e}", json1)
assert False
except HttpActionFailure:
assert True
def test_prepare_response_invalid_response_json(self):
json_as_string = "Not a json string"
try:
ActionUtility.prepare_response("The value of ${a.b.3} in ${a.b.d.0} is ${a.b.c}", json_as_string)
assert False
except HttpActionFailure as e:
assert str(e) == 'Could not find value for keys in response'
def test_prepare_response_as_json_and_expected_as_plain_string(self):
        json_as_string = "Not a json string"
        response = ActionUtility.prepare_response("The value of 2 in red is []", json_as_string)
        assert response == 'The value of 2 in red is []'
def test_prepare_response_as_string_and_expected_as_none(self):
response = ActionUtility.prepare_response("The value of 2 in red is []", None)
assert response == 'The value of 2 in red is []'
@pytest.mark.asyncio
async def test_run_invalid_http_action(self, mock_get_http_action_exception):
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_http_action": "test_run_invalid_http_action",
"param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="test_run_invalid_http_action1",
response="json",
http_url="http://www.google.com",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
).save()
dispatcher: CollectingDispatcher = CollectingDispatcher()
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
await HttpAction().run(dispatcher, tracker, domain)
        assert str(dispatcher.messages[0]['text']).__contains__(
            "I have failed to process your request: No HTTP action found for bot")
log = HttpActionLog.objects(sender="sender1",
bot="5f50fd0a56b698ca10d35d2e",
status="FAILURE").get()
assert log['exception'].__contains__('No HTTP action found for bot')
@pytest.mark.asyncio
async def test_run_no_bot(self):
slots = {"bot": None, "http_action_config_http_action": "new_http_action", "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
tracker = Tracker(sender_id="sender2", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'I have failed to process your request'
log = HttpActionLog.objects(sender="sender2",
status="FAILURE").get()
assert log['exception'] == 'Bot id and HTTP action configuration name not found in slot'
@pytest.mark.asyncio
async def test_run_no_http_action(self):
slots = {"bot": "jhgfsjgfausyfgus", "http_action_config_http_action": None, "param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'http_action'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'I have failed to process your request'
@pytest.mark.asyncio
async def test_run(self, monkeypatch):
action = HttpActionConfig(
auth_token="bearer kjflksjflksajfljsdflinlsufisnflisjbjsdalibvs",
action_name="http_action",
response="This should be response",
http_url="http://www.google.com",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "http_action",
"param2": "param2value"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender_test_run", slots=slots, events=events, paused=False,
latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'This should be response'
log = HttpActionLog.objects(sender="sender_test_run",
status="SUCCESS").get()
assert not log['exception']
assert log['timestamp']
assert log['intent']
assert log['action']
assert log['bot_response']
assert log['api_response']
@pytest.mark.asyncio
async def test_run_with_post(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="Data added successfully, id:${RESPONSE}",
http_url="http://localhost:8080/mock",
request_method="POST",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8080/mock'
resp_msg = "5000"
responses.start()
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
        actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
        responses.stop()
assert actual is not None
assert actual[0]['name'] == 'KAIRON_ACTION_RESPONSE'
assert actual[0]['value'] == 'Data added successfully, id:5000'
@pytest.mark.asyncio
async def test_run_with_post_and_parameters(self, monkeypatch):
request_params = [HttpActionRequestBody(key='key1', value="value1"),
HttpActionRequestBody(key='key2', value="value2")]
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="Data added successfully, id:${RESPONSE}",
http_url="http://localhost:8080/mock",
request_method="POST",
params_list=request_params,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8080/mock'
resp_msg = "5000"
responses.start()
responses.add(
method=responses.POST,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender_test_run_with_post", slots=slots, events=events, paused=False,
latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'Data added successfully, id:5000'
log = HttpActionLog.objects(sender="sender_test_run_with_post",
action="test_run_with_post",
status="SUCCESS").get()
assert not log['exception']
assert log['timestamp']
assert log['intent'] == "test_run"
assert log['action'] == "test_run_with_post"
assert log['request_params'] == {"key1": "value1", "key2": "value2"}
assert log['api_response'] == '5000'
assert log['bot_response'] == 'Data added successfully, id:5000'
@pytest.mark.asyncio
async def test_run_with_get(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_get",
response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
http_url="http://localhost:8081/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8081/mock'
resp_msg = json.dumps({
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
})
responses.start()
responses.add(
method=responses.GET,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']) == 'The value of 2 in red is [\'red\', \'buggy\', \'bumpers\']'
@pytest.mark.asyncio
async def test_run_no_connection(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_post",
response="This should be response",
http_url="http://localhost:8085/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
slots = {"bot": "5f50fd0a56b698ca10d35d2e", "http_action_config_test_run": "test_run_with_post"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(actual[0]['value']).__contains__('I have failed to process your request')
@pytest.mark.asyncio
async def test_run_with_get_placeholder_vs_string_response(self, monkeypatch):
action = HttpActionConfig(
auth_token="",
action_name="test_run_with_get_string_http_response_placeholder_required",
response="The value of ${a.b.3} in ${a.b.d.0} is ${a.b.d}",
http_url="http://localhost:8080/mock",
request_method="GET",
params_list=None,
bot="5f50fd0a56b698ca10d35d2e",
user="user"
)
        def _get_action(*args, **kwargs):
return action.to_mongo().to_dict()
monkeypatch.setattr(ActionUtility, "get_http_action_config", _get_action)
http_url = 'http://localhost:8082/mock'
resp_msg = "This is string http response"
responses.start()
responses.add(
method=responses.GET,
url=http_url,
body=resp_msg,
status=200,
)
slots = {"bot": "5f50fd0a56b698ca10d35d2e",
"http_action_config_test_run": "test_run_with_get_string_http_response_placeholder_required"}
events = [{"event1": "hello"}, {"event2": "how are you"}]
dispatcher: CollectingDispatcher = CollectingDispatcher()
latest_message = {'text': 'get intents', 'intent_ranking': [{'name': 'test_run'}]}
tracker = Tracker(sender_id="sender1", slots=slots, events=events, paused=False, latest_message=latest_message,
followup_action=None, active_loop=None, latest_action_name=None)
domain: Dict[Text, Any] = None
action.save().to_mongo().to_dict()
actual: List[Dict[Text, Any]] = await HttpAction().run(dispatcher, tracker, domain)
responses.stop()
assert actual is not None
assert str(actual[0]['name']) == 'KAIRON_ACTION_RESPONSE'
assert str(
actual[0]['value']) == 'I have failed to process your request'
def test_attach_response_no_placeholder(self):
output = ActionUtility.attach_response("This has no placeholder", {"a": "b"})
assert output == "This has no placeholder"
def test_attach_response(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", {"dollars": "51"})
assert output == 'I want ${\'dollars\': \'51\'}'
def test_attach_response_int(self):
output = ActionUtility.attach_response("I want $${RESPONSE}", 51)
assert output == 'I want $51'
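    @staticmethod
    def attach_response_sketch(template, payload):
        # Hedged sketch, not kairon's implementation (the helper name is ours):
        # the three tests above pin down attach_response's contract as a plain
        # substitution of the literal ${RESPONSE} token with str(payload);
        # everything else, including a leading '$', passes through untouched.
        return template.replace("${RESPONSE}", str(payload))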
def test_retrieve_value_from_response(self):
keys = ["a.b.3", 'a.b']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
key_values = ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert key_values is not None
assert key_values['${a.b.3}'] == 2
assert key_values['${a.b}'] is not None
assert key_values['${a.b}']['3'] == 2
assert key_values['${a.b}']['d'][0] == 'red'
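    @staticmethod
    def retrieve_value_sketch(key, response):
        # Hedged sketch, not kairon's implementation (the helper name is ours):
        # the assertions above imply each key is a dot-separated path walked
        # one segment at a time, with numeric segments indexing into lists,
        # so "a.b.d.0" resolves to 'red' in the response used by this test.
        value = response
        for segment in key.split('.'):
            value = value[int(segment)] if isinstance(value, list) else value[segment]
        return value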
def test_retrieve_value_from_response_invalid_key(self):
keys = ["d.e.f", 'g.h']
resp_msg = {
"a": {
"b": {
"3": 2,
"43": 30,
"c": [],
"d": ['red', 'buggy', 'bumpers'],
}
}
}
try:
ActionUtility.retrieve_value_from_response(keys, resp_msg)
assert False
except HttpActionFailure as e:
assert str(e) == 'Unable to retrieve value for key from HTTP response: \'d\''
ex1.py | # input() reads a string with a line of input, stripping the '\n' (newline) at the end.
# This is all you need for most problems.
import os
os.system('cls')
file = open('shuffled_anagrams_sample_ts1_input.txt', 'r')
#overwrite input to mimic google input
def input():
line = file.readline()
return line
import math
import random
stringimp = 'IMPOSSIBLE'
t = int(input()) # read a line with a single integer
for i in range(1, t + 1):
    n = [str(s) for s in input()]  # read the input line as a list of characters
if '\n' in n :
n.remove('\n')
a = [0]*26
res = ''
for index in n:
a[ord(index)-ord('a')] = a[ord(index)-ord('a')]+1
if (max(a) > math.floor(len(n)/2)) :
res = stringimp
else:
test = True
while test :
possibilite = set(n)
test = False
for index in n :
possibiliten = set(possibilite)
if index in possibiliten:
                    possibiliten.remove(index)
if len(possibiliten) == 0 :
test=True
a = [0]*26
res = ''
for index in n:
a[ord(index)-ord('a')] = a[ord(index)-ord('a')]+1
break
else:
remove = random.choice(list(possibiliten))
res += remove
a[ord(remove)-ord('a')] = a[ord(remove)-ord('a')]-1
if a[ord(remove)-ord('a')] == 0:
possibilite.remove(remove)
print("Case #{}: {}".format(i, res))
    # check out .format's specification for more formatting options
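# Note on the retry loop above: the goal is a "shuffled anagram" — a random
# permutation of the input in which no index keeps its original character, so
# a character may only be placed at position i if it differs from n[i] (hence
# the per-position candidate set `possibiliten`). When the greedy random
# choice strands a position with no candidates left, the counts are reset and
# the whole shuffle is retried. The IMPOSSIBLE guard is sound: such a
# permutation exists exactly when no single letter fills more than
# floor(len(n)/2) of the positions.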
norm_module.py | import torch
from torch import nn
from utils import set_default
# This module is dedicated to Norm Macdonald
# Implementations from https://github.com/lucidrains/x-transformer
class RMSNorm(nn.Module):
def __init__(self, dim, eps=1e-8):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(dim))
def forward(self, x):
_norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / _norm.clamp(min=self.eps) * self.g
class ScaleNorm(nn.Module):
def __init__(self, dim, eps=1e-5):
super().__init__()
self.scale = dim ** -0.5
self.eps = eps
self.g = nn.Parameter(torch.ones(1))
def forward(self, x):
norm = torch.norm(x, dim=-1, keepdim=True) * self.scale
return x / norm.clamp(min=self.eps) * self.g
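# Both norms above divide by the scaled L2 norm of the last dimension: with
# scale = dim ** -0.5, torch.norm(x, dim=-1) * scale equals sqrt(mean(x ** 2)),
# i.e. the RMS of the feature vector. RMSNorm learns a per-channel gain g of
# shape (dim,), while ScaleNorm learns a single scalar gain; unlike
# nn.LayerNorm, neither subtracts the mean nor adds a bias term.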
def init_norm(_key, _config, _dim):
if _key not in _config:
norm_bool = False
norm_function = False
else:
assert type(_config[_key]) == str, f"{_config[_key]} is type {type(_config[_key])}, but should be a string!"
norm_bool = True
norm_function = get_norm(norm_type=_config[_key], dim=_dim)
return norm_bool, norm_function
def get_norm(norm_type: str, dim: int):
# TODO: Batch norm may involve rearranging
norm_type = norm_type.lower() # Make lowercase
if norm_type == 'layer_norm':
return nn.LayerNorm(dim)
elif norm_type == 'rms_norm':
return RMSNorm(dim)
elif norm_type == 'scale_norm':
return ScaleNorm(dim)
    else:
        raise ValueError(f"Norm: {norm_type} not available.")
class Norm(nn.Module):
def __init__(self,
config,
_streams,
):
super().__init__()
"""
Norm module
"""
# Configure input(s) and output(s)
self.input_name = set_default(_look='input_name', _dict=config, _default='x')
self.output_name = set_default(_look='output_name', _dict=config, _default='x')
self.input_dim = _streams[self.input_name][-1]
input_shape = _streams[self.input_name]
# Configuring norm
norm_name = set_default(_look='norm_type', _dict=config, _default='layer_norm')
self.norm = get_norm(norm_type=norm_name, dim=self.input_dim)
# Prepare streams info
self.streams_in_module = {'inputs': [[self.input_name, input_shape],
],
'outputs': [[self.output_name, input_shape],
]
}
def forward(self, _data):
        _data[self.output_name] = self.norm(_data[self.input_name])
        return _data
class ScaleAlongDimension(nn.Module):
def __init__(self,
config,
_streams,
):
super().__init__()
"""
Learned scale used in as a weighted residual, or for scaling mha heads (see NormFormer)
"""
# Configure input(s) and output(s)
self.input_name = set_default(_look='input_name', _dict=config, _default='x')
self.dim_to_scale = set_default(_look='dim_to_scale', _dict=config, _default=2, _type=int)
self.output_name = set_default(_look='output_name', _dict=config, _default='x')
self.input_shape = _streams[self.input_name]
assert self.dim_to_scale > 0, f'dim_to_scale must be greater than 0!'
        assert self.dim_to_scale <= len(self.input_shape), f'dim_to_scale must be less than or equal to the ' \
                                                           f'number of input dimensions!'
num_params = self.input_shape[self.dim_to_scale]
# Initialize gate to 1
self.scale = nn.Parameter(torch.ones(num_params), requires_grad=True)
# Built einsum input strings
self.einsum_in_1 = 'abcdef' # max of 6 dims
self.einsum_in_1 = self.einsum_in_1[:len(self.input_shape)]
self.einsum_in_2 = self.einsum_in_1[self.dim_to_scale]
print(f"{self.einsum_in_1},{self.einsum_in_2}->{self.einsum_in_1}")
# Prepare streams info
self.streams_in_module = {'inputs': [[self.input_name, self.input_shape],
],
'outputs': [[self.output_name, self.input_shape],
]
}
def forward(self, _data):
_data[self.output_name] = torch.einsum(f'{self.einsum_in_1},{self.einsum_in_2}->{self.einsum_in_1}', _data[self.input_name], self.scale)
        return _data
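# Hedged usage sketch for the stream modules above; the stream spec and config
# values are assumptions inferred from this file, not a documented API. Norm
# takes the feature size from the last entry of _streams[input_name] and
# applies the configured norm to that key of the _data dict:
#
#   streams = {'x': ['batch', 128, 512]}
#   norm = Norm(config={'norm_type': 'rms_norm'}, _streams=streams)
#   data = norm({'x': torch.randn(2, 128, 512)})  # data['x'] is now normalized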
aller.py | #
# This file is part of LiteX-Boards.
#
# Copyright (c) 2018-2019 Rohit Singh <[email protected]>
# Copyright (c) 2019 Florent Kermarrec <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform
from litex.build.openocd import OpenOCD
# IOs ----------------------------------------------------------------------------------------------
_io = [
# clk / rst
("clk100", 0, Pins("W19"), IOStandard("LVCMOS33")),
# leds (only a single rgb led, aliased here also)
("user_led", 0, Pins("AB21"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("AB22"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("U20"), IOStandard("LVCMOS33")),
# rgb led, active-low
("rgb_led", 0,
Subsignal("r", Pins("AB21")),
Subsignal("g", Pins("AB22")),
Subsignal("b", Pins("U20")),
IOStandard("LVCMOS33"),
),
# flash
("flash", 0,
Subsignal("cs_n", Pins("T19")),
Subsignal("mosi", Pins("P22")),
Subsignal("miso", Pins("R22")),
Subsignal("hold", Pins("R21")),
Subsignal("rst_n", Pins("R19")),
IOStandard("LVCMOS33")
),
("flash4x", 0, # clock needs to be accessed through STARTUPE2
Subsignal("cs_n", Pins("T19")),
Subsignal("dq", Pins("P22", "R22", "P21", "R21")),
IOStandard("LVCMOS33")
),
# tpm
("tpm", 0,
Subsignal("clk", Pins("W20")),
Subsignal("rst_n", Pins("V19")),
Subsignal("cs_n", Pins("Y18")),
Subsignal("mosi", Pins("Y19")),
Subsignal("miso", Pins("V18")),
IOStandard("LVCMOS33"),
),
# pcie
("pcie_x1", 0,
Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
Subsignal("clk_p", Pins("F6")),
Subsignal("clk_n", Pins("E6")),
Subsignal("rx_p", Pins("B8")),
Subsignal("rx_n", Pins("A8")),
Subsignal("tx_p", Pins("B4")),
Subsignal("tx_n", Pins("A4"))
),
("pcie_x4", 0,
Subsignal("rst_n", Pins("AB20"), IOStandard("LVCMOS33"), Misc("PULLUP=TRUE")),
Subsignal("clk_p", Pins("F6")),
Subsignal("clk_n", Pins("E6")),
Subsignal("rx_p", Pins("B8 D11 B10 D9")),
Subsignal("rx_n", Pins("A8 C11 A10 C9")),
Subsignal("tx_p", Pins("B4 D5 B6 D7")),
Subsignal("tx_n", Pins("A4 C5 A6 C7"))
),
# dram
("ddram", 0,
Subsignal("a", Pins(
"U6 T5 Y6 T6 V2 T4 Y2 R2",
"Y1 R4 W5 W1 AA6 U2"),
IOStandard("SSTL15")),
Subsignal("ba", Pins("W6 U5 R6"), IOStandard("SSTL15")),
Subsignal("ras_n", Pins("V5"), IOStandard("SSTL15")),
Subsignal("cas_n", Pins("T1"), IOStandard("SSTL15")),
Subsignal("we_n", Pins("R3"), IOStandard("SSTL15")),
Subsignal("dm", Pins("Y7 AA1"), IOStandard("SSTL15")),
Subsignal("dq", Pins(
"Y8 AB6 W9 AA8 AB7 V7 AB8 W7",
"V4 AB2 AA5 AB3 AB5 W4 AB1 AA4"),
IOStandard("SSTL15"),
Misc("IN_TERM=UNTUNED_SPLIT_50")),
Subsignal("dqs_p", Pins("V9 Y3"), IOStandard("DIFF_SSTL15")),
Subsignal("dqs_n", Pins("V8 AA3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_p", Pins("U3"), IOStandard("DIFF_SSTL15")),
Subsignal("clk_n", Pins("V3"), IOStandard("DIFF_SSTL15")),
Subsignal("cke", Pins("U1"), IOStandard("SSTL15")),
Subsignal("odt", Pins("W2"), IOStandard("SSTL15")),
Subsignal("reset_n", Pins("U7"), IOStandard("LVCMOS15")),
Subsignal("cs_n", Pins("T3"), IOStandard("SSTL15")),
Misc("SLEW=FAST"),
),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk100"
default_clk_period = 1e9/100e6
def __init__(self):
        XilinxPlatform.__init__(self, "xc7a200t-fbg484-2", _io, toolchain="vivado")
        self.add_platform_command("set_property INTERNAL_VREF 0.750 [get_iobanks 34]")
        self.toolchain.bitstream_commands = [
            "set_property BITSTREAM.CONFIG.SPI_BUSWIDTH 4 [current_design]",
            "set_property BITSTREAM.CONFIG.CONFIGRATE 16 [current_design]",
            "set_property BITSTREAM.GENERAL.COMPRESS TRUE [current_design]"
        ]
        self.toolchain.additional_commands = \
            ["write_cfgmem -force -format bin -interface spix4 -size 16 "
             "-loadbit \"up 0x0 {build_name}.bit\" -file {build_name}.bin"]
def create_programmer(self):
return OpenOCD("openocd_xc7_ft232.cfg", "bscan_spi_xc7a200t.bit")
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk100", loose=True), 1e9/100e6)
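# Hedged usage sketch (LiteX conventions, assumed rather than shown in this
# file): instantiate the platform, request IOs by name, and hand a Migen
# module to platform.build() to generate a bitstream with Vivado.
#
#   platform = Platform()
#   led = platform.request("user_led", 0)
#   platform.build(my_module)  # my_module drives `led`; the name is ours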