file_name | prefix | suffix | middle |
---|---|---|---|
results.go | package flavors
import (
"github.com/gophercloud/gophercloud"
"github.com/gophercloud/gophercloud/pagination"
)
// GetResult temporarily holds the response from a Get call.
type GetResult struct {
gophercloud.Result
}
// Extract provides access to the individual Flavor returned by the Get function.
func (r GetResult) Extract() (*Flavor, error) {
var s struct {
Flavor *Flavor `json:"flavor"`
}
err := r.ExtractInto(&s)
return s.Flavor, err
}
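// A minimal usage sketch (the client value, the flavor ID, and the shape of a
// Get(client, id) call are assumptions modeled on other gophercloud services,
// not part of this file):
//
//	flavor, err := flavors.Get(client, "1").Extract()
//	if err == nil {
//		fmt.Printf("flavor %d: %s\n", flavor.ID, flavor.Name)
//	}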
// Flavor records represent (virtual) hardware configurations for server resources in a region.
type Flavor struct {
// The flavor's unique identifier.
ID int `json:"id"`
// The RAM capacity for the flavor. |
// Links to access the flavor.
Links []gophercloud.Link
}
// FlavorPage contains a single page of the response from a List call.
type FlavorPage struct {
pagination.LinkedPageBase
}
// IsEmpty determines if a page contains any results.
func (page FlavorPage) IsEmpty() (bool, error) {
flavors, err := ExtractFlavors(page)
return len(flavors) == 0, err
}
// NextPageURL uses the response's embedded link reference to navigate to the next page of results.
func (page FlavorPage) NextPageURL() (string, error) {
var s struct {
Links []gophercloud.Link `json:"flavors_links"`
}
err := page.ExtractInto(&s)
if err != nil {
return "", err
}
return gophercloud.ExtractNextURL(s.Links)
}
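// The link layout this decodes is assumed to follow the usual OpenStack
// pagination shape, e.g.:
//
//	{"flavors_links": [{"href": "https://example/flavors?marker=...", "rel": "next"}]}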
// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation.
func ExtractFlavors(r pagination.Page) ([]Flavor, error) {
var s struct {
Flavors []Flavor `json:"flavors"`
}
err := (r.(FlavorPage)).ExtractInto(&s)
return s.Flavors, err
} | RAM int `json:"ram"`
// The Name field provides a human-readable moniker for the flavor.
Name string `json:"name"` |
filter-field-readonly-non-editable-tags-example.ts | /**
* @license
* Copyright 2019 Dynatrace LLC
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {
AfterViewInit,
ChangeDetectorRef,
Component,
ViewChild,
} from '@angular/core';
import {
DtFilterField,
DtFilterFieldDefaultDataSource,
} from '@dynatrace/barista-components/filter-field';
@Component({
selector: 'dt-example-filter-field-read-only-tags',
templateUrl: 'filter-field-readonly-non-editable-tags-example.html',
})
export class | <T> implements AfterViewInit {
@ViewChild(DtFilterField, { static: true }) filterField: DtFilterField<T>;
private DATA = {
autocomplete: [
{
name: 'AUT',
autocomplete: ['Linz', 'Vienna', 'Graz'],
},
{
name: 'USA',
autocomplete: [
'San Francisco',
'Los Angeles',
'New York',
{ name: 'Custom', suggestions: [] },
],
},
{
name: 'Requests per minute',
range: {
operators: {
range: true,
equal: true,
greaterThanEqual: true,
lessThanEqual: true,
},
unit: 's',
},
},
],
};
private _linzFilter = [
this.DATA.autocomplete[0],
this.DATA.autocomplete[0].autocomplete![0],
];
_filters = [this._linzFilter];
_dataSource = new DtFilterFieldDefaultDataSource(this.DATA);
constructor(private _changeDetectorRef: ChangeDetectorRef) {}
ngAfterViewInit(): void {
this.filterField.currentTags.subscribe(() => {
const linzTag = this.filterField.getTagForFilter(this._linzFilter);
if (linzTag) {
linzTag.editable = false;
linzTag.deletable = false;
this._changeDetectorRef.markForCheck();
}
});
}
}
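// Note on the pattern above: currentTags emits whenever the filter field
// rebuilds its tag list, so the lookup runs on every emission (not just once)
// and the Linz tag stays locked across re-renders. The same lookup could, as
// a sketch, re-enable the tag later:
//
//   const linzTag = this.filterField.getTagForFilter(this._linzFilter);
//   if (linzTag) { linzTag.editable = true; linzTag.deletable = true; }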
| DtExampleFilterFieldReadOnlyTags |
button_spec.js | var Vue = require('vue');
var Icon = require('../../../src/components/icons/icon.vue');
describe('Button Spec', function() {
var vm;
beforeEach(function () {
vm = new Vue({
template: '<icon value="edit"></icon>',
replace: false,
components: {
'icon': Icon
}
}).$mount('body');
}); | expect(true).toBe(true);
});
it('button test 2', function() {
expect(true).toBe(true);
});
}); |
it('button test 1', function() { |
literal_test.go | package exp
import (
"testing"
"github.com/stretchr/testify/suite"
)
type literalExpressionSuite struct {
suite.Suite
le LiteralExpression
}
func TestLiteralExpressionSuite(t *testing.T) {
suite.Run(t, &literalExpressionSuite{
le: NewLiteralExpression("? + ?", 1, 2),
})
}
func (les *literalExpressionSuite) TestClone() {
les.Equal(les.le, les.le.Clone())
}
func (les *literalExpressionSuite) TestExpression() {
les.Equal(les.le, les.le.Expression())
}
func (les *literalExpressionSuite) TestLiteral() {
les.Equal("? + ?", les.le.Literal()) |
func (les *literalExpressionSuite) TestArgs() {
les.Equal([]interface{}{1, 2}, les.le.Args())
}
func (les *literalExpressionSuite) TestAllOthers() {
le := les.le
rv := NewRangeVal(1, 2)
pattern := "literal like%"
inVals := []interface{}{1, 2}
testCases := []struct {
Ex Expression
Expected Expression
}{
{Ex: le.As("a"), Expected: aliased(le, "a")},
{Ex: le.Eq(1), Expected: NewBooleanExpression(EqOp, le, 1)},
{Ex: le.Neq(1), Expected: NewBooleanExpression(NeqOp, le, 1)},
{Ex: le.Gt(1), Expected: NewBooleanExpression(GtOp, le, 1)},
{Ex: le.Gte(1), Expected: NewBooleanExpression(GteOp, le, 1)},
{Ex: le.Lt(1), Expected: NewBooleanExpression(LtOp, le, 1)},
{Ex: le.Lte(1), Expected: NewBooleanExpression(LteOp, le, 1)},
{Ex: le.Asc(), Expected: asc(le)},
{Ex: le.Desc(), Expected: desc(le)},
{Ex: le.Between(rv), Expected: between(le, rv)},
{Ex: le.NotBetween(rv), Expected: notBetween(le, rv)},
{Ex: le.Like(pattern), Expected: NewBooleanExpression(LikeOp, le, pattern)},
{Ex: le.NotLike(pattern), Expected: NewBooleanExpression(NotLikeOp, le, pattern)},
{Ex: le.ILike(pattern), Expected: NewBooleanExpression(ILikeOp, le, pattern)},
{Ex: le.NotILike(pattern), Expected: NewBooleanExpression(NotILikeOp, le, pattern)},
{Ex: le.RegexpLike(pattern), Expected: NewBooleanExpression(RegexpLikeOp, le, pattern)},
{Ex: le.RegexpNotLike(pattern), Expected: NewBooleanExpression(RegexpNotLikeOp, le, pattern)},
{Ex: le.RegexpILike(pattern), Expected: NewBooleanExpression(RegexpILikeOp, le, pattern)},
{Ex: le.RegexpNotILike(pattern), Expected: NewBooleanExpression(RegexpNotILikeOp, le, pattern)},
{Ex: le.In(inVals), Expected: NewBooleanExpression(InOp, le, inVals)},
{Ex: le.NotIn(inVals), Expected: NewBooleanExpression(NotInOp, le, inVals)},
{Ex: le.Is(true), Expected: NewBooleanExpression(IsOp, le, true)},
{Ex: le.IsNot(true), Expected: NewBooleanExpression(IsNotOp, le, true)},
{Ex: le.IsNull(), Expected: NewBooleanExpression(IsOp, le, nil)},
{Ex: le.IsNotNull(), Expected: NewBooleanExpression(IsNotOp, le, nil)},
{Ex: le.IsTrue(), Expected: NewBooleanExpression(IsOp, le, true)},
{Ex: le.IsNotTrue(), Expected: NewBooleanExpression(IsNotOp, le, true)},
{Ex: le.IsFalse(), Expected: NewBooleanExpression(IsOp, le, false)},
{Ex: le.IsNotFalse(), Expected: NewBooleanExpression(IsNotOp, le, false)},
}
for _, tc := range testCases {
les.Equal(tc.Expected, tc.Ex)
}
} | } |
settings.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { FtrProviderContext } from '../../ftr_provider_context';
export function | ({ getService }: FtrProviderContext) {
const testSubjects = getService('testSubjects');
return {
async assertSettingsManageCalendarsLinkExists() {
await testSubjects.existOrFail('mlCalendarsMngButton');
},
async assertSettingsCreateCalendarLinkExists() {
await testSubjects.existOrFail('mlCalendarsCreateButton');
},
async assertSettingsManageFilterListsLinkExists() {
await testSubjects.existOrFail('mlFilterListsMngButton');
},
async assertSettingsCreateFilterListLinkExists() {
await testSubjects.existOrFail('mlFilterListsCreateButton');
},
};
}
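// Usage sketch (the registration and call site below are assumptions based on
// the common FTR provider pattern, not shown in this file):
//
//   const mlSettings = MachineLearningSettingsProvider(context);
//   await mlSettings.assertSettingsManageCalendarsLinkExists();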
| MachineLearningSettingsProvider |
filter.rs | use schema::*;
use diesel::*;
macro_rules! assert_sets_eq {
($set1:expr, $set2:expr) => {
let set1 = {$set1};
let set2 = {$set2};
let s1r: Vec<_> = set1.iter().filter(|&si| !set2.contains(si)).collect();
assert!(s1r.is_empty(), "left set contains items not found in right set: {:?}", s1r);
let s2r: Vec<_> = set2.iter().filter(|&si| !set1.contains(si)).collect();
assert!(s2r.is_empty(), "right set contains items not found in left set: {:?}", s2r);
};
}
#[test]
fn | () {
use schema::users::dsl::*;
let connection = connection_with_sean_and_tess_in_users_table();
let sean_id = find_user_by_name("Sean", &connection).id;
let tess_id = find_user_by_name("Tess", &connection).id;
let unused_id = sean_id + tess_id;
let sean = User::new(sean_id, "Sean");
let tess = User::new(tess_id, "Tess");
assert_eq!(Ok(sean), users.filter(id.eq(sean_id)).first(&connection));
assert_eq!(Ok(tess), users.filter(id.eq(tess_id)).first(&connection));
assert_eq!(Err(NotFound), users.filter(id.eq(unused_id)).first::<User>(&connection));
}
#[test]
fn filter_by_string_equality() {
use schema::users::dsl::*;
let connection = connection_with_sean_and_tess_in_users_table();
let sean = User::new(1, "Sean");
let tess = User::new(2, "Tess");
assert_eq!(Ok(sean), users.filter(name.eq("Sean")).first(&connection));
assert_eq!(Ok(tess), users.filter(name.eq("Tess")).first(&connection));
assert_eq!(Err(NotFound), users.filter(name.eq("Jim")).first::<User>(&connection));
}
#[test]
fn filter_by_equality_on_nullable_columns() {
use schema::users::dsl::*;
let connection = connection();
let data = vec![
NewUser::new("Sean", Some("black")),
NewUser::new("Tess", Some("brown")),
NewUser::new("Jim", Some("black")),
];
insert(&data).into(users).execute(&connection).unwrap();
let data = users.order(id).load::<User>(&connection).unwrap();
let sean = data[0].clone();
let tess = data[1].clone();
let jim = data[2].clone();
let source = users.filter(hair_color.eq("black"));
assert_sets_eq!(vec![sean, jim], source.load(&connection).unwrap());
let source = users.filter(hair_color.eq("brown"));
assert_eq!(vec![tess], source.load(&connection).unwrap());
}
#[test]
fn filter_by_is_not_null_on_nullable_columns() {
use schema::users::dsl::*;
let connection = connection();
let data = vec![
NewUser::new("Derek", Some("red")),
NewUser::new("Gordon", None),
];
insert(&data).into(users).execute(&connection).unwrap();
let data = users.order(id).load::<User>(&connection).unwrap();
let derek = data[0].clone();
let source = users.filter(hair_color.is_not_null());
assert_eq!(vec![derek], source.load(&connection).unwrap());
}
#[test]
fn filter_by_is_null_on_nullable_columns() {
use schema::users::dsl::*;
let connection = connection();
let data = vec![
NewUser::new("Derek", Some("red")),
NewUser::new("Gordon", None),
];
insert(&data).into(users).execute(&connection).unwrap();
let data = users.order(id).load::<User>(&connection).unwrap();
let gordon = data[1].clone();
let source = users.filter(hair_color.is_null());
assert_eq!(vec![gordon], source.load(&connection).unwrap());
}
#[test]
fn filter_after_joining() {
use schema::users::name;
let connection = connection_with_sean_and_tess_in_users_table();
connection.execute("INSERT INTO posts (id, title, user_id) VALUES
(1, 'Hello', 1), (2, 'World', 2)")
.unwrap();
let sean = User::new(1, "Sean");
let tess = User::new(2, "Tess");
let seans_post = Post::new(1, 1, "Hello", None);
let tess_post = Post::new(2, 2, "World", None);
let source = users::table.inner_join(posts::table);
assert_eq!(Ok((sean, seans_post)),
source.filter(name.eq("Sean")).first(&connection));
assert_eq!(Ok((tess, tess_post)),
source.filter(name.eq("Tess")).first(&connection));
assert_eq!(Err(NotFound),
source.filter(name.eq("Jim")).first::<(User, Post)>(&connection));
}
#[test]
fn select_then_filter() {
use schema::users::dsl::*;
let connection = connection_with_sean_and_tess_in_users_table();
let source = users.select(name);
assert_eq!(Ok("Sean".to_string()),
source.filter(name.eq("Sean")).first(&connection));
assert_eq!(Ok("Tess".to_string()),
source.filter(name.eq("Tess")).first(&connection));
assert_eq!(Err(NotFound), source.filter(name.eq("Jim")).first::<String>(&connection));
}
#[test]
fn filter_then_select() {
use schema::users::dsl::*;
let connection = connection();
let data = vec![NewUser::new("Sean", None), NewUser::new("Tess", None)];
insert(&data).into(users).execute(&connection).unwrap();
assert_eq!(Ok("Sean".to_string()),
users.filter(name.eq("Sean")).select(name).first(&connection));
assert_eq!(Ok("Tess".to_string()),
users.filter(name.eq("Tess")).select(name).first(&connection));
assert_eq!(Err(NotFound), users.filter(name.eq("Jim")).select(name)
.first::<String>(&connection));
}
#[test]
fn filter_on_multiple_columns() {
use schema::users::dsl::*;
let connection = connection();
let data: &[_] = &[
NewUser::new("Sean", Some("black")),
NewUser::new("Sean", Some("brown")),
NewUser::new("Sean", None),
NewUser::new("Tess", Some("black")),
NewUser::new("Tess", Some("brown")),
];
insert(data).into(users).execute(&connection).unwrap();
let data = users.order(id).load::<User>(&connection).unwrap();
let black_haired_sean = data[0].clone();
let brown_haired_sean = data[1].clone();
let black_haired_tess = data[3].clone();
let brown_haired_tess = data[4].clone();
let source = users.filter(name.eq("Sean").and(hair_color.eq("black")));
assert_eq!(vec![black_haired_sean], source.load(&connection).unwrap());
let source = users.filter(name.eq("Sean").and(hair_color.eq("brown")));
assert_eq!(vec![brown_haired_sean], source.load(&connection).unwrap());
let source = users.filter(name.eq("Tess").and(hair_color.eq("black")));
assert_eq!(vec![black_haired_tess], source.load(&connection).unwrap());
let source = users.filter(name.eq("Tess").and(hair_color.eq("brown")));
assert_eq!(vec![brown_haired_tess], source.load(&connection).unwrap());
}
#[test]
fn filter_called_twice_means_same_thing_as_and() {
use schema::users::dsl::*;
let connection = connection();
let data: &[_] = &[
NewUser::new("Sean", Some("black")),
NewUser::new("Sean", Some("brown")),
NewUser::new("Sean", None),
NewUser::new("Tess", Some("black")),
NewUser::new("Tess", Some("brown")),
];
insert(data).into(users).execute(&connection).unwrap();
let data = users.order(id).load::<User>(&connection).unwrap();
let black_haired_sean = data[0].clone();
let brown_haired_sean = data[1].clone();
let black_haired_tess = data[3].clone();
let brown_haired_tess = data[4].clone();
let source = users.filter(name.eq("Sean")).filter(hair_color.eq("black"));
assert_eq!(vec![black_haired_sean], source.load(&connection).unwrap());
let source = users.filter(name.eq("Sean")).filter(hair_color.eq("brown"));
assert_eq!(vec![brown_haired_sean], source.load(&connection).unwrap());
let source = users.filter(name.eq("Tess")).filter(hair_color.eq("black"));
assert_eq!(vec![black_haired_tess], source.load(&connection).unwrap());
let source = users.filter(name.eq("Tess")).filter(hair_color.eq("brown"));
assert_eq!(vec![brown_haired_tess], source.load(&connection).unwrap());
}
table! {
points (x) {
x -> Integer,
y -> Integer,
}
}
#[test]
fn filter_on_column_equality() {
use self::points::dsl::*;
let connection = connection();
connection.execute("INSERT INTO points (x, y) VALUES (1, 1), (1, 2), (2, 2)").unwrap();
let expected_data = vec![(1, 1), (2, 2)];
let query = points.order(x).filter(x.eq(y));
let data: Vec<_> = query.load(&connection).unwrap();
assert_sets_eq!(expected_data, data);
}
#[test]
fn filter_with_or() {
use schema::users::dsl::*;
let connection = connection_with_sean_and_tess_in_users_table();
insert(&NewUser::new("Jim", None)).into(users).execute(&connection).unwrap();
let expected_users = vec![User::new(1, "Sean"), User::new(2, "Tess")];
let data: Vec<_> = users.order(id)
.filter(name.eq("Sean").or(name.eq("Tess")))
.load(&connection).unwrap();
assert_sets_eq!(expected_users, data);
}
#[test]
fn or_doesnt_mess_with_precedence_of_previous_statements() {
use schema::users::dsl::*;
use diesel::expression::AsExpression;
let connection = connection_with_sean_and_tess_in_users_table();
let f = AsExpression::<types::Bool>::as_expression(false);
let count = users.filter(f).filter(f.or(true))
.count().first(&connection);
assert_eq!(Ok(0), count);
let count = users.filter(f.or(f).and(f.or(true)))
.count().first(&connection);
assert_eq!(Ok(0), count);
}
use diesel::types::VarChar;
sql_function!(lower, lower_t, (x: VarChar) -> VarChar);
#[test]
fn filter_by_boxed_predicate() {
fn by_name(name: &str) -> Box<BoxableExpression<users::table, TestBackend, SqlType=types::Bool>> {
Box::new(lower(users::name).eq(name.to_string()))
}
let connection = connection_with_sean_and_tess_in_users_table();
let sean = User::new(1, "Sean");
let tess = User::new(2, "Tess");
let queried_sean = users::table.filter(by_name("sean")).first(&connection);
let queried_tess = users::table.filter(by_name("tess")).first(&connection);
assert_eq!(Ok(sean), queried_sean);
assert_eq!(Ok(tess), queried_tess);
}
| filter_by_int_equality |
sqlite.py | import os
import sqlite3
from typing import List, Optional
from .storage_base import Storage
class SqliteStorage(Storage):
def __init__(self, db_path):
"""
Init table "data" with the attribute "key" being the primary key
:param db_path: str. Path to database file
"""
super().__init__()
db_path = os.path.expanduser(db_path)
if len(os.path.dirname(db_path)) > 0 and not os.path.exists(os.path.dirname(db_path)):
try:
os.makedirs(os.path.dirname(db_path))
except PermissionError:
raise PermissionError(f'Could not create database directory: {os.path.dirname(db_path)}') from None
self.conn = sqlite3.connect(os.path.expanduser(db_path))
c = self.conn.cursor()
c.execute("""
CREATE TABLE IF NOT EXISTS data (
key BLOB PRIMARY KEY,
value BLOB,
expire_time_ms INTEGER
)
""")
self.conn.commit()
def _put(self, key: bytes, value: bytes, expire_time_ms=None):
""" | :param expire_time_ms: Optional[int]. Value is not fresh if expire_time_ms is not specified.
"""
c = self.conn.cursor()
c.execute('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)',
(key, value, expire_time_ms))
self.conn.commit()
def _put_batch(self, keys: List[bytes], values: List[bytes], expire_time_mss: List[Optional[int]]):
"""
Batch insert.
:param keys: List[bytes].
:param values: List[bytes].
:param expire_time_mss: List[Optional[int]].
"""
c = self.conn.cursor()
c.executemany('INSERT OR REPLACE INTO data (key, value, expire_time_ms) VALUES (?, ?, ?)',
zip(keys, values, expire_time_mss))
self.conn.commit()
def _get(self, key: bytes, can_be_prefix=False, must_be_fresh=False) -> Optional[bytes]:
"""
Get value from sqlite3.
:param key: bytes.
:param can_be_prefix: bool.
:param must_be_fresh: bool.
:return: bytes.
"""
c = self.conn.cursor()
query = 'SELECT value FROM data WHERE '
if must_be_fresh:
query += f'(expire_time_ms > {time_ms()}) AND '
if can_be_prefix:
query += 'hex(key) LIKE ?'
c.execute(query, (key.hex() + '%', ))
else:
query += 'key = ?'
c.execute(query, (key, ))
ret = c.fetchone()
return ret[0] if ret else None
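# Usage sketch for the two query modes above (byte keys are illustrative):
#   storage._get(b'/ndn/file', can_be_prefix=False)  # exact key match
#   storage._get(b'/ndn', can_be_prefix=True)        # hex(key) LIKE '2f6e646e%'
# must_be_fresh additionally requires expire_time_ms to lie in the future, so
# rows stored with expire_time_ms=None never match a fresh-only query.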
def _remove(self, key: bytes) -> bool:
"""
Remove value from sqlite. Return whether removal is successful.
:param key: bytes.
:return: bool.
"""
c = self.conn.cursor()
n_removed = c.execute('DELETE FROM data WHERE key = ?', (key, )).rowcount
self.conn.commit()
return n_removed > 0 | Insert value and its expiration time into sqlite3, overwrite if already exists.
:param key: bytes.
:param value: bytes. |
dialog.tsx | import { useState } from 'react';
import type { FilterConfig, FoodConfig } from '@/types/config';
export default function Dialog({
filterer,
config,
setConfig,
}: {
filterer: (filter: FilterConfig) => void;
config: FoodConfig;
setConfig: (config: FoodConfig) => void;
}) {
const [showModal, setShowModal] = useState(false);
const [deliverable, setDeliverable] = useState('true');
const [size, setSize] = useState('4 people');
const [effort, setEffort] = useState('5');
const [cheeseometer, setCheeseometer] = useState('0');
function saveFilter() {
filterer({
effort: effort,
size: size,
deliverable: deliverable,
cheeseometer: cheeseometer,
});
setConfig({
filter: true,
random: config.random,
search: config.search,
searchInput: config.searchInput,
});
setShowModal(false);
}
function clearFilter() {
setConfig({
filter: false,
random: config.random,
search: config.search,
searchInput: config.searchInput,
});
setShowModal(false);
}
return (
<>
<button
className="p-2 px-5 m-3 mb-4 text-lg text-gray-100 bg-purple-600 rounded-lg hover:ring-4 ring-purple-400"
type="button"
onClick={() => setShowModal(true)}
>
Filter food
</button>
{showModal && (
<>
<div className="fixed inset-0 z-50 flex items-center justify-center overflow-x-hidden overflow-y-auto outline-none focus:outline-none">
<div className="relative w-auto max-w-3xl mx-auto my-6"> | {/*header*/}
<div className="flex items-start justify-between p-5 mx-16 border-b border-solid rounded-t border-blueGray-200">
<h3 className="mr-4 text-3xl font-semibold">Filter food</h3>
<button
className="float-right p-1 ml-4 text-3xl font-semibold leading-none text-black bg-transparent border-0 outline-none focus:outline-none"
onClick={() => setShowModal(false)}
>
<span className="block w-6 h-6 text-2xl text-black outline-none dark:text-white hover:text-gray-800 focus:outline-none">
×
</span>
</button>
</div>
{/*body*/}
<div className="relative flex-auto p-6">
<p className="my-4 text-lg leading-relaxed text-blueGray-500">
<label className="block max-w-lg mt-2 text-left">
<span className="text-gray-700 dark:text-gray-300">
Size
</span>
<select
className="block w-full mt-1 form-select"
onChange={(e) => setSize(e.target.value)}
value={size}
>
<option value="-">-</option>
<option value="1 person">1 person</option>
<option value="4 people">4 people</option>
<option value="all">all</option>
</select>
</label>
<label className="block max-w-lg mt-2 text-left">
<span className="text-gray-700 dark:text-gray-300">
Cheeseometer
</span>
<select
className="block w-full mt-1 form-select"
onChange={(e) => setCheeseometer(e.target.value)}
value={cheeseometer}
>
<option value="-">-</option>
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
</select>
</label>
<label className="block max-w-lg mt-2 text-left">
<span className="text-gray-700 dark:text-gray-300">
Deliverable
</span>
<select
className="block w-full mt-1 form-select"
onChange={(e) => setDeliverable(e.target.value)}
value={deliverable}
>
<option value="-">-</option>
<option value="true">Yes</option>
<option value="false">No</option>
</select>
</label>
<label className="block max-w-lg mt-2 text-left">
<span className="text-gray-700 dark:text-gray-300">
Effort
</span>
<select
className="block w-full mt-1 form-select"
onChange={(e) => setEffort(e.target.value)}
value={effort}
>
<option value="-">-</option>
<option value="0">0</option>
<option value="1">1</option>
<option value="2">2</option>
<option value="3">3</option>
<option value="4">4</option>
<option value="5">5</option>
<option value="6">6</option>
<option value="7">7</option>
<option value="8">8</option>
<option value="9">9</option>
<option value="10">10</option>
</select>
</label>
</p>
</div>
{/*footer*/}
<button
className="p-2 px-5 m-3 mb-4 text-lg text-gray-100 bg-green-600 rounded-lg hover:ring-4 ring-green-400"
type="button"
onClick={() => saveFilter()}
>
Show selection
</button>
<button
className="p-2 px-5 m-3 mb-4 text-lg text-gray-100 bg-red-600 rounded-lg hover:ring-4 ring-red-400"
type="button"
onClick={() => clearFilter()}
>
Remove filter
</button>
</div>
</div>
</div>
<div className="fixed inset-0 z-40 bg-gray-900 opacity-25"></div>
</>
)}
</>
);
} | {/*content*/}
<div className="relative flex flex-col w-full border-0 rounded-lg shadow-lg outline-none bg-gray-50 dark:bg-gray-900 focus:outline-none"> |
gen.rs | use std::collections::{HashMap, VecDeque};
use std::fs;
use std::io::prelude::*;
use avro_rs::{schema::RecordField, Schema};
use crate::error::{Error, Result};
use crate::templates::*;
/// Represents a schema input source.
pub enum Source<'a> {
/// An Avro schema enum from `avro-rs` crate.
Schema(&'a Schema),
/// An Avro schema string in json format.
SchemaStr(&'a str),
/// Pattern for files containing Avro schemas in json format.
GlobPattern(&'a str),
}
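// For example (paths and schema text are illustrative):
//   Source::Schema(&schema)                  // an already-parsed avro_rs Schema
//   Source::SchemaStr(r#"{"type": "record"}"#)
//   Source::GlobPattern("schemas/*.avsc")    // as exercised by the cross_deps test below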
/// The main component of this library.
/// It is stateless and can be reused many times.
pub struct Generator {
templater: Templater,
}
impl Generator {
/// Create a new `Generator` through a builder with default config.
pub fn new() -> Result<Generator> {
GeneratorBuilder::new().build()
}
/// Returns a fluid builder for custom `Generator` instantiation.
pub fn builder() -> GeneratorBuilder {
GeneratorBuilder::new()
}
/// Generates Rust code from an Avro schema `Source`.
/// Writes all generated types to the output.
pub fn gen(&self, source: &Source, output: &mut impl Write) -> Result<()> {
if self.templater.nullable {
output.write_all(DESER_NULLABLE.as_bytes())?;
}
match source {
Source::Schema(schema) => {
let mut deps = deps_stack(schema, vec![]);
self.gen_in_order(&mut deps, output)?;
}
Source::SchemaStr(raw_schema) => {
let schema = Schema::parse_str(&raw_schema)?;
let mut deps = deps_stack(&schema, vec![]);
self.gen_in_order(&mut deps, output)?;
}
Source::GlobPattern(pattern) => {
let mut raw_schemas = vec![];
for entry in glob::glob(pattern)? {
let path = entry.map_err(|e| e.into_error())?;
if !path.is_dir() {
raw_schemas.push(fs::read_to_string(path)?);
}
}
let schemas = &raw_schemas.iter().map(|s| s.as_str()).collect::<Vec<_>>();
let schemas = Schema::parse_list(&schemas)?;
let mut deps = schemas
.iter()
.fold(vec![], |deps, schema| deps_stack(&schema, deps));
self.gen_in_order(&mut deps, output)?;
}
}
Ok(())
}
/// Given an Avro `schema`:
/// * Finds its ordered, nested dependencies with `deps_stack(schema)`
/// * Pops sub-schemas and generates the appropriate Rust types
/// * Keeps track of nested schema->name with the `GenState` mapping
/// * Appends generated Rust types to the output
fn gen_in_order(&self, deps: &mut Vec<Schema>, output: &mut impl Write) -> Result<()> {
let mut gs = GenState::new();
while let Some(s) = deps.pop() {
match s {
// Simply generate code
Schema::Fixed { .. } => {
let code = &self.templater.str_fixed(&s)?;
output.write_all(code.as_bytes())?
}
Schema::Enum { .. } => {
let code = &self.templater.str_enum(&s)?;
output.write_all(code.as_bytes())?
}
// Generate code with potentially nested types
Schema::Record { .. } => {
let code = &self.templater.str_record(&s, &gs)?;
output.write_all(code.as_bytes())?
}
// Register inner type for it to be used as a nested type later
Schema::Array(ref inner) => {
let type_str = array_type(inner, &gs)?;
gs.put_type(&s, type_str)
}
Schema::Map(ref inner) => {
let type_str = map_type(inner, &gs)?;
gs.put_type(&s, type_str)
}
Schema::Union(ref union) => {
// Generate custom enum with potentially nested types
if (union.is_nullable() && union.variants().len() > 2)
|| (!union.is_nullable() && union.variants().len() > 1)
{
let code = &self.templater.str_union_enum(&s, &gs)?;
output.write_all(code.as_bytes())?
}
// Register inner union for it to be used as a nested type later
let type_str = union_type(union, &gs, true)?;
gs.put_type(&s, type_str)
}
_ => Err(Error::Schema(format!("Not a valid root schema: {:?}", s)))?,
}
}
Ok(())
}
}
/// Utility function to find the ordered, nested dependencies of an Avro `schema`.
/// Explores nested `schema`s in a breadth-first fashion, pushing them on a stack
/// at the same time in order to have them ordered.
/// It is similar to traversing the `schema` tree in a post-order fashion.
fn deps_stack(schema: &Schema, mut deps: Vec<Schema>) -> Vec<Schema> {
fn push_unique(deps: &mut Vec<Schema>, s: Schema) {
if !deps.contains(&s) {
deps.push(s);
}
}
let mut q = VecDeque::new();
q.push_back(schema);
while !q.is_empty() {
let s = q.pop_front().unwrap();
match s {
// No nested schemas, add them to the result stack
Schema::Enum { .. } => push_unique(&mut deps, s.clone()),
Schema::Fixed { .. } => push_unique(&mut deps, s.clone()),
Schema::Decimal { inner, .. } if matches!(**inner, Schema::Fixed { .. }) => {
push_unique(&mut deps, s.clone())
}
// Explore the record fields for potentially nested schemas
Schema::Record { fields, .. } => {
push_unique(&mut deps, s.clone());
let by_pos = fields
.iter()
.map(|f| (f.position, f))
.collect::<HashMap<_, _>>();
let mut i = 0;
while let Some(RecordField { schema: sr, .. }) = by_pos.get(&i) {
match sr {
// No nested schemas, add them to the result stack
Schema::Fixed { .. } => push_unique(&mut deps, sr.clone()),
Schema::Enum { .. } => push_unique(&mut deps, sr.clone()),
// Push to the exploration queue for further checks
Schema::Record { .. } => q.push_back(sr),
// Push to the exploration queue, depending on the inner schema format
Schema::Map(sc) | Schema::Array(sc) => match &**sc {
Schema::Fixed { .. }
| Schema::Enum { .. }
| Schema::Record { .. }
| Schema::Map(..)
| Schema::Array(..)
| Schema::Union(..) => q.push_back(&**sc),
_ => (),
},
Schema::Union(union) => {
if (union.is_nullable() && union.variants().len() > 2)
|| (!union.is_nullable() && union.variants().len() > 1)
{
push_unique(&mut deps, sr.clone());
}
union.variants().iter().for_each(|sc| match sc {
Schema::Fixed { .. }
| Schema::Enum { .. }
| Schema::Record { .. }
| Schema::Map(..)
| Schema::Array(..)
| Schema::Union(..) => {
q.push_back(sc);
push_unique(&mut deps, sc.clone());
}
_ => (),
});
}
_ => (),
};
i += 1;
}
}
// Depending on the inner schema type ...
Schema::Map(sc) | Schema::Array(sc) => match &**sc {
// ... Needs further checks, push to the exploration queue
Schema::Fixed { .. }
| Schema::Enum { .. }
| Schema::Record { .. }
| Schema::Map(..)
| Schema::Array(..)
| Schema::Union(..) => q.push_back(&**sc),
// ... Not nested, can be pushed to the result stack
_ => push_unique(&mut deps, s.clone()),
},
Schema::Union(union) => {
if (union.is_nullable() && union.variants().len() > 2)
|| (!union.is_nullable() && union.variants().len() > 1)
{
push_unique(&mut deps, s.clone());
}
union.variants().iter().for_each(|sc| match sc {
// ... Needs further checks, push to the exploration queue
Schema::Fixed { .. }
| Schema::Enum { .. }
| Schema::Record { .. }
| Schema::Map(..)
| Schema::Array(..)
| Schema::Union(..) => q.push_back(sc),
// ... Not nested, can be pushed to the result stack
_ => push_unique(&mut deps, s.clone()),
});
}
// Ignore all other schema formats
_ => (),
}
}
deps
}
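// As a concrete example (mirroring the `deps` test below): for a record
// `User` containing a record `Address` containing an enum `Country`, the
// returned stack is [User, Address, Country]; `gen_in_order` pops `Country`
// first, so nested types are emitted before the types that depend on them.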
/// A builder class to customize `Generator`.
pub struct GeneratorBuilder {
precision: usize,
nullable: bool,
use_variant_access: bool,
use_avro_rs_unions: bool,
derive_builders: bool,
}
impl GeneratorBuilder {
/// Creates a new `GeneratorBuilder`.
pub fn new() -> GeneratorBuilder {
GeneratorBuilder {
precision: 3,
nullable: false,
use_variant_access: false,
use_avro_rs_unions: false,
derive_builders: false,
}
}
/// Sets the precision for default values of f32/f64 fields.
pub fn precision(mut self, precision: usize) -> GeneratorBuilder {
self.precision = precision;
self
}
/// Puts default value when deserializing `null` field.
/// Doesn't apply to union fields ["null", "Foo"], which are `Option<Foo>`.
pub fn nullable(mut self, nullable: bool) -> GeneratorBuilder {
self.nullable = nullable;
self
}
/// Adds variant_access_derive to the enums generated from union types.
pub fn use_variant_access(mut self, use_variant_access: bool) -> GeneratorBuilder {
self.use_variant_access = use_variant_access;
self
}
/// Adds support for deserializing union types from the `avro-rs` crate.
/// Only necessary for unions of 3 or more types or 2-type unions without "null".
/// Note that only int, long, float, double, and boolean values are currently supported.
pub fn use_avro_rs_unions(mut self, use_avro_rs_unions: bool) -> GeneratorBuilder {
self.use_avro_rs_unions = use_avro_rs_unions;
self
}
/// Adds support to derive builders using the `rust-derive-builder` crate.
/// Will derive builders for record structs.
pub fn derive_builders(mut self, derive_builders: bool) -> GeneratorBuilder {
self.derive_builders = derive_builders;
self
}
/// Create a `Generator` with the builder parameters.
pub fn build(self) -> Result<Generator> {
let mut templater = Templater::new()?;
templater.precision = self.precision;
templater.nullable = self.nullable;
templater.use_variant_access = self.use_variant_access;
templater.use_avro_rs_unions = self.use_avro_rs_unions;
templater.derive_builders = self.derive_builders;
Ok(Generator { templater })
}
}
#[cfg(test)]
mod tests {
use avro_rs::schema::Name;
use super::*;
macro_rules! assert_schema_gen (
($generator:expr, $expected:expr, $raw_schema:expr) => (
let schema = Schema::parse_str($raw_schema).unwrap();
let source = Source::Schema(&schema);
let mut buf = vec![];
$generator.gen(&source, &mut buf).unwrap();
let res = String::from_utf8(buf).unwrap();
assert_eq!($expected, &res);
);
);
#[test]
fn simple() {
let raw_schema = r#"
{
"type": "record",
"name": "test",
"fields": [
{"name": "a", "type": "long", "default": 42},
{"name": "b", "type": "string"}
]
}
"#;
let expected = "
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Test {
pub a: i64,
pub b: String,
}
impl Default for Test {
fn default() -> Test {
Test {
a: 42,
b: String::default(),
}
}
}
";
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn simple_with_builders() {
let raw_schema = r#"
{
"type": "record",
"name": "test",
"fields": [
{"name": "a", "type": "long", "default": 42},
{"name": "b", "type": "string"}
]
}
"#;
let expected = "
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize, derive_builder::Builder)]
#[serde(default)]
#[builder(setter(into))]
pub struct Test {
pub a: i64,
pub b: String,
}
impl Default for Test {
fn default() -> Test {
Test {
a: 42,
b: String::default(),
}
}
}
";
let g = GeneratorBuilder::new()
.derive_builders(true)
.build()
.unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn complex() {
let raw_schema = r#"
{
"type": "record",
"name": "User",
"doc": "Hi there.",
"fields": [
{"name": "name", "type": "string", "default": ""},
{"name": "favorite_number", "type": "int", "default": 7},
{"name": "likes_pizza", "type": "boolean", "default": false},
{"name": "oye", "type": "float", "default": 1.1},
{"name": "aa-i32",
"type": {"type": "array", "items": {"type": "array", "items": "int"}},
"default": [[0], [12, -1]]}
]
}
"#;
let expected = r#"
/// Hi there.
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct User {
pub name: String,
pub favorite_number: i32,
pub likes_pizza: bool,
pub oye: f32,
#[serde(rename = "aa-i32")]
pub aa_i32: Vec<Vec<i32>>,
}
impl Default for User {
fn default() -> User {
User {
name: "".to_owned(),
favorite_number: 7,
likes_pizza: false,
oye: 1.100,
aa_i32: vec![vec![0], vec![12, -1]],
}
}
}
"#;
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn optional_array() {
let raw_schema = r#"
{
"name": "Snmp",
"type": "record",
"fields": [ {
"name": "v1",
"type": [ "null", {
"name": "V1",
"type": "record",
"fields": [ {
"name": "pdu",
"type": [ "null", {
"name": "TrapV1",
"type": "record",
"fields": [ {
"name": "var",
"type": ["null", {
"type": "array",
"items": {
"name": "Variable",
"type": "record",
"fields": [ {
"name": "oid",
"type": ["null", {
"type":"array",
"items": "long"
} ],
"default": null
}, {
"name": "val",
"type": ["null", "string"],
"default": null
} ],
"default": {}
}
} ],
"default": null
} ]
} ],
"default": null
} ]
} ],
"default": null
} ],
"default": {}
}
"#;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Variable {
pub oid: Option<Vec<i64>>,
pub val: Option<String>,
}
impl Default for Variable {
fn default() -> Variable {
Variable {
oid: None,
val: None,
}
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct TrapV1 {
pub var: Option<Vec<Variable>>,
}
impl Default for TrapV1 {
fn default() -> TrapV1 {
TrapV1 {
var: None,
}
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct V1 {
pub pdu: Option<TrapV1>,
}
impl Default for V1 {
fn default() -> V1 {
V1 {
pdu: None,
}
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Snmp {
pub v1: Option<V1>,
}
impl Default for Snmp {
fn default() -> Snmp {
Snmp {
v1: None,
}
}
}
"#;
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn optional_arrays() {
let raw_schema = r#"
{
"type": "record",
"name": "KsqlDataSourceSchema",
"namespace": "io.confluent.ksql.avro_schemas",
"fields": [ {
"name": "ID",
"type": ["null", "string"],
"default": null
}, {
"name": "GROUP_IDS",
"type": ["null", {
"type": "array",
"items": ["null", "string"]
} ],
"default": null
}, {
"name": "GROUP_NAMES",
"type": ["null", {
"type": "array",
"items": ["null", "string"]
} ],
"default": null
} ]
}
"#;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct KsqlDataSourceSchema {
#[serde(rename = "ID")]
pub id: Option<String>,
#[serde(rename = "GROUP_IDS")]
pub group_ids: Option<Vec<Option<String>>>,
#[serde(rename = "GROUP_NAMES")]
pub group_names: Option<Vec<Option<String>>>,
}
impl Default for KsqlDataSourceSchema {
fn default() -> KsqlDataSourceSchema {
KsqlDataSourceSchema {
id: None,
group_ids: None,
group_names: None,
}
}
}
"#;
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn multi_valued_union() {
let raw_schema = r#"
{
"type": "record",
"name": "Contact",
"namespace": "com.test",
"fields": [ {
"name": "extra",
"type": "map",
"values" : [ "null", "string", "long", "double", "boolean" ]
} ]
}
"#;
let expected = r#"
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
pub enum UnionStringLongDoubleBoolean {
String(String),
Long(i64),
Double(f64),
Boolean(bool),
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Contact {
pub extra: ::std::collections::HashMap<String, Option<UnionStringLongDoubleBoolean>>,
}
impl Default for Contact {
fn default() -> Contact {
Contact {
extra: ::std::collections::HashMap::new(),
}
}
}
"#;
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
let raw_schema = r#"
{
"type": "record",
"name": "AvroFileId",
"fields": [ {
"name": "id",
"type": [
"string", {
"type": "record",
"name": "AvroShortUUID",
"fields": [ {
"name": "mostBits",
"type": "long"
}, {
"name": "leastBits",
"type": "long"
} ]
} ]
} ]
}
"#;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroShortUuid {
#[serde(rename = "mostBits")]
pub most_bits: i64,
#[serde(rename = "leastBits")]
pub least_bits: i64,
}
impl Default for AvroShortUuid {
fn default() -> AvroShortUuid {
AvroShortUuid {
most_bits: 0,
least_bits: 0,
}
}
}
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
pub enum UnionStringAvroShortUuid {
String(String),
AvroShortUuid(AvroShortUuid),
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroFileId {
pub id: UnionStringAvroShortUuid,
}
impl Default for AvroFileId {
fn default() -> AvroFileId {
AvroFileId {
id: UnionStringAvroShortUuid::String(String::default()),
}
}
}
"#;
let g = Generator::new().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn multi_valued_union_with_variant_access() {
let raw_schema = r#"
{
"type": "record",
"name": "Contact",
"namespace": "com.test",
"fields": [ {
"name": "extra",
"type": "map",
"values" : [ "null", "string", "long", "double", "boolean" ]
} ]
}
"#;
let expected = r#"
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize, variant_access_derive::VariantAccess)]
pub enum UnionStringLongDoubleBoolean {
String(String),
Long(i64),
Double(f64),
Boolean(bool),
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Contact {
pub extra: ::std::collections::HashMap<String, Option<UnionStringLongDoubleBoolean>>,
}
impl Default for Contact {
fn default() -> Contact {
Contact {
extra: ::std::collections::HashMap::new(),
}
}
}
"#;
let g = Generator::builder()
.use_variant_access(true)
.build()
.unwrap();
assert_schema_gen!(g, expected, raw_schema);
let raw_schema = r#"
{
"type": "record",
"name": "AvroFileId",
"fields": [ {
"name": "id",
"type": [
"string", {
"type": "record",
"name": "AvroShortUUID",
"fields": [ {
"name": "mostBits",
"type": "long"
}, {
"name": "leastBits",
"type": "long"
} ]
} ]
} ]
}
"#;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroShortUuid {
#[serde(rename = "mostBits")]
pub most_bits: i64,
#[serde(rename = "leastBits")]
pub least_bits: i64,
}
impl Default for AvroShortUuid {
fn default() -> AvroShortUuid {
AvroShortUuid {
most_bits: 0,
least_bits: 0,
}
}
}
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize, variant_access_derive::VariantAccess)]
pub enum UnionStringAvroShortUuid {
String(String),
AvroShortUuid(AvroShortUuid),
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroFileId {
pub id: UnionStringAvroShortUuid,
}
impl Default for AvroFileId {
fn default() -> AvroFileId {
AvroFileId {
id: UnionStringAvroShortUuid::String(String::default()),
}
}
}
"#;
let g = Generator::builder()
.use_variant_access(true)
.build()
.unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn multi_valued_union_with_avro_rs_unions() {
let raw_schema = r#"
{
"type": "record",
"name": "Contact",
"namespace": "com.test",
"fields": [ {
"name": "extra",
"type": "map",
"values" : [ "null", "string", "long", "double", "boolean" ]
} ]
}
"#;
let expected = r#"
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Serialize)]
pub enum UnionStringLongDoubleBoolean {
String(String),
Long(i64),
Double(f64),
Boolean(bool),
}
impl<'de> serde::Deserialize<'de> for UnionStringLongDoubleBoolean {
fn deserialize<D>(deserializer: D) -> Result<UnionStringLongDoubleBoolean, D::Error>
where
D: serde::Deserializer<'de>,
{
/// Serde visitor for the auto-generated unnamed Avro union type.
struct UnionStringLongDoubleBooleanVisitor;
impl<'de> serde::de::Visitor<'de> for UnionStringLongDoubleBooleanVisitor {
type Value = UnionStringLongDoubleBoolean;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a UnionStringLongDoubleBoolean")
}
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(UnionStringLongDoubleBoolean::Long(value))
}
fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(UnionStringLongDoubleBoolean::Double(value))
}
fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(UnionStringLongDoubleBoolean::Boolean(value))
}
}
deserializer.deserialize_any(UnionStringLongDoubleBooleanVisitor)
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Contact {
pub extra: ::std::collections::HashMap<String, Option<UnionStringLongDoubleBoolean>>,
}
impl Default for Contact {
fn default() -> Contact {
Contact {
extra: ::std::collections::HashMap::new(),
}
}
}
"#;
let g = Generator::builder()
.use_avro_rs_unions(true)
.build()
.unwrap();
assert_schema_gen!(g, expected, raw_schema);
let raw_schema = r#"
{
"type": "record",
"name": "AvroFileId",
"fields": [ {
"name": "id",
"type": [
"string", {
"type": "record",
"name": "AvroShortUUID",
"fields": [ {
"name": "mostBits",
"type": "long"
}, {
"name": "leastBits",
"type": "long"
} ]
} ]
} ]
}
"#;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroShortUuid {
#[serde(rename = "mostBits")]
pub most_bits: i64,
#[serde(rename = "leastBits")]
pub least_bits: i64,
}
impl Default for AvroShortUuid {
fn default() -> AvroShortUuid {
AvroShortUuid {
most_bits: 0,
least_bits: 0,
}
}
}
/// Auto-generated type for unnamed Avro union variants.
#[derive(Debug, PartialEq, Clone, serde::Serialize)]
pub enum UnionStringAvroShortUuid {
String(String),
AvroShortUuid(AvroShortUuid),
}
impl<'de> serde::Deserialize<'de> for UnionStringAvroShortUuid {
fn deserialize<D>(deserializer: D) -> Result<UnionStringAvroShortUuid, D::Error>
where
D: serde::Deserializer<'de>,
{
/// Serde visitor for the auto-generated unnamed Avro union type.
struct UnionStringAvroShortUuidVisitor;
impl<'de> serde::de::Visitor<'de> for UnionStringAvroShortUuidVisitor {
type Value = UnionStringAvroShortUuid;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("a UnionStringAvroShortUuid")
}
}
deserializer.deserialize_any(UnionStringAvroShortUuidVisitor)
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct AvroFileId {
pub id: UnionStringAvroShortUuid,
}
impl Default for AvroFileId {
fn default() -> AvroFileId {
AvroFileId {
id: UnionStringAvroShortUuid::String(String::default()),
}
}
}
"#;
let g = Generator::builder()
.use_avro_rs_unions(true)
.build()
.unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn nullable_gen() {
let raw_schema = r#"
{
"type": "record",
"name": "test",
"fields": [
{"name": "a", "type": "long", "default": 42},
{"name": "b-b", "type": "string", "default": "na"},
{"name": "c", "type": ["null", "int"], "default": null}
]
}
"#;
let expected = r#"
macro_rules! deser(
($name:ident, $rtype:ty, $val:expr) => (
fn $name<'de, D>(deserializer: D) -> Result<$rtype, D::Error>
where
D: serde::Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_else(|| $val))
}
);
);
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Test {
#[serde(deserialize_with = "nullable_test_a")]
pub a: i64,
#[serde(rename = "b-b", deserialize_with = "nullable_test_b_b")]
pub b_b: String,
pub c: Option<i32>,
}
deser!(nullable_test_a, i64, 42);
deser!(nullable_test_b_b, String, "na".to_owned());
impl Default for Test {
fn default() -> Test {
Test {
a: 42,
b_b: "na".to_owned(),
c: None,
}
}
}
"#;
let g = Generator::builder().nullable(true).build().unwrap();
assert_schema_gen!(g, expected, raw_schema);
}
#[test]
fn nullable_code() {
use serde::{Deserialize, Deserializer};
macro_rules! deser(
($name:ident, $rtype:ty, $val:expr) => (
fn $name<'de, D>(deserializer: D) -> std::result::Result<$rtype, D::Error>
where
D: Deserializer<'de>,
{
let opt = Option::deserialize(deserializer)?;
Ok(opt.unwrap_or_else(|| $val))
}
);
);
#[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct Test {
#[serde(deserialize_with = "nullable_test_a")]
pub a: i64,
#[serde(rename = "b-b", deserialize_with = "nullable_test_b_b")]
pub b_b: String,
pub c: Option<i32>,
}
deser!(nullable_test_a, i64, 42);
deser!(nullable_test_b_b, String, "na".to_owned());
impl Default for Test {
fn default() -> Test {
Test {
a: 42,
b_b: "na".to_owned(),
c: None,
}
}
}
let json = r#"{"a": null, "b-b": null, "c": null}"#;
let res: Test = serde_json::from_str(json).unwrap();
assert_eq!(Test::default(), res);
}
#[test]
fn deps() {
let raw_schema = r#"
{
"type": "record",
"name": "User",
"fields": [
{"name": "name", "type": "string", "default": "unknown"},
{"name": "address",
"type": {
"type": "record",
"name": "Address",
"fields": [
{"name": "city", "type": "string", "default": "unknown"},
{"name": "country",
"type": {"type": "enum", "name": "Country", "symbols": ["FR", "JP"]}
}
]
}
}
]
}
"#;
let schema = Schema::parse_str(&raw_schema).unwrap();
let mut deps = deps_stack(&schema, vec![]);
let s = deps.pop().unwrap();
assert!(matches!(s, Schema::Enum{ name: Name { ref name, ..}, ..} if name == "Country"));
let s = deps.pop().unwrap();
assert!(matches!(s, Schema::Record{ name: Name { ref name, ..}, ..} if name == "Address"));
let s = deps.pop().unwrap();
assert!(matches!(s, Schema::Record{ name: Name { ref name, ..}, ..} if name == "User"));
let s = deps.pop();
assert!(matches!(s, None));
}
#[test]
fn cross_deps() -> std::result::Result<(), Box<dyn std::error::Error>> {
use std::fs::File;
use std::io::Write;
use tempfile::tempdir; |
let dir = tempdir()?;
let mut schema_a_file = File::create(dir.path().join("schema_a.avsc"))?;
let schema_a_str = r#"
{
"name": "A",
"type": "record",
"fields": [ {"name": "field_one", "type": "float"} ]
}
"#;
schema_a_file.write_all(schema_a_str.as_bytes())?;
let mut schema_b_file = File::create(dir.path().join("schema_b.avsc"))?;
let schema_b_str = r#"
{
"name": "B",
"type": "record",
"fields": [ {"name": "field_one", "type": "A"} ]
}
"#;
schema_b_file.write_all(schema_b_str.as_bytes())?;
let expected = r#"
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct B {
pub field_one: A,
}
impl Default for B {
fn default() -> B {
B {
field_one: A::default(),
}
}
}
#[derive(Debug, PartialEq, Clone, serde::Deserialize, serde::Serialize)]
#[serde(default)]
pub struct A {
pub field_one: f32,
}
impl Default for A {
fn default() -> A {
A {
field_one: 0.0,
}
}
}
"#;
let pattern = format!("{}/*.avsc", dir.path().display());
let source = Source::GlobPattern(pattern.as_str());
let g = Generator::new()?;
let mut buf = vec![];
g.gen(&source, &mut buf)?;
let res = String::from_utf8(buf)?;
println!("{}", res);
assert_eq!(expected, res);
drop(schema_a_file);
drop(schema_b_file);
dir.close()?;
Ok(())
}
} | |
main.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START automl_v1beta1_generated_AutoMl_ExportModel_sync]
package main
import (
"context"
automl "cloud.google.com/go/automl/apiv1beta1"
automlpb "google.golang.org/genproto/googleapis/cloud/automl/v1beta1"
)
func main() {
ctx := context.Background()
c, err := automl.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
defer c.Close()
req := &automlpb.ExportModelRequest{
// TODO: Fill request struct fields.
}
op, err := c.ExportModel(ctx, req) | if err != nil {
// TODO: Handle error.
}
err = op.Wait(ctx)
if err != nil {
// TODO: Handle error.
}
}
// [END automl_v1beta1_generated_AutoMl_ExportModel_sync] | |
parameter_tuning.py | import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn import neighbors
from sklearn import svm
from sklearn.model_selection import GridSearchCV
from sundial.price_model.utils.file_utils import *
from sundial.price_model.utils.settings import *
import multiprocessing
def get_train_test_data(df_price_frame):
df_price_frame = df_price_frame.dropna()
X_price_frame = df_price_frame.drop('lmp_value', axis=1).\
reset_index().drop('time', axis=1)
Y_price_frame = df_price_frame['lmp_value']
return train_test_split(X_price_frame, Y_price_frame, shuffle=False)
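# Note: shuffle=False keeps the chronological order of the time-indexed price
# frame, so the test split is strictly later in time than the training split;
# a shuffled split would leak future prices into training.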
def tune_models(df_price_frame):
"""
tune models using different regressors
:param df_price_frame:
:return:
"""
x_train, x_test, y_train, y_test = \
get_train_test_data(df_price_frame)
tune_svr(x_train, y_train, x_test, y_test)
tune_knn(x_train, y_train, x_test, y_test)
tune_lin_reg(x_train, y_train, x_test, y_test)
def tune_svr(x_train, y_train, x_test, y_test):
"""
SVR regressor with grid search
:param x_train:
:param y_train:
:param x_test:
:param y_test:
:return:
"""
C_range = range(1000, 3000, 1000)
tuned_parameters = [{
"C": C_range,
"kernel": ["rbf"]
}]
svr_reg = GridSearchCV(svm.SVR(gamma=0.001, epsilon=10),
param_grid=tuned_parameters, verbose=1,
n_jobs=multiprocessing.cpu_count())
y_pred = svr_reg.fit(x_train, y_train).predict(x_test)
print('Optimum parameters C and kernel for SVR: ',
svr_reg.best_params_)
print("The test score R2 for SVR: ", svr_reg.score(x_test, y_test))
print("SVR mean squared error: %.2f"
% np.mean((y_test - svr_reg.predict(x_test)) ** 2))
target_folder = os.path.join(PLOTS_FOLDER, "svr")
make_dir(target_folder)
plot_predictions(x_test, y_test, y_pred, "svr", target_folder)
plot_pred_test_relation(y_test, y_pred, "svr", target_folder)
def | (x_train, y_train, x_test, y_test):
"""
KNN regressor with grid search
:param x_train:
:param y_train:
:param x_test:
:param y_test:
:return:
"""
n_range = range(1, 10, 1)
tuned_parameters = [{
"n_neighbors": n_range
}]
knn_reg = GridSearchCV(neighbors.KNeighborsRegressor(),
param_grid=tuned_parameters, verbose=1,
n_jobs=multiprocessing.cpu_count())
y_pred = knn_reg.fit(x_train, y_train).predict(x_test)
print('Optimum parameter n_neighbors for KNN: ',
knn_reg.best_params_)
print("The test score R2 for KNN: ", knn_reg.score(x_test, y_test))
print("KNN mean squared error: %.2f"
% np.mean((y_test - knn_reg.predict(x_test)) ** 2))
target_folder = os.path.join(PLOTS_FOLDER, "knn")
make_dir(target_folder)
plot_predictions(x_test, y_test, y_pred, "knn", target_folder)
plot_pred_test_relation(y_test, y_pred, "knn", target_folder)
def tune_lin_reg(x_train, y_train, x_test, y_test):
"""
Linear regressor with grid search
:param x_train:
:param y_train:
:param x_test:
:param y_test:
:return:
"""
lin_reg = linear_model.LinearRegression(normalize=True)
y_pred = lin_reg.fit(x_train, y_train).predict(x_test)
print("The test score R2 for Lin Reg: ", lin_reg.score(x_test, y_test))
print("Lin Reg mean squared error: %.2f"
% np.mean((y_test - lin_reg.predict(x_test)) ** 2))
target_folder = os.path.join(PLOTS_FOLDER, "lin")
make_dir(target_folder)
plot_predictions(x_test, y_test, y_pred, "lin", target_folder)
plot_pred_test_relation(y_test, y_pred, "lin", target_folder)
def plot_predictions(x_test, y_test, y_pred, model_name, path):
"""
plot predictions from models
:param x_test:
:param y_test:
:param y_pred:
:param model_name:
:param path:
:return:
"""
plt.figure(figsize=(15, 7))
plt.scatter(x_test.index, y_test, c='k', label='Observed')
plt.plot(x_test.index, y_pred, c='r', label='Predicted')
plt.xlabel('data')
plt.ylabel('lmp_value')
plt.title(model_name)
plt.legend()
plt.savefig(os.path.join(path, "{0}_predictions".format(model_name)))
def plot_pred_test_relation(y_test, y_pred, model_name, path):
"""
plot the confusion matrix type graph
:param y_test:
:param y_pred:
:param model_name:
:param path:
:return:
"""
plt.figure(figsize=(6, 6))
plt.scatter(y_test, y_test, c='k')
plt.scatter(y_test, y_pred, c='r')
plt.xlabel('Observed Elec. Price (MWhr)')
plt.ylabel("Predicted Elec. Price (MWWh): $\hat{Y}_i$")
plt.title("Energy vs Predicted Energy: $Y_i$ vs $\hat{Y}_i$ " + model_name)
plt.savefig(os.path.join(path, "{0}_relation".format(model_name)))
def main():
price_frame = pd.read_csv(PRICE_DATA_FILENAME, index_col=0)
df_price_frame = price_frame.set_index("time")
tune_models(df_price_frame)
if __name__ == '__main__':
main()
| tune_knn |
peripheral.rs | // btleplug Source Code File
//
// Copyright 2020 Nonpolynomial Labs LLC. All rights reserved.
//
// Licensed under the BSD 3-Clause license. See LICENSE file in the project root
// for full license information.
//
// Some portions of this file are taken and/or modified from Rumble
// (https://github.com/mwylde/rumble), using a dual MIT/Apache License under the
// following copyright:
//
// Copyright (c) 2014 The Rust Project Developers
use super::{
advertisement_data_type, ble::characteristic::BLECharacteristic, ble::device::BLEDevice,
ble::service::BLEService, utils,
};
use crate::{
api::{
bleuuid::{uuid_from_u16, uuid_from_u32},
AddressType, BDAddr, CentralEvent, Characteristic, Peripheral as ApiPeripheral,
PeripheralProperties, Service, ValueNotification, WriteType,
},
common::{adapter_manager::AdapterManager, util::notifications_stream_from_broadcast_receiver},
Error, Result,
};
use async_trait::async_trait;
use dashmap::DashMap;
use futures::stream::Stream;
use log::{error, trace};
#[cfg(feature = "serde")]
use serde::{Deserialize, Serialize};
#[cfg(feature = "serde")]
use serde_cr as serde;
use std::{
collections::{BTreeSet, HashMap, HashSet},
convert::TryInto,
fmt::{self, Debug, Display, Formatter},
pin::Pin,
sync::atomic::{AtomicBool, Ordering},
sync::{Arc, RwLock},
};
use tokio::sync::broadcast;
use uuid::Uuid;
use std::sync::Weak;
use windows::Devices::Bluetooth::{Advertisement::*, BluetoothAddressType};
#[cfg_attr(
feature = "serde",
derive(Serialize, Deserialize),
serde(crate = "serde_cr")
)]
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct PeripheralId(BDAddr);
/// Implementation of [api::Peripheral](crate::api::Peripheral).
#[derive(Clone)]
pub struct Peripheral {
shared: Arc<Shared>,
}
struct Shared {
device: tokio::sync::Mutex<Option<BLEDevice>>,
adapter: Weak<AdapterManager<Peripheral>>,
address: BDAddr,
connected: AtomicBool,
ble_services: DashMap<Uuid, BLEService>,
notifications_channel: broadcast::Sender<ValueNotification>,
    // Mutable, advertised state...
address_type: RwLock<Option<AddressType>>,
local_name: RwLock<Option<String>>,
last_tx_power_level: RwLock<Option<i16>>, // XXX: would be nice to avoid lock here!
last_rssi: RwLock<Option<i16>>, // XXX: would be nice to avoid lock here!
latest_manufacturer_data: RwLock<HashMap<u16, Vec<u8>>>,
latest_service_data: RwLock<HashMap<Uuid, Vec<u8>>>,
services: RwLock<HashSet<Uuid>>,
}
impl Peripheral {
pub(crate) fn new(adapter: Weak<AdapterManager<Self>>, address: BDAddr) -> Self {
let (broadcast_sender, _) = broadcast::channel(16);
Peripheral {
shared: Arc::new(Shared {
                adapter,
                device: tokio::sync::Mutex::new(None),
                address,
connected: AtomicBool::new(false),
ble_services: DashMap::new(),
notifications_channel: broadcast_sender,
address_type: RwLock::new(None),
local_name: RwLock::new(None),
last_tx_power_level: RwLock::new(None),
last_rssi: RwLock::new(None),
latest_manufacturer_data: RwLock::new(HashMap::new()),
latest_service_data: RwLock::new(HashMap::new()),
services: RwLock::new(HashSet::new()),
}),
}
}
// TODO: see if the other backends can also be similarly decoupled from PeripheralProperties
    // so it can potentially be replaced by individual state getters
fn derive_properties(&self) -> PeripheralProperties {
PeripheralProperties {
address: self.address(),
address_type: *self.shared.address_type.read().unwrap(),
local_name: self.shared.local_name.read().unwrap().clone(),
tx_power_level: *self.shared.last_tx_power_level.read().unwrap(),
rssi: *self.shared.last_rssi.read().unwrap(),
manufacturer_data: self.shared.latest_manufacturer_data.read().unwrap().clone(),
service_data: self.shared.latest_service_data.read().unwrap().clone(),
services: self
.shared
.services
.read()
.unwrap()
.iter()
                .copied()
.collect(),
}
}
pub(crate) fn update_properties(&self, args: &BluetoothLEAdvertisementReceivedEventArgs) {
let advertisement = args.Advertisement().unwrap();
// Advertisements are cumulative: set/replace data only if it's set
if let Ok(name) = advertisement.LocalName() {
if !name.is_empty() {
// XXX: we could probably also assume that we've seen the
// advertisement before and speculatively take a read lock
// to confirm that the name hasn't changed...
let mut local_name_guard = self.shared.local_name.write().unwrap();
*local_name_guard = Some(name.to_string());
}
}
if let Ok(manufacturer_data) = advertisement.ManufacturerData() {
let mut manufacturer_data_guard = self.shared.latest_manufacturer_data.write().unwrap();
*manufacturer_data_guard = manufacturer_data
.into_iter()
.map(|d| {
let manufacturer_id = d.CompanyId().unwrap();
let data = utils::to_vec(&d.Data().unwrap());
(manufacturer_id, data)
})
.collect();
// Emit event of newly received advertisement
self.emit_event(CentralEvent::ManufacturerDataAdvertisement {
id: self.shared.address.into(),
manufacturer_data: manufacturer_data_guard.clone(),
});
}
        // The Windows Runtime API (as of 19041) does not directly expose service data via a
        // friendly, typed API (unlike manufacturer data above); instead it provides raw
        // advertising data sections, which are processed here.
if let Ok(data_sections) = advertisement.DataSections() {
// See if we have any advertised service data before taking a lock to update...
let mut found_service_data = false;
for section in &data_sections {
match section.DataType().unwrap() {
advertisement_data_type::SERVICE_DATA_16_BIT_UUID
| advertisement_data_type::SERVICE_DATA_32_BIT_UUID
| advertisement_data_type::SERVICE_DATA_128_BIT_UUID => {
found_service_data = true;
break;
}
_ => {}
}
}
if found_service_data {
let mut service_data_guard = self.shared.latest_service_data.write().unwrap();
*service_data_guard = data_sections
.into_iter()
.filter_map(|d| {
let data = utils::to_vec(&d.Data().unwrap());
match d.DataType().unwrap() {
advertisement_data_type::SERVICE_DATA_16_BIT_UUID => {
let (uuid, data) = data.split_at(2);
let uuid =
uuid_from_u16(u16::from_le_bytes(uuid.try_into().unwrap()));
Some((uuid, data.to_owned()))
}
advertisement_data_type::SERVICE_DATA_32_BIT_UUID => {
let (uuid, data) = data.split_at(4);
let uuid =
uuid_from_u32(u32::from_le_bytes(uuid.try_into().unwrap()));
Some((uuid, data.to_owned()))
}
advertisement_data_type::SERVICE_DATA_128_BIT_UUID => {
let (uuid, data) = data.split_at(16);
let uuid = Uuid::from_slice(uuid).unwrap();
Some((uuid, data.to_owned()))
}
_ => None,
}
})
.collect();
// Emit event of newly received advertisement
self.emit_event(CentralEvent::ServiceDataAdvertisement {
id: self.shared.address.into(),
service_data: service_data_guard.clone(),
});
}
}
if let Ok(services) = advertisement.ServiceUuids() {
let mut found_new_service = false;
// Limited scope for read-only lock...
{
let services_guard_ro = self.shared.services.read().unwrap();
// In all likelihood we've already seen all the advertised services before so lets
// check to see if we can avoid taking the write lock and emitting an event...
for uuid in &services {
if !services_guard_ro.contains(&utils::to_uuid(&uuid)) {
found_new_service = true;
break;
}
}
}
if found_new_service {
let mut services_guard = self.shared.services.write().unwrap();
// ServicesUuids combines all the 16, 32 and 128 bit, 'complete' and 'incomplete'
// service IDs that may be part of this advertisement into one single list with
// a consistent (128bit) format. Considering that we don't practically know
// whether the aggregate list is ever complete we always union the IDs with the
// IDs already tracked.
for uuid in services {
services_guard.insert(utils::to_uuid(&uuid));
}
self.emit_event(CentralEvent::ServicesAdvertisement {
id: self.shared.address.into(),
                    services: services_guard.iter().copied().collect(),
});
}
}
if let Ok(address_type) = args.BluetoothAddressType() {
let mut address_type_guard = self.shared.address_type.write().unwrap();
*address_type_guard = match address_type {
BluetoothAddressType::Public => Some(AddressType::Public),
BluetoothAddressType::Random => Some(AddressType::Random),
_ => None,
};
}
if let Ok(tx_reference) = args.TransmitPowerLevelInDBm() {
// IReference is (ironically) a crazy foot gun in Rust since it very easily
// panics if you look at it wrong. Calling GetInt16(), IsNumericScalar() or Type()
// all panic here without returning a Result as documented.
// Value() is apparently the _right_ way to extract something from an IReference<T>...
if let Ok(tx) = tx_reference.Value() {
let mut tx_power_level_guard = self.shared.last_tx_power_level.write().unwrap();
*tx_power_level_guard = Some(tx);
}
}
if let Ok(rssi) = args.RawSignalStrengthInDBm() {
let mut rssi_guard = self.shared.last_rssi.write().unwrap();
*rssi_guard = Some(rssi);
}
}
fn emit_event(&self, event: CentralEvent) {
if let Some(manager) = self.shared.adapter.upgrade() {
manager.emit(event);
} else {
trace!("Could not emit an event. AdapterManager has been dropped");
}
}
}
impl Display for Peripheral {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let connected = if self.shared.connected.load(Ordering::Relaxed) {
" connected"
} else {
""
};
write!(
f,
"{} {}{}",
self.shared.address,
self.shared
.local_name
.read()
.unwrap()
.clone()
.unwrap_or_else(|| "(unknown)".to_string()),
connected
)
}
}
impl Debug for Peripheral {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let connected = if self.shared.connected.load(Ordering::Relaxed) {
" connected"
} else {
""
};
let properties = self.derive_properties();
write!(
f,
"{} properties: {:?}, services: {:?} {}",
self.shared.address, properties, self.shared.ble_services, connected
)
}
}
#[async_trait]
impl ApiPeripheral for Peripheral {
fn | (&self) -> PeripheralId {
PeripheralId(self.shared.address)
}
/// Returns the address of the peripheral.
fn address(&self) -> BDAddr {
self.shared.address
}
/// Returns the set of properties associated with the peripheral. These may be updated over time
/// as additional advertising reports are received.
async fn properties(&self) -> Result<Option<PeripheralProperties>> {
Ok(Some(self.derive_properties()))
}
fn services(&self) -> BTreeSet<Service> {
self.shared
.ble_services
.iter()
.map(|item| item.value().to_service())
.collect()
}
/// Returns true iff we are currently connected to the device.
async fn is_connected(&self) -> Result<bool> {
Ok(self.shared.connected.load(Ordering::Relaxed))
}
    /// Creates a connection to the device. This is a synchronous operation; if this method returns
    /// Ok, the connection has been established successfully. Note that peripherals allow only one
    /// connection at a time. Operations that attempt to communicate with a device will fail until it is connected.
async fn connect(&self) -> Result<()> {
let shared_clone = Arc::downgrade(&self.shared);
let adapter_clone = self.shared.adapter.clone();
let address = self.shared.address;
let device = BLEDevice::new(
self.shared.address,
Box::new(move |is_connected| {
if let Some(shared) = shared_clone.upgrade() {
shared.connected.store(is_connected, Ordering::Relaxed);
}
if !is_connected {
if let Some(adapter) = adapter_clone.upgrade() {
adapter.emit(CentralEvent::DeviceDisconnected(address.into()));
}
}
}),
)
.await?;
device.connect().await?;
let mut d = self.shared.device.lock().await;
*d = Some(device);
self.shared.connected.store(true, Ordering::Relaxed);
self.emit_event(CentralEvent::DeviceConnected(self.shared.address.into()));
Ok(())
}
/// Terminates a connection to the device. This is a synchronous operation.
async fn disconnect(&self) -> Result<()> {
let mut device = self.shared.device.lock().await;
*device = None;
self.emit_event(CentralEvent::DeviceDisconnected(self.shared.address.into()));
Ok(())
}
/// Discovers all characteristics for the device. This is a synchronous operation.
async fn discover_services(&self) -> Result<()> {
let device = self.shared.device.lock().await;
if let Some(ref device) = *device {
let gatt_services = device.discover_services().await?;
for service in &gatt_services {
let uuid = utils::to_uuid(&service.Uuid().unwrap());
match BLEDevice::get_characteristics(&service).await {
Ok(characteristics) => {
let characteristics = characteristics
.into_iter()
.map(|gatt_characteristic| {
let characteristic = BLECharacteristic::new(gatt_characteristic);
(characteristic.uuid(), characteristic)
})
.collect();
self.shared.ble_services.insert(
uuid,
BLEService {
uuid,
characteristics,
},
);
}
Err(e) => {
error!("get_characteristics_async {:?}", e);
}
}
}
return Ok(());
}
Err(Error::NotConnected)
}
    /// Write some data to the characteristic. Returns an error if the write couldn't be sent or (in
    /// the case of a write-with-response) if the device returns an error.
async fn write(
&self,
characteristic: &Characteristic,
data: &[u8],
write_type: WriteType,
) -> Result<()> {
let ble_service = &*self
.shared
.ble_services
.get(&characteristic.service_uuid)
.ok_or_else(|| Error::NotSupported("Service not found for write".into()))?;
let ble_characteristic = ble_service
.characteristics
.get(&characteristic.uuid)
.ok_or_else(|| Error::NotSupported("Characteristic not found for write".into()))?;
ble_characteristic.write_value(data, write_type).await
}
/// Enables either notify or indicate (depending on support) for the specified characteristic.
/// This is a synchronous call.
async fn subscribe(&self, characteristic: &Characteristic) -> Result<()> {
let ble_service = &mut *self
.shared
.ble_services
.get_mut(&characteristic.service_uuid)
.ok_or_else(|| Error::NotSupported("Service not found for subscribe".into()))?;
let ble_characteristic = ble_service
.characteristics
.get_mut(&characteristic.uuid)
.ok_or_else(|| Error::NotSupported("Characteristic not found for subscribe".into()))?;
let notifications_sender = self.shared.notifications_channel.clone();
let uuid = characteristic.uuid;
ble_characteristic
.subscribe(Box::new(move |value| {
let notification = ValueNotification { uuid: uuid, value };
// Note: we ignore send errors here which may happen while there are no
// receivers...
let _ = notifications_sender.send(notification);
}))
.await
}
/// Disables either notify or indicate (depending on support) for the specified characteristic.
/// This is a synchronous call.
async fn unsubscribe(&self, characteristic: &Characteristic) -> Result<()> {
let ble_service = &mut *self
.shared
.ble_services
.get_mut(&characteristic.service_uuid)
.ok_or_else(|| Error::NotSupported("Service not found for unsubscribe".into()))?;
let ble_characteristic = ble_service
.characteristics
.get_mut(&characteristic.uuid)
.ok_or_else(|| {
Error::NotSupported("Characteristic not found for unsubscribe".into())
})?;
ble_characteristic.unsubscribe().await
}
async fn read(&self, characteristic: &Characteristic) -> Result<Vec<u8>> {
let ble_service = &*self
.shared
.ble_services
.get(&characteristic.service_uuid)
.ok_or_else(|| Error::NotSupported("Service not found for read".into()))?;
let ble_characteristic = ble_service
.characteristics
.get(&characteristic.uuid)
.ok_or_else(|| Error::NotSupported("Characteristic not found for read".into()))?;
ble_characteristic.read_value().await
}
async fn notifications(&self) -> Result<Pin<Box<dyn Stream<Item = ValueNotification> + Send>>> {
let receiver = self.shared.notifications_channel.subscribe();
Ok(notifications_stream_from_broadcast_receiver(receiver))
}
}
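// Usage sketch (illustrative, not part of this module): a client typically
// connects, discovers services, then reads characteristic values:
//
//     peripheral.connect().await?;
//     peripheral.discover_services().await?;
//     for service in peripheral.services() {
//         for characteristic in service.characteristics {
//             let _value = peripheral.read(&characteristic).await?;
//         }
//     }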
impl From<BDAddr> for PeripheralId {
fn from(address: BDAddr) -> Self {
PeripheralId(address)
}
}
| id |
environment.py | from rdb.rdb import db, LowerCaseText
from enum import Enum
import datetime
from resources.adminAccess import is_admin_user
from flask import g
import rdb.models.image as Image
from dockerUtil.dockerClient import dockerClient, wait_for_it
import config
import requests
import uuid
from flask_restful import abort
import rdb.models.user as User
class Environment(db.Model):
"""Environment Class"""
__tablename__ = "environment"
id = db.Column(db.Integer, autoincrement=True, primary_key=True)
name = db.Column(db.Text, nullable=False)
container_id = db.Column(db.Text, nullable=False)
container_name = db.Column(db.Text, nullable=False)
status = db.Column(LowerCaseText, nullable=False)
jupyter_port = db.Column(db.Text, nullable=False)
jupyter_token = db.Column(db.Text, nullable=False)
jupyter_url = None
description = db.Column(db.Text)
creator_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
# authorized_users = db.relationship('User', lazy='subquery', secondary='user_environment_access')
image_id = db.Column(db.Integer, db.ForeignKey('image.id'), nullable=False)
ml_models = db.relationship('MLModel', lazy='select', cascade='delete, delete-orphan', backref='environment')
created_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now())
updated_at = db.Column(db.DateTime(timezone=True), server_default=db.func.now(), onupdate=datetime.datetime.now)
def __init__(self):
super(Environment, self).__init__()
def __repr__(self):
"""Display when printing a environment object"""
return "<ID: {}, Name: {}, description: {}>".format(self.id, self.name, self.description)
def as_dict(self):
|
def handle_jupyter_data(self):
if is_admin_user() or g.user.id == self.creator_id:
self.set_jupyter_url()
else:
self.hide_jupyter_data()
def start_jupyter(self):
# wait for container api to be up and running
wait_for_it(self.container_name, 5000)
# start jupyter notebook and get jupyter token
resp = requests.post('http://' + self.container_name + ':5000/jupyter').json()
self.jupyter_token = str(resp['jupyter_token'])
self.status = Environment.Status.running.value
def set_jupyter_url(self):
# TODO: read host address from os - for now use config
host = config.KETOS_HOST
self.jupyter_url = host + ':' + self.jupyter_port + '/?token=' + self.jupyter_token
def hide_jupyter_data(self):
self.jupyter_port = None
self.jupyter_token = None
self.jupyter_url = None
def get_data_directory(self):
return config.KETOS_DATA_FOLDER + '/environments_data/' + self.container_name
class Status(Enum):
running = 'running'
stopped = 'stopped'
def get_open_port():
import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
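# Example (illustrative): the OS assigns any free ephemeral port, e.g.
#   port = get_open_port()  # -> 49731 (value varies per call)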
def create(name, desc, image_id, raise_abort=True):
e = Environment()
e.name = name
e.description = desc
i = Image.get(image_id, raise_abort=raise_abort)
e.image_id = i.id
e.creator_id = g.user.id
image_name = config.DOCKER_REGISTRY_DOMAIN + "/" + i.name
e.jupyter_port = get_open_port()
e.container_name = str(uuid.uuid4().hex)
container = dockerClient.containers.run(image_name,
name=e.container_name,
detach=True,
network=config.PROJECT_NAME+"_environment",
ports={"8000/tcp": e.jupyter_port},
volumes={e.get_data_directory(): {'bind': '/mlenvironment/models', 'mode': 'rw'},
config.KETOS_DATA_FOLDER+'/auth': {'bind': '/root/src/auth', 'mode': 'ro'}}
)
e.container_id = container.id
e.start_jupyter()
db.session.add(e)
db.session.commit()
e.set_jupyter_url()
return e
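# Minimal usage sketch (hypothetical values; requires a logged-in user in
# flask.g and an existing image row):
#
#   env = create(name="demo-env", desc="scratch environment", image_id=1)
#   env.jupyter_url  # e.g. "<host>:<port>/?token=<token>"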
def abort_if_environment_doesnt_exist(env_id):
abort(404, message="environment {} doesn't exist".format(env_id))
def get(env_id, raise_abort=True):
e = Environment.query.get(env_id)
    if not e:
        if raise_abort:
            abort_if_environment_doesnt_exist(env_id)
        return None
    e.handle_jupyter_data()
    return e
def get_all():
envs = Environment.query.all()
for e in envs:
e.handle_jupyter_data()
return envs
def get_all_for_user(user_id):
envs = Environment.query.filter_by(creator_id=user_id).all()
for e in envs:
e.handle_jupyter_data()
return envs
def get_by_image_id(image_id):
envs = Environment.query.filter_by(image_id=image_id).all()
for e in envs:
e.handle_jupyter_data()
return envs
def update(env_id, status=None, name=None, desc=None, raise_abort=True):
e = get(env_id, raise_abort=raise_abort)
User.check_request_for_logged_in_user(e.creator_id)
    if status and e.status != status:
if status == Environment.Status.running.value:
dockerClient.containers.get(e.container_id).start()
e.start_jupyter()
elif status == Environment.Status.stopped.value:
dockerClient.containers.get(e.container_id).stop()
else:
if raise_abort:
abort(400, message="status {} is not allowed".format(status))
else:
return None
e.status = status
if name:
e.name = name
if desc:
e.description = desc
db.session.commit()
return e
def delete(env_id, raise_abort=True):
e = get(env_id, raise_abort=raise_abort)
User.check_request_for_logged_in_user(e.creator_id)
    if e.status != 'stopped':
if raise_abort:
abort(405, message="environment must be stopped before it can be deleted")
else:
return None
container = dockerClient.containers.get(e.container_id)
container.remove(force=True)
db.session.delete(e)
db.session.commit()
return env_id
| """Convert object to dictionary"""
return {c.name: getattr(self, c.name) for c in self.__table__.columns} |
bert_ner.py | #! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
Copyright 2018 The Google AI Language Team Authors.
BASED ON Google_BERT.
@Author:zhoukaiyin
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
from bert import modeling
from bert import optimization
from bert import tokenization
import tensorflow as tf
from sklearn.metrics import f1_score,precision_score,recall_score
from tensorflow.python.ops import math_ops
import tf_metrics
flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string(
"data_dir", './drive/My Drive/ai/NERdata',
"The input datadir.",
)
flags.DEFINE_string(
"bert_config_file", './drive/My Drive/ai/checkpoint/bert_config.json',
"The config json file corresponding to the pre-trained BERT model."
)
flags.DEFINE_string(
"task_name", 'NER', "The name of the task to train."
)
flags.DEFINE_string(
"output_dir", './drive/My Drive/ai/output/result_dir/',
"The output directory where the model checkpoints will be written."
)
flags.DEFINE_string(
"tpu_name", 'gcp_tpu',
"Use Google Cloud Colaborator TPU to train"
)
## Other parameters
flags.DEFINE_string(
"init_checkpoint", './drive/My Drive/ai/checkpoint/bert_model.ckpt',
"Initial checkpoint (usually from a pre-trained BERT model)."
)
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text."
)
flags.DEFINE_integer(
"max_seq_length", 128,
"The maximum total input sequence length after WordPiece tokenization."
)
flags.DEFINE_bool(
"do_train", True,
"Whether to run training."
)
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_string("vocab_file", './drive/My Drive/ai/checkpoint/vocab.txt',
"The vocabulary file that the BERT model was trained on.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text = text
self.label = label
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, input_mask, segment_ids, label_ids):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_ids = label_ids
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_data(cls, input_file):
"""Reads a BIO data."""
with open(input_file) as f:
lines = []
words = []
labels = []
for line in f:
contends = line.strip()
word = line.strip().split(' ')[0]
label = line.strip().split(' ')[-1]
if contends.startswith("-DOCSTART-"):
words.append('')
continue
if len(contends) == 0 and words[-1] == '.':
l = ' '.join([label for label in labels if len(label) > 0])
w = ' '.join([word for word in words if len(word) > 0])
lines.append([l, w])
words = []
labels = []
continue
words.append(word)
labels.append(label)
return lines
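# Illustrative input for DataProcessor._read_data above (CoNLL-2003 style BIO
# file: one "word label" pair per line, sentences flushed on the blank line
# following a final "."):
#
#   EU B-ORG
#   rejects O
#   German B-MISC
#   call O
#   . O
#
# which would yield [["B-ORG O B-MISC O O", "EU rejects German call ."]].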
class NerProcessor(DataProcessor):
def get_train_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "train.txt")), "train"
)
def get_dev_examples(self, data_dir):
return self._create_example(
self._read_data(os.path.join(data_dir, "dev.txt")), "dev"
)
def get_labels(self):
return ["B-MISC", "I-MISC", "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X"]
def _create_example(self, lines, set_type):
examples = []
for (i, line) in enumerate(lines):
guid = "%s-%s" % (set_type, i)
text = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[0])
examples.append(InputExample(guid=guid, text=text, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer):
label_map = {}
for (i, label) in enumerate(label_list, 1):
label_map[label] = i
textlist = example.text.split(' ')
labellist = example.label.split(' ')
tokens = []
labels = []
for i, word in enumerate(textlist):
token = tokenizer.tokenize(word)
tokens.extend(token)
label_1 = labellist[i]
for m in range(len(token)):
if m == 0:
labels.append(label_1)
else:
labels.append("X")
# tokens = tokenizer.tokenize(example.text)
if len(tokens) >= max_seq_length - 1:
tokens = tokens[0:(max_seq_length - 2)]
labels = labels[0:(max_seq_length - 2)]
ntokens = []
segment_ids = []
label_ids = []
ntokens.append("[CLS]")
segment_ids.append(0)
label_ids.append(0)
for i, token in enumerate(tokens):
ntokens.append(token)
segment_ids.append(0)
label_ids.append(label_map[labels[i]])
ntokens.append("[SEP]")
segment_ids.append(0)
label_ids.append(0)
input_ids = tokenizer.convert_tokens_to_ids(ntokens)
input_mask = [1] * len(input_ids)
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
label_ids.append(0)
# print(len(input_ids))
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
assert len(label_ids) == max_seq_length
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids]))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_ids=label_ids
)
return feature
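# Illustrative example of the sub-token label expansion performed above
# (tokenization is hypothetical): if the word "Johanson" has label "B-PER" and
# WordPiece splits it into ["johan", "##son"], the emitted labels are
# ["B-PER", "X"] -- only the first sub-token keeps the original tag.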
def filed_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file
):
|
def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder):
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([seq_length], tf.int64),
}
def _decode_record(record, name_to_features):
example = tf.parse_single_example(record, name_to_features)
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
batch_size = params["batch_size"]
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=drop_remainder
))
return d
return input_fn
def create_model(bert_config, is_training, input_ids, input_mask,
segment_ids, labels, num_labels, use_one_hot_embeddings):
model = modeling.BertModel(
config=bert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings
)
output_layer = model.get_sequence_output()
hidden_size = output_layer.shape[-1].value
output_weight = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02)
)
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer()
)
with tf.variable_scope("loss"):
if is_training:
output_layer = tf.nn.dropout(output_layer, keep_prob=0.9)
output_layer = tf.reshape(output_layer, [-1, hidden_size])
logits = tf.matmul(output_layer, output_weight, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
        logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, num_labels])
log_probs = tf.nn.log_softmax(logits, axis=-1)
# labels = tf.cast(labels,dtype=tf.float32)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_sum(per_example_loss)
return (loss, per_example_loss, logits)
def model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
def model_fn(features, labels, mode, params):
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, logits) = create_model(
bert_config, is_training, input_ids, input_mask, segment_ids, label_ids,
num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
        scaffold_fn = None
        initialized_variable_names = {}
        if init_checkpoint:
            (assignment_map, initialized_variable_names) = \
                modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, logits):
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro")
loss = tf.metrics.mean(per_example_loss)
return {
"eval_precision":precision,
"eval_recall":recall,
"eval_f": f,
"eval_loss": loss,
}
eval_metrics = (metric_fn, [per_example_loss, label_ids, logits])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode))
return output_spec
return model_fn
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"ner": NerProcessor
}
if not FLAGS.do_train and not FLAGS.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
if FLAGS.max_seq_length > bert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the BERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, bert_config.max_position_embeddings))
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR'])
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
bert_config=bert_config,
num_labels=len(label_list)+1,
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=FLAGS.use_tpu,
model_fn=model_fn,
config=run_config,
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
filed_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
filed_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d", len(eval_examples))
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
eval_steps = None
if FLAGS.use_tpu:
eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
with open(output_eval_file, "w") as writer:
tf.logging.info("***** Eval results *****")
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
if __name__ == "__main__":
tf.app.run()
| writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature(feature.label_ids)
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString()) |
reporter.py | """
The reporter is the service responsible for handling NGSI notifications,
validating them, and feeding the corresponding updates to the translator.
The reporter needs to know the form of the entity (i.e, name and types of its
attributes). There are two approaches:
1 - Clients tell reporter which entities they care about and Reporter goes
find the metadata in Context Broker
2 - The reporter only consumes the Context Broker notifications and builds
little by little the whole entity.
    In this case, the notifications must come with some minimum amount of
required data (e.g, entity_type, entity_id, a time index and the
updated value[s]). Ideally, in the first notification the reporter
would be notified of all the entity attributes so that it can tell the
translator how to create the complete corresponding table[s] in the
database.
For now, we have adopted approach 2.
TODO:
- Validate entity and attribute names against valid NGSI names and valid
[Crate names](https://crate.io/docs/crate/reference/en/latest/sql/ddl/basics.html#naming-restrictions)
- Raise warning and act accordingly when receiving entity with equal lowercased
attributes.
- Consider offering an API endpoint to receive just the user's entities of
interest and make QL actually perform the corresponding subscription to orion.
I.e, QL must be told where orion is.
"""
from flask import has_request_context, request
from geocoding import geocoding
from geocoding.factory import get_geo_cache, is_geo_coding_available
from requests import RequestException
from translators.sql_translator import SQLTranslator
from translators.factory import translator_for
from utils.common import iter_entity_attrs, TIME_INDEX_NAME
import json
import logging
import requests
from reporter.subscription_builder import build_subscription
from reporter.timex import select_time_index_value_as_iso, \
TIME_INDEX_HEADER_NAME
from geocoding.location import normalize_location, LOCATION_ATTR_NAME
from exceptions.exceptions import AmbiguousNGSIIdError, UnsupportedOption, \
NGSIUsageError, InvalidParameterValue, InvalidHeaderValue
def log():
logger = logging.getLogger(__name__)
return logger
def is_text(attr_type):
return SQLTranslator.is_text(attr_type)
def has_value(entity, attr_name):
attr = entity.get(attr_name, {})
if attr is None:
attr = {}
attr_value = attr.get('value', None)
attr_type = attr.get('type', None)
if attr_value is None:
return False
if is_text(attr_type):
return True
if isinstance(attr_value, str):
attr_value = attr_value.strip()
# If type != Text and value == '', make value = null
return attr_value != ''
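# Examples (illustrative, assuming SQLTranslator.is_text recognizes 'Text'):
#
#   has_value({'name': {'value': '', 'type': 'Text'}}, 'name')     # True
#   has_value({'temp': {'value': ' ', 'type': 'Number'}}, 'temp')  # False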
def _validate_payload(payload):
"""
:param payload:
The received json data in the notification.
:return: str | None
Error message, if any.
Note that some attributes are actually verified by Connexion framework (
e.g type and id). We leave the checks as double-checking.
"""
# The entity must be uniquely identifiable
if 'type' not in payload:
return 'Entity type is required in notifications'
if 'id' not in payload:
return 'Entity id is required in notifications'
# There should be at least one attribute other than id and type
# (i.e, the changed value)
attrs = list(iter_entity_attrs(payload))
if len(attrs) == 0:
log().warning("Received notification containing an entity update "
"without attributes other than 'type' and 'id'")
# Attributes should have a value and the modification time
for attr in attrs:
if not has_value(payload, attr):
payload[attr].update({'value': None})
log().warning(
'An entity update is missing value '
'for attribute {}'.format(attr))
def _filter_empty_entities(payload):
    """Return the payload if at least one attribute carries a value, else None."""
    log().debug('Received payload')
attrs = list(iter_entity_attrs(payload))
empty = False
attrs.remove('time_index')
for j in attrs:
value = payload[j]['value']
        if isinstance(value, int):  # numeric values (including 0) count as non-empty
empty = True
elif value:
empty = True
if empty:
return payload
else:
return None
def _filter_no_type_no_value_entities(payload):
    """Drop attributes that have neither a type nor a value."""
attrs = list(iter_entity_attrs(payload))
attrs.remove('time_index')
for i in attrs:
attr = payload.get(i, {})
attr_value = attr.get('value', None)
attr_type = attr.get('type', None)
if not attr_type and not attr_value:
del payload[i]
return payload
def notify():
if request.json is None:
return 'Discarding notification due to lack of request body. ' \
'Lost in a redirect maybe?', 400
if 'data' not in request.json:
return 'Discarding notification due to lack of request body ' \
'content.', 400
payload = request.json['data']
# preprocess and validate each entity update
for entity in payload:
# Validate entity update
error = _validate_payload(entity)
if error:
            # TODO: this returns an error even if only one entity is wrong
return error, 400
# Add TIME_INDEX attribute
custom_index = request.headers.get(TIME_INDEX_HEADER_NAME, None)
entity[TIME_INDEX_NAME] = \
select_time_index_value_as_iso(custom_index, entity)
# Add GEO-DATE if enabled
if not entity.get(LOCATION_ATTR_NAME, None):
add_geodata(entity)
# Always normalize location if there's one
normalize_location(entity)
# Get FIWARE CORRELATOR - if any
fiware_c = request.headers.get('fiware_correlator', None)
# Get Remote address
remote_addr = request.remote_addr
# Define FIWARE tenant
fiware_s = request.headers.get('fiware-service', None)
# It seems orion always sends a 'Fiware-Servicepath' header with value '/'
# But this is not correctly documented in the API, so in order not to
# depend on this, QL will not treat servicepath if there's no service
# specified.
if fiware_s:
fiware_sp = request.headers.get('fiware-servicepath', None)
else:
fiware_sp = None
res_entity = []
e = None
for entity in payload:
# Validate entity update
e = _filter_empty_entities(entity)
if e is not None:
e_new = _filter_no_type_no_value_entities(e)
res_entity.append(e_new)
payload = res_entity
entity_id = [i["id"] for i in payload]
# Send valid entities to translator
try:
with translator_for(fiware_s) as trans:
trans.insert(payload, fiware_s, fiware_sp)
except Exception as e:
msg = "Notification not processed or not updated: {}"
log().error(msg.format(e), exc_info=True)
error_code = 500
if e.__class__ == InvalidHeaderValue or \
e.__class__ == InvalidParameterValue or \
e.__class__ == NGSIUsageError:
error_code = 400
return msg, error_code
msg = "Notification successfully processed"
log().info(msg)
return msg
def add_geodata(entity):
if is_geo_coding_available():
cache = get_geo_cache()
geocoding.add_location(entity, cache=cache)
def | ():
r = {
"error": "Not Implemented",
"description": "This API method is not yet implemented."
}
return r, 501
def subscribe(orion_url,
quantumleap_url,
entity_type=None,
entity_id=None,
id_pattern=None,
attributes=None,
observed_attributes=None,
notified_attributes=None,
throttling=None,
time_index_attribute=None):
# Validate Orion
log().warning("This API is deprecated, it will be removed in version 0.9")
try:
r = requests.get(orion_url)
except RequestException:
r = None
if r is None or not r.ok:
msg = {
"error": "Bad Request",
"description": "Orion is not reachable at {}".format(orion_url)
}
return msg, 400
# Prepare subscription
subscription = build_subscription(
quantumleap_url,
entity_type, entity_id, id_pattern,
attributes, observed_attributes, notified_attributes,
throttling, time_index_attribute)
# Send subscription
endpoint = '{}/subscriptions'.format(orion_url)
data = json.dumps(subscription)
headers = {'Content-Type': 'application/json'}
fiware_s = request.headers.get('fiware-service', None)
if fiware_s:
headers['fiware-service'] = fiware_s
fiware_sp = request.headers.get('fiware-servicepath', None)
if fiware_sp:
headers['fiware-servicepath'] = fiware_sp
r = requests.post(endpoint, data=data, headers=headers)
if not r.ok:
log().debug("subscribing to {} with headers: {} and data: {}")
msg = r.text + \
" - This API is deprecated, it will be removed in version 0.9"
return msg, r.status_code
def _validate_query_params(attr_names, aggr_period, aggr_method,
aggr_scope=None, options=None):
if aggr_period and not aggr_method:
r = {
"error": "Bad parameters use",
"description": "aggrMethod is compulsory when using aggrPeriod."
}
return r, 400
if options or aggr_scope not in (None, 'entity'):
r = {
"error": "Not implemented option",
"description": "aggrScope and options are not yet implemented."
}
return r, 501
if aggr_method and not attr_names:
msg = "Specified aggrMethod = {} but missing attrs parameter."
r = {
"error": "Bad parameters use",
"description": msg.format(aggr_method)
}
return r, 400
return "OK", 200
| config |
controls.tsx | import {
h, Host, Component,
Prop, Element, Watch,
State,
} from '@stencil/core';
import { PlayerProps } from '../../../core/player/PlayerProps';
import { Dispatcher, createDispatcher } from '../../../core/player/PlayerDispatcher';
import { Disposal } from '../../../core/player/Disposal';
import { listen, isColliding } from '../../../../utils/dom';
import { isNullOrUndefined } from '../../../../utils/unit';
import { debounce } from '../../../../utils/timing';
import { findRootPlayer } from '../../../core/player/utils';
import { findUIRoot } from '../../ui/utils';
import { withPlayerContext } from '../../../core/player/PlayerContext';
/**
* We want to keep the controls active state in-sync per player.
*/
const playerRef: Record<any, HTMLVimePlayerElement> = {};
const hideControlsTimeout: Record<any, number | undefined> = {};
const captionsCollisions = new Map<any, number>();
const settingsCollisions = new Map<any, number>();
/**
* @slot - Used to pass in controls.
*/
@Component({
tag: 'vime-controls',
styleUrl: 'controls.scss',
})
export class Controls {
private dispatch!: Dispatcher;
private disposal = new Disposal();
@Element() el!: HTMLVimeControlsElement;
@State() isInteracting = false;
/**
* Whether the controls are visible or not.
*/
@Prop() hidden = false;
/**
* Whether the controls container should be 100% width. This has no effect if the view is of
* type `audio`.
*/
@Prop() fullWidth = false;
/**
* Whether the controls container should be 100% height. This has no effect if the view is of
* type `audio`.
*/
@Prop() fullHeight = false;
/**
   * Sets the `flex-direction` property that manages the direction in which the controls are laid
* out.
*/
@Prop() direction: 'row' | 'column' = 'row';
/**
* Sets the `align-items` flex property that aligns the individual controls on the cross-axis.
*/
@Prop() align: 'start' | 'center' | 'end' = 'center';
/**
* Sets the `justify-content` flex property that aligns the individual controls on the main-axis.
*/
@Prop() justify: 'start'
| 'center'
| 'end'
| 'space-around'
| 'space-between'
| 'space-evenly' = 'start';
/**
* Pins the controls to the defined position inside the video player. This has no effect when
* the view is of type `audio`.
*/
@Prop({
reflect: true,
}) pin: 'topLeft' | 'topRight' | 'bottomLeft' | 'bottomRight' | 'center' = 'bottomLeft';
/**
* The length in milliseconds that the controls are active for before fading out. Audio players
   * are not affected by this prop.
*/
@Prop() activeDuration = 2750;
/**
* Whether the controls should wait for playback to start before being shown. Audio players
   * are not affected by this prop.
*/
@Prop() waitForPlaybackStart = false;
/**
   * Whether the controls should show/hide when paused. Audio players are not affected by this prop.
*/
@Prop() hideWhenPaused = false;
/**
* Whether the controls should hide when the mouse leaves the player. Audio players are not
   * affected by this prop.
*/
@Prop() hideOnMouseLeave = false;
/**
* @internal
*/
@Prop() isAudioView: PlayerProps['isAudioView'] = false;
/**
* @internal
*/
@Prop() isSettingsActive: PlayerProps['isSettingsActive'] = false;
/**
* @internal
*/
@Prop() playbackReady: PlayerProps['playbackReady'] = false;
/**
* @internal
*/
@Prop() isControlsActive: PlayerProps['isControlsActive'] = false;
/**
* @internal
*/
@Prop() paused: PlayerProps['paused'] = true;
/**
* @internal
*/
@Prop() playbackStarted: PlayerProps['playbackStarted'] = false; |
constructor() {
withPlayerContext(this, [
'playbackReady',
'isAudioView',
'isControlsActive',
'isSettingsActive',
'paused',
'playbackStarted',
]);
}
connectedCallback() {
this.dispatch = createDispatcher(this);
this.onControlsChange();
this.setupPlayerListeners();
this.checkForCaptionsCollision();
this.checkForSettingsCollision();
}
componentWillLoad() {
this.onControlsChange();
}
componentDidRender() {
this.checkForCaptionsCollision();
this.checkForSettingsCollision();
}
disconnectedCallback() {
this.disposal.empty();
delete hideControlsTimeout[playerRef[this]];
delete playerRef[this];
captionsCollisions.delete(this);
settingsCollisions.delete(this);
}
private setupPlayerListeners() {
const player = findRootPlayer(this);
const events = ['focus', 'keydown', 'click', 'touchstart', 'mouseleave'];
events.forEach((event) => {
this.disposal.add(listen(player, event, this.onControlsChange.bind(this)));
});
this.disposal.add(
listen(player, 'mousemove', debounce(this.onControlsChange, 50, true).bind(this)),
);
// @ts-ignore
playerRef[this] = player;
}
private getHeight() {
return parseFloat(window.getComputedStyle(this.el).height);
}
private adjustHeightOnCollision(selector: 'vime-captions' | 'vime-settings', marginTop = 0) {
const el = findUIRoot(this)?.querySelector(selector);
if (isNullOrUndefined(el)) return;
const height = this.getHeight() + marginTop;
const aboveControls = (selector === 'vime-settings')
&& ((el as HTMLVimeSettingsElement).pin.startsWith('top'));
const hasCollided = isColliding(el, this.el);
const willCollide = isColliding(el, this.el, 0, aboveControls ? -height : height);
const collisions = (selector === 'vime-captions') ? captionsCollisions : settingsCollisions;
collisions.set(this, (hasCollided || willCollide) ? height : 0);
el.controlsHeight = Math.max(0, Math.max(...collisions.values()));
}
private checkForCaptionsCollision() {
if (this.isAudioView) return;
this.adjustHeightOnCollision('vime-captions');
}
private checkForSettingsCollision() {
this.adjustHeightOnCollision('vime-settings', (this.isAudioView ? 4 : 0));
}
private show() {
this.dispatch('isControlsActive', true);
}
private hide() {
this.dispatch('isControlsActive', false);
}
private hideWithDelay() {
// @ts-ignore
clearTimeout(hideControlsTimeout[playerRef[this]]);
hideControlsTimeout[playerRef[this]] = setTimeout(() => {
this.hide();
}, this.activeDuration) as any;
}
@Watch('paused')
@Watch('hidden')
@Watch('isAudioView')
@Watch('isInteracting')
@Watch('isSettingsActive')
@Watch('hideWhenPaused')
@Watch('hideOnMouseLeave')
@Watch('playbackStarted')
@Watch('waitForPlaybackStart')
@Watch('playbackReady')
private onControlsChange(event?: Event) {
// @ts-ignore
clearTimeout(hideControlsTimeout[playerRef[this]]);
if (this.hidden || !this.playbackReady) {
this.hide();
return;
}
if (this.isAudioView) {
this.show();
return;
}
if (this.waitForPlaybackStart && !this.playbackStarted) {
this.hide();
return;
}
if (this.isInteracting || this.isSettingsActive) {
this.show();
return;
}
if (this.hideWhenPaused && this.paused) {
this.hideWithDelay();
return;
}
if (this.hideOnMouseLeave && !this.paused && (event?.type === 'mouseleave')) {
this.hide();
return;
}
if (!this.paused) {
this.show();
this.hideWithDelay();
return;
}
this.show();
}
private getPosition() {
if (this.isAudioView) return {};
if (this.pin === 'center') {
return {
top: '50%',
left: '50%',
transform: 'translate(-50%, -50%)',
};
}
// topLeft => { top: 0, left: 0 }
    const pos = this.pin.split(/(?=[LR])/).map((s) => s.toLowerCase());
return { [pos[0]]: 0, [pos[1]]: 0 };
}
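  // Example (illustrative): pin = 'bottomRight' splits into ['bottom', 'right']
  // and produces { bottom: 0, right: 0 }.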
private onStartInteraction() {
this.isInteracting = true;
}
private onEndInteraction() {
this.isInteracting = false;
}
render() {
return (
<Host
style={{
...this.getPosition(),
flexDirection: this.direction,
alignItems: (this.align === 'center') ? 'center' : `flex-${this.align}`,
justifyContent: this.justify,
}}
class={{
audio: this.isAudioView,
hidden: this.hidden,
active: this.playbackReady && this.isControlsActive,
fullWidth: this.isAudioView || this.fullWidth,
fullHeight: !this.isAudioView && this.fullHeight,
}}
onMouseEnter={this.onStartInteraction.bind(this)}
onMouseLeave={this.onEndInteraction.bind(this)}
onTouchStart={this.onStartInteraction.bind(this)}
onTouchEnd={this.onEndInteraction.bind(this)}
>
<slot />
</Host>
);
}
} | |
dynamic-bootstrap-form-group.component.ts | import { Component, EventEmitter, Input, Output, QueryList } from "@angular/core";
import { FormGroup } from "@angular/forms";
import {
DynamicFormControlComponent,
DynamicFormControlCustomEvent,
DynamicFormGroupModel,
DynamicFormLayout,
DynamicFormLayoutService,
DynamicFormValidationService,
DynamicTemplateDirective
} from "@ng-dynamic-forms/core";
@Component({
selector: "dynamic-bootstrap-form-group",
templateUrl: "./dynamic-bootstrap-form-group.component.html"
})
export class | extends DynamicFormControlComponent {
@Input() bindId: boolean = true;
@Input() group: FormGroup;
@Input() layout: DynamicFormLayout;
@Input() model: DynamicFormGroupModel;
@Input() templates: QueryList<DynamicTemplateDirective> | DynamicTemplateDirective[] | undefined;
@Output() blur: EventEmitter<any> = new EventEmitter();
@Output() change: EventEmitter<any> = new EventEmitter();
@Output() customEvent: EventEmitter<DynamicFormControlCustomEvent> = new EventEmitter();
@Output() focus: EventEmitter<any> = new EventEmitter();
constructor(protected layoutService: DynamicFormLayoutService,
protected validationService: DynamicFormValidationService) {
super(layoutService, validationService);
}
} | DynamicBootstrapFormGroupComponent |
test_merge_if_blocks.py | from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks_once
from rpython.translator.backendopt.merge_if_blocks import merge_if_blocks
from rpython.translator.backendopt.all import backend_optimizations
from rpython.translator.translator import TranslationContext, graphof as tgraphof
from rpython.flowspace.model import Block, checkgraph
from rpython.translator.backendopt.removenoops import remove_same_as
from rpython.rtyper.llinterp import LLInterpreter
from rpython.rlib.rarithmetic import r_uint, r_ulonglong, r_longlong, r_int
from rpython.annotator.model import SomeChar, SomeUnicodeCodePoint
from rpython.rlib.objectmodel import CDefinedIntSymbolic
def do_test_merge(fn, testvalues):
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [type(testvalues[0])])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, fn)
assert len(list(graph.iterblocks())) == 4 #startblock, blocks, returnblock
remove_same_as(graph)
merge_if_blocks_once(graph)
assert len(graph.startblock.exits) == 4
assert len(list(graph.iterblocks())) == 2 #startblock, returnblock
interp = LLInterpreter(rtyper)
for i in testvalues:
expected = fn(i)
actual = interp.eval_graph(graph, [i])
assert actual == expected
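# The assertions above document the pass's effect: before merging, the graph
# has 4 blocks (start block, two condition blocks, return block); afterwards
# the equality chain is folded into the start block, which gains one exit per
# case (4 in total), leaving only the start and return blocks.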
def test_merge1():
def merge_int(n):
n += 1
if n == 1:
return 1
elif n == 2:
return 2
elif n == 3:
return 3
return 4
do_test_merge(merge_int, range(4))
do_test_merge(merge_int, [r_uint(i) for i in range(4)])
# this has been disabled:
#if r_longlong is not r_int:
# do_test_merge(merge_int, [r_longlong(i) for i in range(4)])
#do_test_merge(merge_int, [r_ulonglong(i) for i in range(4)])
def merge_chr(n):
c = chr(n + 1)
if c == 'a':
return 'a'
elif c == 'b':
return 'b'
elif c == 'c':
return 'c'
return 'd'
do_test_merge(merge_chr, range(96, 101))
def merge_uchr(n):
c = unichr(n + 1)
if c == u'a':
return u'a'
elif c == u'b':
return u'b'
elif c == u'c':
return u'c'
return u'd'
do_test_merge(merge_uchr, range(96, 101))
def test_merge_passonvars():
def merge(n, m):
if n == 1:
return m + 1
elif n == 2:
return m + 2
elif n == 3:
return m + 3
return m + 4
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
assert len(list(graph.iterblocks())) == 8
remove_same_as(graph)
merge_if_blocks_once(graph)
assert len(graph.startblock.exits) == 4
interp = LLInterpreter(rtyper)
for i in range(1, 5):
res = interp.eval_graph(graph, [i, 1])
assert res == i + 1
def test_merge_several():
def merge(n, m):
r = -1
if n == 0:
|
elif n == 1:
r = 4
else:
r = 6
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
remove_same_as(graph)
merge_if_blocks(graph)
assert len(graph.startblock.exits) == 3
assert len(list(graph.iterblocks())) == 3
interp = LLInterpreter(rtyper)
for m in range(3):
res = interp.eval_graph(graph, [0, m])
assert res == m
res = interp.eval_graph(graph, [1, 0])
assert res == 4
res = interp.eval_graph(graph, [2, 0])
assert res == 6
def test_merge_with_or():
def merge(n):
if n == 5:
return 4
elif n == 14 or n == 2:
return 16
else:
return 7
do_test_merge(merge, [5, 6, 14, 2, 3, 123])
def test_dont_merge():
def merge(n, m):
r = -1
if n == 0:
r += m
if n == 1:
r += 2 * m
else:
r += 6
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(merge, [int, int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = tgraphof(t, merge)
remove_same_as(graph)
blocknum = len(list(graph.iterblocks()))
merge_if_blocks(graph)
assert blocknum == len(list(graph.iterblocks()))
def test_two_constants():
def fn():
r = range(10, 37, 4)
r.reverse()
return r[0]
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [])
rtyper = t.buildrtyper()
rtyper.specialize()
backend_optimizations(t, merge_if_blocks=True)
graph = tgraphof(t, fn)
blocknum = len(list(graph.iterblocks()))
merge_if_blocks(graph)
assert blocknum == len(list(graph.iterblocks()))
def test_same_cases():
def fn(x):
if x == 42:
r = 1
elif x == 42:
r = 2
else:
r = 3
return r
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [int])
rtyper = t.buildrtyper()
rtyper.specialize()
backend_optimizations(t, merge_if_blocks=True)
graph = tgraphof(t, fn)
assert len(graph.startblock.exits) == 2
interp = LLInterpreter(rtyper)
for i in [42, 43]:
expected = fn(i)
actual = interp.eval_graph(graph, [i])
assert actual == expected
def test_replace_exitswitch_by_constant_bug():
class X:
pass
def constant9():
x = X()
x.n = 3
x.n = 9
return x.n
def fn():
n = constant9()
if n == 1: return 5
elif n == 2: return 6
elif n == 3: return 8
elif n == 4: return -123
elif n == 5: return 12973
else: return n
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = t.graphs[0]
remove_same_as(graph)
merge_if_blocks_once(graph)
from rpython.translator.backendopt import malloc, inline
inline.auto_inlining(t, 20)
malloc.remove_mallocs(t, t.graphs)
from rpython.translator import simplify
simplify.join_blocks(graph)
def test_switch_on_symbolic():
symb1 = CDefinedIntSymbolic("1", 1)
symb2 = CDefinedIntSymbolic("2", 2)
symb3 = CDefinedIntSymbolic("3", 3)
def fn(x):
res = 0
if x == symb1:
res += x + 1
elif x == symb2:
res += x + 2
elif x == symb3:
res += x + 3
res += 1
return res
t = TranslationContext()
a = t.buildannotator()
a.build_types(fn, [int])
rtyper = t.buildrtyper()
rtyper.specialize()
graph = t.graphs[0]
remove_same_as(graph)
res = merge_if_blocks_once(graph)
assert not res
checkgraph(graph)
| if m == 0:
r = 0
elif m == 1:
r = 1
else:
r = 2 |
umm_client_async.py | import json
from app.service.http_client_async import async_http_client
service_name = 'umm'
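# Thin async wrappers around the UMM service's task, solution, artifact and document endpoints.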
async def create_task(task, jwt):
return await async_http_client('post', service_name, '/api/tasks', body=json.dumps(task.__dict__), jwt=jwt)
async def update_task(task, jwt):
return await async_http_client('put', service_name, '/api/tasks', body=json.dumps(task.__dict__), jwt=jwt)
async def get_tasks(uuid, jwt):
return await async_http_client('get', service_name, '/api/tasks?uuid={}'.format(uuid), jwt=jwt)
async def create_task_step(task_step, jwt):
return await async_http_client('post', service_name, '/api/task-steps', body=json.dumps(task_step.__dict__), jwt=jwt)
async def get_solutions(uuid, jwt=None):
return await async_http_client('get', service_name, '/api/solutions?uuid={}'.format(uuid), jwt=jwt)
async def create_solution(solution, jwt):
return await async_http_client('post', service_name, '/api/solutions', body=json.dumps(solution.__dict__), jwt=jwt)
async def delete_solution(id, jwt):
return await async_http_client('delete', service_name, '/api/solutions/{}'.format(id), jwt=jwt)
async def create_artifact(artifact, jwt):
return await async_http_client('post', service_name, '/api/artifacts', body=json.dumps(artifact.__dict__), jwt=jwt)
async def get_all_artifacts(solution_uuid, jwt=None):
return await async_http_client('get', service_name, '/api/artifacts?solutionUuid={}'.format(solution_uuid), jwt=jwt)
async def delete_artifact(id, jwt):
return await async_http_client('delete', service_name, '/api/artifacts/{}'.format(id), jwt=jwt)
async def create_document(document, jwt):
return await async_http_client('post', service_name, '/api/documents', body=json.dumps(document.__dict__), jwt=jwt)
|
async def delete_document(id, jwt):
return await async_http_client('delete', service_name, '/api/documents/{}'.format(id), jwt=jwt) | async def get_document(id, jwt=None):
return await async_http_client('get', service_name, '/api/documents/{}'.format(id), jwt=jwt)
|
apps.py | from django.apps import AppConfig
class | (AppConfig):
name = 'shortnsweet'
| ShortnsweetConfig |
CHAIN_INSPECTION.py | """
CHAIN INSPECTION.
https://www.codeeval.com/open_challenges/119/
We use a special generator to produce chains of elements. Don't ask why we
need them; we're not sure anybody knows the answer to this question. A
chain is represented by a string of name-address pairs: the first element
is the name of a pair and the second one (the address) points to the name of
the next pair. E.g.
BEGIN-3;4-2;3-4;2-END # GOOD
77-END;BEGIN-8;8-11;11-77 # GOOD
In the examples above we can pass through the chains from 'BEGIN' to 'END'
without missing a single pair. In the first case we moved from 'BEGIN' to 3,
from 3 to 4, from 4 to 2, from 2 to 'END'. In the second case we moved from
'BEGIN' to 8, from 8 to 11, from 11 to 77, from 77 to 'END'.
Our generator was producing only good chains, but something went wrong and
now it generates random chains and we are not sure if it's a good chain or a
bad one. E.g.
BEGIN-3;4-3;3-4;2-END # BAD
77-END;BEGIN-8;8-77;11-11 # BAD
In the first case the 'END' is unreachable because we have a loop between
3 and 4.
In the second case we can reach the 'END' but we missed one pair (11-11).
We know that for a BAD chain the generator first produces a GOOD chain, but
after that it may replace existing addresses in some pairs with an address
from another pair. It never replaces an address in a pair with an address
that isn't present in the original chain.
You can help us by writing a program that investigates the input and finds
GOOD and BAD chains.
INPUT SAMPLE:
Your program should accept as its first argument a path to a filename. Each
string in this file is a chain. The pairs are separated by a semicolon; the
names and the addresses are separated by a dash. E.g.
4-2;BEGIN-3;3-4;2-END
4-2;BEGIN-3;3-4;2-3
OUTPUT SAMPLE:
For each line of input print out the chain status. E.g.
GOOD
BAD
Constraints:
The number of pairs in a chain is in range [1, 500]
The addresses are integers in range [2, 10000]
-------------------------------------------------------
process:
Read a line from the input file
split the chain parts into pairs
create a dictionary of chain connections
determine if chain is good or bad:
Trace from BEGIN
Find END
Detect loops
Detect missed chain links
"""
from sys import argv
def make_links(line):
"""Split a line into parts, return a dictionary of chain links."""
link_parts = line.split(";")
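    # each pair looks like 'name-address'; build a {name: address} mapping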
chain_dict = {
k: v for k, v in tuple([x.split('-') for x in link_parts])
}
return chain_dict
def | (chain):
"""Return whether a chain is 'GOOD' or 'BAD'."""
next_key = chain.pop('BEGIN')
while True:
try:
next_key = chain.pop(next_key)
if next_key == "END":
break
except KeyError:
return "BAD"
if len(chain) > 0:
return "BAD"
return "GOOD"
def main(input_file):
"""Run the process as described in the challenge description."""
with open(input_file, "r") as file:
for line in file:
line = line.rstrip()
# split the chain parts into pairs
# create a dictionary of chain connections
links = make_links(line)
chain_state = inspect_chain(links)
print(chain_state)
if __name__ == "__main__":
main(argv[1])
| inspect_chain |
admin.py | from django.contrib import admin
from . import tasks
from .app_settings import FREIGHT_DEVELOPER_MODE
from .models import (
Contract,
ContractCustomerNotification,
ContractHandler,
EveEntity,
Location,
Pricing,
)
from .tasks import update_locations
@admin.register(Location)
class LocationAdmin(admin.ModelAdmin):
list_display = ("id", "name", "_category", "_solar_system")
list_filter = ("category_id",)
search_fields = ["name"]
list_select_related = True
actions = ["update_location"]
if not FREIGHT_DEVELOPER_MODE:
list_display_links = None
def _category(self, obj):
return obj.get_category_id_display()
_category.admin_order_field = "category_id"
def _solar_system(self, obj):
return obj.solar_system_name
def has_add_permission(self, request):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
    def has_change_permission(self, request, obj=None):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
def update_location(self, request, queryset):
location_ids = list()
for obj in queryset:
location_ids.append(obj.pk)
update_locations.delay(location_ids)
self.message_user(
request,
"Started updating {} locations. "
"This can take a short while to complete.".format(len(location_ids)),
)
update_location.short_description = "Update selected locations from ESI"
if FREIGHT_DEVELOPER_MODE:
@admin.register(EveEntity)
class EveEntityAdmin(admin.ModelAdmin):
list_display = ("name", "category")
list_filter = ("category",)
@admin.register(Pricing)
class PricingAdmin(admin.ModelAdmin):
list_display = (
"name",
"start_location",
"end_location",
"_bidirectional",
"_default",
"_active",
)
list_filter = (
"is_bidirectional",
"is_active",
("start_location", admin.RelatedOnlyFieldListFilter),
("end_location", admin.RelatedOnlyFieldListFilter),
)
list_select_related = True
def _bidirectional(self, obj):
return obj.is_bidirectional
_bidirectional.boolean = True
def _active(self, obj):
return obj.is_active
_active.boolean = True
def _default(self, obj):
return obj.is_default
_default.boolean = True
@admin.register(ContractHandler)
class ContractHandlerAdmin(admin.ModelAdmin):
list_display = (
"organization",
"character",
"operation_mode",
"last_sync",
"_is_sync_ok",
)
actions = ("start_sync", "send_notifications", "update_pricing")
if not FREIGHT_DEVELOPER_MODE:
readonly_fields = (
"organization",
"character",
"operation_mode",
"version_hash",
"last_sync",
"last_error",
)
def _is_sync_ok(self, obj):
|
_is_sync_ok.boolean = True
_is_sync_ok.short_description = "sync ok"
def start_sync(self, request, queryset):
for obj in queryset:
tasks.run_contracts_sync.delay(force_sync=True, user_pk=request.user.pk)
text = "Started syncing contracts for: {} ".format(obj)
text += "You will receive a report once it is completed."
self.message_user(request, text)
start_sync.short_description = "Fetch contracts from Eve Online server"
def send_notifications(self, request, queryset):
for obj in queryset:
tasks.send_contract_notifications.delay(force_sent=True)
text = "Started sending notifications for: {} ".format(obj)
self.message_user(request, text)
send_notifications.short_description = (
"Send notifications for outstanding contracts"
)
def update_pricing(self, request, queryset):
del queryset
tasks.update_contracts_pricing.delay()
self.message_user(
request, "Started updating pricing relations for all contracts"
)
update_pricing.short_description = "Update pricing info for all contracts"
def has_add_permission(self, request):
return False
@admin.register(Contract)
class ContractAdmin(admin.ModelAdmin):
list_display = [
"contract_id",
"status",
"date_issued",
"issuer",
"_pilots_notified",
"_customer_notified",
]
list_filter = (
"status",
("issuer", admin.RelatedOnlyFieldListFilter),
)
search_fields = ["issuer"]
list_select_related = True
actions = ["send_pilots_notification", "send_customer_notification"]
def get_queryset(self, request):
qs = super().get_queryset(request)
return qs.prefetch_related("customer_notifications")
def _pilots_notified(self, contract):
return contract.date_notified is not None
_pilots_notified.boolean = True
def _customer_notified(self, contract):
return ", ".join(
sorted(
[x.status for x in contract.customer_notifications.all()],
reverse=True,
)
)
def send_pilots_notification(self, request, queryset):
for obj in queryset:
obj.send_pilot_notification()
self.message_user(
request,
"Sent pilots notification for contract {} to Discord".format(
obj.contract_id
),
)
send_pilots_notification.short_description = (
"Sent pilots notification for selected contracts to Discord"
)
def send_customer_notification(self, request, queryset):
for obj in queryset:
obj.send_customer_notification(force_sent=True)
self.message_user(
request,
"Sent customer notification for contract {} to Discord".format(
obj.contract_id
),
)
send_customer_notification.short_description = (
"Sent customer notification for selected contracts to Discord"
)
def has_add_permission(self, request):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
def has_change_permission(self, request, obj=None):
if FREIGHT_DEVELOPER_MODE:
return True
else:
return False
if FREIGHT_DEVELOPER_MODE:
@admin.register(ContractCustomerNotification)
class ContractCustomerNotificationAdmin(admin.ModelAdmin):
pass
| return obj.is_sync_ok |
FormattingUtils.ts | /*
Copyright 2016 OpenMarket Ltd
Copyright 2019, 2020 FM Foundation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import { _t } from '../languageHandler';
import { jsxJoin } from './ReactUtils';
/**
* formats numbers to fit into ~3 characters, suitable for badge counts
* e.g: 999, 9.9K, 99K, 0.9M, 9.9M, 99M, 0.9B, 9.9B
*/
export function formatCount(count: number): string {
if (count < 1000) return count.toString();
if (count < 10000) return (count / 1000).toFixed(1) + "K";
if (count < 100000) return (count / 1000).toFixed(0) + "K";
if (count < 10000000) return (count / 1000000).toFixed(1) + "M";
if (count < 100000000) return (count / 1000000).toFixed(0) + "M";
return (count / 1000000000).toFixed(1) + "B"; // 10B is enough for anyone, right? :S
}
/**
* Format a count showing the whole number but making it a bit more readable.
* e.g: 1000 => 1,000
*/
export function formatCountLong(count: number): string {
const formatter = new Intl.NumberFormat();
return formatter.format(count);
}
/**
* format a size in bytes into a human readable form
 * e.g: 1024 -> 1 KB, 1536 -> 1.5 KB (trailing zeros are stripped by parseFloat)
*/
export function formatBytes(bytes: number, decimals = 2): string {
if (bytes === 0) return '0 Bytes';
const k = 1024;
const dm = decimals < 0 ? 0 : decimals;
const sizes = ['Bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'];
const i = Math.floor(Math.log(bytes) / Math.log(k));
return parseFloat((bytes / Math.pow(k, i)).toFixed(dm)) + ' ' + sizes[i];
}
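// e.g. formatBytes(1536) === "1.5 KB"; formatBytes(0) === "0 Bytes"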
/**
* format a key into groups of 4 characters, for easier visual inspection
*
* @param {string} key key to format
*
* @return {string}
*/
export function formatCryptoKey(key: string): string {
return key.match(/.{1,4}/g).join(" ");
}
/**
* calculates a numeric hash for a given string
*
* @param {string} str string to hash
*
* @return {number}
*/
export function | (str: string): number {
let hash = 0;
let i;
let chr;
if (str.length === 0) {
return hash;
}
for (i = 0; i < str.length; i++) {
chr = str.charCodeAt(i);
hash = ((hash << 5) - hash) + chr;
hash |= 0;
}
return Math.abs(hash);
}
export function getUserNameColorClass(userId: string): string {
const colorNumber = (hashCode(userId) % 8) + 1;
return `mx_Username_color${colorNumber}`;
}
/**
* Constructs a written English string representing `items`, with an optional
* limit on the number of items included in the result. If specified and if the
* length of `items` is greater than the limit, the string "and n others" will
* be appended onto the result. If `items` is empty, returns the empty string.
* If there is only one item, return it.
* @param {string[]} items the items to construct a string from.
* @param {number?} itemLimit the number by which to limit the list.
* @returns {string} a string constructed by joining `items` with a comma
* between each item, but with the last item appended as " and [lastItem]".
*/
export function formatCommaSeparatedList(items: string[], itemLimit?: number): string;
export function formatCommaSeparatedList(items: JSX.Element[], itemLimit?: number): JSX.Element;
export function formatCommaSeparatedList(items: Array<JSX.Element | string>, itemLimit?: number): JSX.Element | string;
export function formatCommaSeparatedList(items: Array<JSX.Element | string>, itemLimit?: number): JSX.Element | string {
const remaining = itemLimit === undefined ? 0 : Math.max(
items.length - itemLimit, 0,
);
if (items.length === 0) {
return "";
} else if (items.length === 1) {
return items[0];
} else {
let lastItem;
if (remaining > 0) {
items = items.slice(0, itemLimit);
} else {
lastItem = items.pop();
}
let joinedItems;
if (items.every(e => typeof e === "string")) {
joinedItems = items.join(", ");
} else {
joinedItems = jsxJoin(items, ", ");
}
if (remaining > 0) {
return _t("%(items)s and %(count)s others", { items: joinedItems, count: remaining } );
} else {
return _t("%(items)s and %(lastItem)s", { items: joinedItems, lastItem });
}
}
}
| hashCode |
lib.rs | #![deny(missing_docs)]
//! Honest Intervals is an interval arithmetic library with correct rounding.
//!
//! It implements elementary arithmetic (addition, subtraction, multiplication and division) as well
//! as complicated mathematical functions such as logarithm and power over intervals and interval
//! sets. Bounds of the return values are always correctly rounded up or down to ensure that all
//! possible results are contained.
//!
//! In addition to the `Interval` and `IntervalSet` structs, the library also provides the `Mpfr`
//! struct that wraps the GNU MPFR library. The `Mpfr` struct is an ideal (and currently only)
//! bound type for intervals.
//!
//! Honest Intervals tries to be a pragmatic implementation of interval arithmetic rather than an
//! abstract basis for all possible implementations. Users do not have to implement any traits; they
//! can create a correctly rounding interval right away by calling `IntervalSet::<Mpfr>::new()`.
extern crate libc;
/// Finite precision module.
///
/// This module defines `fp::Float` trait and related traits where the floating point operations can
/// round up or down depending on which version of the operation is used.
pub mod fp;
/// Transcendental trait module.
///
/// This module defines transcendental functions such as `log` and `exp`.
pub mod transc;
/// MPFR wrapper module.
///
/// GNU MPFR is a C library that provides arbitrary precision floating-point functionality. This
/// module defines `mpfr::Mpfr` struct which implements `fp::Float` using GNU MPFR.
pub mod mpfr;
/// A naive implementation of `fp::Float` for f64.
///
/// Default rounding mode is used for all operations. Therefore, the actual results may be outside
/// of the lower and upper bounds. Use only when accuracy is not critical.
pub mod ieee;
/// Interval module.
/// | ///
/// This module defines `IntervalSet` struct that represents a non-intersecting set of `Interval`s.
pub mod intervalset;
pub use interval::{Interval, ParseIntervalError, SignClass};
pub use intervalset::{IntervalSet, ParseIntervalSetError};
#[cfg(test)]
mod tests {
#[test]
fn test_quick_start() {
use intervalset::IntervalSet;
use mpfr::Mpfr;
use std::str::FromStr;
let x = IntervalSet::<Mpfr>::from_str("{0; <1, 2>}").unwrap();
let y = IntervalSet::<Mpfr>::singleton(Mpfr::from(3.0));
assert_eq!("{3; <4, 5>}", format!("{}", x + y));
let x = IntervalSet::<Mpfr>::from_str("<1, 2>").unwrap();
let y = IntervalSet::<Mpfr>::from_str("<-1, 1>").unwrap();
assert_eq!("{<-inf, -1>; <1, inf>}", format!("{}", x / y));
}
} | /// This module defines `Interval` struct that represents an interval bounded by two `fp::Float`s.
pub mod interval;
/// Interval module. |
box_ref.rs | use ownref::{BoxRefA, BoxRefC};
#[test]
fn box_ref_any_owner() {
let x = BoxRefA::new(['a', 'b']);
let x = x.map(|array| &mut array[0]);
let x = BoxRefA::into_any_owner(x);
let _: BoxRefA<[char; 2], _> = BoxRefA::downcast_owner(x)
.map_err(|_| ())
.expect("unable to downcast");
}
#[test]
fn box_ref_any_owner_local() {
let x = BoxRefA::new(['a', 'b']);
let x = x.map(|array| &mut array[0]);
let x = BoxRefA::into_any_owner_local(x);
let _: BoxRefA<[char; 2], _> = BoxRefA::downcast_owner_local(x)
.map_err(|_| ())
.expect("unable to downcast");
}
#[test]
fn box_ref_a() {
let owner = BoxRefA::new(['a', 'b']);
let _: &[char; 2] = &*owner;
let ref_a: BoxRefA<[char; 2], char> = owner.map(|array| &mut array[0]);
assert_eq!(*ref_a, 'a');
let owner: BoxRefA<[char; 2], [char; 2]> = BoxRefA::into_owner_ref(ref_a);
let ref_b: BoxRefA<[char; 2], char> = owner.map(|array| &mut array[1]);
assert_eq!(*ref_b, 'b');
let array: [char; 2] = BoxRefA::into_owner(ref_b);
assert_eq!(array, ['a', 'b']);
}
#[test]
fn | () {
let owner = BoxRefC::new(['a', 'b']);
let _: &[char; 2] = &*owner;
let ref_a: BoxRefC<[char; 2], char> = owner.map(|array| &mut array[0]);
assert_eq!(*ref_a, 'a');
let owner: BoxRefC<[char; 2], [char; 2]> = BoxRefC::into_owner_ref(ref_a);
let ref_b: BoxRefC<[char; 2], char> = owner.map(|array| &mut array[1]);
assert_eq!(*ref_b, 'b');
let array: [char; 2] = BoxRefC::into_owner(ref_b);
assert_eq!(array, ['a', 'b']);
}
| box_ref_c |
console_connection_summary.go | // Copyright (c) 2016, 2018, 2021, Oracle and/or its affiliates. All rights reserved.
// This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
// Code generated. DO NOT EDIT.
// Database Service API
//
// The API for the Database Service. Use this API to manage resources such as databases and DB Systems. For more information, see Overview of the Database Service (https://docs.cloud.oracle.com/iaas/Content/Database/Concepts/databaseoverview.htm).
//
package database
import (
"github.com/oracle/oci-go-sdk/v41/common"
)
// ConsoleConnectionSummary The `InstanceConsoleConnection` API provides you with console access to a database node
// (dbnode), enabling you to troubleshoot a malfunctioning dbnode.
type ConsoleConnectionSummary struct {
// The OCID of the console connection.
Id *string `mandatory:"true" json:"id"`
// The OCID of the compartment to contain the console connection.
CompartmentId *string `mandatory:"true" json:"compartmentId"`
// The OCID of the database node.
DbNodeId *string `mandatory:"true" json:"dbNodeId"` | // The SSH connection string for the console connection.
ConnectionString *string `mandatory:"true" json:"connectionString"`
// The SSH public key fingerprint for the console connection.
Fingerprint *string `mandatory:"true" json:"fingerprint"`
// The current state of the console connection.
LifecycleState ConsoleConnectionSummaryLifecycleStateEnum `mandatory:"true" json:"lifecycleState"`
}
func (m ConsoleConnectionSummary) String() string {
return common.PointerString(m)
}
// ConsoleConnectionSummaryLifecycleStateEnum Enum with underlying type: string
type ConsoleConnectionSummaryLifecycleStateEnum string
// Set of constants representing the allowable values for ConsoleConnectionSummaryLifecycleStateEnum
const (
ConsoleConnectionSummaryLifecycleStateActive ConsoleConnectionSummaryLifecycleStateEnum = "ACTIVE"
ConsoleConnectionSummaryLifecycleStateCreating ConsoleConnectionSummaryLifecycleStateEnum = "CREATING"
ConsoleConnectionSummaryLifecycleStateDeleted ConsoleConnectionSummaryLifecycleStateEnum = "DELETED"
ConsoleConnectionSummaryLifecycleStateDeleting ConsoleConnectionSummaryLifecycleStateEnum = "DELETING"
ConsoleConnectionSummaryLifecycleStateFailed ConsoleConnectionSummaryLifecycleStateEnum = "FAILED"
)
var mappingConsoleConnectionSummaryLifecycleState = map[string]ConsoleConnectionSummaryLifecycleStateEnum{
"ACTIVE": ConsoleConnectionSummaryLifecycleStateActive,
"CREATING": ConsoleConnectionSummaryLifecycleStateCreating,
"DELETED": ConsoleConnectionSummaryLifecycleStateDeleted,
"DELETING": ConsoleConnectionSummaryLifecycleStateDeleting,
"FAILED": ConsoleConnectionSummaryLifecycleStateFailed,
}
// GetConsoleConnectionSummaryLifecycleStateEnumValues Enumerates the set of values for ConsoleConnectionSummaryLifecycleStateEnum
func GetConsoleConnectionSummaryLifecycleStateEnumValues() []ConsoleConnectionSummaryLifecycleStateEnum {
values := make([]ConsoleConnectionSummaryLifecycleStateEnum, 0)
for _, v := range mappingConsoleConnectionSummaryLifecycleState {
values = append(values, v)
}
return values
} | |
api_op_AuthorizeClientVpnIngress.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package ec2
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Adds an ingress authorization rule to a Client VPN endpoint. Ingress
// authorization rules act as firewall rules that grant access to networks. You
// must configure ingress authorization rules to enable clients to access resources
// in AWS or on-premises networks.
func (c *Client) AuthorizeClientVpnIngress(ctx context.Context, params *AuthorizeClientVpnIngressInput, optFns ...func(*Options)) (*AuthorizeClientVpnIngressOutput, error) {
if params == nil {
params = &AuthorizeClientVpnIngressInput{}
}
result, metadata, err := c.invokeOperation(ctx, "AuthorizeClientVpnIngress", params, optFns, c.addOperationAuthorizeClientVpnIngressMiddlewares)
if err != nil {
return nil, err
}
out := result.(*AuthorizeClientVpnIngressOutput)
out.ResultMetadata = metadata
return out, nil
}
type AuthorizeClientVpnIngressInput struct {
// The ID of the Client VPN endpoint.
//
// This member is required.
ClientVpnEndpointId *string
// The IPv4 address range, in CIDR notation, of the network for which access is
// being authorized.
//
// This member is required.
TargetNetworkCidr *string
// The ID of the group to grant access to, for example, the Active Directory group
// or identity provider (IdP) group. Required if AuthorizeAllGroups is false or not
// specified.
AccessGroupId *string
// Indicates whether to grant access to all clients. Specify true to grant all
// clients who successfully establish a VPN connection access to the network. Must
// be set to true if AccessGroupId is not specified.
AuthorizeAllGroups *bool
// Unique, case-sensitive identifier that you provide to ensure the idempotency of
// the request. For more information, see How to Ensure Idempotency
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string
// A brief description of the authorization rule.
Description *string
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation. Otherwise, it is
// UnauthorizedOperation.
DryRun *bool
noSmithyDocumentSerde
}
type AuthorizeClientVpnIngressOutput struct {
// The current state of the authorization rule.
Status *types.ClientVpnAuthorizationRuleStatus
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationAuthorizeClientVpnIngressMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpAuthorizeClientVpnIngress{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpAuthorizeClientVpnIngress{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil |
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addIdempotencyToken_opAuthorizeClientVpnIngressMiddleware(stack, options); err != nil {
return err
}
if err = addOpAuthorizeClientVpnIngressValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opAuthorizeClientVpnIngress(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpAuthorizeClientVpnIngress struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpAuthorizeClientVpnIngress) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpAuthorizeClientVpnIngress) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*AuthorizeClientVpnIngressInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *AuthorizeClientVpnIngressInput ")
}
if input.ClientToken == nil {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
}
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opAuthorizeClientVpnIngressMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpAuthorizeClientVpnIngress{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opAuthorizeClientVpnIngress(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "AuthorizeClientVpnIngress",
}
}
| {
return err
} |
image-thumbnail.ts | export type ImageThumbnailFileSchema = BaseFileSchema<
FileTypeEnum.ImageThumbnail,
ImageMeta
>; | import { BaseFileSchema } from '..';
import { ImageMeta } from '../common/image-meta';
import { FileTypeEnum } from '../enums/file-type.enum';
|
|
deleted-nodes-api.service.js | /**
* @fileoverview added by tsickle
* @suppress {checkTypes,extraRequire,missingOverride,missingReturn,unusedPrivateMembers,uselessCode} checked by tsc
*/
/*!
* @license
* Copyright 2019 Alfresco Software, Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { Injectable } from '@angular/core';
import { from, of } from 'rxjs';
import { AlfrescoApiService } from './alfresco-api.service';
import { UserPreferencesService } from './user-preferences.service';
import { catchError } from 'rxjs/operators';
import * as i0 from "@angular/core";
import * as i1 from "./alfresco-api.service";
import * as i2 from "./user-preferences.service";
export class | {
/**
* @param {?} apiService
* @param {?} preferences
*/
constructor(apiService, preferences) {
this.apiService = apiService;
this.preferences = preferences;
}
/**
* @private
* @return {?}
*/
get nodesApi() {
return this.apiService.getInstance().core.nodesApi;
}
/**
* Gets a list of nodes in the trash.
* @param {?=} options Options for JS-API call
* @return {?} List of nodes in the trash
*/
getDeletedNodes(options) {
/** @type {?} */
const defaultOptions = {
include: ['path', 'properties'],
maxItems: this.preferences.paginationSize,
skipCount: 0
};
/** @type {?} */
const queryOptions = Object.assign(defaultOptions, options);
/** @type {?} */
const promise = this.nodesApi.getDeletedNodes(queryOptions);
return from(promise).pipe(catchError((/**
* @param {?} err
* @return {?}
*/
(err) => of(err))));
}
}
DeletedNodesApiService.decorators = [
{ type: Injectable, args: [{
providedIn: 'root'
},] }
];
/** @nocollapse */
DeletedNodesApiService.ctorParameters = () => [
{ type: AlfrescoApiService },
{ type: UserPreferencesService }
];
/** @nocollapse */ DeletedNodesApiService.ngInjectableDef = i0.defineInjectable({ factory: function DeletedNodesApiService_Factory() { return new DeletedNodesApiService(i0.inject(i1.AlfrescoApiService), i0.inject(i2.UserPreferencesService)); }, token: DeletedNodesApiService, providedIn: "root" });
if (false) {
/**
* @type {?}
* @private
*/
DeletedNodesApiService.prototype.apiService;
/**
* @type {?}
* @private
*/
DeletedNodesApiService.prototype.preferences;
}
//# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoiZGVsZXRlZC1ub2Rlcy1hcGkuc2VydmljZS5qcyIsInNvdXJjZVJvb3QiOiJuZzovL0BhbGZyZXNjby9hZGYtY29yZS8iLCJzb3VyY2VzIjpbInNlcnZpY2VzL2RlbGV0ZWQtbm9kZXMtYXBpLnNlcnZpY2UudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6Ijs7Ozs7Ozs7Ozs7Ozs7Ozs7Ozs7QUFpQkEsT0FBTyxFQUFFLFVBQVUsRUFBRSxNQUFNLGVBQWUsQ0FBQztBQUMzQyxPQUFPLEVBQWMsSUFBSSxFQUFFLEVBQUUsRUFBRSxNQUFNLE1BQU0sQ0FBQztBQUc1QyxPQUFPLEVBQUUsa0JBQWtCLEVBQUUsTUFBTSx3QkFBd0IsQ0FBQztBQUM1RCxPQUFPLEVBQUUsc0JBQXNCLEVBQUUsTUFBTSw0QkFBNEIsQ0FBQztBQUNwRSxPQUFPLEVBQUUsVUFBVSxFQUFFLE1BQU0sZ0JBQWdCLENBQUM7Ozs7QUFLNUMsTUFBTSxPQUFPLHNCQUFzQjs7Ozs7SUFDL0IsWUFDWSxVQUE4QixFQUM5QixXQUFtQztRQURuQyxlQUFVLEdBQVYsVUFBVSxDQUFvQjtRQUM5QixnQkFBVyxHQUFYLFdBQVcsQ0FBd0I7SUFDNUMsQ0FBQzs7Ozs7SUFFSixJQUFZLFFBQVE7UUFDakIsT0FBTyxJQUFJLENBQUMsVUFBVSxDQUFDLFdBQVcsRUFBRSxDQUFDLElBQUksQ0FBQyxRQUFRLENBQUM7SUFDdEQsQ0FBQzs7Ozs7O0lBT0QsZUFBZSxDQUFDLE9BQWdCOztjQUN0QixjQUFjLEdBQUc7WUFDbkIsT0FBTyxFQUFFLENBQUUsTUFBTSxFQUFFLFlBQVksQ0FBRTtZQUNqQyxRQUFRLEVBQUUsSUFBSSxDQUFDLFdBQVcsQ0FBQyxjQUFjO1lBQ3pDLFNBQVMsRUFBRSxDQUFDO1NBQ2Y7O2NBQ0ssWUFBWSxHQUFHLE1BQU0sQ0FBQyxNQUFNLENBQUMsY0FBYyxFQUFFLE9BQU8sQ0FBQzs7Y0FDckQsT0FBTyxHQUFHLElBQUksQ0FBQyxRQUFRLENBQUMsZUFBZSxDQUFDLFlBQVksQ0FBQztRQUUzRCxPQUFPLElBQUksQ0FBQyxPQUFPLENBQUMsQ0FBQyxJQUFJLENBQ3JCLFVBQVU7Ozs7UUFBQyxDQUFDLEdBQUcsRUFBRSxFQUFFLENBQUMsRUFBRSxDQUFDLEdBQUcsQ0FBQyxFQUFDLENBQy9CLENBQUM7SUFDTixDQUFDOzs7WUE5QkosVUFBVSxTQUFDO2dCQUNSLFVBQVUsRUFBRSxNQUFNO2FBQ3JCOzs7O1lBTlEsa0JBQWtCO1lBQ2xCLHNCQUFzQjs7Ozs7Ozs7SUFRdkIsNENBQXNDOzs7OztJQUN0Qyw2Q0FBMkMiLCJzb3VyY2VzQ29udGVudCI6WyIvKiFcbiAqIEBsaWNlbnNlXG4gKiBDb3B5cmlnaHQgMjAxOSBBbGZyZXNjbyBTb2Z0d2FyZSwgTHRkLlxuICpcbiAqIExpY2Vuc2VkIHVuZGVyIHRoZSBBcGFjaGUgTGljZW5zZSwgVmVyc2lvbiAyLjAgKHRoZSBcIkxpY2Vuc2VcIik7XG4gKiB5b3UgbWF5IG5vdCB1c2UgdGhpcyBmaWxlIGV4Y2VwdCBpbiBjb21wbGlhbmNlIHdpdGggdGhlIExpY2Vuc2UuXG4gKiBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXRcbiAqXG4gKiAgICAgaHR0cDovL3d3dy5hcGFjaGUub3JnL2xpY2Vuc2VzL0xJQ0VOU0UtMi4wXG4gKlxuICogVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZVxuICogZGlzdHJpYnV0ZWQgdW5kZXIgdGhlIExpY2Vuc2UgaXMgZGlzdHJpYnV0ZWQgb24gYW4gXCJBUyBJU1wiIEJBU0lTLFxuICogV0lUSE9VVCBXQVJSQU5USUVTIE9SIENPTkRJVElPTlMgT0YgQU5ZIEtJTkQsIGVpdGhlciBleHByZXNzIG9yIGltcGxpZWQuXG4gKiBTZWUgdGhlIExpY2Vuc2UgZm9yIHRoZSBzcGVjaWZpYyBsYW5ndWFnZSBnb3Zlcm5pbmcgcGVybWlzc2lvbnMgYW5kXG4gKiBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS5cbiAqL1xuXG5pbXBvcnQgeyBJbmplY3RhYmxlIH0gZnJvbSAnQGFuZ3VsYXIvY29yZSc7XG5pbXBvcnQgeyBPYnNlcnZhYmxlLCBmcm9tLCBvZiB9IGZyb20gJ3J4anMnO1xuXG5pbXBvcnQgeyBOb2RlUGFnaW5nIH0gZnJvbSAnQGFsZnJlc2NvL2pzLWFwaSc7XG5pbXBvcnQgeyBBbGZyZXNjb0FwaVNlcnZpY2UgfSBmcm9tICcuL2FsZnJlc2NvLWFwaS5zZXJ2aWNlJztcbmltcG9ydCB7IFVzZXJQcmVmZXJlbmNlc1NlcnZpY2UgfSBmcm9tICcuL3VzZXItcHJlZmVyZW5jZXMuc2VydmljZSc7XG5pbXBvcnQgeyBjYXRjaEVycm9yIH0gZnJvbSAncnhqcy9vcGVyYXRvcnMnO1xuXG5ASW5qZWN0YWJsZSh7XG4gICAgcHJvdmlkZWRJbjogJ3Jvb3QnXG59KVxuZXhwb3J0IGNsYXNzIERlbGV0ZWROb2Rlc0FwaVNlcnZpY2Uge1xuICAgIGNvbnN0cnVjdG9yKFxuICAgICAgICBwcml2YXRlIGFwaVNlcnZpY2U6IEFsZnJlc2NvQXBpU2VydmljZSxcbiAgICAgICAgcHJpdmF0ZSBwcmVmZXJlbmNlczogVXNlclByZWZlcmVuY2VzU2VydmljZVxuICAgICkge31cblxuICAgIHByaXZhdGUgZ2V0IG5vZGVzQXBpKCkge1xuICAgICAgIHJldHVybiB0aGlzLmFwaVNlcnZpY2UuZ2V0SW5zdGFuY2UoKS5jb3JlLm5vZGVzQXBpO1xuICAgIH1cblxuICAgIC8qKlxuICAgICAqIEdldHMgYSBsaXN0IG9mIG5vZGVzIGluIHRoZSB0cmFzaC5cbiAgICAgKiBAcGFyYW0gb3B0aW9ucyBPcHRpb25zIGZvciBKUy1BUEkgY2FsbFxuICAgICAqIEByZXR1cm5zIExpc3Qgb2Ygbm9kZXMgaW4gdGhlIHRyYXNoXG4gICAgICovXG4gICAgZ2V0RGVsZXRlZE5vZGVzKG9wdGlvbnM/OiBPYmplY3QpOiBPYnNlc
nZhYmxlPE5vZGVQYWdpbmc+IHtcbiAgICAgICAgY29uc3QgZGVmYXVsdE9wdGlvbnMgPSB7XG4gICAgICAgICAgICBpbmNsdWRlOiBbICdwYXRoJywgJ3Byb3BlcnRpZXMnIF0sXG4gICAgICAgICAgICBtYXhJdGVtczogdGhpcy5wcmVmZXJlbmNlcy5wYWdpbmF0aW9uU2l6ZSxcbiAgICAgICAgICAgIHNraXBDb3VudDogMFxuICAgICAgICB9O1xuICAgICAgICBjb25zdCBxdWVyeU9wdGlvbnMgPSBPYmplY3QuYXNzaWduKGRlZmF1bHRPcHRpb25zLCBvcHRpb25zKTtcbiAgICAgICAgY29uc3QgcHJvbWlzZSA9IHRoaXMubm9kZXNBcGkuZ2V0RGVsZXRlZE5vZGVzKHF1ZXJ5T3B0aW9ucyk7XG5cbiAgICAgICAgcmV0dXJuIGZyb20ocHJvbWlzZSkucGlwZShcbiAgICAgICAgICAgIGNhdGNoRXJyb3IoKGVycikgPT4gb2YoZXJyKSlcbiAgICAgICAgKTtcbiAgICB9XG59XG4iXX0= | DeletedNodesApiService |
test.rs | use proptest::prop_assert_eq;
use proptest::test_runner::{Config, TestRunner};
use crate::otp::erlang::is_atom_1::native;
use crate::scheduler::with_process_arc;
use crate::test::strategy;
#[test]
fn without_atom_returns_false() |
#[test]
fn with_atom_returns_true() {
TestRunner::new(Config::with_source_file(file!()))
.run(&strategy::term::atom(), |term| {
prop_assert_eq!(native(term), true.into());
Ok(())
})
.unwrap();
}
| {
with_process_arc(|arc_process| {
TestRunner::new(Config::with_source_file(file!()))
.run(&strategy::term::is_not_atom(arc_process.clone()), |term| {
prop_assert_eq!(native(term), false.into());
Ok(())
})
.unwrap();
});
} |
lib.rs | use std::{
collections::{HashMap, VecDeque},
sync::{Arc, Mutex},
};
use misc_utils::abort_on_poison;
use tokio::sync::oneshot::{channel, Receiver, Sender};
type LocksDB = Arc<Mutex<HashMap<String, LockQueue>>>;
pub struct ParallelTaskQueue {
locks: LocksDB,
}
impl ParallelTaskQueue {
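    /// Waits until no earlier task holds the lock for `key`; the returned
    /// `LockGuard` wakes the next waiter (or removes the entry) when dropped.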
pub async fn acquire_permit(&self, key: String) -> LockGuard {
let receiver = {
let mut locks = self.locks.lock().unwrap();
let lock_queue = LockQueue::new();
let queue = (*locks).entry(key.clone()).or_insert(lock_queue);
queue.add_task()
};
receiver.await.unwrap();
LockGuard {
db: self.locks.clone(),
key,
}
}
pub fn new() -> ParallelTaskQueue {
ParallelTaskQueue {
locks: Arc::new(Mutex::new(HashMap::new())),
}
}
}
impl Default for ParallelTaskQueue {
fn default() -> Self {
Self::new()
}
}
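/// Per-key FIFO of waiting tasks; `is_blocked` marks the lock as currently held.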
pub struct LockQueue {
is_blocked: bool,
queue: VecDeque<Sender<()>>,
}
impl LockQueue {
fn new() -> LockQueue |
fn add_task(&mut self) -> Receiver<()> {
let (tx, rx) = channel();
match self.is_blocked {
true => {
self.queue.push_back(tx);
}
false => {
self.is_blocked = true;
tx.send(()).expect("Receiver dropped");
}
};
rx
}
fn end_task(&mut self) {
match self.queue.pop_front() {
None => {
self.is_blocked = false;
}
Some(tx) => {
let result = tx.send(());
if result.is_err() {
// receiver dropped, start next task
self.end_task()
}
}
}
}
}
pub struct LockGuard {
db: LocksDB,
key: String,
}
impl Drop for LockGuard {
fn drop(&mut self) {
let mut guard = self.db.lock().unwrap_or_else(abort_on_poison);
let entry = (*guard)
.get_mut(&self.key)
.expect("LockGuard already dropped");
entry.end_task();
if !entry.is_blocked {
(*guard).remove(&self.key);
}
}
}
#[cfg(test)]
mod tests {
use std::{
sync::atomic::{AtomicU32, Ordering},
time::Duration,
};
use tokio::{
sync::{oneshot, Barrier},
time::sleep,
try_join,
};
use super::*;
async fn wait_for_barrier(
wait_barrier: Arc<Barrier>,
task_queue: Arc<ParallelTaskQueue>,
lock_key: String,
) {
let _guard = task_queue.acquire_permit(lock_key).await;
wait_barrier.wait().await;
}
async fn return_task_execution_order(
waiter: Receiver<()>,
task_queue: Arc<ParallelTaskQueue>,
lock_key: String,
counter: Arc<AtomicU32>,
) -> u32 {
let _guard = task_queue.acquire_permit(lock_key).await;
waiter.await.unwrap();
counter.fetch_add(1, Ordering::SeqCst)
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // `Miri` doesn't support kqueue() syscall in this test
async fn should_process_unrelated_tasks_in_parallel() -> anyhow::Result<()> {
let task_queue = Arc::new(ParallelTaskQueue::new());
let barrier = Arc::new(Barrier::new(2));
let first_task = tokio::spawn(wait_for_barrier(
barrier.clone(),
task_queue.clone(),
"A".to_string(),
));
let second_task = tokio::spawn(wait_for_barrier(
barrier.clone(),
task_queue.clone(),
"B".to_string(),
));
sleep(Duration::from_millis(200)).await;
try_join!(first_task, second_task)?;
Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // `Miri` doesn't support kqueue() syscall in this test
async fn should_wait_for_related_task_to_finish() -> anyhow::Result<()> {
let task_queue = Arc::new(ParallelTaskQueue::new());
let (tx1, rx1) = oneshot::channel();
let (tx2, rx2) = oneshot::channel();
let (tx3, rx3) = oneshot::channel();
let counter = Arc::new(AtomicU32::new(0));
let first_task = tokio::spawn(return_task_execution_order(
rx1,
task_queue.clone(),
"A".to_string(),
counter.clone(),
));
let second_task = tokio::spawn(return_task_execution_order(
rx2,
task_queue.clone(),
"A".to_string(),
counter.clone(),
));
let third_task = tokio::spawn(return_task_execution_order(
rx3,
task_queue.clone(),
"A".to_string(),
counter.clone(),
));
tx3.send(()).unwrap();
sleep(Duration::from_millis(200)).await;
tx2.send(()).unwrap();
sleep(Duration::from_millis(200)).await;
tx1.send(()).unwrap();
let results = try_join!(first_task, second_task, third_task)?;
assert_eq!(3, counter.load(Ordering::SeqCst));
assert_eq!(0, results.0);
assert_eq!(1, results.1);
assert_eq!(2, results.2);
Ok(())
}
#[tokio::test]
#[cfg_attr(miri, ignore)] // `Miri` doesn't support kqueue() syscall in this test
async fn should_clean_up_after_task_group_is_done() {
let task_queue = Arc::new(ParallelTaskQueue::new());
{
let _guard = task_queue.acquire_permit("A".to_string()).await;
// DO SOME WORK
}
assert_eq!(
task_queue.locks.lock().unwrap().len(),
0,
"Lock key was not removed from parallel task queue"
);
}
}
| {
LockQueue {
is_blocked: false,
queue: VecDeque::new(),
}
} |
utils.py | from contextlib import suppress
from datetime import timedelta
from dateutil.parser import parse
from django.db import transaction
from django_scopes import scope
from pretalx.person.models import SpeakerProfile, User
from pretalx.schedule.models import Room, TalkSlot
from pretalx.submission.models import (
Submission, SubmissionStates, SubmissionType, Track,
)
def guess_schedule_version(event):
|
@transaction.atomic()
def process_frab(root, event):
"""
Takes an xml document root and an event, and releases a schedule with the data from the xml document.
Called from the `import_schedule` manage command, at least.
"""
with scope(event=event):
for day in root.findall('day'):
for rm in day.findall('room'):
room, _ = Room.objects.get_or_create(event=event, name=rm.attrib['name'])
for talk in rm.findall('event'):
_create_talk(talk=talk, room=room, event=event)
schedule_version = root.find('version').text
try:
event.wip_schedule.freeze(schedule_version, notify_speakers=False)
schedule = event.schedules.get(version=schedule_version)
except Exception:
raise Exception(
f'Could not import "{event.name}" schedule version "{schedule_version}": failed creating schedule release.'
)
schedule.talks.update(is_visible=True)
start = schedule.talks.order_by('start').first().start
end = schedule.talks.order_by('-end').first().end
event.date_from = start.date()
event.date_to = end.date()
event.save()
return (
f'Successfully imported "{event.name}" schedule version "{schedule_version}".'
)
def _create_talk(*, talk, room, event):
date = talk.find('date').text
start = parse(date + ' ' + talk.find('start').text)
hours, minutes = talk.find('duration').text.split(':')
duration = timedelta(hours=int(hours), minutes=int(minutes))
duration_in_minutes = duration.total_seconds() / 60
try:
end = parse(date + ' ' + talk.find('end').text)
except AttributeError:
end = start + duration
sub_type = SubmissionType.objects.filter(
event=event, name=talk.find('type').text, default_duration=duration_in_minutes
).first()
if not sub_type:
sub_type = SubmissionType.objects.create(
name=talk.find('type').text or 'default',
event=event,
default_duration=duration_in_minutes,
)
track = Track.objects.filter(event=event, name=talk.find('track').text).first()
if not track:
track = Track.objects.create(
name=talk.find('track').text or 'default', event=event
)
optout = False
with suppress(AttributeError):
optout = talk.find('recording').find('optout').text == 'true'
code = None
if (
Submission.objects.filter(code__iexact=talk.attrib['id'], event=event).exists()
or not Submission.objects.filter(code__iexact=talk.attrib['id']).exists()
):
code = talk.attrib['id']
elif (
Submission.objects.filter(
code__iexact=talk.attrib['guid'][:16], event=event
).exists()
or not Submission.objects.filter(code__iexact=talk.attrib['guid'][:16]).exists()
):
code = talk.attrib['guid'][:16]
sub, _ = Submission.objects.get_or_create(
event=event, code=code, defaults={'submission_type': sub_type}
)
sub.submission_type = sub_type
sub.track = track
sub.title = talk.find('title').text
sub.description = talk.find('description').text
if talk.find('subtitle').text:
sub.description = talk.find('subtitle').text + '\n' + (sub.description or '')
sub.abstract = talk.find('abstract').text
sub.content_locale = talk.find('language').text or 'en'
sub.do_not_record = optout
sub.state = SubmissionStates.CONFIRMED
sub.save()
for person in talk.find('persons').findall('person'):
user = User.objects.filter(name=person.text[:60]).first()
if not user:
user = User(name=person.text, email=f'{person.text}@localhost')
user.save()
SpeakerProfile.objects.create(user=user, event=event)
sub.speakers.add(user)
slot, _ = TalkSlot.objects.get_or_create(
submission=sub, schedule=event.wip_schedule, is_visible=True
)
slot.room = room
slot.is_visible = True
slot.start = start
slot.end = end
slot.save()
| if not event.current_schedule:
return '0.1'
version = event.current_schedule.version
prefix = ''
for separator in [',', '.', '-', '_']:
if separator in version:
prefix, version = version.rsplit(separator, maxsplit=1)
break
if version.isdigit():
version = str(int(version) + 1)
return prefix + separator + version
return '' |
index.js | var parse = require('url').parse
module.exports = function (string) {
// user/repo#version
var m = /^([\w-.]+)\/([\w-.]+)((?:#|@).+)?$/.exec(string)
if (m) return format(m)
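  // e.g. 'user/repo#v1.0' -> ['user', 'repo', 'v1.0']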
string = string.replace('//www.', '//')
// normalize git@ and https:git@ urls
string = string.replace(/^git@/, 'https://')
string = string.replace(/^https:git@/, 'https://')
string = string.replace('.com:', '.com/')
if (!~string.indexOf('://')) {
return false
}
var url = parse(string)
var path = url.pathname.replace(/\.git$/, '')
// https://www.npmjs.org/doc/json.html#Git-URLs-as-Dependencies
var m = /^\/([\w-.]+)\/([\w-.]+)$/.exec(path)
if (m) return m.slice(1, 3).concat((url.hash || '').slice(1))
// archive link
// https://developer.github.com/v3/repos/contents/#get-archive-link
var m = /^\/repos\/([\w-.]+)\/([\w-.]+)\/(?:tarball|zipball)(\/.+)?$/.exec(path)
if (m) return format(m)
// codeload link
// https://developer.github.com/v3/repos/contents/#response-4
var m = /^\/([\w-.]+)\/([\w-.]+)\/(?:legacy\.(?:zip|tar\.gz))(\/.+)?$/.exec(path)
if (m) return format(m)
// tarball link
// https://github.com/LearnBoost/socket.io-client/blob/master/package.json#L14
var m = /^\/([\w-]+)\/([\w-.]+)\/archive\/(.+)\.tar\.gz?$/.exec(path)
if (m) return m.slice(1, 4)
return false
}
function format(m) {
var version = (m[3] || '').slice(1) | if (/^['"]/.test(version)) version = version.slice(1, -1)
return [m[1], m[2], version]
} |
|
printing.py | from __future__ import absolute_import
import pprint
def pp(stuff): | pretty_printer = pprint.PrettyPrinter(indent=3)
pretty_printer.pprint(stuff) |
|
react_chess_to_IZII_state.py | from engine.utils import RF_sq64, sq64_to_sq120, print_board
def react_chess_board_to_IZII_board(board):
| if board is None:
exit()
izii_board = ["x"] * 120
pieces = board.split(',')
for i in range(len(izii_board)):
if i >= 20 and i < 100:
if i % 10 != 0 and i % 10 != 9:
izii_board[i] = 'o'
for p in pieces:
# print("pp", p)
piece_with_RF = p.split('@')
# print("look: ", piece_with_RF)
piece = piece_with_RF[0]
RF = piece_with_RF[1]
sq64 = RF_sq64(RF[0], RF[1])
sq120 = sq64_to_sq120(sq64)
izii_board[sq120] = piece
return ''.join(izii_board) |
|
review.module.ts | import { Module } from '@nestjs/common';
import { ReviewController } from './review.controller';
import { TypegooseModule } from "nestjs-typegoose";
import { ReviewModel } from "./review.model";
import { ReviewService } from './review.service';
@Module({
controllers: [ReviewController],
imports: [
TypegooseModule.forFeature([
{
typegooseClass: ReviewModel,
schemaOptions: {
collection: 'Review'
}
}
])
],
providers: [ReviewService]
})
export class | {}
| ReviewModule |
swift.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from typing import Any
from typing import Dict
from typing import Iterable | from ospurge.resources.base import BaseServiceResource
from ospurge.resources import glance
class ListObjectsMixin(BaseServiceResource):
def list_objects(self) -> Iterator[Dict[str, Any]]:
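        """Yield every object from every container, annotated with its container name."""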
for container in self.cloud.list_containers():
for obj in self.cloud.list_objects(container['name']):
obj['container_name'] = container['name']
yield obj
class Objects(base.ServiceResource, glance.ListImagesMixin, ListObjectsMixin):
ORDER = 73
def check_prerequisite(self) -> bool:
return (self.list_images_by_owner() == [] and
self.cloud.list_volume_backups() == [])
def list(self) -> Iterable:
yield from self.list_objects()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_object(resource['container_name'], resource['name'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "Object '{}' from Container '{}'".format(
resource['name'], resource['container_name'])
class Containers(base.ServiceResource, ListObjectsMixin):
ORDER = 75
def check_prerequisite(self) -> bool:
return list(self.list_objects()) == []
def list(self) -> Iterable:
return self.cloud.list_containers()
def delete(self, resource: Dict[str, Any]) -> None:
self.cloud.delete_container(resource['name'])
@staticmethod
def to_str(resource: Dict[str, Any]) -> str:
return "Container (name='{}')".format(resource['name']) | from typing import Iterator
from ospurge.resources import base |
projecoes.js | function perspective(P, d){
if(!d)
d = 12;
let ident = [[d, 0, 0, 0],
[0, d, 0, 0],
[0, 0, d, 0],
[0, 0, 1, 0]];
let out = matriz_mult(ident,P);
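    // perspective divide: normalize each column by its homogeneous coordinate w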
for(let i=0;i<out[3].length; i++){
out[0][i] = out[0][i]/out[3][i];
out[1][i] = out[1][i]/out[3][i];
out[2][i] = out[2][i]/out[3][i];
out[3][i] = out[3][i]/out[3][i];
}
return out;
}
function Plano_XY(P,tz){
let ident = [
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 0, tz],
[0, 0, 0, 1]];
return multiply(ident,P);
}
function Plano_XZ(P,ty){
let ident = [
[1, 0, 0, 0],
[0, 0, 0, ty],
[0, 0, 1, 0],
[0, 0, 0, 1]];
return multiply(ident,P);
}
| [0, 0, 0, tx],
[0, 1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1]];
return multiply(ident,P);
}
function Oblique_Projection(P,ang,e){
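    // shear z onto the xy-plane at angle ang, scaled by e (e = 1 cavalier, e = 0.5 cabinet)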
let ident = [
[1, 0, e*cos(ang),0],
[0, 1, e*sin(ang), 0],
[0, 0, 1, 0],
[0, 0, 0, 1]];
return multiply(ident,P);
} | function Plano_YZ(P,tx){
let ident = [ |
random_load.py | import numpy as np
import pandas as pd
from openmodelica_microgrid_gym.util import RandProcess
class RandomLoad:
def __init__(self, train_episode_length: int, ts: float, rand_process: RandProcess, loadstep_time: int = None,
load_curve: pd.DataFrame = None, bounds=None, bounds_std=None):
"""
        :param train_episode_length: number of steps per training episode (can differ from env.max_episode_steps)
        :param ts: sample time of the env
        :param rand_process: instance of the random process that defines the noise added to the load
        :param loadstep_time: number of the env step where the load step should happen
        :param load_curve: stored load data to sample from instead of sampling from a distribution
:param bounds: Bounds to clip the sampled load data
:param bounds_std: Chosen bounds are sampled from a distribution with std=bounds_std and mean=bounds
"""
self.train_episode_length = train_episode_length
self.ts = ts
self.rand_process = rand_process
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
self.load_curve = load_curve
if bounds is None:
self.bounds = (-np.inf, np.inf)
else:
self.bounds = bounds
if bounds_std is None:
self.bounds_std = (0, 0)
else:
self.bounds_std = bounds_std
self.lowerbound_std = 0
self.upperbound_std = 0
def reset(self, loadstep_time=None):
if loadstep_time is None:
self.loadstep_time = np.random.randint(0, self.train_episode_length)
else:
self.loadstep_time = loadstep_time
def load_step(self, t, gain):
"""
Changes the load parameters
        :param t: current time
:param gain: device parameter
:return: Sample from SP
"""
        # Triggers the load step once t passes loadstep_time * ts
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
self.rand_process.proc.mean = gain * 0.55
self.rand_process.reserve = gain * 0.55
elif t <= self.ts:
self.rand_process.proc.mean = gain
return self.rand_process.sample(t)
def clipped_step(self, t):
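        """Sample the random process at time t, clipped to the bounds plus the per-event bound noise."""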
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def one_random_loadstep_per_episode(self, t):
if self.loadstep_time * self.ts < t <= self.loadstep_time * self.ts + self.ts:
            # do with 100 percent probability
self.do_change(1002, 102)
# else:
# with 2 permill change drift
# self.do_change(2, 0)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def give_dataframe_value(self, t, col):
"""
Gives load values from a stored dataframe (self.load_curve)
        :param t: time - represents here the row of the dataframe
        :param col: column name of the dataframe (typically str)
"""
if t < 0:
# return None
return self.load_curve[col][0]
if self.load_curve is None:
raise ValueError('No dataframe given! Please feed load class (.load_curve) with data')
return self.load_curve[col][int(t / self.ts)]
def random_load_step(self, t, event_prob: int = 2, step_prob: int = 50):
"""
        Changes the load parameters, applying a load step with probability ``event_prob`` (0.2 % by default); the
        event is a pure step with probability ``step_prob`` (50 % by default), otherwise a drift. On every event the
        random process variance is drawn randomly from [1, 150].
        :param t: time
        :param event_prob: probability (in per mill) that the step event is triggered in the current step
        :param step_prob: probability (in per cent) that the event is an abrupt step (otherwise a drift; the random
        process speed is not adjustable yet)
:return: Sample from SP
"""
        # Changes rand process data with probability of event_prob (per mill) and sets new values randomly
        if np.random.randint(0, 1001) < event_prob:
gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
            # define std for clipping once every event
# np.maximum to not allow negative values
self.lowerbound_std = np.maximum(np.random.normal(scale=self.bounds_std[0]), 0.0001)
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
            # With step_prob (per cent) probability do a step, otherwise a drift
            if np.random.randint(0, 101) < step_prob:
# step
self.rand_process.reserve = gain
else:
                # drift -> lower the process speed so the load drifts towards the new mean instead of jumping
self.rand_process.proc.speed = np.random.randint(10, 100)
return np.clip(self.rand_process.sample(t),
self.bounds[0] + self.lowerbound_std,
self.bounds[1] + self.upperbound_std
)
def do_change(self, event_prob_permill=2, step_prob_percent=50):
if np.random.randint(0, 1001) < event_prob_permill:
| gain = np.random.randint(self.rand_process.bounds[0], self.rand_process.bounds[1])
self.rand_process.proc.mean = gain
self.rand_process.proc.vol = np.random.randint(1, 150)
self.rand_process.proc.speed = np.random.randint(10, 1200)
            # define std for clipping once every event
self.lowerbound_std = np.random.normal(scale=self.bounds_std[0])
self.upperbound_std = np.random.normal(scale=self.bounds_std[1])
            # With step_prob_percent (per cent) probability do a step, otherwise a drift
if np.random.randint(0, 101) < step_prob_percent:
# step
self.rand_process.reserve = gain
else:
                # drift -> lower the process speed so the load drifts towards the new mean instead of jumping
self.rand_process.proc.speed = np.random.randint(10, 100) |
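A minimal usage sketch for the dataframe-backed path of the RandomLoad class above; the column name 'R_load' and the sample values are illustrative assumptions, not taken from any real load file.
# Illustrative only: drive RandomLoad from a stored curve (column name 'R_load'
# is a hypothetical placeholder; rand_process is unused on this path).
import pandas as pd

curve = pd.DataFrame({'R_load': [14.0, 14.2, 13.9, 14.1]})
load = RandomLoad(train_episode_length=4, ts=1e-4, rand_process=None, load_curve=curve)
print(load.give_dataframe_value(t=2e-4, col='R_load'))  # row int(2e-4 / 1e-4) = 2 -> 13.9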
|
azure_file_volume_source.rs | // Generated from definition io.k8s.api.core.v1.AzureFileVolumeSource
/// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct AzureFileVolumeSource {
/// Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.
pub read_only: Option<bool>,
    /// The name of the secret that contains the Azure Storage Account Name and Key
pub secret_name: String,
/// Share Name
pub share_name: String,
}
impl<'de> serde::Deserialize<'de> for AzureFileVolumeSource {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_read_only,
Key_secret_name,
Key_share_name,
Other,
}
impl<'de> serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: serde::Deserializer<'de> {
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result |
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: serde::de::Error {
Ok(match v {
"readOnly" => Field::Key_read_only,
"secretName" => Field::Key_secret_name,
"shareName" => Field::Key_share_name,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = AzureFileVolumeSource;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("AzureFileVolumeSource")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: serde::de::MapAccess<'de> {
let mut value_read_only: Option<bool> = None;
let mut value_secret_name: Option<String> = None;
let mut value_share_name: Option<String> = None;
while let Some(key) = serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_read_only => value_read_only = serde::de::MapAccess::next_value(&mut map)?,
Field::Key_secret_name => value_secret_name = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Key_share_name => value_share_name = Some(serde::de::MapAccess::next_value(&mut map)?),
Field::Other => { let _: serde::de::IgnoredAny = serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(AzureFileVolumeSource {
read_only: value_read_only,
secret_name: value_secret_name.ok_or_else(|| serde::de::Error::missing_field("secretName"))?,
share_name: value_share_name.ok_or_else(|| serde::de::Error::missing_field("shareName"))?,
})
}
}
deserializer.deserialize_struct(
"AzureFileVolumeSource",
&[
"readOnly",
"secretName",
"shareName",
],
Visitor,
)
}
}
impl serde::Serialize for AzureFileVolumeSource {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: serde::Serializer {
let mut state = serializer.serialize_struct(
"AzureFileVolumeSource",
2 +
self.read_only.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.read_only {
serde::ser::SerializeStruct::serialize_field(&mut state, "readOnly", value)?;
}
serde::ser::SerializeStruct::serialize_field(&mut state, "secretName", &self.secret_name)?;
serde::ser::SerializeStruct::serialize_field(&mut state, "shareName", &self.share_name)?;
serde::ser::SerializeStruct::end(state)
}
}
| {
f.write_str("field identifier")
} |
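A small round-trip sketch for the hand-written (de)serializer above; it assumes the serde_json crate is available and is illustrative only.
#[cfg(test)]
mod round_trip {
    use super::*;

    #[test]
    fn skips_read_only_when_none() {
        let src = AzureFileVolumeSource {
            read_only: None,
            secret_name: "azure-secret".to_owned(),
            share_name: "share".to_owned(),
        };
        // read_only is None, so the field-count logic above omits "readOnly" entirely.
        let json = serde_json::to_string(&src).unwrap();
        assert_eq!(json, r#"{"secretName":"azure-secret","shareName":"share"}"#);
        let back: AzureFileVolumeSource = serde_json::from_str(&json).unwrap();
        assert_eq!(back, src);
    }
}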
route.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from .. import utilities, tables
class Route(pulumi.CustomResource):
description: pulumi.Output[str]
dest_range: pulumi.Output[str]
name: pulumi.Output[str]
network: pulumi.Output[str]
next_hop_gateway: pulumi.Output[str]
next_hop_instance: pulumi.Output[str]
next_hop_instance_zone: pulumi.Output[str]
"""
(Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
"""
next_hop_ip: pulumi.Output[str]
next_hop_network: pulumi.Output[str]
next_hop_vpn_tunnel: pulumi.Output[str]
priority: pulumi.Output[float]
project: pulumi.Output[str]
"""
The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
self_link: pulumi.Output[str]
"""
The URI of the created resource.
"""
tags: pulumi.Output[list]
def __init__(__self__, resource_name, opts=None, description=None, dest_range=None, name=None, network=None, next_hop_gateway=None, next_hop_instance=None, next_hop_instance_zone=None, next_hop_ip=None, next_hop_vpn_tunnel=None, priority=None, project=None, tags=None, __name__=None, __opts__=None):
"""
Represents a Route resource.
A route is a rule that specifies how certain packets should be handled by
the virtual network. Routes are associated with virtual machines by tag,
and the set of routes for a particular virtual machine is called its
routing table. For each packet leaving a virtual machine, the system
searches that virtual machine's routing table for a single best matching
route.
Routes match packets by destination IP address, preferring smaller or more
specific ranges over larger ones. If there is a tie, the system selects
the route with the smallest priority value. If there is still a tie, it
uses the layer three and four packet headers to select just one of the
remaining matching routes. The packet is then forwarded as specified by
the next_hop field of the winning route -- either to another virtual
machine destination, a virtual machine gateway or a Compute
Engine-operated gateway. Packets that do not match any route in the
sending virtual machine's routing table will be dropped.
A Route resource must have exactly one specification of either
nextHopGateway, nextHopInstance, nextHopIp, or nextHopVpnTunnel.
To get more information about Route, see:
* [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/routes)
* How-to Guides
* [Using Routes](https://cloud.google.com/vpc/docs/using-routes)
<div class = "oics-button" style="float: right; margin: 0 0 -15px">
<a href="https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https%3A%2F%2Fgithub.com%2Fterraform-google-modules%2Fdocs-examples.git&cloudshell_working_dir=route_basic&cloudshell_image=gcr.io%2Fgraphite-cloud-shell-images%2Fterraform%3Alatest&open_in_editor=main.tf&cloudshell_print=.%2Fmotd&cloudshell_tutorial=.%2Ftutorial.md" target="_blank">
<img alt="Open in Cloud Shell" src="//gstatic.com/cloudssh/images/open-btn.svg" style="max-height: 44px; margin: 32px auto; max-width: 100%;">
</a>
</div>
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] next_hop_instance_zone: (Optional when `next_hop_instance` is
specified) The zone of the instance specified in
`next_hop_instance`. Omit if `next_hop_instance` is specified as
a URL.
:param pulumi.Input[str] project: The ID of the project in which the resource belongs.
If it is not provided, the provider project is used.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if not resource_name:
raise TypeError('Missing resource name argument (for URN creation)')
if not isinstance(resource_name, str):
raise TypeError('Expected resource name to be a string')
if opts and not isinstance(opts, pulumi.ResourceOptions):
|
__props__ = dict()
__props__['description'] = description
if dest_range is None:
raise TypeError('Missing required property dest_range')
__props__['dest_range'] = dest_range
__props__['name'] = name
if network is None:
raise TypeError('Missing required property network')
__props__['network'] = network
__props__['next_hop_gateway'] = next_hop_gateway
__props__['next_hop_instance'] = next_hop_instance
__props__['next_hop_instance_zone'] = next_hop_instance_zone
__props__['next_hop_ip'] = next_hop_ip
__props__['next_hop_vpn_tunnel'] = next_hop_vpn_tunnel
__props__['priority'] = priority
__props__['project'] = project
__props__['tags'] = tags
__props__['next_hop_network'] = None
__props__['self_link'] = None
super(Route, __self__).__init__(
'gcp:compute/route:Route',
resource_name,
__props__,
opts)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
| raise TypeError('Expected resource options to be a ResourceOptions instance') |
dialogue.rs | use mlua::{UserData, UserDataFields};
use crate::models::common::Time;
pub struct | {
row: i32,
/// Is this a comment line?
comment: bool,
/// Layer number
layer: i32,
margin_left: i32,
margin_right: i32,
margin_vertical: i32,
/// Starting time
start_time: Time,
/// Ending time
end_time: Time,
/// Style name
style: String,
/// Actor name
actor: String,
/// Effect name
effect: String,
/// IDs of extra data entries for line
extra_ids: Vec<u32>,
/// Raw text data
text: String,
}
impl AssDialogue {
pub fn entry_data(&self) -> String {
// TODO
String::new()
}
}
impl UserData for AssDialogue {
fn add_fields<'lua, F: UserDataFields<'lua, Self>>(fields: &mut F) {
field_raw_str!(fields, "class", "dialogue");
field_raw_str!(fields, "section", "[Events]");
fields.add_field_method_get("raw", |lua, this| Ok(lua.create_string(&this.entry_data())?));
field_this!(fields, comment);
field_this!(fields, layer);
field_this!(fields, start_time, get);
field_this!(fields, end_time, get);
field_this_str!(fields, style);
field_this_str!(fields, actor);
field_this_str!(fields, effect);
field_this!(fields, "margin_l", margin_left);
field_this!(fields, "margin_r", margin_right);
field_this!(fields, "margin_t", margin_vertical);
field_this!(fields, "margin_b", margin_vertical);
field_this_str!(fields, text);
// TODO: extra
fields.add_field_method_get("extra", |lua, _| lua.create_table());
}
} | AssDialogue |
wake-lock.js | module.exports={A:{A:{"2":"I D F E A B kB"},B:{"2":"C O H P J K L","194":"M KB N R WB T"},C:{"2":"0 1 2 3 4 5 6 7 8 9 dB ZB G X I D F E A B C O H P J K L Y Z a b c d e f g h i j k l m n o p q r s t u v w x y z AB BB CB DB NB FB GB HB IB JB Q LB EB V U TB OB PB QB RB SB MB M KB N jB R rB uB"},D:{"1":"T vB aB bB","2":"0 1 2 3 4 5 6 7 8 9 G X I D F E A B C O H P J K L Y Z a b c d e f g h i j k l m n o p q r s t u v w x y z AB BB CB DB NB FB GB HB IB JB Q LB EB V","194":"U TB OB PB QB RB SB MB M KB N R WB"},E:{"2":"G X I D F E A B C O H cB UB eB fB gB hB VB S W lB mB"},F:{"2":"0 1 2 3 4 5 6 7 8 9 E B C P J K L Y Z a b c d e f g h i j k l m n o p q r s t u v w x y z AB nB oB pB qB S XB sB W","194":"BB DB FB GB HB IB JB Q LB EB V U"},G:{"2":"F UB tB YB TC wB xB yB zB 0B 1B 2B 3B 4B 5B 6B 7B 8B 9B AC BC"},H:{"2":"CC"},I:{"1":"N","2":"ZB G DC EC FC GC YB HC IC"},J:{"2":"D A"},K:{"2":"A B C CB S XB W"},L:{"1":"T"},M:{"2":"M"},N:{"2":"A B"},O:{"2":"JC"},P:{"2":"G KC LC MC NC OC VB PC QC"},Q:{"2":"RC"},R:{"2":"SC"},S:{"2":"iB"}},B:4,C:"Screen Wake Lock API"}; |
||
session.go | package tunnel
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"time"
"github.com/mikespook/golib/log"
"github.com/mikespook/sts/iface"
"golang.org/x/crypto/ssh"
"gopkg.in/mgo.v2/bson"
)
type session struct {
id bson.ObjectId
ssh.Conn
channels <-chan ssh.NewChannel
oobReqs <-chan *ssh.Request
bus iface.Bus
etime time.Time
}
func newSession(conn net.Conn, config *ssh.ServerConfig) (s *session, err error) |
func (s *session) ETime() time.Time {
return s.etime
}
func (s *session) Id() bson.ObjectId {
return s.id
}
func (s *session) serveOOBRequest() {
for req := range s.oobReqs {
log.Messagef("OOB Request: %+v", req)
}
}
func (s *session) serveChannels() {
for newChan := range s.channels {
ch, reqCh, err := newChan.Accept()
if err != nil {
log.Errorf("Channel: %s", err)
return
}
chType := newChan.ChannelType()
switch chType {
case "session":
go s.session(newChan, ch, reqCh)
case "direct-tcpip":
go s.directTcpIp(newChan, ch)
default:
msg := fmt.Sprintf("%s is not supported\n\r", chType)
if _, err := ch.Write([]byte(msg)); err != nil {
log.Errorf("Write: %s", err)
return
}
}
}
}
func (s *session) status(ch io.Writer) {
outputs := []string{
"\x1b[2J\x1b[1;1H",
fmt.Sprintf("Secure Tunnel Server (%s)\r\n", s.ServerVersion()),
fmt.Sprintf("User: %s@%s\r\n", s.User(),
s.RemoteAddr()),
"\n* Press any key to refresh status *\r\n* Press [Ctrl+C] to disconnect *\r\n",
}
for _, line := range outputs {
ch.Write([]byte(line))
}
}
func (s *session) session(newChan ssh.NewChannel,
ch ssh.Channel, reqChan <-chan *ssh.Request) {
defer ch.Close()
buf := make([]byte, 1)
LOOP:
for {
s.status(ch)
if _, err := ch.Read(buf); err != nil {
if err != io.EOF {
log.Errorf("Read: %s", err)
}
return
}
switch buf[0] {
case 0x03:
s.Close()
break LOOP
default:
}
}
}
func parseAddr(data []byte) (addr string, err error) {
buf := bytes.NewReader(data)
var size uint32
if err = binary.Read(buf, binary.BigEndian, &size); err != nil {
return
}
ip := make([]byte, size)
if err = binary.Read(buf, binary.BigEndian, ip); err != nil {
return
}
var port uint32
if err = binary.Read(buf, binary.BigEndian, &port); err != nil {
return
}
addr = fmt.Sprintf("%s:%d", ip, port)
return
}
func (s *session) directTcpIp(newChan ssh.NewChannel,
ch ssh.Channel) {
defer ch.Close()
addr, err := parseAddr(newChan.ExtraData())
if err != nil {
log.Error(err)
return
}
a, err := newAgent(addr, ch)
if err != nil {
log.Error(err)
return
}
a.session = s
defer a.Close()
s.bus.AddAgent(a)
defer s.bus.RemoveAgent(a)
if err := a.Serve(); err != nil {
return
}
}
func (s *session) Close() error {
return s.Conn.Close()
}
func (s *session) Agents() map[bson.ObjectId]iface.Agent {
agents := make(map[bson.ObjectId]iface.Agent)
all := s.bus.Agents()
for k, v := range all {
if v.SessionId() == s.id {
agents[k] = v
}
}
return agents
}
func (s *session) Serve() {
s.etime = time.Now()
go s.serveOOBRequest()
log.Messagef("SSH-Connect: %s [%s@%s] (%s)", s.id.Hex(), s.User(),
s.RemoteAddr(), s.ClientVersion())
s.serveChannels()
log.Messagef("SSH-Disconnect: %s", s.id.Hex())
}
| {
s = &session{
id: bson.NewObjectId(),
}
if s.Conn, s.channels, s.oobReqs,
err = ssh.NewServerConn(conn, config); err != nil {
if err != io.EOF {
return
}
}
return
} |
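For reference, a sketch of the length-prefixed layout that parseAddr above consumes from a direct-tcpip channel's ExtraData; the buffer is hand-built for illustration.
// Illustrative only (lives alongside parseAddr in this package):
// 4-byte big-endian host length, host bytes, 4-byte big-endian port.
func exampleParseAddr() {
	var buf bytes.Buffer
	host := []byte("127.0.0.1")
	binary.Write(&buf, binary.BigEndian, uint32(len(host))) // host length
	buf.Write(host)                                         // host bytes
	binary.Write(&buf, binary.BigEndian, uint32(8080))      // port
	if addr, err := parseAddr(buf.Bytes()); err == nil {
		fmt.Println(addr) // "127.0.0.1:8080"
	}
}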
load.py | import json
import os
import boto3
import yaml
from lib.dynamodb import accounts_table, requirements_table, user_table, config_table
from lib.lambda_decorator.decorator import states_decorator
client_s3 = boto3.client('s3')
user_bucket = os.getenv('USER_BUCKET')
account_bucket = os.getenv('ACCOUNT_BUCKET')
requirements_bucket = os.getenv('REQUIREMENTS_BUCKET')
@states_decorator
def load_handler(event, context):
"""
Imports users, accounts, and requirements files.
    Returns the information needed for the scan: the account ids,
    the payer account ids, the requirement ids sourced via s3 import,
    and the cloudsploit settings map
Expected input event format
{}
"""
accounts = load_accounts()
load_user()
requirements = load_requirements()
return {
'accountIds': list({a['accountId'] for a in accounts}),
'payerIds': list({a.get('payer_id') for a in accounts if a.get('payer_id')}),
's3RequirementIds': list({r_id for r_id, r in requirements['requirements'].items() if r.get('source') == 's3Import'}),
'cloudsploitSettingsMap': requirements['cloudsploitSettingsMap']
}
def load_accounts():
"""Syncs accounts in accounts table with those present in the S3 bucket"""
account_ids_to_delete = []
accounts_to_add = []
s3_response = client_s3.get_object(Bucket=account_bucket, Key=os.getenv('ACCOUNT_FILE_PATH'))
account_list_from_s3 = json.loads(s3_response['Body'].read())['accounts']
for account in account_list_from_s3:
accounts_table.normalize_account_record(account)
accounts_from_s3 = {account['accountId']: account for account in account_list_from_s3}
ddb_data = accounts_table.scan_all()
accounts_from_ddb = {account['accountId']: account for account in ddb_data}
for account_id in accounts_from_ddb:
if account_id in accounts_from_s3:
if accounts_from_ddb[account_id] != accounts_from_s3[account_id]:
accounts_to_add.append(accounts_from_s3[account_id])
else:
account_ids_to_delete.append(account_id)
for account_id in accounts_from_s3:
if account_id not in accounts_from_ddb:
accounts_to_add.append(accounts_from_s3[account_id])
with accounts_table.batch_writer() as batch:
for account_id in account_ids_to_delete:
batch.delete_item(Key={'accountId': account_id})
for account in accounts_to_add:
batch.put_item(Item=account)
return account_list_from_s3
def load_user():
"""Syncs users in user's table with those present in S3 bucket,
ensures admin permissions are retained"""
user_emails_to_delete = []
users_to_add = []
s3_response = client_s3.get_object(Bucket=user_bucket, Key=os.getenv('USER_FILE_PATH'))
user_list_from_s3 = json.loads(s3_response['Body'].read())
users_from_s3 = {}
for user in user_list_from_s3:
user['email'] = user['email'].lower()
users_from_s3[user['email']] = user
ddb_data = user_table.scan_all()
users_from_ddb = {user['email']: user for user in ddb_data}
for user_email, existing_user in users_from_ddb.items():
if user_email in users_from_s3:
if existing_user != users_from_s3[user_email]:
if existing_user.get('isAdmin', False):
# update incoming user
users_to_add.append(dict(
users_from_s3[user_email],
**{
'isAdmin': existing_user.get('isAdmin'),
}))
else:
users_to_add.append(users_from_s3[user_email])
else:
if existing_user.get('isAdmin', False):
users_to_add.append({
'email': existing_user.get('email'),
'isAdmin': existing_user.get('isAdmin'),
})
else:
user_emails_to_delete.append(user_email)
for user_email in users_from_s3:
if user_email not in users_from_ddb:
users_to_add.append(users_from_s3[user_email])
with user_table.batch_writer() as batch:
for user_email in user_emails_to_delete:
batch.delete_item(Key={'email': user_email})
for user in users_to_add:
batch.put_item(Item=user)
return user_list_from_s3
def load_requirements():
"""Loads requirements yaml from s3 and updates
requirements in requirements table along with
various other configs"""
s3_response = client_s3.get_object(Bucket=requirements_bucket, Key=os.getenv('REQUIREMENTS_FILE_PATH'))
requirements_file = yaml.safe_load(s3_response['Body'].read())
cloudsploit_settings_map = requirements_file['cloudsploitSettings']
severity_weight_map = requirements_file['severityWeightings']
exclusion_types = requirements_file['exclusionTypes']
version = requirements_file['version']
severity_colors = requirements_file['severityColors']
remediations = requirements_file['remediations']
requirements = requirements_file['database']
# denormalize weights and add requirement id inside object for dynamodb storage
for requirement_id, requirement in requirements.items():
requirement['requirementId'] = requirement_id
requirement['weight'] = severity_weight_map[requirement['severity']]
update_requirements(requirements)
update_exclusion_types(exclusion_types)
update_version(version)
update_severity_colors(severity_colors)
update_severity_weights(severity_weight_map)
update_remediations(remediations)
return {
'requirements': requirements,
'cloudsploitSettingsMap': cloudsploit_settings_map,
}
|
def update_requirements(requirements):
"""Syncs requirements in requirements table
with the parameters that are passed"""
requirement_ids_to_delete = []
reqs_to_add = []
# load requirements saved in dynamodb
ddb_data = requirements_table.scan_all()
requirements_from_ddb = {requirement['requirementId']: requirement for requirement in ddb_data}
for requirement_id in requirements_from_ddb:
if requirement_id in requirements:
if requirements_from_ddb[requirement_id] != requirements[requirement_id]:
reqs_to_add.append(requirements[requirement_id])
else:
requirement_ids_to_delete.append(requirement_id)
for requirement_id in requirements:
if requirement_id not in requirements_from_ddb:
reqs_to_add.append(requirements[requirement_id])
with requirements_table.batch_writer() as batch:
for requirement_id in requirement_ids_to_delete:
batch.delete_item(Key={'requirementId': requirement_id})
for requirement in reqs_to_add:
batch.put_item(Item=requirement)
def update_version(version):
config_table.set_config(config_table.VERSION, version)
def update_exclusion_types(exclusions):
config_table.set_config(config_table.EXCLUSIONS, exclusions)
def update_severity_colors(severity_colors):
config_table.set_config(config_table.SEVERITYCOLORS, severity_colors)
def update_severity_weights(severity_weight_map):
config_table.set_config(config_table.SEVERITYWEIGHTS, severity_weight_map)
def update_remediations(remediations):
config_table.set_config(config_table.REMEDIATIONS, remediations) | |
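For orientation, a sketch of the input shapes the three loaders above assume; only the key names that appear in the code are grounded, the example values are made up.
# Illustrative input shapes (values are made up):
# accounts file (JSON):  {"accounts": [{"accountId": "111111111111", "payer_id": "222222222222"}]}
# users file (JSON):     [{"email": "jane@example.com", "isAdmin": true}]
# requirements file (YAML) top-level keys: cloudsploitSettings, severityWeightings,
# exclusionTypes, version, severityColors, remediations, database
# (each entry in database carries at least a severity and, for s3 imports, source: s3Import)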
enums.py | """ Enums used in different API endpoints """
from enum import Enum
class PluginStatusState(str, Enum):
"""State of the plugin"""
NOTRUNNING = "NotRunning"
STARTING = "Starting" |
def __str__(self) -> str:
return str(self.value)
class SidebarCategoryType(str, Enum):
"""None"""
CHANNELS = "channels"
CUSTOM = "custom"
DIRECT_MESSAGES = "direct_messages"
FAVORITES = "favorites"
def __str__(self) -> str:
return str(self.value)
class SidebarCategoryWithChannelsType(str, Enum):
"""None"""
CHANNELS = "channels"
CUSTOM = "custom"
DIRECT_MESSAGES = "direct_messages"
FAVORITES = "favorites"
def __str__(self) -> str:
return str(self.value)
class UploadSessionType(str, Enum):
"""The type of the upload."""
ATTACHMENT = "attachment"
IMPORT_ = "import"
def __str__(self) -> str:
return str(self.value)
class PostMetadataEmbedsItemType(str, Enum):
"""The type of content that is embedded in this point."""
IMAGE = "image"
MESSAGE_ATTACHMENT = "message_attachment"
OPENGRAPH = "opengraph"
LINK = "link"
def __str__(self) -> str:
return str(self.value) | RUNNING = "Running"
FAILEDTOSTART = "FailedToStart"
FAILEDTOSTAYRUNNING = "FailedToStayRunning"
STOPPING = "Stopping" |
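Because these enums subclass str, members compare and format as their raw values; a quick sketch:
state = PluginStatusState.RUNNING
assert state == "Running"                # str subclass: equal to its value
assert str(state) == "Running"           # __str__ returns str(self.value)
assert state in ("Running", "Stopping")  # so plain-string membership tests work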
models.go | package servicebus
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// The package's fully qualified name.
const fqdn = "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus"
// AccessRights enumerates the values for access rights.
type AccessRights string
const (
// Listen ...
Listen AccessRights = "Listen"
// Manage ...
Manage AccessRights = "Manage"
// Send ...
Send AccessRights = "Send"
)
// PossibleAccessRightsValues returns an array of possible values for the AccessRights const type.
func PossibleAccessRightsValues() []AccessRights {
return []AccessRights{Listen, Manage, Send}
}
// DefaultAction enumerates the values for default action.
type DefaultAction string
const (
// Allow ...
Allow DefaultAction = "Allow"
// Deny ...
Deny DefaultAction = "Deny"
)
// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type.
func PossibleDefaultActionValues() []DefaultAction {
return []DefaultAction{Allow, Deny}
}
// EncodingCaptureDescription enumerates the values for encoding capture description.
type EncodingCaptureDescription string
const (
// Avro ...
Avro EncodingCaptureDescription = "Avro"
// AvroDeflate ...
AvroDeflate EncodingCaptureDescription = "AvroDeflate"
)
// PossibleEncodingCaptureDescriptionValues returns an array of possible values for the EncodingCaptureDescription const type.
func PossibleEncodingCaptureDescriptionValues() []EncodingCaptureDescription {
return []EncodingCaptureDescription{Avro, AvroDeflate}
}
// EntityStatus enumerates the values for entity status.
type EntityStatus string
const (
// Active ...
Active EntityStatus = "Active"
// Creating ...
Creating EntityStatus = "Creating"
// Deleting ...
Deleting EntityStatus = "Deleting"
// Disabled ...
Disabled EntityStatus = "Disabled"
// ReceiveDisabled ...
ReceiveDisabled EntityStatus = "ReceiveDisabled"
// Renaming ...
Renaming EntityStatus = "Renaming"
// Restoring ...
Restoring EntityStatus = "Restoring"
// SendDisabled ...
SendDisabled EntityStatus = "SendDisabled"
// Unknown ...
Unknown EntityStatus = "Unknown"
)
// PossibleEntityStatusValues returns an array of possible values for the EntityStatus const type.
func PossibleEntityStatusValues() []EntityStatus {
return []EntityStatus{Active, Creating, Deleting, Disabled, ReceiveDisabled, Renaming, Restoring, SendDisabled, Unknown}
}
// FilterType enumerates the values for filter type.
type FilterType string
const (
// FilterTypeCorrelationFilter ...
FilterTypeCorrelationFilter FilterType = "CorrelationFilter"
// FilterTypeSQLFilter ...
FilterTypeSQLFilter FilterType = "SqlFilter"
)
// PossibleFilterTypeValues returns an array of possible values for the FilterType const type.
func PossibleFilterTypeValues() []FilterType {
return []FilterType{FilterTypeCorrelationFilter, FilterTypeSQLFilter}
}
// KeyType enumerates the values for key type.
type KeyType string
const (
// PrimaryKey ...
PrimaryKey KeyType = "PrimaryKey"
// SecondaryKey ...
SecondaryKey KeyType = "SecondaryKey"
)
// PossibleKeyTypeValues returns an array of possible values for the KeyType const type.
func PossibleKeyTypeValues() []KeyType {
return []KeyType{PrimaryKey, SecondaryKey}
}
// NameSpaceType enumerates the values for name space type.
type NameSpaceType string
const (
// EventHub ...
EventHub NameSpaceType = "EventHub"
// Messaging ...
Messaging NameSpaceType = "Messaging"
// Mixed ...
Mixed NameSpaceType = "Mixed"
// NotificationHub ...
NotificationHub NameSpaceType = "NotificationHub"
// Relay ...
Relay NameSpaceType = "Relay"
)
// PossibleNameSpaceTypeValues returns an array of possible values for the NameSpaceType const type.
func PossibleNameSpaceTypeValues() []NameSpaceType {
return []NameSpaceType{EventHub, Messaging, Mixed, NotificationHub, Relay}
}
// NetworkRuleIPAction enumerates the values for network rule ip action.
type NetworkRuleIPAction string
const (
// NetworkRuleIPActionAllow ...
NetworkRuleIPActionAllow NetworkRuleIPAction = "Allow"
)
// PossibleNetworkRuleIPActionValues returns an array of possible values for the NetworkRuleIPAction const type.
func PossibleNetworkRuleIPActionValues() []NetworkRuleIPAction {
return []NetworkRuleIPAction{NetworkRuleIPActionAllow}
}
// ProvisioningStateDR enumerates the values for provisioning state dr.
type ProvisioningStateDR string
const (
// Accepted ...
Accepted ProvisioningStateDR = "Accepted"
// Failed ...
Failed ProvisioningStateDR = "Failed"
// Succeeded ...
Succeeded ProvisioningStateDR = "Succeeded"
)
// PossibleProvisioningStateDRValues returns an array of possible values for the ProvisioningStateDR const type.
func PossibleProvisioningStateDRValues() []ProvisioningStateDR {
return []ProvisioningStateDR{Accepted, Failed, Succeeded}
}
// RoleDisasterRecovery enumerates the values for role disaster recovery.
type RoleDisasterRecovery string
const (
// Primary ...
Primary RoleDisasterRecovery = "Primary"
// PrimaryNotReplicating ...
PrimaryNotReplicating RoleDisasterRecovery = "PrimaryNotReplicating"
// Secondary ...
Secondary RoleDisasterRecovery = "Secondary"
)
// PossibleRoleDisasterRecoveryValues returns an array of possible values for the RoleDisasterRecovery const type.
func PossibleRoleDisasterRecoveryValues() []RoleDisasterRecovery {
return []RoleDisasterRecovery{Primary, PrimaryNotReplicating, Secondary}
}
// SkuName enumerates the values for sku name.
type SkuName string
const (
// Basic ...
Basic SkuName = "Basic"
// Premium ...
Premium SkuName = "Premium"
// Standard ...
Standard SkuName = "Standard"
)
// PossibleSkuNameValues returns an array of possible values for the SkuName const type.
func PossibleSkuNameValues() []SkuName {
return []SkuName{Basic, Premium, Standard}
}
// SkuTier enumerates the values for sku tier.
type SkuTier string
const (
// SkuTierBasic ...
SkuTierBasic SkuTier = "Basic"
// SkuTierPremium ...
SkuTierPremium SkuTier = "Premium"
// SkuTierStandard ...
SkuTierStandard SkuTier = "Standard"
)
// PossibleSkuTierValues returns an array of possible values for the SkuTier const type.
func PossibleSkuTierValues() []SkuTier {
return []SkuTier{SkuTierBasic, SkuTierPremium, SkuTierStandard}
}
// UnavailableReason enumerates the values for unavailable reason.
type UnavailableReason string
const (
// InvalidName ...
InvalidName UnavailableReason = "InvalidName"
// NameInLockdown ...
NameInLockdown UnavailableReason = "NameInLockdown"
// NameInUse ...
NameInUse UnavailableReason = "NameInUse"
// None ...
None UnavailableReason = "None"
// SubscriptionIsDisabled ...
SubscriptionIsDisabled UnavailableReason = "SubscriptionIsDisabled"
// TooManyNamespaceInCurrentSubscription ...
TooManyNamespaceInCurrentSubscription UnavailableReason = "TooManyNamespaceInCurrentSubscription"
)
// PossibleUnavailableReasonValues returns an array of possible values for the UnavailableReason const type.
func PossibleUnavailableReasonValues() []UnavailableReason {
return []UnavailableReason{InvalidName, NameInLockdown, NameInUse, None, SubscriptionIsDisabled, TooManyNamespaceInCurrentSubscription}
}
// AccessKeys namespace/ServiceBus Connection String
type AccessKeys struct {
autorest.Response `json:"-"`
// PrimaryConnectionString - READ-ONLY; Primary connection string of the created namespace authorization rule.
PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"`
// SecondaryConnectionString - READ-ONLY; Secondary connection string of the created namespace authorization rule.
SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"`
// AliasPrimaryConnectionString - READ-ONLY; Primary connection string of the alias if GEO DR is enabled
AliasPrimaryConnectionString *string `json:"aliasPrimaryConnectionString,omitempty"`
// AliasSecondaryConnectionString - READ-ONLY; Secondary connection string of the alias if GEO DR is enabled
AliasSecondaryConnectionString *string `json:"aliasSecondaryConnectionString,omitempty"`
// PrimaryKey - READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token.
PrimaryKey *string `json:"primaryKey,omitempty"`
	// SecondaryKey - READ-ONLY; A base64-encoded 256-bit secondary key for signing and validating the SAS token.
SecondaryKey *string `json:"secondaryKey,omitempty"`
// KeyName - READ-ONLY; A string that describes the authorization rule.
KeyName *string `json:"keyName,omitempty"`
}
// Action represents the filter actions which are allowed for the transformation of a message that have
// been matched by a filter expression.
type Action struct {
// SQLExpression - SQL expression. e.g. MyProperty='ABC'
SQLExpression *string `json:"sqlExpression,omitempty"`
// CompatibilityLevel - This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.
CompatibilityLevel *int32 `json:"compatibilityLevel,omitempty"`
// RequiresPreprocessing - Value that indicates whether the rule action requires preprocessing.
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
}
// ArmDisasterRecovery single item in List or Get Alias(Disaster Recovery configuration) operation
type ArmDisasterRecovery struct {
autorest.Response `json:"-"`
// ArmDisasterRecoveryProperties - Properties required to the Create Or Update Alias(Disaster Recovery configurations)
*ArmDisasterRecoveryProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for ArmDisasterRecovery.
func (adr ArmDisasterRecovery) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if adr.ArmDisasterRecoveryProperties != nil {
objectMap["properties"] = adr.ArmDisasterRecoveryProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for ArmDisasterRecovery struct.
func (adr *ArmDisasterRecovery) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var armDisasterRecoveryProperties ArmDisasterRecoveryProperties
err = json.Unmarshal(*v, &armDisasterRecoveryProperties)
if err != nil {
return err
}
adr.ArmDisasterRecoveryProperties = &armDisasterRecoveryProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
adr.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
adr.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
adr.Type = &typeVar
}
}
}
return nil
}
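The custom marshalers above flatten the embedded properties struct; a brief illustrative round trip (assumes "fmt" alongside this file's imports):
// Illustrative only: "properties" is nested on the wire but promoted in Go.
func exampleAliasRoundTrip() error {
	raw := []byte(`{"name":"alias1","properties":{"partnerNamespace":"ns2"}}`)
	var adr ArmDisasterRecovery
	if err := json.Unmarshal(raw, &adr); err != nil {
		return err
	}
	fmt.Println(*adr.PartnerNamespace) // "ns2", via the promoted embedded struct
	out, err := json.Marshal(adr)      // READ-ONLY fields (ID, Name, Type) are not re-emitted
	if err != nil {
		return err
	}
	fmt.Println(string(out)) // {"properties":{"partnerNamespace":"ns2"}}
	return nil
}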
// ArmDisasterRecoveryListResult the result of the List Alias(Disaster Recovery configuration) operation.
type ArmDisasterRecoveryListResult struct {
autorest.Response `json:"-"`
// Value - List of Alias(Disaster Recovery configurations)
Value *[]ArmDisasterRecovery `json:"value,omitempty"`
// NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of Alias(Disaster Recovery configuration)
NextLink *string `json:"nextLink,omitempty"`
}
// ArmDisasterRecoveryListResultIterator provides access to a complete listing of ArmDisasterRecovery
// values.
type ArmDisasterRecoveryListResultIterator struct {
i int
page ArmDisasterRecoveryListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *ArmDisasterRecoveryListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *ArmDisasterRecoveryListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter ArmDisasterRecoveryListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter ArmDisasterRecoveryListResultIterator) Response() ArmDisasterRecoveryListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter ArmDisasterRecoveryListResultIterator) Value() ArmDisasterRecovery {
if !iter.page.NotDone() {
return ArmDisasterRecovery{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the ArmDisasterRecoveryListResultIterator type.
func NewArmDisasterRecoveryListResultIterator(page ArmDisasterRecoveryListResultPage) ArmDisasterRecoveryListResultIterator {
return ArmDisasterRecoveryListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (adrlr ArmDisasterRecoveryListResult) IsEmpty() bool {
return adrlr.Value == nil || len(*adrlr.Value) == 0
}
// armDisasterRecoveryListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (adrlr ArmDisasterRecoveryListResult) armDisasterRecoveryListResultPreparer(ctx context.Context) (*http.Request, error) {
if adrlr.NextLink == nil || len(to.String(adrlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(adrlr.NextLink)))
}
// ArmDisasterRecoveryListResultPage contains a page of ArmDisasterRecovery values.
type ArmDisasterRecoveryListResultPage struct {
fn func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)
adrlr ArmDisasterRecoveryListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *ArmDisasterRecoveryListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.adrlr)
if err != nil {
return err
}
page.adrlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *ArmDisasterRecoveryListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page ArmDisasterRecoveryListResultPage) NotDone() bool {
return !page.adrlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page ArmDisasterRecoveryListResultPage) Response() ArmDisasterRecoveryListResult {
return page.adrlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page ArmDisasterRecoveryListResultPage) Values() []ArmDisasterRecovery {
if page.adrlr.IsEmpty() {
return nil
}
return *page.adrlr.Value
}
// Creates a new instance of the ArmDisasterRecoveryListResultPage type.
func NewArmDisasterRecoveryListResultPage(getNextPage func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)) ArmDisasterRecoveryListResultPage {
return ArmDisasterRecoveryListResultPage{fn: getNextPage}
}
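The page/iterator pair above follows the SDK's standard pagination pattern; a hedged consumption sketch (the first page would come from a client List call that is not part of this file):
// Illustrative only: drain every alias across all pages.
func drainAliases(ctx context.Context, page ArmDisasterRecoveryListResultPage) ([]ArmDisasterRecovery, error) {
	var all []ArmDisasterRecovery
	for page.NotDone() {
		all = append(all, page.Values()...)
		if err := page.NextWithContext(ctx); err != nil {
			return all, err
		}
	}
	return all, nil
}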
// ArmDisasterRecoveryProperties properties required to the Create Or Update Alias(Disaster Recovery
// configurations)
type ArmDisasterRecoveryProperties struct {
// ProvisioningState - READ-ONLY; Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or 'Succeeded' or 'Failed'. Possible values include: 'Accepted', 'Succeeded', 'Failed'
ProvisioningState ProvisioningStateDR `json:"provisioningState,omitempty"`
// PendingReplicationOperationsCount - READ-ONLY; Number of entities pending to be replicated.
PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"`
// PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairing
PartnerNamespace *string `json:"partnerNamespace,omitempty"`
// AlternateName - Primary/Secondary eventhub namespace name, which is part of GEO DR pairing
AlternateName *string `json:"alternateName,omitempty"`
// Role - READ-ONLY; role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'. Possible values include: 'Primary', 'PrimaryNotReplicating', 'Secondary'
Role RoleDisasterRecovery `json:"role,omitempty"`
}
// AuthorizationRuleProperties authorizationRule properties.
type AuthorizationRuleProperties struct {
// Rights - The rights associated with the rule.
Rights *[]AccessRights `json:"rights,omitempty"`
}
// CaptureDescription properties to configure capture description for eventhub
type CaptureDescription struct {
// Enabled - A value that indicates whether capture description is enabled.
Enabled *bool `json:"enabled,omitempty"`
// Encoding - Enumerates the possible values for the encoding format of capture description. Possible values include: 'Avro', 'AvroDeflate'
Encoding EncodingCaptureDescription `json:"encoding,omitempty"`
	// IntervalInSeconds - The time window allows you to set the frequency with which the capture to Azure Blobs will happen, value should be between 60 and 900 seconds
IntervalInSeconds *int32 `json:"intervalInSeconds,omitempty"`
	// SizeLimitInBytes - The size window defines the amount of data built up in your Event Hub before a capture operation, value should be between 10485760 and 524288000 bytes
SizeLimitInBytes *int32 `json:"sizeLimitInBytes,omitempty"`
// Destination - Properties of Destination where capture will be stored. (Storage Account, Blob Names)
Destination *Destination `json:"destination,omitempty"`
}
// CheckNameAvailability description of a Check Name availability request properties.
type CheckNameAvailability struct {
	// Name - The name to check for namespace availability. The namespace name can contain only letters, numbers, and hyphens; it must start with a letter and end with a letter or number.
Name *string `json:"name,omitempty"`
}
// CheckNameAvailabilityResult description of a Check Name availability request properties.
type CheckNameAvailabilityResult struct {
autorest.Response `json:"-"`
// Message - READ-ONLY; The detailed info regarding the reason associated with the namespace.
Message *string `json:"message,omitempty"`
	// NameAvailable - Value indicating namespace availability: true if the namespace is available; otherwise, false.
NameAvailable *bool `json:"nameAvailable,omitempty"`
// Reason - The reason for unavailability of a namespace. Possible values include: 'None', 'InvalidName', 'SubscriptionIsDisabled', 'NameInUse', 'NameInLockdown', 'TooManyNamespaceInCurrentSubscription'
Reason UnavailableReason `json:"reason,omitempty"`
}
// CorrelationFilter represents the correlation filter expression.
type CorrelationFilter struct {
// Properties - dictionary object for custom filters
Properties map[string]*string `json:"properties"`
// CorrelationID - Identifier of the correlation.
CorrelationID *string `json:"correlationId,omitempty"`
// MessageID - Identifier of the message.
MessageID *string `json:"messageId,omitempty"`
// To - Address to send to.
To *string `json:"to,omitempty"`
// ReplyTo - Address of the queue to reply to.
ReplyTo *string `json:"replyTo,omitempty"`
// Label - Application specific label.
Label *string `json:"label,omitempty"`
// SessionID - Session identifier.
SessionID *string `json:"sessionId,omitempty"`
// ReplyToSessionID - Session identifier to reply to.
ReplyToSessionID *string `json:"replyToSessionId,omitempty"`
// ContentType - Content type of the message.
ContentType *string `json:"contentType,omitempty"`
// RequiresPreprocessing - Value that indicates whether the rule action requires preprocessing.
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
}
// MarshalJSON is the custom marshaler for CorrelationFilter.
func (cf CorrelationFilter) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if cf.Properties != nil {
objectMap["properties"] = cf.Properties
}
if cf.CorrelationID != nil {
objectMap["correlationId"] = cf.CorrelationID
}
if cf.MessageID != nil {
objectMap["messageId"] = cf.MessageID
}
if cf.To != nil {
objectMap["to"] = cf.To
}
if cf.ReplyTo != nil {
objectMap["replyTo"] = cf.ReplyTo
}
if cf.Label != nil {
objectMap["label"] = cf.Label
}
if cf.SessionID != nil {
objectMap["sessionId"] = cf.SessionID
}
if cf.ReplyToSessionID != nil {
objectMap["replyToSessionId"] = cf.ReplyToSessionID
}
if cf.ContentType != nil {
objectMap["contentType"] = cf.ContentType
}
if cf.RequiresPreprocessing != nil {
objectMap["requiresPreprocessing"] = cf.RequiresPreprocessing
}
return json.Marshal(objectMap)
}
// Destination capture storage details for capture description
type Destination struct {
// Name - Name for capture destination
Name *string `json:"name,omitempty"`
// DestinationProperties - Properties describing the storage account, blob container and archive name format for capture destination
*DestinationProperties `json:"properties,omitempty"`
}
// MarshalJSON is the custom marshaler for Destination.
func (d Destination) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if d.Name != nil {
objectMap["name"] = d.Name
}
if d.DestinationProperties != nil {
objectMap["properties"] = d.DestinationProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Destination struct.
func (d *Destination) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
d.Name = &name
}
case "properties":
if v != nil {
var destinationProperties DestinationProperties
err = json.Unmarshal(*v, &destinationProperties)
if err != nil {
return err
}
d.DestinationProperties = &destinationProperties
}
}
}
return nil
}
// DestinationProperties properties describing the storage account, blob container and archive name format
// for capture destination
type DestinationProperties struct {
// StorageAccountResourceID - Resource id of the storage account to be used to create the blobs
StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"`
// BlobContainer - Blob container Name
BlobContainer *string `json:"blobContainer,omitempty"`
// ArchiveNameFormat - Blob naming convention for archive, e.g. {Namespace}/{EventHub}/{PartitionId}/{Year}/{Month}/{Day}/{Hour}/{Minute}/{Second}. Here all the parameters (Namespace,EventHub .. etc) are mandatory irrespective of order
ArchiveNameFormat *string `json:"archiveNameFormat,omitempty"`
}
// ErrorResponse error response indicates ServiceBus service is not able to process the incoming request.
// The reason is provided in the error message.
type ErrorResponse struct {
// Code - Error code.
Code *string `json:"code,omitempty"`
// Message - Error message indicating why the operation failed.
Message *string `json:"message,omitempty"`
}
// Eventhub single item in List or Get Event Hub operation
type Eventhub struct {
// EventhubProperties - Properties supplied to the Create Or Update Event Hub operation.
*EventhubProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for Eventhub.
func (e Eventhub) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if e.EventhubProperties != nil {
objectMap["properties"] = e.EventhubProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Eventhub struct.
func (e *Eventhub) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var eventhubProperties EventhubProperties
err = json.Unmarshal(*v, &eventhubProperties)
if err != nil {
return err
}
e.EventhubProperties = &eventhubProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
e.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
e.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
e.Type = &typeVar
}
}
}
return nil
}
// EventHubListResult the result of the List EventHubs operation.
type EventHubListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List EventHubs operation.
Value *[]Eventhub `json:"value,omitempty"`
// NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of EventHubs.
NextLink *string `json:"nextLink,omitempty"`
}
// EventHubListResultIterator provides access to a complete listing of Eventhub values.
type EventHubListResultIterator struct {
i int
page EventHubListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *EventHubListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EventHubListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *EventHubListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter EventHubListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter EventHubListResultIterator) Response() EventHubListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter EventHubListResultIterator) Value() Eventhub {
if !iter.page.NotDone() {
return Eventhub{}
}
return iter.page.Values()[iter.i]
}
// Creates a new instance of the EventHubListResultIterator type.
func NewEventHubListResultIterator(page EventHubListResultPage) EventHubListResultIterator {
return EventHubListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (ehlr EventHubListResult) IsEmpty() bool {
return ehlr.Value == nil || len(*ehlr.Value) == 0
}
// eventHubListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (ehlr EventHubListResult) eventHubListResultPreparer(ctx context.Context) (*http.Request, error) {
if ehlr.NextLink == nil || len(to.String(ehlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(ehlr.NextLink)))
}
// EventHubListResultPage contains a page of Eventhub values.
type EventHubListResultPage struct {
fn func(context.Context, EventHubListResult) (EventHubListResult, error)
ehlr EventHubListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *EventHubListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/EventHubListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.ehlr)
if err != nil {
return err
}
page.ehlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *EventHubListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page EventHubListResultPage) NotDone() bool {
return !page.ehlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page EventHubListResultPage) Response() EventHubListResult {
return page.ehlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page EventHubListResultPage) Values() []Eventhub {
if page.ehlr.IsEmpty() {
return nil
}
return *page.ehlr.Value
}
// Creates a new instance of the EventHubListResultPage type.
func NewEventHubListResultPage(getNextPage func(context.Context, EventHubListResult) (EventHubListResult, error)) EventHubListResultPage {
return EventHubListResultPage{fn: getNextPage}
}
// EventhubProperties properties supplied to the Create Or Update Event Hub operation.
type EventhubProperties struct {
// PartitionIds - READ-ONLY; Current number of shards on the Event Hub.
PartitionIds *[]string `json:"partitionIds,omitempty"`
// CreatedAt - READ-ONLY; Exact time the Event Hub was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
// UpdatedAt - READ-ONLY; The exact time the message was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// MessageRetentionInDays - Number of days to retain the events for this Event Hub, value should be 1 to 7 days
MessageRetentionInDays *int64 `json:"messageRetentionInDays,omitempty"`
// PartitionCount - Number of partitions created for the Event Hub, allowed values are from 1 to 32 partitions.
PartitionCount *int64 `json:"partitionCount,omitempty"`
// Status - Enumerates the possible values for the status of the Event Hub. Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled', 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'
Status EntityStatus `json:"status,omitempty"`
// CaptureDescription - Properties of capture description
CaptureDescription *CaptureDescription `json:"captureDescription,omitempty"`
}
// MessageCountDetails message Count Details.
type MessageCountDetails struct {
// ActiveMessageCount - READ-ONLY; Number of active messages in the queue, topic, or subscription.
ActiveMessageCount *int64 `json:"activeMessageCount,omitempty"`
// DeadLetterMessageCount - READ-ONLY; Number of messages that are dead lettered.
DeadLetterMessageCount *int64 `json:"deadLetterMessageCount,omitempty"`
// ScheduledMessageCount - READ-ONLY; Number of scheduled messages.
ScheduledMessageCount *int64 `json:"scheduledMessageCount,omitempty"`
// TransferMessageCount - READ-ONLY; Number of messages transferred to another queue, topic, or subscription.
TransferMessageCount *int64 `json:"transferMessageCount,omitempty"`
// TransferDeadLetterMessageCount - READ-ONLY; Number of messages transferred into dead letters.
TransferDeadLetterMessageCount *int64 `json:"transferDeadLetterMessageCount,omitempty"`
}
// MigrationConfigListResult the result of the List migrationConfigurations operation.
type MigrationConfigListResult struct {
autorest.Response `json:"-"`
// Value - List of Migration Configs
Value *[]MigrationConfigProperties `json:"value,omitempty"`
// NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of migrationConfigurations
NextLink *string `json:"nextLink,omitempty"`
}
// MigrationConfigListResultIterator provides access to a complete listing of MigrationConfigProperties
// values.
type MigrationConfigListResultIterator struct {
i int
page MigrationConfigListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *MigrationConfigListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *MigrationConfigListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter MigrationConfigListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter MigrationConfigListResultIterator) Response() MigrationConfigListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter MigrationConfigListResultIterator) Value() MigrationConfigProperties {
if !iter.page.NotDone() {
return MigrationConfigProperties{}
}
return iter.page.Values()[iter.i]
}
// NewMigrationConfigListResultIterator creates a new instance of the MigrationConfigListResultIterator type.
func NewMigrationConfigListResultIterator(page MigrationConfigListResultPage) MigrationConfigListResultIterator {
return MigrationConfigListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (mclr MigrationConfigListResult) IsEmpty() bool {
return mclr.Value == nil || len(*mclr.Value) == 0
}
// migrationConfigListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (mclr MigrationConfigListResult) migrationConfigListResultPreparer(ctx context.Context) (*http.Request, error) {
if mclr.NextLink == nil || len(to.String(mclr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(mclr.NextLink)))
}
// MigrationConfigListResultPage contains a page of MigrationConfigProperties values.
type MigrationConfigListResultPage struct {
fn func(context.Context, MigrationConfigListResult) (MigrationConfigListResult, error)
mclr MigrationConfigListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *MigrationConfigListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.mclr)
if err != nil {
return err
}
page.mclr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *MigrationConfigListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page MigrationConfigListResultPage) NotDone() bool {
return !page.mclr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page MigrationConfigListResultPage) Response() MigrationConfigListResult {
return page.mclr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page MigrationConfigListResultPage) Values() []MigrationConfigProperties {
if page.mclr.IsEmpty() {
return nil
}
return *page.mclr.Value
}
// NewMigrationConfigListResultPage creates a new instance of the MigrationConfigListResultPage type.
func NewMigrationConfigListResultPage(getNextPage func(context.Context, MigrationConfigListResult) (MigrationConfigListResult, error)) MigrationConfigListResultPage {
return MigrationConfigListResultPage{fn: getNextPage}
}
// MigrationConfigProperties single item in List or Get Migration Config operation
type MigrationConfigProperties struct {
autorest.Response `json:"-"`
// MigrationConfigPropertiesProperties - Properties required for the Create Migration Configuration operation
*MigrationConfigPropertiesProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for MigrationConfigProperties.
func (mcp MigrationConfigProperties) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if mcp.MigrationConfigPropertiesProperties != nil {
objectMap["properties"] = mcp.MigrationConfigPropertiesProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for MigrationConfigProperties struct.
func (mcp *MigrationConfigProperties) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var migrationConfigPropertiesProperties MigrationConfigPropertiesProperties
err = json.Unmarshal(*v, &migrationConfigPropertiesProperties)
if err != nil {
return err
}
mcp.MigrationConfigPropertiesProperties = &migrationConfigPropertiesProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
mcp.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
mcp.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
mcp.Type = &typeVar
}
}
}
return nil
}
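
// The custom (un)marshalers above flatten the nested "properties" JSON object into
// the embedded *MigrationConfigPropertiesProperties, so a payload such as the sketch
// below (illustrative, with elided IDs) surfaces TargetNamespace directly on
// MigrationConfigProperties. READ-ONLY fields (ID, Name, Type) are deliberately
// omitted by MarshalJSON so they are never sent back to the service:
//
//	{"id": "/subscriptions/.../migrationConfigurations/$default",
//	 "properties": {"targetNamespace": "...", "postMigrationName": "..."}}
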
// MigrationConfigPropertiesProperties properties required for the Create Migration Configuration operation
type MigrationConfigPropertiesProperties struct {
// ProvisioningState - READ-ONLY; Provisioning state of Migration Configuration
ProvisioningState *string `json:"provisioningState,omitempty"`
// PendingReplicationOperationsCount - READ-ONLY; Number of entities pending to be replicated.
PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"`
// TargetNamespace - ARM ID of the existing premium namespace, which must contain no entities, that will be used for migration
TargetNamespace *string `json:"targetNamespace,omitempty"`
// PostMigrationName - Name to access Standard Namespace after migration
PostMigrationName *string `json:"postMigrationName,omitempty"`
// MigrationState - READ-ONLY; Current state of the Standard to Premium migration. Possible values: Unknown, Reverting, Completing, Initiating, Syncing, Active
MigrationState *string `json:"migrationState,omitempty"`
}
// MigrationConfigsCreateAndStartMigrationFuture an abstraction for monitoring and retrieving the results
// of a long-running operation.
type MigrationConfigsCreateAndStartMigrationFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *MigrationConfigsCreateAndStartMigrationFuture) Result(client MigrationConfigsClient) (mcp MigrationConfigProperties, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.MigrationConfigsCreateAndStartMigrationFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("servicebus.MigrationConfigsCreateAndStartMigrationFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if mcp.Response.Response, err = future.GetResult(sender); err == nil && mcp.Response.Response.StatusCode != http.StatusNoContent {
mcp, err = client.CreateAndStartMigrationResponder(mcp.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.MigrationConfigsCreateAndStartMigrationFuture", "Result", mcp.Response.Response, "Failure responding to request")
}
}
return
}
// NamespacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
// long-running operation.
type NamespacesCreateOrUpdateFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *NamespacesCreateOrUpdateFuture) Result(client NamespacesClient) (sn SBNamespace, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.NamespacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("servicebus.NamespacesCreateOrUpdateFuture")
return
}
sender := autorest.DecorateSender(client, autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...))
if sn.Response.Response, err = future.GetResult(sender); err == nil && sn.Response.Response.StatusCode != http.StatusNoContent {
sn, err = client.CreateOrUpdateResponder(sn.Response.Response)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.NamespacesCreateOrUpdateFuture", "Result", sn.Response.Response, "Failure responding to request")
}
}
return
}
// NamespacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
// operation.
type NamespacesDeleteFuture struct {
azure.Future
}
// Result returns the result of the asynchronous operation.
// If the operation has not completed it will return an error.
func (future *NamespacesDeleteFuture) Result(client NamespacesClient) (ar autorest.Response, err error) {
var done bool
done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.NamespacesDeleteFuture", "Result", future.Response(), "Polling failure")
return
}
if !done {
err = azure.NewAsyncOpIncompleteError("servicebus.NamespacesDeleteFuture")
return
}
ar.Response = future.Response()
return
}
// NetworkRuleSet description of NetworkRuleSet resource.
type NetworkRuleSet struct {
autorest.Response `json:"-"`
// NetworkRuleSetProperties - NetworkRuleSet properties
*NetworkRuleSetProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for NetworkRuleSet.
func (nrs NetworkRuleSet) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if nrs.NetworkRuleSetProperties != nil {
objectMap["properties"] = nrs.NetworkRuleSetProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for NetworkRuleSet struct.
func (nrs *NetworkRuleSet) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var networkRuleSetProperties NetworkRuleSetProperties
err = json.Unmarshal(*v, &networkRuleSetProperties)
if err != nil {
return err
}
nrs.NetworkRuleSetProperties = &networkRuleSetProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
nrs.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
nrs.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
nrs.Type = &typeVar
}
}
}
return nil
}
// NetworkRuleSetListResult the response of the List NetworkRuleSet operation.
type NetworkRuleSetListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List NetworkRuleSet operation.
Value *[]NetworkRuleSet `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains incomplete list of NetworkRuleSet.
NextLink *string `json:"nextLink,omitempty"`
}
// NetworkRuleSetListResultIterator provides access to a complete listing of NetworkRuleSet values.
type NetworkRuleSetListResultIterator struct {
i int
page NetworkRuleSetListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *NetworkRuleSetListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/NetworkRuleSetListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *NetworkRuleSetListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter NetworkRuleSetListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter NetworkRuleSetListResultIterator) Response() NetworkRuleSetListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter NetworkRuleSetListResultIterator) Value() NetworkRuleSet {
if !iter.page.NotDone() {
return NetworkRuleSet{}
}
return iter.page.Values()[iter.i]
}
// NewNetworkRuleSetListResultIterator creates a new instance of the NetworkRuleSetListResultIterator type.
func NewNetworkRuleSetListResultIterator(page NetworkRuleSetListResultPage) NetworkRuleSetListResultIterator {
return NetworkRuleSetListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (nrslr NetworkRuleSetListResult) IsEmpty() bool {
return nrslr.Value == nil || len(*nrslr.Value) == 0
}
// networkRuleSetListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (nrslr NetworkRuleSetListResult) networkRuleSetListResultPreparer(ctx context.Context) (*http.Request, error) {
if nrslr.NextLink == nil || len(to.String(nrslr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(nrslr.NextLink)))
}
// NetworkRuleSetListResultPage contains a page of NetworkRuleSet values.
type NetworkRuleSetListResultPage struct {
fn func(context.Context, NetworkRuleSetListResult) (NetworkRuleSetListResult, error)
nrslr NetworkRuleSetListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *NetworkRuleSetListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/NetworkRuleSetListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.nrslr)
if err != nil {
return err
}
page.nrslr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *NetworkRuleSetListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page NetworkRuleSetListResultPage) NotDone() bool {
return !page.nrslr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page NetworkRuleSetListResultPage) Response() NetworkRuleSetListResult {
return page.nrslr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page NetworkRuleSetListResultPage) Values() []NetworkRuleSet {
if page.nrslr.IsEmpty() {
return nil
}
return *page.nrslr.Value
}
// NewNetworkRuleSetListResultPage creates a new instance of the NetworkRuleSetListResultPage type.
func NewNetworkRuleSetListResultPage(getNextPage func(context.Context, NetworkRuleSetListResult) (NetworkRuleSetListResult, error)) NetworkRuleSetListResultPage {
return NetworkRuleSetListResultPage{fn: getNextPage}
}
// NetworkRuleSetProperties NetworkRuleSet properties
type NetworkRuleSetProperties struct {
// DefaultAction - Default Action for Network Rule Set. Possible values include: 'Allow', 'Deny'
DefaultAction DefaultAction `json:"defaultAction,omitempty"`
// VirtualNetworkRules - List VirtualNetwork Rules
VirtualNetworkRules *[]NWRuleSetVirtualNetworkRules `json:"virtualNetworkRules,omitempty"`
// IPRules - List of IpRules
IPRules *[]NWRuleSetIPRules `json:"ipRules,omitempty"`
}
// NWRuleSetIPRules description of NetworkRuleSet - IPRules resource.
type NWRuleSetIPRules struct {
// IPMask - IP Mask
IPMask *string `json:"ipMask,omitempty"`
// Action - The IP Filter Action. Possible values include: 'NetworkRuleIPActionAllow'
Action NetworkRuleIPAction `json:"action,omitempty"`
}
// NWRuleSetVirtualNetworkRules description of VirtualNetworkRules - NetworkRules resource.
type NWRuleSetVirtualNetworkRules struct {
// Subnet - Subnet properties
Subnet *Subnet `json:"subnet,omitempty"`
// IgnoreMissingVnetServiceEndpoint - Value that indicates whether to ignore missing VNet Service Endpoint
IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty"`
}
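
// A minimal construction sketch for a deny-by-default rule set. Hedged: the Subnet
// ID layout is illustrative, and Subnet (defined elsewhere in this file) is assumed
// to carry an ID *string field; to.StringPtr/to.BoolPtr are from
// github.com/Azure/go-autorest/autorest/to:
//
//	ruleSet := NetworkRuleSet{
//		NetworkRuleSetProperties: &NetworkRuleSetProperties{
//			DefaultAction: Deny, // block traffic unless a rule below allows it
//			IPRules: &[]NWRuleSetIPRules{{
//				IPMask: to.StringPtr("10.0.0.0/24"),
//				Action: NetworkRuleIPActionAllow,
//			}},
//			VirtualNetworkRules: &[]NWRuleSetVirtualNetworkRules{{
//				Subnet:                           &Subnet{ID: to.StringPtr("/subscriptions/.../subnets/default")},
//				IgnoreMissingVnetServiceEndpoint: to.BoolPtr(true),
//			}},
//		},
//	}
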
// Operation a ServiceBus REST API operation
type Operation struct {
// Name - READ-ONLY; Operation name: {provider}/{resource}/{operation}
Name *string `json:"name,omitempty"`
// Display - The object that represents the operation.
Display *OperationDisplay `json:"display,omitempty"`
}
// OperationDisplay the object that represents the operation.
type OperationDisplay struct {
// Provider - READ-ONLY; Service provider: Microsoft.ServiceBus
Provider *string `json:"provider,omitempty"`
// Resource - READ-ONLY; Resource on which the operation is performed: Invoice, etc.
Resource *string `json:"resource,omitempty"`
// Operation - READ-ONLY; Operation type: Read, write, delete, etc.
Operation *string `json:"operation,omitempty"`
}
// OperationListResult result of the request to list ServiceBus operations. It contains a list of
// operations and a URL link to get the next set of results.
type OperationListResult struct {
autorest.Response `json:"-"`
// Value - READ-ONLY; List of ServiceBus operations supported by the Microsoft.ServiceBus resource provider.
Value *[]Operation `json:"value,omitempty"`
// NextLink - READ-ONLY; URL to get the next set of operation list results if there are any.
NextLink *string `json:"nextLink,omitempty"`
}
// OperationListResultIterator provides access to a complete listing of Operation values.
type OperationListResultIterator struct {
i int
page OperationListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *OperationListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter OperationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter OperationListResultIterator) Response() OperationListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter OperationListResultIterator) Value() Operation {
if !iter.page.NotDone() {
return Operation{}
}
return iter.page.Values()[iter.i]
}
// NewOperationListResultIterator creates a new instance of the OperationListResultIterator type.
func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
return OperationListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (olr OperationListResult) IsEmpty() bool {
return olr.Value == nil || len(*olr.Value) == 0
}
// operationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(olr.NextLink)))
}
// OperationListResultPage contains a page of Operation values.
type OperationListResultPage struct {
fn func(context.Context, OperationListResult) (OperationListResult, error)
olr OperationListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.olr)
if err != nil {
return err
}
page.olr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *OperationListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page OperationListResultPage) NotDone() bool {
return !page.olr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page OperationListResultPage) Response() OperationListResult {
return page.olr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page OperationListResultPage) Values() []Operation {
if page.olr.IsEmpty() {
return nil
}
return *page.olr.Value
}
// NewOperationListResultPage creates a new instance of the OperationListResultPage type.
func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
return OperationListResultPage{fn: getNextPage}
}
// PremiumMessagingRegions Premium Messaging Region
type PremiumMessagingRegions struct {
Properties *PremiumMessagingRegionsProperties `json:"properties,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for PremiumMessagingRegions.
func (pmr PremiumMessagingRegions) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if pmr.Properties != nil {
objectMap["properties"] = pmr.Properties
}
if pmr.Location != nil {
objectMap["location"] = pmr.Location
}
if pmr.Tags != nil {
objectMap["tags"] = pmr.Tags
}
return json.Marshal(objectMap)
}
// PremiumMessagingRegionsListResult the response of the List PremiumMessagingRegions operation.
type PremiumMessagingRegionsListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List PremiumMessagingRegions operation.
Value *[]PremiumMessagingRegions `json:"value,omitempty"`
// NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of PremiumMessagingRegions.
NextLink *string `json:"nextLink,omitempty"`
}
// PremiumMessagingRegionsListResultIterator provides access to a complete listing of
// PremiumMessagingRegions values.
type PremiumMessagingRegionsListResultIterator struct {
i int
page PremiumMessagingRegionsListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *PremiumMessagingRegionsListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *PremiumMessagingRegionsListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter PremiumMessagingRegionsListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter PremiumMessagingRegionsListResultIterator) Response() PremiumMessagingRegionsListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter PremiumMessagingRegionsListResultIterator) Value() PremiumMessagingRegions {
if !iter.page.NotDone() {
return PremiumMessagingRegions{}
}
return iter.page.Values()[iter.i]
}
// NewPremiumMessagingRegionsListResultIterator creates a new instance of the PremiumMessagingRegionsListResultIterator type.
func NewPremiumMessagingRegionsListResultIterator(page PremiumMessagingRegionsListResultPage) PremiumMessagingRegionsListResultIterator {
return PremiumMessagingRegionsListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (pmrlr PremiumMessagingRegionsListResult) IsEmpty() bool {
return pmrlr.Value == nil || len(*pmrlr.Value) == 0
}
// premiumMessagingRegionsListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (pmrlr PremiumMessagingRegionsListResult) premiumMessagingRegionsListResultPreparer(ctx context.Context) (*http.Request, error) {
if pmrlr.NextLink == nil || len(to.String(pmrlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(pmrlr.NextLink)))
}
// PremiumMessagingRegionsListResultPage contains a page of PremiumMessagingRegions values.
type PremiumMessagingRegionsListResultPage struct {
fn func(context.Context, PremiumMessagingRegionsListResult) (PremiumMessagingRegionsListResult, error)
pmrlr PremiumMessagingRegionsListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *PremiumMessagingRegionsListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.pmrlr)
if err != nil {
return err
}
page.pmrlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *PremiumMessagingRegionsListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page PremiumMessagingRegionsListResultPage) NotDone() bool {
return !page.pmrlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page PremiumMessagingRegionsListResultPage) Response() PremiumMessagingRegionsListResult {
return page.pmrlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page PremiumMessagingRegionsListResultPage) Values() []PremiumMessagingRegions {
if page.pmrlr.IsEmpty() {
return nil
}
return *page.pmrlr.Value
}
// NewPremiumMessagingRegionsListResultPage creates a new instance of the PremiumMessagingRegionsListResultPage type.
func NewPremiumMessagingRegionsListResultPage(getNextPage func(context.Context, PremiumMessagingRegionsListResult) (PremiumMessagingRegionsListResult, error)) PremiumMessagingRegionsListResultPage {
return PremiumMessagingRegionsListResultPage{fn: getNextPage}
}
// PremiumMessagingRegionsProperties properties of a Premium Messaging Region.
type PremiumMessagingRegionsProperties struct {
// Code - READ-ONLY; Region code
Code *string `json:"code,omitempty"`
// FullName - READ-ONLY; Full name of the region
FullName *string `json:"fullName,omitempty"`
}
// RegenerateAccessKeyParameters parameters supplied to the Regenerate Authorization Rule operation;
// specifies which key needs to be reset.
type RegenerateAccessKeyParameters struct {
// KeyType - The access key to regenerate. Possible values include: 'PrimaryKey', 'SecondaryKey'
KeyType KeyType `json:"keyType,omitempty"`
// Key - Optional. If provided, the key is reset to this value for the given KeyType; otherwise a new key value is autogenerated.
Key *string `json:"key,omitempty"`
}
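
// A minimal usage sketch. Hedged: the RegenerateKeys method and its parameters are
// assumptions about the generated authorization-rule clients in this package:
//
//	params := RegenerateAccessKeyParameters{KeyType: PrimaryKey}
//	keys, err := client.RegenerateKeys(ctx, resourceGroup, namespaceName, ruleName, params)
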
// Resource the resource definition for resources other than a namespace.
type Resource struct {
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// ResourceNamespacePatch the Resource definition.
type ResourceNamespacePatch struct {
// Location - Resource location
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for ResourceNamespacePatch.
func (rnp ResourceNamespacePatch) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if rnp.Location != nil {
objectMap["location"] = rnp.Location
}
if rnp.Tags != nil {
objectMap["tags"] = rnp.Tags
}
return json.Marshal(objectMap)
}
// Rule description of Rule Resource.
type Rule struct {
autorest.Response `json:"-"`
// Ruleproperties - Properties of Rule resource
*Ruleproperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for Rule.
func (r Rule) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if r.Ruleproperties != nil {
objectMap["properties"] = r.Ruleproperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for Rule struct.
func (r *Rule) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var ruleproperties Ruleproperties
err = json.Unmarshal(*v, &ruleproperties)
if err != nil {
return err
}
r.Ruleproperties = &ruleproperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
r.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
r.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
r.Type = &typeVar
}
}
}
return nil
}
// RuleListResult the response of the List Rules operation.
type RuleListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Rules operation.
Value *[]Rule `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains incomplete list of rules
NextLink *string `json:"nextLink,omitempty"`
}
// RuleListResultIterator provides access to a complete listing of Rule values.
type RuleListResultIterator struct {
i int
page RuleListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *RuleListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RuleListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *RuleListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RuleListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter RuleListResultIterator) Response() RuleListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter RuleListResultIterator) Value() Rule {
if !iter.page.NotDone() {
return Rule{}
}
return iter.page.Values()[iter.i]
}
// NewRuleListResultIterator creates a new instance of the RuleListResultIterator type.
func NewRuleListResultIterator(page RuleListResultPage) RuleListResultIterator {
return RuleListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (rlr RuleListResult) IsEmpty() bool {
return rlr.Value == nil || len(*rlr.Value) == 0
}
// ruleListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (rlr RuleListResult) ruleListResultPreparer(ctx context.Context) (*http.Request, error) {
if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rlr.NextLink)))
}
// RuleListResultPage contains a page of Rule values.
type RuleListResultPage struct {
fn func(context.Context, RuleListResult) (RuleListResult, error)
rlr RuleListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *RuleListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/RuleListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.rlr)
if err != nil {
return err
}
page.rlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *RuleListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RuleListResultPage) NotDone() bool {
return !page.rlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page RuleListResultPage) Response() RuleListResult {
return page.rlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page RuleListResultPage) Values() []Rule {
if page.rlr.IsEmpty() {
return nil
}
return *page.rlr.Value
}
// NewRuleListResultPage creates a new instance of the RuleListResultPage type.
func NewRuleListResultPage(getNextPage func(context.Context, RuleListResult) (RuleListResult, error)) RuleListResultPage {
return RuleListResultPage{fn: getNextPage}
}
// Ruleproperties description of Rule Resource.
type Ruleproperties struct {
// Action - Represents the filter actions which are allowed for the transformation of a message that has been matched by a filter expression.
Action *Action `json:"action,omitempty"`
// FilterType - Filter type that is evaluated against a BrokeredMessage. Possible values include: 'FilterTypeSQLFilter', 'FilterTypeCorrelationFilter'
FilterType FilterType `json:"filterType,omitempty"`
// SQLFilter - Properties of sqlFilter
SQLFilter *SQLFilter `json:"sqlFilter,omitempty"`
// CorrelationFilter - Properties of correlationFilter
CorrelationFilter *CorrelationFilter `json:"correlationFilter,omitempty"`
}
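
// A minimal construction sketch for a SQL-filter rule. Hedged: the SQLFilter field
// name SQLExpression is an assumption about that type, which is defined elsewhere in
// this file; to.StringPtr is from github.com/Azure/go-autorest/autorest/to:
//
//	rule := Rule{
//		Ruleproperties: &Ruleproperties{
//			FilterType: FilterTypeSQLFilter,
//			SQLFilter:  &SQLFilter{SQLExpression: to.StringPtr("color = 'blue'")},
//		},
//	}
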
// SBAuthorizationRule description of a namespace authorization rule.
type SBAuthorizationRule struct {
autorest.Response `json:"-"`
// SBAuthorizationRuleProperties - AuthorizationRule properties.
*SBAuthorizationRuleProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBAuthorizationRule.
func (sar SBAuthorizationRule) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if sar.SBAuthorizationRuleProperties != nil {
objectMap["properties"] = sar.SBAuthorizationRuleProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SBAuthorizationRule struct.
func (sar *SBAuthorizationRule) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var sBAuthorizationRuleProperties SBAuthorizationRuleProperties
err = json.Unmarshal(*v, &sBAuthorizationRuleProperties)
if err != nil {
return err
}
sar.SBAuthorizationRuleProperties = &sBAuthorizationRuleProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
sar.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
sar.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
sar.Type = &typeVar
}
}
}
return nil
}
// SBAuthorizationRuleListResult the response to the List Authorization Rules operation.
type SBAuthorizationRuleListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Authorization Rules operation.
Value *[]SBAuthorizationRule `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains incomplete list of Authorization Rules.
NextLink *string `json:"nextLink,omitempty"`
}
// SBAuthorizationRuleListResultIterator provides access to a complete listing of SBAuthorizationRule
// values.
type SBAuthorizationRuleListResultIterator struct {
i int
page SBAuthorizationRuleListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SBAuthorizationRuleListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBAuthorizationRuleListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SBAuthorizationRuleListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBAuthorizationRuleListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SBAuthorizationRuleListResultIterator) Response() SBAuthorizationRuleListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SBAuthorizationRuleListResultIterator) Value() SBAuthorizationRule {
if !iter.page.NotDone() {
return SBAuthorizationRule{}
}
return iter.page.Values()[iter.i]
}
// NewSBAuthorizationRuleListResultIterator creates a new instance of the SBAuthorizationRuleListResultIterator type.
func NewSBAuthorizationRuleListResultIterator(page SBAuthorizationRuleListResultPage) SBAuthorizationRuleListResultIterator {
return SBAuthorizationRuleListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (sarlr SBAuthorizationRuleListResult) IsEmpty() bool {
return sarlr.Value == nil || len(*sarlr.Value) == 0
}
// sBAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (sarlr SBAuthorizationRuleListResult) sBAuthorizationRuleListResultPreparer(ctx context.Context) (*http.Request, error) {
if sarlr.NextLink == nil || len(to.String(sarlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sarlr.NextLink)))
}
// SBAuthorizationRuleListResultPage contains a page of SBAuthorizationRule values.
type SBAuthorizationRuleListResultPage struct {
fn func(context.Context, SBAuthorizationRuleListResult) (SBAuthorizationRuleListResult, error)
sarlr SBAuthorizationRuleListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SBAuthorizationRuleListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBAuthorizationRuleListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.sarlr)
if err != nil {
return err
}
page.sarlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SBAuthorizationRuleListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBAuthorizationRuleListResultPage) NotDone() bool {
return !page.sarlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SBAuthorizationRuleListResultPage) Response() SBAuthorizationRuleListResult {
return page.sarlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SBAuthorizationRuleListResultPage) Values() []SBAuthorizationRule {
if page.sarlr.IsEmpty() {
return nil
}
return *page.sarlr.Value
}
// NewSBAuthorizationRuleListResultPage creates a new instance of the SBAuthorizationRuleListResultPage type.
func NewSBAuthorizationRuleListResultPage(getNextPage func(context.Context, SBAuthorizationRuleListResult) (SBAuthorizationRuleListResult, error)) SBAuthorizationRuleListResultPage {
return SBAuthorizationRuleListResultPage{fn: getNextPage}
}
// SBAuthorizationRuleProperties AuthorizationRule properties.
type SBAuthorizationRuleProperties struct {
// Rights - The rights associated with the rule.
Rights *[]AccessRights `json:"rights,omitempty"`
}
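
// A minimal construction sketch. Hedged: the Listen and Send AccessRights constants
// are assumptions about this package's enum values:
//
//	props := SBAuthorizationRuleProperties{
//		Rights: &[]AccessRights{Listen, Send},
//	}
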
// SBNamespace description of a namespace resource.
type SBNamespace struct {
autorest.Response `json:"-"`
// Sku - Properties of Sku
Sku *SBSku `json:"sku,omitempty"`
// SBNamespaceProperties - Properties of the namespace.
*SBNamespaceProperties `json:"properties,omitempty"`
// Location - The Geo-location where the resource lives
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBNamespace.
func (sn SBNamespace) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if sn.Sku != nil {
objectMap["sku"] = sn.Sku
}
if sn.SBNamespaceProperties != nil {
objectMap["properties"] = sn.SBNamespaceProperties
}
if sn.Location != nil {
objectMap["location"] = sn.Location
}
if sn.Tags != nil {
objectMap["tags"] = sn.Tags
}
return json.Marshal(objectMap)
}
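
// A minimal construction sketch for a CreateOrUpdate payload. Hedged: the SBSku
// field Name and the Premium SkuName constant are assumptions about this package;
// READ-ONLY fields are left unset and omitted by MarshalJSON above:
//
//	ns := SBNamespace{
//		Sku:      &SBSku{Name: Premium},
//		Location: to.StringPtr("westus"),
//		Tags:     map[string]*string{"env": to.StringPtr("dev")},
//	}
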
// UnmarshalJSON is the custom unmarshaler for SBNamespace struct.
func (sn *SBNamespace) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "sku":
if v != nil {
var sku SBSku
err = json.Unmarshal(*v, &sku)
if err != nil {
return err
}
sn.Sku = &sku
}
case "properties":
if v != nil {
var sBNamespaceProperties SBNamespaceProperties
err = json.Unmarshal(*v, &sBNamespaceProperties)
if err != nil {
return err
}
sn.SBNamespaceProperties = &sBNamespaceProperties
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
sn.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
sn.Tags = tags
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
sn.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
sn.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
sn.Type = &typeVar
}
}
}
return nil
}
// SBNamespaceListResult the response of the List Namespace operation.
type SBNamespaceListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Namespace operation.
Value *[]SBNamespace `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains incomplete list of Namespaces.
NextLink *string `json:"nextLink,omitempty"`
}
// SBNamespaceListResultIterator provides access to a complete listing of SBNamespace values.
type SBNamespaceListResultIterator struct {
i int
page SBNamespaceListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SBNamespaceListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBNamespaceListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SBNamespaceListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBNamespaceListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SBNamespaceListResultIterator) Response() SBNamespaceListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SBNamespaceListResultIterator) Value() SBNamespace {
if !iter.page.NotDone() {
return SBNamespace{}
}
return iter.page.Values()[iter.i]
}
// NewSBNamespaceListResultIterator creates a new instance of the SBNamespaceListResultIterator type.
func NewSBNamespaceListResultIterator(page SBNamespaceListResultPage) SBNamespaceListResultIterator {
return SBNamespaceListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (snlr SBNamespaceListResult) IsEmpty() bool {
return snlr.Value == nil || len(*snlr.Value) == 0
}
// sBNamespaceListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (snlr SBNamespaceListResult) sBNamespaceListResultPreparer(ctx context.Context) (*http.Request, error) {
if snlr.NextLink == nil || len(to.String(snlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(snlr.NextLink)))
}
// SBNamespaceListResultPage contains a page of SBNamespace values.
type SBNamespaceListResultPage struct {
fn func(context.Context, SBNamespaceListResult) (SBNamespaceListResult, error)
snlr SBNamespaceListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SBNamespaceListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBNamespaceListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.snlr)
if err != nil {
return err
}
page.snlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SBNamespaceListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBNamespaceListResultPage) NotDone() bool {
return !page.snlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SBNamespaceListResultPage) Response() SBNamespaceListResult {
return page.snlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SBNamespaceListResultPage) Values() []SBNamespace {
if page.snlr.IsEmpty() {
return nil
}
return *page.snlr.Value
}
// NewSBNamespaceListResultPage creates a new instance of the SBNamespaceListResultPage type.
func NewSBNamespaceListResultPage(getNextPage func(context.Context, SBNamespaceListResult) (SBNamespaceListResult, error)) SBNamespaceListResultPage {
return SBNamespaceListResultPage{fn: getNextPage}
}
// SBNamespaceMigrate namespace migration object.
type SBNamespaceMigrate struct {
// TargetNamespaceType - Type of namespaces. Possible values include: 'Messaging', 'NotificationHub', 'Mixed', 'EventHub', 'Relay'
TargetNamespaceType NameSpaceType `json:"targetNamespaceType,omitempty"`
}
// SBNamespaceProperties properties of the namespace.
type SBNamespaceProperties struct {
// ProvisioningState - READ-ONLY; Provisioning state of the namespace.
ProvisioningState *string `json:"provisioningState,omitempty"`
// CreatedAt - READ-ONLY; The time the namespace was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
// UpdatedAt - READ-ONLY; The time the namespace was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// ServiceBusEndpoint - READ-ONLY; Endpoint you can use to perform Service Bus operations.
ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"`
// MetricID - READ-ONLY; Identifier for Azure Insights metrics
MetricID *string `json:"metricId,omitempty"`
}
// SBNamespaceUpdateParameters description of a namespace resource.
type SBNamespaceUpdateParameters struct {
// Sku - Properties of Sku
Sku *SBSku `json:"sku,omitempty"`
// SBNamespaceProperties - Properties of the namespace.
*SBNamespaceProperties `json:"properties,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBNamespaceUpdateParameters.
func (snup SBNamespaceUpdateParameters) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if snup.Sku != nil {
objectMap["sku"] = snup.Sku
}
if snup.SBNamespaceProperties != nil {
objectMap["properties"] = snup.SBNamespaceProperties
}
if snup.Location != nil {
objectMap["location"] = snup.Location
}
if snup.Tags != nil {
objectMap["tags"] = snup.Tags
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SBNamespaceUpdateParameters struct.
func (snup *SBNamespaceUpdateParameters) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "sku":
if v != nil {
var sku SBSku
err = json.Unmarshal(*v, &sku)
if err != nil {
return err
}
snup.Sku = &sku
}
case "properties":
if v != nil {
var sBNamespaceProperties SBNamespaceProperties
err = json.Unmarshal(*v, &sBNamespaceProperties)
if err != nil {
return err
}
snup.SBNamespaceProperties = &sBNamespaceProperties
}
case "location":
if v != nil {
var location string
err = json.Unmarshal(*v, &location)
if err != nil {
return err
}
snup.Location = &location
}
case "tags":
if v != nil {
var tags map[string]*string
err = json.Unmarshal(*v, &tags)
if err != nil {
return err
}
snup.Tags = tags
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
snup.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
snup.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
snup.Type = &typeVar
}
}
}
return nil
}
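// Editor's sketch (hypothetical, not generated): the custom (un)marshalers
// above nest the embedded *SBNamespaceProperties under a "properties" key and
// drop the READ-ONLY ID/Name/Type fields on writes.
func exampleUpdateParametersJSON() ([]byte, error) {
	snup := SBNamespaceUpdateParameters{
		Location: to.StringPtr("westus"),
		Tags:     map[string]*string{"env": to.StringPtr("dev")},
	}
	// Produces {"location":"westus","tags":{"env":"dev"}} -- no id/name/type.
	return json.Marshal(snup)
}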
// SBQueue description of queue Resource.
type SBQueue struct {
autorest.Response `json:"-"`
// SBQueueProperties - Queue Properties
*SBQueueProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBQueue.
func (sq SBQueue) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if sq.SBQueueProperties != nil {
objectMap["properties"] = sq.SBQueueProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SBQueue struct.
func (sq *SBQueue) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var sBQueueProperties SBQueueProperties
err = json.Unmarshal(*v, &sBQueueProperties)
if err != nil {
return err
}
sq.SBQueueProperties = &sBQueueProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
sq.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
sq.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
sq.Type = &typeVar
}
}
}
return nil
}
// SBQueueListResult the response to the List Queues operation.
type SBQueueListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Queues operation.
Value *[]SBQueue `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of queues.
NextLink *string `json:"nextLink,omitempty"`
}
// SBQueueListResultIterator provides access to a complete listing of SBQueue values.
type SBQueueListResultIterator struct {
i int
page SBQueueListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SBQueueListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBQueueListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SBQueueListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBQueueListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SBQueueListResultIterator) Response() SBQueueListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SBQueueListResultIterator) Value() SBQueue {
if !iter.page.NotDone() {
return SBQueue{}
}
return iter.page.Values()[iter.i]
}
// NewSBQueueListResultIterator creates a new instance of the SBQueueListResultIterator type.
func NewSBQueueListResultIterator(page SBQueueListResultPage) SBQueueListResultIterator {
return SBQueueListResultIterator{page: page}
}
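// Editor's sketch (not generated code): draining the iterator with the
// NotDone/Value/NextWithContext methods defined above.
func collectQueueNames(ctx context.Context, iter SBQueueListResultIterator) ([]string, error) {
	var names []string
	for iter.NotDone() {
		if q := iter.Value(); q.Name != nil {
			names = append(names, *q.Name)
		}
		if err := iter.NextWithContext(ctx); err != nil {
			return names, err
		}
	}
	return names, nil
}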
// IsEmpty returns true if the ListResult contains no values.
func (sqlr SBQueueListResult) IsEmpty() bool {
return sqlr.Value == nil || len(*sqlr.Value) == 0
}
// sBQueueListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (sqlr SBQueueListResult) sBQueueListResultPreparer(ctx context.Context) (*http.Request, error) {
if sqlr.NextLink == nil || len(to.String(sqlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sqlr.NextLink)))
}
// SBQueueListResultPage contains a page of SBQueue values.
type SBQueueListResultPage struct {
fn func(context.Context, SBQueueListResult) (SBQueueListResult, error)
sqlr SBQueueListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SBQueueListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBQueueListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.sqlr)
if err != nil {
return err
}
page.sqlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SBQueueListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBQueueListResultPage) NotDone() bool {
return !page.sqlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SBQueueListResultPage) Response() SBQueueListResult {
return page.sqlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SBQueueListResultPage) Values() []SBQueue {
if page.sqlr.IsEmpty() {
return nil
}
return *page.sqlr.Value
}
// NewSBQueueListResultPage creates a new instance of the SBQueueListResultPage type.
func NewSBQueueListResultPage(getNextPage func(context.Context, SBQueueListResult) (SBQueueListResult, error)) SBQueueListResultPage {
return SBQueueListResultPage{fn: getNextPage}
}
// SBQueueProperties the Queue Properties definition.
type SBQueueProperties struct {
// CountDetails - READ-ONLY; Message Count Details.
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
// CreatedAt - READ-ONLY; The exact time the queue was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
// UpdatedAt - READ-ONLY; The exact time the queue was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// AccessedAt - READ-ONLY; Last time a message was sent, or the last time there was a receive request to this queue.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
// SizeInBytes - READ-ONLY; The size of the queue, in bytes.
SizeInBytes *int64 `json:"sizeInBytes,omitempty"`
// MessageCount - READ-ONLY; The number of messages in the queue.
MessageCount *int64 `json:"messageCount,omitempty"`
// LockDuration - ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1 minute.
LockDuration *string `json:"lockDuration,omitempty"`
// MaxSizeInMegabytes - The maximum size of the queue in megabytes, which is the size of memory allocated for the queue. Default is 1024.
MaxSizeInMegabytes *int32 `json:"maxSizeInMegabytes,omitempty"`
// RequiresDuplicateDetection - A value indicating if this queue requires duplicate detection.
RequiresDuplicateDetection *bool `json:"requiresDuplicateDetection,omitempty"`
// RequiresSession - A value that indicates whether the queue supports the concept of sessions.
RequiresSession *bool `json:"requiresSession,omitempty"`
// DefaultMessageTimeToLive - ISO 8601 default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
DefaultMessageTimeToLive *string `json:"defaultMessageTimeToLive,omitempty"`
// DeadLetteringOnMessageExpiration - A value that indicates whether this queue has dead letter support when a message expires.
DeadLetteringOnMessageExpiration *bool `json:"deadLetteringOnMessageExpiration,omitempty"`
// DuplicateDetectionHistoryTimeWindow - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
DuplicateDetectionHistoryTimeWindow *string `json:"duplicateDetectionHistoryTimeWindow,omitempty"`
// MaxDeliveryCount - The maximum delivery count. A message is automatically deadlettered after this number of deliveries. The default value is 10.
MaxDeliveryCount *int32 `json:"maxDeliveryCount,omitempty"`
// Status - Enumerates the possible values for the status of a messaging entity. Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled', 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'
Status EntityStatus `json:"status,omitempty"`
// EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty"`
// AutoDeleteOnIdle - ISO 8601 timeSpan idle interval after which the queue is automatically deleted. The minimum duration is 5 minutes.
AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty"`
// EnablePartitioning - A value that indicates whether the queue is to be partitioned across multiple message brokers.
EnablePartitioning *bool `json:"enablePartitioning,omitempty"`
// EnableExpress - A value that indicates whether Express Entities are enabled. An express queue holds a message in memory temporarily before writing it to persistent storage.
EnableExpress *bool `json:"enableExpress,omitempty"`
// ForwardTo - Queue/Topic name to forward the messages
ForwardTo *string `json:"forwardTo,omitempty"`
// ForwardDeadLetteredMessagesTo - Queue/Topic name to forward the Dead Letter message
ForwardDeadLetteredMessagesTo *string `json:"forwardDeadLetteredMessagesTo,omitempty"`
}
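// Editor's sketch: the duration-typed fields above are plain ISO 8601 strings
// (e.g. "PT1M" is one minute); a minimal, hypothetical queue definition:
func exampleQueueProperties() SBQueueProperties {
	return SBQueueProperties{
		LockDuration:     to.StringPtr("PT1M"), // 1-minute peek-lock
		MaxDeliveryCount: to.Int32Ptr(10),      // dead-letter after 10 deliveries
		RequiresSession:  to.BoolPtr(false),
	}
}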
// SBSku SKU of the namespace.
type SBSku struct {
// Name - Name of this SKU. Possible values include: 'Basic', 'Standard', 'Premium'
Name SkuName `json:"name,omitempty"`
// Tier - The billing tier of this particular SKU. Possible values include: 'SkuTierBasic', 'SkuTierStandard', 'SkuTierPremium'
Tier SkuTier `json:"tier,omitempty"`
// Capacity - The specified messaging units for the tier. For the Premium tier, valid capacity values are 1, 2 and 4.
Capacity *int32 `json:"capacity,omitempty"`
}
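// Editor's sketch: constructing a Premium SKU with 2 messaging units, using
// the SkuName/SkuTier values listed in the comments above.
func examplePremiumSku() SBSku {
	return SBSku{Name: Premium, Tier: SkuTierPremium, Capacity: to.Int32Ptr(2)}
}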
// SBSubscription description of subscription resource.
type SBSubscription struct {
autorest.Response `json:"-"`
// SBSubscriptionProperties - Properties of subscriptions resource.
*SBSubscriptionProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBSubscription.
func (ss SBSubscription) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if ss.SBSubscriptionProperties != nil {
objectMap["properties"] = ss.SBSubscriptionProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SBSubscription struct.
func (ss *SBSubscription) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var sBSubscriptionProperties SBSubscriptionProperties
err = json.Unmarshal(*v, &sBSubscriptionProperties)
if err != nil {
return err
}
ss.SBSubscriptionProperties = &sBSubscriptionProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
ss.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
ss.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
ss.Type = &typeVar
}
}
}
return nil
}
// SBSubscriptionListResult the response to the List Subscriptions operation.
type SBSubscriptionListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Subscriptions operation.
Value *[]SBSubscription `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of subscriptions.
NextLink *string `json:"nextLink,omitempty"`
}
// SBSubscriptionListResultIterator provides access to a complete listing of SBSubscription values.
type SBSubscriptionListResultIterator struct {
i int
page SBSubscriptionListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SBSubscriptionListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBSubscriptionListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SBSubscriptionListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBSubscriptionListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SBSubscriptionListResultIterator) Response() SBSubscriptionListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SBSubscriptionListResultIterator) Value() SBSubscription {
if !iter.page.NotDone() {
return SBSubscription{}
}
return iter.page.Values()[iter.i]
}
// NewSBSubscriptionListResultIterator creates a new instance of the SBSubscriptionListResultIterator type.
func NewSBSubscriptionListResultIterator(page SBSubscriptionListResultPage) SBSubscriptionListResultIterator {
return SBSubscriptionListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (sslr SBSubscriptionListResult) IsEmpty() bool {
return sslr.Value == nil || len(*sslr.Value) == 0
}
// sBSubscriptionListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (sslr SBSubscriptionListResult) sBSubscriptionListResultPreparer(ctx context.Context) (*http.Request, error) {
if sslr.NextLink == nil || len(to.String(sslr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sslr.NextLink)))
}
// SBSubscriptionListResultPage contains a page of SBSubscription values.
type SBSubscriptionListResultPage struct {
fn func(context.Context, SBSubscriptionListResult) (SBSubscriptionListResult, error)
sslr SBSubscriptionListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SBSubscriptionListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBSubscriptionListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.sslr)
if err != nil {
return err
}
page.sslr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SBSubscriptionListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBSubscriptionListResultPage) NotDone() bool {
return !page.sslr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SBSubscriptionListResultPage) Response() SBSubscriptionListResult {
return page.sslr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SBSubscriptionListResultPage) Values() []SBSubscription {
if page.sslr.IsEmpty() {
return nil
}
return *page.sslr.Value
}
// NewSBSubscriptionListResultPage creates a new instance of the SBSubscriptionListResultPage type.
func NewSBSubscriptionListResultPage(getNextPage func(context.Context, SBSubscriptionListResult) (SBSubscriptionListResult, error)) SBSubscriptionListResultPage {
return SBSubscriptionListResultPage{fn: getNextPage}
}
// SBSubscriptionProperties description of Subscription Resource.
type SBSubscriptionProperties struct {
// MessageCount - READ-ONLY; Number of messages.
MessageCount *int64 `json:"messageCount,omitempty"`
// CreatedAt - READ-ONLY; Exact time the subscription was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
// AccessedAt - READ-ONLY; Last time there was a receive request to this subscription.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
// UpdatedAt - READ-ONLY; The exact time the subscription was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// CountDetails - READ-ONLY; Message count details
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
// LockDuration - ISO 8601 lock duration timespan for the subscription. The default value is 1 minute.
LockDuration *string `json:"lockDuration,omitempty"`
// RequiresSession - Value indicating if a subscription supports the concept of sessions.
RequiresSession *bool `json:"requiresSession,omitempty"`
// DefaultMessageTimeToLive - ISO 8601 default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
DefaultMessageTimeToLive *string `json:"defaultMessageTimeToLive,omitempty"`
// DeadLetteringOnFilterEvaluationExceptions - Value that indicates whether a subscription has dead letter support on filter evaluation exceptions.
DeadLetteringOnFilterEvaluationExceptions *bool `json:"deadLetteringOnFilterEvaluationExceptions,omitempty"`
// DeadLetteringOnMessageExpiration - Value that indicates whether a subscription has dead letter support when a message expires.
DeadLetteringOnMessageExpiration *bool `json:"deadLetteringOnMessageExpiration,omitempty"`
// DuplicateDetectionHistoryTimeWindow - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
DuplicateDetectionHistoryTimeWindow *string `json:"duplicateDetectionHistoryTimeWindow,omitempty"`
// MaxDeliveryCount - Number of maximum deliveries.
MaxDeliveryCount *int32 `json:"maxDeliveryCount,omitempty"`
// Status - Enumerates the possible values for the status of a messaging entity. Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled', 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'
Status EntityStatus `json:"status,omitempty"`
// EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty"`
// AutoDeleteOnIdle - ISO 8601 timeSpan idle interval after which the subscription is automatically deleted. The minimum duration is 5 minutes.
AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty"`
// ForwardTo - Queue/Topic name to forward the messages
ForwardTo *string `json:"forwardTo,omitempty"`
// ForwardDeadLetteredMessagesTo - Queue/Topic name to forward the Dead Letter message
ForwardDeadLetteredMessagesTo *string `json:"forwardDeadLetteredMessagesTo,omitempty"`
}
// SBTopic description of topic resource.
type SBTopic struct {
autorest.Response `json:"-"`
// SBTopicProperties - Properties of topic resource.
*SBTopicProperties `json:"properties,omitempty"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for SBTopic.
func (st SBTopic) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if st.SBTopicProperties != nil {
objectMap["properties"] = st.SBTopicProperties
}
return json.Marshal(objectMap)
}
// UnmarshalJSON is the custom unmarshaler for SBTopic struct.
func (st *SBTopic) UnmarshalJSON(body []byte) error {
var m map[string]*json.RawMessage
err := json.Unmarshal(body, &m)
if err != nil {
return err
}
for k, v := range m {
switch k {
case "properties":
if v != nil {
var sBTopicProperties SBTopicProperties
err = json.Unmarshal(*v, &sBTopicProperties)
if err != nil {
return err
}
st.SBTopicProperties = &sBTopicProperties
}
case "id":
if v != nil {
var ID string
err = json.Unmarshal(*v, &ID)
if err != nil {
return err
}
st.ID = &ID
}
case "name":
if v != nil {
var name string
err = json.Unmarshal(*v, &name)
if err != nil {
return err
}
st.Name = &name
}
case "type":
if v != nil {
var typeVar string
err = json.Unmarshal(*v, &typeVar)
if err != nil {
return err
}
st.Type = &typeVar
}
}
}
return nil
}
// SBTopicListResult the response to the List Topics operation.
type SBTopicListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List Topics operation.
Value *[]SBTopic `json:"value,omitempty"`
// NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of topics.
NextLink *string `json:"nextLink,omitempty"`
}
// SBTopicListResultIterator provides access to a complete listing of SBTopic values.
type SBTopicListResultIterator struct {
i int
page SBTopicListResultPage
}
// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
func (iter *SBTopicListResultIterator) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBTopicListResultIterator.NextWithContext")
defer func() {
sc := -1
if iter.Response().Response.Response != nil {
sc = iter.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
}
iter.i = 0
return nil
}
// Next advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (iter *SBTopicListResultIterator) Next() error {
return iter.NextWithContext(context.Background())
}
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBTopicListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
}
// Response returns the raw server response from the last page request.
func (iter SBTopicListResultIterator) Response() SBTopicListResult {
return iter.page.Response()
}
// Value returns the current value or a zero-initialized value if the
// iterator has advanced beyond the end of the collection.
func (iter SBTopicListResultIterator) Value() SBTopic {
if !iter.page.NotDone() {
return SBTopic{}
}
return iter.page.Values()[iter.i]
}
// NewSBTopicListResultIterator creates a new instance of the SBTopicListResultIterator type.
func NewSBTopicListResultIterator(page SBTopicListResultPage) SBTopicListResultIterator {
return SBTopicListResultIterator{page: page}
}
// IsEmpty returns true if the ListResult contains no values.
func (stlr SBTopicListResult) IsEmpty() bool {
return stlr.Value == nil || len(*stlr.Value) == 0
}
// sBTopicListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
func (stlr SBTopicListResult) sBTopicListResultPreparer(ctx context.Context) (*http.Request, error) {
if stlr.NextLink == nil || len(to.String(stlr.NextLink)) < 1 {
return nil, nil
}
return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(stlr.NextLink)))
}
// SBTopicListResultPage contains a page of SBTopic values.
type SBTopicListResultPage struct {
fn func(context.Context, SBTopicListResult) (SBTopicListResult, error)
stlr SBTopicListResult
}
// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
func (page *SBTopicListResultPage) NextWithContext(ctx context.Context) (err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/SBTopicListResultPage.NextWithContext")
defer func() {
sc := -1
if page.Response().Response.Response != nil {
sc = page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
next, err := page.fn(ctx, page.stlr)
if err != nil {
return err
}
page.stlr = next
return nil
}
// Next advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
// Deprecated: Use NextWithContext() instead.
func (page *SBTopicListResultPage) Next() error {
return page.NextWithContext(context.Background())
}
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBTopicListResultPage) NotDone() bool {
return !page.stlr.IsEmpty()
}
// Response returns the raw server response from the last page request.
func (page SBTopicListResultPage) Response() SBTopicListResult {
return page.stlr
}
// Values returns the slice of values for the current page or nil if there are no values.
func (page SBTopicListResultPage) Values() []SBTopic {
if page.stlr.IsEmpty() {
return nil
}
return *page.stlr.Value
}
// NewSBTopicListResultPage creates a new instance of the SBTopicListResultPage type.
func NewSBTopicListResultPage(getNextPage func(context.Context, SBTopicListResult) (SBTopicListResult, error)) SBTopicListResultPage {
return SBTopicListResultPage{fn: getNextPage}
}
// SBTopicProperties the Topic Properties definition.
type SBTopicProperties struct {
// SizeInBytes - READ-ONLY; Size of the topic, in bytes.
SizeInBytes *int64 `json:"sizeInBytes,omitempty"`
// CreatedAt - READ-ONLY; Exact time the topic was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
// UpdatedAt - READ-ONLY; The exact time the topic was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// AccessedAt - READ-ONLY; Last time the message was sent, or a request was received, for this topic.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
// SubscriptionCount - READ-ONLY; Number of subscriptions.
SubscriptionCount *int32 `json:"subscriptionCount,omitempty"`
// CountDetails - READ-ONLY; Message count details
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
// DefaultMessageTimeToLive - ISO 8601 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
DefaultMessageTimeToLive *string `json:"defaultMessageTimeToLive,omitempty"`
// MaxSizeInMegabytes - Maximum size of the topic in megabytes, which is the size of the memory allocated for the topic. Default is 1024.
MaxSizeInMegabytes *int32 `json:"maxSizeInMegabytes,omitempty"`
// RequiresDuplicateDetection - Value indicating if this topic requires duplicate detection.
RequiresDuplicateDetection *bool `json:"requiresDuplicateDetection,omitempty"`
// DuplicateDetectionHistoryTimeWindow - ISO 8601 timespan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
DuplicateDetectionHistoryTimeWindow *string `json:"duplicateDetectionHistoryTimeWindow,omitempty"`
// EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
EnableBatchedOperations *bool `json:"enableBatchedOperations,omitempty"`
// Status - Enumerates the possible values for the status of a messaging entity. Possible values include: 'Active', 'Disabled', 'Restoring', 'SendDisabled', 'ReceiveDisabled', 'Creating', 'Deleting', 'Renaming', 'Unknown'
Status EntityStatus `json:"status,omitempty"`
// SupportOrdering - Value that indicates whether the topic supports ordering.
SupportOrdering *bool `json:"supportOrdering,omitempty"`
// AutoDeleteOnIdle - ISO 8601 timespan idle interval after which the topic is automatically deleted. The minimum duration is 5 minutes.
AutoDeleteOnIdle *string `json:"autoDeleteOnIdle,omitempty"`
// EnablePartitioning - Value that indicates whether the topic is to be partitioned across multiple message brokers.
EnablePartitioning *bool `json:"enablePartitioning,omitempty"`
// EnableExpress - Value that indicates whether Express Entities are enabled. An express topic holds a message in memory temporarily before writing it to persistent storage.
EnableExpress *bool `json:"enableExpress,omitempty"`
}
// SQLFilter represents a filter which is a composition of an expression and an action that is executed in
// the pub/sub pipeline.
type SQLFilter struct {
// SQLExpression - The SQL expression. e.g. MyProperty='ABC'
SQLExpression *string `json:"sqlExpression,omitempty"`
// CompatibilityLevel - This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.
CompatibilityLevel *int32 `json:"compatibilityLevel,omitempty"`
// RequiresPreprocessing - Value that indicates whether the rule action requires preprocessing.
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
}
// SQLRuleAction represents a set of actions written in SQL language-based syntax that is performed
// against a ServiceBus.Messaging.BrokeredMessage.
type SQLRuleAction struct {
// SQLExpression - SQL expression. e.g. MyProperty='ABC'
SQLExpression *string `json:"sqlExpression,omitempty"`
// CompatibilityLevel - This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.
CompatibilityLevel *int32 `json:"compatibilityLevel,omitempty"`
// RequiresPreprocessing - Value that indicates whether the rule action requires preprocessing.
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
}
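// Editor's sketch: both structs above carry their SQL expression as a plain
// string; a hypothetical filter on message properties might look like:
func exampleSQLFilter() SQLFilter {
	return SQLFilter{SQLExpression: to.StringPtr("color = 'blue' AND quantity = 10")}
}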
// Subnet properties supplied for Subnet
type Subnet struct {
// ID - Resource ID of Virtual Network Subnet
ID *string `json:"id,omitempty"`
}
// TrackedResource the Resource definition.
type TrackedResource struct {
// Location - The Geo-location where the resource lives
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
// ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
// Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
// Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
// MarshalJSON is the custom marshaler for TrackedResource.
func (tr TrackedResource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
if tr.Location != nil {
objectMap["location"] = tr.Location
}
if tr.Tags != nil {
objectMap["tags"] = tr.Tags
}
return json.Marshal(objectMap)
}
| NewPremiumMessagingRegionsListResultIterator |
bootstrap.go | package secretstorage
import (
"io/ioutil"
"os"
"strings"
"github.com/hashicorp/vault/api"
"github.com/pkg/errors"
"github.com/solo-io/gloo/pkg/bootstrap"
"github.com/solo-io/gloo/pkg/storage/dependencies"
"github.com/solo-io/gloo/pkg/storage/dependencies/file"
"github.com/solo-io/gloo/pkg/storage/dependencies/kube"
"github.com/solo-io/gloo/pkg/storage/dependencies/vault"
"k8s.io/client-go/tools/clientcmd"
)
func Bootstrap(opts bootstrap.Options) (dependencies.SecretStorage, error) | {
switch opts.SecretStorageOptions.Type {
case bootstrap.WatcherTypeFile:
return file.NewSecretStorage(opts.FileOptions.SecretDir, opts.SecretStorageOptions.SyncFrequency)
case bootstrap.WatcherTypeKube:
cfg, err := clientcmd.BuildConfigFromFlags(opts.KubeOptions.MasterURL, opts.KubeOptions.KubeConfig)
if err != nil {
return nil, errors.Wrap(err, "building kube restclient")
}
return kube.NewSecretStorage(cfg, opts.KubeOptions.Namespace, opts.SecretStorageOptions.SyncFrequency)
case bootstrap.WatcherTypeVault:
cfg := api.DefaultConfig()
cfg.MaxRetries = opts.VaultOptions.Retries
cfg.Address = opts.VaultOptions.VaultAddr
vaultClient, err := api.NewClient(cfg)
if err != nil {
return nil, errors.Wrapf(err, "failed to create vault client")
}
token := opts.VaultOptions.VaultToken
if token == "" {
token = os.Getenv("VAULT_TOKEN")
if token == "" {
if opts.VaultOptions.VaultTokenFile == "" {
return nil, errors.Errorf("the Vault token must be made available somehow. " +
"either --vault.token or --vault.tokenfile must be specified, or the VAULT_TOKEN " +
"environment variable must be set")
}
b, err := ioutil.ReadFile(opts.VaultOptions.VaultTokenFile)
if err != nil {
return nil, errors.Wrap(err, "failed to read vault token file")
}
token = strings.TrimSuffix(string(b), "\n")
}
}
vaultClient.SetToken(token)
return vault.NewSecretStorage(vaultClient, opts.VaultOptions.RootPath, opts.SecretStorageOptions.SyncFrequency), nil
}
return nil, errors.Errorf("unknown or unspecified secret watcher type: %v", opts.SecretStorageOptions.Type)
} |
|
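// Editor's sketch (standalone, not part of the file above): the Vault branch of
// Bootstrap resolves the token in the order flag value -> VAULT_TOKEN env var ->
// token file. The same logic, extracted for illustration:
func resolveVaultToken(flagToken, tokenFile string) (string, error) {
	if flagToken != "" {
		return flagToken, nil
	}
	if env := os.Getenv("VAULT_TOKEN"); env != "" {
		return env, nil
	}
	if tokenFile == "" {
		return "", errors.New("no vault token source configured")
	}
	b, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		return "", errors.Wrap(err, "failed to read vault token file")
	}
	return strings.TrimSuffix(string(b), "\n"), nil
}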
main.rs | extern crate keyvalueextractor;
use std::path::Path;
fn print(t: &keyvalueextractor::KeyValueExtractor, path: &str)
|
fn main() {
match keyvalueextractor::KeyValueExtractor::new("%album%/%artist%-%title%") {
Err(err) => println!("Failed to parse: {:?}", err),
Ok(t) => {
println!("//////////////////////////////////");
print(&t, "songs/crap/Cannibal/Ke$ha-Crazy Beautiful Life.mp3");
print(&t, "songs/One Of The Boys/Katy Perry-I Kissed A Girl.mp3");
print(&t, "All I Ever Wanted/Kelly Clarkson-Long Shot.mp3");
print(&t, "music.mp3");
print(&t, "the las - There she goes again.mp3");
}
}
}
| {
let extracted = t.extract(Path::new(path));
println!("{}", path);
println!("{:?}", extracted);
println!("");
} |
service.py | import logging
from logging import getLogger, StreamHandler, INFO
import json
import os
import tensorflow as tf
import grpc
from pkg.apis.manager.v1beta1.python import api_pb2
from pkg.apis.manager.v1beta1.python import api_pb2_grpc
from pkg.suggestion.v1beta1.nas.enas.Controller import Controller
from pkg.suggestion.v1beta1.nas.enas.Operation import SearchSpace
from pkg.suggestion.v1beta1.nas.enas.AlgorithmSettings import (
parseAlgorithmSettings, algorithmSettingsValidator, enableNoneSettingsList)
from pkg.suggestion.v1beta1.internal.base_health_service import HealthServicer
class EnasExperiment:
def __init__(self, request, logger):
self.logger = logger
self.experiment_name = request.experiment.name
self.experiment = request.experiment
self.num_trials = 1
self.tf_graph = tf.Graph()
self.ctrl_cache_file = "ctrl_cache/{}.ckpt".format(
self.experiment_name)
self.suggestion_step = 0
self.algorithm_settings = None
self.controller = None
self.num_layers = None
self.input_sizes = None
self.output_sizes = None
self.num_operations = None
self.search_space = None
self.opt_direction = None
self.objective_name = None
self.logger.info("-" * 100 + "\nSetting Up Suggestion for Experiment {}\n".format(
self.experiment_name) + "-" * 100)
self._get_experiment_param()
self._setup_controller()
self.logger.info(">>> Suggestion for Experiment {} has been initialized.\n".format(
self.experiment_name))
def _get_experiment_param(self):
# This function needs to:
# 1) get the number of layers
# 2) get the I/O size
# 3) get the available operations
# 4) get the optimization direction (i.e. minimize or maximize)
# 5) get the objective name
# 6) get the algorithm settings
# Get Search Space
self.opt_direction = self.experiment.spec.objective.type
self.objective_name = self.experiment.spec.objective.objective_metric_name
nas_config = self.experiment.spec.nas_config
graph_config = nas_config.graph_config
self.num_layers = int(graph_config.num_layers)
self.input_sizes = list(map(int, graph_config.input_sizes))
self.output_sizes = list(map(int, graph_config.output_sizes))
search_space_raw = nas_config.operations
search_space_object = SearchSpace(search_space_raw)
self.search_space = search_space_object.search_space
self.num_operations = search_space_object.num_operations
self.print_search_space()
# Get Experiment Algorithm Settings
settings_raw = self.experiment.spec.algorithm.algorithm_settings
self.algorithm_settings = parseAlgorithmSettings(settings_raw)
self.print_algorithm_settings()
def _setup_controller(self):
with self.tf_graph.as_default():
self.controller = Controller(
num_layers=self.num_layers,
num_operations=self.num_operations,
controller_hidden_size=self.algorithm_settings['controller_hidden_size'],
controller_temperature=self.algorithm_settings['controller_temperature'],
controller_tanh_const=self.algorithm_settings['controller_tanh_const'],
controller_entropy_weight=self.algorithm_settings['controller_entropy_weight'],
controller_baseline_decay=self.algorithm_settings['controller_baseline_decay'],
controller_learning_rate=self.algorithm_settings["controller_learning_rate"],
controller_skip_target=self.algorithm_settings['controller_skip_target'],
controller_skip_weight=self.algorithm_settings['controller_skip_weight'],
controller_name="Ctrl_" + self.experiment_name,
logger=self.logger)
self.controller.build_trainer()
def print_search_space(self):
if self.search_space is None:
self.logger.warning(
"Error! The Suggestion has not yet been initialized!")
return
self.logger.info(
">>> Search Space for Experiment {}".format(self.experiment_name))
for opt in self.search_space:
opt.print_op(self.logger)
self.logger.info(
"There are {} operations in total.\n".format(self.num_operations))
def print_algorithm_settings(self):
if self.algorithm_settings is None:
self.logger.warning(
"Error! The Suggestion has not yet been initialized!")
return
self.logger.info(">>> Parameters of LSTM Controller for Experiment {}\n".format(
self.experiment_name))
for spec in self.algorithm_settings:
if len(spec) > 22:
self.logger.info("{}:\t{}".format(
spec, self.algorithm_settings[spec]))
else:
self.logger.info("{}:\t\t{}".format(
spec, self.algorithm_settings[spec]))
self.logger.info("")
class EnasService(api_pb2_grpc.SuggestionServicer, HealthServicer):
def __init__(self, logger=None):
super(EnasService, self).__init__()
self.is_first_run = True
self.experiment = None
if logger is None:
self.logger = getLogger(__name__)
FORMAT = '%(asctime)-15s Experiment %(experiment_name)s %(message)s'
logging.basicConfig(format=FORMAT)
handler = StreamHandler()
handler.setLevel(INFO)
self.logger.setLevel(INFO)
self.logger.addHandler(handler)
self.logger.propagate = False
else:
self.logger = logger
if not os.path.exists("ctrl_cache/"):
os.makedirs("ctrl_cache/")
def | (self, request, context):
self.logger.info("Validate Algorithm Settings start")
graph_config = request.experiment.spec.nas_config.graph_config
# Validate GraphConfig
# Check InputSize
if not graph_config.input_sizes:
return self.SetValidateContextError(context, "Missing InputSizes in GraphConfig:\n{}".format(graph_config))
# Check OutputSize
if not graph_config.output_sizes:
return self.SetValidateContextError(context, "Missing OutputSizes in GraphConfig:\n{}".format(graph_config))
# Check NumLayers
if not graph_config.num_layers:
return self.SetValidateContextError(context, "Missing NumLayers in GraphConfig:\n{}".format(graph_config))
# Validate each operation
operations_list = list(
request.experiment.spec.nas_config.operations.operation)
for operation in operations_list:
# Check OperationType
if not operation.operation_type:
return self.SetValidateContextError(context, "Missing operationType in Operation:\n{}".format(operation))
# Check ParameterConfigs
if not operation.parameter_specs.parameters:
return self.SetValidateContextError(context, "Missing ParameterConfigs in Operation:\n{}".format(operation))
# Validate each ParameterConfig in Operation
parameters_list = list(operation.parameter_specs.parameters)
for parameter in parameters_list:
# Check Name
if not parameter.name:
return self.SetValidateContextError(context, "Missing Name in ParameterConfig:\n{}".format(parameter))
# Check ParameterType
if not parameter.parameter_type:
return self.SetValidateContextError(context, "Missing ParameterType in ParameterConfig:\n{}".format(parameter))
# Check List in Categorical or Discrete Type
if parameter.parameter_type == api_pb2.CATEGORICAL or parameter.parameter_type == api_pb2.DISCRETE:
if not parameter.feasible_space.list:
return self.SetValidateContextError(context, "Missing List in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Check Max, Min, Step in Int or Double Type
elif parameter.parameter_type == api_pb2.INT or parameter.parameter_type == api_pb2.DOUBLE:
if not parameter.feasible_space.min and not parameter.feasible_space.max:
return self.SetValidateContextError(context, "Missing Max and Min in ParameterConfig.feasibleSpace:\n{}".format(parameter))
if parameter.parameter_type == api_pb2.DOUBLE and (not parameter.feasible_space.step or float(parameter.feasible_space.step) <= 0):
return self.SetValidateContextError(context, "Step parameter should be > 0 in ParameterConfig.feasibleSpace:\n{}".format(parameter))
# Validate Algorithm Settings
settings_raw = request.experiment.spec.algorithm.algorithm_settings
for setting in settings_raw:
if setting.name in algorithmSettingsValidator.keys():
if setting.name in enableNoneSettingsList and setting.value == "None":
continue
setting_type = algorithmSettingsValidator[setting.name][0]
setting_range = algorithmSettingsValidator[setting.name][1]
try:
converted_value = setting_type(setting.value)
except Exception:
return self.SetValidateContextError(context, "Algorithm Setting {} must be {} type".format(setting.name, setting_type.__name__))
if setting_type == float:
if converted_value <= setting_range[0] or (setting_range[1] != 'inf' and converted_value > setting_range[1]):
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range ({}, {}]".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
elif converted_value < setting_range[0]:
return self.SetValidateContextError(context, "Algorithm Setting {}: {} with {} type must be in range [{}, {})".format(
setting.name, converted_value, setting_type.__name__, setting_range[0], setting_range[1]
))
else:
return self.SetValidateContextError(context, "Unknown Algorithm Setting name: {}".format(setting.name))
self.logger.info("All Experiment Settings are Valid")
return api_pb2.ValidateAlgorithmSettingsReply()
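# Editor's note (assumption, inferred from the lookups above): each entry of
# algorithmSettingsValidator is taken to be a (type, (low, high)) pair, e.g.
#     "controller_train_steps": (int, (1, 'inf'))
# where 'inf' marks an unbounded upper end, as handled in the float branch.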
def SetValidateContextError(self, context, error_message):
context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
context.set_details(error_message)
self.logger.info(error_message)
return api_pb2.ValidateAlgorithmSettingsReply()
def GetSuggestions(self, request, context):
if self.is_first_run:
self.experiment = EnasExperiment(request, self.logger)
experiment = self.experiment
if request.request_number > 0:
experiment.num_trials = request.request_number
self.logger.info("-" * 100 + "\nSuggestion Step {} for Experiment {}\n".format(
experiment.suggestion_step, experiment.experiment_name) + "-" * 100)
self.logger.info("")
self.logger.info(">>> RequestNumber:\t\t{}".format(experiment.num_trials))
self.logger.info("")
with experiment.tf_graph.as_default():
saver = tf.compat.v1.train.Saver()
ctrl = experiment.controller
controller_ops = {
"loss": ctrl.loss,
"entropy": ctrl.sample_entropy,
"grad_norm": ctrl.grad_norm,
"baseline": ctrl.baseline,
"skip_rate": ctrl.skip_rate,
"train_op": ctrl.train_op,
"train_step": ctrl.train_step,
"sample_arc": ctrl.sample_arc,
"child_val_accuracy": ctrl.child_val_accuracy,
}
if self.is_first_run:
self.logger.info(">>> First time running suggestion for {}. Random architecture will be given.".format(
experiment.experiment_name))
with tf.compat.v1.Session() as sess:
sess.run(tf.compat.v1.global_variables_initializer())
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
# TODO: will use PVC to store the checkpoint to protect against unexpected suggestion pod restart
saver.save(sess, experiment.ctrl_cache_file)
self.is_first_run = False
else:
with tf.compat.v1.Session() as sess:
saver.restore(sess, experiment.ctrl_cache_file)
result = self.GetEvaluationResult(request.trials)
# TODO: (andreyvelich) I deleted this part, should it be handled by the controller?
# Sometimes the training container may fail and GetEvaluationResult() will return None.
# In this case, the Suggestion will:
# 1. First try to respawn the previous trials after waiting for RESPAWN_SLEEP seconds
# 2. If respawning the trials RESPAWN_LIMIT times still cannot collect valid results,
#    then fail the task, because this may indicate that the training container has errors.
if result is None:
self.logger.warning(
">>> Suggestion has spawned trials, but they all failed.")
self.logger.warning(
">>> Please check whether the training container is correctly implemented")
self.logger.info(">>> Experiment {} failed".format(
experiment.experiment_name))
return []
# This LSTM network is designed to maximize the metrics
# However, if the user wants to minimize the metrics, we can take the negative of the result
if experiment.opt_direction == api_pb2.MINIMIZE:
result = -result
self.logger.info(">>> Suggestion updated. LSTM Controller Training\n")
log_every = experiment.algorithm_settings["controller_log_every_steps"]
for ctrl_step in range(1, experiment.algorithm_settings["controller_train_steps"]+1):
run_ops = [
controller_ops["loss"],
controller_ops["entropy"],
controller_ops["grad_norm"],
controller_ops["baseline"],
controller_ops["skip_rate"],
controller_ops["train_op"]
]
loss, entropy, grad_norm, baseline, skip_rate, _ = sess.run(
fetches=run_ops,
feed_dict={controller_ops["child_val_accuracy"]: result})
controller_step = sess.run(controller_ops["train_step"])
if ctrl_step % log_every == 0:
log_string = ""
log_string += "Controller Step: {} - ".format(controller_step)
log_string += "Loss: {:.4f} - ".format(loss)
log_string += "Entropy: {:.9} - ".format(entropy)
log_string += "Gradient Norm: {:.7f} - ".format(grad_norm)
log_string += "Baseline={:.4f} - ".format(baseline)
log_string += "Skip Rate={:.4f}".format(skip_rate)
self.logger.info(log_string)
candidates = list()
for _ in range(experiment.num_trials):
candidates.append(
sess.run(controller_ops["sample_arc"]))
saver.save(sess, experiment.ctrl_cache_file)
organized_candidates = list()
parameter_assignments = list()
for i in range(experiment.num_trials):
arc = candidates[i].tolist()
organized_arc = [0 for _ in range(experiment.num_layers)]
record = 0
for l in range(experiment.num_layers):
organized_arc[l] = arc[record: record + l + 1]
record += l + 1
organized_candidates.append(organized_arc)
nn_config = dict()
nn_config['num_layers'] = experiment.num_layers
nn_config['input_sizes'] = experiment.input_sizes
nn_config['output_sizes'] = experiment.output_sizes
nn_config['embedding'] = dict()
for l in range(experiment.num_layers):
opt = organized_arc[l][0]
nn_config['embedding'][opt] = experiment.search_space[opt].get_dict()
organized_arc_json = json.dumps(organized_arc)
nn_config_json = json.dumps(nn_config)
organized_arc_str = str(organized_arc_json).replace('\"', '\'')
nn_config_str = str(nn_config_json).replace('\"', '\'')
self.logger.info(
"\n>>> New Neural Network Architecture Candidate #{} (internal representation):".format(i))
self.logger.info(organized_arc_json)
self.logger.info("\n>>> Corresponding Seach Space Description:")
self.logger.info(nn_config_str)
parameter_assignments.append(
api_pb2.GetSuggestionsReply.ParameterAssignments(
assignments=[
api_pb2.ParameterAssignment(
name="architecture",
value=organized_arc_str
),
api_pb2.ParameterAssignment(
name="nn_config",
value=nn_config_str
)
]
)
)
self.logger.info("")
self.logger.info(">>> {} Trials were created for Experiment {}".format(
experiment.num_trials, experiment.experiment_name))
self.logger.info("")
experiment.suggestion_step += 1
return api_pb2.GetSuggestionsReply(parameter_assignments=parameter_assignments)
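# Editor's sketch (hypothetical helper, not in the original file): the
# reorganization above gives layer l exactly (l + 1) tokens -- one operation id
# plus l skip-connection bits taken from the controller's flat encoding.
def _organize_arc_example(arc, num_layers):
    organized, record = [], 0
    for l in range(num_layers):
        organized.append(arc[record: record + l + 1])
        record += l + 1
    # e.g. arc=[3, 1, 0, 2, 1, 1], num_layers=3 -> [[3], [1, 0], [2, 1, 1]]
    return organized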
def GetEvaluationResult(self, trials_list):
completed_trials = dict()
failed_trials = []
for t in trials_list:
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.SUCCEEDED:
target_value = None
for metric in t.status.observation.metrics:
if metric.name == t.spec.objective.objective_metric_name:
target_value = metric.value
break
# Take only the first metric value
# In the current cifar-10 training container this value is the latest one
completed_trials[t.name] = float(target_value)
if t.status.condition == api_pb2.TrialStatus.TrialConditionType.FAILED:
failed_trials.append(t.name)
n_completed = len(completed_trials)
self.logger.info(">>> By now: {} Trials succeeded, {} Trials failed".format(
n_completed, len(failed_trials)))
for tname in completed_trials:
self.logger.info("Trial: {}, Value: {}".format(
tname, completed_trials[tname]))
for tname in failed_trials:
self.logger.info("Trial: {} was failed".format(tname))
if n_completed > 0:
avg_metrics = sum(completed_trials.values()) / n_completed
self.logger.info("The average is {}\n".format(avg_metrics))
return avg_metrics
| ValidateAlgorithmSettings |
main.go | /*
Copyright 2014 Outbrain Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"os"
"github.com/github/orchestrator/go/app"
"github.com/github/orchestrator/go/config"
"github.com/github/orchestrator/go/inst"
"github.com/openark/golib/log"
)
var AppVersion, GitCommit string
// main is the application's entry point. It will spawn either a CLI or an HTTP interface.
func | () {
configFile := flag.String("config", "", "config file name")
command := flag.String("c", "", "command, required. See full list of commands via 'orchestrator -c help'")
strict := flag.Bool("strict", false, "strict mode (more checks, slower)")
instance := flag.String("i", "", "instance, host_fqdn[:port] (e.g. db.company.com:3306, db.company.com)")
sibling := flag.String("s", "", "sibling instance, host_fqdn[:port]")
destination := flag.String("d", "", "destination instance, host_fqdn[:port] (synonym to -s)")
owner := flag.String("owner", "", "operation owner")
reason := flag.String("reason", "", "operation reason")
duration := flag.String("duration", "", "maintenance duration (format: 59s, 59m, 23h, 6d, 4w)")
pattern := flag.String("pattern", "", "regular expression pattern")
clusterAlias := flag.String("alias", "", "cluster alias")
pool := flag.String("pool", "", "Pool logical name (applies for pool-related commands)")
hostnameFlag := flag.String("hostname", "", "Hostname/fqdn/CNAME/VIP (applies for hostname/resolve related commands)")
discovery := flag.Bool("discovery", true, "auto discovery mode")
quiet := flag.Bool("quiet", false, "quiet")
verbose := flag.Bool("verbose", false, "verbose")
debug := flag.Bool("debug", false, "debug mode (very verbose)")
stack := flag.Bool("stack", false, "add stack trace upon error")
config.RuntimeCLIFlags.SkipBinlogSearch = flag.Bool("skip-binlog-search", false, "when matching via Pseudo-GTID, only use relay logs. This can save the hassle of searching for a non-existent pseudo-GTID entry, for example in servers with replication filters.")
config.RuntimeCLIFlags.SkipUnresolve = flag.Bool("skip-unresolve", false, "Do not unresolve a host name")
config.RuntimeCLIFlags.SkipUnresolveCheck = flag.Bool("skip-unresolve-check", false, "Skip/ignore checking an unresolve mapping (via hostname_unresolve table) resolves back to same hostname")
config.RuntimeCLIFlags.Noop = flag.Bool("noop", false, "Dry run; do not perform destructive operations")
config.RuntimeCLIFlags.BinlogFile = flag.String("binlog", "", "Binary log file name")
config.RuntimeCLIFlags.Statement = flag.String("statement", "", "Statement/hint")
config.RuntimeCLIFlags.GrabElection = flag.Bool("grab-election", false, "Grab leadership (only applies to continuous mode)")
config.RuntimeCLIFlags.PromotionRule = flag.String("promotion-rule", "prefer", "Promotion rule for register-candidate (prefer|neutral|prefer_not|must_not)")
config.RuntimeCLIFlags.Version = flag.Bool("version", false, "Print version and exit")
config.RuntimeCLIFlags.SkipContinuousRegistration = flag.Bool("skip-continuous-registration", false, "Skip CLI commands performing continuous registration (to reduce orchestrator backend db load)")
config.RuntimeCLIFlags.EnableDatabaseUpdate = flag.Bool("enable-database-update", false, "Enable database update, overrides SkipOrchestratorDatabaseUpdate")
config.RuntimeCLIFlags.IgnoreRaftSetup = flag.Bool("ignore-raft-setup", false, "Override RaftEnabled for CLI invocation (CLI by default not allowed for raft setups). NOTE: operations by CLI invocation may not be reflected in all raft nodes.")
flag.Parse()
if *destination != "" && *sibling != "" {
log.Fatalf("-s and -d are synonyms, yet both were specified. You're probably doing the wrong thing.")
}
switch *config.RuntimeCLIFlags.PromotionRule {
case "prefer", "neutral", "prefer_not", "must_not":
{
// OK
}
default:
{
log.Fatalf("-promotion-rule only supports prefer|neutral|prefer_not|must_not")
}
}
if *destination == "" {
*destination = *sibling
}
log.SetLevel(log.ERROR)
if *verbose {
log.SetLevel(log.INFO)
}
if *debug {
log.SetLevel(log.DEBUG)
}
if *stack {
log.SetPrintStackTrace(*stack)
}
if *config.RuntimeCLIFlags.Version {
fmt.Println(AppVersion)
fmt.Println(GitCommit)
return
}
startText := "starting orchestrator"
if AppVersion != "" {
startText += ", version: " + AppVersion
}
if GitCommit != "" {
startText += ", git commit: " + GitCommit
}
log.Info(startText)
if len(*configFile) > 0 {
config.ForceRead(*configFile)
} else {
config.Read("/etc/orchestrator.conf.json", "conf/orchestrator.conf.json", "orchestrator.conf.json")
}
if *config.RuntimeCLIFlags.EnableDatabaseUpdate {
config.Config.SkipOrchestratorDatabaseUpdate = false
}
if config.Config.Debug {
log.SetLevel(log.DEBUG)
}
if *quiet {
// Override!!
log.SetLevel(log.ERROR)
}
if config.Config.EnableSyslog {
log.EnableSyslogWriter("orchestrator")
log.SetSyslogLevel(log.INFO)
}
if config.Config.AuditToSyslog {
inst.EnableAuditSyslog()
}
config.RuntimeCLIFlags.ConfiguredVersion = AppVersion
config.MarkConfigurationLoaded()
if len(flag.Args()) == 0 && *command == "" {
// No command, no argument: just prompt
fmt.Println(app.AppPrompt)
return
}
helpTopic := ""
if flag.Arg(0) == "help" {
if flag.Arg(1) != "" {
helpTopic = flag.Arg(1)
}
if helpTopic == "" {
helpTopic = *command
}
if helpTopic == "" {
// hacky way to make the CLI kick in as if the user typed `orchestrator -c help cli`
*command = "help"
flag.Args()[0] = "cli"
}
}
switch {
case helpTopic != "":
app.HelpCommand(helpTopic)
case len(flag.Args()) == 0 || flag.Arg(0) == "cli":
app.CliWrapper(*command, *strict, *instance, *destination, *owner, *reason, *duration, *pattern, *clusterAlias, *pool, *hostnameFlag)
case flag.Arg(0) == "http":
app.Http(*discovery)
default:
fmt.Fprintln(os.Stderr, `Usage:
orchestrator --options... [cli|http]
See complete list of commands:
orchestrator -c help
Full blown documentation:
orchestrator`)
os.Exit(1)
}
}
| main |
Sugared_Logger.go | package logging
import (
"Goez/pkg/config"
"github.com/natefinch/lumberjack"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"net/http"
)
var sugarLogger *zap.SugaredLogger
//func main() {
// InitLogger()
// defer sugarLogger.Sync()
// simpleHttpGet("www.google.com")
// simpleHttpGet("http://159.75.82.148:8000/api/v1/articles?pageSize=50")
//}
func Setup() {
writeSyncer := getLogWriter()
encoder := getEncode()
core := zapcore.NewCore(encoder, writeSyncer, zapcore.DebugLevel)
logger := zap.New(core, zap.AddCaller())
sugarLogger = logger.Sugar()
}
func GetLogger() *zap.SugaredLogger {
return sugarLogger
}
func getEncode() zapcore.Encoder {
encoderConfig := zap.NewProductionEncoderConfig()
encoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder // use ISO8601 time encoding
encoderConfig.EncodeLevel = zapcore.CapitalLevelEncoder
return zapcore.NewConsoleEncoder(encoderConfig)
}
func getLogWriter() zapcore.WriteSyncer {
file := config.AppSetting.RuntimeRootPath + config.AppSetting.LogSavePath + config.AppSetting.LogSaveName
lumberJackLogger := &lumberjack.Logger{
Filename: file,
MaxSize: 10,
MaxBackups: 5,
MaxAge: 30,
Compress: false,
LocalTime: true, | }
func Info(info string, args interface{}) {
sugarLogger.Infof(info, args)
}
func Error(err string, args interface{}) {
sugarLogger.Errorf(err, args)
}
func simpleHttpGet(url string) {
sugarLogger.Debugf("Trying to hit GET request for %s", url)
resp, err := http.Get(url)
if err != nil {
sugarLogger.Errorf("Error fetching URL %s : Error = %s", url, err)
} else {
sugarLogger.Infof("Success! statusCode = %s for URL %s", resp.Status, url)
resp.Body.Close()
}
} | }
return zapcore.AddSync(lumberJackLogger) |
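A minimal usage sketch of the logger above, for illustration; the "Goez/pkg/logging" import path and a populated config.AppSetting are assumptions, not guaranteed by this file:
package main
import "Goez/pkg/logging"
func main() {
// Setup must run before any logging call; otherwise sugarLogger is nil.
logging.Setup()
defer logging.GetLogger().Sync() // flush any buffered entries on exit
logging.Info("listening on %s", ":8000")
logging.Error("failed to load %s", "app.ini")
}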
common_utils.py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for common.tags."""
__author__ = 'Mike Gainer ([email protected])'
import datetime
import os
import unittest
import appengine_config
from common import utils
class CommonUnitTests(unittest.TestCase):
# --------------------------- String-to-list.
def test_list_parsing(self):
self.assertListEqual(['foo'], utils.text_to_list('foo'))
self.assertListEqual(['foo'], utils.text_to_list(' foo'))
self.assertListEqual(['foo'], utils.text_to_list('foo '))
self.assertListEqual(['foo'], utils.text_to_list(' foo '))
self.assertListEqual(['foo'], utils.text_to_list('foo\t'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo\t'))
self.assertListEqual(['foo'], utils.text_to_list('foo '))
self.assertListEqual(['foo'], utils.text_to_list(' foo'))
self.assertListEqual(['foo'], utils.text_to_list(' foo '))
self.assertListEqual(['foo'], utils.text_to_list('foo\n'))
self.assertListEqual(['foo'], utils.text_to_list('\nfoo'))
self.assertListEqual(['foo'], utils.text_to_list('\nfoo\n'))
self.assertListEqual(['foo'], utils.text_to_list('foo,'))
self.assertListEqual(['foo'], utils.text_to_list(',foo'))
self.assertListEqual(['foo'], utils.text_to_list(',foo,'))
self.assertListEqual(['foo'], utils.text_to_list(' foo ,\n'))
self.assertListEqual(['foo'], utils.text_to_list('\tfoo,\t\n'))
self.assertListEqual(['foo'], utils.text_to_list(',foo,\n'))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo]',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo],',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo], ', utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo],\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo'],
utils.text_to_list(
'[foo], \n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo bar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(' foo bar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo bar '))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\tbar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\tfoo\tbar'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\tbar\t'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('foo\nbar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\nfoo\nbar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list('\n foo\n bar\n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(' \n foo \n bar \n'))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'[foo][bar]',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
' [foo] [bar] ',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'\n[foo]\n[bar]\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
self.assertListEqual(['foo', 'bar'],
utils.text_to_list(
'\n,[foo],\n[bar],\n',
utils.BACKWARD_COMPATIBLE_SPLITTER))
def test_none_split(self):
self.assertListEqual([], utils.text_to_list(None))
def | (self):
self.assertListEqual([], utils.text_to_list(''))
def test_all_separators_split(self):
self.assertListEqual([], utils.text_to_list(' ,,, \t\t\n\t '))
def test_one_item_split(self):
self.assertListEqual(['x'], utils.text_to_list('x'))
def test_join_none(self):
self.assertEquals('', utils.list_to_text(None))
def test_join_empty(self):
self.assertEquals('', utils.list_to_text([]))
def test_join_one(self):
self.assertEquals('x', utils.list_to_text(['x']))
def test_join_two(self):
self.assertEquals('x y', utils.list_to_text(['x', 'y']))
def test_join_split(self):
l = ['a', 'b', 'c']
self.assertListEqual(l, utils.text_to_list(utils.list_to_text(l)))
def test_split_join(self):
text = 'a b c'
self.assertEquals(text, utils.list_to_text(utils.text_to_list(text)))
class ZipAwareOpenTests(unittest.TestCase):
def test_find_in_lib_without_relative_path(self):
path = os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'babel-0.9.6.zip',
'babel', 'localedata', 'root.dat')
with self.assertRaises(IOError):
open(path) # This fails.
with utils.ZipAwareOpen():
data = open(path).read()
self.assertEquals(17490, len(data))
data = open(path, 'r').read()
self.assertEquals(17490, len(data))
data = open(path, mode='r').read()
self.assertEquals(17490, len(data))
data = open(name=path, mode='r').read()
self.assertEquals(17490, len(data))
data = open(name=path).read()
self.assertEquals(17490, len(data))
with self.assertRaises(IOError):
open(path) # This fails again; open has been reset to normal.
def test_find_in_lib_with_relative_path(self):
path = os.path.join(
appengine_config.BUNDLE_ROOT, 'lib', 'markdown-2.5.zip',
'setup.cfg')
with self.assertRaises(IOError):
open(path) # This fails.
with utils.ZipAwareOpen():
data = open(path).read()
self.assertEquals(12, len(data))
class ParseTimedeltaTests(unittest.TestCase):
def test_parse_empty_string(self):
self.assertEquals(
utils.parse_timedelta_string(''),
datetime.timedelta())
def test_parse_zero(self):
self.assertEquals(
utils.parse_timedelta_string('0'),
datetime.timedelta())
def test_parse_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string('Amidst the mists and coldest frosts'),
datetime.timedelta())
def test_parse_leading_valid_partial_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string(
'5 days and a partridge in a pear tree'),
datetime.timedelta(days=5))
def test_parse_trailing_valid_partial_gibberish(self):
self.assertEquals(
utils.parse_timedelta_string('we will leave in 5 days'),
datetime.timedelta(days=5))
def test_parse_units(self):
for unit in ('week', 'day', 'hour', 'minute', 'second'):
self._test_parse_units(unit)
def _test_parse_units(self, unit):
expected1 = datetime.timedelta(**{unit + 's': 1})
expected2 = datetime.timedelta(**{unit + 's': 2})
self.assertEquals(
utils.parse_timedelta_string('1%s' % unit[0]), expected1)
self.assertEquals(
utils.parse_timedelta_string('1%s' % unit), expected1)
self.assertEquals(
utils.parse_timedelta_string('2%ss' % unit), expected2)
self.assertEquals(
utils.parse_timedelta_string('2 %s' % unit[0]), expected2)
self.assertEquals(
utils.parse_timedelta_string('1 %s' % unit), expected1)
self.assertEquals(
utils.parse_timedelta_string('2 %s' % unit), expected2)
self.assertEquals(
utils.parse_timedelta_string('2 \t\t\n %ss' % unit), expected2)
def test_parse_out_of_bounds_handled_successfully(self):
self.assertEquals(
utils.parse_timedelta_string('86400s'),
datetime.timedelta(days=1))
self.assertEquals(
utils.parse_timedelta_string('19d, 86400s'),
datetime.timedelta(weeks=2, days=6))
def test_parse_combinations(self):
self.assertEquals(
utils.parse_timedelta_string('3w1d3m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3w, 1d, 3m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 w 1 d 3 m'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 weeks 1 day 3 minutes'),
datetime.timedelta(weeks=3, days=1, minutes=3))
self.assertEquals(
utils.parse_timedelta_string('3 weeks, 1 day, 3 minutes'),
datetime.timedelta(weeks=3, days=1, minutes=3))
class ValidateTimedeltaTests(unittest.TestCase):
def test_blank_is_allowed(self):
errors = []
utils.ValidateTimedelta.validate('', errors)
self.assertEquals(0, len(errors))
def test_none_is_allowed(self):
errors = []
utils.ValidateTimedelta.validate(None, errors)
self.assertEquals(0, len(errors))
def test_bare_numbers_not_allowed(self):
errors = []
utils.ValidateTimedelta.validate('0', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('1', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('-1', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('100', errors)
self.assertEquals(1, len(errors))
def test_valid_items_allowed(self):
errors = []
utils.ValidateTimedelta.validate('1s', errors)
utils.ValidateTimedelta.validate('2m', errors)
utils.ValidateTimedelta.validate('3h', errors)
utils.ValidateTimedelta.validate('4d', errors)
utils.ValidateTimedelta.validate('5w', errors)
utils.ValidateTimedelta.validate('5 Weeks, 1D,2HOURS 3 seconds',
errors)
self.assertEquals(0, len(errors))
def test_invalid_items_disallowed(self):
errors = []
utils.ValidateTimedelta.validate('1t', errors)
self.assertEquals(1, len(errors))
errors = []
utils.ValidateTimedelta.validate('1 year', errors)
self.assertEquals(1, len(errors))
def test_parse_months_gives_error(self):
errors = []
utils.ValidateTimedelta.validate('3 months', errors)
self.assertEquals(1, len(errors))
| test_empty_split |
cast.rs | #[warn(
clippy::cast_precision_loss,
clippy::cast_possible_truncation,
clippy::cast_sign_loss,
clippy::cast_possible_wrap
)]
#[allow(clippy::no_effect, clippy::unnecessary_operation)]
fn | () {
// Test clippy::cast_precision_loss
let x0 = 1i32;
x0 as f32;
let x1 = 1i64;
x1 as f32;
x1 as f64;
let x2 = 1u32;
x2 as f32;
let x3 = 1u64;
x3 as f32;
x3 as f64;
// Test clippy::cast_possible_truncation
1f32 as i32;
1f32 as u32;
1f64 as f32;
1i32 as i8;
1i32 as u8;
1f64 as isize;
1f64 as usize;
// Test clippy::cast_possible_wrap
1u8 as i8;
1u16 as i16;
1u32 as i32;
1u64 as i64;
1usize as isize;
// Test clippy::cast_sign_loss
1i32 as u32;
-1i32 as u32;
1isize as usize;
-1isize as usize;
0i8 as u8;
i8::max_value() as u8;
i16::max_value() as u16;
i32::max_value() as u32;
i64::max_value() as u64;
i128::max_value() as u128;
}
| main |
user_handler.go | package handler
import (
"Backend-Api/models"
"Backend-Api/mydb"
"log"
"net/http"
"github.com/labstack/echo"
"github.com/thanhpk/randstr"
)
func Register(db mydb.DB) func(c echo.Context) error {
return func(c echo.Context) error {
user := new(models.User)
err := c.Bind(user)
if err != nil {
return c.JSON(http.StatusUnprocessableEntity, nil)
}
u, err := db.GetUserWithCellphone(user.Cellphone)
if u != nil {
// a user with this cellphone already exists, so reject the registration
return c.JSON(http.StatusServiceUnavailable, nil)
}
err = db.InsertUser(user)
if err != nil {
log.Print(err.Error())
return c.JSON(http.StatusServiceUnavailable, nil)
}
return c.JSON(http.StatusOK, nil)
}
}
func Update(db mydb.DB) func(c echo.Context) error {
return func(c echo.Context) error {
id, err := db.GetIDFromToken(c.Request().Header.Get("authorization"))
if err != nil || id == 0 {
return c.JSON(http.StatusUnauthorized, nil)
}
user := new(models.User)
err = c.Bind(user)
if err != nil {
return c.JSON(http.StatusUnprocessableEntity, nil)
}
err = db.UpdateUser(id, user)
if err != nil {
return c.JSON(http.StatusServiceUnavailable, nil)
}
return c.JSON(http.StatusOK, nil)
}
}
func | (db mydb.DB) func(c echo.Context) error {
return func(c echo.Context) error {
user := new(models.User)
err := c.Bind(user)
if err != nil {
return c.JSON(http.StatusUnprocessableEntity, nil)
}
oUser, err := db.GetUserWithCellphone(user.Cellphone)
if err != nil {
log.Print(err.Error())
return c.JSON(http.StatusServiceUnavailable, nil)
}
if oUser.Password == user.Password {
token := randstr.String(10)
err = db.SetToken(oUser.ID, token)
if err != nil {
log.Print(err.Error())
return c.JSON(http.StatusServiceUnavailable, nil)
}
return c.JSON(http.StatusOK, echo.Map{"token":token})
}
return c.JSON(http.StatusUnauthorized, nil)
}
}
| LogIn |
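A sketch of how these handlers might be mounted on an echo router; the route paths and the mydb.New constructor are hypothetical, only the handler signatures come from this file:
package main
import (
"Backend-Api/handler"
"Backend-Api/mydb"
"github.com/labstack/echo"
)
func main() {
db := mydb.New() // hypothetical constructor returning a mydb.DB
e := echo.New()
e.POST("/register", handler.Register(db))
e.POST("/login", handler.LogIn(db))
e.PUT("/user", handler.Update(db))
e.Logger.Fatal(e.Start(":8080"))
}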
struct___t_o_p_s_e_n_s___j_o_i_n_t.js | var struct___t_o_p_s_e_n_s___j_o_i_n_t =
[
[ "Orientation", "struct___t_o_p_s_e_n_s___j_o_i_n_t.html#adc2d7dce255f281b0debb5823f8f69f1", null ],
[ "Position", "struct___t_o_p_s_e_n_s___j_o_i_n_t.html#a2903185f44307386081a2311648cc313", null ],
[ "Rotation", "struct___t_o_p_s_e_n_s___j_o_i_n_t.html#ab5e3d8ffa6d74f5d5fde62c925ef85cb", null ] | ]; |
|
context.rs | use std::path::{Path, PathBuf};
use std::str::FromStr;
use anyhow::{anyhow, Error, Result};
use move_core_types::account_address::AccountAddress;
use lang::compiler::dialects::{Dialect, DialectName};
use crate::index::Index;
use crate::manifest::{default_dialect, DoveToml, MANIFEST, read_manifest};
use diem_crypto::hash::CryptoHash;
use crate::index::interface::{InterfaceBuilder, Interface};
use crate::metadata::MapDependencies;
/// Project context.
pub struct Context {
/// Project root directory.
pub project_dir: PathBuf,
/// Project manifest.
pub manifest: DoveToml,
/// Move dialect.
pub dialect: Box<dyn Dialect>,
}
impl Context {
/// Returns the absolute path in the project, as a string.
pub fn str_path_for<P: AsRef<Path>>(&self, path: P) -> Result<String, Error> {
let mut abs_path = self.path_for(path);
if abs_path.exists() {
abs_path = dunce::canonicalize(abs_path)?;
}
abs_path
.to_str()
.map(|path| path.to_owned())
.ok_or_else(|| anyhow!("Failed to display absolute path:[{:?}]", abs_path))
}
/// Creates an absolute path in the project.
pub fn path_for<P: AsRef<Path>>(&self, path: P) -> PathBuf {
self.project_dir.join(path)
}
/// Creates absolute paths in the project.
pub fn paths_for<P: AsRef<Path>>(&self, paths: &[P]) -> Vec<PathBuf> {
paths
.iter()
.map(|d| self.path_for(&d))
.filter(|p| p.exists())
.collect()
}
/// Build project index.
pub fn build_index(&self) -> Result<(Index, Interface), Error> {
let index_path = self.path_for(&self.manifest.layout.index);
let old_index = Index::load(&index_path)?.unwrap_or_default();
let package_hash = self.package_hash();
let index = if old_index.package_hash == package_hash {
old_index
} else {
let new_index = Index::build(package_hash, self)?;
new_index.store(&index_path)?;
new_index.remove_unused(old_index.diff(&new_index))?;
new_index.remove_unnecessary_elements_in_dependencies();
MapDependencies::create_and_save(self)?;
new_index
};
let builder = InterfaceBuilder::new(self, &index);
let interface = builder.build()?;
Ok((index, interface))
}
/// Returns project name or default name `project` if the name is not defined.
pub fn project_name(&self) -> String {
self.manifest.package.name.clone().unwrap_or_else(|| {
self.project_dir
.file_name()
.and_then(|name| name.to_str())
.unwrap_or("project")
.to_owned()
})
}
/// Returns provided account address.
pub fn account_address(&self) -> Result<AccountAddress> {
self.dialect
.parse_address(&self.manifest.package.account_address)
}
/// Returns provided account address.
pub fn account_address_str(&self) -> Result<String> {
Ok(format!(
"0x{}",
self.dialect
.parse_address(&self.manifest.package.account_address)?
))
}
/// Calculates package hash.
pub fn package_hash(&self) -> String {
self.manifest.package.hash().to_string()
}
/// Returns interface files dir.
pub fn interface_files_dir(&self) -> PathBuf {
self.path_for(&self.manifest.layout.system_folder)
.join("interface_files_dir")
}
/// Returns directory for dependency bytecode.
pub fn deps_mv_dir(&self) -> PathBuf {
self.path_for(&self.manifest.layout.system_folder)
.join("depmv")
}
/// Interface files lock.
pub fn interface_files_lock(&self) -> PathBuf {
self.path_for(&self.manifest.layout.system_folder)
.join("interface.lock")
}
}
pub(crate) fn get_context(project_dir: PathBuf, manifest: DoveToml) -> Result<Context> {
let dialect_name = manifest
.package
.dialect
.clone()
.unwrap_or_else(default_dialect);
let dialect = DialectName::from_str(&dialect_name)?;
Ok(Context {
project_dir,
manifest,
dialect: dialect.get_dialect(),
})
}
pub(crate) fn | (project_dir: &Path) -> Result<DoveToml> {
let manifest = project_dir.join(MANIFEST);
if !manifest.exists() {
Err(anyhow!(
"could not find `{}` in `{:?}`.",
MANIFEST,
project_dir
))
} else {
read_manifest(&manifest)
}
}
pub(crate) fn str_path<P: AsRef<Path>>(path: P) -> Result<String, Error> {
let path = path.as_ref();
path.to_str()
.map(|path| path.to_owned())
.ok_or_else(|| anyhow!("Failed to display absolute path:[{:?}]", path))
}
| load_manifest |
test.py | import robosoc2d
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4)
while robosoc2d.simulator_step_if_playing(sim_handle):
print(robosoc2d.simulator_get_state_string(sim_handle))
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete_all()
class MyPlayer:
def __init__(self):
self.c=0
def step(self, env, pitch, settings, team1, team2):
print("player step says that's tick per time= "+str(settings.ticks_per_time)+" , internal variable c="+str(self.c))
self.c+=1
action=(robosoc2d.ACTION_DASH, 1.5, 0.06, 0.0)
return action
my_team=[MyPlayer() for n in range(4) ]
sim_handle = robosoc2d.build_simpleplayer_simulator(my_team, 0, [], 4) #, "my team", "simple players team", robosoc2d.get_seed_by_current_time(),sett)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
robosoc2d.simulator_step_if_playing(handle=sim_handle)
print(robosoc2d.simulator_get_state_string(sim_handle))
robosoc2d.simulator_delete(sim_handle)
sett=robosoc2d.get_default_settings()
sett.ticks_per_time=421
sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", robosoc2d.get_seed_by_current_time(), sett)
#sim_handle = robosoc2d.build_simpleplayer_simulator([], 4, [], 4, "The snakes", "Pacific United", game_settings=sett, random_seed=robosoc2d.get_seed_by_current_time())
robosoc2d.simulator_play_game(sim_handle)
simState = robosoc2d.simulator_get_game_state(sim_handle)
print(simState[0])
mydict=eval(str(simState[0]))
print(str(mydict["n_players1"]))
print(simState[1])
mydict=eval(str(simState[1]))
print(str(mydict["goal_kick_rx"]))
print(simState[2])
mydict=eval(str(simState[2])) | mydict=eval(str(aplayerinfo))
print(str(mydict["direction"]))
print("random seed: "+str(robosoc2d.simulator_get_random_seed(sim_handle)))
print(robosoc2d.simulator_get_team_names(sim_handle))
simState = robosoc2d.simulator_get_game_state(sim_handle)
copiedEnv =simState[0].copy()
copiedEnv.tick=100
myState = robosoc2d.environment()
print(simState[0])
print(copiedEnv)
print(myState)
print(robosoc2d.simulator_is_valid(sim_handle))
print(robosoc2d.simulator_is_valid(4000)) | print(str(mydict["ticks_per_time"]))
aplayerinfo=simState[3][0]
print(aplayerinfo) |
EllipsisH.d.ts | import * as React from 'react';
import { StyledIconProps } from '@styled-icons/styled-icon';
export declare const EllipsisH: React.ForwardRefExoticComponent<Pick<StyledIconProps, "string" | "className" | "color" | "height" | "id" | "lang" | "max" | "media" | "method" | "min" | "name" | "style" | "target" | "type" | "width" | "role" | "tabIndex" | "accentHeight" | "accumulate" | "additive" | "alignmentBaseline" | "allowReorder" | "alphabetic" | "amplitude" | "arabicForm" | "ascent" | "attributeName" | "attributeType" | "autoReverse" | "azimuth" | "baseFrequency" | "baselineShift" | "baseProfile" | "bbox" | "begin" | "bias" | "by" | "calcMode" | "capHeight" | "clip" | "clipPath" | "clipPathUnits" | "clipRule" | "colorInterpolation" | "colorInterpolationFilters" | "colorProfile" | "colorRendering" | "contentScriptType" | "contentStyleType" | "cursor" | "cx" | "cy" | "d" | "decelerate" | "descent" | "diffuseConstant" | "direction" | "display" | "divisor" | "dominantBaseline" | "dur" | "dx" | "dy" | "edgeMode" | "elevation" | "enableBackground" | "end" | "exponent" | "externalResourcesRequired" | "fill" | "fillOpacity" | "fillRule" | "filter" | "filterRes" | "filterUnits" | "floodColor" | "floodOpacity" | "focusable" | "fontFamily" | "fontSize" | "fontSizeAdjust" | "fontStretch" | "fontStyle" | "fontVariant" | "fontWeight" | "format" | "from" | "fx" | "fy" | "g1" | "g2" | "glyphName" | "glyphOrientationHorizontal" | "glyphOrientationVertical" | "glyphRef" | "gradientTransform" | "gradientUnits" | "hanging" | "horizAdvX" | "horizOriginX" | "href" | "ideographic" | "imageRendering" | "in2" | "in" | "intercept" | "k1" | "k2" | "k3" | "k4" | "k" | "kernelMatrix" | "kernelUnitLength" | "kerning" | "keyPoints" | "keySplines" | "keyTimes" | "lengthAdjust" | "letterSpacing" | "lightingColor" | "limitingConeAngle" | "local" | "markerEnd" | "markerHeight" | "markerMid" | "markerStart" | "markerUnits" | "markerWidth" | "mask" | "maskContentUnits" | "maskUnits" | "mathematical" | "mode" | "numOctaves" | "offset" | "opacity" | "operator" | "order" | "orient" | "orientation" | "origin" | "overflow" | "overlinePosition" | "overlineThickness" | "paintOrder" | "panose1" | "pathLength" | "patternContentUnits" | "patternTransform" | "patternUnits" | "pointerEvents" | "points" | "pointsAtX" | "pointsAtY" | "pointsAtZ" | "preserveAlpha" | "preserveAspectRatio" | "primitiveUnits" | "r" | "radius" | "refX" | "refY" | "renderingIntent" | "repeatCount" | "repeatDur" | "requiredExtensions" | "requiredFeatures" | "restart" | "result" | "rotate" | "rx" | "ry" | "scale" | "seed" | "shapeRendering" | "slope" | "spacing" | "specularConstant" | "specularExponent" | "speed" | "spreadMethod" | "startOffset" | "stdDeviation" | "stemh" | "stemv" | "stitchTiles" | "stopColor" | "stopOpacity" | "strikethroughPosition" | "strikethroughThickness" | "stroke" | "strokeDasharray" | "strokeDashoffset" | "strokeLinecap" | "strokeLinejoin" | "strokeMiterlimit" | "strokeOpacity" | "strokeWidth" | "surfaceScale" | "systemLanguage" | "tableValues" | "targetX" | "targetY" | "textAnchor" | "textDecoration" | "textLength" | "textRendering" | "to" | "transform" | "u1" | "u2" | "underlinePosition" | "underlineThickness" | "unicode" | "unicodeBidi" | "unicodeRange" | "unitsPerEm" | "vAlphabetic" | "values" | "vectorEffect" | "version" | "vertAdvY" | "vertOriginX" | "vertOriginY" | "vHanging" | "vIdeographic" | "viewBox" | "viewTarget" | "visibility" | "vMathematical" | "widths" | "wordSpacing" | "writingMode" | "x1" | "x2" | "x" | "xChannelSelector" | "xHeight" | "xlinkActuate" | "xlinkArcrole" | "xlinkHref" | "xlinkRole" | "xlinkShow" | 
"xlinkTitle" | "xlinkType" | "xmlBase" | "xmlLang" | "xmlns" | "xmlnsXlink" | "xmlSpace" | "y1" | "y2" | "y" | "yChannelSelector" | "z" | "zoomAndPan" | "aria-activedescendant" | "aria-atomic" | "aria-autocomplete" | "aria-busy" | "aria-checked" | "aria-colcount" | "aria-colindex" | "aria-colspan" | "aria-controls" | "aria-current" | "aria-describedby" | "aria-details" | "aria-disabled" | "aria-dropeffect" | "aria-errormessage" | "aria-expanded" | "aria-flowto" | "aria-grabbed" | "aria-haspopup" | "aria-hidden" | "aria-invalid" | "aria-keyshortcuts" | "aria-label" | "aria-labelledby" | "aria-level" | "aria-live" | "aria-modal" | "aria-multiline" | "aria-multiselectable" | "aria-orientation" | "aria-owns" | "aria-placeholder" | "aria-posinset" | "aria-pressed" | "aria-readonly" | "aria-relevant" | "aria-required" | "aria-roledescription" | "aria-rowcount" | "aria-rowindex" | "aria-rowspan" | "aria-selected" | "aria-setsize" | "aria-sort" | "aria-valuemax" | "aria-valuemin" | "aria-valuenow" | "aria-valuetext" | "children" | "dangerouslySetInnerHTML" | "onCopy" | "onCopyCapture" | "onCut" | "onCutCapture" | "onPaste" | "onPasteCapture" | "onCompositionEnd" | "onCompositionEndCapture" | "onCompositionStart" | "onCompositionStartCapture" | "onCompositionUpdate" | "onCompositionUpdateCapture" | "onFocus" | "onFocusCapture" | "onBlur" | "onBlurCapture" | "onChange" | "onChangeCapture" | "onBeforeInput" | "onBeforeInputCapture" | "onInput" | "onInputCapture" | "onReset" | "onResetCapture" | "onSubmit" | "onSubmitCapture" | "onInvalid" | "onInvalidCapture" | "onLoad" | "onLoadCapture" | "onError" | "onErrorCapture" | "onKeyDown" | "onKeyDownCapture" | "onKeyPress" | "onKeyPressCapture" | "onKeyUp" | "onKeyUpCapture" | "onAbort" | "onAbortCapture" | "onCanPlay" | "onCanPlayCapture" | "onCanPlayThrough" | "onCanPlayThroughCapture" | "onDurationChange" | "onDurationChangeCapture" | "onEmptied" | "onEmptiedCapture" | "onEncrypted" | "onEncryptedCapture" | "onEnded" | "onEndedCapture" | "onLoadedData" | "onLoadedDataCapture" | "onLoadedMetadata" | "onLoadedMetadataCapture" | "onLoadStart" | "onLoadStartCapture" | "onPause" | "onPauseCapture" | "onPlay" | "onPlayCapture" | "onPlaying" | "onPlayingCapture" | "onProgress" | "onProgressCapture" | "onRateChange" | "onRateChangeCapture" | "onSeeked" | "onSeekedCapture" | "onSeeking" | "onSeekingCapture" | "onStalled" | "onStalledCapture" | "onSuspend" | "onSuspendCapture" | "onTimeUpdate" | "onTimeUpdateCapture" | "onVolumeChange" | "onVolumeChangeCapture" | "onWaiting" | "onWaitingCapture" | "onAuxClick" | "onAuxClickCapture" | "onClick" | "onClickCapture" | "onContextMenu" | "onContextMenuCapture" | "onDoubleClick" | "onDoubleClickCapture" | "onDrag" | "onDragCapture" | "onDragEnd" | "onDragEndCapture" | "onDragEnter" | "onDragEnterCapture" | "onDragExit" | "onDragExitCapture" | "onDragLeave" | "onDragLeaveCapture" | "onDragOver" | "onDragOverCapture" | "onDragStart" | "onDragStartCapture" | "onDrop" | "onDropCapture" | "onMouseDown" | "onMouseDownCapture" | "onMouseEnter" | "onMouseLeave" | "onMouseMove" | "onMouseMoveCapture" | "onMouseOut" | "onMouseOutCapture" | "onMouseOver" | "onMouseOverCapture" | "onMouseUp" | "onMouseUpCapture" | "onSelect" | "onSelectCapture" | "onTouchCancel" | "onTouchCancelCapture" | "onTouchEnd" | "onTouchEndCapture" | "onTouchMove" | "onTouchMoveCapture" | "onTouchStart" | "onTouchStartCapture" | "onPointerDown" | "onPointerDownCapture" | "onPointerMove" | "onPointerMoveCapture" | "onPointerUp" | "onPointerUpCapture" | 
"onPointerCancel" | "onPointerCancelCapture" | "onPointerEnter" | "onPointerEnterCapture" | "onPointerLeave" | "onPointerLeaveCapture" | "onPointerOver" | "onPointerOverCapture" | "onPointerOut" | "onPointerOutCapture" | "onGotPointerCapture" | "onGotPointerCaptureCapture" | "onLostPointerCapture" | "onLostPointerCaptureCapture" | "onScroll" | "onScrollCapture" | "onWheel" | "onWheelCapture" | "onAnimationStart" | "onAnimationStartCapture" | "onAnimationEnd" | "onAnimationEndCapture" | "onAnimationIteration" | "onAnimationIterationCapture" | "onTransitionEnd" | "onTransitionEndCapture" | "key" | "size" | "title"> & React.RefAttributes<SVGSVGElement>>;
export declare const EllipsisHDimensions: { | height: undefined;
width: undefined;
}; | |
into_cbuuid.rs | use objc::{class, msg_send, runtime::Object, sel, sel_impl};
use objc_foundation::{INSString, NSString};
use uuid::Uuid;
pub trait IntoCBUUID {
fn into_cbuuid(self) -> *mut Object;
}
impl IntoCBUUID for Uuid {
fn into_cbuuid(self) -> *mut Object {
let uuid = self.to_hyphenated().to_string();
let cls = class!(CBUUID);
unsafe {
let obj: *mut Object = msg_send![cls, alloc];
msg_send![obj, initWithString: NSString::from_str(&uuid)]
}
} | } |
|
lazy_acker_test.go | // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package lazy
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/fleetapi/acker/fleet"
repo "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/reporter"
)
func TestLazyAcker(t *testing.T) {
type ackRequest struct {
Events []fleetapi.AckEvent `json:"events"`
}
log, _ := logger.New("")
client := newTestingClient()
agentInfo := &testAgentInfo{}
acker, err := fleet.NewAcker(log, agentInfo, client)
if err != nil {
t.Fatal(err)
}
lacker := NewAcker(acker, log)
if acker == nil {
t.Fatal("acker not initialized")
}
testID1 := "ack-test-action-id"
testID2 := testID1 + "2"
testID3 := testID1 + "3"
testAction1 := &fleetapi.ActionUnknown{ActionID: testID1}
testAction2 := &actionImmediate{ActionID: testID2}
testAction3 := &fleetapi.ActionUnknown{ActionID: testID3}
ch := client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) {
content, err := ioutil.ReadAll(body)
assert.NoError(t, err)
cr := &ackRequest{}
err = json.Unmarshal(content, &cr)
assert.NoError(t, err)
if len(cr.Events) == 0 {
t.Fatal("expected events but got none")
}
if cr.Events[0].ActionID == testID1 {
assert.EqualValues(t, 2, len(cr.Events))
assert.EqualValues(t, testID1, cr.Events[0].ActionID)
assert.EqualValues(t, testID2, cr.Events[1].ActionID)
} else {
assert.EqualValues(t, 1, len(cr.Events))
}
resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`)
return resp, nil
})
go func() {
for range ch {
}
}()
c := context.Background()
if err := lacker.Ack(c, testAction1); err != nil {
t.Fatal(err)
}
if err := lacker.Ack(c, testAction2); err != nil {
t.Fatal(err)
}
if err := lacker.Ack(c, testAction3); err != nil {
t.Fatal(err)
}
if err := lacker.Commit(c); err != nil {
t.Fatal(err)
}
}
type actionImmediate struct {
ActionID string
ActionType string
originalType string
}
// Type returns the type of the Action.
func (a *actionImmediate) Type() string {
return "IMMEDIATE"
}
func (a *actionImmediate) ID() string {
return a.ActionID
}
func (a *actionImmediate) ForceAck() {}
func (a *actionImmediate) String() string {
var s strings.Builder
s.WriteString("action_id: ")
s.WriteString(a.ID())
s.WriteString(", type: ")
s.WriteString(a.Type())
s.WriteString(" (original type: ")
s.WriteString(a.OriginalType())
s.WriteString(")")
return s.String()
}
// OriginalType returns the original type of the action as returned by the API.
func (a *actionImmediate) OriginalType() string {
return a.originalType
}
type clientCallbackFunc func(headers http.Header, body io.Reader) (*http.Response, error)
type testingClient struct {
sync.Mutex
callback clientCallbackFunc
received chan struct{}
}
func (t *testingClient) Send(
_ context.Context,
method string,
path string,
params url.Values,
headers http.Header,
body io.Reader,
) (*http.Response, error) {
t.Lock()
defer t.Unlock()
defer func() { t.received <- struct{}{} }()
return t.callback(headers, body)
}
func (t *testingClient) URI() string {
return "http://localhost"
}
func (t *testingClient) Answer(fn clientCallbackFunc) <-chan struct{} {
t.Lock()
defer t.Unlock()
t.callback = fn
return t.received
}
func | () *testingClient {
return &testingClient{received: make(chan struct{}, 1)}
}
type testAgentInfo struct{}
func (testAgentInfo) AgentID() string { return "agent-secret" }
type testStateEvent struct{}
func (testStateEvent) Type() string { return repo.EventTypeState }
func (testStateEvent) SubType() string { return repo.EventSubTypeInProgress }
func (testStateEvent) Time() time.Time { return time.Unix(0, 1) }
func (testStateEvent) Message() string { return "hello" }
func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} }
func wrapStrToResp(code int, body string) *http.Response {
return &http.Response{
Status: fmt.Sprintf("%d %s", code, http.StatusText(code)),
StatusCode: code,
Proto: "HTTP/1.1",
ProtoMajor: 1,
ProtoMinor: 1,
Body: ioutil.NopCloser(bytes.NewBufferString(body)),
ContentLength: int64(len(body)),
Header: make(http.Header, 0),
}
}
| newTestingClient |
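Condensed, the batching behavior this test verifies looks roughly like the following inside the same package (a sketch reusing the test's own helpers; error handling elided):
log, _ := logger.New("")
acker, _ := fleet.NewAcker(log, &testAgentInfo{}, newTestingClient())
lacker := NewAcker(acker, log) // lazy wrapper: Ack only queues
ctx := context.Background()
_ = lacker.Ack(ctx, &fleetapi.ActionUnknown{ActionID: "a1"}) // queued, not sent
_ = lacker.Ack(ctx, &fleetapi.ActionUnknown{ActionID: "a2"}) // queued, not sent
_ = lacker.Commit(ctx) // both acks go out in a single batched request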
config_command.go | package run
import (
"fmt"
"os"
"github.com/cnosdatabase/cnosdb/server"
"github.com/BurntSushi/toml"
"github.com/spf13/cobra"
)
var config_examples = ` cnosdb config`
func GetConfigCommand() *cobra.Command {
c := &cobra.Command{
Use: "config",
Short: "display the default configuration",
Long: "Displays the default configuration.", | DisableNoDescFlag: true,
},
RunE: func(cmd *cobra.Command, args []string) error {
var path string
if c := cmd.Flag("config"); c != nil {
path = c.Value.String()
}
c, err := server.NewDemoConfig()
if err != nil {
c = server.NewConfig()
}
if path != "" {
fmt.Fprintf(os.Stderr, "Merging with configuration at: %s\n", path)
if err := c.FromTomlFile(path); err != nil {
return err
}
if err := c.ApplyEnvOverrides(os.Getenv); err != nil {
return fmt.Errorf("apply env config: %v", err)
}
if err := c.Validate(); err != nil {
return fmt.Errorf("%s. To generate a valid configuration file run `cnosdb config > cnosdb.generated.conf`", err)
}
}
toml.NewEncoder(os.Stdout).Encode(c)
return nil
},
}
c.Flags().StringP("config", "c", "", `Set the path to the configuration file.
This defaults to the environment variable CNOSDB_CONFIG_PATH,
~/.cnosdb/cnosdb.conf, or /etc/cnosdb/cnosdb.conf if a file
is present at any of these locations.
Disable the automatic loading of a configuration file using
the null device (such as /dev/null)`)
return c
} | Example: config_examples,
CompletionOptions: cobra.CompletionOptions{
DisableDefaultCmd: true,
DisableDescriptions: true, |
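For context, a plausible way to wire this subcommand into a root command; the root command name and the import path of this run package are assumptions for illustration:
package main
import (
"os"
"github.com/cnosdatabase/cnosdb/cmd/cnosdb/run" // assumed path of this package
"github.com/spf13/cobra"
)
func main() {
root := &cobra.Command{Use: "cnosdb"}
root.AddCommand(run.GetConfigCommand())
if err := root.Execute(); err != nil {
os.Exit(1)
}
}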
message.rs | use std::io::net::tcp::TcpStream;
use std::io::{IoResult, IoError};
use std::io::IoErrorKind::InvalidInput;
use std::num;
pub use self::Payload::{Text, Binary, Empty};
pub use self::Opcode::{ContinuationOp, TextOp, BinaryOp, CloseOp, PingOp, PongOp};
#[derive(Clone, Show)]
pub enum Payload {
Text(String),
Binary(Vec<u8>),
Empty
}
#[derive(Clone, FromPrimitive, Show)]
pub enum Opcode {
ContinuationOp = 0x0,
TextOp = 0x1,
BinaryOp = 0x2,
CloseOp = 0x8,
PingOp = 0x9,
PongOp = 0xA,
}
impl Copy for Opcode { }
// this struct will eventually encapsulate data framing, opcodes, ws extensions, masks etc
// right now, only single frames, with a text payload are supported
#[derive(Clone, Show)]
pub struct Message {
pub payload: Payload,
pub opcode: Opcode
}
impl Message {
pub fn load(stream: &mut TcpStream) -> IoResult<Box<Message>> {
let vec1 = try!(stream.read_exact(2));
let buf1 = vec1.as_slice();
//let fin = buf1[0] >> 7; // TODO check this, required for handling fragmented messages
let rsv = (buf1[0] >> 4) & 0b0111;
if rsv != 0 {
return Err(IoError {
kind: InvalidInput,
desc: "Only RSV=0 allowed, no extension has been negotiated.",
detail: None
});
}
let opcode = buf1[0] & 0b0000_1111;
let opcode: Opcode = num::from_u8(opcode).unwrap(); // all valid RFC 6455 Opcodes specified, invalid ones will panic!()
//let mask = buf1[1] & 0b1000_0000; TODO use this to determine whether to unmask or not
let pay_len = buf1[1] & 0b0111_1111;
let payload_length = match pay_len {
127 => try!(stream.read_be_u64()), // 8 bytes in network byte order
126 => try!(stream.read_be_u16()) as u64, // 2 bytes in network byte order
_ => pay_len as u64
};
debug!("payload_length: {}", payload_length);
// payloads larger than 125 bytes are not allowed for control frames
match opcode {
CloseOp | PingOp if payload_length > 125 => panic!(),
_ => ()
}
let masking_key = try!(stream.read_exact(4));
let mut masked_payload_buf = try!(stream.read_exact(payload_length as uint));
// unmask the payload in-place
for (i, octet) in masked_payload_buf.iter_mut().enumerate() {
*octet = *octet ^ masking_key[i % 4];
}
let payload_buf = masked_payload_buf;
let payload: Payload = match opcode {
TextOp => Text(String::from_utf8(payload_buf).unwrap()), // unsafe unwrap? failures during autobahn
BinaryOp => Binary(payload_buf),
CloseOp => Empty,
PingOp => Binary(payload_buf),
PongOp => Binary(payload_buf),
_ => unimplemented!(), // ContinuationOp
};
let message = box Message {
payload: payload,
opcode: opcode
};
return Ok(message);
}
// FIXME support for clients - masking for clients, but we need to know whether
// it's a client or server doing the sending. Maybe a private `send` with
// the common code, and public `client_send` and `server_send` methods.
// These methods will be called by the WebSocketClient and WebSocketServer
// traits respectively, and the interface for both clients and servers is
// the same - just send on the channel, and the trait takes care of it
pub fn send(&self, stream: &mut TcpStream) -> IoResult<()> { | let payload_length = match self.payload {
Text(ref p) => p.len(),
Binary(ref p) => p.len(),
Empty => 0,
};
try!(stream.write_u8(0b1000_0000 | self.opcode as u8)); // fin: 1, rsv: 000, opcode: see Opcode
// FIXME: this assumes a server. the first bit, which is the "mask" bit, is implicitly set as 0 here, as required for ws servers
if payload_length <= 125 {
try!(stream.write_u8(payload_length as u8));
} else if payload_length <= 65535 {
try!(stream.write_u8(126));
try!(stream.write_be_u16(payload_length as u16));
} else if payload_length > 65535 {
try!(stream.write_u8(127));
try!(stream.write_be_u64(payload_length as u64));
}
match self.payload {
Text(ref p) => try!(stream.write((*p).as_slice().as_bytes())),
Binary(ref p) => try!(stream.write((*p).as_slice())),
Empty => {},
}
try!(stream.flush());
return Ok(());
}
} | |
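The 7-bit/16-bit/64-bit payload-length rule implemented in send() above is the generic RFC 6455 scheme; the same logic, sketched in Go for comparison (the function name is illustrative):
package main
import (
"bytes"
"encoding/binary"
)
// encodePayloadLen writes the RFC 6455 length field: values up to 125 inline,
// 126 as an escape for a 16-bit big-endian length, 127 for a 64-bit one.
func encodePayloadLen(buf *bytes.Buffer, n uint64) {
switch {
case n <= 125:
buf.WriteByte(byte(n))
case n <= 65535:
buf.WriteByte(126)
binary.Write(buf, binary.BigEndian, uint16(n))
default:
buf.WriteByte(127)
binary.Write(buf, binary.BigEndian, n)
}
}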
user_modeling.py | #!/usr/bin/python
#coding:utf-8
import numpy as np
import logging | import mykmeans as ml
logger = logging.getLogger(__name__)
logger.setLevel(logging.ERROR)
def str2num(s):
a = ['very_low', 'Low', 'Middle', 'High']
for i in range(0, len(a)):
if a[i] == s:
return float(i)
if __name__ == '__main__':
filename = './data/data_user_modeling.txt'
train_data = np.loadtxt(filename, delimiter = ',', converters = {5:str2num})
logger.debug(train_data)
logger.debug(train_data.shape)
train_x = train_data[:,0:-1]
train_y = train_data[:,-1]
logger.debug(train_x)
logger.debug(train_y)
param = {}
param['use_random_for_k'] = 1
param['k'] = [i for i in range(0, 258, 1)]
param['n_clusters'] = 4
param['max_iter'] = 100
kmeans = ml.Kmeans(param)
kmeans.Fit(train_x)
# logger.debug(kmeans)
pred = kmeans.Predict(train_x)
logger.info('train_y:%s', train_y)
logger.info(' pred:%s', pred)
# logger.info('k-means accuracy: %f', 1.0*sum(pred == train_y)/len(train_y))
# ml.PickingRightK(train_x, param)
import myplot
myplot.Figure()
ml.FitMulti(train_x, param, 100)
ml.BisectingFitMulti(train_x, param, 100)
myplot.Legend(['k-means','bisecting'])
myplot.Title('user modeling')
myplot.Show() | import mylog |
accept_channel.go | package lnwire
import (
"io"
"github.com/roasbeef/btcd/btcec"
"github.com/roasbeef/btcutil"
)
// AcceptChannel is the message Bob sends to Alice after she initiates the
// single funder channel workflow via an OpenChannel message. Once Alice
// receives Bob's response, then she has all the items necessary to construct
// the funding transaction, and both commitment transactions.
type AcceptChannel struct {
// PendingChannelID serves to uniquely identify the future channel
// created by the initiated single funder workflow.
PendingChannelID [32]byte
// DustLimit is the specific dust limit the sender of this message
// would like enforced on their version of the commitment transaction.
// Any output below this value will be "trimmed" from the commitment
// transaction, with the amount of the HTLC going to dust.
DustLimit btcutil.Amount
// MaxValueInFlight represents the maximum amount of coins that can be
// pending within the channel at any given time. If the amount of funds
// in limbo exceeds this amount, then the channel will be failed.
MaxValueInFlight MilliSatoshi
// ChannelReserve is the amount of BTC that the receiving party MUST
// maintain a balance above at all times. This is a safety mechanism to
// ensure that both sides always have skin in the game during the
// channel's lifetime.
ChannelReserve btcutil.Amount
// HtlcMinimum is the smallest HTLC that the sender of this message
// will accept.
HtlcMinimum MilliSatoshi
// MinAcceptDepth is the minimum depth that the initiator of the
// channel should wait before considering the channel open.
MinAcceptDepth uint32
// CsvDelay is the number of blocks to use for the relative time lock
// in the pay-to-self output of both commitment transactions.
CsvDelay uint16
// MaxAcceptedHTLCs is the total number of incoming HTLC's that the
// sender of this channel will accept.
//
// TODO(roasbeef): acks the initiator's, same with max in flight?
MaxAcceptedHTLCs uint16
// FundingKey is the key that should be used on behalf of the sender
// within the 2-of-2 multi-sig output that it contained within the
// funding transaction.
FundingKey *btcec.PublicKey
// RevocationPoint is the base revocation point for the sending party.
// Any commitment transaction belonging to the receiver of this message
// should use this key and their per-commitment point to derive the
// revocation key for the commitment transaction.
RevocationPoint *btcec.PublicKey
// PaymentPoint is the base payment point for the sending party. This
// key should be combined with the per commitment point for a
// particular commitment state in order to create the key that should
// be used in any output that pays directly to the sending party, and
// also within the HTLC covenant transactions.
PaymentPoint *btcec.PublicKey
// DelayedPaymentPoint is the delay point for the sending party. This
// key should be combined with the per commitment point to derive the
// keys that are used in outputs of the sender's commitment transaction
// where they claim funds.
DelayedPaymentPoint *btcec.PublicKey
// FirstCommitmentPoint is the first commitment point for the sending
// party. This value should be combined with the receiver's revocation
// base point in order to derive the revocation keys that are placed
// within the commitment transaction of the sender.
FirstCommitmentPoint *btcec.PublicKey
}
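// Illustrative sketch (hedged): populating and wire-encoding an AcceptChannel
// with the Encode method defined below. Real channel keys come from the
// wallet; btcec.NewPrivateKey merely stands in here.
//
// priv, _ := btcec.NewPrivateKey(btcec.S256())
// a := &AcceptChannel{
// MinAcceptDepth: 3,
// CsvDelay: 144,
// FundingKey: priv.PubKey(),
// }
// var b bytes.Buffer
// _ = a.Encode(&b, 0) // b.Bytes() now holds the serialized message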
// A compile time check to ensure AcceptChannel implements the lnwire.Message
// interface.
var _ Message = (*AcceptChannel)(nil)
// Encode serializes the target AcceptChannel into the passed io.Writer
// implementation. Serialization will observe the rules defined by the passed
// protocol version.
//
// This is part of the lnwire.Message interface.
func (a *AcceptChannel) Encode(w io.Writer, pver uint32) error {
return writeElements(w,
a.PendingChannelID[:],
a.DustLimit,
a.MaxValueInFlight,
a.ChannelReserve,
a.HtlcMinimum,
a.MinAcceptDepth,
a.CsvDelay,
a.MaxAcceptedHTLCs,
a.FundingKey,
a.RevocationPoint,
a.PaymentPoint,
a.DelayedPaymentPoint,
a.FirstCommitmentPoint,
)
}
// Decode deserializes the serialized AcceptChannel stored in the passed
// io.Reader into the target AcceptChannel using the deserialization rules
// defined by the passed protocol version.
//
// This is part of the lnwire.Message interface.
func (a *AcceptChannel) Decode(r io.Reader, pver uint32) error {
return readElements(r,
a.PendingChannelID[:],
&a.DustLimit,
&a.MaxValueInFlight,
&a.ChannelReserve,
&a.HtlcMinimum,
&a.MinAcceptDepth,
&a.CsvDelay,
&a.MaxAcceptedHTLCs,
&a.FundingKey,
&a.RevocationPoint,
&a.PaymentPoint,
&a.DelayedPaymentPoint,
&a.FirstCommitmentPoint,
)
}
// MsgType returns the MessageType code which uniquely identifies this message
// as a AcceptChannel on the wire.
// | return MsgAcceptChannel
}
// MaxPayloadLength returns the maximum allowed payload length for a
// AcceptChannel message.
//
// This is part of the lnwire.Message interface.
func (a *AcceptChannel) MaxPayloadLength(uint32) uint32 {
// 32 + (8 * 4) + (4 * 1) + (2 * 2) + (33 * 5)
return 237
} | // This is part of the lnwire.Message interface.
func (a *AcceptChannel) MsgType() MessageType { |
mac_address3_high.rs | #[doc = "Reader of register MAC_ADDRESS3_HIGH"]
pub type R = crate::R<u32, super::MAC_ADDRESS3_HIGH>;
#[doc = "Writer for register MAC_ADDRESS3_HIGH"]
pub type W = crate::W<u32, super::MAC_ADDRESS3_HIGH>;
#[doc = "Register MAC_ADDRESS3_HIGH `reset()`'s with value 0xffff"]
impl crate::ResetValue for super::MAC_ADDRESS3_HIGH {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0xffff
}
}
#[doc = "Reader of field `ADDRHI`"]
pub type ADDRHI_R = crate::R<u16, u16>;
#[doc = "Write proxy for field `ADDRHI`"]
pub struct ADDRHI_W<'a> {
w: &'a mut W,
}
impl<'a> ADDRHI_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u16) -> &'a mut W {
self.w.bits = (self.w.bits & !0xffff) | ((value as u32) & 0xffff);
self.w
}
}
#[doc = "Reader of field `MBC`"]
pub type MBC_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `MBC`"]
pub struct MBC_W<'a> {
w: &'a mut W,
}
impl<'a> MBC_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x3f << 24)) | (((value as u32) & 0x3f) << 24);
self.w
}
}
#[doc = "Reader of field `SA`"]
pub type SA_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `SA`"]
pub struct SA_W<'a> {
w: &'a mut W,
}
impl<'a> SA_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W { | pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 30)) | (((value as u32) & 0x01) << 30);
self.w
}
}
#[doc = "Reader of field `AE`"]
pub type AE_R = crate::R<bool, bool>;
#[doc = "Write proxy for field `AE`"]
pub struct AE_W<'a> {
w: &'a mut W,
}
impl<'a> AE_W<'a> {
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x01 << 31)) | (((value as u32) & 0x01) << 31);
self.w
}
}
impl R {
#[doc = "Bits 0:15 - MAC Address3 \\[47:32\\]"]
#[inline(always)]
pub fn addrhi(&self) -> ADDRHI_R {
ADDRHI_R::new((self.bits & 0xffff) as u16)
}
#[doc = "Bits 24:29 - Mask Byte Control"]
#[inline(always)]
pub fn mbc(&self) -> MBC_R {
MBC_R::new(((self.bits >> 24) & 0x3f) as u8)
}
#[doc = "Bit 30 - Source Address"]
#[inline(always)]
pub fn sa(&self) -> SA_R {
SA_R::new(((self.bits >> 30) & 0x01) != 0)
}
#[doc = "Bit 31 - Address Enable"]
#[inline(always)]
pub fn ae(&self) -> AE_R {
AE_R::new(((self.bits >> 31) & 0x01) != 0)
}
}
impl W {
#[doc = "Bits 0:15 - MAC Address3 \\[47:32\\]"]
#[inline(always)]
pub fn addrhi(&mut self) -> ADDRHI_W {
ADDRHI_W { w: self }
}
#[doc = "Bits 24:29 - Mask Byte Control"]
#[inline(always)]
pub fn mbc(&mut self) -> MBC_W {
MBC_W { w: self }
}
#[doc = "Bit 30 - Source Address"]
#[inline(always)]
pub fn sa(&mut self) -> SA_W {
SA_W { w: self }
}
#[doc = "Bit 31 - Address Enable"]
#[inline(always)]
pub fn ae(&mut self) -> AE_W {
AE_W { w: self }
}
} | self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)] |
serverinit_test.go | /*
Copyright 2012 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serverinit_test
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"reflect"
"regexp"
"runtime"
"sort"
"strings"
"testing"
"camlistore.org/pkg/auth"
"camlistore.org/pkg/httputil"
"camlistore.org/pkg/importer"
"camlistore.org/pkg/jsonconfig"
"camlistore.org/pkg/jsonsign/signhandler"
"camlistore.org/pkg/osutil"
"camlistore.org/pkg/search"
"camlistore.org/pkg/server"
"camlistore.org/pkg/serverinit"
"camlistore.org/pkg/test"
"camlistore.org/pkg/types/serverconfig"
// For registering all the handler constructors needed in TestInstallHandlers
_ "camlistore.org/pkg/blobserver/cond"
_ "camlistore.org/pkg/blobserver/replica"
_ "camlistore.org/pkg/importer/allimporters"
_ "camlistore.org/pkg/search"
_ "camlistore.org/pkg/server"
)
var (
updateGolden = flag.Bool("update_golden", false, "Update golden *.want files")
flagOnly = flag.String("only", "", "If non-empty, substring of foo.json input file to match.")
)
const (
// relativeRing points to a real secret ring, but serverinit
// rewrites it to be an absolute path. We then canonicalize
// it to secringPlaceholder in the golden files.
relativeRing = "../jsonsign/testdata/test-secring.gpg"
secringPlaceholder = "/path/to/secring"
)
func init() |
func sortedKeys(m map[string]interface{}) (keys []string) {
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
return
}
func prettyPrint(t *testing.T, w io.Writer, v interface{}) {
out, err := json.MarshalIndent(v, "", " ")
if err != nil {
t.Fatal(err)
}
w.Write(out)
}
func TestConfigs(t *testing.T) {
dir, err := os.Open("testdata")
if err != nil {
t.Fatal(err)
}
names, err := dir.Readdirnames(-1)
if err != nil {
t.Fatal(err)
}
for _, name := range names {
if *flagOnly != "" && !strings.Contains(name, *flagOnly) {
continue
}
if strings.HasSuffix(name, ".json") {
if strings.HasSuffix(name, "-want.json") {
continue
}
testConfig(filepath.Join("testdata", name), t)
}
}
}
type namedReadSeeker struct {
name string
io.ReadSeeker
}
func (n namedReadSeeker) Name() string { return n.name }
func (n namedReadSeeker) Close() error { return nil }
// configParser returns a custom jsonconfig ConfigParser whose reader rewrites
// "/path/to/secring" to the absolute path of the jsonconfig test-secring.gpg file.
// On windows, it also fixes the slash separated paths.
func configParser() *jsonconfig.ConfigParser {
return &jsonconfig.ConfigParser{
Open: func(path string) (jsonconfig.File, error) {
slurp, err := replaceRingPath(path)
if err != nil {
return nil, err
}
slurp = backslashEscape(slurp)
return namedReadSeeker{path, bytes.NewReader(slurp)}, nil
},
}
}
// replaceRingPath returns the contents of the file at path with secringPlaceholder replaced with the absolute path of relativeRing.
func replaceRingPath(path string) ([]byte, error) {
secRing, err := filepath.Abs(relativeRing)
if err != nil {
return nil, fmt.Errorf("Could not get absolute path of %v: %v", relativeRing, err)
}
secRing = strings.Replace(secRing, `\`, `\\`, -1)
slurpBytes, err := ioutil.ReadFile(path)
if err != nil {
return nil, err
}
return bytes.Replace(slurpBytes, []byte(secringPlaceholder), []byte(secRing), 1), nil
}
// We just need to make sure that we don't match the prefix handlers too.
var unixPathPattern = regexp.MustCompile(`"/.*/.+"`)
// backslashEscape, on windows, changes all the slash separated paths (which
// match unixPathPattern, to omit the prefix handler paths) with escaped
// backslashes.
func backslashEscape(b []byte) []byte {
if runtime.GOOS != "windows" {
return b
}
unixPaths := unixPathPattern.FindAll(b, -1)
if unixPaths == nil {
return b
}
var oldNew []string
for _, v := range unixPaths {
bStr := string(v)
oldNew = append(oldNew, bStr, strings.Replace(bStr, `/`, `\\`, -1))
}
r := strings.NewReplacer(oldNew...)
return []byte(r.Replace(string(b)))
}
func testConfig(name string, t *testing.T) {
wantedError := func() error {
slurp, err := ioutil.ReadFile(strings.Replace(name, ".json", ".err", 1))
if os.IsNotExist(err) {
return nil
}
if err != nil {
t.Fatalf("Error reading .err file: %v", err)
}
return errors.New(string(slurp))
}
b, err := replaceRingPath(name)
if err != nil {
t.Fatalf("Could not read %s: %v", name, err)
}
b = backslashEscape(b)
var hiLevelConf serverconfig.Config
if err := json.Unmarshal(b, &hiLevelConf); err != nil {
t.Fatalf("Could not unmarshal %s into a serverconfig.Config: %v", name, err)
}
lowLevelConf, err := serverinit.GenLowLevelConfig(&hiLevelConf)
if g, w := strings.TrimSpace(fmt.Sprint(err)), strings.TrimSpace(fmt.Sprint(wantedError())); g != w {
t.Fatalf("test %s: got GenLowLevelConfig error %q; want %q", name, g, w)
}
if err != nil {
return
}
if err := (&jsonconfig.ConfigParser{}).CheckTypes(lowLevelConf.Obj); err != nil {
t.Fatalf("Error while parsing low-level conf generated from %v: %v", name, err)
}
wantFile := strings.Replace(name, ".json", "-want.json", 1)
wantConf, err := configParser().ReadFile(wantFile)
if err != nil {
t.Fatalf("test %s: ReadFile: %v", name, err)
}
var got, want bytes.Buffer
prettyPrint(t, &got, lowLevelConf.Obj)
prettyPrint(t, &want, wantConf)
if *updateGolden {
contents, err := json.MarshalIndent(lowLevelConf.Obj, "", "\t")
if err != nil {
t.Fatal(err)
}
contents = canonicalizeGolden(t, contents)
if err := ioutil.WriteFile(wantFile, contents, 0644); err != nil {
t.Fatal(err)
}
}
if got.String() != want.String() {
t.Errorf("test %s configurations differ.\nGot:\n%s\nWant:\n%s\nDiff (want -> got), %s:\n%s",
name, &got, &want, name, test.Diff(want.Bytes(), got.Bytes()))
}
}
func canonicalizeGolden(t *testing.T, v []byte) []byte {
localPath, err := filepath.Abs(relativeRing)
if err != nil {
t.Fatal(err)
}
v = bytes.Replace(v, []byte(localPath), []byte(secringPlaceholder), 1)
if !bytes.HasSuffix(v, []byte("\n")) {
v = append(v, '\n')
}
return v
}
func TestExpansionsInHighlevelConfig(t *testing.T) {
camroot, err := osutil.GoPackagePath("camlistore.org")
if err != nil {
t.Fatalf("failed to find camlistore.org GOPATH root: %v", err)
}
const keyID = "26F5ABDA"
os.Setenv("TMP_EXPANSION_TEST", keyID)
os.Setenv("TMP_EXPANSION_SECRING", filepath.Join(camroot, filepath.FromSlash("pkg/jsonsign/testdata/test-secring.gpg")))
conf, err := serverinit.Load([]byte(`
{
"auth": "localhost",
"listen": ":4430",
"https": false,
"identity": ["_env", "${TMP_EXPANSION_TEST}"],
"identitySecretRing": ["_env", "${TMP_EXPANSION_SECRING}"],
"googlecloudstorage": ":camlistore-dev-blobs",
"kvIndexFile": "/tmp/camli-index.kvdb"
}
`))
if err != nil {
t.Fatal(err)
}
got := fmt.Sprintf("%#v", conf)
if !strings.Contains(got, keyID) {
t.Errorf("Expected key %s in resulting low-level config. Got: %s", keyID, got)
}
}
func TestInstallHandlers(t *testing.T) {
camroot, err := osutil.GoPackagePath("camlistore.org")
if err != nil {
t.Fatalf("failed to find camlistore.org GOPATH root: %v", err)
}
conf := serverinit.DefaultBaseConfig
conf.Identity = "26F5ABDA"
conf.IdentitySecretRing = filepath.Join(camroot, filepath.FromSlash("pkg/jsonsign/testdata/test-secring.gpg"))
conf.MemoryStorage = true
conf.MemoryIndex = true
confData, err := json.MarshalIndent(conf, "", " ")
if err != nil {
t.Fatalf("Could not json encode config: %v", err)
}
lowConf, err := serverinit.Load(confData)
if err != nil {
t.Fatal(err)
}
// because these two are normally consumed in camlistored.go
// TODO(mpl): serverinit.Load should consume these 2 as well. Once
// consumed, we should keep all the answers as private fields, and then we
// put accessors on serverinit.Config. Maybe we even stop embedding
// jsonconfig.Obj in serverinit.Config too, so none of those methods are
// accessible.
lowConf.OptionalBool("https", true)
lowConf.OptionalString("listen", "")
reindex := false
var context *http.Request // only used by App Engine. See handlerLoader in serverinit.go
hi := http.NewServeMux()
address := "http://" + conf.Listen
_, err = lowConf.InstallHandlers(hi, address, reindex, context)
if err != nil {
t.Fatal(err)
}
tests := []struct {
prefix string
authWrapped bool
prefixWrapped bool
handlerType reflect.Type
}{
{
prefix: "/",
handlerType: reflect.TypeOf(&server.RootHandler{}),
prefixWrapped: true,
},
{
prefix: "/sync/",
handlerType: reflect.TypeOf(&server.SyncHandler{}),
prefixWrapped: true,
authWrapped: true,
},
{
prefix: "/my-search/",
handlerType: reflect.TypeOf(&search.Handler{}),
prefixWrapped: true,
authWrapped: true,
},
{
prefix: "/ui/",
handlerType: reflect.TypeOf(&server.UIHandler{}),
prefixWrapped: true,
authWrapped: true,
},
{
prefix: "/importer/",
handlerType: reflect.TypeOf(&importer.Host{}),
prefixWrapped: true,
},
{
prefix: "/sighelper/",
handlerType: reflect.TypeOf(&signhandler.Handler{}),
prefixWrapped: true,
authWrapped: true,
},
{
prefix: "/status/",
handlerType: reflect.TypeOf(&server.StatusHandler{}),
prefixWrapped: true,
authWrapped: true,
},
{
prefix: "/setup/",
handlerType: reflect.TypeOf(&server.SetupHandler{}),
prefixWrapped: true,
},
{
prefix: "/bs/camli/",
handlerType: reflect.TypeOf(http.HandlerFunc(nil)),
},
{
prefix: "/index/camli/",
handlerType: reflect.TypeOf(http.HandlerFunc(nil)),
},
{
prefix: "/bs-and-index/camli/",
handlerType: reflect.TypeOf(http.HandlerFunc(nil)),
},
{
prefix: "/bs-and-maybe-also-index/camli/",
handlerType: reflect.TypeOf(http.HandlerFunc(nil)),
},
{
prefix: "/cache/camli/",
handlerType: reflect.TypeOf(http.HandlerFunc(nil)),
},
}
for _, v := range tests {
req, err := http.NewRequest("GET", address+v.prefix, nil)
if err != nil {
t.Error(err)
continue
}
h, _ := hi.Handler(req)
if v.authWrapped {
ah, ok := h.(auth.Handler)
if !ok {
t.Errorf("handler for %v should be auth wrapped", v.prefix)
continue
}
h = ah.Handler
}
if v.prefixWrapped {
ph, ok := h.(*httputil.PrefixHandler)
if !ok {
t.Errorf("handler for %v should be prefix wrapped", v.prefix)
continue
}
h = ph.Handler
}
if reflect.TypeOf(h) != v.handlerType {
t.Errorf("for %v: want %v, got %v", v.prefix, v.handlerType, reflect.TypeOf(h))
}
}
}
| {
// Avoid Linux vs. OS X differences in tests.
serverinit.SetTempDirFunc(func() string { return "/tmp" })
serverinit.SetNoMkdir(true)
} |
lib.rs | #![deny(missing_docs)]
//! A generic library for textures.
//!
//! This library is used in Piston for generic code when working with textures.
//!
//! The `ImageSize` trait is used for passing textures around for rendering.
//! For more information, see
//! [Piston-Graphics](https://github.com/pistondevelopers/graphics).
pub mod ops;
/// Implemented by all images to be used with generic algorithms.
pub trait ImageSize {
/// Get the image size.
fn get_size(&self) -> (u32, u32);
/// Gets the image width.
#[inline(always)]
fn get_width(&self) -> u32 {
let (w, _) = self.get_size();
w
}
/// Gets the image height.
#[inline(always)]
fn get_height(&self) -> u32 {
let (_, h) = self.get_size();
h
}
}
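// Illustrative sketch (not part of the original crate): implementing
// `ImageSize` for an invented stub type exercises the default accessors.
#[cfg(test)]
mod image_size_example {
use super::ImageSize;
struct StubTexture { w: u32, h: u32 }
impl ImageSize for StubTexture {
fn get_size(&self) -> (u32, u32) { (self.w, self.h) }
}
#[test]
fn default_accessors_come_from_get_size() {
let t = StubTexture { w: 256, h: 128 };
assert_eq!((t.get_width(), t.get_height()), (256, 128));
}
}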
/// Texture creation parameters.
pub struct TextureSettings {
// Whether to convert gamma, treated as sRGB color space
convert_gamma: bool,
// Compress on GPU.
compress: bool,
// Generate mipmap chain.
generate_mipmap: bool,
// Filtering Mode for Minifying
min: Filter,
// Filtering Mode for Magnifying
mag: Filter,
// Filtering Mode for Minify Mipmapping
mipmap: Filter
}
impl TextureSettings {
/// Create default settings.
pub fn new() -> TextureSettings {
TextureSettings {
convert_gamma: false,
compress: false,
generate_mipmap: false,
min: Filter::Linear,
mag: Filter::Linear,
mipmap: Filter::Linear,
}
}
/// Gets whether to convert gamma, treated as sRGB color space.
pub fn get_convert_gamma(&self) -> bool { self.convert_gamma }
/// Sets convert gamma.
pub fn set_convert_gamma(&mut self, val: bool) { self.convert_gamma = val; }
/// Sets convert gamma.
pub fn convert_gamma(mut self, val: bool) -> Self {
self.set_convert_gamma(val);
self
}
/// Gets whether to compress on the GPU.
pub fn get_compress(&self) -> bool { self.compress }
/// Sets compress.
pub fn set_compress(&mut self, val: bool) { self.compress = val; }
/// Sets compress.
pub fn compress(mut self, val: bool) -> Self |
/// Gets generate mipmap.
pub fn get_generate_mipmap(&self) -> bool { self.generate_mipmap }
/// Sets generate mipmap.
pub fn set_generate_mipmap(&mut self, val: bool) {
self.generate_mipmap = val;
}
/// Sets generate mipmap.
pub fn generate_mipmap(mut self, val: bool) -> Self {
self.set_generate_mipmap(val);
self
}
/// Gets minify filter.
pub fn get_min(&self) -> Filter { self.min }
/// Sets minify filter.
pub fn set_min(&mut self, val: Filter) {
self.min = val
}
/// Sets minify filter.
pub fn min(mut self, val: Filter) -> Self {
self.set_min(val);
self
}
/// Gets magnify filter
pub fn get_mag(&self) -> Filter { self.mag }
/// Sets magnify filter
pub fn set_mag(&mut self, val: Filter) {
self.mag = val;
}
/// Sets magnify filter
pub fn mag(mut self, val: Filter) -> Self {
self.set_mag(val);
self
}
/// Gets minify mipmap filter
pub fn get_mipmap(&self) -> Filter { self.mipmap }
/// Sets the mipmap filter, and sets generate_mipmap to true.
pub fn set_mipmap(&mut self, val: Filter) {
self.set_generate_mipmap(true);
self.mipmap = val;
}
/// Sets the mipmap filter, and sets generate_mipmap to true
pub fn mipmap(mut self, val: Filter) -> Self {
self.set_mipmap(val);
self
}
/// Returns the min and mag filter
pub fn get_filter(&self) -> (Filter, Filter) { (self.min, self.mag) }
/// Sets the min and mag filter
pub fn set_filter(&mut self, val: Filter) {
self.set_min(val);
self.set_mag(val);
}
/// Sets the min and mag filter
pub fn filter(mut self, val: Filter) -> Self {
self.set_filter(val);
self
}
}
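// Illustrative sketch (not part of the original crate): the consuming
// builder methods let a settings value be assembled in a single chain.
#[cfg(test)]
mod texture_settings_example {
use super::{Filter, TextureSettings};
#[test]
fn builder_chain_sets_fields() {
let s = TextureSettings::new()
.convert_gamma(true)
.compress(true)
.filter(Filter::Nearest);
assert!(s.get_convert_gamma());
assert!(s.get_compress());
// `filter` applies the same mode to both minify and magnify.
match s.get_filter() {
(Filter::Nearest, Filter::Nearest) => {}
_ => panic!("filter should set both min and mag"),
}
}
}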
/// Texture format.
#[derive(Copy, Clone, Debug)]
pub enum Format {
/// `(red, green, blue, alpha)` with values 0-255.
Rgba8,
}
/// Implemented by textures for creation.
pub trait CreateTexture<F>: ImageSize + Sized {
/// The error when creating texture.
type Error;
/// Create texture from memory.
fn create<S: Into<[u32; 2]>>(
factory: &mut F,
format: Format,
memory: &[u8],
size: S,
settings: &TextureSettings
) -> Result<Self, Self::Error>;
}
/// Implemented by textures for updating.
pub trait UpdateTexture<F>: ImageSize + Sized {
/// The error when updating texture.
type Error;
/// Update texture.
fn update<S: Into<[u32; 2]>>(
&mut self,
factory: &mut F,
format: Format,
memory: &[u8],
size: S,
) -> Result<(), Self::Error>;
}
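// Illustrative backend sketch (`RamTexture` is invented for this example):
// a RAM-backed texture could implement `CreateTexture` with `()` as its
// factory type, simply copying the RGBA bytes it is handed.
//
// impl CreateTexture<()> for RamTexture {
// type Error = ();
// fn create<S: Into<[u32; 2]>>(
// _factory: &mut (), _format: Format, memory: &[u8],
// size: S, _settings: &TextureSettings,
// ) -> Result<Self, ()> {
// let size = size.into();
// Ok(RamTexture { size: (size[0], size[1]), pixels: memory.to_vec() })
// }
// }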
/// Sampling filter
#[derive(Copy, Clone, Debug)]
pub enum Filter {
/// A Weighted Linear Blend
Linear,
/// Nearest Texel
Nearest
}
| {
self.set_compress(val);
self
} |
parser.test.ts | // import { Parser } from "../src/parser/class.parser.ts";
// import { assertEquals } from "../src/deps.ts";
// Deno.test("should handle empty content", () => {
// const result = new Parser().parse(""); | // Deno.test("should handle heading level 1", () => {
// const result = new Parser().parse("# first level");
// assertEquals(result, {});
// });
// Deno.test("should handle heading level 2", () => {
// const result = new Parser().parse("## second level");
// assertEquals(result, "<h2>second level</h2>");
// }); | // assertEquals(result, {});
// }); |
iRowNodeBlock.d.ts | // Type definitions for @ag-grid-community/core v23.1.1
// Project: http://www.ag-grid.com/
// Definitions by: Niall Crosby <https://github.com/ag-grid/>
import { IEventEmitter } from "./iEventEmitter";
import { RowNode } from "../entities/rowNode";
import { NumberSequence } from "../utils";
export interface IRowNodeBlock extends IEventEmitter {
getDisplayIndexStart(): number;
getDisplayIndexEnd(): number;
getLastAccessed(): number;
getState(): string;
isAnyNodeOpen(rowCount: number): boolean; | getBlockNumber(): number;
forEachNodeDeep(callback: (rowNode: RowNode, index: number) => void, sequence: NumberSequence, rowCount: number): void;
destroy(): void;
forEachNodeShallow(callback: (rowNode: RowNode, index: number) => void, sequence: NumberSequence, rowCount: number): void;
load(): void;
} | |
TimeExtent.js | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See http://js.arcgis.com/3.34/esri/copyright.txt for details.
//>>built
define("esri/TimeExtent",["dojo/_base/declare","dojo/_base/lang","dojo/has","./kernel"],function(h,k,l,m){var g=h(null,{declaredClass:"esri.TimeExtent",constructor:function(a){if(1<arguments.length)this._create(arguments[0],arguments[1]);else if(a)if(k.isArray(a)){var b=a[0],c=a[1];this.startTime=null===b||"null"===b?null:new Date(b);this.endTime=null===c||"null"===c?null:new Date(c)}else a instanceof Date&&this._create(a,null)},offset:function(a,b){var c=new g,d=this.startTime,e=this.endTime;d&& | setter:"setUTCFullYear",multiplier:1},esriTimeUnitsDecades:{getter:"getUTCFullYear",setter:"setUTCFullYear",multiplier:10},esriTimeUnitsCenturies:{getter:"getUTCFullYear",setter:"setUTCFullYear",multiplier:100}},_intersection:function(a,b){if(a&&b){var c=a.startTime;a=a.endTime;var d=b.startTime;b=b.endTime;c=c?c.getTime():-Infinity;d=d?d.getTime():-Infinity;a=a?a.getTime():Infinity;b=b?b.getTime():Infinity;var e,f;d>=c&&d<=a?e=d:c>=d&&c<=b&&(e=c);a>=d&&a<=b?f=a:b>=c&&b<=a&&(f=b);if(isNaN(e)||isNaN(f))return null;
c=new g;c.startTime=-Infinity===e?null:new Date(e);c.endTime=Infinity===f?null:new Date(f);return c}return null},_getOffsettedDate:function(a,b,c){var d=this._refData;a=new Date(a.getTime());b&&c&&(d=d[c],a[d.setter](a[d.getter]()+b*d.multiplier));return a}});l("extend-esri")&&(m.TimeExtent=g);return g}); | (c.startTime=this._getOffsettedDate(d,a,b));e&&(c.endTime=this._getOffsettedDate(e,a,b));return c},intersection:function(a){return this._intersection(this,a)},toJson:function(){var a=[],b=this.startTime;a.push(b?b.getTime():"null");b=this.endTime;a.push(b?b.getTime():"null");return a},_create:function(a,b){this.startTime=a?new Date(a.getTime()):null;this.endTime=b?new Date(b.getTime()):null},_refData:{esriTimeUnitsMilliseconds:{getter:"getUTCMilliseconds",setter:"setUTCMilliseconds",multiplier:1},
esriTimeUnitsSeconds:{getter:"getUTCSeconds",setter:"setUTCSeconds",multiplier:1},esriTimeUnitsMinutes:{getter:"getUTCMinutes",setter:"setUTCMinutes",multiplier:1},esriTimeUnitsHours:{getter:"getUTCHours",setter:"setUTCHours",multiplier:1},esriTimeUnitsDays:{getter:"getUTCDate",setter:"setUTCDate",multiplier:1},esriTimeUnitsWeeks:{getter:"getUTCDate",setter:"setUTCDate",multiplier:7},esriTimeUnitsMonths:{getter:"getUTCMonth",setter:"setUTCMonth",multiplier:1},esriTimeUnitsYears:{getter:"getUTCFullYear", |
volume_manager.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package volumemanager
import (
"fmt"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/kubelet/config"
"k8s.io/kubernetes/pkg/kubelet/container"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/populator"
"k8s.io/kubernetes/pkg/kubelet/volumemanager/reconciler"
k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
const (
// reconcilerLoopSleepPeriod is the amount of time the reconciler loop waits
// between successive executions
reconcilerLoopSleepPeriod time.Duration = 100 * time.Millisecond
// reconcilerSyncStatesSleepPeriod is the amount of time the reconciler reconstruct process
// waits between successive executions
reconcilerSyncStatesSleepPeriod time.Duration = 3 * time.Minute
// desiredStateOfWorldPopulatorLoopSleepPeriod is the amount of time the
// DesiredStateOfWorldPopulator loop waits between successive executions
desiredStateOfWorldPopulatorLoopSleepPeriod time.Duration = 100 * time.Millisecond
// desiredStateOfWorldPopulatorGetPodStatusRetryDuration is the amount of
// time the DesiredStateOfWorldPopulator loop waits between successive pod
// cleanup calls (to prevent calling containerruntime.GetPodStatus too
// frequently).
desiredStateOfWorldPopulatorGetPodStatusRetryDuration time.Duration = 2 * time.Second
// podAttachAndMountTimeout is the maximum amount of time the
// WaitForAttachAndMount call will wait for all volumes in the specified pod
// to be attached and mounted. Even though cloud operations can take several
// minutes to complete, we set the timeout to 2 minutes because kubelet
// will retry in the next sync iteration. This frees the associated
// goroutine of the pod to process newer updates if needed (e.g., a delete
// request to the pod).
podAttachAndMountTimeout time.Duration = 2 * time.Minute
// podAttachAndMountRetryInterval is the amount of time the GetVolumesForPod
// call waits before retrying
podAttachAndMountRetryInterval time.Duration = 300 * time.Millisecond
// waitForAttachTimeout is the maximum amount of time a
// operationexecutor.Mount call will wait for a volume to be attached.
// Set to 10 minutes because we've seen attach operations take several
// minutes to complete for some volume plugins in some cases. While this
// operation is waiting it only blocks other operations on the same device,
// other devices are not affected.
waitForAttachTimeout time.Duration = 10 * time.Minute
// reconcilerStartGracePeriod is the maximum amount of time volume manager
// can wait to start reconciler
reconcilerStartGracePeriod time.Duration = 60 * time.Second
)
// VolumeManager runs a set of asynchronous loops that figure out which volumes
// need to be attached/mounted/unmounted/detached based on the pods scheduled on
// this node and makes it so.
type VolumeManager interface {
// Starts the volume manager and all the asynchronous loops that it controls
Run(sourcesReady config.SourcesReady, stopCh <-chan struct{})
// WaitForAttachAndMount processes the volumes referenced in the specified
// pod and blocks until they are all attached and mounted (reflected in
// actual state of the world).
// An error is returned if all volumes are not attached and mounted within
// the duration defined in podAttachAndMountTimeout.
WaitForAttachAndMount(pod *api.Pod) error
// GetMountedVolumesForPod returns a VolumeMap containing the volumes
// referenced by the specified pod that are successfully attached and
// mounted. The key in the map is the OuterVolumeSpecName (i.e.
// pod.Spec.Volumes[x].Name). It returns an empty VolumeMap if pod has no
// volumes.
GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap
// GetExtraSupplementalGroupsForPod returns a list of the extra
// supplemental groups for the Pod. These extra supplemental groups come
// from annotations on persistent volumes that the pod depends on.
GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64
// GetVolumesInUse returns a list of all volumes that implement the volume.Attacher
// interface and are currently in use according to the actual and desired
// state of the world caches. A volume is considered "in use" as soon as it
// is added to the desired state of world, indicating it *should* be
// attached to this node and remains "in use" until it is removed from both
// the desired state of the world and the actual state of the world, or it
// has been unmounted (as indicated in actual state of world).
// TODO(#27653): VolumesInUse should be handled gracefully on kubelet
// restarts.
GetVolumesInUse() []api.UniqueVolumeName
// ReconcilerStatesHasBeenSynced returns true only after the actual states in reconciler
// has been synced at least once after kubelet starts so that it is safe to update mounted
// volume list retrieved from actual state.
ReconcilerStatesHasBeenSynced() bool
// VolumeIsAttached returns true if the given volume is attached to this
// node.
VolumeIsAttached(volumeName api.UniqueVolumeName) bool
// Marks the specified volume as having successfully been reported as "in
// use" in the nodes's volume status.
MarkVolumesAsReportedInUse(volumesReportedAsInUse []api.UniqueVolumeName)
}
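// waitAndGetVolumes is an illustrative sketch (not part of the original
// file): it shows how a caller would gate container start on volume
// readiness using only the interface methods declared above.
func waitAndGetVolumes(vm VolumeManager, p *api.Pod) (container.VolumeMap, error) {
if err := vm.WaitForAttachAndMount(p); err != nil {
return nil, fmt.Errorf("volumes not ready for pod %q: %v", p.Name, err)
}
return vm.GetMountedVolumesForPod(volumehelper.GetUniquePodName(p)), nil
}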
// NewVolumeManager returns a new concrete instance implementing the
// VolumeManager interface.
//
// kubeClient - kubeClient is the kube API client used by DesiredStateOfWorldPopulator
// to communicate with the API server to fetch PV and PVC objects
// volumePluginMgr - the volume plugin manager used to access volume plugins.
// Must be pre-initialized.
func NewVolumeManager(
controllerAttachDetachEnabled bool,
nodeName k8stypes.NodeName,
podManager pod.Manager,
kubeClient internalclientset.Interface,
volumePluginMgr *volume.VolumePluginMgr,
kubeContainerRuntime kubecontainer.Runtime,
mounter mount.Interface,
kubeletPodsDir string,
recorder record.EventRecorder,
checkNodeCapabilitiesBeforeMount bool) (VolumeManager, error) |
// volumeManager implements the VolumeManager interface
type volumeManager struct {
// kubeClient is the kube API client used by DesiredStateOfWorldPopulator to
// communicate with the API server to fetch PV and PVC objects
kubeClient internalclientset.Interface
// volumePluginMgr is the volume plugin manager used to access volume
// plugins. It must be pre-initialized.
volumePluginMgr *volume.VolumePluginMgr
// desiredStateOfWorld is a data structure containing the desired state of
// the world according to the volume manager: i.e. what volumes should be
// attached and which pods are referencing the volumes.
// The data structure is populated by the desired state of the world
// populator using the kubelet pod manager.
desiredStateOfWorld cache.DesiredStateOfWorld
// actualStateOfWorld is a data structure containing the actual state of
// the world according to the manager: i.e. which volumes are attached to
// this node and what pods the volumes are mounted to.
// The data structure is populated upon successful completion of attach,
// detach, mount, and unmount actions triggered by the reconciler.
actualStateOfWorld cache.ActualStateOfWorld
// operationExecutor is used to start asynchronous attach, detach, mount,
// and unmount operations.
operationExecutor operationexecutor.OperationExecutor
// reconciler runs an asynchronous periodic loop to reconcile the
// desiredStateOfWorld with the actualStateOfWorld by triggering attach,
// detach, mount, and unmount operations using the operationExecutor.
reconciler reconciler.Reconciler
// desiredStateOfWorldPopulator runs an asynchronous periodic loop to
// populate the desiredStateOfWorld using the kubelet PodManager.
desiredStateOfWorldPopulator populator.DesiredStateOfWorldPopulator
}
func (vm *volumeManager) Run(sourcesReady config.SourcesReady, stopCh <-chan struct{}) {
defer runtime.HandleCrash()
go vm.desiredStateOfWorldPopulator.Run(stopCh)
glog.V(2).Infof("The desired_state_of_world populator starts")
glog.Infof("Starting Kubelet Volume Manager")
go vm.reconciler.Run(sourcesReady, stopCh)
<-stopCh
glog.Infof("Shutting down Kubelet Volume Manager")
}
func (vm *volumeManager) GetMountedVolumesForPod(
podName types.UniquePodName) container.VolumeMap {
podVolumes := make(container.VolumeMap)
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{Mounter: mountedVolume.Mounter}
}
return podVolumes
}
func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
podName := volumehelper.GetUniquePodName(pod)
supplementalGroups := sets.NewString()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
if mountedVolume.VolumeGidValue != "" {
supplementalGroups.Insert(mountedVolume.VolumeGidValue)
}
}
result := make([]int64, 0, supplementalGroups.Len())
for _, group := range supplementalGroups.List() {
iGroup, extra := getExtraSupplementalGid(group, pod)
if !extra {
continue
}
result = append(result, int64(iGroup))
}
return result
}
func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName {
// Report volumes in desired state of world and actual state of world so
// that volumes are marked in use as soon as the decision is made that the
// volume *should* be attached to this node until it is safely unmounted.
desiredVolumes := vm.desiredStateOfWorld.GetVolumesToMount()
mountedVolumes := vm.actualStateOfWorld.GetGloballyMountedVolumes()
volumesToReportInUse :=
make(
[]api.UniqueVolumeName,
0, /* len */
len(desiredVolumes)+len(mountedVolumes) /* cap */)
desiredVolumesMap :=
make(
map[api.UniqueVolumeName]bool,
len(desiredVolumes)+len(mountedVolumes) /* cap */)
for _, volume := range desiredVolumes {
if volume.PluginIsAttachable {
desiredVolumesMap[volume.VolumeName] = true
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
for _, volume := range mountedVolumes {
if volume.PluginIsAttachable {
if _, exists := desiredVolumesMap[volume.VolumeName]; !exists {
volumesToReportInUse = append(volumesToReportInUse, volume.VolumeName)
}
}
}
return volumesToReportInUse
}
func (vm *volumeManager) ReconcilerStatesHasBeenSynced() bool {
return vm.reconciler.StatesHasBeenSynced()
}
func (vm *volumeManager) VolumeIsAttached(
volumeName api.UniqueVolumeName) bool {
return vm.actualStateOfWorld.VolumeExists(volumeName)
}
func (vm *volumeManager) MarkVolumesAsReportedInUse(
volumesReportedAsInUse []api.UniqueVolumeName) {
vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse)
}
func (vm *volumeManager) WaitForAttachAndMount(pod *api.Pod) error {
expectedVolumes := getExpectedVolumes(pod)
if len(expectedVolumes) == 0 {
// No volumes to verify
return nil
}
glog.V(3).Infof("Waiting for volumes to attach and mount for pod %q", format.Pod(pod))
uniquePodName := volumehelper.GetUniquePodName(pod)
// Some pods expect to have Setup called over and over again to update.
// Remount plugins for which this is true. (Atomically updating volumes,
// like Downward API, depend on this to update the contents of the volume).
vm.desiredStateOfWorldPopulator.ReprocessPod(uniquePodName)
vm.actualStateOfWorld.MarkRemountRequired(uniquePodName)
err := wait.Poll(
podAttachAndMountRetryInterval,
podAttachAndMountTimeout,
vm.verifyVolumesMountedFunc(uniquePodName, expectedVolumes))
if err != nil {
// Timeout expired
unmountedVolumes :=
vm.getUnmountedVolumes(uniquePodName, expectedVolumes)
if len(unmountedVolumes) == 0 {
return nil
}
return fmt.Errorf(
"timeout expired waiting for volumes to attach/mount for pod %q/%q. list of unattached/unmounted volumes=%v",
pod.Name,
pod.Namespace,
unmountedVolumes)
}
glog.V(3).Infof("All volumes are attached and mounted for pod %q", format.Pod(pod))
return nil
}
// verifyVolumesMountedFunc returns a method that returns true when all expected
// volumes are mounted.
func (vm *volumeManager) verifyVolumesMountedFunc(
podName types.UniquePodName, expectedVolumes []string) wait.ConditionFunc {
return func() (done bool, err error) {
return len(vm.getUnmountedVolumes(podName, expectedVolumes)) == 0, nil
}
}
// getUnmountedVolumes fetches the current list of mounted volumes from
// the actual state of the world, and uses it to process the list of
// expectedVolumes. It returns a list of unmounted volumes.
func (vm *volumeManager) getUnmountedVolumes(
podName types.UniquePodName, expectedVolumes []string) []string {
mountedVolumes := sets.NewString()
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
mountedVolumes.Insert(mountedVolume.OuterVolumeSpecName)
}
return filterUnmountedVolumes(mountedVolumes, expectedVolumes)
}
// filterUnmountedVolumes adds each element of expectedVolumes that is not in
// mountedVolumes to a list of unmountedVolumes and returns it.
func filterUnmountedVolumes(
mountedVolumes sets.String, expectedVolumes []string) []string {
unmountedVolumes := []string{}
for _, expectedVolume := range expectedVolumes {
if !mountedVolumes.Has(expectedVolume) {
unmountedVolumes = append(unmountedVolumes, expectedVolume)
}
}
return unmountedVolumes
}
// getExpectedVolumes returns a list of volumes that must be mounted in order to
// consider the volume setup step for this pod satisfied.
func getExpectedVolumes(pod *api.Pod) []string {
expectedVolumes := []string{}
if pod == nil {
return expectedVolumes
}
for _, podVolume := range pod.Spec.Volumes {
expectedVolumes = append(expectedVolumes, podVolume.Name)
}
return expectedVolumes
}
// getExtraSupplementalGid returns the value of an extra supplemental GID as
// defined by an annotation on a volume and a boolean indicating whether the
// volume defined a GID that the pod doesn't already request.
func getExtraSupplementalGid(volumeGidValue string, pod *api.Pod) (int64, bool) {
if volumeGidValue == "" {
return 0, false
}
gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
if err != nil {
return 0, false
}
if pod.Spec.SecurityContext != nil {
for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
if gid == existingGid {
return 0, false
}
}
}
return gid, true
}
| {
vm := &volumeManager{
kubeClient: kubeClient,
volumePluginMgr: volumePluginMgr,
desiredStateOfWorld: cache.NewDesiredStateOfWorld(volumePluginMgr),
actualStateOfWorld: cache.NewActualStateOfWorld(nodeName, volumePluginMgr),
operationExecutor: operationexecutor.NewOperationExecutor(
kubeClient,
volumePluginMgr,
recorder,
checkNodeCapabilitiesBeforeMount),
}
vm.reconciler = reconciler.NewReconciler(
kubeClient,
controllerAttachDetachEnabled,
reconcilerLoopSleepPeriod,
reconcilerSyncStatesSleepPeriod,
waitForAttachTimeout,
nodeName,
vm.desiredStateOfWorld,
vm.actualStateOfWorld,
vm.operationExecutor,
mounter,
volumePluginMgr,
kubeletPodsDir)
vm.desiredStateOfWorldPopulator = populator.NewDesiredStateOfWorldPopulator(
kubeClient,
desiredStateOfWorldPopulatorLoopSleepPeriod,
desiredStateOfWorldPopulatorGetPodStatusRetryDuration,
podManager,
vm.desiredStateOfWorld,
kubeContainerRuntime)
return vm, nil
} |
configuration.go | // Copyright 2020 IBM Corp.
// SPDX-License-Identifier: Apache-2.0
package utils
import (
"io/ioutil"
"os"
"strings"
"github.com/hashicorp/vault/api"
)
// Attributes that are defined in a config map or the runtime environment
const (
CatalogConnectorServiceAddressKey string = "CATALOG_CONNECTOR_URL"
CredentialsManagerServiceAddressKey string = "CREDENTIALS_CONNECTOR_URL"
VaultAddressKey string = "VAULT_ADDRESS"
VaultSecretKey string = "VAULT_TOKEN"
VaultDatasetMountKey string = "VAULT_DATASET_MOUNT"
VaultUserMountKey string = "VAULT_USER_MOUNT"
VaultUserHomeKey string = "VAULT_USER_HOME"
VaultDatasetHomeKey string = "VAULT_DATASET_HOME"
VaultTTLKey string = "VAULT_TTL"
VaultAuthKey string = "VAULT_AUTH"
SecretProviderURL string = "SECRET_PROVIDER_URL"
SecretProviderRole string = "SECRET_PROVIDER_ROLE"
)
// GetSystemNamespace returns the namespace of control plane
func | () string {
if data, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace"); err == nil {
if ns := strings.TrimSpace(string(data)); len(ns) > 0 {
return ns
}
}
return "default"
}
// GetSecretProviderURL returns the path to secret provider
// A credentials path should begin with this URL
func GetSecretProviderURL() string {
return os.Getenv(SecretProviderURL)
}
// GetSecretProviderRole returns the assigned authentification role for accessing dataset credentials
func GetSecretProviderRole() string {
return os.Getenv(SecretProviderRole)
}
// GetVaultAuthTTL returns the amount of time the authorization issued by vault is valid
func GetVaultAuthTTL() string {
return os.Getenv(VaultTTLKey)
}
// GetVaultAuthService returns the authentication service that was chosen for use with vault,
// and the configuration options for it.
// Vault supports multiple types of authentication, such as JSON web tokens (JWT), GitHub, AWS, etc.
func GetVaultAuthService() (string, api.EnableAuthOptions) {
auth := os.Getenv(VaultAuthKey)
options := api.EnableAuthOptions{
Type: auth,
}
return auth, options
}
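// exampleEnableVaultAuth is an illustrative sketch (not part of the
// original file): it wires the values above into a Vault client using
// standard hashicorp/vault/api calls.
func exampleEnableVaultAuth() error {
client, err := api.NewClient(&api.Config{Address: GetVaultAddress()})
if err != nil {
return err
}
auth, options := GetVaultAuthService()
return client.Sys().EnableAuthWithOptions(auth, &options)
}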
// GetVaultAddress returns the address and port of the vault system,
// which is used for managing data set credentials
func GetVaultAddress() string {
return os.Getenv(VaultAddressKey)
}
// GetVaultUserHome returns the home directory in vault where the user credentials for external systems accessed by the m4d are stored
// All credentials will be in sub-directories of this directory in the form of system/compute_label
func GetVaultUserHome() string {
return os.Getenv(VaultUserHomeKey)
}
// GetVaultDatasetHome returns the home directory in vault where dataset credentials are stored.
// All credentials will be in sub-directories of this directory in the form of catalog_id/dataset_id
func GetVaultDatasetHome() string {
return os.Getenv(VaultDatasetHomeKey)
}
// GetVaultUserMountPath returns the mount directory in vault where user credentials for the external systems accessed by the m4d are stored.
func GetVaultUserMountPath() string {
return os.Getenv(VaultUserMountKey)
}
// GetVaultDatasetMountPath returns the mount directory in vault of where dataset credentials are stored.
// All credentials will be in sub-directories of this directory in the form of catalog_id/dataset_id
func GetVaultDatasetMountPath() string {
return os.Getenv(VaultDatasetMountKey)
}
// GetVaultToken returns the token this module uses to authenticate with vault
func GetVaultToken() string {
return os.Getenv(VaultSecretKey)
}
// GetCredentialsManagerServiceAddress returns the address where credentials manager is running
func GetCredentialsManagerServiceAddress() string {
return os.Getenv(CredentialsManagerServiceAddressKey)
}
// GetDataCatalogServiceAddress returns the address where data catalog is running
func GetDataCatalogServiceAddress() string {
return os.Getenv(CatalogConnectorServiceAddressKey)
}
| GetSystemNamespace |
main.go | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command arrow-ls displays the listing of an Arrow file.
//
// Examples:
//
// $> arrow-ls ./testdata/primitives.data
// version: V4
// schema:
// fields: 11
// - bools: type=bool, nullable
// - int8s: type=int8, nullable
// - int16s: type=int16, nullable
// - int32s: type=int32, nullable
// - int64s: type=int64, nullable
// - uint8s: type=uint8, nullable
// - uint16s: type=uint16, nullable
// - uint32s: type=uint32, nullable
// - uint64s: type=uint64, nullable
// - float32s: type=float32, nullable
// - float64s: type=float64, nullable
// records: 3
//
// $> gen-arrow-stream | arrow-ls
// schema:
// fields: 11
// - bools: type=bool, nullable
// - int8s: type=int8, nullable
// - int16s: type=int16, nullable
// - int32s: type=int32, nullable
// - int64s: type=int64, nullable
// - uint8s: type=uint8, nullable
// - uint16s: type=uint16, nullable
// - uint32s: type=uint32, nullable
// - uint64s: type=uint64, nullable
// - float32s: type=float32, nullable
// - float64s: type=float64, nullable
// records: 3
package main // import "github.com/apache/arrow/go/arrow/ipc/cmd/arrow-ls"
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"os"
"github.com/apache/arrow/go/arrow/ipc"
"github.com/apache/arrow/go/arrow/memory"
"golang.org/x/xerrors"
)
func main() {
log.SetPrefix("arrow-ls: ")
log.SetFlags(0)
flag.Parse()
var err error
switch flag.NArg() {
case 0:
err = processStream(os.Stdout, os.Stdin)
default:
err = processFiles(os.Stdout, flag.Args())
}
if err != nil {
log.Fatal(err)
}
}
func processStream(w io.Writer, rin io.Reader) error {
mem := memory.NewGoAllocator()
for {
r, err := ipc.NewReader(rin, ipc.WithAllocator(mem))
if err != nil {
if xerrors.Is(err, io.EOF) {
return nil
}
return err
}
fmt.Fprintf(w, "%v\n", r.Schema())
nrecs := 0
for r.Next() {
nrecs++
}
fmt.Fprintf(w, "records: %d\n", nrecs)
r.Release()
}
return nil
}
func processFiles(w io.Writer, names []string) error {
for _, name := range names {
err := processFile(w, name)
if err != nil {
return err
}
}
return nil
}
func processFile(w io.Writer, fname string) error {
f, err := os.Open(fname)
if err != nil {
return err
}
defer f.Close()
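// Arrow files begin with the magic bytes "ARROW1", while IPC streams do
// not, so sniffing the header bytes decides which reader to use.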
hdr := make([]byte, len(ipc.Magic))
_, err = io.ReadFull(f, hdr)
if err != nil {
return xerrors.Errorf("could not read file header: %w", err)
}
f.Seek(0, io.SeekStart)
if !bytes.Equal(hdr, ipc.Magic) {
// try as a stream.
return processStream(w, f)
}
mem := memory.NewGoAllocator()
r, err := ipc.NewFileReader(f, ipc.WithAllocator(mem))
if err != nil |
defer r.Close()
fmt.Fprintf(w, "version: %v\n", r.Version())
fmt.Fprintf(w, "%v\n", r.Schema())
fmt.Fprintf(w, "records: %d\n", r.NumRecords())
return nil
}
func init() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, `Command arrow-ls displays the listing of an Arrow file.
Usage: arrow-ls [OPTIONS] [FILE1 [FILE2 [...]]]
Examples:
$> arrow-ls ./testdata/primitives.data
version: V4
schema:
fields: 11
- bools: type=bool, nullable
- int8s: type=int8, nullable
- int16s: type=int16, nullable
- int32s: type=int32, nullable
- int64s: type=int64, nullable
- uint8s: type=uint8, nullable
- uint16s: type=uint16, nullable
- uint32s: type=uint32, nullable
- uint64s: type=uint64, nullable
- float32s: type=float32, nullable
- float64s: type=float64, nullable
records: 3
$> gen-arrow-stream | arrow-ls
schema:
fields: 11
- bools: type=bool, nullable
- int8s: type=int8, nullable
- int16s: type=int16, nullable
- int32s: type=int32, nullable
- int64s: type=int64, nullable
- uint8s: type=uint8, nullable
- uint16s: type=uint16, nullable
- uint32s: type=uint32, nullable
- uint64s: type=uint64, nullable
- float32s: type=float32, nullable
- float64s: type=float64, nullable
records: 3
`)
os.Exit(0)
}
}
| {
if xerrors.Is(err, io.EOF) {
return nil
}
return err
} |
search.rs | #![allow(unused_imports)]
use cursive::direction::Orientation;
use cursive::event::{AnyCb, Event, EventResult, Key};
use cursive::traits::{Boxable, Finder, Identifiable, View};
use cursive::view::{IntoBoxedView, Selector, ViewNotFound, ViewWrapper};
use cursive::views::{EditView, NamedView, ViewRef};
use cursive::{Cursive, Printer, Vec2};
use std::cell::RefCell;
use std::sync::{Arc, Mutex, RwLock};
use crate::command::{Command, MoveMode};
use crate::commands::CommandResult;
use crate::events::EventManager;
use crate::library::Library;
use crate::model::album::Album;
use crate::model::artist::Artist;
use crate::model::episode::Episode;
use crate::model::playlist::Playlist;
use crate::model::show::Show;
use crate::model::track::Track;
use crate::queue::Queue;
use crate::spotify::{Spotify, UriType};
use crate::traits::{ListItem, ViewExt};
use crate::ui::layout::Layout;
use crate::ui::listview::ListView;
use crate::ui::pagination::Pagination;
use crate::ui::search_results::SearchResultsView;
use crate::ui::tabview::TabView;
use rspotify::model::search::SearchResult;
pub struct SearchView {
edit: NamedView<EditView>,
edit_focused: bool,
}
pub const EDIT_ID: &str = "search_edit";
impl SearchView {
pub fn new(events: EventManager, queue: Arc<Queue>, library: Arc<Library>) -> SearchView {
let searchfield = EditView::new()
.on_submit(move |s, input| {
if !input.is_empty() {
let results = SearchResultsView::new(
input.to_string(),
events.clone(),
queue.clone(),
library.clone(),
);
s.call_on_name("main", move |v: &mut Layout| v.push_view(Box::new(results)));
}
})
.with_name(EDIT_ID);
SearchView {
edit: searchfield,
edit_focused: true,
}
}
pub fn clear(&mut self) {
self.edit
.call_on(&Selector::Name(EDIT_ID), |v: &mut EditView| {
v.set_content("");
});
}
}
impl View for SearchView {
fn draw(&self, printer: &Printer<'_, '_>) {
let printer = &printer
.offset((0, 0))
.cropped((printer.size.x, 1))
.focused(self.edit_focused);
self.edit.draw(printer);
}
fn layout(&mut self, size: Vec2) {
self.edit.layout(Vec2::new(size.x, 1));
}
fn on_event(&mut self, event: Event) -> EventResult {
if event == Event::Key(Key::Tab) {
self.edit_focused = !self.edit_focused;
return EventResult::Consumed(None);
} else if self.edit_focused && event == Event::Key(Key::Esc) {
self.clear();
}
if self.edit_focused {
self.edit.on_event(event)
} else {
EventResult::Ignored
}
}
fn call_on_any<'a>(&mut self, selector: &Selector<'_>, callback: AnyCb<'a>) |
fn focus_view(&mut self, selector: &Selector<'_>) -> Result<(), ViewNotFound> {
if let Selector::Name(s) = selector {
self.edit_focused = s == &"search_edit";
Ok(())
} else {
Err(ViewNotFound)
}
}
}
impl ViewExt for SearchView {
fn title(&self) -> String {
"Search".to_string()
}
fn on_command(&mut self, _s: &mut Cursive, cmd: &Command) -> Result<CommandResult, String> {
if let Command::Focus(_) = cmd {
self.edit_focused = true;
self.clear();
return Ok(CommandResult::Consumed(None));
}
Ok(CommandResult::Ignored)
}
}
| {
self.edit.call_on_any(selector, &mut |v| callback(v));
} |
day1.go | package cmd
import (
"fmt"
"io"
"os"
"github.com/spf13/cobra"
)
var day1Cmd = &cobra.Command{
Use: "day1",
Run: func(cmd *cobra.Command, args []string) {
input := readFile("cmd/input/day1.txt")
fmt.Println(len(input))
defer println("Twee: ", twee(input))
defer println("Een: ", een(input))
},
}
func init() {
rootCmd.AddCommand(day1Cmd)
}
func een(input []int) int {
holdprev := 0
counter := 0
for _, val := range input {
if holdprev != 0 && val > holdprev {
counter++
}
holdprev = val
}
return counter
}
func twee(input []int) int {
counter := 0
for i := 1; i+2 < len(input); i = i + 1 {
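// adjacent windows share input[i] and input[i+1], so this comparison is
// equivalent to input[i+2] > input[i-1]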
countprev := input[i-1] + input[i] + input[i+1]
hold := input[i] + input[i+1] + input[i+2]
if hold > countprev {
counter++
}
}
return counter | fd, err := os.Open(filePath)
if err != nil {
panic(fmt.Sprintf("open %s: %v", filePath, err))
}
defer fd.Close()
var line int
for {
_, err := fmt.Fscanf(fd, "%d\n", &line)
if err != nil {
fmt.Println(err)
if err == io.EOF {
return
}
panic(fmt.Sprintf("Scan Failed %s: %v", filePath, err))
}
numbers = append(numbers, line)
}
} | }
func readFile(filePath string) (numbers []int) { |
bar.py | """
Progress bars, health bars, etc
"""
import arcade
from ..core.utils import Rect, Position
from .iabstract import _AbstractInterfaceObject
class Bar(_AbstractInterfaceObject):
"""
Drawable bar
"""
MIDDLE_OUT = 'mi'
RIGHT_TO_LEFT = 'rtl'
def __init__(self,
geometry: Rect = Rect(),
color = arcade.color.BLUE,
parent = None):
super().__init__(geometry, parent)
self._color = color
self._mode = Bar.MIDDLE_OUT
# How much of our bar do we fill
self._percent = 1.0
@property
def percent(self):
return self._percent
@property
def mode(self):
return self._mode
def set_mode(self, mode: str):
if not mode in (Bar.MIDDLE_OUT, Bar.RIGHT_TO_LEFT):
return
self._mode = mode
def set_percent(self, percent: float):
self._percent = percent
self.set_dirty(True)
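# Illustrative usage (names assumed): a health bar that drains right-to-left.
#   bar = Bar(Rect(10, 10, 200, 16), color=arcade.color.RED)
#   bar.set_mode(Bar.RIGHT_TO_LEFT)
#   bar.set_percent(0.4)  # 40% remaining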
def shapes(self, draw_event):
"""
Paint a basic beam
"""
shapes = [self.background_shape()]
rect = self.geometry
if self._mode == Bar.RIGHT_TO_LEFT:
rect = Rect(
rect.x, rect.y, rect.w * self._percent, rect.h
).to_arcade_rect()
elif self._mode == Bar.MIDDLE_OUT:
|
shapes.append(arcade.create_rectangle(
*(rect),
self._color
))
return shapes
| rect = rect.to_arcade_rect()
rect = Rect(
rect.x, rect.y, rect.w * self._percent, rect.h
) |
gen_key.go | package main
import (
"fmt"
"github.com/drycc/go-docopt"
"github.com/drycc/go-tuf"
)
func init() {
register("gen-key", cmdGenKey, `
usage: tuf gen-key [--expires=<days>] <role>
Generate a new signing key for the given role.
The key will be serialized to JSON and written to the "keys" directory with
filename pattern "ROLE-KEYID.json". The root manifest will also be staged
with the addition of the key's ID to the role's list of key IDs.
Options:
--expires=<days> Set the root manifest to expire <days> days from now.
`)
}
func cmdGenKey(args *docopt.Args, repo *tuf.Repo) error | {
role := args.String["<role>"]
var id string
var err error
if arg := args.String["--expires"]; arg != "" {
expires, perr := parseExpires(arg)
if perr != nil {
return perr
}
// a distinct variable for the parse error keeps the outer err from being
// shadowed, so the check below sees the GenKeyWithExpires result
id, err = repo.GenKeyWithExpires(role, expires)
} else {
id, err = repo.GenKey(role)
}
if err != nil {
return err
}
fmt.Println("Generated", role, "key with ID", id)
return nil
} |
|
index.js | // Copyright 2018 Google LLC | // You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
const ProductSearchClient = require('./product_search_client');
const ImageAnnotatorClient = require('./image_annotator_client');
module.exports.ProductSearchClient = ProductSearchClient;
module.exports.ImageAnnotatorClient = ImageAnnotatorClient; | //
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. |
kn.js | OC.L10N.register(
"user_ldap",
{
"Users" : "ಬಳಕೆದಾರರು",
"Groups" : "ಗುಂಪುಗಳು",
"Help" : "ಸಹಾಯ",
"Host" : "ಅತಿಥೆಯ-ಗಣಕ",
"Port" : "ರೇವು",
"Password" : "ಗುಪ್ತ ಪದ",
"Continue" : "ಮುಂದುವರಿಸಿ"
}, | "nplurals=1; plural=0;"); |
|
test_lang_target.py | import tvm
@tvm.target.generic_func
def mygeneric(data):
# default generic function
return data + 1
@mygeneric.register(["cuda", "gpu"])
def cuda_func(data):
return data + 2
@mygeneric.register("rocm")
def rocm_func(data):
return data + 3
@mygeneric.register("cpu")
def cpu_func(data):
return data + 10
def test_target_dispatch():
with tvm.target.cuda():
assert mygeneric(1) == 3
with tvm.target.rocm():
assert mygeneric(1) == 4
with tvm.target.create("cuda"):
assert mygeneric(1) == 3
with tvm.target.arm_cpu():
assert mygeneric(1) == 11
with tvm.target.create("metal"):
assert mygeneric(1) == 3
assert tvm.target.current_target() == None
def | ():
target = tvm.target.create("cuda -libs=cublas,cudnn")
assert target.target_name == "cuda"
assert target.options == ['-libs=cublas,cudnn']
assert target.keys == ['cuda', 'gpu']
assert target.libs == ['cublas', 'cudnn']
assert str(target) == str(tvm.target.cuda("-libs=cublas,cudnn"))
assert tvm.target.intel_graphics().device_name == "intel_graphics"
if __name__ == "__main__":
test_target_dispatch()
test_target_string_parse()
| test_target_string_parse |
test_problem_options.py | import unittest
import blitzml
import numpy as np
from common import captured_output
class TestProblemOptions(unittest.TestCase):
def setUp(self):
A = np.arange(20).reshape(5, 4)
b = np.arange(5).astype(np.float64)
self.prob = blitzml.LassoProblem(A, b)
def tearDown(self):
del self.prob
def test_min_time(self):
self.assertLessEqual(self.prob._min_time, 0.)
self.prob._min_time = 2.0
self.assertEqual(self.prob._min_time, 2.0)
def | (self):
self.assertGreaterEqual(self.prob._max_time, 3600.)
self.prob._max_time = 5.0
self.assertEqual(self.prob._max_time, 5.0)
def test_max_iterations(self):
self.assertGreaterEqual(self.prob._max_iterations, 100)
self.prob._max_iterations = 10
self.assertEqual(self.prob._max_iterations, 10)
def test_tolerance(self):
self.assertGreater(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.
self.assertEqual(self.prob._stopping_tolerance, 0.)
self.prob._stopping_tolerance = 0.1
self.assertEqual(self.prob._stopping_tolerance, 0.1)
def test_verbose(self):
self.assertEqual(self.prob._verbose, False)
self.prob._verbose = True
self.assertEqual(self.prob._verbose, True)
def test_use_screening(self):
self.assertEqual(self.prob._use_screening, True)
self.prob._use_screening = False
self.assertEqual(self.prob._use_screening, False)
def test_use_working_sets(self):
self.assertEqual(self.prob._use_working_sets, True)
self.prob._use_working_sets = False
self.assertEqual(self.prob._use_working_sets, False)
def test_suppress_warnings(self):
bad_log_dir = "path/to/bad_log/dir/zxc8aj3n"
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertIn("Warning", out[0])
blitzml.suppress_warnings()
with captured_output() as out:
self.prob.solve(self.prob.compute_max_l1_penalty(),
log_directory=bad_log_dir)
self.assertNotIn("Warning", out[0])
blitzml.unsuppress_warnings()
| test_max_time |
mcmc.rs | //! Defines a `ConditionalInferenceEngine` for Markov-Chain Monte-Carlo methods.
//!
//! Implementation of MCMC Inference for Conditional Queries described in Koller & Friedman
//! 12.3.5.2
use factor::{Factor, Table};
use samplers::Sampler;
use super::ConditionalInferenceEngine;
use util::{JeromeError, Result};
use variable::Variable;
use ndarray::prelude as nd;
use std::collections::HashSet;
/// A `ConditionalInferenceEngine` for Bayesian or Markovian models. This is intended for
/// Markov-Chain Monte-Carlo `Sampler`s.
pub struct McmcEngine<'a, S: 'a + Sampler> {
/// The weighted sampler for the given `DirectedModel`
sampler: &'a mut S,
/// The number of samples to use
samples: usize
}
impl<'a, S: Sampler> McmcEngine<'a, S> {
pub fn new(sampler: &'a mut S, burnin: usize, samples: usize) -> Self {
// let the sampler burnin
for _ in 0..burnin {
let _ = sampler.sample();
}
McmcEngine { sampler, samples }
}
}
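// Illustrative usage (assumption: `GibbsSampler` stands in for any
// concrete `Sampler` from the `samplers` module):
// let mut sampler = GibbsSampler::new(&model, &evidence);
// let mut engine = McmcEngine::new(&mut sampler, 1_000, 10_000);
// let marginal = engine.infer(&query_vars)?;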
impl<'a, S: 'a + Sampler> ConditionalInferenceEngine for McmcEngine<'a, S> {
fn infer(&mut self, variables: &HashSet<Variable>) -> Result<Factor> |
}
| {
// initialize the factor table. We must assign an order to variables.
let scope: Vec<Variable> = variables.iter().cloned().collect();
let shape: Vec<usize> = variables.iter().map(|v| v.cardinality()).collect();
let mut table = Table::zeros(shape);
// sample away...
for i in 0..self.samples {
let a = self.sampler.sample();
let idx: Vec<Option<&usize>> = scope.iter().map(|v| a.get(v)).collect();
// on the first iteration, verify the assignment matches the scope
if i == 0 {
if idx.iter().any(|v| v.is_none()) {
return Err(JeromeError::InvalidScope);
}
}
let idx: Vec<usize> = idx.iter().map(|v| v.unwrap()).cloned().collect();
table[nd::IxDyn(&idx)] += 1.0;
}
let factor = Factor::new(scope, table)?;
Ok(factor.normalize())
} |
block.py | from flask import url_for
from app.questionnaire.rules import evaluate_skip_conditions
from app.templating.summary.question import Question
class Block:
def __init__(self, block_schema, group_id, answer_store, metadata, schema, group_instance):
self.id = block_schema['id']
self.title = block_schema.get('title')
self.number = block_schema.get('number')
self.link = self._build_link(block_schema, group_id, metadata, group_instance)
self.questions = self._build_questions(block_schema, answer_store, metadata, schema, group_instance)
@staticmethod
def _build_link(block_schema, group_id, metadata, group_instance):
return url_for('questionnaire.get_block',
eq_id=metadata['eq_id'],
form_type=metadata['form_type'],
collection_id=metadata['collection_exercise_sid'],
group_id=group_id,
group_instance=group_instance,
block_id=block_schema['id'])
@staticmethod
def _build_questions(block_schema, answer_store, metadata, schema, group_instance): | is_skipped = evaluate_skip_conditions(question_schema.get('skip_conditions'), schema, metadata, answer_store)
if not is_skipped:
question = Question(question_schema, answer_store, metadata, schema, group_instance).serialize()
questions.append(question)
return questions
def serialize(self):
return {
'id': self.id,
'title': self.title,
'number': self.number,
'link': self.link,
'questions': self.questions,
} | questions = []
for question_schema in block_schema.get('questions', []): |
test_syncdetect.py | import unittest
import pytest
import cupy
import cupyx
class TestSyncDetect(unittest.TestCase):
def test_disallowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(False):
with pytest.raises(cupyx.DeviceSynchronized):
a.get()
def test_allowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(True):
a.get()
def test_nested_disallowed(self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(True):
with cupyx.allow_synchronize(False):
with pytest.raises(cupyx.DeviceSynchronized):
a.get()
def | (self):
a = cupy.array([2, 3])
with cupyx.allow_synchronize(False):
with cupyx.allow_synchronize(True):
a.get()
| test_nested_allowed |
ContactMe.js | import { Box } from "@mui/system";
import { ReactComponent as Github } from "../../assets/images/Github.svg";
import { ReactComponent as Twitter } from "../../assets/images/Twitter.svg";
import { ReactComponent as Email } from "../../assets/images/Email.svg";
import TextField from "@mui/material/TextField";
// import "../../assets/css/svg.css";
import { Button, Typography } from "@mui/material";
const ContactMe = (props) => {
return (
<Box
sx={{
textAlign: "center",
minHeight: "auto",
width: "100vw",
justifyContent: "center",
pt: {xs: "50px", sm: "100px"},
display: "flex",
flexDirection: { sm: "row", xs: "column" },
}}
>
{/* Text */}
<Box | maxWidth: {sm: "50%", xs: "100%"},
textAlign: "left",
display: "flex",
flexDirection: "column",
alignSelf: "center",
p: 2
}}
>
<Typography
className="title_header"
variant="h4"
sx={{
color: "primary",
zIndex: "5",
fontWeight: "bold",
fontSize: "38px",
}}
>
Contactez moi !
</Typography>
<Typography
className="title_header"
variant="body"
sx={{
zIndex: "5",
fontSize: "14px",
textAlign: "left",
}}
>
Remplissez le formulaire pour m'envoyer un mail, ou utilisez mes
coordonnées ci-dessous.
</Typography>
<Button sx={{ my: "15px", maxWidth: "150px" }} variant="contained">
Contained
</Button>
<Box>
<Box sx={{ display: "flex", my: "30px" }}>
<Github />
<Box sx={{ px: "15px" }}>
<Typography
className="title_header"
variant="h4"
sx={{
color: "primary",
zIndex: "5",
fontWeight: "bold",
fontSize: "18px",
}}
>
Github
</Typography>
<Typography
className="title_header"
variant="body"
sx={{
zIndex: "5",
fontSize: "16px",
textAlign: "left",
}}
>
xDrKush
</Typography>
</Box>
</Box>
<Box sx={{ display: "flex", my: "30px" }}>
<Twitter />
<Box sx={{ px: "15px" }}>
<Typography
className="title_header"
variant="h4"
sx={{
color: "primary",
zIndex: "5",
fontWeight: "bold",
fontSize: "18px",
}}
>
Twitter
</Typography>
<Typography
className="title_header"
variant="body"
sx={{
zIndex: "5",
fontSize: "16px",
textAlign: "left",
}}
>
@DrKush
</Typography>
</Box>
</Box>
<Box sx={{ display: "flex", my: "30px" }}>
<Email />
<Box sx={{ px: "15px" }}>
<Typography
className="title_header"
variant="h4"
sx={{
color: "primary",
zIndex: "5",
fontWeight: "bold",
fontSize: "18px",
}}
>
Email
</Typography>
<Typography
className="title_header"
variant="body"
sx={{
zIndex: "5",
fontSize: "16px",
textAlign: "left",
}}
>
[email protected]
</Typography>
</Box>
</Box>
</Box>
</Box>
{/* Form */}
<Box sx={{ display: "flex", justifyContent: "center", maxWidth: {sm: "50%", xs: "100%"} }}>
<Box
sx={{
textAlign: "left",
width: "100%",
alignItems: "center",
alignSelf: "center",
p:2,
backgroundColor: "#2F1F34",
}}
>
<Box
sx={{
width: 500,
my: "15px",
maxWidth: "100%",
}}
>
<TextField
fullWidth
id="standard-basic"
placeholder="Name"
label="Name"
variant="standard"
/>
</Box>
<Box
sx={{
width: 500,
my: "15px",
maxWidth: "100%",
}}
>
<TextField
fullWidth
id="standard-basic"
placeholder="Email"
label="Email"
variant="standard"
/>
</Box>
<Box
sx={{
width: 500,
my: "15px",
maxWidth: "100%",
}}
>
<TextField
fullWidth
id="standard-basic"
placeholder="Object"
label="Objet"
variant="standard"
/>
</Box>
<Box
sx={{
width: 500,
my: "15px",
maxWidth: "100%",
}}
>
<TextField
fullWidth
id="standard-basic"
placeholder="Message"
label="Message"
variant="standard"
/>
</Box>
<Button sx={{ my: "15px", width: "100%" }} variant="contained">
Contained
</Button>
</Box>
<Box
sx={{ textAlign: "left", width: "100%", alignSelf: "center" }}
></Box>
</Box>
</Box>
);
};
export default ContactMe;
file_toml.rs | #![cfg(feature = "toml")]
use serde_derive::Deserialize;
use std::path::PathBuf;
use config::{Config, File, FileFormat, Map, Value};
use float_cmp::ApproxEqUlps;
#[derive(Debug, Deserialize)]
struct Place {
number: PlaceNumber,
name: String,
longitude: f64,
latitude: f64,
favorite: bool,
telephone: Option<String>,
reviews: u64,
creator: Map<String, Value>,
rating: Option<f32>,
}
#[derive(Debug, Deserialize, PartialEq)]
struct PlaceNumber(u8);
#[derive(Debug, Deserialize, PartialEq)]
struct | (i8);
#[derive(Debug, Deserialize)]
struct Settings {
debug: f64,
production: Option<String>,
code: AsciiCode,
place: Place,
#[serde(rename = "arr")]
elements: Vec<String>,
}
#[cfg(test)]
fn make() -> Config {
let mut c = Config::default();
c.merge(File::new("tests/Settings", FileFormat::Toml))
.unwrap();
c
}
#[test]
fn test_file() {
let c = make();
// Deserialize the entire file as single struct
let s: Settings = c.try_deserialize().unwrap();
assert!(s.debug.approx_eq_ulps(&1.0, 2));
assert_eq!(s.production, Some("false".to_string()));
assert_eq!(s.code, AsciiCode(53));
assert_eq!(s.place.number, PlaceNumber(1));
assert_eq!(s.place.name, "Torre di Pisa");
assert!(s.place.longitude.approx_eq_ulps(&43.722_498_5, 2));
assert!(s.place.latitude.approx_eq_ulps(&10.397_052_2, 2));
assert!(!s.place.favorite);
assert_eq!(s.place.reviews, 3866);
assert_eq!(s.place.rating, Some(4.5));
assert_eq!(s.place.telephone, None);
assert_eq!(s.elements.len(), 10);
assert_eq!(s.elements[3], "4".to_string());
if cfg!(feature = "preserve_order") {
assert_eq!(
s.place
.creator
.into_iter()
.collect::<Vec<(String, config::Value)>>(),
vec![
("name".to_string(), "John Smith".into()),
("username".into(), "jsmith".into()),
("email".into(), "jsmith@localhost".into()),
]
);
} else {
assert_eq!(
s.place.creator["name"].clone().into_string().unwrap(),
"John Smith".to_string()
);
}
}
#[test]
fn test_error_parse() {
let mut c = Config::default();
let res = c.merge(File::new("tests/Settings-invalid", FileFormat::Toml));
let path_with_extension: PathBuf = ["tests", "Settings-invalid.toml"].iter().collect();
assert!(res.is_err());
assert_eq!(
res.unwrap_err().to_string(),
format!(
"invalid TOML value, did you mean to use a quoted string? at line 2 column 9 in {}",
path_with_extension.display()
)
);
}
| AsciiCode |
admin-handlers.go | /*
* Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"archive/zip"
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"os"
"strconv"
"strings"
"sync"
"time"
"github.com/gorilla/mux"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/quick"
"github.com/tidwall/gjson"
"github.com/tidwall/sjson"
)
const (
maxConfigJSONSize = 256 * 1024 // 256KiB
)
// Type-safe query params.
type mgmtQueryKey string
// Only valid query params for mgmt admin APIs.
const (
mgmtBucket mgmtQueryKey = "bucket"
mgmtPrefix mgmtQueryKey = "prefix"
mgmtClientToken mgmtQueryKey = "clientToken"
mgmtForceStart mgmtQueryKey = "forceStart"
)
var (
// This struct literal represents the Admin API version that
// the server uses.
adminAPIVersionInfo = madmin.AdminAPIVersionInfo{
Version: "1",
}
)
// VersionHandler - GET /minio/admin/version
// -----------
// Returns Administration API version
func (a adminAPIHandlers) VersionHandler(w http.ResponseWriter, r *http.Request) {
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
jsonBytes, err := json.Marshal(adminAPIVersionInfo)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
logger.LogIf(context.Background(), err)
return
}
writeSuccessResponseJSON(w, jsonBytes)
}
// ServiceStatusHandler - GET /minio/admin/v1/service
// ----------
// Returns server version and uptime.
func (a adminAPIHandlers) ServiceStatusHandler(w http.ResponseWriter, r *http.Request) {
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Fetch server version
serverVersion := madmin.ServerVersion{
Version: Version,
CommitID: CommitID,
}
// Fetch uptimes from all peers. This may fail due to lack
// of read-quorum availability.
uptime, err := getPeerUptimes(globalAdminPeers)
if err != nil {
writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
logger.LogIf(context.Background(), err)
return
}
// Create API response
serverStatus := madmin.ServiceStatus{
ServerVersion: serverVersion,
Uptime: uptime,
}
// Marshal API response
jsonBytes, err := json.Marshal(serverStatus)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
logger.LogIf(context.Background(), err)
return
}
// Reply with the service status (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
}
// ServiceStopNRestartHandler - POST /minio/admin/v1/service
// Body: {"action": <restart-action>}
// ----------
// Restarts/Stops minio server gracefully. In a distributed setup,
// restarts all the servers in the cluster.
func (a adminAPIHandlers) ServiceStopNRestartHandler(w http.ResponseWriter, r *http.Request) {
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
var sa madmin.ServiceAction
err := json.NewDecoder(r.Body).Decode(&sa)
if err != nil {
logger.LogIf(context.Background(), err)
writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
return
}
var serviceSig serviceSignal
switch sa.Action {
case madmin.ServiceActionValueRestart:
serviceSig = serviceRestart
case madmin.ServiceActionValueStop:
serviceSig = serviceStop
default:
writeErrorResponseJSON(w, ErrMalformedPOSTRequest, r.URL)
logger.LogIf(context.Background(), errors.New("Invalid service action received"))
return
}
// Reply to the client before restarting minio server.
writeSuccessResponseHeadersOnly(w)
sendServiceCmd(globalAdminPeers, serviceSig)
}
// ServerProperties holds some server information such as, version, region
// uptime, etc..
type ServerProperties struct {
Uptime time.Duration `json:"uptime"`
Version string `json:"version"`
CommitID string `json:"commitID"`
Region string `json:"region"`
SQSARN []string `json:"sqsARN"`
}
// ServerConnStats holds transferred bytes from/to the server
type ServerConnStats struct {
TotalInputBytes uint64 `json:"transferred"`
TotalOutputBytes uint64 `json:"received"`
Throughput uint64 `json:"throughput,omitempty"`
}
// ServerHTTPMethodStats holds total number of HTTP operations from/to the server,
// including the average duration the call was spent.
type ServerHTTPMethodStats struct {
Count uint64 `json:"count"`
AvgDuration string `json:"avgDuration"`
}
// ServerHTTPStats holds all type of http operations performed to/from the server
// including their average execution time.
type ServerHTTPStats struct {
TotalHEADStats ServerHTTPMethodStats `json:"totalHEADs"`
SuccessHEADStats ServerHTTPMethodStats `json:"successHEADs"`
TotalGETStats ServerHTTPMethodStats `json:"totalGETs"`
SuccessGETStats ServerHTTPMethodStats `json:"successGETs"`
TotalPUTStats ServerHTTPMethodStats `json:"totalPUTs"`
SuccessPUTStats ServerHTTPMethodStats `json:"successPUTs"`
TotalPOSTStats ServerHTTPMethodStats `json:"totalPOSTs"`
SuccessPOSTStats ServerHTTPMethodStats `json:"successPOSTs"`
TotalDELETEStats ServerHTTPMethodStats `json:"totalDELETEs"`
SuccessDELETEStats ServerHTTPMethodStats `json:"successDELETEs"`
}
// ServerInfoData holds storage, connections and other
// information of a given server.
type ServerInfoData struct {
StorageInfo StorageInfo `json:"storage"`
ConnStats ServerConnStats `json:"network"`
HTTPStats ServerHTTPStats `json:"http"`
Properties ServerProperties `json:"server"`
}
// ServerInfo holds server information result of one node
type ServerInfo struct {
Error string `json:"error"`
Addr string `json:"addr"`
Data *ServerInfoData `json:"data"`
}
// ServerInfoHandler - GET /minio/admin/v1/info
// ----------
// Get server information
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
// Authenticate request
// Set the region to empty so that the mc server info command works irrespective of the region.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Web service response
reply := make([]ServerInfo, len(globalAdminPeers))
var wg sync.WaitGroup
// Gather server information for all nodes
for i, p := range globalAdminPeers {
wg.Add(1)
// Gather information from a peer in a goroutine
go func(idx int, peer adminPeer) {
defer wg.Done()
// Initialize server info at index
reply[idx] = ServerInfo{Addr: peer.addr}
serverInfoData, err := peer.cmdRunner.ServerInfo()
if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("peerAddress", peer.addr)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
reply[idx].Error = err.Error()
return
}
reply[idx].Data = &serverInfoData
}(i, p)
}
wg.Wait()
// Marshal API response
jsonBytes, err := json.Marshal(reply)
if err != nil {
writeErrorResponseJSON(w, ErrInternalError, r.URL)
logger.LogIf(context.Background(), err)
return
}
// Reply with server information (across nodes in a
// distributed setup) as json.
writeSuccessResponseJSON(w, jsonBytes)
}
// StartProfilingResult contains the status of the starting
// profiling action in a given server
type StartProfilingResult struct {
NodeName string `json:"nodeName"`
Success bool `json:"success"`
Error string `json:"error"`
}
// StartProfilingHandler - POST /minio/admin/v1/profiling/start/{profiler}
// ----------
// Enable profiling information
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
vars := mux.Vars(r)
profiler := vars["profiler"]
startProfilingResult := make([]StartProfilingResult, len(globalAdminPeers))
// Call StartProfiling function on all nodes and save results
wg := sync.WaitGroup{}
for i, peer := range globalAdminPeers {
wg.Add(1)
go func(idx int, peer adminPeer) {
defer wg.Done()
result := StartProfilingResult{NodeName: peer.addr}
if err := peer.cmdRunner.StartProfiling(profiler); err != nil {
result.Error = err.Error()
} else {
result.Success = true
}
// Store the result even on error so the caller can see which node failed.
startProfilingResult[idx] = result
}(i, peer)
}
wg.Wait()
// Create JSON result and send it to the client
startProfilingResultInBytes, err := json.Marshal(startProfilingResult)
if err != nil {
writeCustomErrorResponseJSON(w, http.StatusInternalServerError, err.Error(), r.URL)
return
}
writeSuccessResponseJSON(w, startProfilingResultInBytes)
}
// dummyFileInfo represents a dummy representation of a profile data file
// present only in memory, it helps to generate the zip stream.
type dummyFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
isDir bool
sys interface{}
}
func (f dummyFileInfo) Name() string { return f.name }
func (f dummyFileInfo) Size() int64 { return f.size }
func (f dummyFileInfo) Mode() os.FileMode { return f.mode }
func (f dummyFileInfo) ModTime() time.Time { return f.modTime }
func (f dummyFileInfo) IsDir() bool { return f.isDir }
func (f dummyFileInfo) Sys() interface{} { return f.sys }
// DownloadProfilingHandler - POST /minio/admin/v1/profiling/download
// ----------
// Download profiling information of all nodes in a zip format
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Return 200 OK
w.WriteHeader(http.StatusOK)
// Initialize a zip writer which will provide a zipped content
// of profiling data of all nodes
zipWriter := zip.NewWriter(w)
defer zipWriter.Close()
for i, peer := range globalAdminPeers {
// Get profiling data from a node
data, err := peer.cmdRunner.DownloadProfilingData()
if err != nil {
logger.LogIf(context.Background(), fmt.Errorf("Unable to download profiling data from node `%s`, reason: %s", peer.addr, err.Error()))
continue
}
// Send profiling data to zip as file
header, err := zip.FileInfoHeader(dummyFileInfo{
name: fmt.Sprintf("profiling-%d", i),
size: int64(len(data)),
mode: 0600,
modTime: time.Now().UTC(),
isDir: false,
sys: nil,
})
if err != nil {
continue
}
writer, err := zipWriter.CreateHeader(header)
if err != nil {
continue
}
if _, err = io.Copy(writer, bytes.NewBuffer(data)); err != nil {
return
}
}
}
// extractHealInitParams - Validates params for heal init API.
func extractHealInitParams(r *http.Request) (bucket, objPrefix string,
hs madmin.HealOpts, clientToken string, forceStart bool,
err APIErrorCode) {
vars := mux.Vars(r)
bucket = vars[string(mgmtBucket)]
objPrefix = vars[string(mgmtPrefix)]
if bucket == "" {
if objPrefix != "" {
// Bucket is required if object-prefix is given
err = ErrHealMissingBucket
return
}
} else if !IsValidBucketName(bucket) {
err = ErrInvalidBucketName
return
}
// empty prefix is valid.
if !IsValidObjectPrefix(objPrefix) {
err = ErrInvalidObjectName
return
}
qParms := r.URL.Query()
if len(qParms[string(mgmtClientToken)]) > 0 {
clientToken = qParms[string(mgmtClientToken)][0]
}
if _, ok := qParms[string(mgmtForceStart)]; ok {
forceStart = true
}
// ignore body if clientToken is provided
if clientToken == "" {
jerr := json.NewDecoder(r.Body).Decode(&hs)
if jerr != nil {
logger.LogIf(context.Background(), jerr)
err = ErrRequestBodyParse
return
}
}
err = ErrNone
return
}
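// For illustration, assuming the router binds {bucket} and {prefix} path
// segments to the mgmtBucket/mgmtPrefix vars above, a fresh heal request
// would look like
//
//	POST /minio/admin/v1/heal/mybucket/myprefix?forceStart
//
// with a JSON-encoded madmin.HealOpts body, while a status poll reuses the
// same path with ?clientToken=<token> and an empty body.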
// HealHandler - POST /minio/admin/v1/heal/
// -----------
// Start heal processing and return heal status items.
//
// On a successful heal sequence start, a unique client token is
// returned. Subsequent requests to this endpoint providing the client
// token will receive heal status records from the running heal
// sequence.
//
// If no client token is provided, and a heal sequence is in progress
// an error is returned with information about the running heal
// sequence. However, if the force-start flag is provided, the server
// aborts the running heal sequence and starts a new one.
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "Heal")
// Get object layer instance.
objLayer := newObjectLayerFn()
if objLayer == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Check if this setup has an erasure coded backend.
if !globalIsXL {
writeErrorResponseJSON(w, ErrHealNotImplemented, r.URL)
return
}
bucket, objPrefix, hs, clientToken, forceStart, apiErr := extractHealInitParams(r)
if apiErr != ErrNone {
writeErrorResponseJSON(w, apiErr, r.URL)
return
}
type healResp struct {
respBytes []byte
errCode APIErrorCode
errBody string
}
// Define a closure to start sending whitespace to client
// after 10s unless a response item comes in
keepConnLive := func(w http.ResponseWriter, respCh chan healResp) {
ticker := time.NewTicker(time.Second * 10)
defer ticker.Stop()
started := false
forLoop:
for {
select {
case <-ticker.C:
if !started {
// Start writing response to client
started = true
setCommonHeaders(w)
w.Header().Set("Content-Type", string(mimeJSON))
// Set 200 OK status
w.WriteHeader(http.StatusOK)
}
// Send whitespace and keep connection open
w.Write([]byte("\n\r"))
w.(http.Flusher).Flush()
case hr := <-respCh:
switch {
case hr.errCode == ErrNone:
writeSuccessResponseJSON(w, hr.respBytes)
case hr.errBody == "":
writeErrorResponseJSON(w, hr.errCode, r.URL)
default:
writeCustomErrorResponseJSON(w, hr.errCode, hr.errBody, r.URL)
}
break forLoop
}
}
}
// find number of disks in the setup
info := objLayer.StorageInfo(ctx)
numDisks := info.Backend.OfflineDisks + info.Backend.OnlineDisks
if clientToken == "" {
// Not a status request
nh := newHealSequence(bucket, objPrefix, handlers.GetSourceIP(r),
numDisks, hs, forceStart)
respCh := make(chan healResp)
go func() {
respBytes, errCode, errMsg := globalAllHealState.LaunchNewHealSequence(nh)
hr := healResp{respBytes, errCode, errMsg}
respCh <- hr
}()
// Due to the force-starting functionality, the Launch
// call above can take a long time - to keep the
// connection alive, we start sending whitespace
keepConnLive(w, respCh)
} else {
// Since clientToken is given, fetch heal status from running
// heal sequence.
path := bucket + "/" + objPrefix
respBytes, errCode := globalAllHealState.PopHealStatusJSON(
path, clientToken)
if errCode != ErrNone {
writeErrorResponseJSON(w, errCode, r.URL)
} else {
writeSuccessResponseJSON(w, respBytes)
}
}
}
// GetConfigHandler - GET /minio/admin/v1/config
// Get config.json of this minio setup.
func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigHandler")
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
config, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
configData, err := json.MarshalIndent(config, "", "\t")
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
password := config.GetCredential().SecretKey
econfigData, err := madmin.EncryptServerConfigData(password, configData)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// Disable tidwall json array notation in JSON key path so
// users can set json with a key as a number.
// In tidwall json, notify.webhook.0 = val means { "notify" : { "webhook" : [val] }}
// In Minio, notify.webhook.0 = val means { "notify" : { "webhook" : {"0" : val}}}
func normalizeJSONKey(input string) (key string) {
subKeys := strings.Split(input, ".")
for i, k := range subKeys {
if i > 0 {
key += "."
}
if _, err := strconv.Atoi(k); err == nil {
key += ":" + k
} else {
key += k
}
}
return
}
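// For example, normalizeJSONKey("notify.webhook.0") returns
// "notify.webhook.:0"; the colon prefix tells sjson to treat "0" as an
// object key instead of an array index.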
// GetConfigHandler - GET /minio/admin/v1/config-keys
// Get some keys in config.json of this minio setup.
func (a adminAPIHandlers) GetConfigKeysHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetConfigKeysHandler")
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
var keys []string
queries := r.URL.Query()
for k := range queries {
keys = append(keys, k)
}
config, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
configData, err := json.Marshal(config)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
configStr := string(configData)
newConfigStr := `{}`
for _, key := range keys {
// sjson.Set does not return an error if key is empty
// we should check by ourselves here
if key == "" {
continue
}
val := gjson.Get(configStr, key)
if j, err := sjson.Set(newConfigStr, normalizeJSONKey(key), val.Value()); err == nil {
newConfigStr = j
}
}
password := config.GetCredential().SecretKey
econfigData, err := madmin.EncryptServerConfigData(password, []byte(newConfigStr))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
writeSuccessResponseJSON(w, econfigData)
}
// toAdminAPIErrCode - converts errXLWriteQuorum error to admin API
// specific error.
func toAdminAPIErrCode(err error) APIErrorCode {
switch err {
case errXLWriteQuorum:
return ErrAdminConfigNoQuorum
default:
return toAPIErrorCode(err)
}
}
// SetConfigHandler - PUT /minio/admin/v1/config
func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigHandler")
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(w, ErrMethodNotAllowed, r.URL)
return
}
// Read configuration bytes from request body.
configBuf := make([]byte, maxConfigJSONSize+1)
n, err := io.ReadFull(r.Body, configBuf)
if err == nil {
// More than maxConfigJSONSize bytes were available
writeErrorResponseJSON(w, ErrAdminConfigTooLarge, r.URL)
return
}
if err != io.ErrUnexpectedEOF {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
return
}
password := globalServerConfig.GetCredential().SecretKey
configBytes, err := madmin.DecryptServerConfigData(password, bytes.NewReader(configBuf[:n]))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, ErrAdminConfigBadJSON, r.URL)
return
}
// Validate JSON provided in the request body: check the
// client has not sent JSON objects with duplicate keys.
if err = quick.CheckDuplicateKeys(string(configBytes)); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, ErrAdminConfigBadJSON, r.URL)
return
}
var config serverConfig
err = json.Unmarshal(configBytes, &config)
if err != nil {
logger.LogIf(ctx, err)
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, err.Error(), r.URL)
return
}
// If credentials for the server are provided via environment,
// then credentials in the provided configuration must match.
if globalIsEnvCreds {
creds := globalServerConfig.GetCredential()
if config.Credential.AccessKey != creds.AccessKey ||
config.Credential.SecretKey != creds.SecretKey {
writeErrorResponseJSON(w, ErrAdminCredentialsMismatch, r.URL)
return
}
}
if err = config.Validate(); err != nil {
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, err.Error(), r.URL)
return
}
if err = saveServerConfig(ctx, objectAPI, &config); err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
// Reply to the client before restarting minio server.
writeSuccessResponseHeadersOnly(w)
sendServiceCmd(globalAdminPeers, serviceRestart)
}
func | (elem []byte, jsonType gjson.Type) (interface{}, error) {
str := string(elem)
switch jsonType {
case gjson.False, gjson.True:
return strconv.ParseBool(str)
case gjson.JSON:
return gjson.Parse(str).Value(), nil
case gjson.String:
return str, nil
case gjson.Number:
return strconv.ParseFloat(str, 64)
default:
return nil, nil
}
}
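// For example, an elem of []byte("true") with jsonType gjson.True converts
// to the bool true, and []byte("2.5") with gjson.Number converts to the
// float64 2.5; unknown types fall through to (nil, nil).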
// SetConfigKeysHandler - PUT /minio/admin/v1/config-keys
func (a adminAPIHandlers) SetConfigKeysHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetConfigKeysHandler")
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Deny if WORM is enabled
if globalWORMEnabled {
writeErrorResponseJSON(w, ErrMethodNotAllowed, r.URL)
return
}
// Validate request signature.
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Load config
configStruct, err := readServerConfig(ctx, objectAPI)
if err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
// Convert config to json bytes
configBytes, err := json.Marshal(configStruct)
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
configStr := string(configBytes)
queries := r.URL.Query()
password := globalServerConfig.GetCredential().SecretKey
// Set key values in the JSON config
for k := range queries {
// Decode encrypted data associated to the current key
encryptedElem, dErr := base64.StdEncoding.DecodeString(queries.Get(k))
if dErr != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("key", k)
ctx = logger.SetReqInfo(ctx, reqInfo)
logger.LogIf(ctx, dErr)
writeErrorResponseJSON(w, ErrAdminConfigBadJSON, r.URL)
return
}
elem, dErr := madmin.DecryptServerConfigData(password, bytes.NewBuffer(encryptedElem))
if dErr != nil {
logger.LogIf(ctx, dErr)
writeErrorResponseJSON(w, ErrAdminConfigBadJSON, r.URL)
return
}
// Calculate the type of the current key from the
// original config json
jsonFieldType := gjson.Get(configStr, k).Type
// Convert the passed value to the JSON field type
val, cErr := convertValueType(elem, jsonFieldType)
if cErr != nil {
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, cErr.Error(), r.URL)
return
}
// Set the key/value in the new json document
if s, sErr := sjson.Set(configStr, normalizeJSONKey(k), val); sErr == nil {
configStr = s
}
}
configBytes = []byte(configStr)
// Validate config
var config serverConfig
if err = json.Unmarshal(configBytes, &config); err != nil {
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, err.Error(), r.URL)
return
}
if err = config.Validate(); err != nil {
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, err.Error(), r.URL)
return
}
if err = config.TestNotificationTargets(); err != nil {
writeCustomErrorResponseJSON(w, ErrAdminConfigBadJSON, err.Error(), r.URL)
return
}
// If credentials for the server are provided via environment,
// then credentials in the provided configuration must match.
if globalIsEnvCreds {
creds := globalServerConfig.GetCredential()
if config.Credential.AccessKey != creds.AccessKey ||
config.Credential.SecretKey != creds.SecretKey {
writeErrorResponseJSON(w, ErrAdminCredentialsMismatch, r.URL)
return
}
}
if err = saveServerConfig(ctx, objectAPI, &config); err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
// Send success response
writeSuccessResponseHeadersOnly(w)
sendServiceCmd(globalAdminPeers, serviceRestart)
}
// UpdateCredentialsHandler - POST /minio/admin/v1/config/credential
// ----------
// Update credentials in a minio server. In a distributed setup,
// update all the servers in the cluster.
func (a adminAPIHandlers) UpdateCredentialsHandler(w http.ResponseWriter,
r *http.Request) {
ctx := newContext(r, w, "UpdateCredentialsHandler")
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil {
writeErrorResponseJSON(w, ErrServerNotInitialized, r.URL)
return
}
// Avoid setting new credentials when they are already passed
// by the environment. Deny if WORM is enabled.
if globalIsEnvCreds {
writeErrorResponseJSON(w, ErrMethodNotAllowed, r.URL)
return
}
// Authenticate request
adminAPIErr := checkAdminRequestAuthType(r, "")
if adminAPIErr != ErrNone {
writeErrorResponseJSON(w, adminAPIErr, r.URL)
return
}
// Read configuration bytes from request body.
configBuf := make([]byte, maxConfigJSONSize+1)
n, err := io.ReadFull(r.Body, configBuf)
if err == nil {
// More than maxConfigJSONSize bytes were available
writeErrorResponseJSON(w, ErrAdminConfigTooLarge, r.URL)
return
}
if err != io.ErrUnexpectedEOF {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
return
}
password := globalServerConfig.GetCredential().SecretKey
configBytes, err := madmin.DecryptServerConfigData(password, bytes.NewReader(configBuf[:n]))
if err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, ErrAdminConfigBadJSON, r.URL)
return
}
// Decode request body
var req madmin.SetCredsReq
if err = json.Unmarshal(configBytes, &req); err != nil {
logger.LogIf(ctx, err)
writeErrorResponseJSON(w, ErrRequestBodyParse, r.URL)
return
}
creds, err := auth.CreateCredentials(req.AccessKey, req.SecretKey)
if err != nil {
writeErrorResponseJSON(w, toAPIErrorCode(err), r.URL)
return
}
// Acquire lock before updating global configuration.
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
// Update local credentials in memory.
globalServerConfig.SetCredential(creds)
if err = saveServerConfig(ctx, objectAPI, globalServerConfig); err != nil {
writeErrorResponseJSON(w, toAdminAPIErrCode(err), r.URL)
return
}
// Notify all other Minio peers to update credentials
for host, err := range globalNotificationSys.LoadCredentials() {
if err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", host.String())
logger.LogIf(ctx, err)
}
}
// Reply to the client before restarting minio server.
writeSuccessResponseHeadersOnly(w)
}
| convertValueType |
mod.rs | mod apis;
mod incluster;
mod kube_config;
mod utils;
use anyhow::{Context, Error};
use reqwest::{header, Certificate, Client, Identity, Response};
use std::sync::Arc;
use tame_oauth::gcp::{ServiceAccountInfo, ServiceAccountProvider, TokenOrRequest};
use self::kube_config::KubeConfigLoader;
/// Configuration stores kubernetes path and client for requests.
pub struct Configuration {
base_path: String,
client: Client,
auth_provider: Option<AuthProvider>,
}
impl Configuration {
pub(crate) fn new(
base_path: String,
client: Client,
auth_provider: Option<AuthProvider>,
) -> Self {
Configuration {
base_path,
client,
auth_provider,
}
}
pub(crate) async fn client(
&self,
mut request: http::Request<Vec<u8>>,
) -> Result<Response, Error> {
let client = self.client.clone();
if let Some(ref auth_provider) = self.auth_provider {
let auth_value = auth_provider.get_auth_header(&client).await?;
request
.headers_mut()
.insert(header::AUTHORIZATION, auth_value);
}
let (parts, body) = request.into_parts();
let uri_str = format!("{}{}", self.base_path, parts.uri);
let send = async move {
let req_builder = match parts.method {
http::Method::GET => client.get(&uri_str),
http::Method::POST => client.post(&uri_str),
http::Method::DELETE => client.delete(&uri_str),
http::Method::PUT => client.put(&uri_str),
_other => {
unreachable!();
}
};
let req = req_builder.headers(parts.headers).body(body);
Ok(req.send().await?)
};
send.await
}
}
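// A minimal sketch of crate-internal usage (the URI is illustrative; only
// the types involved are defined in this module):
//
// let config = load_kube_config()?;
// let req = http::Request::get("/api/v1/namespaces").body(Vec::new())?;
// let res = config.client(req).await?;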
pub(crate) enum AuthProvider {
//Basic(header::HeaderValue),
Oauth2(Arc<ServiceAccountProvider>),
}
impl AuthProvider {
// fn with_username_password(username: &str, password: &str) -> Result<AuthProvider, Error> {
// let encoded = base64::encode(&format!("{}:{}", username, password));
// let hv = header::HeaderValue::from_str(&format!("Basic {}", encoded))?;
// Ok(AuthProvider::Basic(hv))
// }
fn with_service_key(key: ServiceAccountInfo) -> Result<AuthProvider, Error> {
let access = ServiceAccountProvider::new(key)?;
Ok(AuthProvider::Oauth2(Arc::new(access)))
}
async fn get_auth_header<'a>(
&'a self,
client: &'a Client,
) -> Result<header::HeaderValue, Error> {
use tame_oauth::gcp::TokenProvider;
match self {
//AuthProvider::Basic(hv) => Ok(hv.clone()),
AuthProvider::Oauth2(access) => {
let token = match access
.get_token(&["https://www.googleapis.com/auth/cloud-platform"])?
{
TokenOrRequest::Request {
request,
scope_hash,
..
} => {
let (parts, body) = request.into_parts();
let uri = parts.uri.to_string();
let builder = match parts.method {
http::Method::GET => client.get(&uri),
http::Method::POST => client.post(&uri),
http::Method::DELETE => client.delete(&uri),
http::Method::PUT => client.put(&uri),
method => unreachable!("{} not implemented", method),
};
let req = builder.headers(parts.headers).body(body).build()?;
let res = client.execute(req).await?;
let mut builder = http::Response::builder()
.status(res.status())
.version(res.version());
let headers = builder.headers_mut().context("invalid response headers")?;
headers.extend(
res.headers()
.into_iter()
.map(|(k, v)| (k.clone(), v.clone())),
);
let body = res.bytes().await?;
let response = builder.body(body)?;
access.parse_token_response(scope_hash, response)?
}
TokenOrRequest::Token(_) => unreachable!(),
};
use std::convert::TryInto;
Ok(token.try_into()?)
}
}
}
}
/// Returns a config includes authentication and cluster information from kubeconfig file.
pub fn load_kube_config() -> Result<Configuration, Error> |
/// Returns a config which is used by clients within pods on kubernetes.
/// It will return an error if called from out of kubernetes cluster.
pub fn incluster_config() -> Result<Configuration, Error> {
let server = incluster::kube_server().with_context(|| {
format!(
"Unable to load incluster config, {} and {} must be defined",
incluster::SERVICE_HOSTENV,
incluster::SERVICE_PORTENV,
)
})?;
let ca = incluster::load_cert()?;
let req_ca = Certificate::from_der(&ca.to_der()?)?;
let token = incluster::load_token()?;
let mut headers = header::HeaderMap::new();
headers.insert(
header::AUTHORIZATION,
header::HeaderValue::from_str(&format!("Bearer {}", token))?,
);
let client_builder = Client::builder()
.add_root_certificate(req_ca)
.default_headers(headers);
Ok(Configuration::new(server, client_builder.build()?, None))
}
| {
let kubeconfig = utils::kubeconfig_path()
.or_else(utils::default_kube_path)
.context("kubeconfig")?;
let loader = KubeConfigLoader::load(kubeconfig)?;
let mut client_builder = Client::builder();
if let Some(ca) = loader.ca() {
let req_ca = Certificate::from_der(&ca?.to_der()?)?;
client_builder = client_builder.add_root_certificate(req_ca);
}
match loader.p12(" ") {
Ok(p12) => {
let req_p12 = Identity::from_pkcs12_der(&p12.to_der()?, " ")?;
client_builder = client_builder.identity(req_p12);
}
Err(_) => {
// Last resort: only if the config asks for it and there are no client certs
if let Some(true) = loader.cluster.insecure_skip_tls_verify {
client_builder = client_builder.danger_accept_invalid_certs(true);
}
}
}
let auth_provider = match (
utils::data_or_file(&loader.user.token, &loader.user.token_file),
(loader.user.username, loader.user.password),
) {
(Ok(_), _) => {
let path = std::env::var_os("GOOGLE_APPLICATION_CREDENTIALS")
.map(std::path::PathBuf::from)
.context("missing GOOGLE_APPLICATION_CREDENTIALS")?;
let svc_acct_info = std::fs::read_to_string(path)?;
Some(AuthProvider::with_service_key(
ServiceAccountInfo::deserialize(svc_acct_info)?,
)?)
}
(_, (Some(u), Some(p))) => {
let mut headers = header::HeaderMap::new();
let encoded = base64::encode(&format!("{}:{}", u, p));
let hv = header::HeaderValue::from_str(&format!("Basic {}", encoded))?;
headers.insert(header::AUTHORIZATION, hv);
client_builder = client_builder.default_headers(headers);
None
}
_ => anyhow::bail!("unable to find an auth-provider"),
};
Ok(Configuration::new(
loader.cluster.server,
client_builder.build()?,
auth_provider,
))
} |
0001_initial.py | # Generated by Django 2.2.4 on 2019-09-01 00:56
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class | (migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Color',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
('rgb', models.CharField(max_length=6)),
('transparent', models.BooleanField()),
],
),
migrations.CreateModel(
name='PartCategory',
fields=[
('id', models.IntegerField(editable=False, primary_key=True, serialize=False)),
('name', models.CharField(max_length=200, unique=True)),
],
),
migrations.CreateModel(
name='Part',
fields=[
('part_num', models.CharField(max_length=20, primary_key=True, serialize=False)),
('name', models.CharField(max_length=250)),
('width', models.PositiveIntegerField(blank=True, null=True)),
('height', models.PositiveIntegerField(blank=True, null=True)),
('length', models.PositiveIntegerField(blank=True, null=True)),
('stud_count', models.PositiveIntegerField(blank=True, null=True)),
('multi_height', models.BooleanField(blank=True, null=True)),
('uneven_dimensions', models.BooleanField(blank=True, null=True)),
('category_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parts', to='inventory.PartCategory')),
],
),
migrations.CreateModel(
name='UserPart',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('color', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Color')),
('part_num', models.ForeignKey(db_column='part_num_id', on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to='inventory.Part')),
('user_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user_parts', to=settings.AUTH_USER_MODEL)),
],
options={
'unique_together': {('user_id', 'part_num', 'color')},
},
),
]
| Migration |
godef.go | package main
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"go/build"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/rogpeppe/godef/go/ast"
"github.com/rogpeppe/godef/go/parser"
"github.com/rogpeppe/godef/go/printer"
"github.com/rogpeppe/godef/go/token"
"github.com/rogpeppe/godef/go/types"
)
var readStdin = flag.Bool("i", false, "read file from stdin")
var offset = flag.Int("o", -1, "file offset of identifier in stdin")
var debug = flag.Bool("debug", false, "debug mode")
var tflag = flag.Bool("t", false, "print type information")
var aflag = flag.Bool("a", false, "print public type and member information")
var Aflag = flag.Bool("A", false, "print all type and members information")
var fflag = flag.String("f", "", "Go source filename")
var acmeFlag = flag.Bool("acme", false, "use current acme window")
var jsonFlag = flag.Bool("json", false, "output location in JSON format (-t flag is ignored)")
func fail(s string, a ...interface{}) {
fmt.Fprint(os.Stderr, "godef: "+fmt.Sprintf(s, a...)+"\n")
os.Exit(2)
}
func main() {
flag.Usage = func() {
fmt.Fprintf(os.Stderr, "usage: godef [flags] [expr]\n")
flag.PrintDefaults()
}
flag.Parse()
if flag.NArg() > 1 {
flag.Usage()
os.Exit(2)
}
types.Debug = *debug
*tflag = *tflag || *aflag || *Aflag
searchpos := *offset
filename := *fflag
var afile *acmeFile
var src []byte
if *acmeFlag {
var err error
if afile, err = acmeCurrentFile(); err != nil {
fail("%v", err)
}
filename, src, searchpos = afile.name, afile.body, afile.offset
} else if *readStdin {
src, _ = ioutil.ReadAll(os.Stdin)
} else {
// TODO if there's no filename, look in the current
// directory and do something plausible.
b, err := ioutil.ReadFile(filename)
if err != nil {
fail("cannot read %s: %v", filename, err)
}
src = b
}
pkgScope := ast.NewScope(parser.Universe)
f, err := parser.ParseFile(types.FileSet, filename, src, 0, pkgScope, types.DefaultImportPathToName)
if f == nil {
fail("cannot parse %s: %v", filename, err)
}
var o ast.Node
switch {
case flag.NArg() > 0:
o = parseExpr(f.Scope, flag.Arg(0))
case searchpos >= 0:
o = findIdentifier(f, searchpos)
default:
fmt.Fprintf(os.Stderr, "no expression or offset specified\n")
flag.Usage()
os.Exit(2)
}
// print old source location to facilitate backtracking
if *acmeFlag {
fmt.Printf("\t%s:#%d\n", afile.name, afile.runeOffset)
}
switch e := o.(type) {
case *ast.ImportSpec:
path := importPath(e)
pkg, err := build.Default.Import(path, filepath.Dir(filename), build.FindOnly)
if err != nil {
fail("error finding import path for %s: %s", path, err)
}
fmt.Println(pkg.Dir)
case ast.Expr:
if !*tflag {
// try local declarations only
if obj, typ := types.ExprType(e, types.DefaultImporter, types.FileSet); obj != nil {
done(obj, typ)
}
}
// add declarations from other files in the local package and try again
pkg, err := parseLocalPackage(filename, f, pkgScope, types.DefaultImportPathToName)
if pkg == nil && !*tflag {
fmt.Printf("parseLocalPackage error: %v\n", err)
}
if flag.NArg() > 0 {
// Reading declarations in other files might have
// resolved the original expression.
e = parseExpr(f.Scope, flag.Arg(0)).(ast.Expr)
}
if obj, typ := types.ExprType(e, types.DefaultImporter, types.FileSet); obj != nil {
done(obj, typ)
}
fail("no declaration found for %v", pretty{e})
}
}
func importPath(n *ast.ImportSpec) string {
p, err := strconv.Unquote(n.Path.Value)
if err != nil {
fail("invalid string literal %q in ast.ImportSpec", n.Path.Value)
}
return p
}
// findIdentifier looks for an identifier at byte-offset searchpos
// inside the parsed source represented by node.
// If it is part of a selector expression, it returns
// that expression rather than the identifier itself.
//
// As a special case, if it finds an import
// spec, it returns ImportSpec.
//
func findIdentifier(f *ast.File, searchpos int) ast.Node {
ec := make(chan ast.Node)
found := func(startPos, endPos token.Pos) bool {
start := types.FileSet.Position(startPos).Offset
end := start + int(endPos-startPos)
return start <= searchpos && searchpos <= end
}
go func() {
var visit func(ast.Node) bool
visit = func(n ast.Node) bool {
var startPos token.Pos
switch n := n.(type) {
default:
return true
case *ast.Ident:
startPos = n.NamePos
case *ast.SelectorExpr:
startPos = n.Sel.NamePos
case *ast.ImportSpec:
startPos = n.Pos()
case *ast.StructType:
// If we find an anonymous bare field in a
// struct type, its definition points to itself,
// but we actually want to go elsewhere,
// so assume (dubiously) that the expression
// works globally and return a new node for it.
for _, field := range n.Fields.List {
if field.Names != nil {
continue
}
t := field.Type
if pt, ok := field.Type.(*ast.StarExpr); ok {
t = pt.X
}
if id, ok := t.(*ast.Ident); ok {
if found(id.NamePos, id.End()) {
ec <- parseExpr(f.Scope, id.Name)
runtime.Goexit()
}
}
}
return true
}
if found(startPos, n.End()) {
ec <- n
runtime.Goexit()
}
return true
}
ast.Walk(FVisitor(visit), f)
ec <- nil
}()
ev := <-ec
if ev == nil {
fail("no identifier found")
}
return ev
}
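// For example, with source containing "fmt.Println(x)" and searchpos
// inside "Println", findIdentifier returns the *ast.SelectorExpr for
// fmt.Println rather than the bare *ast.Ident.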
type orderedObjects []*ast.Object
func (o orderedObjects) Less(i, j int) bool { return o[i].Name < o[j].Name }
func (o orderedObjects) Len() int { return len(o) }
func (o orderedObjects) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func done(obj *ast.Object, typ types.Type) {
defer os.Exit(0)
pos := types.FileSet.Position(types.DeclPos(obj))
if *jsonFlag {
p := struct {
Filename string `json:"filename,omitempty"`
Line int `json:"line,omitempty"`
Column int `json:"column,omitempty"`
}{
Filename: pos.Filename,
Line: pos.Line,
Column: pos.Column,
}
jsonStr, err := json.Marshal(p)
if err != nil {
fail("JSON marshal error: %v", err)
}
fmt.Printf("%s\n", jsonStr)
return
}
fmt.Printf("%v\n", pos)
if typ.Kind == ast.Bad || !*tflag {
return
}
fmt.Printf("%s\n", strings.Replace(typeStr(obj, typ), "\n", "\n\t", -1))
if *aflag || *Aflag {
var m orderedObjects
for obj := range typ.Iter() {
m = append(m, obj)
}
sort.Sort(m)
for _, obj := range m {
// Ignore unexported members unless Aflag is set.
if !*Aflag && (typ.Pkg != "" || !ast.IsExported(obj.Name)) {
continue
}
id := ast.NewIdent(obj.Name)
id.Obj = obj
_, mt := types.ExprType(id, types.DefaultImporter, types.FileSet)
fmt.Printf("\t%s\n", strings.Replace(typeStr(obj, mt), "\n", "\n\t\t", -1))
fmt.Printf("\t\t%v\n", types.FileSet.Position(types.DeclPos(obj)))
}
}
}
func typeStr(obj *ast.Object, typ types.Type) string {
switch obj.Kind {
case ast.Fun, ast.Var:
return fmt.Sprintf("%s %v", obj.Name, prettyType{typ})
case ast.Pkg:
return fmt.Sprintf("import (%s %s)", obj.Name, typ.Node.(*ast.ImportSpec).Path.Value)
case ast.Con:
if decl, ok := obj.Decl.(*ast.ValueSpec); ok {
return fmt.Sprintf("const %s %v = %s", obj.Name, prettyType{typ}, pretty{decl.Values[0]})
}
return fmt.Sprintf("const %s %v", obj.Name, prettyType{typ})
case ast.Lbl:
return fmt.Sprintf("label %s", obj.Name)
case ast.Typ:
typ = typ.Underlying(false)
return fmt.Sprintf("type %s %v", obj.Name, prettyType{typ})
}
return fmt.Sprintf("unknown %s %v", obj.Name, typ.Kind)
}
func parseExpr(s *ast.Scope, expr string) ast.Expr {
n, err := parser.ParseExpr(types.FileSet, "<arg>", expr, s, types.DefaultImportPathToName)
if err != nil {
fail("cannot parse expression: %v", err)
}
switch n := n.(type) {
case *ast.Ident, *ast.SelectorExpr:
return n
}
fail("no identifier found in expression")
return nil
}
type FVisitor func(n ast.Node) bool
func (f FVisitor) Visit(n ast.Node) ast.Visitor {
if f(n) {
return f
}
return nil
}
var errNoPkgFiles = errors.New("no more package files found")
// parseLocalPackage reads and parses all go files from the
// current directory that implement the same package name as
// the principal source file, except the original source file
// itself, which will already have been parsed.
//
func parseLocalPackage(filename string, src *ast.File, pkgScope *ast.Scope, pathToName parser.ImportPathToName) (*ast.Package, error) {
pkg := &ast.Package{src.Name.Name, pkgScope, nil, map[string]*ast.File{filename: src}}
d, f := filepath.Split(filename)
if d == "" {
d = "./"
}
fd, err := os.Open(d)
if err != nil {
return nil, errNoPkgFiles
}
defer fd.Close()
list, err := fd.Readdirnames(-1)
if err != nil {
return nil, errNoPkgFiles
}
for _, pf := range list {
file := filepath.Join(d, pf)
if !strings.HasSuffix(pf, ".go") ||
pf == f ||
pkgName(file) != pkg.Name {
continue
}
src, err := parser.ParseFile(types.FileSet, file, nil, 0, pkg.Scope, types.DefaultImportPathToName)
if err == nil {
pkg.Files[file] = src
}
}
if len(pkg.Files) == 1 {
return nil, errNoPkgFiles
}
return pkg, nil
}
// pkgName returns the package name declared by the
// given Go source file.
//
func pkgName(filename string) string {
prog, _ := parser.ParseFile(types.FileSet, filename, nil, parser.PackageClauseOnly, nil, types.DefaultImportPathToName)
if prog != nil {
return prog.Name.Name
}
return ""
}
func | (s, suff string) bool {
return len(s) >= len(suff) && s[len(s)-len(suff):] == suff
}
type pretty struct {
n interface{}
}
func (p pretty) String() string {
var b bytes.Buffer
printer.Fprint(&b, types.FileSet, p.n)
return b.String()
}
type prettyType struct {
n types.Type
}
func (p prettyType) String() string {
// TODO print path package when appropriate.
// Current issues with using p.n.Pkg:
// - we should actually print the local package identifier
// rather than the package path when possible.
// - p.n.Pkg is non-empty even when
// the type is not relative to the package.
return pretty{p.n.Node}.String()
}
| hasSuffix |
util.py | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Helpful routines for regression testing."""
from base64 import b64encode
from binascii import hexlify, unhexlify
from decimal import Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
"""Assert the fee was in range"""
target_fee = round(tx_size * fee_per_kB / 1000, 8)
if fee < target_fee:
raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
# allow the wallet's estimation to be at most 20 bytes off
if fee > (tx_size + 20) * fee_per_kB / 1000:
raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
if thing1 != thing2 or any(thing1 != arg for arg in args):
raise AssertionError("not(%s)" % " == ".join(str(arg) for arg in (thing1, thing2) + args))
def assert_greater_than(thing1, thing2):
if thing1 <= thing2:
raise AssertionError("%s <= %s" % (str(thing1), str(thing2)))
def assert_greater_than_or_equal(thing1, thing2):
if thing1 < thing2:
raise AssertionError("%s < %s" % (str(thing1), str(thing2)))
def assert_raises(exc, fun, *args, **kwds):
assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
try:
fun(*args, **kwds)
except JSONRPCException:
raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
except exc as e:
if message is not None and message not in e.error['message']:
raise AssertionError("Expected substring not found:" + e.error['message'])
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
"""Execute a process and asserts the process return code and output.
Calls function `fun` with arguments `args` and `kwds`. Catches a CalledProcessError
and verifies that the return code and output are as expected. Throws AssertionError if
no CalledProcessError was raised or if the return code and output are not as expected.
Args:
returncode (int): the process return code.
output (string): [a substring of] the process output.
fun (function): the function to call. This should execute a process.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
try:
fun(*args, **kwds)
except CalledProcessError as e:
if returncode != e.returncode:
raise AssertionError("Unexpected returncode %i" % e.returncode)
if output not in e.output:
raise AssertionError("Expected substring not found:" + e.output)
else:
raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
"""Run an RPC and verify that a specific JSONRPC exception code and message is raised.
Calls function `fun` with arguments `args` and `kwds`. Catches a JSONRPCException
and verifies that the error code and message are as expected. Throws AssertionError if
no JSONRPCException was raised or if the error code/message are not as expected.
Args:
code (int), optional: the error code returned by the RPC call (defined
in src/rpc/protocol.h). Set to None if checking the error code is not required.
message (string), optional: [a substring of] the error string returned by the
RPC call. Set to None if checking the error string is not required.
fun (function): the function to call. This should be the name of an RPC.
args*: positional arguments for the function.
kwds**: named arguments for the function.
"""
assert try_rpc(code, message, fun, *args, **kwds), "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
"""Tries to run an rpc command.
Test against error code and message if the rpc fails.
Returns whether a JSONRPCException was raised."""
try:
fun(*args, **kwds)
except JSONRPCException as e:
# JSONRPCException was thrown as expected. Check the code and message values are correct.
if (code is not None) and (code != e.error["code"]):
raise AssertionError("Unexpected JSONRPC error code %i" % e.error["code"])
if (message is not None) and (message not in e.error['message']):
raise AssertionError("Expected substring (%s) not found in: %s" % (message, e.error['message']))
return True
except Exception as e:
raise AssertionError("Unexpected exception raised: " + type(e).__name__)
else:
return False
def assert_is_hex_string(string):
try:
int(string, 16)
except Exception as e:
raise AssertionError(
"Couldn't interpret %r as hexadecimal; raised: %s" % (string, e))
def assert_is_hash_string(string, length=64):
if not isinstance(string, str):
raise AssertionError("Expected a string, got type %r" % type(string))
elif length and len(string) != length:
raise AssertionError(
"String of length %d expected; got %d" % (length, len(string)))
elif not re.match('[abcdef0-9]+$', string):
raise AssertionError(
"String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
"""
Pass in array of JSON objects, a dictionary with key/value pairs
to match against, and another dictionary with expected key/value
pairs.
If the should_not_find flag is true, to_match should not be found
in object_array
"""
if should_not_find:
assert_equal(expected, {})
num_matched = 0
for item in object_array:
all_match = True
for key, value in to_match.items():
if item[key] != value:
all_match = False
if not all_match:
continue
elif should_not_find:
num_matched = num_matched + 1
for key, value in expected.items():
if item[key] != value:
raise AssertionError("%s : expected %s=%s" % (str(item), str(key), str(value)))
num_matched = num_matched + 1
if num_matched == 0 and not should_not_find:
raise AssertionError("No objects matched %s" % (str(to_match)))
if num_matched > 0 and should_not_find:
raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n))) * 1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
return len(bytearray.fromhex(hex_string))
def bytes_to_hex_str(byte_str):
return hexlify(byte_str).decode('ascii')
def hash256(byte_str):
sha256 = hashlib.sha256()
sha256.update(byte_str)
sha256d = hashlib.sha256()
sha256d.update(sha256.digest())
return sha256d.digest()[::-1]
def hex_str_to_bytes(hex_str):
return unhexlify(hex_str.encode('ascii'))
def str_to_b64str(string):
|
def satoshi_round(amount):
return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
attempt += 1
time.sleep(0.5)
# Print the cause of the timeout
assert_greater_than(attempts, attempt)
assert_greater_than(timeout, time.time())
raise RuntimeError('Unreachable')
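# Example usage (the predicate is hypothetical):
#   wait_until(lambda: node.getblockcount() >= 200, timeout=30)
# polls every 0.5s and fails via the assert_greater_than calls above if the
# predicate is still false after 30 seconds.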
# RPC/P2P connection constants and functions
############################################
# The maximum number of nodes a single test can spawn
MAX_NODES = 8
# Don't assign rpc or p2p ports lower than this
PORT_MIN = 11000
# The number of ports to "reserve" for p2p and rpc, each
PORT_RANGE = 5000
class PortSeed:
# Must be initialized with a unique integer for each process
n = None
def get_rpc_proxy(url, node_number, timeout=None, coveragedir=None):
"""
Args:
url (str): URL of the RPC server to call
node_number (int): the node number (or id) that this calls to
Kwargs:
timeout (int): HTTP timeout in seconds
Returns:
AuthServiceProxy. convenience object for making RPC calls.
"""
proxy_kwargs = {}
if timeout is not None:
proxy_kwargs['timeout'] = timeout
proxy = AuthServiceProxy(url, **proxy_kwargs)
proxy.url = url # store URL on proxy for info
coverage_logfile = coverage.get_filename(
coveragedir, node_number) if coveragedir else None
return coverage.AuthServiceProxyWrapper(proxy, coverage_logfile)
def p2p_port(n):
assert(n <= MAX_NODES)
return PORT_MIN + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
def rpc_port(n):
return PORT_MIN + PORT_RANGE + n + (MAX_NODES * PortSeed.n) % (PORT_RANGE - 1 - MAX_NODES)
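# Example: with PortSeed.n = 1, node 0 gets p2p port
# 11000 + 0 + (8 * 1) % 4991 = 11008 and rpc port 16008, keeping the two
# port ranges disjoint across up to MAX_NODES nodes.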
def rpc_url(datadir, i, rpchost=None):
rpc_u, rpc_p = get_auth_cookie(datadir)
host = '127.0.0.1'
port = rpc_port(i)
if rpchost:
parts = rpchost.split(':')
if len(parts) == 2:
host, port = parts
else:
host = rpchost
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
# Node functions
################
def initialize_datadir(dirname, n):
datadir = get_datadir_path(dirname, n)
if not os.path.isdir(datadir):
os.makedirs(datadir)
rpc_u, rpc_p = rpc_auth_pair(n)
with open(os.path.join(datadir, "epgc.conf"), 'w', encoding='utf8') as f:
f.write("regtest=1\n")
f.write("rpcuser=" + rpc_u + "\n")
f.write("rpcpassword=" + rpc_p + "\n")
f.write("port=" + str(p2p_port(n)) + "\n")
f.write("rpcport=" + str(rpc_port(n)) + "\n")
f.write("listenonion=0\n")
f.write("enablezeromint=0\n")
f.write("staking=0\n")
f.write("spendzeroconfchange=1\n")
return datadir
def rpc_auth_pair(n):
return 'rpcuser💻' + str(n), 'rpcpass🔑' + str(n)
def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
def get_auth_cookie(datadir):
user = None
password = None
if os.path.isfile(os.path.join(datadir, "epgc.conf")):
with open(os.path.join(datadir, "epgc.conf"), 'r', encoding='utf8') as f:
for line in f:
if line.startswith("rpcuser="):
assert user is None # Ensure that there is only one rpcuser line
user = line.split("=")[1].strip("\n")
if line.startswith("rpcpassword="):
assert password is None # Ensure that there is only one rpcpassword line
password = line.split("=")[1].strip("\n")
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
with open(os.path.join(datadir, "regtest", ".cookie"), 'r') as f:
userpass = f.read()
split_userpass = userpass.split(':')
user = split_userpass[0]
password = split_userpass[1]
if user is None or password is None:
raise ValueError("No RPC credentials")
return user, password
# If a cookie file exists in the given datadir, delete it.
def delete_cookie_file(datadir):
if os.path.isfile(os.path.join(datadir, "regtest", ".cookie")):
logger.debug("Deleting leftover cookie file")
os.remove(os.path.join(datadir, "regtest", ".cookie"))
def get_bip9_status(node, key):
info = node.getblockchaininfo()
return info['bip9_softforks'][key]
def set_node_times(nodes, t):
for node in nodes:
node.setmocktime(t)
def disconnect_nodes(from_connection, node_num):
for addr in [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']]:
try:
from_connection.disconnectnode(addr)
except JSONRPCException as e:
# If this node is disconnected between calculating the peer id
# and issuing the disconnect, don't worry about it.
# This avoids a race condition if we're mass-disconnecting peers.
if e.error['code'] != -29: # RPC_CLIENT_NODE_NOT_CONNECTED
raise
# wait to disconnect
wait_until(lambda: [peer['addr'] for peer in from_connection.getpeerinfo() if "testnode%d" % node_num in peer['subver']] == [], timeout=5)
def connect_nodes(from_connection, node_num):
ip_port = "127.0.0.1:" + str(p2p_port(node_num))
from_connection.addnode(ip_port, "onetry")
# poll until version handshake complete to avoid race conditions
# with transaction relaying
wait_until(lambda: all(peer['version'] != 0 for peer in from_connection.getpeerinfo()))
def connect_nodes_bi(nodes, a, b):
connect_nodes(nodes[a], b)
connect_nodes(nodes[b], a)
def connect_nodes_clique(nodes):
l = len(nodes)
for a in range(l):
for b in range(a, l):
connect_nodes_bi(nodes, a, b)
def sync_blocks(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same tip.
sync_blocks needs to be called with an rpc_connections set that has at least
one node already synced to the latest, stable tip, otherwise there's a
chance it might return before all nodes are stably synced.
"""
# Use getblockcount() instead of waitforblockheight() to determine the
# initial max height because the two RPCs look at different internal global
# variables (chainActive vs latestBlock) and the former gets updated
# earlier.
time.sleep(5)
maxheight = max(x.getblockcount() for x in rpc_connections)
start_time = cur_time = time.time()
while cur_time <= start_time + timeout:
tips = [r.waitforblockheight(maxheight, int(wait * 1000)) for r in rpc_connections]
if all(t["height"] == maxheight for t in tips):
if all(t["hash"] == tips[0]["hash"] for t in tips):
return
raise AssertionError("Block sync failed, mismatched block hashes:{}".format(
"".join("\n {!r}".format(tip) for tip in tips)))
cur_time = time.time()
raise AssertionError("Block sync to height {} timed out:{}".format(
maxheight, "".join("\n {!r}".format(tip) for tip in tips)))
def sync_chain(rpc_connections, *, wait=1, timeout=60):
"""
Wait until everybody has the same best block
"""
while timeout > 0:
best_hash = [x.getbestblockhash() for x in rpc_connections]
if best_hash == [best_hash[0]] * len(best_hash):
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Chain sync failed: Best block hashes don't match")
def sync_mempools(rpc_connections, *, wait=1, timeout=60, flush_scheduler=True):
"""
Wait until everybody has the same transactions in their memory
pools
"""
while timeout > 0:
pool = set(rpc_connections[0].getrawmempool())
num_match = 1
for i in range(1, len(rpc_connections)):
if set(rpc_connections[i].getrawmempool()) == pool:
num_match = num_match + 1
if num_match == len(rpc_connections):
#if flush_scheduler:
#for r in rpc_connections:
# r.syncwithvalidationinterfacequeue()
return
time.sleep(wait)
timeout -= wait
raise AssertionError("Mempool sync failed")
# Transaction/Block functions
#############################
def find_output(node, txid, amount):
"""
Return index to output of txid with value amount
Raises exception if there is none.
"""
txdata = node.getrawtransaction(txid, 1)
for i in range(len(txdata["vout"])):
if txdata["vout"][i]["value"] == amount:
return i
raise RuntimeError("find_output txid %s : %s not found" % (txid, str(amount)))
def gather_inputs(from_node, amount_needed, confirmations_required=1):
"""
Return a random set of unspent txouts that are enough to pay amount_needed
"""
assert(confirmations_required >= 0)
utxo = from_node.listunspent(confirmations_required)
random.shuffle(utxo)
inputs = []
total_in = Decimal("0.00000000")
while total_in < amount_needed and len(utxo) > 0:
t = utxo.pop()
total_in += t["amount"]
inputs.append({"txid": t["txid"], "vout": t["vout"], "address": t["address"]})
if total_in < amount_needed:
raise RuntimeError("Insufficient funds: need %d, have %d" % (amount_needed, total_in))
return (total_in, inputs)
def make_change(from_node, amount_in, amount_out, fee):
"""
Create change output(s), return them
"""
outputs = {}
amount = amount_out + fee
change = amount_in - amount
if change > amount * 2:
# Create an extra change output to break up big inputs
change_address = from_node.getnewaddress()
# Split change in two, being careful of rounding:
outputs[change_address] = Decimal(change / 2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)
change = amount_in - amount - outputs[change_address]
if change > 0:
outputs[from_node.getnewaddress()] = change
return outputs
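# Worked example: with amount_in=10, amount_out=2 and fee=0.1, amount=2.1 and
# change=7.9; since 7.9 > 2 * 2.1, the change is split into two outputs of
# 3.95 each (the first quantized down to 8 decimal places).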
def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants):
"""
Create a random transaction.
Returns (txid, hex-encoded-transaction-data, fee)
"""
from_node = random.choice(nodes)
to_node = random.choice(nodes)
fee = min_fee + fee_increment * random.randint(0, fee_variants)
(total_in, inputs) = gather_inputs(from_node, amount + fee)
outputs = make_change(from_node, total_in, amount, fee)
outputs[to_node.getnewaddress()] = float(amount)
rawtx = from_node.createrawtransaction(inputs, outputs)
signresult = from_node.signrawtransaction(rawtx)
txid = from_node.sendrawtransaction(signresult["hex"], True)
return (txid, signresult["hex"], fee)
# Helper to create at least "count" utxos
# Pass in a fee that is sufficient for relay and mining new transactions.
def create_confirmed_utxos(fee, node, count):
to_generate = int(0.5 * count) + 101
while to_generate > 0:
node.generate(min(25, to_generate))
to_generate -= 25
utxos = node.listunspent()
iterations = count - len(utxos)
addr1 = node.getnewaddress()
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
for i in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
outputs = {}
send_value = t['amount'] - fee
outputs[addr1] = float(satoshi_round(send_value / 2))
outputs[addr2] = float(satoshi_round(send_value / 2))
raw_tx = node.createrawtransaction(inputs, outputs)
signed_tx = node.signrawtransaction(raw_tx)["hex"]
node.sendrawtransaction(signed_tx)
while (node.getmempoolinfo()['size'] > 0):
node.generate(1)
utxos = node.listunspent()
assert(len(utxos) >= count)
return utxos
# Create large OP_RETURN txouts that can be appended to a transaction
# to make it large (helper for constructing large transactions).
def gen_return_txouts():
# Some pre-processing to create a bunch of OP_RETURN txouts to insert into transactions we create
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
for i in range(512):
script_pubkey = script_pubkey + "01"
    # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
    txouts = "81"  # compact-size output count: 0x81 = 129 (128 OP_RETURN outputs + 1 change output)
for k in range(128):
# add txout value
txouts = txouts + "0000000000000000"
# add length of script_pubkey
txouts = txouts + "fd0402"
# add script_pubkey
txouts = txouts + script_pubkey
return txouts
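# Layout of the hex above: "6a" is OP_RETURN and "4d0200" is OP_PUSHDATA2 with
# a little-endian length of 512, so each script is 4 + 512 = 516 bytes, which
# matches the compact-size length prefix "fd0402" (0x0204). Each txout is an
# 8-byte zero value followed by that script; create_lots_of_big_transactions()
# below splices these txouts in ahead of the change output.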
def create_tx(node, coinbase, to_address, amount):
inputs = [{"txid": coinbase, "vout": 0}]
outputs = {to_address: amount}
rawtx = node.createrawtransaction(inputs, outputs)
signresult = node.signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
# Create a spend of each passed-in utxo, splicing in "txouts" to each raw
# transaction to make it large. See gen_return_txouts() above.
def create_lots_of_big_transactions(node, txouts, utxos, num, fee):
addr = node.getnewaddress()
txids = []
for _ in range(num):
t = utxos.pop()
inputs = [{"txid": t["txid"], "vout": t["vout"]}]
outputs = {}
change = t['amount'] - fee
outputs[addr] = float(satoshi_round(change))
rawtx = node.createrawtransaction(inputs, outputs)
newtx = rawtx[0:92]
newtx = newtx + txouts
newtx = newtx + rawtx[94:]
signresult = node.signrawtransaction(newtx, None, None, "NONE")
txid = node.sendrawtransaction(signresult["hex"], True)
txids.append(txid)
return txids
def mine_large_block(node, utxos=None):
    # generate 66k transactions,
    # and 14 of them are close to the 1MB block limit
num = 14
txouts = gen_return_txouts()
utxos = utxos if utxos is not None else []
if len(utxos) < num:
utxos.clear()
utxos.extend(node.listunspent())
fee = 100 * node.getnetworkinfo()["relayfee"]
create_lots_of_big_transactions(node, txouts, utxos, num, fee=fee)
node.generate(1)
### EPGC specific utils ###
vZC_DENOMS = [1, 5, 10, 50, 100, 500, 1000, 5000]
DEFAULT_FEE = 0.01
SPORK_ACTIVATION_TIME = 1563253447
SPORK_DEACTIVATION_TIME = 4070908800
def DecimalAmt(x):
"""Return Decimal from float for equality checks against rpc outputs"""
return Decimal("{:0.8f}".format(x))
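# e.g. DecimalAmt(0.1) == Decimal("0.10000000"), matching the 8-decimal
# amounts returned over RPC.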
| return b64encode(string.encode('utf-8')).decode('ascii') |
plot_thesis.py | from functools import partial
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn.apionly as sns
from ..analysis.csv_analysis import analyze_data, load_surveys
from ..data.survey_utils import ExperimentType
from .latexify import latexify, figure, fig_size
from .plot_tools import plot_detailed, plot_distribution, plot_overview
# Colours
default_cycler = plt.rcParamsDefault["axes.prop_cycle"]
colorblind_cmaps = ["Dark2", "Set2"]
cmap_main, cmap_complement = colorblind_cmaps
# cmap_main, cmap_complement = cmap_complement, cmap_main
colorblind_cyclers = {cmap: plt.cycler("color", plt.cm.get_cmap(cmap).colors)
for cmap in colorblind_cmaps}
plt.rcParams["axes.prop_cycle"] = colorblind_cyclers[cmap_main]
FIGURE_DIR = Path(__file__).parent.joinpath("../../reports/thesis/img/plots")
figure = partial(figure, folder=FIGURE_DIR, exts=["pdf", "pgf"])
def do_drone_dos():
with figure("ardrone_dos", size=fig_size(0.45)):
distances = np.array([0, 2, 8, 18, 23, 29, 34, 40,
45, 51, 56, 62, 67, 72, 78, 80])
powers = np.array([90, 90, 86, 60, 50, 62, 35, 26,
24, 12, 20, 22, 26, 22, 12, 5])
fig, ax1 = plt.subplots()
ax1.step(distances, powers, lw=0.5)
ax1.set_xlabel("distance (m)")
ax1.set_ylabel(r"signal (\%)")
ax1.set_ylim(0, 100)
x_range = np.arange(80)
best_fit = 10 * np.log10(6 / (1e5 * x_range**2.7))
ax2 = ax1.twinx()
ax2.plot(x_range, best_fit, c="C1", lw=0.5)
ax2.set_ylim(-100, -50)
ax2.yaxis.set_tick_params(which="both", labelright=False, right=False)
plt.legend([ax.get_children()[0] for ax in (ax1, ax2)], ["data", "fit"])
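# The fit above is a log-distance path-loss model: signal strength in dB falls
# off as 10 * log10(1 / x**2.7), i.e. a path-loss exponent of 2.7.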
def do_paths():
with figure("paths_overview", size=fig_size(0.75, 0.8)):
ax1 = plt.subplot("121")
plot_overview(results, ExperimentType.Onboard, color="C0", size_point=2,
drone_width=0.5)
ax2 = plt.subplot("122", sharex=ax1, sharey=ax1)
plot_overview(results, ExperimentType.Spirit, color="C1", size_point=2,
ylabel="", drone_width=0.5)
plt.setp(ax2.get_yticklabels(), visible=False)
with figure("paths_detailed", size=fig_size(0.75, 0.7)):
ax1 = plt.subplot("121")
plot_detailed(results, ExperimentType.Onboard, color="C0",
size_point=2, crosshair=True, drone_width=0.5)
ax1.legend_.remove()
ax2 = plt.subplot("122", sharex=ax1, sharey=ax1)
plot_detailed(results, ExperimentType.Spirit, color="C1", ylabel="",
size_point=2, crosshair=True, drone_width=0.5)
ax2.legend_.remove()
plt.setp(ax2.get_yticklabels(), visible=False)
def do_distributions():
with figure("distribution_onboard", size=fig_size(0.44, 1)):
plot_distribution(results, ExperimentType.Onboard, color="C0",
crosshair=True, drone_width=0.5)
with figure("distribution_spirit", size=fig_size(0.44, 1)):
plot_distribution(results, ExperimentType.Spirit, color="C1",
crosshair=True, drone_width=0.5)
def do_durations():
with figure("duration", size=fig_size(0.44, 1)):
sns.factorplot(x="experiment", y="duration", data=analyses, kind="box")
sns.swarmplot(x="experiment", y="duration", split=True, data=analyses,
palette=cmap_complement)
plt.ylim(0, plt.ylim()[1])
plt.ylabel("duration (s)")
with figure("duration_runs", size=fig_size(0.44, 1)):
sns.factorplot(x="order", y="duration", hue="experiment", data=analyses,
capsize=0.2)
plt.ylim(0, plt.ylim()[1])
plt.ylabel("duration (s)")
plt.xlabel("run")
def do_movement():
with figure("movement", size=fig_size(0.9, 0.4)):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["path_length", "move_x", "move_y"])
g = sns.factorplot(x="experiment", y="value", col="variable",
data=molten, kind="box")
g.fig.axes[0].set_title("Path length")
g.fig.axes[1].set_title("Movement in $x$")
g.fig.axes[2].set_title("Movement in $y$")
g.fig.axes[0].set_ylabel("distance (m)")
plt.ylim(0, plt.ylim()[1])
with figure("movement_x"):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["move_l", "move_r", "move_x"])
g = sns.factorplot(x="experiment", y="value", col="variable",
data=molten, kind="box")
g.fig.axes[0].set_title("Movement left")
g.fig.axes[1].set_title("Movement right")
g.fig.axes[2].set_title("Movement in $x$")
g.fig.axes[0].set_ylabel("distance (m)")
plt.ylim(0, plt.ylim()[1])
with figure("movement_y"):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["move_b", "move_f", "move_y"])
g = sns.factorplot(x="experiment", y="value", col="variable",
data=molten, kind="box")
g.fig.axes[0].set_title("Movement backwards")
g.fig.axes[1].set_title("Movement forwards")
g.fig.axes[2].set_title("Movement in $y$")
g.fig.axes[0].set_ylabel("distance (m)")
plt.ylim(0, plt.ylim()[1])
with figure("movement_back"):
sns.factorplot(x="experiment", y="move_b", data=analyses, kind="box")
sns.swarmplot(x="experiment", y="move_b", split=True, data=analyses,
palette=cmap_complement)
plt.ylabel("distance (m)")
plt.title("Movement backwards")
with figure("movement_runs", size=fig_size(0.9, 0.4)):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["path_length", "move_x", "move_y"])
g = sns.factorplot(x="order", y="value", col="variable",
data=molten, hue="experiment", capsize=0.2)
g.fig.axes[0].set_title("Path length")
g.fig.axes[1].set_title("Movement in $x$")
g.fig.axes[2].set_title("Movement in $y$")
g.fig.axes[0].set_ylabel("distance (m)")
g.fig.axes[0].set_xlabel("run")
g.fig.axes[1].set_xlabel("run")
g.fig.axes[2].set_xlabel("run")
plt.ylim(0, plt.ylim()[1])
with figure("movement_x_runs"):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["move_l", "move_r", "move_x"])
g = sns.factorplot(x="order", y="value", col="variable",
data=molten, hue="experiment")
g.fig.axes[0].set_title("Movement left")
g.fig.axes[1].set_title("Movement right")
g.fig.axes[2].set_title("Movement in $x$")
g.fig.axes[0].set_ylabel("distance (m)")
g.fig.axes[0].set_xlabel("run")
g.fig.axes[1].set_xlabel("run")
g.fig.axes[2].set_xlabel("run")
plt.ylim(0, plt.ylim()[1])
with figure("movement_y_runs"):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["move_b", "move_f", "move_y"])
g = sns.factorplot(x="order", y="value", col="variable",
data=molten, hue="experiment")
g.fig.axes[0].set_title("Movement backwards")
g.fig.axes[1].set_title("Movement forwards")
g.fig.axes[2].set_title("Movement in $y$")
g.fig.axes[0].set_ylabel("distance (m)")
g.fig.axes[0].set_xlabel("run")
g.fig.axes[1].set_xlabel("run")
g.fig.axes[2].set_xlabel("run")
plt.ylim(0, plt.ylim()[1])
def do_errors():
with figure("rms", size=fig_size(0.9, 0.4)):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["rms", "rms_x", "rms_y"])
g = sns.factorplot(x="experiment", y="value", col="variable",
data=molten, kind="box")
g.fig.axes[0].set_title("RMS Error*")
g.fig.axes[1].set_title("RMS Error in $x$*")
g.fig.axes[2].set_title("RMS Error in $y$*")
g.fig.axes[0].set_ylabel("error (m)")
with figure("rms_runs", size=fig_size(0.9, 0.4)):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=["rms", "rms_x", "rms_y"])
g = sns.factorplot(x="order", y="value", col="variable",
hue="experiment", data=molten, capsize=0.2)
g.fig.axes[0].set_title("RMS Error")
g.fig.axes[1].set_title("RMS Error in $x$")
g.fig.axes[2].set_title("RMS Error in $y$")
g.fig.axes[0].set_ylabel("error (m)")
g.fig.axes[0].set_xlabel("run")
g.fig.axes[1].set_xlabel("run")
g.fig.axes[2].set_xlabel("run")
with figure("distance", size=fig_size(0.9, 0.4)):
molten = pd.melt(analyses,
id_vars=["user", "experiment", "order", "group"],
value_vars=[r"dist_err", r"x_err", r"y_err"])
g = sns.factorplot(x="experiment", y="value", col="variable",
data=molten, kind="box")
g.fig.axes[0].set_title("Distance from target*")
g.fig.axes[1].set_title("Distance from target in $x$")
g.fig.axes[2].set_title("Distance from target in $y$*")
g.fig.axes[0].set_ylabel("distance (m)")
g.axes[0][0].axhline(0, color="black", linewidth=1, zorder=-1)
g.axes[0][1].axhline(0, color="black", linewidth=1, zorder=-1)
g.axes[0][2].axhline(0, color="black", linewidth=1, zorder=-1)
def do_surveys():
with figure("tlx_results", size=fig_size(0.44, 1)):
sns.factorplot(x="experiment", y="tlx", data=tlx, kind="box")
sns.swarmplot(x="experiment", y=r"tlx",
data=tlx, palette=cmap_complement, split=True)
plt.ylim(0, plt.ylim()[1])
plt.ylabel("NASA-TLX weighted score*")
with figure("tlx_components", size=fig_size(0.44, 1)):
components = ["mental", "physical", "temporal", "performance",
"effort", "frustration"]
molten = pd.melt(tlx, id_vars=["user", "experiment", "order"],
value_vars=components,
var_name="component", value_name="score")
sns.barplot(x=r"component", y="score", hue="experiment", data=molten)
plt.gca().set_xticklabels(
["MD", "PD", "TD", "P", "E", "F"])
plt.xlabel("NASA-TLX component")
plt.ylabel("score")
with figure("survey_results", size=fig_size(0.44, 1)):
sns.factorplot(x="experiment", y="total", data=surveys, kind="box")
sns.swarmplot(x="experiment", y=r"total", data=surveys,
palette=cmap_complement, split=True)
plt.ylim(0, plt.ylim()[1])
plt.ylabel("survey score*")
with figure("survey_components", size=fig_size(0.44, 1)):
components = [r"orientation_understanding", r"orientation_control",
r"position_understanding", r"position_control",
r"spacial_understanding", r"spacial_control"]
molten = pd.melt(surveys, id_vars=["user", "experiment", "order"],
value_vars=components,
var_name="question", value_name="rating")
sns.barplot(x=r"question", y="rating", hue="experiment", data=molten)
plt.gca().set_xticklabels(
["OA", "OC", "PA*", "PC*", "RA*", "RC*"])
plt.xlabel("question")
plt.ylabel("rating")
with figure("survey_overview", size=fig_size(0.9, 0.5)):
molten = pd.melt(surveys, id_vars=["user", "experiment", "order"],
value_vars=[r"orientation_understanding",
r"orientation_control",
r"position_understanding",
r"position_control",
r"spacial_understanding",
r"spacial_control"],
var_name="question", value_name="rating")
g = sns.barplot(x=r"rating", y=r"question", hue="experiment",
data=molten)
sns.stripplot(x="rating", y=r"question", data=molten, hue="experiment",
split=True, palette=cmap_complement, jitter=0.6, size=3)
plt.gca().set_yticklabels(
["angle aware", "angle control",
"position aware*", "position control*",
"rel. pos. aware*", "rel. pos. control*"])
handles, labels = g.get_legend_handles_labels()
plt.legend(handles[2:], labels[2:])
plt.xlabel("rating")
plt.title("Survey results")
if __name__ == "__main__":
latexify()
do_drone_dos()
results, analyses = analyze_data()
do_paths()
do_distributions()
do_durations()
do_movement()
do_errors()
users, tlx, surveys = load_surveys()
    do_surveys()
sqlbuilding.py | from __future__ import absolute_import, print_function, division
from pony.py23compat import PY2, izip, imap, itervalues, basestring, unicode, buffer, int_types
from operator import attrgetter
from decimal import Decimal
from datetime import date, datetime, timedelta
from binascii import hexlify
from pony import options
from pony.utils import datetime2timestamp, throw, is_ident
from pony.converting import timedelta2str
from pony.orm.ormtypes import RawSQL, Json
class AstError(Exception): pass
class Param(object):
__slots__ = 'style', 'id', 'paramkey', 'converter', 'optimistic'
def __init__(param, paramstyle, paramkey, converter=None, optimistic=False):
param.style = paramstyle
param.id = None
param.paramkey = paramkey
param.converter = converter
param.optimistic = optimistic
def eval(param, values):
varkey, i, j = param.paramkey
value = values[varkey]
if i is not None:
t = type(value)
if t is tuple: value = value[i]
elif t is RawSQL: value = value.values[i]
elif hasattr(value, '_get_items'): value = value._get_items()[i]
else: assert False, t
if j is not None:
assert type(type(value)).__name__ == 'EntityMeta'
value = value._get_raw_pkval_()[j]
converter = param.converter
if value is not None and converter is not None:
if converter.attr is None:
value = converter.val2dbval(value)
value = converter.py2sql(value)
return value
def __unicode__(param):
paramstyle = param.style
if paramstyle == 'qmark': return u'?'
elif paramstyle == 'format': return u'%s'
elif paramstyle == 'numeric': return u':%d' % param.id
elif paramstyle == 'named': return u':p%d' % param.id
elif paramstyle == 'pyformat': return u'%%(p%d)s' % param.id
else: throw(NotImplementedError)
    if not PY2: __str__ = __unicode__
    def __repr__(param):
        return '%s(%r)' % (param.__class__.__name__, param.paramkey)

class CompositeParam(Param):
    __slots__ = 'items', 'func'
def __init__(param, paramstyle, paramkey, items, func):
for item in items: assert isinstance(item, (Param, Value)), item
Param.__init__(param, paramstyle, paramkey)
param.items = items
param.func = func
def eval(param, values):
args = [ item.eval(values) if isinstance(item, Param) else item.value for item in param.items ]
return param.func(args)
class Value(object):
__slots__ = 'paramstyle', 'value'
def __init__(self, paramstyle, value):
self.paramstyle = paramstyle
self.value = value
def __unicode__(self):
value = self.value
if value is None:
return 'null'
if isinstance(value, bool):
return value and '1' or '0'
if isinstance(value, basestring):
return self.quote_str(value)
if isinstance(value, datetime):
return 'TIMESTAMP ' + self.quote_str(datetime2timestamp(value))
if isinstance(value, date):
return 'DATE ' + self.quote_str(str(value))
if isinstance(value, timedelta):
return "INTERVAL '%s' HOUR TO SECOND" % timedelta2str(value)
if PY2:
if isinstance(value, (int, long, float, Decimal)):
return str(value)
if isinstance(value, buffer):
return "X'%s'" % hexlify(value)
else:
if isinstance(value, (int, float, Decimal)):
return str(value)
if isinstance(value, bytes):
return "X'%s'" % hexlify(value).decode('ascii')
assert False, repr(value) # pragma: no cover
if not PY2:
__str__ = __unicode__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.value)
def quote_str(self, s):
if self.paramstyle in ('format', 'pyformat'): s = s.replace('%', '%%')
return "'%s'" % s.replace("'", "''")
def flat(tree):
stack = [ tree ]
result = []
stack_pop = stack.pop
stack_extend = stack.extend
result_append = result.append
while stack:
x = stack_pop()
if isinstance(x, basestring): result_append(x)
else:
try: stack_extend(reversed(x))
except TypeError: result_append(x)
return result
def flat_conditions(conditions):
result = []
for condition in conditions:
if condition[0] == 'AND':
result.extend(flat_conditions(condition[1:]))
else: result.append(condition)
return result
def join(delimiter, items):
items = iter(items)
try: result = [ next(items) ]
except StopIteration: return []
for item in items:
result.append(delimiter)
result.append(item)
return result
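# e.g. join(', ', ['a', 'b', 'c']) -> ['a', ', ', 'b', ', ', 'c'];
# join(', ', []) -> [].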
def move_conditions_from_inner_join_to_where(sections):
new_sections = list(sections)
for i, section in enumerate(sections):
if section[0] == 'FROM':
new_from_list = [ 'FROM' ] + [ list(item) for item in section[1:] ]
new_sections[i] = new_from_list
if len(sections) > i+1 and sections[i+1][0] == 'WHERE':
new_where_list = list(sections[i+1])
new_sections[i+1] = new_where_list
else:
new_where_list = [ 'WHERE' ]
new_sections.insert(i+1, new_where_list)
break
else: return sections
for join in new_from_list[2:]:
if join[1] in ('TABLE', 'SELECT') and len(join) == 4:
new_where_list.append(join.pop())
return new_sections
def make_binary_op(symbol, default_parentheses=False):
def binary_op(builder, expr1, expr2, parentheses=None):
if parentheses is None: parentheses = default_parentheses
if parentheses: return '(', builder(expr1), symbol, builder(expr2), ')'
return builder(expr1), symbol, builder(expr2)
return binary_op
def make_unary_func(symbol):
def unary_func(builder, expr):
return '%s(' % symbol, builder(expr), ')'
return unary_func
def indentable(method):
def new_method(builder, *args, **kwargs):
result = method(builder, *args, **kwargs)
if builder.indent <= 1: return result
return builder.indent_spaces * (builder.indent-1), result
new_method.__name__ = method.__name__
return new_method
class SQLBuilder(object):
dialect = None
param_class = Param
composite_param_class = CompositeParam
value_class = Value
indent_spaces = " " * 4
least_func_name = 'least'
greatest_func_name = 'greatest'
def __init__(builder, provider, ast):
builder.provider = provider
builder.quote_name = provider.quote_name
builder.paramstyle = paramstyle = provider.paramstyle
builder.ast = ast
builder.indent = 0
builder.keys = {}
builder.inner_join_syntax = options.INNER_JOIN_SYNTAX
builder.suppress_aliases = False
builder.result = flat(builder(ast))
params = tuple(x for x in builder.result if isinstance(x, Param))
layout = []
for i, param in enumerate(params):
if param.id is None: param.id = i + 1
layout.append(param.paramkey)
builder.layout = layout
builder.sql = u''.join(imap(unicode, builder.result)).rstrip('\n')
if paramstyle in ('qmark', 'format'):
def adapter(values):
return tuple(param.eval(values) for param in params)
elif paramstyle == 'numeric':
def adapter(values):
return tuple(param.eval(values) for param in params)
elif paramstyle in ('named', 'pyformat'):
def adapter(values):
return {'p%d' % param.id: param.eval(values) for param in params}
else: throw(NotImplementedError, paramstyle)
builder.params = params
builder.adapter = adapter
def __call__(builder, ast):
if isinstance(ast, basestring):
throw(AstError, 'An SQL AST list was expected. Got string: %r' % ast)
symbol = ast[0]
if not isinstance(symbol, basestring):
throw(AstError, 'Invalid node name in AST: %r' % ast)
method = getattr(builder, symbol, None)
if method is None: throw(AstError, 'Method not found: %s' % symbol)
try:
return method(*ast[1:])
except TypeError:
raise
## traceback = sys.exc_info()[2]
## if traceback.tb_next is None:
## del traceback
## throw(AstError, 'Invalid data for method %s: %r'
## % (symbol, ast[1:]))
## else:
## del traceback
## raise
def INSERT(builder, table_name, columns, values, returning=None):
return [ 'INSERT INTO ', builder.quote_name(table_name), ' (',
join(', ', [builder.quote_name(column) for column in columns ]),
') VALUES (', join(', ', [builder(value) for value in values]), ')' ]
def DEFAULT(builder):
return 'DEFAULT'
def UPDATE(builder, table_name, pairs, where=None):
return [ 'UPDATE ', builder.quote_name(table_name), '\nSET ',
join(', ', [ (builder.quote_name(name), ' = ', builder(param)) for name, param in pairs]),
where and [ '\n', builder(where) ] or [] ]
def DELETE(builder, alias, from_ast, where=None):
builder.indent += 1
if alias is not None:
assert isinstance(alias, basestring)
if not where: return 'DELETE ', alias, ' ', builder(from_ast)
return 'DELETE ', alias, ' ', builder(from_ast), builder(where)
else:
assert from_ast[0] == 'FROM' and len(from_ast) == 2 and from_ast[1][1] == 'TABLE'
alias = from_ast[1][0]
if alias is not None: builder.suppress_aliases = True
if not where: return 'DELETE ', builder(from_ast)
return 'DELETE ', builder(from_ast), builder(where)
def _subquery(builder, *sections):
builder.indent += 1
if not builder.inner_join_syntax:
sections = move_conditions_from_inner_join_to_where(sections)
result = [ builder(s) for s in sections ]
builder.indent -= 1
return result
def SELECT(builder, *sections):
prev_suppress_aliases = builder.suppress_aliases
builder.suppress_aliases = False
try:
result = builder._subquery(*sections)
if builder.indent:
indent = builder.indent_spaces * builder.indent
return '(\n', result, indent + ')'
return result
finally:
builder.suppress_aliases = prev_suppress_aliases
def SELECT_FOR_UPDATE(builder, nowait, skip_locked, *sections):
assert not builder.indent
result = builder.SELECT(*sections)
nowait = ' NOWAIT' if nowait else ''
skip_locked = ' SKIP LOCKED' if skip_locked else ''
return result, 'FOR UPDATE', nowait, skip_locked, '\n'
def EXISTS(builder, *sections):
result = builder._subquery(*sections)
indent = builder.indent_spaces * builder.indent
return 'EXISTS (\n', indent, 'SELECT 1\n', result, indent, ')'
def NOT_EXISTS(builder, *sections):
return 'NOT ', builder.EXISTS(*sections)
@indentable
def ALL(builder, *expr_list):
exprs = [ builder(e) for e in expr_list ]
return 'SELECT ', join(', ', exprs), '\n'
@indentable
def DISTINCT(builder, *expr_list):
exprs = [ builder(e) for e in expr_list ]
return 'SELECT DISTINCT ', join(', ', exprs), '\n'
@indentable
def AGGREGATES(builder, *expr_list):
exprs = [ builder(e) for e in expr_list ]
return 'SELECT ', join(', ', exprs), '\n'
def AS(builder, expr, alias):
return builder(expr), ' AS ', builder.quote_name(alias)
def compound_name(builder, name_parts):
return '.'.join(p and builder.quote_name(p) or '' for p in name_parts)
def sql_join(builder, join_type, sources):
indent = builder.indent_spaces * (builder.indent-1)
indent2 = indent + builder.indent_spaces
indent3 = indent2 + builder.indent_spaces
result = [ indent, 'FROM ']
for i, source in enumerate(sources):
if len(source) == 3:
alias, kind, x = source
join_cond = None
elif len(source) == 4:
alias, kind, x, join_cond = source
else: throw(AstError, 'Invalid source in FROM section: %r' % source)
if i > 0:
if join_cond is None: result.append(', ')
else: result += [ '\n', indent, ' %s JOIN ' % join_type ]
if builder.suppress_aliases: alias = None
elif alias is not None: alias = builder.quote_name(alias)
if kind == 'TABLE':
if isinstance(x, basestring): result.append(builder.quote_name(x))
else: result.append(builder.compound_name(x))
if alias is not None: result += ' ', alias # Oracle does not support 'AS' here
elif kind == 'SELECT':
if alias is None: throw(AstError, 'Subquery in FROM section must have an alias')
result += builder.SELECT(*x), ' ', alias # Oracle does not support 'AS' here
else: throw(AstError, 'Invalid source kind in FROM section: %r' % kind)
if join_cond is not None: result += [ '\n', indent2, 'ON ', builder(join_cond) ]
result.append('\n')
return result
def FROM(builder, *sources):
return builder.sql_join('INNER', sources)
def INNER_JOIN(builder, *sources):
builder.inner_join_syntax = True
return builder.sql_join('INNER', sources)
@indentable
def LEFT_JOIN(builder, *sources):
return builder.sql_join('LEFT', sources)
def WHERE(builder, *conditions):
if not conditions: return ''
conditions = flat_conditions(conditions)
indent = builder.indent_spaces * (builder.indent-1)
result = [ indent, 'WHERE ' ]
extend = result.extend
extend((builder(conditions[0]), '\n'))
for condition in conditions[1:]:
extend((indent, ' AND ', builder(condition), '\n'))
return result
def HAVING(builder, *conditions):
if not conditions: return ''
conditions = flat_conditions(conditions)
indent = builder.indent_spaces * (builder.indent-1)
result = [ indent, 'HAVING ' ]
extend = result.extend
extend((builder(conditions[0]), '\n'))
for condition in conditions[1:]:
extend((indent, ' AND ', builder(condition), '\n'))
return result
@indentable
def GROUP_BY(builder, *expr_list):
exprs = [ builder(e) for e in expr_list ]
return 'GROUP BY ', join(', ', exprs), '\n'
@indentable
def UNION(builder, kind, *sections):
return 'UNION ', kind, '\n', builder.SELECT(*sections)
@indentable
def INTERSECT(builder, *sections):
return 'INTERSECT\n', builder.SELECT(*sections)
@indentable
def EXCEPT(builder, *sections):
return 'EXCEPT\n', builder.SELECT(*sections)
@indentable
def ORDER_BY(builder, *order_list):
result = [ 'ORDER BY ' ]
result.extend(join(', ', [ builder(expr) for expr in order_list ]))
result.append('\n')
return result
def DESC(builder, expr):
return builder(expr), ' DESC'
@indentable
def LIMIT(builder, limit, offset=None):
if limit is None:
limit = 'null'
else:
assert isinstance(limit, int_types)
assert offset is None or isinstance(offset, int)
if offset:
return 'LIMIT %s OFFSET %d\n' % (limit, offset)
else:
return 'LIMIT %s\n' % limit
def COLUMN(builder, table_alias, col_name):
if builder.suppress_aliases or not table_alias:
return [ '%s' % builder.quote_name(col_name) ]
return [ '%s.%s' % (builder.quote_name(table_alias), builder.quote_name(col_name)) ]
def PARAM(builder, paramkey, converter=None, optimistic=False):
return builder.make_param(builder.param_class, paramkey, converter, optimistic)
def make_param(builder, param_class, paramkey, *args):
keys = builder.keys
param = keys.get(paramkey)
if param is None:
param = param_class(builder.paramstyle, paramkey, *args)
keys[paramkey] = param
return param
def make_composite_param(builder, paramkey, items, func):
return builder.make_param(builder.composite_param_class, paramkey, items, func)
def STAR(builder, table_alias):
return builder.quote_name(table_alias), '.*'
def ROW(builder, *items):
return '(', join(', ', imap(builder, items)), ')'
def VALUE(builder, value):
return builder.value_class(builder.paramstyle, value)
def AND(builder, *cond_list):
cond_list = [ builder(condition) for condition in cond_list ]
return join(' AND ', cond_list)
def OR(builder, *cond_list):
cond_list = [ builder(condition) for condition in cond_list ]
return '(', join(' OR ', cond_list), ')'
def NOT(builder, condition):
return 'NOT (', builder(condition), ')'
def POW(builder, expr1, expr2):
return 'power(', builder(expr1), ', ', builder(expr2), ')'
EQ = make_binary_op(' = ')
NE = make_binary_op(' <> ')
LT = make_binary_op(' < ')
LE = make_binary_op(' <= ')
GT = make_binary_op(' > ')
GE = make_binary_op(' >= ')
ADD = make_binary_op(' + ', True)
SUB = make_binary_op(' - ', True)
MUL = make_binary_op(' * ', True)
DIV = make_binary_op(' / ', True)
FLOORDIV = make_binary_op(' / ', True)
def MOD(builder, a, b):
symbol = ' %% ' if builder.paramstyle in ('format', 'pyformat') else ' % '
return '(', builder(a), symbol, builder(b), ')'
def FLOAT_EQ(builder, a, b):
a, b = builder(a), builder(b)
return 'abs(', a, ' - ', b, ') / coalesce(nullif(greatest(abs(', a, '), abs(', b, ')), 0), 1) <= 1e-14'
def FLOAT_NE(builder, a, b):
a, b = builder(a), builder(b)
return 'abs(', a, ' - ', b, ') / coalesce(nullif(greatest(abs(', a, '), abs(', b, ')), 0), 1) > 1e-14'
def CONCAT(builder, *args):
return '(', join(' || ', imap(builder, args)), ')'
def NEG(builder, expr):
return '-(', builder(expr), ')'
def IS_NULL(builder, expr):
return builder(expr), ' IS NULL'
def IS_NOT_NULL(builder, expr):
return builder(expr), ' IS NOT NULL'
def LIKE(builder, expr, template, escape=None):
result = builder(expr), ' LIKE ', builder(template)
if escape: result = result + (' ESCAPE ', builder(escape))
return result
def NOT_LIKE(builder, expr, template, escape=None):
result = builder(expr), ' NOT LIKE ', builder(template)
if escape: result = result + (' ESCAPE ', builder(escape))
return result
def BETWEEN(builder, expr1, expr2, expr3):
return builder(expr1), ' BETWEEN ', builder(expr2), ' AND ', builder(expr3)
def NOT_BETWEEN(builder, expr1, expr2, expr3):
return builder(expr1), ' NOT BETWEEN ', builder(expr2), ' AND ', builder(expr3)
def IN(builder, expr1, x):
if not x: return '0 = 1'
if len(x) >= 1 and x[0] == 'SELECT':
return builder(expr1), ' IN ', builder(x)
expr_list = [ builder(expr) for expr in x ]
return builder(expr1), ' IN (', join(', ', expr_list), ')'
def NOT_IN(builder, expr1, x):
if not x: return '1 = 1'
if len(x) >= 1 and x[0] == 'SELECT':
return builder(expr1), ' NOT IN ', builder(x)
expr_list = [ builder(expr) for expr in x ]
return builder(expr1), ' NOT IN (', join(', ', expr_list), ')'
def COUNT(builder, distinct, *expr_list):
assert distinct in (None, True, False)
if not distinct:
if not expr_list: return ['COUNT(*)']
return 'COUNT(', join(', ', imap(builder, expr_list)), ')'
if not expr_list: throw(AstError, 'COUNT(DISTINCT) without argument')
if len(expr_list) == 1:
return 'COUNT(DISTINCT ', builder(expr_list[0]), ')'
if builder.dialect == 'PostgreSQL':
return 'COUNT(DISTINCT ', builder.ROW(*expr_list), ')'
elif builder.dialect == 'MySQL':
return 'COUNT(DISTINCT ', join(', ', imap(builder, expr_list)), ')'
        # Oracle and SQLite queries are translated to a completely different subquery syntax
else: throw(NotImplementedError) # This line must not be executed
def SUM(builder, distinct, expr):
assert distinct in (None, True, False)
return distinct and 'coalesce(SUM(DISTINCT ' or 'coalesce(SUM(', builder(expr), '), 0)'
def AVG(builder, distinct, expr):
assert distinct in (None, True, False)
return distinct and 'AVG(DISTINCT ' or 'AVG(', builder(expr), ')'
def GROUP_CONCAT(builder, distinct, expr, sep=None):
assert distinct in (None, True, False)
result = distinct and 'GROUP_CONCAT(DISTINCT ' or 'GROUP_CONCAT(', builder(expr)
if sep is not None:
if builder.provider.dialect == 'MySQL':
result = result, ' SEPARATOR ', builder(sep)
else:
result = result, ', ', builder(sep)
return result, ')'
UPPER = make_unary_func('upper')
LOWER = make_unary_func('lower')
LENGTH = make_unary_func('length')
ABS = make_unary_func('abs')
def COALESCE(builder, *args):
if len(args) < 2: assert False # pragma: no cover
return 'coalesce(', join(', ', imap(builder, args)), ')'
def MIN(builder, distinct, *args):
assert not distinct, distinct
if len(args) == 0: assert False # pragma: no cover
elif len(args) == 1: fname = 'MIN'
else: fname = builder.least_func_name
return fname, '(', join(', ', imap(builder, args)), ')'
def MAX(builder, distinct, *args):
assert not distinct, distinct
if len(args) == 0: assert False # pragma: no cover
elif len(args) == 1: fname = 'MAX'
else: fname = builder.greatest_func_name
return fname, '(', join(', ', imap(builder, args)), ')'
def SUBSTR(builder, expr, start, len=None):
if len is None: return 'substr(', builder(expr), ', ', builder(start), ')'
return 'substr(', builder(expr), ', ', builder(start), ', ', builder(len), ')'
def CASE(builder, expr, cases, default=None):
if expr is None and default is not None and default[0] == 'CASE' and default[1] is None:
cases2, default2 = default[2:]
return builder.CASE(None, tuple(cases) + tuple(cases2), default2)
result = [ 'case' ]
if expr is not None:
result.append(' ')
result.extend(builder(expr))
for condition, expr in cases:
result.extend((' when ', builder(condition), ' then ', builder(expr)))
if default is not None:
result.extend((' else ', builder(default)))
result.append(' end')
return result
def TRIM(builder, expr, chars=None):
if chars is None: return 'trim(', builder(expr), ')'
return 'trim(', builder(expr), ', ', builder(chars), ')'
def LTRIM(builder, expr, chars=None):
if chars is None: return 'ltrim(', builder(expr), ')'
return 'ltrim(', builder(expr), ', ', builder(chars), ')'
def RTRIM(builder, expr, chars=None):
if chars is None: return 'rtrim(', builder(expr), ')'
return 'rtrim(', builder(expr), ', ', builder(chars), ')'
def REPLACE(builder, str, from_, to):
return 'replace(', builder(str), ', ', builder(from_), ', ', builder(to), ')'
def TO_INT(builder, expr):
return 'CAST(', builder(expr), ' AS integer)'
def TO_STR(builder, expr):
return 'CAST(', builder(expr), ' AS text)'
def TO_REAL(builder, expr):
return 'CAST(', builder(expr), ' AS real)'
def TODAY(builder):
return 'CURRENT_DATE'
def NOW(builder):
return 'CURRENT_TIMESTAMP'
def DATE(builder, expr):
return 'DATE(', builder(expr) ,')'
def YEAR(builder, expr):
return 'EXTRACT(YEAR FROM ', builder(expr), ')'
def MONTH(builder, expr):
return 'EXTRACT(MONTH FROM ', builder(expr), ')'
def DAY(builder, expr):
return 'EXTRACT(DAY FROM ', builder(expr), ')'
def HOUR(builder, expr):
return 'EXTRACT(HOUR FROM ', builder(expr), ')'
def MINUTE(builder, expr):
return 'EXTRACT(MINUTE FROM ', builder(expr), ')'
def SECOND(builder, expr):
return 'EXTRACT(SECOND FROM ', builder(expr), ')'
def RANDOM(builder):
return 'RAND()'
def RAWSQL(builder, sql):
if isinstance(sql, basestring): return sql
return [ x if isinstance(x, basestring) else builder(x) for x in sql ]
def build_json_path(builder, path):
empty_slice = slice(None, None, None)
has_params = False
has_wildcards = False
items = [ builder(element) for element in path ]
for item in items:
if isinstance(item, Param):
has_params = True
elif isinstance(item, Value):
value = item.value
if value is Ellipsis or value == empty_slice: has_wildcards = True
else: assert isinstance(value, (int, basestring)), value
else: assert False, item
if has_params:
paramkey = tuple(item.paramkey if isinstance(item, Param) else
None if type(item.value) is slice else item.value
for item in items)
path_sql = builder.make_composite_param(paramkey, items, builder.eval_json_path)
else:
result_value = builder.eval_json_path(item.value for item in items)
path_sql = builder.value_class(builder.paramstyle, result_value)
return path_sql, has_params, has_wildcards
@classmethod
def eval_json_path(cls, values):
result = ['$']
append = result.append
empty_slice = slice(None, None, None)
for value in values:
if isinstance(value, int): append('[%d]' % value)
elif isinstance(value, basestring):
append('.' + value if is_ident(value) else '."%s"' % value.replace('"', '\\"'))
elif value is Ellipsis: append('.*')
elif value == empty_slice: append('[*]')
else: assert False, value
return ''.join(result)
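    # e.g. eval_json_path([2, 'name', Ellipsis]) -> '$[2].name.*'; keys that
    # are not valid identifiers are quoted: eval_json_path(['a b']) -> '$."a b"'.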
def JSON_QUERY(builder, expr, path):
throw(NotImplementedError)
def JSON_VALUE(builder, expr, path, type):
throw(NotImplementedError)
def JSON_NONZERO(builder, expr):
throw(NotImplementedError)
def JSON_CONCAT(builder, left, right):
throw(NotImplementedError)
def JSON_CONTAINS(builder, expr, path, key):
throw(NotImplementedError)
def JSON_ARRAY_LENGTH(builder, value):
throw(NotImplementedError)
def JSON_PARAM(builder, expr):
return builder(expr)
def ARRAY_INDEX(builder, col, index):
throw(NotImplementedError)
def ARRAY_CONTAINS(builder, key, not_in, col):
throw(NotImplementedError)
def ARRAY_SUBSET(builder, array1, not_in, array2):
throw(NotImplementedError)
def ARRAY_LENGTH(builder, array):
throw(NotImplementedError)
def ARRAY_SLICE(builder, array, start, stop):
throw(NotImplementedError)
def MAKE_ARRAY(builder, *items):
        throw(NotImplementedError)
plugin.go | package run
import (
"context"
"github.com/ansel1/merry/v2"
"github.com/outblocks/outblocks-cli/pkg/plugins"
apiv1 "github.com/outblocks/outblocks-plugin-go/gen/api/v1"
)
type PluginRunResult struct {
Info map[*plugins.Plugin]*PluginInfo
OutputCh chan *apiv1.RunOutputResponse
}
type PluginInfo struct {
Response *apiv1.RunStartResponse
done chan struct{}
err error
}
func (i *PluginInfo) Wait() error {
<-i.done
return i.err
}
func ThroughPlugin(ctx context.Context, runMap map[*plugins.Plugin]*apiv1.RunRequest) (*PluginRunResult, error) {
	ret := &PluginRunResult{
		Info:     make(map[*plugins.Plugin]*PluginInfo),
		OutputCh: make(chan *apiv1.RunOutputResponse),
	}
	errCh := make(chan error, 1)
	for plug, req := range runMap {
		plug := plug // capture the loop variable for the goroutine below
		res, err := plug.Client().Run(ctx, req, ret.OutputCh, errCh)
		if err != nil {
			return nil, err
		}
		i := &PluginInfo{
			Response: res,
			done:     make(chan struct{}),
		}
		go func() {
			err := <-errCh
			if err != nil {
				i.err = merry.Errorf("plugin:%s: %s", plug.Name, err)
			}
			close(i.done)
		}()
		ret.Info[plug] = i
	}
	return ret, nil
}
func (l *PluginRunResult) Stop() error {
var firstErr error
for p := range l.Info {
err := p.Stop()
if err != nil && firstErr == nil {
firstErr = err
}
}
return firstErr
}
func (l *PluginRunResult) Wait() error {
errCh := make(chan error, 1)
total := len(l.Info)
for _, pi := range l.Info {
pi := pi
go func() {
err := pi.Wait()
errCh <- err
}()
}
var err error
for i := 0; i < total; i++ {
err = <-errCh
if err != nil {
break
}
}
return err
}
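// A minimal usage sketch (hypothetical caller code; the runMap construction
// is up to the caller):
//
//	res, err := ThroughPlugin(ctx, runMap)
//	if err != nil {
//		return err
//	}
//	go func() {
//		for out := range res.OutputCh {
//			_ = out // forward plugin run output
//		}
//	}()
//	err = res.Wait()
//	stopErr := res.Stop()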
mod.rs | use proc_macro2::{Ident, TokenStream};
use syn::{Fields, ItemStruct, Result};
mod attr;
mod storage;
mod var;
pub use attr::{has_storage_attr, StructAttr, StructAttrKind};
pub use storage::storage_vars;
pub use var::{Var, VarId};
pub struct Struct {
raw_struct: ItemStruct,
attrs: Result<Vec<StructAttr>>,
}
impl Struct {
pub fn new(raw_struct: ItemStruct) -> Self {
let attrs = attr::struct_attrs(&raw_struct.attrs);
Self { raw_struct, attrs }
}
pub fn raw_name(&self) -> Ident {
self.raw_struct.ident.clone()
}
pub fn raw_fields(&self) -> &Fields {
&self.raw_struct.fields
}
pub fn attrs(&self) -> &Result<Vec<StructAttr>> {
&self.attrs
}
}
pub fn expand(strukt: &Struct, must_mock: bool) -> Result<TokenStream> {
match strukt.attrs() {
Ok(attrs) => {
if has_storage_attr(attrs) {
storage::expand(strukt, attrs, must_mock)
} else {
todo!()
            }
        }
        Err(err) => Err(err.clone()),
    }
}
|
links.go | package links
import (
"fmt" //@link(`fmt`,"https://godoc.org/fmt")
"golang.org/x/tools/internal/lsp/foo" //@link(`golang.org/x/tools/internal/lsp/foo`,`https://godoc.org/golang.org/x/tools/internal/lsp/foo`)
_ "database/sql" //@link(`database/sql`, `https://godoc.org/database/sql`)
errors "golang.org/x/xerrors" //@link(`golang.org/x/xerrors`, `https://godoc.org/golang.org/x/xerrors`)
)
var (
_ fmt.Formatter
_ foo.StructFoo
_ errors.Formatter
)
// Foo function
func Foo() string {
/*https://example.com/comment */ //@link("https://example.com/comment","https://example.com/comment")
url := "https://example.com/string_literal" //@link("https://example.com/string_literal","https://example.com/string_literal")
return url
}
test_tuning_curve.py | import numpy as np
import pytest
from ...core import consume_config
from ..cwgan import ConditionalTuningCurveGenerator
from ..ssn import emit_tuning_curve_generator, ssn_type_choices
from ..wgan import DEFAULT_PARAMS
from .test_euler_ssn import JDS
TEST_PARAMS = dict(
DEFAULT_PARAMS,
# Stimulator:
num_tcdom=10,
num_sites=50,
# Model / SSN:
tau_E=2,
dt=0.1,
seqlen=240,
skip_steps=200,
# Prober:
probes=[0],
**JDS # Model / SSN
)
del TEST_PARAMS['bandwidths']
del TEST_PARAMS['contrasts']
del TEST_PARAMS['sample_sites']
del TEST_PARAMS['gen']
del TEST_PARAMS['disc']
def emit_tcg_for_test(**kwargs):
    return emit_tuning_curve_generator(**dict(TEST_PARAMS, **kwargs))
def tcg_for_test(config={}, **kwargs):
tcg, rest = consume_config(emit_tcg_for_test, config, **kwargs)
assert not rest
return tcg
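# e.g. tcg_for_test(dict(ssn_type='heteroin')) builds a generator for the
# heterogeneous-input SSN; keyword arguments override TEST_PARAMS.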
def get_param_values(self):
values = {}
for p in self.get_all_params():
values[p.name] = p.get_value()
return values
@pytest.mark.parametrize('ssn_type, params', [
('default', {}),
# dict(J=0.5), # unsupported (should I?)
('default', dict(J=[[1, 2], [3, 4]])),
('default', dict(J=np.array([[1, 2], [3, 4]], dtype=int))),
('default', dict(J=np.array([[1, 2], [3, 4]], dtype='float32'))),
('heteroin', dict(V=[0.3, 0])),
('deg-heteroin', dict(V=0.5)),
])
def test_tcg_set_params(ssn_type, params):
config = dict(ssn_type=ssn_type)
tcg = tcg_for_test(config)
keys = set(params)
tcg.set_params(params)
assert keys == set(params) # set_params must not modify params
actual = get_param_values(tcg)
test = {}
for k in keys:
test[k] = np.allclose(actual[k], params[k])
# Manually compare parameters (instead of
# np.testing.assert_equal) since params[k] may be broadcast to
# array.
assert all(test.values())
def test_tcg_set_unknown_params():
tcg = tcg_for_test()
with pytest.raises(ValueError) as excinfo:
tcg.set_params(dict(V=[0.3, 0]))
assert 'Unknown parameters:' in str(excinfo.value)
flat_param_names = {
'default': [
'J_EE', 'J_EI',
'J_IE', 'J_II',
'D_EE', 'D_EI',
'D_IE', 'D_II',
'S_EE', 'S_EI',
'S_IE', 'S_II',
],
}
flat_param_names['heteroin'] = ['V_E', 'V_I'] + flat_param_names['default']
flat_param_names['deg-heteroin'] = ['V'] + flat_param_names['default']
@pytest.mark.parametrize('ssn_type', ssn_type_choices)
@pytest.mark.parametrize('conditional', [False, True])
def test_tcg_flat_param_names(ssn_type, conditional):
desired_names = tuple(flat_param_names[ssn_type])
config = {}
if conditional:
config['emit_tcg'] = ConditionalTuningCurveGenerator.consume_kwargs
tcg = tcg_for_test(config, ssn_type=ssn_type)
    assert tcg.get_flat_param_names() == desired_names
alert.component.spec.ts | import {it, inject, beforeEachProviders} from 'angular2/testing';
import {Alert} from './alert.component';
describe('Alert', () => {
beforeEachProviders(() => [
Alert
  ]);

  it('should have default type', inject([Alert], (alert:Alert) => {
    expect(alert.type)
.toEqual('warning');
}));
});
dsv_serialize1_test.go | package dsv_test
import (
"fmt"
"math/rand"
"reflect"
"testing"
dsv "github.com/tony-o/dsv"
)
type LottoFields struct {
A string `csv:"A"`
B int `csv:"B"`
C float64 `csv:"C"`
D string `csv:"D"`
E string `csv:"E"`
F string `csv:"F"`
G string `csv:"G"`
H string `csv:"H"`
I float32 `csv:"I"`
J bool `csv:"J"`
K byte `csv:"K"`
L []byte `csv:"L"`
}
func (a LottoFields) Cmp(b LottoFields) (bool, string) {
if a.A != b.A {
return false, "A"
}
if a.B != b.B {
return false, "B"
}
if fmt.Sprintf("%0.2f", a.C) != fmt.Sprintf("%0.2f", b.C) {
return false, "C"
}
if a.D != b.D {
return false, "D"
}
if a.E != b.E {
return false, "E"
}
if a.F != b.F {
return false, "F"
}
if a.G != b.G {
return false, "G"
}
if a.H != b.H {
return false, "H"
}
if fmt.Sprintf("%0.2f", a.I) != fmt.Sprintf("%0.2f", b.I) {
return false, "I"
}
if a.J != b.J {
return false, "J"
}
if a.K != b.K {
return false, "K"
}
if len(a.L) != len(b.L) {
return false, "L"
}
for i := range a.L {
if a.L[i] != b.L[i] {
return false, "L"
}
}
return true, ""
}
func TestDSV_Serialize_EnsureOrdering(t *testing.T) {
	return // NOTE: test is currently disabled; drop this return to re-enable it
testCase := []LottoFields{}
for i := 0; i < 2000; i++ {
var b bool
if rand.Intn(1000) < 500 {
b = true
}
testCase = append(testCase, LottoFields{
A: randStr(15),
B: rand.Intn(2400),
C: rand.Float64() * 2400,
D: randStr(24),
E: randStr(3),
F: randStr(16),
G: randStr(512),
H: randStr(1),
I: rand.Float32() * 10,
J: b,
K: ls[rand.Intn(len(ls))],
L: []byte(randStr(5000)),
})
}
d, e := dsv.NewDSV(dsv.DSVOpt{})
if e != nil {
t.Logf("failed to create dsv: %v", e)
t.FailNow()
}
bs, e := d.Serialize(testCase)
if e != nil {
t.Logf("serialization error: %v", e)
t.FailNow()
}
resultCase := []LottoFields{}
e = d.Deserialize(bs, &resultCase)
if e != nil {
t.Logf("deserialization error: %v", e)
t.FailNow()
}
if len(resultCase) != len(testCase) {
t.Logf("deserialization length mismatch, expected=%d,got=%d", len(testCase), len(resultCase))
t.FailNow()
}
for i := range testCase {
if ok, fld := testCase[i].Cmp(resultCase[i]); !ok {
av := reflect.ValueOf(testCase[i])
af := av.FieldByName(fld)
bv := reflect.ValueOf(resultCase[i])
bf := bv.FieldByName(fld)
t.Logf("deserialization error with field [%d]%s: expected=%+v,got=%+v", i, fld, af.Interface(), bf.Interface())
t.FailNow()
}
}
}
var ls = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
func randStr(n int) string {
b := make([]byte, n)
for i := range b {
b[i] = ls[rand.Intn(len(ls))]
}
return string(b)
}
// TestDSV_Serialize_Basic basic test for serialization
func TestDSV_Serialize_Basic(t *testing.T) {
d, e := dsv.NewDSV(dsv.DSVOpt{})
if e != nil {
t.Logf("failed to create dsv: %v", e)
t.FailNow()
}
ts := &[]*TagTest{
&TagTest{
Id: 42,
Name: "nAME",
Email: "eMAIL",
},
&TagTest{
Id: 64,
Name: "NaMe",
Email: "EmAiL",
},
}
bs, e := d.Serialize(ts)
if e != nil {
t.Logf("serialization error: %v", e)
t.FailNow()
}
xs := TagTestArray{}
e = d.Deserialize(bs, &xs)
if e != nil {
t.Logf("deserialization error: %v", e)
t.FailNow()
}
var ls TagTestArray = TagTestArray{*(*ts)[0], *(*ts)[1]}
if ok, _ := TagTestCmp(&xs, &ls); ok {
t.Logf("results failure: expected:\"id,name,email address\\n42,nAME,eMAIL\\n64,NaMe,EmAiL\", got:%q", string(bs))
t.FailNow()
}
}
// TestDSV_Serialize_Tests verifies that serialized data can be deserialized again and matches what's expected
func TestDSV_Serialize_Tests(t *testing.T) {
for _, tst := range tests {
t.Run(tst.Name, func(t2 *testing.T) {
d := dsv.NewDSVMust(tst.Dsvo)
bs, e := d.Serialize(tst.Expect)
if e != nil {
t.Logf("%s failed: serialization error %v", tst.Name, e)
t.FailNow()
}
switch tst.Into.(type) {
case *[]TagTest:
e = d.Deserialize(bs, (tst.Into.(*[]TagTest)))
case *TagTestArray:
e = d.Deserialize(bs, (tst.Into.(*TagTestArray)))
case *[]genericCSV:
e = d.Deserialize(bs, (tst.Into.(*[]genericCSV)))
default:
t2.Logf("%s failed: invalid type %T", tst.Name, tst.Into)
t2.FailNow()
}
if e != nil {
t2.Logf("%s failed: parse error %v", tst.Name, e)
t2.FailNow()
}
if tst.Len(tst.Into) != tst.Expect.RowCount {
t2.Logf("%s failed: row count expected=%d,got=%d", tst.Name, tst.Expect.RowCount, tst.Len(tst.Into))
t2.FailNow()
}
if pass, errstr := tst.Cmp(tst.Expect.Value, tst.Into); !pass {
t2.Logf("%s failed: cmp fails with message: %s", tst.Name, errstr)
t2.FailNow()
}
})
}
}
type X struct {
F float64
}
type Y struct {
X *X `csv:"x"`
}
func TestDSV_Serialize_FullPkg(t *testing.T) {
d := dsv.NewDSVMust(dsv.DSVOpt{
ParseHeader: dsv.DBool(false),
Serializers: dsv.DSerial(map[string]func(interface{}) ([]byte, bool){
"*dsv_test.X": func(i interface{}) ([]byte, bool) {
switch i.(type) {
case *X:
if i.(*X) == nil {
return []byte("nil"), true
}
return []byte(fmt.Sprintf("+%0.0f", i.(*X).F)), true
}
return []byte{}, false
},
}),
})
bs, e := d.Serialize(&[]Y{
{X: &X{F: 5.00}},
{},
})
if e != nil {
t.Logf("serialization error: %s", e)
t.FailNow()
}
if string(bs) != "+5\nnil" {
t.Logf("serialization wrong: expected=\"+5\\nnil\",got=%q", string(bs))
t.FailNow()
}
}
| TestDSV_Serialize_EnsureOrdering |