file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
apps.py
|
from django.apps import AppConfig
class SystemConfig(AppConfig):
|
name = 'system'
verbose_name = '系统'
# Import signals via ready()
def ready(self):
import system.signals
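# A minimal sketch of what system/signals.py might register (model and handler
# names are hypothetical); importing the module inside ready() is what
# actually connects the receivers:
#
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender='system.SomeModel')
#   def on_some_model_saved(sender, instance, created, **kwargs):
#       ...  # react to the save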
|
|
serial.rs
|
use uart_16550::SerialPort;
use spin::Mutex;
use lazy_static::lazy_static;
lazy_static! {
pub static ref SERIAL1: Mutex<SerialPort> = {
let mut serial_port = unsafe { SerialPort::new(0x3F8) };
serial_port.init();
Mutex::new(serial_port)
};
}
#[doc(hidden)]
pub fn
|
(args: ::core::fmt::Arguments) {
use core::fmt::Write;
use x86_64::instructions::interrupts;
interrupts::without_interrupts(|| {
SERIAL1
.lock()
.write_fmt(args)
.expect("Printing to serial failed");
});
}
#[macro_export]
macro_rules! serial_print {
($($arg:tt)*) => {
$crate::serial::_print(format_args!($($arg)*));
};
}
#[macro_export]
macro_rules! serial_println {
() => ($crate::serial_print!("\n"));
($fmt:expr) => ($crate::serial_print!(concat!($fmt, "\n")));
($fmt:expr, $($arg:tt)*) => ($crate::serial_print!(
concat!($fmt, "\n"), $($arg)*));
}
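// Usage sketch (assuming this module is reachable as `crate::serial`):
//
//   serial_println!("status = {}", 42); // writes "status = 42\n" to COM1 (0x3F8)
//
// _print wraps the write in without_interrupts so an interrupt handler that
// also prints cannot deadlock on the SERIAL1 spinlock.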
|
_print
|
handlers.js
|
import pickBy from 'lodash/pickBy';
import uniq from 'lodash/uniq';
import { ContentNodeKinds } from 'kolibri.coreVue.vuex.constants';
import {
ContentNodeResource,
BookmarksResource,
ContentNodeSearchResource,
ChannelResource,
} from 'kolibri.resources';
import { assessmentMetaDataState } from 'kolibri.coreVue.vuex.mappers';
import router from 'kolibri.coreVue.router';
import chunk from 'lodash/chunk';
import { PageNames } from '../../constants';
import { filterAndAnnotateContentList, fetchChannelQuizzes } from './actions';
function showExamCreationPage(store, params) {
const { contentList, bookmarksList, pageName, ancestors = [], searchResults = null } = params;
return store.dispatch('loading').then(() => {
store.commit('examCreation/SET_ANCESTORS', ancestors);
store.commit('examCreation/SET_CONTENT_LIST', contentList);
store.commit('examCreation/SET_BOOKMARKS_LIST', bookmarksList);
if (searchResults) {
store.commit('examCreation/SET_SEARCH_RESULTS', searchResults);
}
store.commit('SET_PAGE_NAME', pageName);
store.dispatch('notLoading');
});
}
export function showExamCreationRootPage(store, params) {
return store.dispatch('loading').then(() => {
return ChannelResource.fetchCollection({
getParams: { available: true, has_exercise: true },
}).then(channels => {
const channelContentList = channels.map(channel => ({
...channel,
id: channel.root,
title: channel.name,
kind: ContentNodeKinds.CHANNEL,
is_leaf: false,
}));
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAMS,
});
return showExamCreationPage(store, {
classId: params.classId,
contentList: channelContentList,
pageName: PageNames.EXAM_CREATION_ROOT,
});
});
});
}
export function showChannelQuizCreationRootPage(store, params) {
return fetchChannelQuizzes().then(channels => {
const channelContentList = channels.map(channel => ({
...channel,
id: channel.id,
title: channel.title,
kind: ContentNodeKinds.CHANNEL,
is_leaf: false,
}));
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAMS,
});
return showExamCreationPage(store, {
classId: params.classId,
contentList: channelContentList,
pageName: PageNames.EXAM_CREATION_CHANNEL_QUIZ,
});
});
}
export function showChannelQuizCreationTopicPage(store, params) {
return store.dispatch('loading').then(() => {
const { topicId } = params;
const topicNodePromise = ContentNodeResource.fetchModel({ id: topicId });
const childNodesPromise = ContentNodeResource.fetchCollection({
getParams: {
parent: topicId,
kind_in: [ContentNodeKinds.TOPIC, ContentNodeKinds.EXERCISE],
},
});
const loadRequirements = [topicNodePromise, childNodesPromise];
return Promise.all(loadRequirements).then(([topicNode, childNodes]) => {
return filterAndAnnotateContentList(childNodes).then(contentList => {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAMS,
});
return showExamCreationPage(store, {
classId: params.classId,
contentList,
pageName: PageNames.EXAM_CREATION_SELECT_CHANNEL_QUIZ_TOPIC,
ancestors: [...topicNode.ancestors, topicNode],
});
});
});
});
}
export function showExamCreationTopicPage(store, params) {
return store.dispatch('loading').then(() => {
const { topicId } = params;
const topicNodePromise = ContentNodeResource.fetchModel({ id: topicId });
const childNodesPromise = ContentNodeResource.fetchCollection({
getParams: {
parent: topicId,
kind_in: [ContentNodeKinds.TOPIC, ContentNodeKinds.EXERCISE],
},
});
const loadRequirements = [topicNodePromise, childNodesPromise];
return Promise.all(loadRequirements).then(([topicNode, childNodes]) => {
return filterAndAnnotateContentList(childNodes).then(contentList => {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAMS,
});
return showExamCreationPage(store, {
classId: params.classId,
contentList,
pageName: PageNames.EXAM_CREATION_TOPIC,
ancestors: [...topicNode.ancestors, topicNode],
});
});
});
});
}
export function showExamCreationBookmarksPage(store, params) {
return store.dispatch('loading').then(() => {
const { topicId } = params;
const topicNodePromise = ContentNodeResource.fetchModel({ id: topicId });
const childNodesPromise = ContentNodeResource.fetchCollection({
getParams: {
parent: topicId,
kind_in: [ContentNodeKinds.TOPIC, ContentNodeKinds.VIDEO, ContentNodeKinds.EXERCISE],
},
});
const loadRequirements = [topicNodePromise, childNodesPromise];
return Promise.all(loadRequirements).then(([topicNode, childNodes]) => {
return filterAndAnnotateContentList(childNodes).then(() => {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAMS,
});
return showExamCreationPage(store, {
classId: params.classId,
bookmarksList: childNodes,
pageName: PageNames.EXAM_CREATION_BOOKMARKS,
ancestors: [...topicNode.ancestors, topicNode],
});
});
});
});
}
export function showExamCreationAllBookmarks(store) {
return store.dispatch('loading').then(() => {
return getBookmarks().then(bookmarks => {
return showExamCreationPage(store, {
bookmarksList: bookmarks[0],
});
});
});
}
function getBookmarks() {
return BookmarksResource.fetchCollection()
.then(bookmarks => bookmarks.map(bookmark => bookmark.contentnode_id))
.then(contentNodeIds => {
const chunkedContentNodeIds = chunk(contentNodeIds, 50); // Break contentNodeIds into lists of at most 50 ids
// Build an array of promises, each fetching one chunk of up to 50 ids
const fetchPromises = chunkedContentNodeIds.map(idsChunk => {
return ContentNodeResource.fetchCollection({
getParams: {
ids: idsChunk, // This filters only the ids we want
},
});
});
return Promise.all(fetchPromises);
|
}
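// Illustration (hypothetical numbers): with 120 bookmarked nodes, chunk(contentNodeIds, 50)
// yields batches of 50, 50, and 20 ids, so getBookmarks resolves to an array of three
// result arrays. Note that showExamCreationAllBookmarks above only reads bookmarks[0],
// i.e. the first batch of at most 50 nodes.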
export function showExamCreationPreviewPage(store, params, query = {}) {
const { classId, contentId } = params;
return store.dispatch('loading').then(() => {
return Promise.all([_prepExamContentPreview(store, classId, contentId)])
.then(([contentNode]) => {
const { searchTerm, ...otherQueryParams } = query;
if (searchTerm) {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAM_CREATION_SEARCH,
params: {
searchTerm,
},
query: otherQueryParams,
});
} else {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAM_CREATION_TOPIC,
params: {
topicId: contentNode.parent,
},
});
}
store.dispatch('notLoading');
})
.catch(error => {
store.dispatch('notLoading');
return store.dispatch('handleApiError', error);
});
});
}
export function showChannelQuizCreationPreviewPage(store, params) {
const { classId, contentId } = params;
return store.dispatch('loading').then(() => {
return Promise.all([_prepChannelQuizContentPreview(store, classId, contentId)])
.then(([contentNode]) => {
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAM_CREATION_SELECT_CHANNEL_QUIZ_TOPIC,
params: {
topicId: contentNode.parent,
},
});
store.dispatch('notLoading');
})
.catch(error => {
store.dispatch('notLoading');
return store.dispatch('handleApiError', error);
});
});
}
function _prepChannelQuizContentPreview(store, classId, contentId) {
return ContentNodeResource.fetchModel({ id: contentId }).then(
contentNode => {
const contentMetadata = assessmentMetaDataState(contentNode);
store.commit('SET_TOOLBAR_ROUTE', {});
store.commit('examCreation/SET_CURRENT_CONTENT_NODE', { ...contentNode });
store.commit('examCreation/SET_PREVIEW_STATE', {
questions: contentMetadata.assessmentIds,
completionData: contentMetadata.masteryModel,
});
store.commit('SET_PAGE_NAME', PageNames.EXAM_CREATION_CHANNEL_QUIZ_PREVIEW);
return contentNode;
},
error => {
return store.dispatch('handleApiError', error);
}
);
}
function _prepExamContentPreview(store, classId, contentId) {
return ContentNodeResource.fetchModel({ id: contentId }).then(
contentNode => {
const contentMetadata = assessmentMetaDataState(contentNode);
store.commit('SET_TOOLBAR_ROUTE', {});
store.commit('examCreation/SET_CURRENT_CONTENT_NODE', { ...contentNode });
store.commit('examCreation/SET_PREVIEW_STATE', {
questions: contentMetadata.assessmentIds,
completionData: contentMetadata.masteryModel,
});
store.commit('SET_PAGE_NAME', PageNames.EXAM_CREATION_PREVIEW);
return contentNode;
},
error => {
return store.dispatch('handleApiError', error);
}
);
}
export function showExamCreationSearchPage(store, params, query = {}) {
return store.dispatch('loading').then(() => {
let kinds;
if (query.kind) {
kinds = [query.kind];
} else {
kinds = [ContentNodeKinds.EXERCISE, ContentNodeKinds.TOPIC];
}
store.commit('SET_TOOLBAR_ROUTE', {
name: PageNames.EXAM_CREATION_ROOT,
params: {},
});
return ContentNodeSearchResource.fetchCollection({
getParams: {
search: params.searchTerm,
kind_in: kinds,
...pickBy({ channel_id: query.channel }),
},
}).then(results => {
return filterAndAnnotateContentList(results.results).then(contentList => {
const searchResults = {
...results,
results: contentList,
content_kinds: results.content_kinds.filter(kind =>
[ContentNodeKinds.TOPIC, ContentNodeKinds.EXERCISE].includes(kind)
),
contentIdsFetched: uniq(results.results.map(({ content_id }) => content_id)),
};
return showExamCreationPage(store, {
classId: params.classId,
contentList: contentList,
pageName: PageNames.EXAM_CREATION_SEARCH,
searchResults,
});
});
});
});
}
const creationPages = [
PageNames.EXAM_CREATION_ROOT,
PageNames.EXAM_CREATION_TOPIC,
PageNames.EXAM_CREATION_PREVIEW,
PageNames.EXAM_CREATION_SEARCH,
];
export function showExamCreationQuestionSelectionPage(store, toRoute, fromRoute) {
// if we got here from somewhere else, start over
if (!creationPages.includes(fromRoute.name)) {
router.replace({
name: PageNames.EXAM_CREATION_ROOT,
params: toRoute.params,
});
}
store.commit('SET_PAGE_NAME', PageNames.EXAM_CREATION_QUESTION_SELECTION);
store.commit('SET_TOOLBAR_ROUTE', { name: fromRoute.name, params: fromRoute.params });
store.dispatch('examCreation/updateSelectedQuestions');
}
|
});
|
module.ts
|
import './graph';
import './series_overrides_ctrl';
import './thresholds_form';
import './time_regions_form';
import template from './template';
import _ from 'lodash';
import { MetricsPanelCtrl } from 'app/plugins/sdk';
import { DataProcessor } from './data_processor';
import { axesEditorComponent } from './axes_editor';
import config from 'app/core/config';
import { GrafanaTheme, getColorFromHexRgbOrName } from '@grafana/ui';
class
|
extends MetricsPanelCtrl {
static template = template;
renderError: boolean;
hiddenSeries: any = {};
seriesList: any = [];
dataList: any = [];
annotations: any = [];
alertState: any;
annotationsPromise: any;
dataWarning: any;
colors: any = [];
subTabIndex: number;
processor: DataProcessor;
panelDefaults = {
// datasource name, null = default datasource
datasource: null,
// sets client side (flot) or native graphite png renderer (png)
renderer: 'flot',
yaxes: [
{
label: null,
show: true,
logBase: 1,
min: null,
max: null,
format: 'short',
},
{
label: null,
show: true,
logBase: 1,
min: null,
max: null,
format: 'short',
},
],
xaxis: {
show: true,
mode: 'time',
name: null,
values: [],
buckets: null,
},
yaxis: {
align: false,
alignLevel: null,
},
// show/hide lines
lines: true,
// fill factor
fill: 1,
// line width in pixels
linewidth: 1,
// show/hide dashed line
dashes: false,
// length of a dash
dashLength: 10,
// length of space between two dashes
spaceLength: 10,
// show hide points
points: false,
// point radius in pixels
pointradius: 2,
// show hide bars
bars: false,
// enable/disable stacking
stack: false,
// stack percentage mode
percentage: false,
// legend options
legend: {
show: true, // disable/enable legend
values: false, // disable/enable legend values
min: false,
max: false,
current: false,
total: false,
avg: false,
},
// how null points should be handled
nullPointMode: 'null',
// staircase line mode
steppedLine: false,
// tooltip options
tooltip: {
value_type: 'individual',
shared: true,
sort: 0,
},
// time overrides
timeFrom: null,
timeShift: null,
// metric queries
targets: [{}],
// series color overrides
aliasColors: {},
// other style overrides
seriesOverrides: [],
thresholds: [],
timeRegions: [],
};
/** @ngInject */
constructor($scope, $injector, private annotationsSrv) {
super($scope, $injector);
_.defaults(this.panel, this.panelDefaults);
_.defaults(this.panel.tooltip, this.panelDefaults.tooltip);
_.defaults(this.panel.legend, this.panelDefaults.legend);
_.defaults(this.panel.xaxis, this.panelDefaults.xaxis);
this.processor = new DataProcessor(this.panel);
this.events.on('render', this.onRender.bind(this));
this.events.on('data-received', this.onDataReceived.bind(this));
this.events.on('data-error', this.onDataError.bind(this));
this.events.on('data-snapshot-load', this.onDataSnapshotLoad.bind(this));
this.events.on('init-edit-mode', this.onInitEditMode.bind(this));
this.events.on('init-panel-actions', this.onInitPanelActions.bind(this));
}
onInitEditMode() {
this.addEditorTab('Display options', 'public/app/plugins/panel/graph/tab_display.html');
this.addEditorTab('Axes', axesEditorComponent);
this.addEditorTab('Legend', 'public/app/plugins/panel/graph/tab_legend.html');
this.addEditorTab('Thresholds & Time Regions', 'public/app/plugins/panel/graph/tab_thresholds_time_regions.html');
this.subTabIndex = 0;
}
onInitPanelActions(actions) {
actions.push({ text: 'Export CSV', click: 'ctrl.exportCsv()' });
actions.push({ text: 'Toggle legend', click: 'ctrl.toggleLegend()', shortcut: 'p l' });
}
issueQueries(datasource) {
this.annotationsPromise = this.annotationsSrv.getAnnotations({
dashboard: this.dashboard,
panel: this.panel,
range: this.range,
});
/* Wait for annotationSrv requests to get datasources to
* resolve before issuing queries. This allows the annotations
* service to fire annotations queries before graph queries
* (but not wait for completion). This resolves
* issue 11806.
*/
return this.annotationsSrv.datasourcePromises.then(r => {
return super.issueQueries(datasource);
});
}
zoomOut(evt) {
this.publishAppEvent('zoom-out', 2);
}
onDataSnapshotLoad(snapshotData) {
this.annotationsPromise = this.annotationsSrv.getAnnotations({
dashboard: this.dashboard,
panel: this.panel,
range: this.range,
});
this.onDataReceived(snapshotData);
}
onDataError(err) {
this.seriesList = [];
this.annotations = [];
this.render([]);
}
onDataReceived(dataList) {
this.dataList = dataList;
this.seriesList = this.processor.getSeriesList({
dataList: dataList,
range: this.range,
});
this.dataWarning = null;
const datapointsCount = this.seriesList.reduce((prev, series) => {
return prev + series.datapoints.length;
}, 0);
if (datapointsCount === 0) {
this.dataWarning = {
title: 'No data points',
tip: 'No datapoints returned from data query',
};
} else {
for (const series of this.seriesList) {
if (series.isOutsideRange) {
this.dataWarning = {
title: 'Data points outside time range',
tip: 'Can be caused by timezone mismatch or missing time filter in query',
};
break;
}
}
}
this.annotationsPromise.then(
result => {
this.loading = false;
this.alertState = result.alertState;
this.annotations = result.annotations;
this.render(this.seriesList);
},
() => {
this.loading = false;
this.render(this.seriesList);
}
);
}
onRender() {
if (!this.seriesList) {
return;
}
for (const series of this.seriesList) {
series.applySeriesOverrides(this.panel.seriesOverrides);
if (series.unit) {
this.panel.yaxes[series.yaxis - 1].format = series.unit;
}
}
}
onColorChange = (series, color) => {
series.setColor(getColorFromHexRgbOrName(color, config.bootData.user.lightTheme ? GrafanaTheme.Light : GrafanaTheme.Dark));
this.panel.aliasColors[series.alias] = color;
this.render();
};
onToggleSeries = hiddenSeries => {
this.hiddenSeries = hiddenSeries;
this.render();
};
onToggleSort = (sortBy, sortDesc) => {
this.panel.legend.sort = sortBy;
this.panel.legend.sortDesc = sortDesc;
this.render();
};
onToggleAxis = info => {
let override = _.find(this.panel.seriesOverrides, { alias: info.alias });
if (!override) {
override = { alias: info.alias };
this.panel.seriesOverrides.push(override);
}
override.yaxis = info.yaxis;
this.render();
};
addSeriesOverride(override) {
this.panel.seriesOverrides.push(override || {});
}
removeSeriesOverride(override) {
this.panel.seriesOverrides = _.without(this.panel.seriesOverrides, override);
this.render();
}
toggleLegend() {
this.panel.legend.show = !this.panel.legend.show;
this.refresh();
}
legendValuesOptionChanged() {
const legend = this.panel.legend;
legend.values = legend.min || legend.max || legend.avg || legend.current || legend.total;
this.render();
}
exportCsv() {
const scope = this.$scope.$new(true);
scope.seriesList = this.seriesList;
this.publishAppEvent('show-modal', {
templateHtml: '<export-data-modal data="seriesList"></export-data-modal>',
scope,
modalClass: 'modal--narrow',
});
}
}
export { GraphCtrl, GraphCtrl as PanelCtrl };
|
GraphCtrl
|
target.go
|
/*
Copyright © 2019 Michael Gruener
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package target
import (
"fmt"
inv "github.com/bedag/kusible/pkg/inventory"
"github.com/bedag/kusible/pkg/values"
)
func New(entry *inv.Entry, valuesPath string, ejson *values.EjsonSettings) (*Target, error) {
|
func (t *Target) Values() values.Values {
return t.values
}
func (t *Target) Entry() *inv.Entry {
return t.entry
}
|
target := &Target{
entry: entry,
}
groups := entry.Groups()
values, err := values.New(valuesPath, groups, false, *ejson)
if err != nil {
return nil, fmt.Errorf("failed to compile values for target '%s': %s", entry.Name(), err)
}
target.values = values
return target, nil
}
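// Usage sketch (the inventory entry comes from elsewhere in kusible; treating
// an empty EjsonSettings as valid is an assumption):
//
//   ejson := values.EjsonSettings{}
//   t, err := New(entry, "group_vars", &ejson)
//   if err != nil {
//       return err
//   }
//   vals := t.Values() // compiled values for the target's groups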
|
index.js
|
var expressFactory = require('./express-factory');
var request = require('superagent');
var zip = require('../');
var expect = require('chai').expect;
testExpressVersion(2)
testExpressVersion(3)
testExpressVersion(4)
function
|
(version) {
describe('when using express ' + version + '.x', function() {
var server
before(function startServer() {
var app = expressFactory.create(version)
app.get('/test/1', function(req, res) {
res.zip([
{ path: __dirname + '/zip_contents/data1.txt', name: 'data1.txt' },
{ path: __dirname + '/zip_contents/data2.txt', name: '/zip_contents/data2.txt' }
]);
});
app.get('/test/2', function(req, res) {
res.zip([
{ path: __dirname + '/zip_contents/data1.txt', name: 'data1.txt' },
{ path: __dirname + '/zip_contents/data2.txt', name: '/zip_contents/data2.txt' }
], 'test2.zip');
});
server = app.listen(8383)
})
describe('res.zip()', function() {
it('should respond with a valid content-type', function(done) {
request
.get('http://127.0.0.1:8383/test/1')
.end(function(err, res) {
expect(res.headers['content-type']).to.match(/^application\/zip/);
done();
});
});
it('should respond with a valid content-disposition', function(done) {
request
.get('http://127.0.0.1:8383/test/1')
.end(function(err, res) {
expect(res.headers['content-disposition']).to.match(/^attachment; filename="attachment.zip"/);
done();
});
});
it('can pass filename', function(done) {
request
.get('http://127.0.0.1:8383/test/2')
.end(function(err, res) {
expect(res.headers['content-disposition']).to.match(/^attachment; filename="test2.zip"/);
done();
});
});
});
after(function closeServer() {
server.close()
})
})
}
|
testExpressVersion
|
compat.py
|
# Natural Language Toolkit: Compatibility Functions
#
# Copyright (C) 2001-2008 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# Edward Loper <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
"""
Backwards compatibility with previous versions of Python.
This module provides backwards compatibility by defining
functions and classes that were not available in earlier versions of
Python. Intended usage:
>>> from nltk.compat import *
Currently, NLTK requires Python 2.4 or later.
"""
######################################################################
# New in Python 2.5
######################################################################
# ElementTree
try:
from xml.etree import ElementTree
except ImportError:
from nltk.etree import ElementTree
# collections.defaultdict
# originally contributed by Yoav Goldberg <[email protected]>
# new version by Jason Kirtland from Python cookbook.
# <http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/523034>
try:
from collections import defaultdict
except ImportError:
class defaultdict(dict):
def __init__(self, default_factory=None, *a, **kw):
if (default_factory is not None and
not hasattr(default_factory, '__call__')):
raise TypeError('first argument must be callable')
dict.__init__(self, *a, **kw)
self.default_factory = default_factory
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
return self.__missing__(key)
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
self[key] = value = self.default_factory()
return value
def __reduce__(self):
if self.default_factory is None:
args = tuple()
else:
args = self.default_factory,
return type(self), args, None, None, self.items()
def copy(self):
return self.__copy__()
def __copy__(self):
return type(self)(self.default_factory, self)
def __deepcopy__(self, memo):
import copy
return type(self)(self.default_factory,
copy.deepcopy(self.items()))
def
|
(self):
return 'defaultdict(%s, %s)' % (self.default_factory,
dict.__repr__(self))
# [XX] to make pickle happy in python 2.4:
import collections
collections.defaultdict = defaultdict
__all__ = ['ElementTree', 'defaultdict']
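# Usage sketch: with this shim in place, code targeting Python 2.4 gets the
# 2.5+ defaultdict semantics either way:
#
#   >>> from nltk.compat import defaultdict
#   >>> counts = defaultdict(int)
#   >>> counts['token'] += 1   # missing key created via default_factory
#   >>> counts['token']
#   1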
|
__repr__
|
9ede8d2d7089_initialize_migration.py
|
"""Initialize Migration
Revision ID: 9ede8d2d7089
Revises:
Create Date: 2020-09-28 00:25:38.033227
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9ede8d2d7089'
down_revision = None
branch_labels = None
depends_on = None
def
|
():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('roles',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=255), nullable=True),
sa.Column('email', sa.String(length=255), nullable=True),
sa.Column('role_id', sa.Integer(), nullable=True),
sa.Column('bio', sa.String(length=255), nullable=True),
sa.Column('profile_pic_path', sa.String(), nullable=True),
sa.Column('password_hash', sa.String(length=255), nullable=True),
sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=False)
op.create_table('profile_photos',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('pic_path', sa.String(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('profile_photos')
op.drop_index(op.f('ix_users_username'), table_name='users')
op.drop_index(op.f('ix_users_email'), table_name='users')
op.drop_table('users')
op.drop_table('roles')
# ### end Alembic commands ###
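# Usage sketch (standard Alembic CLI): apply with `alembic upgrade head`,
# revert with `alembic downgrade -1`; downgrade drops tables in reverse
# dependency order (profile_photos, then users, then roles).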
|
upgrade
|
vector.ts
|
export default class Vector {
|
y: number;
constructor(x?: number, y?: number) {
this.x = x == undefined ? 0 : x;
this.y = y == undefined ? 0 : y;
}
public static create(x?: number, y?: number) {
return new Vector(x, y);
}
}
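// Usage sketch: both construction paths are equivalent.
//
//   const v = Vector.create(3, 4); // v.x === 3, v.y === 4
//   const origin = new Vector();   // x and y default to 0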
|
x: number;
|
test_userlibrary.py
|
import unittest
import os
from robot.running import userkeyword
from robot.running.model import ResourceFile, UserKeyword
from robot.running.userkeyword import UserLibrary
from robot.errors import DataError
from robot.utils.asserts import (assert_equal, assert_none,
assert_raises_with_msg, assert_true)
class UserHandlerStub:
def __init__(self, kwdata, library):
self.name = kwdata.name
self.libname = library
if kwdata.name == 'FAIL':
raise Exception('Expected failure')
def create(self, name):
return self
class EmbeddedArgsHandlerStub:
def __init__(self, kwdata, library, embedded):
self.name = kwdata.name
if kwdata.name != 'Embedded ${arg}':
raise TypeError
def matches(self, name):
return name == self.name
class TestUserLibrary(unittest.TestCase):
def setUp(self):
self._orig_user_handler = userkeyword.UserKeywordHandler
self._orig_embedded_handler = userkeyword.EmbeddedArgumentsHandler
userkeyword.UserKeywordHandler = UserHandlerStub
userkeyword.EmbeddedArgumentsHandler = EmbeddedArgsHandlerStub
def tearDown(self):
userkeyword.UserKeywordHandler = self._orig_user_handler
userkeyword.EmbeddedArgumentsHandler = self._orig_embedded_handler
def test_name_from_resource(self):
for source, exp in [('resources.html', 'resources'),
(os.path.join('..','res','My Res.HTM'), 'My Res'),
(os.path.abspath('my_res.xhtml'), 'my_res')]:
lib = self._get_userlibrary(source=source)
assert_equal(lib.name, exp)
def test_name_from_test_case_file(self):
assert_none(self._get_userlibrary().name)
def test_creating_keyword(self):
lib = self._get_userlibrary('kw 1', 'kw 2')
assert_equal(len(lib.handlers), 2)
assert_true('kw 1' in lib.handlers)
assert_true('kw 2' in lib.handlers)
def test_creating_keyword_when_kw_name_has_embedded_arg(self):
lib = self._get_userlibrary('Embedded ${arg}')
self._lib_has_embedded_arg_keyword(lib)
def test_creating_keywords_when_normal_and_embedded_arg_kws(self):
lib = self._get_userlibrary('kw1', 'Embedded ${arg}', 'kw2')
assert_equal(len(lib.handlers), 3)
assert_true('kw1' in lib.handlers)
assert_true('kw 2' in lib.handlers)
self._lib_has_embedded_arg_keyword(lib)
def test_creating_duplicate_embedded_arg_keyword_in_resource_file(self):
lib = self._get_userlibrary('Embedded ${arg}', 'kw', 'Embedded ${arg}')
assert_equal(len(lib.handlers), 3)
assert_true(not hasattr(lib.handlers['kw'], 'error'))
self._lib_has_embedded_arg_keyword(lib, count=2)
def test_creating_duplicate_keyword_in_resource_file(self):
|
def test_creating_duplicate_keyword_in_test_case_file(self):
lib = self._get_userlibrary('MYKW', 'my kw')
assert_equal(len(lib.handlers), 1)
assert_true('mykw' in lib.handlers)
assert_equal(lib.handlers['mykw'].error,
"Keyword with same name defined multiple times.")
def test_handlers_contains(self):
lib = self._get_userlibrary('kw')
assert_true('kw' in lib.handlers)
assert_true('nonex' not in lib.handlers)
def test_handlers_getitem_with_non_existing_keyword(self):
lib = self._get_userlibrary('kw')
assert_raises_with_msg(
DataError,
"Test case file contains no keywords matching name 'non existing'.",
lib.handlers.__getitem__, 'non existing')
def test_handlers_getitem_with_existing_keyword(self):
lib = self._get_userlibrary('kw')
handler = lib.handlers['kw']
assert_true(isinstance(handler, UserHandlerStub))
def _get_userlibrary(self, *keywords, **conf):
resource = ResourceFile(**conf)
resource.keywords = [UserKeyword(name) for name in keywords]
resource_type = UserLibrary.TEST_CASE_FILE_TYPE \
if 'source' not in conf else UserLibrary.RESOURCE_FILE_TYPE
return UserLibrary(resource, resource_type)
def _lib_has_embedded_arg_keyword(self, lib, count=1):
assert_true('Embedded ${arg}' in lib.handlers)
embedded = lib.handlers._embedded
assert_equal(len(embedded), count)
for template in embedded:
assert_equal(template.name, 'Embedded ${arg}')
if __name__ == '__main__':
unittest.main()
|
lib = self._get_userlibrary('kw', 'kw', 'kw 2')
assert_equal(len(lib.handlers), 2)
assert_true('kw' in lib.handlers)
assert_true('kw 2' in lib.handlers)
assert_equal(lib.handlers['kw'].error,
"Keyword with same name defined multiple times.")
|
attrs.rs
|
//! FIXME: write short doc here
use crate::{
db::{AstDatabase, DefDatabase, HirDatabase},
Adt, Const, Enum, EnumVariant, FieldSource, Function, HasSource, MacroDef, Module, Static,
Struct, StructField, Trait, TypeAlias, Union,
};
use hir_def::attr::Attr;
use hir_expand::hygiene::Hygiene;
use ra_syntax::ast;
use std::sync::Arc;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum
|
{
Module(Module),
StructField(StructField),
Adt(Adt),
Function(Function),
EnumVariant(EnumVariant),
Static(Static),
Const(Const),
Trait(Trait),
TypeAlias(TypeAlias),
MacroDef(MacroDef),
}
impl_froms!(
AttrDef: Module,
StructField,
Adt(Struct, Enum, Union),
EnumVariant,
Static,
Const,
Function,
Trait,
TypeAlias,
MacroDef
);
pub trait Attrs {
fn attrs(&self, db: &impl HirDatabase) -> Option<Arc<[Attr]>>;
}
pub(crate) fn attributes_query(
db: &(impl DefDatabase + AstDatabase),
def: AttrDef,
) -> Option<Arc<[Attr]>> {
match def {
AttrDef::Module(it) => {
let src = it.declaration_source(db)?;
let hygiene = Hygiene::new(db, src.file_id);
Attr::from_attrs_owner(&src.ast, &hygiene)
}
AttrDef::StructField(it) => match it.source(db).ast {
FieldSource::Named(named) => {
let src = it.source(db);
let hygiene = Hygiene::new(db, src.file_id);
Attr::from_attrs_owner(&named, &hygiene)
}
FieldSource::Pos(..) => None,
},
AttrDef::Adt(it) => match it {
Adt::Struct(it) => attrs_from_ast(it, db),
Adt::Enum(it) => attrs_from_ast(it, db),
Adt::Union(it) => attrs_from_ast(it, db),
},
AttrDef::EnumVariant(it) => attrs_from_ast(it, db),
AttrDef::Static(it) => attrs_from_ast(it, db),
AttrDef::Const(it) => attrs_from_ast(it, db),
AttrDef::Function(it) => attrs_from_ast(it, db),
AttrDef::Trait(it) => attrs_from_ast(it, db),
AttrDef::TypeAlias(it) => attrs_from_ast(it, db),
AttrDef::MacroDef(it) => attrs_from_ast(it, db),
}
}
fn attrs_from_ast<T, D>(node: T, db: &D) -> Option<Arc<[Attr]>>
where
T: HasSource,
T::Ast: ast::AttrsOwner,
D: DefDatabase + AstDatabase,
{
let src = node.source(db);
let hygiene = Hygiene::new(db, src.file_id);
Attr::from_attrs_owner(&src.ast, &hygiene)
}
impl<T: Into<AttrDef> + Copy> Attrs for T {
fn attrs(&self, db: &impl HirDatabase) -> Option<Arc<[Attr]>> {
db.attrs((*self).into())
}
}
|
AttrDef
|
oses.go
|
package mukluk
|
type Os struct {
Os_name string `json:"os_name"`
Os_step int64 `json:"os_step"`
Boot_mode string `json:"boot_mode"`
Boot_kernel string `json:"boot_kernel"`
Boot_initrd string `json:"boot_initrd"`
Boot_options string `json:"boot_options"`
Next_step string `json:"next_step"`
// Init_data []byte `json:"init_data"` // ignored because should be large
}
| |
test_relu6_grad_001.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
|
# limitations under the License.
"""
################################################
Testcase_PrepareCondition:
Testcase_TestSteps:
Testcase_ExpectedResult:
"""
import os
import pytest
from tests.common.base import TestBase
from tests.common.test_run.relu6_grad_run import relu6_grad_run
############################################################
# TestCase= class: put to tests/*/
############################################################
class TestCase(TestBase):
def setup(self):
case_name = "test_akg_relu6_grad_001"
case_path = os.getcwd()
# params init
self.params_init(case_name, case_path)
self.caseresult = True
self._log.info("============= {0} Setup case============".format(self.casename))
self.testarg = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float16")),
("relu6_grad_002", relu6_grad_run, ((8, 28, 28, 4), "float16")),
("relu6_grad_003", relu6_grad_run, ((8, 14, 14, 6), "float16")),
("relu6_grad_004", relu6_grad_run, ((8, 7, 7, 6), "float16")),
("relu6_grad_005", relu6_grad_run, ((8, 4, 4, 6), "float16")),
("relu6_grad_006", relu6_grad_run, ((8, 2, 2, 4), "float16")),
]
self.testarg_cloud = [
# testflag,opfuncname,testRunArgs, dimArgs
("relu6_grad_001", relu6_grad_run, ((1, 128), "float32")),
]
self.testarg_rpc_cloud = [
("relu6_grad_fp32_001", relu6_grad_run, ((8, 28, 28, 4), "float32")),
("relu6_grad_fp32_002", relu6_grad_run, ((8, 14, 14, 6), "float32")),
("relu6_grad_fp32_003", relu6_grad_run, ((8, 7, 7, 6), "float32")),
("relu6_grad_fp32_004", relu6_grad_run, ((8, 4, 4, 6), "float32")),
("relu6_grad_fp32_005", relu6_grad_run, ((8, 2, 2, 4), "float32")),
]
return
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_onecard
def test_run(self):
self.common_run(self.testarg)
def test_run_cloud(self):
self.common_run(self.testarg_cloud)
def test_run_rpc_cloud(self):
self.common_run(self.testarg_rpc_cloud)
def teardown(self):
self._log.info("============= {0} Teardown============".format(self.casename))
return
# a=TestCase()
# a.setup()
# a.test_run()
|
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
|
tm_splitter.py
|
#!/usr/bin/python3
# tm_splitter.py by Michael Henderson
# Split individual TM files from a bulk downloaded XML file containing an aggregation of TMs
# Write out a separate XML files and Solr ready XML file for each TM
import os
import re
import sys
import getopt
import xml.dom.minidom
def main(argv):
#defaults for optional command line arguments
|
# read input file one line at a time
def readfile(filename):
if filename.endswith('.xml'):
fh = open(filename, encoding="utf8")
return fh.readlines()
else: raise ValueError('Filename must end with .xml')
# read from input file using a buffered reader and record separator to demarcate end of record
def readrecords(filename, record_separator='\n', bufsize=4096):
print("reading input file:")
print(filename)
print("using record separator:")
print(record_separator)
print("read buffer size in bytes:")
print(bufsize)
record_separator_pattern = re.compile(record_separator, re.IGNORECASE)
input = open(filename, encoding="utf8")
buf = ''
print("reading TMs...please wait...")
while True:
newbuf = input.read(bufsize)
#print ("newbuf: ", newbuf)
if not newbuf:
if buf != '':
yield buf
return
buf += newbuf
lines = buf.split(record_separator)
for line in lines[:-1]:
yield line + record_separator
buf = lines[-1]
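# Illustration (hypothetical input): for a bulk file whose records end with
# "</case-file>\n", the generator yields one record at a time (separator
# included) without loading the whole file into memory:
#
#   for record in readrecords('bulk.xml', record_separator='</case-file>\n'):
#       handle(record)   # `handle` is a placeholder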
if __name__ == "__main__": main(sys.argv[1:])
|
bulk_patents_file = 'apc161231-56-test.xml'
output_directory = 'tm_corpus'
# regular expressions for tm bibdata extraction
xml_tag ='<?xml version=\"1.0\" encoding=\"UTF-8\"?>'
start_xml_pattern = re.compile("(?P<xml_tag><\?xml[^>]+\?>[\s]*?)", re.IGNORECASE)
doctype_tag = ''
start_doctype_pattern = re.compile("(?P<doctype_tag><!DOCTYPE trademark-applications-daily \[[^]]+\]>[\s]*)", re.IGNORECASE)
start_tm_file = '(?P<start_tm_file_tags><trademark-applications-daily>[^<]+<version>[^<]+<version-no>[^<]+</version-no>[^<]+<version-date>[^<]+</version-date>[^<]+</version>[^<]+<creation-datetime>(?P<creation_datetime>[^<]+)</creation-datetime>[^<]+<application-information>[^<]+<file-segments>[^<]+<file-segment>[^<]+</file-segment>[^<]+<action-keys>[^<]+<action-key>[^<]+</action-key>)'
start_tm_file_pattern = re.compile(start_tm_file, re.IGNORECASE)
creation_datetime_tag = '<creation-datetime>(?P<creation_datetime>[^<]+)</creation-datetime>'
creation_datetime_pattern = re.compile(creation_datetime_tag, re.IGNORECASE)
creation_datetime = ''
tm_start_tag = ''
start_tm_file_tags = ''
eof_tags = ''
start_tm_pattern = re.compile("(?P<tm_start_tag><case-file>)", re.IGNORECASE)
end_tm_pattern = re.compile("<\/case-file>", re.IGNORECASE)
tm_pattern=re.compile("(?P<case_file><case-file>([\w\W]+?</case-file>))", re.IGNORECASE)
INTL_pattern=re.compile("(<international-code>(?P<intl_code>[\w\W]+?)</international-code>)",re.IGNORECASE)
main_class_pattern=re.compile("(?P<main_class_1>[\w])[\s](?P<main_class_2>[\w])")
patent_file_pattern = re.compile("<us-patent-grant([^>]+?file\=\")(?=(?P<patent_file>[^>]+?)\")([^>]+?>)", re.IGNORECASE)
record_separator = "</case-file>\n"
end_of_file = '(?P<eof_tags></action-keys>[^<]+</file-segments>[^<]+</application-information>[^<]+</trademark-applications-daily>)'
patent_file_base_pattern = re.compile("(?P<file_base>[\w\-]+?)[\.][xX][mM][lL]", re.IGNORECASE)
serial_number_pattern = re.compile("(<serial-number>(?P<serial_number>[\w\W]+?)</serial-number>)", re.IGNORECASE)
eof_pattern = re.compile(end_of_file, re.IGNORECASE)
mark_id_pattern = re.compile("(<mark-identification>(?P<mark_id>[\w\W]+?)</mark-identification>)", re.IGNORECASE)
gs_pattern = re.compile("(<case-file-statement>[^<]+<type-code>(?P<gs_type_code>GS[\w\W]+?)</type-code>[^<]+<text>(?P<gs_text>[\w\W]+?)</text>)", re.IGNORECASE)
cf_stmts_pattern = re.compile("(?P<cf_stmts><case-file-statements>([\w\W]+?</case-file-statements>))", re.IGNORECASE)
tm_drawing_code_pattern = re.compile ("(<mark-drawing-code>(?P<mark_drawing_code>[\w\W]+?)</mark-drawing-code>)", re.IGNORECASE)
tm_design_code_pattern = re.compile ("(<design-search>[^<]+<code>(?P<design_search_code>[\w\W]+?)</code>)", re.IGNORECASE)
tm_design_searches_pattern = re.compile("(?P<design_searches><design-searches>([\w\W]+?</design-searches>))", re.IGNORECASE)
# counters for tracking frequencies of XML tags
line_count_readfile = 0
start_xml_count = 0
start_doctype_count = 0
start_patent_count = 0
end_patent_count = 0
patent_count = 0
USPC_count = 0
tm_list_length = 0
# list that will hold records read from the input file
records = []
# list that will hold TM records
tm_list = []
# list that will hold TM design codes
tm_design_codes_list = []
# list that will hold the TM GS codes + text
tm_gs_codes_list = []
# parse the optional command line arguments; use default values if no cmd line args provided by user
print('Number of arguments: ', len(sys.argv), 'arguments')
print('Argument List:', str(sys.argv))
print('Argv: ', argv)
numargs = len(sys.argv)
if numargs < 2: raise TypeError('requires at least one argument: tm_splitter.py [-i <inputfile> | --ifile <inputfile>]')
try:
opts, args = getopt.getopt(argv, "hi:d:", ["ifile=", "odir="])
except getopt.GetoptError:
print ('tm_splitter.py [-i <inputfile> | --ifile <inputfile>] [-d <outdir> | --odir <outdir>]')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('tm_splitter.py [-i <inputfile> | --ifile <inputfile>][-d <outdir>|--odir <outdir>]')
sys.exit()
elif opt in ("-i", "--ifile"):
bulk_patents_file = arg
elif opt in ("-d", "--odir"):
output_directory = arg
try:
# use buffered reader to read records from in input file; build a list of strings representing TM records
records = readrecords(bulk_patents_file, record_separator)
record_list = list(records)
for record in record_list:
match_start_tm_file_pattern = re.search(start_tm_file_pattern, record)
if match_start_tm_file_pattern:
start_tm_file_dict = match_start_tm_file_pattern.groupdict()
start_tm_file_tags = start_tm_file_dict['start_tm_file_tags']
print("start_tm_file_tags:", start_tm_file_tags)
match_creation_datetime = re.search(creation_datetime_pattern, record)
if match_creation_datetime:
creation_datetime_dict = match_creation_datetime.groupdict()
creation_datetime = creation_datetime_dict['creation_datetime']
print("creation datetime: ", creation_datetime)
match_eof = re.search(eof_pattern, record)
if match_eof:
print("matched eof: ", record)
match_eof_dict = match_eof.groupdict()
eof_tags = match_eof_dict['eof_tags']
# match open + closing tags i.e. <case-file ...> ...</case-file>
match_tm = re.search(tm_pattern, record)
if match_tm:
match_tm_dict = match_tm.groupdict()
tm_case_file = match_tm_dict['case_file']
tm_list.append(tm_case_file)
else:
print("didn't match TM record:", record)
next
tm_list_length = len(tm_list)
print ("finished reading TMs")
print("number of TMs read:")
print(tm_list_length)
print("")
except IOError as e:
print("could not open file: ", e)
except ValueError as e:
print('bad filename', e)
# extract bibdata from each record
print("processing tms:")
for record in tm_list:
tm_design_codes_list = []
# extract opening xml elements i.e.<?xml version="1.0" encoding="UTF-8"?>
match_xml_start = re.search(start_xml_pattern, record)
if match_xml_start:
start_xml_count += 1
match_xml_start_dict = match_xml_start.groupdict()
xml_tag = match_xml_start_dict['xml_tag']
# remove trailing white space including newline
xml_tag = xml_tag.rstrip()
#count doctype elements i.e. <!DOCTYPE us-patent-grant SYSTEM "us-patent-grant-v42-2006-08-23.dtd" [ ]>
match_doctype_start = re.search(start_doctype_pattern, record)
if match_doctype_start:
start_doctype_count += 1
match_doctype_dict = match_doctype_start.groupdict()
doctype_tag = match_doctype_dict['doctype_tag']
# remove trailing white space including newline
doctype_tag = doctype_tag.rstrip()
# match open + closing tags i.e. <case-file ...> ...</case-file>
match_tm = re.search(tm_pattern, record)
if match_tm:
patent_count +=1
print ("processing tm number: ", patent_count, " / ", tm_list_length)
print(tm_list_length)
else:
print("didn't match TM record:", record)
next
# extract us tm opening elements i.e. <us-patent-grant ... file="USD0595476-20090707.XML" ...>
match_tm_start = re.search(start_tm_pattern, record)
if match_tm_start:
start_patent_count +=1
match_tm_start_dict= match_tm_start.groupdict()
tm_start_tag = match_tm_start_dict['tm_start_tag']
tm_start_tag = tm_start_tag.rstrip()
# count tm closing elements i.e. </us-patent-grant>
match_tm_end = re.search(end_tm_pattern, record)
if match_tm_end:
end_patent_count += 1
# extract the patent XML file name i.e.<us-patent-grant ... file="USD0595476-20090707.XML" ...>
match_patent_file = re.search(patent_file_pattern, record)
# assign default file names in case serial number is missing
tm_file = 'tm_123.xml'
tm_file_base = 'tm123'
tm_file_solr = 'solr_' + tm_file
# extract the TM serial number
match_tm_serial = re.search(serial_number_pattern, record)
if match_tm_serial:
match_tm_serial_dict = match_tm_serial.groupdict()
tm_serial_number = match_tm_serial_dict['serial_number']
tm_file = tm_serial_number + ".xml"
tm_file_solr = "solr_" + tm_file
print("tm_file: ", tm_file)
print("tm_file_solr:", tm_file_solr)
# extract the TM mark id
tm_mark_id = "empty"
match_tm_mark_id = re.search(mark_id_pattern, record)
if match_tm_mark_id:
match_tm_mark_id_dict = match_tm_mark_id.groupdict()
tm_mark_id = match_tm_mark_id_dict['mark_id']
print("tm_mark_id: ", tm_mark_id)
# extract TM goods and services embedded under case-file-statements:
cf_stmts = "empty"
tm_gs_code = "empty"
tm_gs_text = "empty"
tm_gs_codes_list = []
# match & extract <case-file-statements>...</>
match_cf_stmts = re.search(cf_stmts_pattern, record)
if match_cf_stmts:
match_cf_stmts_dict = match_cf_stmts.groupdict()
cf_stmts = match_cf_stmts_dict['cf_stmts']
print("cf_stmts: ", cf_stmts.encode("utf-8"))
# split the individual GS codes
lines = cf_stmts.split("</case-file-statement>")
gs_cnt = 0
for line in lines[:-1]:
tm_gs_code = "empty"
tm_gs_text = "empty"
match_tm_gs = re.search(gs_pattern, line)
if match_tm_gs:
gs_cnt += 1
print("line: ", gs_cnt)
print(line.encode("utf-8"))
match_tm_gs_dict = match_tm_gs.groupdict()
tm_gs_code = match_tm_gs_dict['gs_type_code']
tm_gs_text = match_tm_gs_dict['gs_text']
tm_gs_codes_list.append(tm_gs_code + ":" + tm_gs_text)
print("appended to tm_gs_codes_list: ")
print(tm_gs_code + ":" + tm_gs_text)
# extract TM mark drawing code pattern
tm_drawing_code = "empty"
match_tm_drawing_code = re.search(tm_drawing_code_pattern, record)
if match_tm_drawing_code:
match_tm_drawing_code_dict = match_tm_drawing_code.groupdict()
tm_drawing_code = match_tm_drawing_code_dict['mark_drawing_code']
print("tm_drawing_code: ", tm_drawing_code.encode("utf-8"))
#extract TM design searches
tm_design_searches = "empty"
match_tm_design_searches = re.search(tm_design_searches_pattern, record)
if match_tm_design_searches:
match_tm_design_searches_dict = match_tm_design_searches.groupdict()
tm_design_searches = match_tm_design_searches_dict['design_searches']
print("tm_design_searches: ", tm_design_searches.encode("utf-8"))
lines = tm_design_searches.split("</design-search>")
for line in lines[:-1]:
print("line: ")
print(line.encode("utf-8"))
# extract TM design codes: <code> </code> elements
match_design_code = re.search(tm_design_code_pattern, line)
if match_design_code:
match_design_code_dict = match_design_code.groupdict()
tm_design_code = match_design_code_dict['design_search_code']
tm_design_codes_list.append(tm_design_code)
print("appended to tm_design_codes_list: ")
print(tm_design_code.encode("utf-8"))
# extract the match and append to tm_design_codes_list
# extract the XML file name of the patent
if match_patent_file:
patent_file_dict = match_patent_file.groupdict()
tm_file = patent_file_dict['patent_file']
# extract the base name of input file to create directory name
match_patent_file_base = re.search(patent_file_base_pattern, bulk_patents_file)
if match_patent_file_base:
patent_file_base_dict = match_patent_file_base.groupdict()
tm_file_base = patent_file_base_dict['file_base']
print("tm_file_base:", tm_file_base.encode("utf-8"))
# extract the TM intl code
int_code = "no_intl_code"
if (re.search(INTL_pattern, record)):
INTL_pattern_match = re.search(INTL_pattern, record).groupdict()
print ("INTL_pattern_match:", INTL_pattern_match)
USPC_count += 1
# get rid of leading and trailing white space
int_code = INTL_pattern_match['intl_code'].strip()
# get rid of internal white space in main class
# i.e. D 1 =>D1, D 2=>D2, D 9=>D9
is_intern_space = re.search(main_class_pattern, int_code)
if (is_intern_space):
main_class_dict = is_intern_space.groupdict()
int_code = main_class_dict['main_class_1'] + main_class_dict['main_class_2']
# build the patents corpus under the output directory using the international code
#directory = output_directory + "/" + int_code
#path_tm = output_directory + "/" + int_code + "/" + tm_file
# build the TM corpus under the output directory using the base name of the XML input file
directory = output_directory + "/" + tm_file_base + "/solr"
# complete TM record XML file
path_tm = output_directory + "/" + tm_file_base + "/" + tm_file
# solr ready XML
path_tm_solr = output_directory + "/" + tm_file_base + "/solr/" + tm_file_solr
#this call to os.path.dirname significantly impacts the performance
#directory = os.path.dirname(path)
# in Python 3: os.makedirs() supports exist_ok=True to ignore directory already exists errors
print("creating directory: ")
print(directory.encode("utf-8"))
os.makedirs(directory, exist_ok=True)
# write the complete TM XML file under directory named after TM annual XML file
print("creating tm file:")
print(path_tm.encode("utf-8"))
outfile = open(path_tm,'w', encoding="utf8")
outfile.write(xml_tag)
outfile.write("\n")
outfile.write(start_tm_file_tags)
outfile.write("\n")
outfile.write(record)
outfile.write("\n")
outfile.write(eof_tags)
outfile.close()
print("tm file creation complete\n")
# write the Solr ready XML file
print("creating Solr ready XML file:")
print(path_tm_solr.encode("utf-8"))
outfile = open(path_tm_solr,'w', encoding="utf8")
outfile.write(xml_tag)
outfile.write("\n")
outfile.write("<add>\n")
outfile.write("<doc>\n")
# set id to serial number
outfile.write("<field name=\"id\">")
outfile.write(tm_serial_number)
outfile.write("</field>\n")
# creation-datetime
outfile.write("<field name=\"creation-datetime\">")
outfile.write(creation_datetime)
outfile.write("</field>\n")
# parent document type is tm-bibdata
outfile.write("<field name=\"type\">")
outfile.write("tm-bibdata")
outfile.write("</field>\n")
# serial number
outfile.write("<field name=\"serial-number\">")
outfile.write(tm_serial_number)
outfile.write("</field>\n")
# mark-identification
outfile.write("<field name=\"mark-identification\">")
outfile.write(tm_mark_id)
outfile.write("</field>\n")
# mark-drawing-code
outfile.write("<field name=\"mark-drawing-code\">")
outfile.write(tm_drawing_code)
outfile.write("</field>\n")
# design codes:
for design_code in tm_design_codes_list:
outfile.write("<field name=\"design-code\">")
outfile.write(design_code)
outfile.write("</field>\n")
# good-services
for gs_code_text_pair in tm_gs_codes_list:
print ("gs_code_text_pair:")
print(gs_code_text_pair.encode("utf-8"))
lines = re.split(":", gs_code_text_pair)
gs_code = lines[0]
gs_text = lines[1]
# add the gs code and text as nested document w/unique id
gs_serial_number = tm_serial_number + "_" + gs_code
outfile.write("<doc>\n")
outfile.write("<field name =\"id\">")
outfile.write(gs_serial_number)
outfile.write("</field>\n")
outfile.write("<field name=\"type\">")
outfile.write("goods-services")
outfile.write("</field>\n")
outfile.write("<field name=\"gs-code\">")
outfile.write(gs_code)
outfile.write("</field>\n")
outfile.write("<field name=\"gs-text\">")
outfile.write(gs_text)
outfile.write("</field>\n")
outfile.write("</doc>\n")
outfile.write("</doc>\n")
outfile.write("</add>\n")
outfile.close()
print("tm Solr file creation complete\n")
# print summary of tags matched during tm processing
print("\ntm processing summary:")
print("Number of lines processed from input file:")
#print(len(readfile(bulk_patents_file)))
print("Number of tm classifications extracted:")
print(USPC_count)
print("Number of opening xml elements matched:")
print(start_xml_count)
print("Number of doctype elements matched:")
print(start_doctype_count)
print("Number of start tm elements matched:")
print(start_patent_count)
print("Number of end tm elements matched: ")
print(end_patent_count)
print ("Number of complete tms matched:")
print(patent_count)
print("Corpus created under output directory:")
print(os.path.abspath(output_directory))
print("tm processing complete")
|
deploy.go
|
package eks
import (
"math"
"strings"
"time"
"github.com/aws/aws-sdk-go/service/autoscaling"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/elb"
"github.com/gruntwork-io/gruntwork-cli/errors"
"github.com/gruntwork-io/kubergrunt/kubectl"
"github.com/gruntwork-io/kubergrunt/logging"
)
// RollOutDeployment will perform a zero downtime roll out of the current launch configuration associated with the
// provided ASG in the provided EKS cluster. This is accomplished by:
// 1. Double the desired capacity of the Auto Scaling Group that powers the EKS Cluster. This will launch new EKS
// workers with the new launch configuration.
// 2. Wait for the new nodes to be ready for Pod scheduling in Kubernetes.
// 3. Drain the pods scheduled on the old EKS workers (using the equivalent of "kubectl drain"), so that they will be
// rescheduled on the new EKS workers.
// 4. Wait for all the pods to migrate off of the old EKS workers.
// 5. Set the desired capacity down to the original value and remove the old EKS workers from the ASG.
// TODO feature request: Break up into stages/checkpoints, and store state along the way so that command can pick up
// from a stage if something bad happens.
func
|
(
region string,
eksAsgName string,
kubectlOptions *kubectl.KubectlOptions,
drainTimeout time.Duration,
maxRetries int,
sleepBetweenRetries time.Duration,
) error {
logger := logging.GetProjectLogger()
logger.Infof("Beginning roll out for EKS cluster worker group %s in %s", eksAsgName, region)
// Construct clients for AWS
sess, err := NewAuthenticatedSession(region)
if err != nil {
return errors.WithStackTrace(err)
}
asgSvc := autoscaling.New(sess)
ec2Svc := ec2.New(sess)
elbSvc := elb.New(sess)
logger.Infof("Successfully authenticated with AWS")
// Retrieve the ASG object and gather required info we will need later
originalCapacity, currentInstanceIds, err := getAsgInfo(asgSvc, eksAsgName)
if err != nil {
return err
}
// Calculate default max retries
if maxRetries == 0 {
maxRetries = getDefaultMaxRetries(originalCapacity, sleepBetweenRetries)
logger.Infof(
"No max retries set. Defaulted to %d based on sleep between retries duration of %s and scale up count %d.",
maxRetries,
sleepBetweenRetries,
originalCapacity,
)
}
// Make sure ASG is in steady state
if originalCapacity != int64(len(currentInstanceIds)) {
logger.Infof("Ensuring ASG is in steady state (current capacity = desired capacity)")
err = waitForCapacity(asgSvc, eksAsgName, maxRetries, sleepBetweenRetries)
if err != nil {
logger.Error("Error waiting for ASG to reach steady state. Try again after the ASG is in a steady state.")
return err
}
logger.Infof("Verified ASG is in steady state (current capacity = desired capacity)")
originalCapacity, currentInstanceIds, err = getAsgInfo(asgSvc, eksAsgName)
if err != nil {
return err
}
}
logger.Infof("Starting with the following list of instances in ASG:")
logger.Infof("%s", strings.Join(currentInstanceIds, ","))
logger.Infof("Launching new nodes with new launch config on ASG %s", eksAsgName)
err = scaleUp(
asgSvc,
ec2Svc,
elbSvc,
kubectlOptions,
eksAsgName,
originalCapacity*2,
currentInstanceIds,
maxRetries,
sleepBetweenRetries,
)
if err != nil {
return err
}
logger.Infof("Successfully launched new nodes with new launch config on ASG %s", eksAsgName)
logger.Infof("Draining Pods on old instances in cluster ASG %s", eksAsgName)
err = drainNodesInAsg(ec2Svc, kubectlOptions, currentInstanceIds, drainTimeout)
if err != nil {
logger.Errorf("Error while draining nodes.")
logger.Errorf("Continue to drain nodes that failed manually, and then terminate the underlying instances to complete the rollout.")
return err
}
logger.Infof("Successfully drained all scheduled Pods on old instances in cluster ASG %s", eksAsgName)
logger.Infof("Removing old nodes from ASG %s", eksAsgName)
err = detachInstances(asgSvc, eksAsgName, currentInstanceIds)
if err != nil {
logger.Errorf("Error while detaching the old instances.")
logger.Errorf("Continue to detach the old instances and then terminate the underlying instances to complete the rollout.")
return err
}
err = terminateInstances(ec2Svc, currentInstanceIds)
if err != nil {
logger.Errorf("Error while terminating the old instances.")
logger.Errorf("Continue to terminate the underlying instances to complete the rollout.")
return err
}
logger.Infof("Successfully removed old nodes from ASG %s", eksAsgName)
logger.Infof("Successfully finished roll out for EKS cluster worker group %s in %s", eksAsgName, region)
return nil
}
// Retrieves current state of the ASG and returns the original Capacity and the IDs of the instances currently
// associated with it.
func getAsgInfo(asgSvc *autoscaling.AutoScaling, asgName string) (int64, []string, error) {
logger := logging.GetProjectLogger()
logger.Infof("Retrieving current ASG info")
asg, err := GetAsgByName(asgSvc, asgName)
if err != nil {
return -1, nil, err
}
originalCapacity := *asg.DesiredCapacity
currentInstances := asg.Instances
currentInstanceIds := idsFromAsgInstances(currentInstances)
logger.Infof("Successfully retrieved current ASG info.")
logger.Infof("\tCurrent desired capacity: %d", originalCapacity)
logger.Infof("\tCurrent capacity: %d", len(currentInstances))
return originalCapacity, currentInstanceIds, nil
}
// Calculates the default max retries based on a heuristic of 10 minutes (600 seconds) per wave. This assumes that the
// ASG scales up in waves of 10 instances, so the number of retries is:
// ceil(scaleUpCount / 10) * 10 minutes / sleepBetweenRetries
func getDefaultMaxRetries(scaleUpCount int64, sleepBetweenRetries time.Duration) int {
logger := logging.GetProjectLogger()
numWaves := int(math.Ceil(float64(scaleUpCount) / float64(10)))
logger.Debugf("Calculated number of waves as %d (scaleUpCount %d)", numWaves, scaleUpCount)
sleepBetweenRetriesSeconds := int(math.Trunc(sleepBetweenRetries.Seconds()))
defaultMaxRetries := numWaves * 600 / sleepBetweenRetriesSeconds
logger.Debugf(
"Calculated default max retries as %d (scaleUpCount %d, num waves %d, duration (s) %d)",
defaultMaxRetries,
scaleUpCount,
numWaves,
sleepBetweenRetriesSeconds,
)
return defaultMaxRetries
}
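// For illustration (hypothetical inputs): scaling up by 25 instances with
// sleepBetweenRetries = 15s gives numWaves = ceil(25/10) = 3, so
// defaultMaxRetries = 3 * 600 / 15 = 120 retries (~30 minutes of polling).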
|
RollOutDeployment
|
tableBoardGame.ts
|
export class TableBoardGame {
BoardGameId: number;
BGGId: number | null;
BoardGameName: string;
|
TableId: number;
GamerId: string;
GamerNickname: string;
}
|
MinBoardGamePlayers: number;
MaxBoardGamePlayers: number;
ImageUrl: string;
|
gdc_req_legacy.py
|
'''
this script queries the gdc legacy archive via the search and retrieve api and
returns msi_status object (from files endpoint on legacy)
-- get uuids of xml files with the msi annotations from legacy server
-- download each xml file
-- parse xml files to extract msi annotations for each subject
script should be called from within gdc_ann_make, which itself should be called
as part of the snakemake pipeline
-- usage: snakemake setup_tcga
'''
import io
import json
import os
import pandas as pd
import requests
import re
import subprocess
import glob
import xml.etree.ElementTree as ET
modname = 'gdc_req_legacy'
def
|
():
'''
set filters for gdc legacy files endpoint search
-- json format
-- for files.data_type, values for MSI status are 'Auxiliary test' and
'Microsatellite instability'
-- here use 'Auxiliary test' per TCGAbiolinks examples
'''
filters = {
'op':'and',
'content':[
{'op':'or',
'content':[
{'op':'in',
'content':{
'field':'cases.project.project_id',
'value':'TCGA-COAD'
}
},
{'op':'in',
'content':{
'field':'cases.project.project_id',
'value':'TCGA-READ'
}
}
]
},
{'op':'and',
'content':[
{'op':'in',
'content':{
'field':'files.data_category',
'value':'Other'
}
},
{'op':'in',
'content':{
'field':'files.data_type',
'value':'Auxiliary test'
}
},
{'op':'in',
'content':{
'field':'files.access',
'value':'open'
}
}
]
}
]
}
filters = json.dumps(filters)
return filters
def set_fields():
'''
set fields for extraction from endpoint
'''
fields = [
'file_name',
'file_id',
'md5sum',
'file_size',
'state'
]
fields = ','.join(fields)
return fields
def set_params(filters,fields):
'''
set parameters for https get request to endpoint
-- set size parameter empirically to a level greater than number of target
cases to get all records at once
'''
params = {
'filters': filters,
'fields': fields,
'format': 'TSV',
'size': '1500'
}
return params
def get_results(endpoint,params):
    '''
    given an endpoint and parameters, execute an HTTPS GET request for xml
    file_id entities and build a results dataframe with msi results
    '''
    response = requests.get(endpoint, params=params)
    buffer = io.StringIO(response.content.decode('utf-8'))  # avoid shadowing the builtin `object`
    results = pd.read_table(buffer)
    return results
def download_xml_uuid(files_res,dest):
'''
download xml files one at a time by uuid
'''
file_count = 0
for uuid in files_res.id:
cmd = ' '.join(['gdc-client download',uuid,'-d',dest])
subprocess.call(cmd, shell=True)
print(' '.join([uuid,'downloaded']))
file_count = file_count + 1
print(' '.join([str(file_count),'files downloaded']))
def download_xml_manifest(files_res,dest):
'''
-- create manifest object
-- write manifest to file
-- use manifest for bulk download
'''
select = ['file_id', 'file_name', 'md5sum', 'file_size', 'state']
manifest = files_res[select]
manifest.columns = ['id', 'filename', 'md5', 'size', 'state']
manifest = manifest.sort_values(by=['id'])
out_file = dest + 'manifest.tsv'
manifest.to_csv(out_file, sep='\t', index=False)
cmd = ' '.join(['gdc-client download','-m',out_file,'-d',dest])
subprocess.call(cmd, shell=True)
print('manifest downloaded')
def parse_xml(files_res,dest):
'''
parse xml files to extract msi status
'''
msi_dict = {}
msi_dict['subject_id'] = []
msi_dict['msi_status'] = []
tag1 = 'mononucleotide_and_dinucleotide_marker_panel_analysis_status'
tag2 = 'mononucleotide_marker_panel_analysis_status'
file_count = 0
for uuid in files_res.id:
pattern = dest + uuid + '/*.xml'
fn = glob.glob(pattern)[0]
tree = ET.parse(fn)
    for elem in tree.iter():  # ElementTree.getiterator() was removed in Python 3.9
        if 'bcr_patient_barcode' in elem.tag:
            subject_id = elem.text
        if tag1 in elem.tag and elem.text is not None:
            msi_status = elem.text
        elif tag2 in elem.tag and elem.text is not None:
            msi_status = elem.text
msi_dict['subject_id'].append(subject_id)
msi_dict['msi_status'].append(msi_status)
file_count = file_count + 1
print(' '.join([str(file_count),'files parsed']))
msi_res = pd.DataFrame.from_dict(msi_dict)
return msi_res
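# note: the returned frame has one row per parsed file, with columns
# 'subject_id' (TCGA patient barcode) and 'msi_status' (typically 'MSS',
# 'MSI-L', or 'MSI-H', as read from the xml tags above)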
def check_outpath(out_path):
'''
    check for the presence or absence of out_path and make directories if absent
'''
l = out_path.strip('/').split('/')
d = ''
for e in l:
d = d + '/' + e
if os.path.exists(d):
print(d,'present')
else:
print(d,'absent')
print('making',d,'now')
os.mkdir(d)
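# note: the loop above has the same effect as os.makedirs(out_path, exist_ok=True),
# kept explicit here for the per-directory presence prints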
def main():
endpoint = 'https://api.gdc.cancer.gov/legacy/files/'
filters = set_filters()
fields = set_fields()
params = set_params(filters, fields)
files_res = get_results(endpoint, params)
dest = os.environ['ann_dir'] + 'tcga/msi/'
check_outpath(dest)
download_xml_manifest(files_res, dest)
msi_res = parse_xml(files_res, dest)
return msi_res
if __name__ == '__main__':
    print('This script is not meant to be run as main. See usage statement:')
print('usage: snakemake setup_tcga')
else:
msi_res = main()
|
set_filters
|
standardista-table-sorting.js
|
/**
* Written by Neil Crosby.
* http://www.workingwith.me.uk/articles/scripting/standardista_table_sorting
*
* This module is based on Stuart Langridge's "sorttable" code. Specifically,
* the determineSortFunction, sortCaseInsensitive, sortDate, sortNumeric, and
* sortCurrency functions are heavily based on his code. This module would not
* have been possible without Stuart's earlier outstanding work.
*
* Use this wherever you want, but please keep this comment at the top of this file.
*
* Copyright (c) 2006 Neil Crosby
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
**/
var standardistaTableSorting = {
that: false,
isOdd: false,
sortColumnIndex : -1,
lastAssignedId : 0,
newRows: -1,
lastSortedTable: -1,
/**
* Initialises the Standardista Table Sorting module
**/
init : function() {
// first, check whether this web browser is capable of running this script
if (!document.getElementsByTagName) {
return;
}
this.that = this;
this.run();
},
/**
* Runs over each table in the document, making it sortable if it has a class
* assigned named "sortable" and an id assigned.
**/
run : function() {
var tables = document.getElementsByTagName("table");
for (var i=0; i < tables.length; i++) {
var thisTable = tables[i];
if (css.elementHasClass(thisTable, 'sortable')) {
this.makeSortable(thisTable);
}
}
},
/**
* Makes the given table sortable.
**/
makeSortable : function(table) {
// first, check if the table has an id. if it doesn't, give it one
if (!table.id) {
table.id = 'sortableTable'+this.lastAssignedId++;
}
// if this table does not have a thead, we don't want to know about it
if (!table.tHead || !table.tHead.rows || 0 == table.tHead.rows.length) {
return;
}
// we'll assume that the last row of headings in the thead is the row that
// wants to become clickable
var row = table.tHead.rows[table.tHead.rows.length - 1];
for (var i=0; i < row.cells.length; i++) {
// create a link with an onClick event which will
// control the sorting of the table
var linkEl = createElement('a');
linkEl.href = '#';
linkEl.onclick = this.headingClicked;
linkEl.setAttribute('columnId', i);
linkEl.title = 'Click to sort';
// move the current contents of the cell that we're
// hyperlinking into the hyperlink
var innerEls = row.cells[i].childNodes;
for (var j = 0; j < innerEls.length; j++) {
linkEl.appendChild(innerEls[j]);
}
// and finally add the new link back into the cell
row.cells[i].appendChild(linkEl);
var spanEl = createElement('span');
spanEl.className = 'tableSortArrow';
spanEl.appendChild(document.createTextNode('\u00A0\u00A0'));
row.cells[i].appendChild(spanEl);
}
if (css.elementHasClass(table, 'autostripe')) {
this.isOdd = false;
var rows = table.tBodies[0].rows;
// stripe the rows that are already in the table body
for (var i=0;i<rows.length;i++) {
this.doStripe(rows[i]);
}
}
},
headingClicked: function(e) {
var that = standardistaTableSorting.that;
// linkEl is the hyperlink that was clicked on which caused
// this method to be called
var linkEl = getEventTarget(e);
// directly outside it is a td, tr, thead and table
var td = linkEl.parentNode;
var tr = td.parentNode;
var thead = tr.parentNode;
var table = thead.parentNode;
// if the table we're looking at doesn't have any rows
// (or only has one) then there's no point trying to sort it
if (!table.tBodies || table.tBodies[0].rows.length <= 1) {
return false;
}
// the column we want is indicated by td.cellIndex
var column = linkEl.getAttribute('columnId') || td.cellIndex;
//var column = td.cellIndex;
// find out what the current sort order of this column is
var arrows = css.getElementsByClass(td, 'tableSortArrow', 'span');
var previousSortOrder = '';
if (arrows.length > 0) {
previousSortOrder = arrows[0].getAttribute('sortOrder');
}
// work out how we want to sort this column using the data in the first cell
// but just getting the first cell is no good if it contains no data
// so if the first cell just contains white space then we need to track
// down until we find a cell which does contain some actual data
var itm = '';
var rowNum = 0;
while ('' == itm && rowNum < table.tBodies[0].rows.length) {
itm = that.getInnerText(table.tBodies[0].rows[rowNum].cells[column]);
rowNum++;
}
var sortfn = that.determineSortFunction(itm);
// if the last column that was sorted was this one, then all we need to
// do is reverse the sorting on this column
if (table.id == that.lastSortedTable && column == that.sortColumnIndex) {
var newRows = that.newRows;
newRows.reverse();
// otherwise, we have to do the full sort
} else {
that.sortColumnIndex = column;
var newRows = new Array();
for (var j = 0; j < table.tBodies[0].rows.length; j++) {
newRows[j] = table.tBodies[0].rows[j];
}
newRows.sort(sortfn);
}
that.moveRows(table, newRows);
that.newRows = newRows;
that.lastSortedTable = table.id;
// now, give the user some feedback about which way the column is sorted
// first, get rid of any arrows in any heading cells
var arrows = css.getElementsByClass(tr, 'tableSortArrow', 'span');
for (var j = 0; j < arrows.length; j++) {
var arrowParent = arrows[j].parentNode;
arrowParent.removeChild(arrows[j]);
if (arrowParent != td) {
spanEl = createElement('span');
spanEl.className = 'tableSortArrow';
spanEl.appendChild(document.createTextNode('\u00A0\u00A0'));
arrowParent.appendChild(spanEl);
}
}
// now, add back in some feedback
var spanEl = createElement('span');
spanEl.className = 'tableSortArrow';
if (null == previousSortOrder || '' == previousSortOrder || 'DESC' == previousSortOrder) {
spanEl.appendChild(document.createTextNode(' \u2191'));
spanEl.setAttribute('sortOrder', 'ASC');
} else {
spanEl.appendChild(document.createTextNode(' \u2193'));
spanEl.setAttribute('sortOrder', 'DESC');
}
td.appendChild(spanEl);
return false;
},
getInnerText : function(el) {
if ('string' == typeof el || 'undefined' == typeof el) {
return el;
}
if (el.innerText) {
return el.innerText; // Not needed but it is faster
}
var str = el.getAttribute('standardistaTableSortingInnerText');
if (null != str && '' != str) {
return str;
}
str = '';
var cs = el.childNodes;
var l = cs.length;
        for (var i = 0; i < l; i++) {
            // 'if' is considerably quicker than a 'switch' statement in
            // Internet Explorer, which translates to a good time reduction
            // since this is a very often called recursive function
            if (1 == cs[i].nodeType) { // ELEMENT_NODE
                str += this.getInnerText(cs[i]);
            } else if (3 == cs[i].nodeType) { // TEXT_NODE
                str += cs[i].nodeValue;
            }
            // (no break here: every child node must contribute its text)
        }
// set the innertext for this element directly on the element
// so that it can be retrieved early next time the innertext
// is requested
el.setAttribute('standardistaTableSortingInnerText', str);
return str;
},
determineSortFunction : function(itm) {
var sortfn = this.sortCaseInsensitive;
if (itm.match(/^\d\d[\/-]\d\d[\/-]\d\d\d\d$/)) {
sortfn = this.sortDate;
}
if (itm.match(/^\d\d[\/-]\d\d[\/-]\d\d$/)) {
sortfn = this.sortDate;
}
        if (itm.match(/^[£$]/)) {
sortfn = this.sortCurrency;
}
if (itm.match(/^\d?\.?\d+$/)) {
sortfn = this.sortNumeric;
}
if (itm.match(/^[+-]?\d*\.?\d+([eE]-?\d+)?$/)) {
sortfn = this.sortNumeric;
}
if (itm.match(/^([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])\.([01]?\d\d?|2[0-4]\d|25[0-5])$/)) {
sortfn = this.sortIP;
}
return sortfn;
},
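    /**
     * For illustration: given first-cell values, '31/12/2021' selects sortDate,
     * '$4.99' selects sortCurrency, '42' or '-1.5e3' selects sortNumeric, and
     * '192.168.0.1' selects sortIP; anything else falls back to sortCaseInsensitive.
     **/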
sortCaseInsensitive : function(a, b) {
var that = standardistaTableSorting.that;
var aa = that.getInnerText(a.cells[that.sortColumnIndex]).toLowerCase();
var bb = that.getInnerText(b.cells[that.sortColumnIndex]).toLowerCase();
if (aa==bb) {
return 0;
} else if (aa<bb) {
return -1;
} else {
return 1;
}
},
sortDate : function(a,b) {
var that = standardistaTableSorting.that;
// y2k notes: two digit years less than 50 are treated as 20XX, greater than 50 are treated as 19XX
var aa = that.getInnerText(a.cells[that.sortColumnIndex]);
var bb = that.getInnerText(b.cells[that.sortColumnIndex]);
var dt1, dt2, yr = -1;
if (aa.length == 10) {
dt1 = aa.substr(6,4)+aa.substr(3,2)+aa.substr(0,2);
} else {
yr = aa.substr(6,2);
if (parseInt(yr) < 50) {
yr = '20'+yr;
} else {
yr = '19'+yr;
}
dt1 = yr+aa.substr(3,2)+aa.substr(0,2);
}
if (bb.length == 10) {
dt2 = bb.substr(6,4)+bb.substr(3,2)+bb.substr(0,2);
} else {
yr = bb.substr(6,2);
if (parseInt(yr) < 50) {
yr = '20'+yr;
} else {
yr = '19'+yr;
}
dt2 = yr+bb.substr(3,2)+bb.substr(0,2);
}
if (dt1==dt2) {
return 0;
} else if (dt1<dt2) {
return -1;
}
return 1;
},
sortCurrency : function(a,b) {
var that = standardistaTableSorting.that;
var aa = that.getInnerText(a.cells[that.sortColumnIndex]).replace(/[^0-9.]/g,'');
var bb = that.getInnerText(b.cells[that.sortColumnIndex]).replace(/[^0-9.]/g,'');
return parseFloat(aa) - parseFloat(bb);
},
sortNumeric : function(a,b) {
var that = standardistaTableSorting.that;
var aa = parseFloat(that.getInnerText(a.cells[that.sortColumnIndex]));
if (isNaN(aa)) {
aa = 0;
}
var bb = parseFloat(that.getInnerText(b.cells[that.sortColumnIndex]));
if (isNaN(bb)) {
bb = 0;
}
return aa-bb;
},
makeStandardIPAddress : function(val) {
var vals = val.split('.');
for (x in vals) {
val = vals[x];
while (3 > val.length) {
val = '0'+val;
}
vals[x] = val;
}
val = vals.join('.');
return val;
},
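    /**
     * For illustration: makeStandardIPAddress('9.0.0.1') returns '009.000.000.001',
     * so dotted quads compare correctly as plain strings in sortIP below.
     **/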
sortIP : function(a,b) {
var that = standardistaTableSorting.that;
var aa = that.makeStandardIPAddress(that.getInnerText(a.cells[that.sortColumnIndex]).toLowerCase());
var bb = that.makeStandardIPAddress(that.getInnerText(b.cells[that.sortColumnIndex]).toLowerCase());
if (aa==bb) {
return 0;
} else if (aa<bb) {
return -1;
} else {
return 1;
}
},
moveRows : function(table, newRows) {
this.isOdd = false;
// We appendChild rows that already exist to the tbody, so it moves them rather than creating new ones
for (var i=0;i<newRows.length;i++) {
var rowItem = newRows[i];
this.doStripe(rowItem);
table.tBodies[0].appendChild(rowItem);
}
},
doStripe : function(rowItem) {
if (this.isOdd) {
css.addClassToElement(rowItem, 'odd');
} else {
css.removeClassFromElement(rowItem, 'odd');
}
this.isOdd = !this.isOdd;
}
}
function
|
() {
standardistaTableSorting.init();
}
addEvent(window, 'load', standardistaTableSortingInit)
|
standardistaTableSortingInit
|
demo_skeleton.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import shutil
import cv2
import mmcv
import numpy as np
import torch
from mmcv import DictAction
from mmaction.apis import inference_recognizer, init_recognizer
try:
from mmdet.apis import inference_detector, init_detector
except (ImportError, ModuleNotFoundError):
    raise ImportError('Failed to import `inference_detector` and '
                      '`init_detector` from `mmdet.apis`. These apis are '
                      'required in this demo! ')
try:
from mmpose.apis import (inference_top_down_pose_model, init_pose_model,
vis_pose_result)
except (ImportError, ModuleNotFoundError):
    raise ImportError('Failed to import `inference_top_down_pose_model`, '
                      '`init_pose_model`, and `vis_pose_result` from '
                      '`mmpose.apis`. These apis are required in this demo! ')
try:
import moviepy.editor as mpy
except ImportError:
raise ImportError('Please install moviepy to enable output file')
FONTFACE = cv2.FONT_HERSHEY_DUPLEX
FONTSCALE = 0.75
FONTCOLOR = (255, 255, 255) # BGR, white
THICKNESS = 1
LINETYPE = 1
def parse_args():
|
def frame_extraction(video_path, short_side):
"""Extract frames given video_path.
Args:
video_path (str): The video_path.
"""
# Load the video, extract frames into ./tmp/video_name
target_dir = osp.join('./tmp', osp.basename(osp.splitext(video_path)[0]))
os.makedirs(target_dir, exist_ok=True)
# Should be able to handle videos up to several hours
frame_tmpl = osp.join(target_dir, 'img_{:06d}.jpg')
vid = cv2.VideoCapture(video_path)
frames = []
frame_paths = []
flag, frame = vid.read()
cnt = 0
new_h, new_w = None, None
while flag:
if new_h is None:
h, w, _ = frame.shape
new_w, new_h = mmcv.rescale_size((w, h), (short_side, np.Inf))
frame = mmcv.imresize(frame, (new_w, new_h))
frames.append(frame)
frame_path = frame_tmpl.format(cnt + 1)
frame_paths.append(frame_path)
cv2.imwrite(frame_path, frame)
cnt += 1
flag, frame = vid.read()
return frame_paths, frames
def detection_inference(args, frame_paths):
"""Detect human boxes given frame paths.
Args:
args (argparse.Namespace): The arguments.
frame_paths (list[str]): The paths of frames to do detection inference.
Returns:
list[np.ndarray]: The human detection results.
"""
model = init_detector(args.det_config, args.det_checkpoint, args.device)
assert model.CLASSES[0] == 'person', ('We require you to use a detector '
'trained on COCO')
results = []
print('Performing Human Detection for each frame')
prog_bar = mmcv.ProgressBar(len(frame_paths))
for frame_path in frame_paths:
result = inference_detector(model, frame_path)
# We only keep human detections with score larger than det_score_thr
result = result[0][result[0][:, 4] >= args.det_score_thr]
results.append(result)
prog_bar.update()
return results
def pose_inference(args, frame_paths, det_results):
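    """Run top-down pose estimation on each frame, given per-frame human boxes.

    Args:
        args (argparse.Namespace): The arguments.
        frame_paths (list[str]): The paths of frames.
        det_results (list[np.ndarray]): Per-frame human detection results.

    Returns:
        list: Per-frame pose estimation results.
    """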
model = init_pose_model(args.pose_config, args.pose_checkpoint,
args.device)
ret = []
print('Performing Human Pose Estimation for each frame')
prog_bar = mmcv.ProgressBar(len(frame_paths))
for f, d in zip(frame_paths, det_results):
# Align input format
d = [dict(bbox=x) for x in list(d)]
pose = inference_top_down_pose_model(model, f, d, format='xyxy')[0]
ret.append(pose)
prog_bar.update()
return ret
def main():
args = parse_args()
frame_paths, original_frames = frame_extraction(args.video,
args.short_side)
num_frame = len(frame_paths)
h, w, _ = original_frames[0].shape
# Get clip_len, frame_interval and calculate center index of each clip
config = mmcv.Config.fromfile(args.config)
config.merge_from_dict(args.cfg_options)
for component in config.data.test.pipeline:
if component['type'] == 'PoseNormalize':
component['mean'] = (w // 2, h // 2, .5)
component['max_value'] = (w, h, 1.)
model = init_recognizer(config, args.checkpoint, args.device)
    # Load label_map
    with open(args.label_map) as f:
        label_map = [x.strip() for x in f]
# Get Human detection results
det_results = detection_inference(args, frame_paths)
torch.cuda.empty_cache()
pose_results = pose_inference(args, frame_paths, det_results)
torch.cuda.empty_cache()
fake_anno = dict(
frame_dir='',
label=-1,
img_shape=(h, w),
original_shape=(h, w),
start_index=0,
modality='Pose',
total_frames=num_frame)
num_person = max([len(x) for x in pose_results])
num_keypoint = 17
keypoint = np.zeros((num_person, num_frame, num_keypoint, 2),
dtype=np.float16)
keypoint_score = np.zeros((num_person, num_frame, num_keypoint),
dtype=np.float16)
for i, poses in enumerate(pose_results):
for j, pose in enumerate(poses):
pose = pose['keypoints']
keypoint[j, i] = pose[:, :2]
keypoint_score[j, i] = pose[:, 2]
fake_anno['keypoint'] = keypoint
fake_anno['keypoint_score'] = keypoint_score
results = inference_recognizer(model, fake_anno)
action_label = label_map[results[0][0]]
pose_model = init_pose_model(args.pose_config, args.pose_checkpoint,
args.device)
vis_frames = [
vis_pose_result(pose_model, frame_paths[i], pose_results[i])
for i in range(num_frame)
]
for frame in vis_frames:
cv2.putText(frame, action_label, (10, 30), FONTFACE, FONTSCALE,
FONTCOLOR, THICKNESS, LINETYPE)
vid = mpy.ImageSequenceClip([x[:, :, ::-1] for x in vis_frames], fps=24)
vid.write_videofile(args.out_filename, remove_temp=True)
tmp_frame_dir = osp.dirname(frame_paths[0])
shutil.rmtree(tmp_frame_dir)
if __name__ == '__main__':
main()
|
parser = argparse.ArgumentParser(description='MMAction2 demo')
parser.add_argument('video', help='video file/url')
parser.add_argument('out_filename', help='output filename')
parser.add_argument(
'--config',
default=('configs/skeleton/posec3d/'
'slowonly_r50_u48_240e_ntu120_xsub_keypoint.py'),
help='skeleton model config file path')
parser.add_argument(
'--checkpoint',
default=('https://download.openmmlab.com/mmaction/skeleton/posec3d/'
'slowonly_r50_u48_240e_ntu120_xsub_keypoint/'
'slowonly_r50_u48_240e_ntu120_xsub_keypoint-6736b03f.pth'),
help='skeleton model checkpoint file/url')
parser.add_argument(
'--det-config',
default='demo/faster_rcnn_r50_fpn_2x_coco.py',
help='human detection config file path (from mmdet)')
parser.add_argument(
'--det-checkpoint',
default=('http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/'
'faster_rcnn_r50_fpn_2x_coco/'
'faster_rcnn_r50_fpn_2x_coco_'
'bbox_mAP-0.384_20200504_210434-a5d8aa15.pth'),
help='human detection checkpoint file/url')
parser.add_argument(
'--pose-config',
default='demo/hrnet_w32_coco_256x192.py',
help='human pose estimation config file path (from mmpose)')
parser.add_argument(
'--pose-checkpoint',
default=('https://download.openmmlab.com/mmpose/top_down/hrnet/'
'hrnet_w32_coco_256x192-c78dce93_20200708.pth'),
help='human pose estimation checkpoint file/url')
parser.add_argument(
'--det-score-thr',
type=float,
default=0.9,
help='the threshold of human detection score')
parser.add_argument(
'--label-map',
default='tools/data/skeleton/label_map_ntu120.txt',
help='label map file')
parser.add_argument(
'--device', type=str, default='cuda:0', help='CPU/CUDA device option')
parser.add_argument(
'--short-side',
type=int,
default=480,
help='specify the short-side length of the image')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
default={},
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. For example, '
"'--cfg-options model.backbone.depth=18 model.backbone.with_cp=True'")
args = parser.parse_args()
return args
|
command.rs
|
use std::{borrow::Borrow, ops::Range};
use hal::{
buffer,
command::{
AttachmentClear, BufferCopy, BufferImageCopy, ClearValue, CommandBufferFlags,
CommandBufferInheritanceInfo, DescriptorSetOffset, ImageBlit, ImageCopy, ImageResolve,
Level, SubpassContents,
},
device::OutOfMemory,
image::{Filter, Layout, SubresourceRange},
memory::{Barrier, Dependencies},
pso, query,
queue::Submission,
window::{PresentError, PresentationSurface, Suboptimal, SwapImageIndex},
DrawCount, IndexCount, IndexType, InstanceCount, TaskCount, VertexCount, VertexOffset,
WorkGroupCount,
};
use crate::Backend;
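// Stub queue/pool/command-buffer types: every method body below is `todo!()`,
// so this backend compiles against the hal (gfx-hal) traits but panics if
// any entry point is actually invoked.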
#[derive(Debug)]
pub struct Queue;
impl hal::queue::Queue<Backend> for Queue {
unsafe fn submit<'a, Ic, Iw, Is>(
&mut self,
_: Ic,
_: Iw,
_: Is,
_fence: Option<&mut <Backend as hal::Backend>::Fence>,
) where
Ic: Iterator<Item = &'a <Backend as hal::Backend>::CommandBuffer>,
{
todo!()
}
unsafe fn present(
&mut self,
_surface: &mut <Backend as hal::Backend>::Surface,
_image: <<Backend as hal::Backend>::Surface as PresentationSurface<Backend>>::SwapchainImage,
_wait_semaphore: Option<&mut <Backend as hal::Backend>::Semaphore>,
) -> Result<Option<Suboptimal>, PresentError> {
todo!()
}
fn wait_idle(&mut self) -> Result<(), OutOfMemory> {
todo!()
}
}
#[derive(Debug)]
pub struct CommandPool;
impl hal::pool::CommandPool<Backend> for CommandPool {
unsafe fn reset(&mut self, _release_resources: bool) {
todo!()
}
unsafe fn allocate_one(&mut self, _level: Level) -> CommandBuffer {
todo!()
}
unsafe fn free<I>(&mut self, _buffers: I)
where
I: Iterator<Item = CommandBuffer>,
{
todo!()
}
}
#[derive(Debug)]
pub struct CommandBuffer;
impl hal::command::CommandBuffer<Backend> for CommandBuffer {
unsafe fn begin(
&mut self,
_flags: CommandBufferFlags,
_inheritance_info: CommandBufferInheritanceInfo<Backend>,
) {
todo!()
}
unsafe fn begin_primary(&mut self, _flags: CommandBufferFlags) {
todo!()
}
unsafe fn finish(&mut self) {
todo!()
}
unsafe fn reset(&mut self, _release_resources: bool) {
todo!()
}
unsafe fn pipeline_barrier<'a, T>(
&mut self,
_stages: Range<pso::PipelineStage>,
_dependencies: Dependencies,
_barriers: T,
) where
T: Iterator<Item = Barrier<'a, Backend>>,
{
todo!()
}
unsafe fn fill_buffer(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_range: buffer::SubRange,
_data: u32,
) {
todo!()
}
unsafe fn update_buffer(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_data: &[u8],
) {
todo!()
}
unsafe fn clear_image<T>(
&mut self,
_image: &<Backend as hal::Backend>::Image,
_layout: Layout,
_value: ClearValue,
_subresource_ranges: T,
) where
T: Iterator,
T::Item: Borrow<SubresourceRange>,
{
todo!()
}
unsafe fn clear_attachments<T, U>(&mut self, _clears: T, _rects: U)
where
T: Iterator,
T::Item: Borrow<AttachmentClear>,
U: Iterator,
U::Item: Borrow<pso::ClearRect>,
{
todo!()
}
unsafe fn resolve_image<T>(
&mut self,
_src: &<Backend as hal::Backend>::Image,
_src_layout: Layout,
_dst: &<Backend as hal::Backend>::Image,
_dst_layout: Layout,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<ImageResolve>,
{
todo!()
}
unsafe fn
|
<T>(
&mut self,
_src: &<Backend as hal::Backend>::Image,
_src_layout: Layout,
_dst: &<Backend as hal::Backend>::Image,
_dst_layout: Layout,
_filter: Filter,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<ImageBlit>,
{
todo!()
}
unsafe fn bind_index_buffer(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_sub: buffer::SubRange,
_ty: IndexType,
) {
todo!()
}
unsafe fn bind_vertex_buffers<I, T>(&mut self, _first_binding: pso::BufferIndex, _buffers: I)
where
I: Iterator<Item = (T, buffer::SubRange)>,
T: Borrow<<Backend as hal::Backend>::Buffer>,
{
todo!()
}
unsafe fn set_viewports<T>(&mut self, _first_viewport: u32, _viewports: T)
where
T: Iterator,
T::Item: Borrow<pso::Viewport>,
{
todo!()
}
unsafe fn set_scissors<T>(&mut self, _first_scissor: u32, _rects: T)
where
T: Iterator,
T::Item: Borrow<pso::Rect>,
{
todo!()
}
unsafe fn set_stencil_reference(&mut self, _faces: pso::Face, _value: pso::StencilValue) {
todo!()
}
unsafe fn set_stencil_read_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) {
todo!()
}
unsafe fn set_stencil_write_mask(&mut self, _faces: pso::Face, _value: pso::StencilValue) {
todo!()
}
unsafe fn set_blend_constants(&mut self, _color: pso::ColorValue) {
todo!()
}
unsafe fn set_depth_bounds(&mut self, _bounds: Range<f32>) {
todo!()
}
unsafe fn set_line_width(&mut self, _width: f32) {
todo!()
}
unsafe fn set_depth_bias(&mut self, _depth_bias: pso::DepthBias) {
todo!()
}
unsafe fn begin_render_pass<T>(
&mut self,
_render_pass: &<Backend as hal::Backend>::RenderPass,
_framebuffer: &<Backend as hal::Backend>::Framebuffer,
_render_area: pso::Rect,
_clear_values: T,
_first_subpass: SubpassContents,
) where
T: Iterator,
T::Item: Borrow<ClearValue>,
{
todo!()
}
unsafe fn next_subpass(&mut self, _contents: SubpassContents) {
todo!()
}
unsafe fn end_render_pass(&mut self) {
todo!()
}
unsafe fn bind_graphics_pipeline(
&mut self,
_pipeline: &<Backend as hal::Backend>::GraphicsPipeline,
) {
todo!()
}
unsafe fn bind_graphics_descriptor_sets<I, J>(
&mut self,
_layout: &<Backend as hal::Backend>::PipelineLayout,
_first_set: usize,
_sets: I,
_offsets: J,
) where
I: Iterator,
I::Item: Borrow<<Backend as hal::Backend>::DescriptorSet>,
J: Iterator,
J::Item: Borrow<DescriptorSetOffset>,
{
todo!()
}
unsafe fn bind_compute_pipeline(
&mut self,
_pipeline: &<Backend as hal::Backend>::ComputePipeline,
) {
todo!()
}
unsafe fn bind_compute_descriptor_sets<I, J>(
&mut self,
_layout: &<Backend as hal::Backend>::PipelineLayout,
_first_set: usize,
_sets: I,
_offsets: J,
) where
I: Iterator,
I::Item: Borrow<<Backend as hal::Backend>::DescriptorSet>,
J: Iterator,
J::Item: Borrow<DescriptorSetOffset>,
{
todo!()
}
unsafe fn dispatch(&mut self, _count: WorkGroupCount) {
todo!()
}
unsafe fn dispatch_indirect(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
) {
todo!()
}
unsafe fn copy_buffer<T>(
&mut self,
_src: &<Backend as hal::Backend>::Buffer,
_dst: &<Backend as hal::Backend>::Buffer,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<BufferCopy>,
{
todo!()
}
unsafe fn copy_image<T>(
&mut self,
_src: &<Backend as hal::Backend>::Image,
_src_layout: Layout,
_dst: &<Backend as hal::Backend>::Image,
_dst_layout: Layout,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<ImageCopy>,
{
todo!()
}
unsafe fn copy_buffer_to_image<T>(
&mut self,
_src: &<Backend as hal::Backend>::Buffer,
_dst: &<Backend as hal::Backend>::Image,
_dst_layout: Layout,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<BufferImageCopy>,
{
todo!()
}
unsafe fn copy_image_to_buffer<T>(
&mut self,
_src: &<Backend as hal::Backend>::Image,
_src_layout: Layout,
_dst: &<Backend as hal::Backend>::Buffer,
_regions: T,
) where
T: Iterator,
T::Item: Borrow<BufferImageCopy>,
{
todo!()
}
unsafe fn draw(&mut self, _vertices: Range<VertexCount>, _instances: Range<InstanceCount>) {
todo!()
}
unsafe fn draw_indexed(
&mut self,
_indices: Range<IndexCount>,
_base_vertex: VertexOffset,
_instances: Range<InstanceCount>,
) {
todo!()
}
unsafe fn draw_indirect(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_draw_count: DrawCount,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn draw_indexed_indirect(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_draw_count: DrawCount,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn draw_indirect_count(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_count_buffer: &<Backend as hal::Backend>::Buffer,
_count_buffer_offset: buffer::Offset,
_max_draw_count: u32,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn draw_indexed_indirect_count(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_count_buffer: &<Backend as hal::Backend>::Buffer,
_count_buffer_offset: buffer::Offset,
_max_draw_count: u32,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn draw_mesh_tasks(&mut self, _task_count: TaskCount, _first_task: TaskCount) {
todo!()
}
unsafe fn draw_mesh_tasks_indirect(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_draw_count: DrawCount,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn draw_mesh_tasks_indirect_count(
&mut self,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_count_buffer: &<Backend as hal::Backend>::Buffer,
_count_buffer_offset: buffer::Offset,
_max_draw_count: DrawCount,
_stride: buffer::Stride,
) {
todo!()
}
unsafe fn set_event(
&mut self,
_event: &<Backend as hal::Backend>::Event,
_stages: pso::PipelineStage,
) {
todo!()
}
unsafe fn reset_event(
&mut self,
_event: &<Backend as hal::Backend>::Event,
_stages: pso::PipelineStage,
) {
todo!()
}
unsafe fn wait_events<'a, I, J>(
&mut self,
_events: I,
_stages: Range<pso::PipelineStage>,
_barriers: J,
) where
I: Iterator,
I::Item: Borrow<<Backend as hal::Backend>::Event>,
J: Iterator<Item = Barrier<'a, Backend>>,
{
todo!()
}
unsafe fn begin_query(&mut self, _query: query::Query<Backend>, _flags: query::ControlFlags) {
todo!()
}
unsafe fn end_query(&mut self, _query: query::Query<Backend>) {
todo!()
}
unsafe fn reset_query_pool(
&mut self,
_pool: &<Backend as hal::Backend>::QueryPool,
_queries: Range<query::Id>,
) {
todo!()
}
unsafe fn copy_query_pool_results(
&mut self,
_pool: &<Backend as hal::Backend>::QueryPool,
_queries: Range<query::Id>,
_buffer: &<Backend as hal::Backend>::Buffer,
_offset: buffer::Offset,
_stride: buffer::Stride,
_flags: query::ResultFlags,
) {
todo!()
}
unsafe fn write_timestamp(
&mut self,
_stage: pso::PipelineStage,
_query: query::Query<Backend>,
) {
todo!()
}
unsafe fn push_graphics_constants(
&mut self,
_layout: &<Backend as hal::Backend>::PipelineLayout,
_stages: pso::ShaderStageFlags,
_offset: u32,
_constants: &[u32],
) {
todo!()
}
unsafe fn push_compute_constants(
&mut self,
_layout: &<Backend as hal::Backend>::PipelineLayout,
_offset: u32,
_constants: &[u32],
) {
todo!()
}
unsafe fn execute_commands<'a, T, I>(&mut self, _cmd_buffers: I)
where
T: 'a + Borrow<<Backend as hal::Backend>::CommandBuffer>,
I: Iterator<Item = &'a T>,
{
todo!()
}
unsafe fn insert_debug_marker(&mut self, _name: &str, _color: u32) {
todo!()
}
unsafe fn begin_debug_marker(&mut self, _name: &str, _color: u32) {
todo!()
}
unsafe fn end_debug_marker(&mut self) {
todo!()
}
}
|
blit_image
|
index.ts
|
import { Web3Provider } from '@ethersproject/providers';
import { strategy as ethReceivedStrategy } from '../eth-received';
import fetch from 'cross-fetch';
export const author = 'mccallofthewild';
export const version = '0.1.0';
const ethCharities = [
['GiveDirectly', '0xc7464dbcA260A8faF033460622B23467Df5AEA42'],
['Unsung.org', '0x02a13ED1805624738Cc129370Fee358ea487B0C6'],
['Heifer.org', '0xD3F81260a44A1df7A7269CF66Abd9c7e4f8CdcD1'],
['GraceAid.org.uk', '0x236dAA98f115caa9991A3894ae387CDc13eaaD1B'],
['SENS.org', '0x542EFf118023cfF2821b24156a507a513Fe93539'],
['350.org', '0x50990F09d4f0cb864b8e046e7edC749dE410916b'],
['EFF.org', '0xb189f76323678E094D4996d182A792E52369c005'],
['WikiLeaks', '0xE96E2181F6166A37EA4C04F6E6E2bD672D72Acc1'],
['GiveWell.org', '0x7cF2eBb5Ca55A8bd671A020F8BDbAF07f60F26C1'],
['CoolEarth.org', '0x3c8cB169281196737c493AfFA8F49a9d823bB9c5'],
['Run2Rescue.org', '0xd17bcbFa6De9E3741aa43Ed32e64696F6a9FA996'],
['Archive.org', '0xFA8E3920daF271daB92Be9B87d9998DDd94FEF08']
];
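// Only the addresses are used below; the names are kept for readability.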
export async function strategy(
...args: [string, string, Web3Provider, string[], { coeff?: number }, number]
) {
const [space, network, provider, addresses, options, snapshot] = args;
const { coeff = 100 } = options;
return ethReceivedStrategy(
space,
network,
provider,
addresses,
{
receivingAddresses: ethCharities.map(([name, address]) => address),
coeff
},
snapshot
|
);
}
|
|
parser.py
|
"""
Entry point for training and evaluating a dependency parser.
This implementation combines a deep biaffine graph-based parser with linearization and distance features.
For details please refer to paper: https://nlp.stanford.edu/pubs/qi2018universal.pdf.
"""
"""
Training and evaluation for the parser.
"""
import sys
import os
import shutil
import time
import argparse
import logging
import numpy as np
import random
import torch
from torch import nn, optim
import stanza.models.depparse.data as data
from stanza.models.depparse.data import DataLoader
from stanza.models.depparse.trainer import Trainer
from stanza.models.depparse import scorer
from stanza.models.common import utils
from stanza.models.common import pretrain
from stanza.models.common.data import augment_punct
from stanza.models.common.doc import *
from stanza.utils.conll import CoNLL
from stanza.models import _training_logging
logger = logging.getLogger('stanza')
def parse_args(args=None):
|
def main(args=None):
args = parse_args(args=args)
if args.cpu:
args.cuda = False
utils.set_random_seed(args.seed, args.cuda)
args = vars(args)
logger.info("Running parser in {} mode".format(args['mode']))
if args['mode'] == 'train':
train(args)
else:
evaluate(args)
# TODO: refactor with tagger
def model_file_name(args):
if args['save_name'] is not None:
save_name = args['save_name']
else:
save_name = args['shorthand'] + "_parser.pt"
return os.path.join(args['save_dir'], save_name)
# TODO: refactor with everywhere
def load_pretrain(args):
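    # Prefer a pretrain file that already exists under save_dir; only fall back
    # to locating the raw word-vector file when no saved pretrain is found.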
pt = None
if args['pretrain']:
pretrain_file = pretrain.find_pretrain_file(args['wordvec_pretrain_file'], args['save_dir'], args['shorthand'], args['lang'])
if os.path.exists(pretrain_file):
vec_file = None
else:
vec_file = args['wordvec_file'] if args['wordvec_file'] else utils.get_wordvec_file(args['wordvec_dir'], args['shorthand'])
pt = pretrain.Pretrain(pretrain_file, vec_file, args['pretrain_max_vocab'])
return pt
def train(args):
model_file = model_file_name(args)
utils.ensure_dir(os.path.split(model_file)[0])
# load pretrained vectors if needed
pretrain = load_pretrain(args)
# load data
logger.info("Loading data with batch size {}...".format(args['batch_size']))
train_data, _ = CoNLL.conll2dict(input_file=args['train_file'])
# possibly augment the training data with some amount of fake data
# based on the options chosen
logger.info("Original data size: {}".format(len(train_data)))
train_data.extend(augment_punct(train_data, args['augment_nopunct'],
keep_original_sentences=False))
logger.info("Augmented data size: {}".format(len(train_data)))
train_doc = Document(train_data)
train_batch = DataLoader(train_doc, args['batch_size'], args, pretrain, evaluation=False)
vocab = train_batch.vocab
dev_doc = CoNLL.conll2doc(input_file=args['eval_file'])
dev_batch = DataLoader(dev_doc, args['batch_size'], args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
# pred and gold path
system_pred_file = args['output_file']
gold_file = args['gold_file']
# skip training if the language does not have training or dev data
if len(train_batch) == 0 or len(dev_batch) == 0:
logger.info("Skip training because no data available...")
sys.exit(0)
logger.info("Training parser...")
trainer = Trainer(args=args, vocab=vocab, pretrain=pretrain, use_cuda=args['cuda'])
global_step = 0
max_steps = args['max_steps']
dev_score_history = []
best_dev_preds = []
current_lr = args['lr']
global_start_time = time.time()
format_str = 'Finished STEP {}/{}, loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
using_amsgrad = False
last_best_step = 0
# start training
train_loss = 0
while True:
do_break = False
for i, batch in enumerate(train_batch):
start_time = time.time()
global_step += 1
loss = trainer.update(batch, eval=False) # update step
train_loss += loss
if global_step % args['log_step'] == 0:
duration = time.time() - start_time
logger.info(format_str.format(global_step, max_steps, loss, duration, current_lr))
if global_step % args['eval_interval'] == 0:
# eval on dev
logger.info("Evaluating on dev set...")
dev_preds = []
for batch in dev_batch:
preds = trainer.predict(batch)
dev_preds += preds
dev_preds = utils.unsort(dev_preds, dev_batch.data_orig_idx)
dev_batch.doc.set([HEAD, DEPREL], [y for x in dev_preds for y in x])
CoNLL.write_doc2conll(dev_batch.doc, system_pred_file)
_, _, dev_score = scorer.score(system_pred_file, gold_file)
train_loss = train_loss / args['eval_interval'] # avg loss per batch
logger.info("step {}: train_loss = {:.6f}, dev_score = {:.4f}".format(global_step, train_loss, dev_score))
train_loss = 0
# save best model
if len(dev_score_history) == 0 or dev_score > max(dev_score_history):
last_best_step = global_step
trainer.save(model_file)
logger.info("new best model saved.")
best_dev_preds = dev_preds
dev_score_history += [dev_score]
if global_step - last_best_step >= args['max_steps_before_stop']:
if not using_amsgrad:
logger.info("Switching to AMSGrad")
last_best_step = global_step
using_amsgrad = True
trainer.optimizer = optim.Adam(trainer.model.parameters(), amsgrad=True, lr=args['lr'], betas=(.9, args['beta2']), eps=1e-6)
else:
do_break = True
break
if global_step >= args['max_steps']:
do_break = True
break
if do_break: break
train_batch.reshuffle()
logger.info("Training ended with {} steps.".format(global_step))
best_f, best_eval = max(dev_score_history)*100, np.argmax(dev_score_history)+1
logger.info("Best dev F1 = {:.2f}, at iteration = {}".format(best_f, best_eval * args['eval_interval']))
def evaluate(args):
# file paths
system_pred_file = args['output_file']
gold_file = args['gold_file']
model_file = model_file_name(args)
# load pretrained vectors if needed
pretrain = load_pretrain(args)
# load model
logger.info("Loading model from: {}".format(model_file))
use_cuda = args['cuda'] and not args['cpu']
trainer = Trainer(pretrain=pretrain, model_file=model_file, use_cuda=use_cuda)
loaded_args, vocab = trainer.args, trainer.vocab
# load config
for k in args:
if k.endswith('_dir') or k.endswith('_file') or k in ['shorthand'] or k == 'mode':
loaded_args[k] = args[k]
# load data
logger.info("Loading data with batch size {}...".format(args['batch_size']))
doc = CoNLL.conll2doc(input_file=args['eval_file'])
batch = DataLoader(doc, args['batch_size'], loaded_args, pretrain, vocab=vocab, evaluation=True, sort_during_eval=True)
if len(batch) > 0:
logger.info("Start evaluation...")
preds = []
for i, b in enumerate(batch):
preds += trainer.predict(b)
else:
# skip eval if dev data does not exist
preds = []
preds = utils.unsort(preds, batch.data_orig_idx)
# write to file and score
batch.doc.set([HEAD, DEPREL], [y for x in preds for y in x])
CoNLL.write_doc2conll(batch.doc, system_pred_file)
if gold_file is not None:
_, _, score = scorer.score(system_pred_file, gold_file)
logger.info("Parser score:")
logger.info("{} {:.2f}".format(args['shorthand'], score*100))
if __name__ == '__main__':
main()
|
parser = argparse.ArgumentParser()
parser.add_argument('--data_dir', type=str, default='data/depparse', help='Root dir for saving models.')
parser.add_argument('--wordvec_dir', type=str, default='extern_data/word2vec', help='Directory of word vectors.')
parser.add_argument('--wordvec_file', type=str, default=None, help='Word vectors filename.')
parser.add_argument('--wordvec_pretrain_file', type=str, default=None, help='Exact name of the pretrain file to read')
parser.add_argument('--train_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--eval_file', type=str, default=None, help='Input file for data loader.')
parser.add_argument('--output_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--gold_file', type=str, default=None, help='Output CoNLL-U file.')
parser.add_argument('--mode', default='train', choices=['train', 'predict'])
parser.add_argument('--lang', type=str, help='Language')
parser.add_argument('--shorthand', type=str, help="Treebank shorthand")
parser.add_argument('--hidden_dim', type=int, default=400)
parser.add_argument('--char_hidden_dim', type=int, default=400)
parser.add_argument('--deep_biaff_hidden_dim', type=int, default=400)
parser.add_argument('--composite_deep_biaff_hidden_dim', type=int, default=100)
parser.add_argument('--word_emb_dim', type=int, default=75)
parser.add_argument('--char_emb_dim', type=int, default=100)
parser.add_argument('--tag_emb_dim', type=int, default=50)
parser.add_argument('--transformed_dim', type=int, default=125)
parser.add_argument('--num_layers', type=int, default=3)
parser.add_argument('--char_num_layers', type=int, default=1)
parser.add_argument('--pretrain_max_vocab', type=int, default=250000)
parser.add_argument('--word_dropout', type=float, default=0.33)
parser.add_argument('--dropout', type=float, default=0.5)
parser.add_argument('--rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--char_rec_dropout', type=float, default=0, help="Recurrent dropout")
parser.add_argument('--no_char', dest='char', action='store_false', help="Turn off character model.")
parser.add_argument('--no_pretrain', dest='pretrain', action='store_false', help="Turn off pretrained embeddings.")
parser.add_argument('--no_linearization', dest='linearization', action='store_false', help="Turn off linearization term.")
parser.add_argument('--no_distance', dest='distance', action='store_false', help="Turn off distance term.")
parser.add_argument('--sample_train', type=float, default=1.0, help='Subsample training data.')
parser.add_argument('--optim', type=str, default='adam', help='sgd, adagrad, adam or adamax.')
parser.add_argument('--lr', type=float, default=3e-3, help='Learning rate')
parser.add_argument('--beta2', type=float, default=0.95)
parser.add_argument('--max_steps', type=int, default=50000)
parser.add_argument('--eval_interval', type=int, default=100)
parser.add_argument('--max_steps_before_stop', type=int, default=3000)
parser.add_argument('--batch_size', type=int, default=5000)
parser.add_argument('--max_grad_norm', type=float, default=1.0, help='Gradient clipping.')
parser.add_argument('--log_step', type=int, default=20, help='Print log every k steps.')
parser.add_argument('--save_dir', type=str, default='saved_models/depparse', help='Root dir for saving models.')
parser.add_argument('--save_name', type=str, default=None, help="File name to save the model")
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--cuda', type=bool, default=torch.cuda.is_available())
parser.add_argument('--cpu', action='store_true', help='Ignore CUDA.')
parser.add_argument('--augment_nopunct', type=float, default=None, help='Augment the training data by copying this fraction of punct-ending sentences as non-punct. Default of None will aim for roughly 10%')
args = parser.parse_args(args=args)
return args
|
main.go
|
package main
import (
"fmt"
"sort"
)
func
|
() {
n := []int{7, 4, 8, 2, 9, 19, 12, 32, 3}
fmt.Println(n)
sort.Sort(sort.IntSlice(n))
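	// sort.Ints(n) is the idiomatic shorthand for sort.Sort(sort.IntSlice(n)).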
fmt.Println(n)
}
|
main
|
test_sysex.rs
|
extern crate midir;
fn main() {
match example::run() {
Ok(_) => (),
Err(err) => println!("Error: {}", err)
}
}
#[cfg(not(any(windows, target_arch = "wasm32")))] // virtual ports are not supported on Windows nor on Web MIDI
mod example {
use std::thread::sleep;
use std::time::Duration;
use std::error::Error;
use midir::{MidiInput, MidiOutput, Ignore};
use midir::os::unix::VirtualInput;
const LARGE_SYSEX_SIZE: usize = 5572; // This is the maximum that worked for me
pub fn run() -> Result<(), Box<dyn Error>> {
let mut midi_in = MidiInput::new("My Test Input")?;
midi_in.ignore(Ignore::None);
let midi_out = MidiOutput::new("My Test Output")?;
let previous_count = midi_out.port_count();
println!("Creating virtual input port ...");
let conn_in = midi_in.create_virtual("midir-test", |stamp, message, _| {
println!("{}: {:?} (len = {})", stamp, message, message.len());
}, ())?;
assert_eq!(midi_out.port_count(), previous_count + 1);
let out_ports = midi_out.ports();
let new_port = out_ports.last().unwrap();
println!("Connecting to port '{}' ...", midi_out.port_name(&new_port).unwrap());
let mut conn_out = midi_out.connect(&new_port, "midir-test")?;
println!("Starting to send messages ...");
//sleep(Duration::from_millis(2000));
println!("Sending NoteOn message");
conn_out.send(&[144, 60, 1])?;
sleep(Duration::from_millis(200));
println!("Sending small SysEx message ...");
conn_out.send(&[0xF0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xF7])?;
sleep(Duration::from_millis(200));
println!("Sending large SysEx message ...");
let mut v = Vec::with_capacity(LARGE_SYSEX_SIZE);
v.push(0xF0u8);
for _ in 1..LARGE_SYSEX_SIZE-1 {
v.push(0u8);
}
v.push(0xF7u8);
assert_eq!(v.len(), LARGE_SYSEX_SIZE);
conn_out.send(&v)?;
|
for ch in v.chunks(4) {
conn_out.send(ch)?;
}
sleep(Duration::from_millis(200));
println!("Sending small SysEx message ...");
conn_out.send(&[0xF0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xF7])?;
sleep(Duration::from_millis(200));
println!("Closing output ...");
conn_out.close();
println!("Closing virtual input ...");
conn_in.close().0;
Ok(())
}
}
// needed to compile successfully
#[cfg(any(windows, target_arch = "wasm32"))] mod example {
use std::error::Error;
pub fn run() -> Result<(), Box<dyn Error>> { Ok(()) }
}
|
sleep(Duration::from_millis(200));
// FIXME: the following doesn't seem to work with ALSA
println!("Sending large SysEx message (chunked)...");
|
string_joiner.rs
|
/// An Iterator adapter that walks through all the elements in the Iterator,
/// converts them to Strings and joins them into one big String, separated by
/// some separator string slice.
pub trait StringJoiner {
    /// Converts all elements the Iterator yields to Strings,
    /// then combines them all into one String, separated by sep.
///
/// # Example
///
/// ```rust
/// use collect::iter::StringJoiner;
///
/// let vec = vec![1,2,3];
/// assert_eq!(&*vec.iter().join(", "), "1, 2, 3");
/// ```
fn join(&mut self, sep: &str) -> String;
}
// Implement it for all Iterators with Elements convertible into a String
impl<A: ToString, T: Iterator<Item=A>> StringJoiner for T {
fn join(&mut self, sep: &str) -> String {
match self.next() {
Some(elem) => {
let mut output = elem.to_string();
                for elem in self {
                    output.push_str(sep);
                    output.push_str(&elem.to_string());
                }
}
output
}
None => String::new()
}
}
}
#[test]
fn test_join()
|
{
let many = vec![1,2,3];
let one = vec![1];
let none: Vec<usize> = vec![];
    assert_eq!(many.iter().join(", "), "1, 2, 3");
    assert_eq!( one.iter().join(", "), "1");
    assert_eq!(none.iter().join(", "), "");
}
|
|
gl_canvas.rs
|
use std::sync::mpsc::Sender;
use crate::context::Context;
use crate::event::{Action, Key, Modifiers, MouseButton, TouchAction, WindowEvent};
use crate::window::canvas::{CanvasSetup, NumSamples};
use crate::window::AbstractCanvas;
use glutin::{
self,
dpi::LogicalSize,
event::TouchPhase,
event_loop::{ControlFlow, EventLoop},
platform::run_return::EventLoopExtRunReturn,
window::WindowBuilder,
ContextBuilder, GlRequest, PossiblyCurrent, WindowedContext,
};
use image::{GenericImage, Pixel};
/// A canvas based on glutin and OpenGL.
pub struct GLCanvas {
window: WindowedContext<PossiblyCurrent>,
events: EventLoop<()>,
cursor_pos: Option<(f64, f64)>,
key_states: [Action; Key::Unknown as usize + 1],
button_states: [Action; MouseButton::Button8 as usize + 1],
out_events: Sender<WindowEvent>,
// listeners: Vec<EventListenerHandle>,
}
impl AbstractCanvas for GLCanvas {
fn open(
title: &str,
hide: bool,
width: u32,
height: u32,
canvas_setup: Option<CanvasSetup>,
out_events: Sender<WindowEvent>,
) -> Self {
#[cfg(any(
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd"
))]
let events = {
use glutin::platform::unix::EventLoopExtUnix;
EventLoop::new_any_thread()
};
#[cfg(windows)]
let events = {
use glutin::platform::windows::EventLoopExtWindows;
EventLoop::new_any_thread()
};
#[cfg(not(any(
target_os = "linux",
target_os = "dragonfly",
target_os = "freebsd",
target_os = "netbsd",
target_os = "openbsd",
windows
)))]
let events = EventLoop::new();
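        // The unix/windows `new_any_thread` constructors above allow creating the
        // event loop off the main thread; other platforms (e.g. macOS) require the
        // main thread, so the plain `EventLoop::new()` fallback is used there.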
let window = WindowBuilder::new()
.with_title(title)
.with_inner_size(LogicalSize::new(width as f64, height as f64))
.with_visible(!hide);
let canvas_setup = canvas_setup.unwrap_or(CanvasSetup {
vsync: true,
samples: NumSamples::Zero,
});
let window = ContextBuilder::new()
.with_vsync(canvas_setup.vsync)
.with_multisampling(canvas_setup.samples as u16)
.with_gl(GlRequest::GlThenGles {
opengl_version: (3, 2),
opengles_version: (2, 0),
})
.build_windowed(window, &events)
.unwrap();
let window = unsafe { window.make_current().unwrap() };
Context::init(|| unsafe {
glow::Context::from_loader_function(|name| window.get_proc_address(name) as *const _)
});
let ctxt = Context::get();
let vao = ctxt.create_vertex_array();
ctxt.bind_vertex_array(vao.as_ref());
GLCanvas {
window,
events,
cursor_pos: None,
key_states: [Action::Release; Key::Unknown as usize + 1],
button_states: [Action::Release; MouseButton::Button8 as usize + 1],
out_events,
}
}
fn render_loop(mut callback: impl FnMut(f64) -> bool + 'static)
|
fn poll_events(&mut self) {
let out_events = &mut self.out_events;
let window = &mut self.window;
let button_states = &mut self.button_states;
let key_states = &mut self.key_states;
let cursor_pos = &mut self.cursor_pos;
self.events.run_return(|event, _, control_flow| {
use glutin::event::Event;
match event {
Event::WindowEvent { event, .. } => match event {
glutin::event::WindowEvent::CloseRequested => {
let _ = out_events.send(WindowEvent::Close);
}
glutin::event::WindowEvent::Resized(physical_size) => {
window.resize(physical_size);
let fb_size: (u32, u32) = physical_size.into();
let _ = out_events.send(WindowEvent::FramebufferSize(fb_size.0, fb_size.1));
}
glutin::event::WindowEvent::CursorMoved {
position,
modifiers,
..
} => {
let modifiers = translate_modifiers(modifiers);
*cursor_pos = Some(position.into());
let _ = out_events
.send(WindowEvent::CursorPos(position.x, position.y, modifiers));
}
glutin::event::WindowEvent::MouseInput {
state,
button,
modifiers,
..
} => {
let action = translate_action(state);
let button = translate_mouse_button(button);
let modifiers = translate_modifiers(modifiers);
button_states[button as usize] = action;
let _ =
out_events.send(WindowEvent::MouseButton(button, action, modifiers));
}
glutin::event::WindowEvent::Touch(touch) => {
let action = match touch.phase {
TouchPhase::Started => TouchAction::Start,
TouchPhase::Ended => TouchAction::End,
TouchPhase::Moved => TouchAction::Move,
TouchPhase::Cancelled => TouchAction::Cancel,
};
let _ = out_events.send(WindowEvent::Touch(
touch.id,
touch.location.x,
touch.location.y,
action,
Modifiers::empty(),
));
}
glutin::event::WindowEvent::MouseWheel {
delta, modifiers, ..
} => {
let (x, y) = match delta {
glutin::event::MouseScrollDelta::LineDelta(dx, dy) => {
(dx as f64 * 10.0, dy as f64 * 10.0)
}
glutin::event::MouseScrollDelta::PixelDelta(delta) => delta.into(),
};
let modifiers = translate_modifiers(modifiers);
let _ = out_events.send(WindowEvent::Scroll(x, y, modifiers));
}
glutin::event::WindowEvent::KeyboardInput { input, .. } => {
let action = translate_action(input.state);
let key = translate_key(input.virtual_keycode);
let modifiers = translate_modifiers(input.modifiers);
key_states[key as usize] = action;
let _ = out_events.send(WindowEvent::Key(key, action, modifiers));
}
glutin::event::WindowEvent::ReceivedCharacter(c) => {
let _ = out_events.send(WindowEvent::Char(c));
}
_ => {}
},
Event::RedrawEventsCleared => {
*control_flow = ControlFlow::Exit;
}
_ => {}
};
})
}
fn swap_buffers(&mut self) {
let _ = self.window.swap_buffers();
}
fn size(&self) -> (u32, u32) {
self.window.window().inner_size().into()
}
fn cursor_pos(&self) -> Option<(f64, f64)> {
self.cursor_pos
}
fn scale_factor(&self) -> f64 {
self.window.window().scale_factor() as f64
}
fn set_title(&mut self, title: &str) {
self.window.window().set_title(title)
}
fn set_icon(&mut self, icon: impl GenericImage<Pixel = impl Pixel<Subpixel = u8>>) {
let (width, height) = icon.dimensions();
let mut rgba = Vec::with_capacity((width * height) as usize * 4);
for (_, _, pixel) in icon.pixels() {
rgba.extend_from_slice(&pixel.to_rgba().0);
}
let icon = glutin::window::Icon::from_rgba(rgba, width, height).unwrap();
self.window.window().set_window_icon(Some(icon))
}
fn set_cursor_grab(&self, grab: bool) {
let _ = self.window.window().set_cursor_grab(grab);
}
fn set_cursor_position(&self, x: f64, y: f64) {
self.window
.window()
.set_cursor_position(glutin::dpi::PhysicalPosition::new(x, y))
.unwrap();
}
fn hide_cursor(&self, hide: bool) {
self.window.window().set_cursor_visible(!hide)
}
fn hide(&mut self) {
self.window.window().set_visible(false)
}
fn show(&mut self) {
self.window.window().set_visible(true)
}
fn get_mouse_button(&self, button: MouseButton) -> Action {
self.button_states[button as usize]
}
fn get_key(&self, key: Key) -> Action {
self.key_states[key as usize]
}
}
fn translate_action(action: glutin::event::ElementState) -> Action {
match action {
glutin::event::ElementState::Pressed => Action::Press,
glutin::event::ElementState::Released => Action::Release,
}
}
fn translate_modifiers(modifiers: glutin::event::ModifiersState) -> Modifiers {
let mut res = Modifiers::empty();
if modifiers.shift() {
res.insert(Modifiers::Shift)
}
if modifiers.ctrl() {
res.insert(Modifiers::Control)
}
if modifiers.alt() {
res.insert(Modifiers::Alt)
}
if modifiers.logo() {
res.insert(Modifiers::Super)
}
res
}
fn translate_mouse_button(button: glutin::event::MouseButton) -> MouseButton {
match button {
glutin::event::MouseButton::Left => MouseButton::Button1,
glutin::event::MouseButton::Right => MouseButton::Button2,
glutin::event::MouseButton::Middle => MouseButton::Button3,
glutin::event::MouseButton::Other(_) => MouseButton::Button4, // XXX: the default is not good.
}
}
fn translate_key(button: Option<glutin::event::VirtualKeyCode>) -> Key {
if let Some(button) = button {
match button {
glutin::event::VirtualKeyCode::Key1 => Key::Key1,
glutin::event::VirtualKeyCode::Key2 => Key::Key2,
glutin::event::VirtualKeyCode::Key3 => Key::Key3,
glutin::event::VirtualKeyCode::Key4 => Key::Key4,
glutin::event::VirtualKeyCode::Key5 => Key::Key5,
glutin::event::VirtualKeyCode::Key6 => Key::Key6,
glutin::event::VirtualKeyCode::Key7 => Key::Key7,
glutin::event::VirtualKeyCode::Key8 => Key::Key8,
glutin::event::VirtualKeyCode::Key9 => Key::Key9,
glutin::event::VirtualKeyCode::Key0 => Key::Key0,
glutin::event::VirtualKeyCode::A => Key::A,
glutin::event::VirtualKeyCode::B => Key::B,
glutin::event::VirtualKeyCode::C => Key::C,
glutin::event::VirtualKeyCode::D => Key::D,
glutin::event::VirtualKeyCode::E => Key::E,
glutin::event::VirtualKeyCode::F => Key::F,
glutin::event::VirtualKeyCode::G => Key::G,
glutin::event::VirtualKeyCode::H => Key::H,
glutin::event::VirtualKeyCode::I => Key::I,
glutin::event::VirtualKeyCode::J => Key::J,
glutin::event::VirtualKeyCode::K => Key::K,
glutin::event::VirtualKeyCode::L => Key::L,
glutin::event::VirtualKeyCode::M => Key::M,
glutin::event::VirtualKeyCode::N => Key::N,
glutin::event::VirtualKeyCode::O => Key::O,
glutin::event::VirtualKeyCode::P => Key::P,
glutin::event::VirtualKeyCode::Q => Key::Q,
glutin::event::VirtualKeyCode::R => Key::R,
glutin::event::VirtualKeyCode::S => Key::S,
glutin::event::VirtualKeyCode::T => Key::T,
glutin::event::VirtualKeyCode::U => Key::U,
glutin::event::VirtualKeyCode::V => Key::V,
glutin::event::VirtualKeyCode::W => Key::W,
glutin::event::VirtualKeyCode::X => Key::X,
glutin::event::VirtualKeyCode::Y => Key::Y,
glutin::event::VirtualKeyCode::Z => Key::Z,
glutin::event::VirtualKeyCode::Escape => Key::Escape,
glutin::event::VirtualKeyCode::F1 => Key::F1,
glutin::event::VirtualKeyCode::F2 => Key::F2,
glutin::event::VirtualKeyCode::F3 => Key::F3,
glutin::event::VirtualKeyCode::F4 => Key::F4,
glutin::event::VirtualKeyCode::F5 => Key::F5,
glutin::event::VirtualKeyCode::F6 => Key::F6,
glutin::event::VirtualKeyCode::F7 => Key::F7,
glutin::event::VirtualKeyCode::F8 => Key::F8,
glutin::event::VirtualKeyCode::F9 => Key::F9,
glutin::event::VirtualKeyCode::F10 => Key::F10,
glutin::event::VirtualKeyCode::F11 => Key::F11,
glutin::event::VirtualKeyCode::F12 => Key::F12,
glutin::event::VirtualKeyCode::F13 => Key::F13,
glutin::event::VirtualKeyCode::F14 => Key::F14,
glutin::event::VirtualKeyCode::F15 => Key::F15,
glutin::event::VirtualKeyCode::F16 => Key::F16,
glutin::event::VirtualKeyCode::F17 => Key::F17,
glutin::event::VirtualKeyCode::F18 => Key::F18,
glutin::event::VirtualKeyCode::F19 => Key::F19,
glutin::event::VirtualKeyCode::F20 => Key::F20,
glutin::event::VirtualKeyCode::F21 => Key::F21,
glutin::event::VirtualKeyCode::F22 => Key::F22,
glutin::event::VirtualKeyCode::F23 => Key::F23,
glutin::event::VirtualKeyCode::F24 => Key::F24,
glutin::event::VirtualKeyCode::Snapshot => Key::Snapshot,
glutin::event::VirtualKeyCode::Scroll => Key::Scroll,
glutin::event::VirtualKeyCode::Pause => Key::Pause,
glutin::event::VirtualKeyCode::Insert => Key::Insert,
glutin::event::VirtualKeyCode::Home => Key::Home,
glutin::event::VirtualKeyCode::Delete => Key::Delete,
glutin::event::VirtualKeyCode::End => Key::End,
glutin::event::VirtualKeyCode::PageDown => Key::PageDown,
glutin::event::VirtualKeyCode::PageUp => Key::PageUp,
glutin::event::VirtualKeyCode::Left => Key::Left,
glutin::event::VirtualKeyCode::Up => Key::Up,
glutin::event::VirtualKeyCode::Right => Key::Right,
glutin::event::VirtualKeyCode::Down => Key::Down,
glutin::event::VirtualKeyCode::Back => Key::Back,
glutin::event::VirtualKeyCode::Return => Key::Return,
glutin::event::VirtualKeyCode::Space => Key::Space,
glutin::event::VirtualKeyCode::Compose => Key::Compose,
glutin::event::VirtualKeyCode::Caret => Key::Caret,
glutin::event::VirtualKeyCode::Numlock => Key::Numlock,
glutin::event::VirtualKeyCode::Numpad0 => Key::Numpad0,
glutin::event::VirtualKeyCode::Numpad1 => Key::Numpad1,
glutin::event::VirtualKeyCode::Numpad2 => Key::Numpad2,
glutin::event::VirtualKeyCode::Numpad3 => Key::Numpad3,
glutin::event::VirtualKeyCode::Numpad4 => Key::Numpad4,
glutin::event::VirtualKeyCode::Numpad5 => Key::Numpad5,
glutin::event::VirtualKeyCode::Numpad6 => Key::Numpad6,
glutin::event::VirtualKeyCode::Numpad7 => Key::Numpad7,
glutin::event::VirtualKeyCode::Numpad8 => Key::Numpad8,
glutin::event::VirtualKeyCode::Numpad9 => Key::Numpad9,
glutin::event::VirtualKeyCode::AbntC1 => Key::AbntC1,
glutin::event::VirtualKeyCode::AbntC2 => Key::AbntC2,
glutin::event::VirtualKeyCode::NumpadAdd => Key::Add,
glutin::event::VirtualKeyCode::Apostrophe => Key::Apostrophe,
glutin::event::VirtualKeyCode::Apps => Key::Apps,
glutin::event::VirtualKeyCode::At => Key::At,
glutin::event::VirtualKeyCode::Ax => Key::Ax,
glutin::event::VirtualKeyCode::Backslash => Key::Backslash,
glutin::event::VirtualKeyCode::Calculator => Key::Calculator,
glutin::event::VirtualKeyCode::Capital => Key::Capital,
glutin::event::VirtualKeyCode::Colon => Key::Colon,
glutin::event::VirtualKeyCode::Comma => Key::Comma,
glutin::event::VirtualKeyCode::Convert => Key::Convert,
glutin::event::VirtualKeyCode::NumpadDecimal => Key::Decimal,
glutin::event::VirtualKeyCode::NumpadDivide => Key::Divide,
glutin::event::VirtualKeyCode::Asterisk => Key::Multiply,
glutin::event::VirtualKeyCode::Plus => Key::Add,
glutin::event::VirtualKeyCode::Equals => Key::Equals,
glutin::event::VirtualKeyCode::Grave => Key::Grave,
glutin::event::VirtualKeyCode::Kana => Key::Kana,
glutin::event::VirtualKeyCode::Kanji => Key::Kanji,
glutin::event::VirtualKeyCode::LAlt => Key::LAlt,
glutin::event::VirtualKeyCode::LBracket => Key::LBracket,
glutin::event::VirtualKeyCode::LControl => Key::LControl,
glutin::event::VirtualKeyCode::LShift => Key::LShift,
glutin::event::VirtualKeyCode::LWin => Key::LWin,
glutin::event::VirtualKeyCode::Mail => Key::Mail,
glutin::event::VirtualKeyCode::MediaSelect => Key::MediaSelect,
glutin::event::VirtualKeyCode::MediaStop => Key::MediaStop,
glutin::event::VirtualKeyCode::Minus => Key::Minus,
glutin::event::VirtualKeyCode::NumpadMultiply => Key::Multiply,
glutin::event::VirtualKeyCode::Mute => Key::Mute,
glutin::event::VirtualKeyCode::MyComputer => Key::MyComputer,
glutin::event::VirtualKeyCode::NavigateForward => Key::NavigateForward,
glutin::event::VirtualKeyCode::NavigateBackward => Key::NavigateBackward,
glutin::event::VirtualKeyCode::NextTrack => Key::NextTrack,
glutin::event::VirtualKeyCode::NoConvert => Key::NoConvert,
glutin::event::VirtualKeyCode::NumpadComma => Key::NumpadComma,
glutin::event::VirtualKeyCode::NumpadEnter => Key::NumpadEnter,
glutin::event::VirtualKeyCode::NumpadEquals => Key::NumpadEquals,
glutin::event::VirtualKeyCode::OEM102 => Key::OEM102,
glutin::event::VirtualKeyCode::Period => Key::Period,
glutin::event::VirtualKeyCode::PlayPause => Key::PlayPause,
glutin::event::VirtualKeyCode::Power => Key::Power,
glutin::event::VirtualKeyCode::PrevTrack => Key::PrevTrack,
glutin::event::VirtualKeyCode::RAlt => Key::RAlt,
glutin::event::VirtualKeyCode::RBracket => Key::RBracket,
glutin::event::VirtualKeyCode::RControl => Key::RControl,
glutin::event::VirtualKeyCode::RShift => Key::RShift,
glutin::event::VirtualKeyCode::RWin => Key::RWin,
glutin::event::VirtualKeyCode::Semicolon => Key::Semicolon,
glutin::event::VirtualKeyCode::Slash => Key::Slash,
glutin::event::VirtualKeyCode::Sleep => Key::Sleep,
glutin::event::VirtualKeyCode::Stop => Key::Stop,
glutin::event::VirtualKeyCode::NumpadSubtract => Key::Subtract,
glutin::event::VirtualKeyCode::Sysrq => Key::Sysrq,
glutin::event::VirtualKeyCode::Tab => Key::Tab,
glutin::event::VirtualKeyCode::Underline => Key::Underline,
glutin::event::VirtualKeyCode::Unlabeled => Key::Unlabeled,
glutin::event::VirtualKeyCode::VolumeDown => Key::VolumeDown,
glutin::event::VirtualKeyCode::VolumeUp => Key::VolumeUp,
glutin::event::VirtualKeyCode::Wake => Key::Wake,
glutin::event::VirtualKeyCode::WebBack => Key::WebBack,
glutin::event::VirtualKeyCode::WebFavorites => Key::WebFavorites,
glutin::event::VirtualKeyCode::WebForward => Key::WebForward,
glutin::event::VirtualKeyCode::WebHome => Key::WebHome,
glutin::event::VirtualKeyCode::WebRefresh => Key::WebRefresh,
glutin::event::VirtualKeyCode::WebSearch => Key::WebSearch,
glutin::event::VirtualKeyCode::WebStop => Key::WebStop,
glutin::event::VirtualKeyCode::Yen => Key::Yen,
glutin::event::VirtualKeyCode::Copy => Key::Copy,
glutin::event::VirtualKeyCode::Paste => Key::Paste,
glutin::event::VirtualKeyCode::Cut => Key::Cut,
}
} else {
Key::Unknown
}
}
|
{
loop {
if !callback(0.0) {
break;
} // XXX: timestamp
}
}
|
smtpmock.py
|
'''
Provides a mock SMTP server implementation, MockSMTPServer.
Sample usage:
----
# create the server -- will start automatically
import smtpmock
mock_server = smtpmock.MockSMTPServer("localhost", 25025)
#send a test message
import smtplib
client = smtplib.SMTP("localhost", 25025)
fromaddr = "[email protected]"
toaddrs = ["[email protected]", "[email protected]"]
content = "test message content"
msg = "From: %s\r\nTo: %s\r\n\r\n%s" % (fromaddr, ", ".join(toaddrs), content)
client.sendmail(fromaddr, toaddrs, msg)
client.quit()
# verify that the message has been received
assert(mock_server.received_message_matching("From: .*\\nTo: .*\\n+.+tent"))
# reset the server to be ready for a new test
mock_server.reset()
assert(mock_server.received_messages_count() == 0)
----
'''
import asyncore
import re
import smtpd
import threading
class MockSMTPServer(smtpd.SMTPServer, threading.Thread):
'''
    A mock SMTP server. Runs in a separate thread so it can be started from
    existing test code.
'''
def __init__(self, hostname, port, callback=None):
threading.Thread.__init__(self)
smtpd.SMTPServer.__init__(self, (hostname, port), None)
self.daemon = True
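        # optional hook invoked with each received message's raw data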
self.callback = callback
self.received_messages = []
self.start()
    def stop(self):
        # Closing the server socket ends asyncore.loop() once no channels
        # remain open.
        self.close()
def run(self):
asyncore.loop()
def process_message(self, peer, mailfrom, rcpttos, data,
mail_options=None, rcpt_options=None):
self.received_messages.append(data)
if self.callback:
self.callback(data)
def
|
(self):
self.received_messages = []
# helper methods for assertions in test cases
def received_message_matching(self, template):
for message in self.received_messages:
if re.match(template, message.decode(), flags=re.DOTALL):
return True
return False
def received_messages_count(self):
return len(self.received_messages)
|
reset
|
server_config_client.go
|
package handshake
import (
"bytes"
"encoding/binary"
"errors"
"math"
"time"
"github.com/phuslu/quic-go/internal/crypto"
"github.com/phuslu/quic-go/internal/utils"
"github.com/phuslu/quic-go/qerr"
)
type serverConfigClient struct {
raw []byte
ID []byte
obit []byte
expiry time.Time
kex crypto.KeyExchange
sharedSecret []byte
}
var (
errMessageNotServerConfig = errors.New("ServerConfig must have TagSCFG")
)
// parseServerConfig parses a server config
func parseServerConfig(data []byte) (*serverConfigClient, error)
|
func (s *serverConfigClient) parseValues(tagMap map[Tag][]byte) error {
// SCID
scfgID, ok := tagMap[TagSCID]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "SCID")
}
if len(scfgID) != 16 {
return qerr.Error(qerr.CryptoInvalidValueLength, "SCID")
}
s.ID = scfgID
// KEXS
// TODO: setup Key Exchange
kexs, ok := tagMap[TagKEXS]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "KEXS")
}
if len(kexs)%4 != 0 {
return qerr.Error(qerr.CryptoInvalidValueLength, "KEXS")
}
c255Foundat := -1
for i := 0; i < len(kexs)/4; i++ {
if bytes.Equal(kexs[4*i:4*i+4], []byte("C255")) {
c255Foundat = i
break
}
}
if c255Foundat < 0 {
return qerr.Error(qerr.CryptoNoSupport, "KEXS: Could not find C255, other key exchanges are not supported")
}
// AEAD
aead, ok := tagMap[TagAEAD]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "AEAD")
}
if len(aead)%4 != 0 {
return qerr.Error(qerr.CryptoInvalidValueLength, "AEAD")
}
var aesgFound bool
for i := 0; i < len(aead)/4; i++ {
if bytes.Equal(aead[4*i:4*i+4], []byte("AESG")) {
aesgFound = true
break
}
}
if !aesgFound {
return qerr.Error(qerr.CryptoNoSupport, "AEAD")
}
// PUBS
pubs, ok := tagMap[TagPUBS]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "PUBS")
}
var pubs_kexs []struct{Length uint32; Value []byte}
var last_len uint32
for i := 0; i < len(pubs)-3; i += int(last_len)+3 {
		// the PUBS value is always prefixed with a 3-byte little-endian length field
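		// For example (illustrative bytes): a 32-byte C255 public key is framed
		// as 0x20 0x00 0x00 followed by the key material, since 0x000020 read
		// little-endian is 32.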
		err := binary.Read(bytes.NewReader([]byte{pubs[i], pubs[i+1], pubs[i+2], 0x00}), binary.LittleEndian, &last_len)
if err != nil {
return qerr.Error(qerr.CryptoInvalidValueLength, "PUBS not decodable")
}
if last_len == 0 {
return qerr.Error(qerr.CryptoInvalidValueLength, "PUBS")
}
if i+3+int(last_len) > len(pubs) {
return qerr.Error(qerr.CryptoInvalidValueLength, "PUBS")
}
pubs_kexs = append(pubs_kexs, struct{Length uint32; Value []byte}{last_len, pubs[i+3:i+3+int(last_len)]})
}
if c255Foundat >= len(pubs_kexs) {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "KEXS not in PUBS")
}
if pubs_kexs[c255Foundat].Length != 32 {
return qerr.Error(qerr.CryptoInvalidValueLength, "PUBS")
}
var err error
s.kex, err = crypto.NewCurve25519KEX()
if err != nil {
return err
}
s.sharedSecret, err = s.kex.CalculateSharedKey(pubs_kexs[c255Foundat].Value)
if err != nil {
return err
}
// OBIT
obit, ok := tagMap[TagOBIT]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "OBIT")
}
if len(obit) != 8 {
return qerr.Error(qerr.CryptoInvalidValueLength, "OBIT")
}
s.obit = obit
// EXPY
expy, ok := tagMap[TagEXPY]
if !ok {
return qerr.Error(qerr.CryptoMessageParameterNotFound, "EXPY")
}
if len(expy) != 8 {
return qerr.Error(qerr.CryptoInvalidValueLength, "EXPY")
}
// make sure that the value doesn't overflow an int64
// furthermore, values close to MaxInt64 are not a valid input to time.Unix, thus set MaxInt64/2 as the maximum value here
expyTimestamp := utils.MinUint64(binary.LittleEndian.Uint64(expy), math.MaxInt64/2)
s.expiry = time.Unix(int64(expyTimestamp), 0)
// TODO: implement VER
return nil
}
func (s *serverConfigClient) IsExpired() bool {
return s.expiry.Before(time.Now())
}
func (s *serverConfigClient) Get() []byte {
return s.raw
}
|
{
message, err := ParseHandshakeMessage(bytes.NewReader(data))
if err != nil {
return nil, err
}
if message.Tag != TagSCFG {
return nil, errMessageNotServerConfig
}
scfg := &serverConfigClient{raw: data}
err = scfg.parseValues(message.Data)
if err != nil {
return nil, err
}
return scfg, nil
}
|
listTopicSharedAccessKeys.ts
|
// *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import { input as inputs, output as outputs, enums } from "../../types";
import * as utilities from "../../utilities";
/**
* Shared access keys of the Topic
* Latest API Version: 2020-06-01.
*/
/** @deprecated The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:listTopicSharedAccessKeys'. */
export function
|
(args: ListTopicSharedAccessKeysArgs, opts?: pulumi.InvokeOptions): Promise<ListTopicSharedAccessKeysResult> {
pulumi.log.warn("listTopicSharedAccessKeys is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-nextgen:eventgrid:listTopicSharedAccessKeys'.")
if (!opts) {
opts = {}
}
if (!opts.version) {
opts.version = utilities.getVersion();
}
return pulumi.runtime.invoke("azure-nextgen:eventgrid/latest:listTopicSharedAccessKeys", {
"resourceGroupName": args.resourceGroupName,
"topicName": args.topicName,
}, opts);
}
export interface ListTopicSharedAccessKeysArgs {
/**
* The name of the resource group within the user's subscription.
*/
readonly resourceGroupName: string;
/**
* Name of the topic.
*/
readonly topicName: string;
}
/**
* Shared access keys of the Topic
*/
export interface ListTopicSharedAccessKeysResult {
/**
* Shared access key1 for the topic.
*/
readonly key1?: string;
/**
* Shared access key2 for the topic.
*/
readonly key2?: string;
}
|
listTopicSharedAccessKeys
|
__init__.py
|
from pythonforandroid.recipe import CythonRecipe
from os.path import join
class ShapelyRecipe(CythonRecipe):
version = '1.7a1'
url = 'https://github.com/Toblerity/Shapely/archive/{version}.tar.gz'
depends = ['setuptools', 'libgeos']
# Actually, this recipe seems to compile/install fine for python2, but it
    # fails at runtime when importing the module with:
# `[Errno 2] No such file or directory`
conflicts = ['python2']
call_hostpython_via_targetpython = False
    # Patch to avoid the libgeos check (because it fails), insert environment
    # variables for our libgeos build (includes, lib paths...) and force
    # the cython compilation to raise an error if it fails
patches = ['setup.patch']
# Don't Force Cython
# setup_extra_args = ['sdist']
def
|
(self, arch=None, with_flags_in_cc=True):
env = super(ShapelyRecipe, self).get_recipe_env(arch)
libgeos_install = join(self.get_recipe(
'libgeos', self.ctx).get_build_dir(arch.arch), 'install_target')
        # All these `GEOS_X` variables should be strings, comma-separated in
        # case we need to pass more than one value
env['GEOS_INCLUDE_DIRS'] = join(libgeos_install, 'include')
env['GEOS_LIBRARY_DIRS'] = join(libgeos_install, 'lib')
env['GEOS_LIBRARIES'] = 'geos_c,geos'
return env
recipe = ShapelyRecipe()
|
get_recipe_env
|
nemo_convert_png.py
|
'''Extension for Nemo's context menu to easily convert images to PNG and
optimize their filesize with pngcrush.'''
from __future__ import annotations
import os
import subprocess
from urllib.parse import unquote_plus, urlparse
from PIL import Image, UnidentifiedImageError
import PySimpleGUI as sg
import gi
gi.require_version('Nemo', '3.0')
from gi.repository import GObject, Nemo # type: ignore pylint: disable=wrong-import-position
EXTENSIONS = ('jpg', 'jpeg', 'gif', 'tiff', 'bmp', 'png')
uqp = unquote_plus
def get_files(files_in: list[GObject]) -> list[str]|None:
"""
Retrieve filenames as cross-platform safe strings from file objects.
:param files_in: List of file objects.
"""
files = []
for file_in in files_in:
file_in = unquote_plus(file_in.get_uri()[7:])
if os.path.isfile(file_in):
files.append(file_in)
if files:
return files
return None
def convert_one(file: str) -> None:
'''
Converts an image to a PNG.
:param file: Filename of the image to convert.
'''
    filename = f'{os.path.splitext(file)[0]}.png'
try:
img = Image.open(file).convert('RGB')
except UnidentifiedImageError:
img = False
if img:
os.remove(file)
img.save(filename, 'PNG')
def
|
(_, files: list[str]) -> list[str]:
'''
Called by the context menu item "Convert selected image(s) to PNG".
:param files: The currently selected files.
'''
    filenames = [f'{os.path.splitext(file)[0]}.png' for file in files]
count = sum(not file.endswith('png') for file in files)
for i, file in enumerate(files):
if not file.endswith('png'):
sg.OneLineProgressMeter('Please wait...', i+1, count, 'pb', 'Converting images', orientation='h')
convert_one(file)
sg.OneLineProgressMeter('', count, count, key='pb')
return filenames
def crush_one(file: str) -> None:
'''
Runs pngcrush on a png file.
:param file: The file to execute this action on.
'''
subprocess.run(['pngcrush', '-rem', 'alla', '-nofilecheck', '-fix', '-ow',
'-reduce', '-m', '0', file], check=False)
def crush_images(_, files: list[str]) -> None:
'''
Called by the context menu item "Optimize image(s) with pngcrush.
:param files: The currently selected files.
'''
for i, file in enumerate(files):
sg.OneLineProgressMeter('Please wait...', i+1, len(files), 'pb',
'Optimize images with pngcrush', orientation='h')
crush_one(file)
sg.OneLineProgressMeter('', len(files), len(files), key='pb')
def convert_and_crush(_, files: list[str]) -> None:
'''
Called by the context menu item "Convert to PNG and optimize.
:param files: The currently selected files.
'''
converted = convert_images(None, files)
crush_images(None, converted)
class PNGConverter(GObject.GObject, Nemo.MenuProvider):
'''Class for extension context menu items.'''
def __init__(self):
'''File manager crashes if init is not called.'''
...
def get_background_items( # pylint: disable=arguments-differ
self, _, folder: GObject) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with no file objects selected.
:param folder: Nemo's current working directory.
'''
folder = urlparse(folder.get_uri()).path
files = [uqp(os.path.join(folder, f))
for f in os.listdir(uqp(folder))
if os.path.isfile(uqp(os.path.join(folder, f))) and
f.lower().endswith(EXTENSIONS)]
if all(file.endswith('png') for file in files):
crush = Nemo.MenuItem(
name='CrushImages',
label='Optimize image(s) with pngcrush',
tip='Optimize image filesizes with pngcrush'
)
crush.connect('activate', crush_images, files)
return [crush]
if any(file.endswith(EXTENSIONS) for file in files):
convert = Nemo.MenuItem(
name="ConvertAllImagestoPNG",
label="Convert all images to PNG",
tip="Convert all images to PNG"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name='ConvertandCrush',
label="Convert images to PNG and optimize",
tip="Convert images to PNG and optimize filesizes with pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
def get_file_items( # pylint: disable=arguments-differ
self, _, files: list[GObject]) -> list[Nemo.MenuItem]|None:
'''
Called when context menu is called with files selected.
:param files: The currently selected file objects.
'''
files = get_files(files) # type: ignore
try:
is_iter = iter(files)
check = all(file.lower().endswith('png') for file in files)
except TypeError:
is_iter = False
check = False
if check:
convert = Nemo.MenuItem(
name="CrushImages",
label="Optimize image(s) with pngcrush",
tip="Optimize filesize(s) with pngcrush"
)
convert.connect('activate', crush_images, files)
return [convert]
if is_iter:
check = all(file.lower().endswith(EXTENSIONS) for file in files)
if check:
convert = Nemo.MenuItem(
name="ConvertImagetoPNG",
label="Convert selected image(s) to .png",
tip="Convert image(s) to .png"
)
convert.connect('activate', convert_images, files)
crush = Nemo.MenuItem(
name="ConvertandCrush",
label="Convert to PNG and optimize with pngcrush",
tip="Convert image(s) to PNG and optimize filesize(s) with\
pngcrush"
)
crush.connect('activate', convert_and_crush, files)
return [convert, crush]
|
convert_images
|
span_data_converter.go
|
package sls_store
import (
"encoding/json"
"fmt"
"time"
slsSdk "github.com/aliyun/aliyun-log-go-sdk"
"github.com/gogo/protobuf/proto"
"github.com/jaegertracing/jaeger/model"
"github.com/spf13/cast"
)
type DataConverter interface {
ToJaegerSpan(data map[string]string) (*model.Span, error)
ToSLSSpan(span *model.Span) ([]*slsSdk.LogContent, error)
}
var dataConvert = &dataConverterImpl{}
type dataConverterImpl struct {
}
func (dataConverterImpl) ToJaegerSpan(log map[string]string) (*model.Span, error) {
span := model.Span{}
process := model.Process{
Tags: make([]model.KeyValue, 0),
}
for k, v := range log {
switch k {
case TraceID:
traceID, err := model.TraceIDFromString(v)
if err != nil {
logger.Warn("Failed to convert traceId", "key", k, "value", v)
return nil, err
}
span.TraceID = traceID
break
case SpanID:
spanID, err := model.SpanIDFromString(v)
if err != nil {
logger.Warn("Failed to convert spanID", "key", k, "value", v)
return nil, err
}
span.SpanID = spanID
break
case OperationName:
span.OperationName = v
break
case Flags:
span.Flags = model.Flags(cast.ToUint64(v))
break
case StartTime:
span.StartTime = model.EpochMicrosecondsAsTime(cast.ToUint64(v))
break
case Duration:
span.Duration = model.MicrosecondsAsDuration(cast.ToUint64(v))
break
case ServiceName:
process.ServiceName = v
break
case Links:
refs, err := unmarshalReferences(v)
if err != nil {
logger.Warn("Failed to convert links", "key", k, "value", v, "exception", err)
return nil, err
}
span.References = refs
break
case Logs:
logs, err := unmarshalLogs(v)
if err != nil {
logger.Warn("Failed to convert logs", "key", k, "value", v, "exception", err)
return nil, err
}
span.Logs = logs
break
case StatusMessageField:
if v != "" {
span.Warnings = append(span.Warnings, v)
}
break
case Attribute:
span.Tags = unmarshalTags(v)
break
case Resource:
process.Tags, span.ProcessID = unmarshalResource(v)
break
case StatusCodeField:
if v == "ERROR" {
span.Warnings = append(span.Warnings, v)
}
|
return &span, nil
}
func (dataConverterImpl) ToSLSSpan(span *model.Span) ([]*slsSdk.LogContent, error) {
contents := make([]*slsSdk.LogContent, 0)
contents = appendAttributeToLogContent(contents, TraceID, TraceIDToString(&span.TraceID))
contents = appendAttributeToLogContent(contents, SpanID, span.SpanID.String())
contents = appendAttributeToLogContent(contents, ParentSpanID, span.ParentSpanID().String())
contents = appendAttributeToLogContent(contents, OperationName, span.OperationName)
contents = appendAttributeToLogContent(contents, Flags, fmt.Sprintf("%d", span.Flags))
contents = appendAttributeToLogContent(contents, StartTime, cast.ToString(span.StartTime.UnixNano()/1000))
contents = appendAttributeToLogContent(contents, Duration, cast.ToString(span.Duration.Nanoseconds()/1000))
contents = appendAttributeToLogContent(contents, EndTime, cast.ToString((span.StartTime.UnixNano()+span.Duration.Nanoseconds())/1000))
contents = appendAttributeToLogContent(contents, ServiceName, span.Process.ServiceName)
contents = appendAttributeToLogContent(contents, StatusCode, "UNSET")
contents = appendAttributeToLogContent(contents, Attribute, marshalTags(span.Tags))
contents = appendAttributeToLogContent(contents, Resource, marshalResource(span.Process.Tags, span.ProcessID))
if refStr, err := marshalReferences(span.References); err != nil {
logger.Warn("Failed to convert references", "spanID", span.SpanID, "reference", span.References, "exception", err)
return nil, err
} else {
contents = appendAttributeToLogContent(contents, Links, refStr)
}
if logsStr, err := marshalLogs(span.Logs); err != nil {
logger.Warn("Failed to convert logs", "spanID", span.SpanID, "logs", span.Logs, "exception", err)
return nil, err
} else {
contents = appendAttributeToLogContent(contents, Logs, logsStr)
}
contents, err := appendWarnings(contents, span.Warnings)
if err != nil {
logger.Warn("Failed to convert warnings", "spanID", span.SpanID, "warnings", span.Warnings, "exception", err)
return nil, err
}
return contents, nil
}
func appendWarnings(contents []*slsSdk.LogContent, warnings []string) ([]*slsSdk.LogContent, error) {
if len(warnings) < 1 {
return contents, nil
}
r, err := json.Marshal(warnings)
if err != nil {
return nil, err
}
return appendAttributeToLogContent(contents, StatusMessage, string(r)), nil
}
func marshalResource(v []model.KeyValue, processID string) string {
dataMap := keyValueToMap(v)
dataMap["ProcessID"] = processID
data, err := json.Marshal(dataMap)
if err != nil {
return fmt.Sprintf("%v", string(data))
}
return string(data)
}
func unmarshalResource(v string) (kvs []model.KeyValue, processID string) {
data := make(map[string]string)
err := json.Unmarshal([]byte(v), &data)
if err != nil {
kvs = append(kvs, model.String("tags", v))
return kvs, ""
}
return mapToKeyValue(data), data["ProcessID"]
}
func marshalTags(v []model.KeyValue) string {
dataMap := keyValueToMap(v)
data, err := json.Marshal(dataMap)
if err != nil {
return fmt.Sprintf("%v", string(data))
}
return string(data)
}
func unmarshalTags(v string) (kvs []model.KeyValue) {
data := make(map[string]string)
err := json.Unmarshal([]byte(v), &data)
if err != nil {
kvs = append(kvs, model.String("tags", v))
return
}
return mapToKeyValue(data)
}
type SpanLog struct {
Attribute map[string]string `json:"attribute"`
Time int64 `json:"time"`
}
func marshalLogs(logs []model.Log) (string, error) {
if len(logs) <= 0 {
return "[]", nil
}
slsLogs := make([]SpanLog, len(logs))
for i, l := range logs {
slsLogs[i] = SpanLog{
Time: l.Timestamp.UnixNano(),
Attribute: keyValueToMap(l.Fields),
}
}
r, err := json.Marshal(slsLogs)
if err != nil {
return "", err
}
return string(r), nil
}
func unmarshalLogs(s string) ([]model.Log, error) {
if s == "[]" {
return nil, nil
}
logs := make([]SpanLog, 0)
if err := json.Unmarshal([]byte(s), &logs); err != nil {
return nil, err
}
result := make([]model.Log, len(logs))
for i, log := range logs {
result[i] = model.Log{
Timestamp: time.Unix(log.Time/1e9, log.Time%1e9),
Fields: mapToKeyValue(log.Attribute),
}
}
return result, nil
}
func marshalReferences(refs []model.SpanRef) (string, error) {
if len(refs) <= 0 {
return "[]", nil
}
rs := make([]map[string]string, 0)
for _, ref := range refs {
r := make(map[string]string)
r["TraceID"] = ref.TraceID.String()
r["SpanID"] = ref.SpanID.String()
r["RefType"] = ref.RefType.String()
rs = append(rs, r)
}
r, err := json.Marshal(rs)
if err != nil {
return "", err
}
return string(r), nil
}
func unmarshalReferences(s string) (refs []model.SpanRef, err error) {
if s == "[]" {
return nil, nil
}
rs := make([]map[string]string, 0)
err = json.Unmarshal([]byte(s), &rs)
if err != nil {
return nil, err
}
for _, r := range rs {
tid, e1 := model.TraceIDFromString(r["TraceID"])
if e1 != nil {
return nil, e1
}
spanID, e2 := model.SpanIDFromString(r["SpanID"])
if e2 != nil {
return nil, e2
}
spanType := model.SpanRefType_value[r["RefType"]]
refs = append(refs, model.SpanRef{
TraceID: tid,
SpanID: spanID,
RefType: model.SpanRefType(spanType),
})
}
return refs, nil
}
func mapToKeyValue(data map[string]string) []model.KeyValue {
result := make([]model.KeyValue, 0)
for key, value := range data {
result = append(result, model.String(key, value))
}
return result
}
func keyValueToMap(fields []model.KeyValue) map[string]string {
m := make(map[string]string)
for _, keyVal := range fields {
m[keyVal.Key] = keyVal.AsString()
}
return m
}
func TraceIDToString(t *model.TraceID) string {
return t.String()
}
func appendAttributeToLogContent(contents []*slsSdk.LogContent, k, v string) []*slsSdk.LogContent {
content := slsSdk.LogContent{
Key: proto.String(k),
Value: proto.String(v),
}
return append(contents, &content)
}
|
}
}
span.Process = &process
|
types.rs
|
use crate::syntax::improper::ImproperCtype;
use crate::syntax::report::Errors;
use crate::syntax::set::{OrderedSet as Set, UnorderedSet};
use crate::syntax::trivial::{self, TrivialReason};
use crate::syntax::{
toposort, Api, Enum, ExternType, Impl, Pair, RustName, Struct, Type, TypeAlias,
};
use proc_macro2::Ident;
use quote::ToTokens;
use std::collections::BTreeMap as Map;
pub struct Types<'a> {
pub all: Set<&'a Type>,
pub structs: Map<&'a Ident, &'a Struct>,
pub enums: Map<&'a Ident, &'a Enum>,
pub cxx: Set<&'a Ident>,
pub rust: Set<&'a Ident>,
pub aliases: Map<&'a Ident, &'a TypeAlias>,
pub untrusted: Map<&'a Ident, &'a ExternType>,
pub required_trivial: Map<&'a Ident, Vec<TrivialReason<'a>>>,
pub explicit_impls: Set<&'a Impl>,
pub resolutions: Map<&'a RustName, &'a Pair>,
pub struct_improper_ctypes: UnorderedSet<&'a Ident>,
pub toposorted_structs: Vec<&'a Struct>,
}
impl<'a> Types<'a> {
pub fn collect(cx: &mut Errors, apis: &'a [Api]) -> Self {
let mut all = Set::new();
let mut structs = Map::new();
let mut enums = Map::new();
let mut cxx = Set::new();
let mut rust = Set::new();
let mut aliases = Map::new();
let mut untrusted = Map::new();
let mut explicit_impls = Set::new();
let mut resolutions = Map::new();
let struct_improper_ctypes = UnorderedSet::new();
let toposorted_structs = Vec::new();
fn visit<'a>(all: &mut Set<&'a Type>, ty: &'a Type) {
all.insert(ty);
match ty {
Type::Ident(_) | Type::Str(_) | Type::Void(_) => {}
Type::RustBox(ty)
| Type::UniquePtr(ty)
| Type::SharedPtr(ty)
| Type::WeakPtr(ty)
| Type::CxxVector(ty)
| Type::RustVec(ty) => visit(all, &ty.inner),
Type::Ref(r) => visit(all, &r.inner),
Type::Array(a) => visit(all, &a.inner),
Type::SliceRef(s) => visit(all, &s.inner),
Type::Fn(f) => {
if let Some(ret) = &f.ret {
visit(all, ret);
}
for arg in &f.args {
visit(all, &arg.ty);
}
}
}
}
let mut add_resolution = |pair: &'a Pair| {
resolutions.insert(RustName::from_ref(&pair.rust), pair);
};
let mut type_names = UnorderedSet::new();
let mut function_names = UnorderedSet::new();
for api in apis {
// The same identifier is permitted to be declared as both a shared
// enum and extern C++ type, or shared struct and extern C++ type.
// That indicates to not emit the C++ enum/struct definition because
// it's defined by the included headers already.
//
// All other cases of duplicate identifiers are reported as an error.
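            // For example (illustrative): declaring `struct S { .. }` together
            // with `extern "C++" { type S; }` is accepted and suppresses the
            // generated C++ definition of S, while two `struct S` declarations
            // collide and are reported.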
match api {
Api::Include(_) => {}
Api::Struct(strct) => {
let ident = &strct.name.rust;
if !type_names.insert(ident)
&& (!cxx.contains(ident)
|| structs.contains_key(ident)
|| enums.contains_key(ident))
{
// If already declared as a struct or enum, or if
// colliding with something other than an extern C++
// type, then error.
duplicate_name(cx, strct, ident);
}
structs.insert(&strct.name.rust, strct);
for field in &strct.fields {
visit(&mut all, &field.ty);
}
add_resolution(&strct.name);
}
Api::Enum(enm) => {
all.insert(&enm.repr_type);
let ident = &enm.name.rust;
if !type_names.insert(ident)
&& (!cxx.contains(ident)
|| structs.contains_key(ident)
|| enums.contains_key(ident))
{
// If already declared as a struct or enum, or if
// colliding with something other than an extern C++
// type, then error.
duplicate_name(cx, enm, ident);
}
enums.insert(ident, enm);
add_resolution(&enm.name);
}
Api::CxxType(ety) => {
let ident = &ety.name.rust;
if !type_names.insert(ident)
&& (cxx.contains(ident)
|| !structs.contains_key(ident) && !enums.contains_key(ident))
{
// If already declared as an extern C++ type, or if
// colliding with something which is neither struct nor
// enum, then error.
duplicate_name(cx, ety, ident);
}
cxx.insert(ident);
if !ety.trusted {
untrusted.insert(ident, ety);
}
add_resolution(&ety.name);
}
Api::RustType(ety) => {
let ident = &ety.name.rust;
if !type_names.insert(ident) {
duplicate_name(cx, ety, ident);
}
rust.insert(ident);
add_resolution(&ety.name);
}
Api::CxxFunction(efn) | Api::RustFunction(efn) => {
// Note: duplication of the C++ name is fine because C++ has
// function overloading.
if !function_names.insert((&efn.receiver, &efn.name.rust)) {
duplicate_name(cx, efn, &efn.name.rust);
}
for arg in &efn.args {
visit(&mut all, &arg.ty);
}
if let Some(ret) = &efn.ret {
visit(&mut all, ret);
}
}
Api::TypeAlias(alias) => {
let ident = &alias.name.rust;
if !type_names.insert(ident) {
duplicate_name(cx, alias, ident);
}
cxx.insert(ident);
aliases.insert(ident, alias);
add_resolution(&alias.name);
}
Api::Impl(imp) => {
visit(&mut all, &imp.ty);
explicit_impls.insert(imp);
}
}
}
// All these APIs may contain types passed by value. We need to ensure
// we check that this is permissible. We do this _after_ scanning all
// the APIs above, in case some function or struct references a type
// which is declared subsequently.
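        // For example (illustrative): `fn f(x: MyStruct)` may appear in the
        // bridge before `struct MyStruct` is declared; deferring the check
        // until after the scan lets it still see the struct definition.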
let required_trivial =
trivial::required_trivial_reasons(apis, &all, &structs, &enums, &cxx);
let mut types = Types {
all,
structs,
enums,
cxx,
rust,
aliases,
untrusted,
required_trivial,
explicit_impls,
resolutions,
struct_improper_ctypes,
toposorted_structs,
};
types.toposorted_structs = toposort::sort(cx, apis, &types);
let mut unresolved_structs: Vec<&Ident> = types.structs.keys().copied().collect();
let mut new_information = true;
while new_information {
new_information = false;
unresolved_structs.retain(|ident| {
let mut retain = false;
for var in &types.structs[ident].fields {
if match types.determine_improper_ctype(&var.ty) {
ImproperCtype::Depends(inner) => {
retain = true;
types.struct_improper_ctypes.contains(inner)
}
ImproperCtype::Definite(improper) => improper,
} {
types.struct_improper_ctypes.insert(ident);
new_information = true;
return false;
}
}
                // If all fields are definitely not improper, drop this struct
                // from unresolved_structs.
retain
});
}
types
}
pub fn needs_indirect_abi(&self, ty: &Type) -> bool {
match ty {
Type::RustBox(_) | Type::UniquePtr(_) => false,
Type::Array(_) => true,
_ => !self.is_guaranteed_pod(ty),
}
}
// Types that trigger rustc's default #[warn(improper_ctypes)] lint, even if
// they may be otherwise unproblematic to mention in an extern signature.
// For example in a signature like `extern "C" fn(*const String)`, rustc
|
// Rust String, even though C could easily have obtained that pointer
// legitimately from a Rust call.
pub fn is_considered_improper_ctype(&self, ty: &Type) -> bool {
match self.determine_improper_ctype(ty) {
ImproperCtype::Definite(improper) => improper,
ImproperCtype::Depends(ident) => self.struct_improper_ctypes.contains(ident),
}
}
pub fn resolve(&self, ident: &RustName) -> &Pair {
self.resolutions.get(ident).expect("Unable to resolve type")
}
}
impl<'t, 'a> IntoIterator for &'t Types<'a> {
type Item = &'a Type;
type IntoIter = crate::syntax::set::Iter<'t, 'a, Type>;
fn into_iter(self) -> Self::IntoIter {
self.all.into_iter()
}
}
fn duplicate_name(cx: &mut Errors, sp: impl ToTokens, ident: &Ident) {
let msg = format!("the name `{}` is defined multiple times", ident);
cx.error(sp, msg);
}
|
// refuses to believe that C could know how to supply us with a pointer to a
|
encoding.rs
|
extern crate bellman;
extern crate pairing;
extern crate rand;
extern crate sapling_crypto;
#[macro_use]
extern crate clap;
#[cfg(feature = "profile")]
extern crate gperftools;
extern crate memmap;
extern crate tempfile;
#[macro_use]
extern crate slog;
extern crate filecoin_proofs;
extern crate storage_proofs;
use clap::{App, Arg};
#[cfg(feature = "profile")]
use gperftools::profiler::PROFILER;
use memmap::MmapMut;
use memmap::MmapOptions;
use pairing::bls12_381::Bls12;
use rand::{Rng, SeedableRng, XorShiftRng};
use std::fs::File;
use std::io::Write;
use std::time::Instant;
use storage_proofs::drgporep;
use storage_proofs::drgraph::*;
use storage_proofs::example_helper::prettyb;
use storage_proofs::fr32::fr_into_bytes;
use storage_proofs::hasher::{Hasher, PedersenHasher};
use storage_proofs::layered_drgporep;
use storage_proofs::proof::ProofScheme;
use storage_proofs::vde;
use storage_proofs::zigzag_drgporep::*;
use filecoin_proofs::FCP_LOG;
#[cfg(feature = "profile")]
#[inline(always)]
fn start_profile(stage: &str)
|
#[cfg(not(feature = "profile"))]
#[inline(always)]
fn start_profile(_stage: &str) {}
#[cfg(feature = "profile")]
#[inline(always)]
fn stop_profile() {
PROFILER.lock().unwrap().stop().unwrap();
}
#[cfg(not(feature = "profile"))]
#[inline(always)]
fn stop_profile() {}
fn file_backed_mmap_from_random_bytes(n: usize) -> MmapMut {
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
let mut tmpfile: File = tempfile::tempfile().unwrap();
for _ in 0..n {
tmpfile
.write_all(&fr_into_bytes::<Bls12>(&rng.gen()))
.unwrap();
}
unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }
}
pub fn file_backed_mmap_from(data: &[u8]) -> MmapMut {
let mut tmpfile: File = tempfile::tempfile().unwrap();
tmpfile.write_all(data).unwrap();
unsafe { MmapOptions::new().map_mut(&tmpfile).unwrap() }
}
fn do_the_work<H: 'static>(data_size: usize, m: usize, expansion_degree: usize, sloth_iter: usize)
where
H: Hasher,
{
let rng = &mut XorShiftRng::from_seed([0x3dbe6259, 0x8d313d76, 0x3237db17, 0xe5bc0654]);
info!(FCP_LOG, "data size: {}", prettyb(data_size); "target" => "config");
info!(FCP_LOG, "m: {}", m; "target" => "config");
info!(FCP_LOG, "expansion_degree: {}", expansion_degree; "target" => "config");
info!(FCP_LOG, "sloth: {}", sloth_iter; "target" => "config");
info!(FCP_LOG, "generating fake data"; "target" => "status");
let nodes = data_size / 32;
let mut data = file_backed_mmap_from_random_bytes(nodes);
let replica_id: H::Domain = rng.gen();
let sp = layered_drgporep::SetupParams {
drg_porep_setup_params: drgporep::SetupParams {
drg: drgporep::DrgParams {
nodes,
degree: m,
expansion_degree,
seed: new_seed(),
},
sloth_iter,
},
layers: 1,
challenge_count: 1,
};
info!(FCP_LOG, "running setup");
start_profile("setup");
let pp = ZigZagDrgPoRep::<H>::setup(&sp).unwrap();
let drgpp = pp.drg_porep_public_params;
stop_profile();
let start = Instant::now();
info!(FCP_LOG, "encoding");
start_profile("encode");
vde::encode(&drgpp.graph, drgpp.sloth_iter, &replica_id, &mut data).unwrap();
stop_profile();
let encoding_time = start.elapsed();
info!(FCP_LOG, "encoding_time: {:?}", encoding_time; "target" => "stats");
info!(
FCP_LOG,
"encoding time/byte: {:?}",
encoding_time / data_size as u32; "target" => "stats"
);
info!(
FCP_LOG,
"encoding time/GiB: {:?}",
(1 << 30) * encoding_time / data_size as u32; "target" => "stats"
);
}
fn main() {
    let matches = App::new("DrgPoRep Vanilla Bench")
.version("1.0")
.arg(
Arg::with_name("size")
.required(true)
.long("size")
.help("The data size in KB")
.takes_value(true),
)
.arg(
Arg::with_name("m")
.help("The size of m")
.long("m")
.default_value("5")
.takes_value(true),
)
.arg(
Arg::with_name("exp")
.help("Expansion degree")
.long("expansion")
.default_value("6")
.takes_value(true),
)
.arg(
Arg::with_name("sloth")
.help("The number of sloth iterations")
.long("sloth")
.default_value("0")
.takes_value(true),
)
.arg(
Arg::with_name("layers")
.long("layers")
.help("How many layers to use")
.default_value("10")
.takes_value(true),
)
.get_matches();
let data_size = value_t!(matches, "size", usize).unwrap() * 1024;
let m = value_t!(matches, "m", usize).unwrap();
let expansion_degree = value_t!(matches, "exp", usize).unwrap();
let sloth_iter = value_t!(matches, "sloth", usize).unwrap();
do_the_work::<PedersenHasher>(data_size, m, expansion_degree, sloth_iter);
}
|
{
PROFILER
.lock()
.unwrap()
.start(format!("./{}.profile", stage))
.unwrap();
}
|
utxoviewpoint.go
|
// Copyright (c) 2015-2016 The btcsuite developers
// Use of this source code is governed by an ISC
// license that can be found in the LICENSE file.
package blockchain
import (
"fmt"
"github.com/cctip/bchd/chaincfg/chainhash"
"github.com/cctip/bchd/txscript"
"github.com/cctip/bchd/wire"
"github.com/gcash/bchutil"
)
// UtxoViewpoint represents a view into the set of unspent transaction outputs
// from a specific point of view in the chain. For example, it could be for
// the end of the main chain, some point in the history of the main chain, or
// down a side chain.
//
// The unspent outputs are needed by other transactions for things such as
// script validation and double spend prevention.
type UtxoViewpoint struct {
entries map[wire.OutPoint]*UtxoEntry
bestHash chainhash.Hash
}
// LookupEntry returns information about a given transaction output according to
// the current state of the view. It will return nil if the passed output does
// not exist in the view or is otherwise not available such as when it has been
// disconnected during a reorg.
func (view *UtxoViewpoint) LookupEntry(outpoint wire.OutPoint) *UtxoEntry {
return view.entries[outpoint]
}
// getEntry tries to get an entry from the view. If the entry is not in the
// view, both the returned entry and the error are nil.
func (view *UtxoViewpoint) getEntry(outpoint wire.OutPoint) (*UtxoEntry, error) {
return view.LookupEntry(outpoint), nil
}
// addEntry adds a new entry to the view. Set overwrite to true if this
// entry should overwrite any existing entry for the same outpoint.
func (view *UtxoViewpoint) addEntry(outpoint wire.OutPoint, entry *UtxoEntry, overwrite bool) error {
view.entries[outpoint] = entry
return nil
}
// spendEntry marks an entry as spent.
func (view *UtxoViewpoint) spendEntry(outpoint wire.OutPoint, putIfNil *UtxoEntry) error {
// If we don't have the entry yet, add it.
entry, found := view.entries[outpoint]
if !found {
entry = putIfNil
view.entries[outpoint] = entry
}
// Then mark it as spent.
entry.Spend()
return nil
}
// addTxOut adds the specified output to the view if it is not provably
// unspendable. When the view already has an entry for the output, it will be
// marked unspent. All fields will be updated for existing entries since it's
// possible it has changed during a reorg.
func (view *UtxoViewpoint) addTxOut(outpoint wire.OutPoint, txOut *wire.TxOut, isCoinBase bool, blockHeight int32) {
// Don't add provably unspendable outputs.
if txscript.IsUnspendable(txOut.PkScript) {
return
}
// Update existing entries. All fields are updated because it's
// possible (although extremely unlikely) that the existing entry is
// being replaced by a different transaction with the same hash. This
// is allowed so long as the previous transaction is fully spent.
entry := view.LookupEntry(outpoint)
if entry == nil {
entry = new(UtxoEntry)
view.entries[outpoint] = entry
}
pkScript := make([]byte, len(txOut.PkScript))
copy(pkScript, txOut.PkScript)
entry.amount = txOut.Value
entry.pkScript = pkScript
entry.blockHeight = blockHeight
entry.packedFlags = tfModified
if isCoinBase {
entry.packedFlags |= tfCoinBase
}
}
// AddTxOut adds the specified output of the passed transaction to the view if
// it exists and is not provably unspendable. When the view already has an
// entry for the output, it will be marked unspent. All fields will be updated
// for existing entries since it's possible it has changed during a reorg.
func (view *UtxoViewpoint) AddTxOut(tx *bchutil.Tx, txOutIdx uint32, blockHeight int32) {
// Can't add an output for an out of bounds index.
if txOutIdx >= uint32(len(tx.MsgTx().TxOut)) {
return
}
// Update existing entries. All fields are updated because it's
// possible (although extremely unlikely) that the existing entry is
// being replaced by a different transaction with the same hash. This
// is allowed so long as the previous transaction is fully spent.
prevOut := wire.OutPoint{Hash: *tx.Hash(), Index: txOutIdx}
txOut := tx.MsgTx().TxOut[txOutIdx]
view.addTxOut(prevOut, txOut, IsCoinBase(tx), blockHeight)
}
// AddTxOuts adds all outputs in the passed transaction which are not provably
// unspendable to the view. When the view already has entries for any of the
// outputs, they are simply marked unspent. All fields will be updated for
// existing entries since it's possible it has changed during a reorg.
func (view *UtxoViewpoint) AddTxOuts(tx *bchutil.Tx, blockHeight int32) {
// Loop all of the transaction outputs and add those which are not
// provably unspendable.
isCoinBase := IsCoinBase(tx)
prevOut := wire.OutPoint{Hash: *tx.Hash()}
for txOutIdx, txOut := range tx.MsgTx().TxOut {
// Update existing entries. All fields are updated because it's
// possible (although extremely unlikely) that the existing
// entry is being replaced by a different transaction with the
// same hash. This is allowed so long as the previous
// transaction is fully spent.
prevOut.Index = uint32(txOutIdx)
view.addTxOut(prevOut, txOut, isCoinBase, blockHeight)
}
}
// addInputUtxos adds the unspent transaction outputs for the inputs referenced
// by the transactions in the given block to the view. In particular, referenced
// entries that are earlier in the block are added to the view and entries that
// are already in the view are not modified.
func (view *UtxoViewpoint) addInputUtxos(source utxoView, block *bchutil.Block, ignoreOutOfOrder bool) error {
// Build a map of in-flight transactions because some of the inputs in
// this block could be referencing other transactions earlier in this
// block which are not yet in the chain.
txInFlight := map[chainhash.Hash]int{}
transactions := block.Transactions()
for i, tx := range transactions {
txInFlight[*tx.Hash()] = i
}
// Loop through all of the transaction inputs (except for the coinbase
// which has no inputs).
for i, tx := range block.Transactions()[1:] {
for _, txIn := range tx.MsgTx().TxIn {
originHash := &txIn.PreviousOutPoint.Hash
if inFlightIndex, ok := txInFlight[*originHash]; ok &&
(i >= inFlightIndex || ignoreOutOfOrder) {
originTx := transactions[inFlightIndex]
view.AddTxOuts(originTx, block.Height())
continue
}
// Don't do anything for entries that are already in the view.
if _, ok := view.entries[txIn.PreviousOutPoint]; ok {
continue
}
// Add the entry from the source.
			entry, err := source.getEntry(txIn.PreviousOutPoint)
			if err != nil {
				return err
			}
			if entry != nil {
				view.entries[txIn.PreviousOutPoint] = entry.Clone()
			}
}
}
return nil
}
func addTxOuts(view utxoView, tx *bchutil.Tx, blockHeight int32, overwrite bool) error {
// Add the transaction's outputs as available utxos.
isCoinBase := IsCoinBase(tx)
prevOut := wire.OutPoint{Hash: *tx.Hash()}
for txOutIdx, txOut := range tx.MsgTx().TxOut {
prevOut.Index = uint32(txOutIdx)
// Don't add provably unspendable outputs.
if txscript.IsUnspendable(txOut.PkScript) {
continue
}
// Create a new entry from the output.
pkScript := make([]byte, len(txOut.PkScript))
copy(pkScript, txOut.PkScript)
entry := &UtxoEntry{
amount: txOut.Value,
pkScript: pkScript,
blockHeight: blockHeight,
packedFlags: tfModified,
}
if isCoinBase
|
if !overwrite {
// If overwrite is false (i.e. we are not replaying blocks in
// recovery mode), this entry is fresh, meaning it can be pruned when
// it gets spent before the next flush.
entry.packedFlags |= tfFresh
}
// Add entry to the view.
if err := view.addEntry(prevOut, entry, overwrite); err != nil {
return err
}
}
return nil
}
// spendTransactionInputs spends the referenced utxos by marking them spent in the view and,
// if a slice was provided for the spent txout details, append an entry to it.
func spendTransactionInputs(view utxoView, tx *bchutil.Tx, stxos *[]SpentTxOut) error {
// Spend the referenced utxos by marking them spent in the view and,
// if a slice was provided for the spent txout details, append an entry
// to it.
for _, txIn := range tx.MsgTx().TxIn {
// Ensure the referenced utxo exists in the view. This should
		// never happen unless a bug is introduced in the code.
entry, err := view.getEntry(txIn.PreviousOutPoint)
if err != nil {
return err
}
if entry == nil {
return AssertError(fmt.Sprintf("view missing input %v",
txIn.PreviousOutPoint))
}
// Only create the stxo details if requested.
if stxos != nil {
pkScript := make([]byte, len(entry.PkScript()))
copy(pkScript, entry.PkScript())
// Populate the stxo details using the utxo entry.
var stxo = SpentTxOut{
Amount: entry.Amount(),
PkScript: pkScript,
Height: entry.BlockHeight(),
IsCoinBase: entry.IsCoinBase(),
}
*stxos = append(*stxos, stxo)
}
// Mark the entry as spent.
if err := view.spendEntry(txIn.PreviousOutPoint, entry); err != nil {
return err
}
}
return nil
}
// connectTransaction updates the view by adding all new utxos created by the
// passed transaction and marking all utxos that the transactions spend as
// spent. In addition, when the 'stxos' argument is not nil, it will be updated
// to append an entry for each spent txout. An error will be returned if the
// view does not contain the required utxos. Set overwrite to true if new
// entries should be allowed to overwrite existing not-fully-spent entries.
//
// If you iterate over a block of transactions and call connectTransaction on
// each one, you will necessarily validate the topological order as you go.
func connectTransaction(view utxoView, tx *bchutil.Tx, blockHeight int32, stxos *[]SpentTxOut, overwrite bool) error {
	// Skip input processing when tx is coinbase.
	if !IsCoinBase(tx) {
		if err := spendTransactionInputs(view, tx, stxos); err != nil {
			return err
		}
	}
	// Add the transaction's outputs as available utxos.
	if err := addTxOuts(view, tx, blockHeight, overwrite); err != nil {
		return err
	}
	return nil
}
// connectTransactions updates the view by adding all new utxos created by all
// of the transactions in the passed block, marking all utxos the transactions
// spend as spent, and setting the best hash for the view to the passed block.
// In addition, when the 'stxos' argument is not nil, it will be updated to
// append an entry for each spent txout. Set overwrite to true if new
// entries should be allowed to overwrite existing not-fully-spent entries.
//
// This function does NOT validate topological order and thus should not be
// used when topological order is needed.
func connectTransactions(view utxoView, block *bchutil.Block, stxos *[]SpentTxOut, overwrite bool) error {
for _, tx := range block.Transactions() {
err := addTxOuts(view, tx, block.Height(), overwrite)
if err != nil {
return err
}
}
for _, tx := range block.Transactions() {
if !IsCoinBase(tx) {
err := spendTransactionInputs(view, tx, stxos)
if err != nil {
return err
}
}
}
return nil
}
// disconnectTransactions updates the view by removing all of the transactions
// created by the passed block, restoring all utxos the transactions spent by
// using the provided spent txo information, and setting the best hash for the
// view to the block before the passed block.
//
// This function is safe to use on both TTOR and CTOR blocks. It will not,
// however, validate any ordering.
func disconnectTransactions(view utxoView, block *bchutil.Block, stxos []SpentTxOut) error {
// Sanity check the correct number of stxos are provided.
if len(stxos) != countSpentOutputs(block) {
return AssertError("disconnectTransactions called with bad " +
"spent transaction out information")
}
// Loop backwards through all transactions so everything is unspent in
// reverse order.
stxoIdx := len(stxos) - 1
transactions := block.Transactions()
for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
tx := transactions[txIdx]
// All entries will need to potentially be marked as a coinbase.
isCoinBase := txIdx == 0
// Loop backwards through all of the transaction inputs (except
// for the coinbase which has no inputs) and unspend the
// referenced txos. This is necessary to match the order of the
// spent txout entries.
if isCoinBase {
continue
}
for txInIdx := len(tx.MsgTx().TxIn) - 1; txInIdx > -1; txInIdx-- {
originOut := tx.MsgTx().TxIn[txInIdx].PreviousOutPoint
// Ensure the spent txout index is decremented to stay
// in sync with the transaction input.
stxo := &stxos[stxoIdx]
stxoIdx--
pkScript := make([]byte, len(stxo.PkScript))
copy(pkScript, stxo.PkScript)
entry := &UtxoEntry{
amount: stxo.Amount,
pkScript: pkScript,
blockHeight: stxo.Height,
packedFlags: tfModified,
}
if stxo.IsCoinBase {
entry.packedFlags |= tfCoinBase
}
// Then store the entry in the view.
if err := view.addEntry(originOut, entry, true); err != nil {
return err
}
}
}
// Mark all of the spendable outputs originally created by the
// transaction as spent. It is instructive to note that while
// the outputs aren't actually being spent here, rather they no
// longer exist, since a pruned utxo set is used, there is no
// practical difference between a utxo that does not exist and
// one that has been spent.
//
// When the utxo does not already exist in the view, add an
// entry for it and then mark it spent. This is done because
// the code relies on its existence in the view in order to
// signal modifications have happened.
for txIdx := len(transactions) - 1; txIdx > -1; txIdx-- {
tx := transactions[txIdx]
isCoinBase := txIdx == 0
var packedFlags txoFlags
if isCoinBase {
packedFlags |= tfCoinBase
}
txHash := tx.Hash()
prevOut := wire.OutPoint{Hash: *txHash}
for txOutIdx, txOut := range tx.MsgTx().TxOut {
if txscript.IsUnspendable(txOut.PkScript) {
continue
}
prevOut.Index = uint32(txOutIdx)
pkScript := make([]byte, len(txOut.PkScript))
copy(pkScript, txOut.PkScript)
// Mark the entry as spent. To make sure the view has the entry,
// create one to pass along.
entry := &UtxoEntry{
amount: txOut.Value,
pkScript: pkScript,
blockHeight: block.Height(),
packedFlags: packedFlags,
}
if err := view.spendEntry(prevOut, entry); err != nil {
return err
}
}
}
return nil
}
// RemoveEntry removes the given transaction output from the current state of
// the view. It will have no effect if the passed output does not exist in the
// view.
func (view *UtxoViewpoint) RemoveEntry(outpoint wire.OutPoint) {
delete(view.entries, outpoint)
}
// Entries returns the underlying map that stores all of the utxo entries.
func (view *UtxoViewpoint) Entries() map[wire.OutPoint]*UtxoEntry {
return view.entries
}
// prune prunes all entries marked modified that are now fully spent and marks
// all entries as unmodified.
func (view *UtxoViewpoint) prune() {
for outpoint, entry := range view.entries {
if entry == nil || (entry.isModified() && entry.IsSpent()) {
delete(view.entries, outpoint)
continue
}
		// Clear (not toggle) the modified flag so entries that were never
		// modified stay unmodified.
		entry.packedFlags &^= tfModified
}
}
// NewUtxoViewpoint returns a new empty unspent transaction output view.
func NewUtxoViewpoint() *UtxoViewpoint {
return &UtxoViewpoint{
entries: make(map[wire.OutPoint]*UtxoEntry),
}
}
|
{
entry.packedFlags |= tfCoinBase
}
|
scales.py
|
"""
The intensity measurement scale has changed, and might change again.
Therefore, I need this module to translate between numeric intensity scores
and casualty numbers.
"""
from typing import Optional
from datetime import date
import pydantic
class CasualtyRange(pydantic.BaseModel):
lower: int
upper: Optional[int]
text: Optional[str]
@property
def zero(self):
return self.upper == 0
SCALES = {
# The old scale
|
2: CasualtyRange(lower=26,upper=99),
3: CasualtyRange(lower=100,upper=999),
4: CasualtyRange(lower=1000,upper=None),
},
# The current scale
date(2021,1,1):{
0: CasualtyRange(lower=1,upper=25,text="Low"),
1: CasualtyRange(lower=26,upper=99,text="Medium"),
2: CasualtyRange(lower=100,upper=None,text="High"),
}
}
def scaled(date: date, intensity_value: int) -> CasualtyRange:
if intensity_value < 0:
return CasualtyRange(lower=0,upper=0)
    valid_scales = {k: v for k, v in SCALES.items() if k <= date}
    scale_for_date = SCALES[max(valid_scales)]
return scale_for_date[intensity_value]
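# Editorial usage sketch (not part of the original module): the same numeric
# score maps to a different casualty range depending on when it was recorded.
if __name__ == "__main__":
    r = scaled(date(2020, 6, 1), 2)
    assert (r.lower, r.upper) == (26, 99)  # old scale: score 2 means 26-99
    assert scaled(date(2021, 6, 1), 2).text == "High"  # current scale: 100+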
|
date(1,1,1):{
0: CasualtyRange(lower=0,upper=1),
1: CasualtyRange(lower=2,upper=25),
|
docker.go
|
/*
Copyright 2016 Padduck, LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"encoding/json"
"errors"
"fmt"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/docker/client"
"github.com/docker/go-connections/nat"
"github.com/pufferpanel/pufferpanel/v2"
"github.com/pufferpanel/pufferpanel/v2/logging"
"github.com/pufferpanel/pufferpanel/v2/messages"
"io"
"io/ioutil"
"os"
"path"
"runtime"
"syscall"
"time"
)
type docker struct {
*pufferpanel.BaseEnvironment
ContainerId string `json:"-"`
ImageName string `json:"image"`
Binds map[string]string `json:"bindings,omitempty"`
NetworkMode string `json:"networkMode,omitempty"`
Network string `json:"networkName,omitempty"`
Ports []string `json:"portBindings,omitempty"`
Resources container.Resources `json:"resources,omitempty"`
ExposedPorts nat.PortSet `json:"exposedPorts,omitempty"`
connection types.HijackedResponse
cli *client.Client
downloadingImage bool
}
func (d *docker) dockerExecuteAsync(steps pufferpanel.ExecutionData) error {
running, err := d.IsRunning()
if err != nil {
return err
}
if running {
return pufferpanel.ErrContainerRunning
}
d.Wait.Wait()
if d.downloadingImage {
return pufferpanel.ErrImageDownloading
}
	dockerClient, err := d.getClient()
	if err != nil {
		return err
	}
	ctx := context.Background()
//TODO: This logic may not work anymore, it's complicated to use an existing container with install/uninstall
exists, err := d.doesContainerExist(dockerClient, ctx)
if err != nil {
return err
}
if exists {
return errors.New("docker container already exists")
}
err = d.createContainer(dockerClient, ctx, steps.Command, steps.Arguments, steps.Environment, steps.WorkingDirectory)
if err != nil {
return err
}
config := types.ContainerAttachOptions{
Stdin: true,
Stdout: true,
Stderr: true,
Stream: true,
}
d.connection, err = dockerClient.ContainerAttach(ctx, d.ContainerId, config)
if err != nil {
return err
}
d.Wait.Add(1)
go func() {
defer d.connection.Close()
wrapper := d.CreateWrapper()
_, _ = io.Copy(wrapper, d.connection.Reader)
//because we use the auto-delete, we don't manually stop the container
//c, _ := d.getClient()
//err = c.ContainerStop(context.Background(), d.ContainerId, nil)
d.Wait.Done()
if err != nil {
			logging.Error().Printf("Error stopping container %s: %s", d.ContainerId, err)
}
msg := messages.Status{Running: false}
_ = d.WSManager.WriteMessage(msg)
if steps.Callback != nil {
steps.Callback(err == nil)
}
}()
startOpts := types.ContainerStartOptions{}
msg := messages.Status{Running: true}
_ = d.WSManager.WriteMessage(msg)
d.DisplayToConsole(true, "Starting container\n")
err = dockerClient.ContainerStart(ctx, d.ContainerId, startOpts)
if err != nil {
return err
}
return err
}
func (d *docker) ExecuteInMainProcess(cmd string) (err error) {
running, err := d.IsRunning()
if err != nil {
return
}
if !running {
err = pufferpanel.ErrServerOffline
return
}
_, _ = d.connection.Conn.Write([]byte(cmd + "\n"))
return
}
func (d *docker) Kill() (err error) {
running, err := d.IsRunning()
if err != nil {
return err
}
if !running {
return
}
dockerClient, err := d.getClient()
if err != nil {
return err
}
err = dockerClient.ContainerKill(context.Background(), d.ContainerId, "SIGKILL")
return
}
func (d *docker) Create() error {
return os.Mkdir(d.RootDirectory, 0755)
}
func (d *docker) IsRunning() (bool, error) {
dockerClient, err := d.getClient()
if err != nil {
return false, err
}
ctx := context.Background()
exists, err := d.doesContainerExist(dockerClient, ctx)
if !exists {
return false, err
}
stats, err := dockerClient.ContainerInspect(ctx, d.ContainerId)
if err != nil {
return false, err
}
return stats.State.Running, nil
}
func (d *docker) GetStats() (*pufferpanel.ServerStats, error) {
running, err := d.IsRunning()
if err != nil {
return nil, err
}
if !running {
return nil, pufferpanel.ErrServerOffline
}
dockerClient, err := d.getClient()
if err != nil {
return nil, err
}
ctx := context.Background()
res, err := dockerClient.ContainerStats(ctx, d.ContainerId, false)
defer func() {
if res.Body != nil {
pufferpanel.Close(res.Body)
}
}()
if err != nil {
return nil, err
}
data := &types.StatsJSON{}
err = json.NewDecoder(res.Body).Decode(&data)
if err != nil {
return nil, err
}
return &pufferpanel.ServerStats{
Memory: calculateMemoryPercent(data),
Cpu: calculateCPUPercent(data),
}, nil
}
func (d *docker) WaitForMainProcess() error {
return d.WaitForMainProcessFor(0)
}
func (d *docker) WaitForMainProcessFor(timeout int) (err error) {
running, err := d.IsRunning()
if err != nil {
return
}
if running {
if timeout > 0 {
var timer = time.AfterFunc(time.Duration(timeout)*time.Millisecond, func() {
err = d.Kill()
})
d.Wait.Wait()
timer.Stop()
} else {
d.Wait.Wait()
}
}
return
}
func (d *docker) getClient() (*client.Client, error) {
	var err error
	if d.cli == nil {
		d.cli, err = client.NewClientWithOpts(client.FromEnv)
		if err != nil {
			return nil, err
		}
		ctx := context.Background()
		d.cli.NegotiateAPIVersion(ctx)
	}
return d.cli, err
}
func (d *docker) doesContainerExist(client *client.Client, ctx context.Context) (bool, error) {
opts := types.ContainerListOptions{
Filters: filters.NewArgs(),
}
opts.All = true
opts.Filters.Add("name", d.ContainerId)
existingContainers, err := client.ContainerList(ctx, opts)
	if err != nil {
		return false, err
	}
	return len(existingContainers) > 0, nil
}
func (d *docker) pullImage(client *client.Client, ctx context.Context, force bool) error {
exists := false
opts := types.ImageListOptions{
All: true,
Filters: filters.NewArgs(),
}
opts.Filters.Add("reference", d.ImageName)
images, err := client.ImageList(ctx, opts)
if err != nil {
return err
}
if len(images) >= 1 {
exists = true
}
logging.Debug().Printf("Does image %v exist? %v", d.ImageName, exists)
if exists && !force {
return nil
}
op := types.ImagePullOptions{}
logging.Debug().Printf("Downloading image %v", d.ImageName)
d.DisplayToConsole(true, "Downloading image for container, please wait\n")
d.downloadingImage = true
	r, err := client.ImagePull(ctx, d.ImageName, op)
	if err != nil {
		return err
	}
	defer pufferpanel.Close(r)
_, err = io.Copy(ioutil.Discard, r)
d.downloadingImage = false
logging.Debug().Printf("Downloaded image %v", d.ImageName)
d.DisplayToConsole(true, "Downloaded image for container\n")
return err
}
func (d *docker) createContainer(client *client.Client, ctx context.Context, cmd string, args []string, env map[string]string, workDir string) error {
logging.Debug().Printf("Creating container")
containerRoot := "/pufferpanel"
err := d.pullImage(client, ctx, false)
if err != nil {
return err
}
	cmdSlice := strslice.StrSlice{}
	cmdSlice = append(cmdSlice, cmd)
	cmdSlice = append(cmdSlice, args...)
//newEnv := os.Environ()
newEnv := []string{"HOME=" + containerRoot}
for k, v := range env {
newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, v))
}
if workDir == "" {
workDir = containerRoot
}
logging.Debug().Printf("Container command: %s\n", cmdSlice)
containerConfig := &container.Config{
AttachStderr: true,
AttachStdin: true,
AttachStdout: true,
Tty: true,
OpenStdin: true,
NetworkDisabled: false,
Cmd: cmdSlice,
Image: d.ImageName,
WorkingDir: workDir,
Env: newEnv,
ExposedPorts: d.ExposedPorts,
}
if runtime.GOOS == "linux" {
containerConfig.User = fmt.Sprintf("%d:%d", os.Getuid(), os.Getgid())
}
dir := d.RootDirectory
//convert root dir to a full path, so we can bind it
if !path.IsAbs(dir) {
pwd, err := os.Getwd()
if err != nil {
return err
}
dir = path.Join(pwd, dir)
}
hostConfig := &container.HostConfig{
AutoRemove: true,
NetworkMode: container.NetworkMode(d.NetworkMode),
Resources: d.Resources,
Binds: []string{dir + ":" + containerRoot},
PortBindings: nat.PortMap{},
}
for k, v := range d.Binds {
hostConfig.Binds = append(hostConfig.Binds, k+":"+v)
}
networkConfig := &network.NetworkingConfig{}
_, bindings, err := nat.ParsePortSpecs(d.Ports)
if err != nil {
return err
}
hostConfig.PortBindings = bindings
_, err = client.ContainerCreate(ctx, containerConfig, hostConfig, networkConfig, d.ContainerId)
return err
}
func (d *docker) SendCode(code int) error {
running, err := d.IsRunning()
if err != nil || !running {
return err
}
dockerClient, err := d.getClient()
if err != nil {
return err
}
ctx := context.Background()
return dockerClient.ContainerKill(ctx, d.ContainerId, syscall.Signal(code).String())
}
func calculateCPUPercent(v *types.StatsJSON) float64 {
// Max number of 100ns intervals between the previous time read and now
possIntervals := uint64(v.Read.Sub(v.PreRead).Nanoseconds()) // Start with number of ns intervals
possIntervals /= 100 // Convert to number of 100ns intervals
//possIntervals *= uint64(v.NumProcs) // Multiple by the number of processors
// Intervals used
intervalsUsed := v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage
// Percentage avoiding divide-by-zero
if possIntervals > 0 {
return float64(intervalsUsed) / float64(possIntervals)
}
return 0.00
}
func
|
(v *types.StatsJSON) float64 {
return float64(v.MemoryStats.Usage)
}
|
calculateMemoryPercent
|
build_plan.rs
|
// Copyright (c) The Diem Core Contributors
// Copyright (c) The Move Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
compilation::compiled_package::CompiledPackage, resolution::resolution_graph::ResolvedGraph,
source_package::parsed_manifest::PackageName,
};
use anyhow::Result;
use move_compiler::{compiled_unit::AnnotatedCompiledUnit, diagnostics::FilesSourceText, Compiler};
use petgraph::algo::toposort;
use std::{collections::BTreeSet, io::Write, path::Path};
use super::package_layout::CompiledPackageLayout;
#[cfg(feature = "evm-backend")]
use {
colored::Colorize,
move_to_yul::{options::Options as MoveToYulOptions, run_to_yul},
std::{fs, io},
termcolor::Buffer,
walkdir::WalkDir,
};
#[derive(Debug, Clone)]
pub struct BuildPlan {
root: PackageName,
|
}
#[cfg(feature = "evm-backend")]
fn should_recompile(
source_paths: impl IntoIterator<Item = impl AsRef<Path>>,
output_paths: impl IntoIterator<Item = impl AsRef<Path>>,
) -> Result<bool> {
let mut earliest_output_mod_time = None;
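    // Track the oldest modification time among the output artifacts; any
    // source or manifest newer than it forces a rebuild.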
for output_path in output_paths.into_iter() {
match fs::metadata(output_path) {
Ok(meta) => {
let mod_time = meta
.modified()
.expect("failed to get file modification time");
match &mut earliest_output_mod_time {
None => earliest_output_mod_time = Some(mod_time),
                    Some(earliest_mod_time) => *earliest_mod_time = mod_time.min(*earliest_mod_time),
}
}
Err(err) => {
if let io::ErrorKind::NotFound = err.kind() {
return Ok(true);
}
return Err(err.into());
}
}
}
let earliest_output_mod_time = match earliest_output_mod_time {
Some(mod_time) => mod_time,
None => panic!("no output files given -- this should not happen"),
};
for source_path in source_paths.into_iter() {
for entry in WalkDir::new(source_path) {
let entry = entry?;
let mod_time = entry
.metadata()?
.modified()
.expect("failed to get file modification time");
if mod_time > earliest_output_mod_time {
return Ok(true);
}
}
}
Ok(false)
}
impl BuildPlan {
pub fn create(resolution_graph: ResolvedGraph) -> Result<Self> {
let mut sorted_deps = match toposort(&resolution_graph.graph, None) {
Ok(nodes) => nodes,
Err(err) => {
                // The graph is a DAG after resolution; otherwise an error should already have been raised.
anyhow::bail!("IPE: Cyclic dependency found after resolution {:?}", err)
}
};
sorted_deps.reverse();
Ok(Self {
root: resolution_graph.root_package.package.name,
sorted_deps,
resolution_graph,
})
}
pub fn compile<W: Write>(&self, writer: &mut W) -> Result<CompiledPackage> {
self.compile_with_driver(writer, |compiler| compiler.build_and_report())
}
pub fn compile_with_driver<W: Write>(
&self,
writer: &mut W,
mut compiler_driver: impl FnMut(
Compiler,
)
-> anyhow::Result<(FilesSourceText, Vec<AnnotatedCompiledUnit>)>,
) -> Result<CompiledPackage> {
let root_package = &self.resolution_graph.package_table[&self.root];
let project_root = match &self.resolution_graph.build_options.install_dir {
Some(under_path) => under_path.clone(),
None => self.resolution_graph.root_package_path.clone(),
};
let immediate_dependencies_names =
root_package.immediate_dependencies(&self.resolution_graph);
let transitive_dependencies = root_package
.transitive_dependencies(&self.resolution_graph)
.into_iter()
.map(|package_name| {
let dep_package = self
.resolution_graph
.package_table
.get(&package_name)
.unwrap();
let dep_source_paths = dep_package
.get_sources(&self.resolution_graph.build_options)
.unwrap();
(
package_name,
immediate_dependencies_names.contains(&package_name),
dep_source_paths,
&dep_package.resolution_table,
)
})
.collect();
let compiled = CompiledPackage::build_all(
writer,
&project_root,
root_package.clone(),
transitive_dependencies,
&self.resolution_graph,
&mut compiler_driver,
)?;
Self::clean(
&project_root.join(CompiledPackageLayout::Root.path()),
self.sorted_deps.iter().copied().collect(),
)?;
Ok(compiled)
}
#[cfg(feature = "evm-backend")]
pub fn compile_evm<W: Write>(&self, writer: &mut W) -> Result<()> {
let root_package = &self.resolution_graph.package_table[&self.root];
let project_root = match &self.resolution_graph.build_options.install_dir {
Some(under_path) => under_path.clone(),
None => self.resolution_graph.root_package_path.clone(),
};
let build_root_path = project_root
.join(CompiledPackageLayout::Root.path())
.join("evm");
// Step 1: Compile Move into Yul
// Step 1a: Gather command line arguments for move-to-yul
let dependencies = self
.resolution_graph
.package_table
.iter()
.filter_map(|(name, package)| {
if name == &root_package.source_package.package.name {
None
} else {
Some(format!(
"{}/sources",
package.package_path.to_string_lossy()
))
}
})
.collect::<Vec<_>>();
let sources = vec![format!(
"{}/sources",
root_package.package_path.to_string_lossy()
)];
let bytecode_output = format!(
"{}/{}.bin",
build_root_path.to_string_lossy(),
root_package.source_package.package.name
);
let yul_output = format!(
"{}/{}.yul",
build_root_path.to_string_lossy(),
root_package.source_package.package.name
);
let abi_output = format!(
"{}/{}.abi.json",
build_root_path.to_string_lossy(),
root_package.source_package.package.name
);
let output_paths = [&bytecode_output, &yul_output, &abi_output];
let package_names = self
.resolution_graph
.package_table
.iter()
.map(|(name, _)| name.to_string())
.collect::<Vec<_>>()
.join(", ");
let named_address_mapping = self
.resolution_graph
.extract_named_address_mapping()
.map(|(name, addr)| format!("{}={}", name.as_str(), addr))
.collect();
// Step 1b: Check if a fresh compilation is really needed. Only recompile if either
// a) Some of the output artifacts are missing
// b) Any source files have been modified since last compile
let manifests = self
.resolution_graph
.package_table
.iter()
.map(|(_name, package)| format!("{}/Move.toml", package.package_path.to_string_lossy()))
.collect::<Vec<_>>();
let all_sources = manifests
.iter()
.chain(sources.iter())
.chain(dependencies.iter());
if !should_recompile(all_sources, output_paths)? {
writeln!(writer, "{} {}", "CACHED".bold().green(), package_names)?;
return Ok(());
}
// Step 1c: Call move-to-yul
writeln!(
writer,
"{} {} to Yul",
"COMPILING".bold().green(),
package_names
)?;
if let Err(err) = std::fs::remove_dir_all(&build_root_path) {
match err.kind() {
io::ErrorKind::NotFound => (),
_ => {
writeln!(
writer,
"{} Failed to remove build dir {}: {}",
"ERROR".bold().red(),
build_root_path.to_string_lossy(),
err,
)?;
return Err(err.into());
}
}
}
if let Err(err) = std::fs::create_dir_all(&build_root_path) {
writeln!(
writer,
"{} Failed to create build dir {}",
"ERROR".bold().red(),
build_root_path.to_string_lossy(),
)?;
return Err(err.into());
}
// TODO: should inherit color settings from current shell
let mut error_buffer = Buffer::ansi();
if let Err(err) = run_to_yul(
&mut error_buffer,
MoveToYulOptions {
dependencies,
named_address_mapping,
sources,
output: yul_output.clone(),
abi_output,
..MoveToYulOptions::default()
},
) {
            writeln!(
                writer,
                "{} Failed to compile Move into Yul: {}",
                "ERROR".bold().red(),
                err,
            )?;
writeln!(
writer,
"{}",
std::str::from_utf8(error_buffer.as_slice()).unwrap()
)?;
let mut source = err.source();
while let Some(s) = source {
writeln!(writer, "{}", s)?;
source = s.source();
}
return Err(err);
}
// Step 2: Compile Yul into bytecode using solc
let yul_source = match std::fs::read_to_string(&yul_output) {
Ok(yul_source) => yul_source,
Err(err) => {
writeln!(
writer,
"{} Failed to read from {}",
"ERROR".bold().red(),
yul_output,
)?;
return Err(err.into());
}
};
writeln!(
writer,
"{} EVM bytecote from Yul",
"GENERATING".bold().green(),
)?;
match evm_exec_utils::compile::solc_yul(&yul_source, false) {
Ok((bytecode, _)) => {
let mut bytecode_file = match std::fs::File::create(&bytecode_output) {
Ok(file) => file,
Err(err) => {
writeln!(
writer,
"{} Failed to create bytecode output {}",
"ERROR".bold().red(),
bytecode_output,
)?;
return Err(err.into());
}
};
if let Err(err) = bytecode_file.write_all(hex::encode(&bytecode).as_bytes()) {
writeln!(
writer,
"{} Failed to write bytecode to file {}",
"ERROR".bold().red(),
bytecode_output,
)?;
return Err(err.into());
}
}
Err(err) => {
writeln!(
writer,
"{} Failed to generate EVM bytecote",
"ERROR".bold().red()
)?;
let mut source = err.source();
while let Some(s) = source {
writeln!(writer, "{}", s)?;
source = s.source();
}
return Err(err);
}
}
Ok(())
}
// Clean out old packages that are no longer used, or no longer used under the current
// compilation flags
fn clean(build_root: &Path, keep_paths: BTreeSet<PackageName>) -> Result<()> {
for dir in std::fs::read_dir(build_root)? {
let path = dir?.path();
if !keep_paths.iter().any(|name| path.ends_with(name.as_str())) {
std::fs::remove_dir_all(&path)?;
}
}
Ok(())
}
}
|
sorted_deps: Vec<PackageName>,
resolution_graph: ResolvedGraph,
|
Content.tsx
|
import React from 'react'
import Store from 'src/store'
import ContainerExample from 'components/examples/ContainerExample'
import TypeExample from 'components/examples/TypeExample'
import InsertExample from 'components/examples/InsertExample'
import AnimationWrapper from 'components/examples/AnimationExample'
import CustomContentExample from 'components/examples/CustomContentExample'
function ExampleHeading() {
return (
<div className="row">
<div className="col-lg-6 offset-lg-3 column col-md-10 offset-md-1 col-sm-12 heading">
<h2 className="text-center">Examples</h2>
<button type="button" className="btn btn-outline-danger" onClick={() => Store.removeAllNotifications()}>
Remove All Notifications
</button>
<div className="alert alert-warning alert-small">
<i className="fa fa-info-circle"></i>
All notifications have been set to be automatically dismissed after{' '}
<code className="white-code">5000ms</code>. Notifications can be manually dismissed by{' '}
<code className="white-code">clicking</code> or by{' '}
<code className="white-code">swiping</code> on mobile devices.
</div>
</div>
</div>
)
}
function Examples() {
return (
<React.Fragment>
<ExampleHeading />
<ContainerExample />
<TypeExample />
<CustomContentExample />
<InsertExample />
<AnimationWrapper />
</React.Fragment>
)
}
function
|
() {
return (
<div className="content">
<div className="container">
<Examples />
</div>
</div>
)
}
export default Content
|
Content
|
test_ne.py
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_ne(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = flow.ne(input, other)
of_out2 = flow.not_equal(input, other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
test_case.assertTrue(np.array_equal(of_out2.numpy(), np_out))
def _test_tensor_ne_operator(test_case, shape, device):
arr1 = np.random.randn(*shape)
arr2 = np.random.randn(*shape)
input = flow.tensor(arr1, dtype=flow.float32, device=flow.device(device))
other = flow.tensor(arr2, dtype=flow.float32, device=flow.device(device))
of_out = input.ne(other)
np_out = np.not_equal(arr1, arr2)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def
|
(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_int(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_ne_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = flow.ne(input, num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
def _test_tensor_ne_operator_float(test_case, shape, device):
arr = np.random.randn(*shape)
input = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
num = 1.0
of_out = input.ne(num)
np_out = np.not_equal(arr, num)
test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))
@flow.unittest.skip_unless_1n1d()
class TestNe(flow.unittest.TestCase):
def test_ne(test_case):
arg_dict = OrderedDict()
arg_dict["test_func"] = [
_test_ne,
_test_tensor_ne_operator,
_test_ne_int,
_test_tensor_ne_operator_int,
_test_ne_float,
_test_tensor_ne_operator_float,
]
arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
@autotest(auto_backward=False, check_graph=False)
def test_ne_with_0shape_data(test_case):
device = random_device()
x1 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
x2 = random_pytorch_tensor(4, 2, 3, 0, 5).to(device)
y1 = torch.ne(x1, x2)
y2 = torch.ne(x1, 2)
y3 = torch.ne(x1, 2.0)
return (y1, y2, y3)
if __name__ == "__main__":
unittest.main()
|
_test_ne_int
|
edit_script_runner.py
|
from xml.dom import Node
from htmltreediff.util import (
get_child,
get_location,
remove_node,
insert_or_append,
)
class EditScriptRunner(object):
def __init__(self, dom, edit_script):
self.dom = dom
self.edit_script = edit_script
self.del_nodes = []
self.ins_nodes = []
# edit script actions #
def action_delete(self, node):
parent = node.parentNode
next_sibling = node.nextSibling
remove_node(node)
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.del_nodes.append(node)
def action_insert(
self,
parent,
child_index,
node_type=None,
node_name=None,
node_value=None,
attributes=None,
):
node = None
if node_type == Node.ELEMENT_NODE:
node = self.dom.createElement(node_name)
if attributes:
for key, value in attributes.items():
node.setAttribute(key, value)
elif node_type == Node.TEXT_NODE:
node = self.dom.createTextNode(node_value)
if node is not None:
|
def action_insert_node(self, parent, child_index, node):
next_sibling = get_child(parent, child_index)
insert_or_append(parent, node, next_sibling)
# add node to ins_nodes
assert node.parentNode is not None
node.orig_parent = parent
node.orig_next_sibling = next_sibling
self.ins_nodes.append(node)
# script running #
def run_edit_script(self):
"""
Run an xml edit script, and return the new html produced.
"""
for action, location, properties in self.edit_script:
if action == 'delete':
node = get_location(self.dom, location)
self.action_delete(node)
elif action == 'insert':
parent = get_location(self.dom, location[:-1])
child_index = location[-1]
self.action_insert(parent, child_index, **properties)
return self.dom
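# Editorial sketch of how the runner is driven. Locations are child-index
# paths as interpreted by htmltreediff.util.get_location; the values below
# are illustrative, not verified against that helper.
#
#   from xml.dom import minidom
#   dom = minidom.parseString('<body><h1>title</h1></body>')
#   script = [
#       ('insert', [0, 0, 0], {'node_type': Node.TEXT_NODE, 'node_value': 'hi'}),
#   ]
#   new_dom = EditScriptRunner(dom, script).run_edit_script()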
|
self.action_insert_node(parent, child_index, node)
|
FieldModelStructList.go
|
// Automatically generated by the Fast Binary Encoding compiler, do not modify!
// https://github.com/chronoxor/FastBinaryEncoding
// Source: test.fbe
// Version: 1.8.0.0
package test
import "errors"
import "../fbe"
import "../proto"
// Workaround for Go unused imports issue
var _ = errors.New
var _ = fbe.Version
var _ = proto.Version
// Fast Binary Encoding StructList field model
type FieldModelStructList struct {
// Field model buffer
buffer *fbe.Buffer
// Field model buffer offset
offset int
F1 *FieldModelVectorByte
F2 *FieldModelVectorOptionalByte
F3 *FieldModelVectorBytes
F4 *FieldModelVectorOptionalBytes
F5 *FieldModelVectorEnumSimple
F6 *FieldModelVectorOptionalEnumSimple
F7 *FieldModelVectorFlagsSimple
F8 *FieldModelVectorOptionalFlagsSimple
F9 *FieldModelVectorStructSimple
F10 *FieldModelVectorOptionalStructSimple
}
// Create a new StructList field model
func NewFieldModelStructList(buffer *fbe.Buffer, offset int) *FieldModelStructList {
fbeResult := FieldModelStructList{buffer: buffer, offset: offset}
fbeResult.F1 = NewFieldModelVectorByte(buffer, 4 + 4)
fbeResult.F2 = NewFieldModelVectorOptionalByte(buffer, fbeResult.F1.FBEOffset() + fbeResult.F1.FBESize())
fbeResult.F3 = NewFieldModelVectorBytes(buffer, fbeResult.F2.FBEOffset() + fbeResult.F2.FBESize())
fbeResult.F4 = NewFieldModelVectorOptionalBytes(buffer, fbeResult.F3.FBEOffset() + fbeResult.F3.FBESize())
fbeResult.F5 = NewFieldModelVectorEnumSimple(buffer, fbeResult.F4.FBEOffset() + fbeResult.F4.FBESize())
fbeResult.F6 = NewFieldModelVectorOptionalEnumSimple(buffer, fbeResult.F5.FBEOffset() + fbeResult.F5.FBESize())
fbeResult.F7 = NewFieldModelVectorFlagsSimple(buffer, fbeResult.F6.FBEOffset() + fbeResult.F6.FBESize())
fbeResult.F8 = NewFieldModelVectorOptionalFlagsSimple(buffer, fbeResult.F7.FBEOffset() + fbeResult.F7.FBESize())
fbeResult.F9 = NewFieldModelVectorStructSimple(buffer, fbeResult.F8.FBEOffset() + fbeResult.F8.FBESize())
fbeResult.F10 = NewFieldModelVectorOptionalStructSimple(buffer, fbeResult.F9.FBEOffset() + fbeResult.F9.FBESize())
return &fbeResult
}
// Get the field size
func (fm *FieldModelStructList) FBESize() int { return 4 }
// Get the field body size
func (fm *FieldModelStructList) FBEBody() int {
fbeResult := 4 + 4 +
fm.F1.FBESize() +
fm.F2.FBESize() +
fm.F3.FBESize() +
fm.F4.FBESize() +
fm.F5.FBESize() +
fm.F6.FBESize() +
fm.F7.FBESize() +
fm.F8.FBESize() +
fm.F9.FBESize() +
fm.F10.FBESize() +
0
return fbeResult
}
// Get the field extra size
func (fm *FieldModelStructList) FBEExtra() int {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return 0
}
fbeStructOffset := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()))
if (fbeStructOffset == 0) || ((fm.buffer.Offset() + fbeStructOffset + 4) > fm.buffer.Size()) {
return 0
}
fm.buffer.Shift(fbeStructOffset)
fbeResult := fm.FBEBody() +
fm.F1.FBEExtra() +
fm.F2.FBEExtra() +
fm.F3.FBEExtra() +
fm.F4.FBEExtra() +
fm.F5.FBEExtra() +
fm.F6.FBEExtra() +
fm.F7.FBEExtra() +
fm.F8.FBEExtra() +
fm.F9.FBEExtra() +
fm.F10.FBEExtra() +
0
fm.buffer.Unshift(fbeStructOffset)
return fbeResult
}
// Get the field type
func (fm *FieldModelStructList) FBEType() int { return 131 }
// Get the field offset
func (fm *FieldModelStructList) FBEOffset() int { return fm.offset }
// Set the field offset
func (fm *FieldModelStructList) SetFBEOffset(value int) { fm.offset = value }
// Shift the current field offset
func (fm *FieldModelStructList) FBEShift(size int) { fm.offset += size }
// Unshift the current field offset
func (fm *FieldModelStructList) FBEUnshift(size int) { fm.offset -= size }
// Check if the struct value is valid
func (fm *FieldModelStructList) Verify() bool { return fm.VerifyType(true) }
// Check if the struct value and its type are valid
func (fm *FieldModelStructList) VerifyType(fbeVerifyType bool) bool {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return true
}
fbeStructOffset := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()))
if (fbeStructOffset == 0) || ((fm.buffer.Offset() + fbeStructOffset + 4 + 4) > fm.buffer.Size()) {
return false
}
fbeStructSize := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeStructOffset))
if fbeStructSize < (4 + 4) {
return false
}
fbeStructType := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeStructOffset + 4))
if fbeVerifyType && (fbeStructType != fm.FBEType()) {
return false
}
fm.buffer.Shift(fbeStructOffset)
fbeResult := fm.VerifyFields(fbeStructSize)
fm.buffer.Unshift(fbeStructOffset)
return fbeResult
}
// Check if the struct value fields are valid
func (fm *FieldModelStructList) VerifyFields(fbeStructSize int) bool {
fbeCurrentSize := 4 + 4
if (fbeCurrentSize + fm.F1.FBESize()) > fbeStructSize {
return true
}
if !fm.F1.Verify() {
return false
}
fbeCurrentSize += fm.F1.FBESize()
if (fbeCurrentSize + fm.F2.FBESize()) > fbeStructSize {
return true
}
if !fm.F2.Verify() {
return false
}
fbeCurrentSize += fm.F2.FBESize()
if (fbeCurrentSize + fm.F3.FBESize()) > fbeStructSize {
return true
}
if !fm.F3.Verify() {
return false
}
fbeCurrentSize += fm.F3.FBESize()
if (fbeCurrentSize + fm.F4.FBESize()) > fbeStructSize {
return true
}
if !fm.F4.Verify() {
return false
}
fbeCurrentSize += fm.F4.FBESize()
if (fbeCurrentSize + fm.F5.FBESize()) > fbeStructSize {
return true
}
if !fm.F5.Verify() {
return false
}
fbeCurrentSize += fm.F5.FBESize()
if (fbeCurrentSize + fm.F6.FBESize()) > fbeStructSize {
return true
}
if !fm.F6.Verify() {
return false
}
fbeCurrentSize += fm.F6.FBESize()
if (fbeCurrentSize + fm.F7.FBESize()) > fbeStructSize {
return true
}
if !fm.F7.Verify() {
return false
}
fbeCurrentSize += fm.F7.FBESize()
if (fbeCurrentSize + fm.F8.FBESize()) > fbeStructSize {
return true
}
if !fm.F8.Verify() {
return false
}
fbeCurrentSize += fm.F8.FBESize()
if (fbeCurrentSize + fm.F9.FBESize()) > fbeStructSize {
return true
}
if !fm.F9.Verify() {
return false
}
fbeCurrentSize += fm.F9.FBESize()
if (fbeCurrentSize + fm.F10.FBESize()) > fbeStructSize {
return true
}
if !fm.F10.Verify() {
return false
}
fbeCurrentSize += fm.F10.FBESize()
return true
}
// Get the struct value (begin phase)
func (fm *FieldModelStructList) GetBegin() (int, error) {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return 0, nil
}
fbeStructOffset := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset()))
if (fbeStructOffset == 0) || ((fm.buffer.Offset() + fbeStructOffset + 4 + 4) > fm.buffer.Size()) {
return 0, errors.New("model is broken")
}
fbeStructSize := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeStructOffset))
if fbeStructSize < (4 + 4) {
return 0, errors.New("model is broken")
}
fm.buffer.Shift(fbeStructOffset)
return fbeStructOffset, nil
}
// Get the struct value (end phase)
func (fm *FieldModelStructList) GetEnd(fbeBegin int) {
fm.buffer.Unshift(fbeBegin)
}
// Get the struct value
func (fm *FieldModelStructList) Get() (*StructList, error) {
fbeResult := NewStructList()
return fbeResult, fm.GetValue(fbeResult)
}
// Get the struct value by the given pointer
func (fm *FieldModelStructList) GetValue(fbeValue *StructList) error {
fbeBegin, err := fm.GetBegin()
if fbeBegin == 0 {
return err
}
fbeStructSize := int(fbe.ReadUInt32(fm.buffer.Data(), fm.buffer.Offset()))
fm.GetFields(fbeValue, fbeStructSize)
fm.GetEnd(fbeBegin)
return nil
}
// Get the struct fields values
func (fm *FieldModelStructList) GetFields(fbeValue *StructList, fbeStructSize int) {
fbeCurrentSize := 4 + 4
if (fbeCurrentSize + fm.F1.FBESize()) <= fbeStructSize {
fbeValue.F1, _ = fm.F1.Get()
} else {
fbeValue.F1 = make([]byte, 0)
}
fbeCurrentSize += fm.F1.FBESize()
if (fbeCurrentSize + fm.F2.FBESize()) <= fbeStructSize {
fbeValue.F2, _ = fm.F2.Get()
} else {
fbeValue.F2 = make([]*byte, 0)
}
fbeCurrentSize += fm.F2.FBESize()
if (fbeCurrentSize + fm.F3.FBESize()) <= fbeStructSize {
fbeValue.F3, _ = fm.F3.Get()
} else {
fbeValue.F3 = make([][]byte, 0)
}
fbeCurrentSize += fm.F3.FBESize()
if (fbeCurrentSize + fm.F4.FBESize()) <= fbeStructSize {
fbeValue.F4, _ = fm.F4.Get()
} else {
fbeValue.F4 = make([]*[]byte, 0)
}
fbeCurrentSize += fm.F4.FBESize()
if (fbeCurrentSize + fm.F5.FBESize()) <= fbeStructSize {
fbeValue.F5, _ = fm.F5.Get()
} else {
fbeValue.F5 = make([]EnumSimple, 0)
}
fbeCurrentSize += fm.F5.FBESize()
if (fbeCurrentSize + fm.F6.FBESize()) <= fbeStructSize {
fbeValue.F6, _ = fm.F6.Get()
} else {
fbeValue.F6 = make([]*EnumSimple, 0)
}
fbeCurrentSize += fm.F6.FBESize()
if (fbeCurrentSize + fm.F7.FBESize()) <= fbeStructSize {
fbeValue.F7, _ = fm.F7.Get()
} else {
fbeValue.F7 = make([]FlagsSimple, 0)
}
fbeCurrentSize += fm.F7.FBESize()
if (fbeCurrentSize + fm.F8.FBESize()) <= fbeStructSize {
fbeValue.F8, _ = fm.F8.Get()
} else {
fbeValue.F8 = make([]*FlagsSimple, 0)
}
fbeCurrentSize += fm.F8.FBESize()
if (fbeCurrentSize + fm.F9.FBESize()) <= fbeStructSize {
fbeValue.F9, _ = fm.F9.Get()
} else {
fbeValue.F9 = make([]StructSimple, 0)
}
fbeCurrentSize += fm.F9.FBESize()
if (fbeCurrentSize + fm.F10.FBESize()) <= fbeStructSize {
fbeValue.F10, _ = fm.F10.Get()
} else {
fbeValue.F10 = make([]*StructSimple, 0)
}
fbeCurrentSize += fm.F10.FBESize()
}
// Set the struct value (begin phase)
func (fm *FieldModelStructList) SetBegin() (int, error) {
if (fm.buffer.Offset() + fm.FBEOffset() + fm.FBESize()) > fm.buffer.Size() {
return 0, errors.New("model is broken")
}
fbeStructSize := fm.FBEBody()
fbeStructOffset := fm.buffer.Allocate(fbeStructSize) - fm.buffer.Offset()
if (fbeStructOffset <= 0) || ((fm.buffer.Offset() + fbeStructOffset + fbeStructSize) > fm.buffer.Size()) {
return 0, errors.New("model is broken")
}
fbe.WriteUInt32(fm.buffer.Data(), fm.buffer.Offset() + fm.FBEOffset(), uint32(fbeStructOffset))
fbe.WriteUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeStructOffset, uint32(fbeStructSize))
fbe.WriteUInt32(fm.buffer.Data(), fm.buffer.Offset() + fbeStructOffset + 4, uint32(fm.FBEType()))
fm.buffer.Shift(fbeStructOffset)
return fbeStructOffset, nil
}
// Set the struct value (end phase)
func (fm *FieldModelStructList) SetEnd(fbeBegin int) {
fm.buffer.Unshift(fbeBegin)
}
// Set the struct value
func (fm *FieldModelStructList) Set(fbeValue *StructList) error {
fbeBegin, err := fm.SetBegin()
if fbeBegin == 0
|
err = fm.SetFields(fbeValue)
fm.SetEnd(fbeBegin)
return err
}
// Set the struct fields values
func (fm *FieldModelStructList) SetFields(fbeValue *StructList) error {
var err error = nil
if err = fm.F1.Set(fbeValue.F1); err != nil {
return err
}
if err = fm.F2.Set(fbeValue.F2); err != nil {
return err
}
if err = fm.F3.Set(fbeValue.F3); err != nil {
return err
}
if err = fm.F4.Set(fbeValue.F4); err != nil {
return err
}
if err = fm.F5.Set(fbeValue.F5); err != nil {
return err
}
if err = fm.F6.Set(fbeValue.F6); err != nil {
return err
}
if err = fm.F7.Set(fbeValue.F7); err != nil {
return err
}
if err = fm.F8.Set(fbeValue.F8); err != nil {
return err
}
if err = fm.F9.Set(fbeValue.F9); err != nil {
return err
}
if err = fm.F10.Set(fbeValue.F10); err != nil {
return err
}
return err
}
|
{
return err
}
|
buffer.go
|
package buf
import (
"io"
"v2ray.com/core/common/bytespool"
)
const (
// Size of a regular buffer.
Size = 2048
)
// Supplier is a function that writes content into the given byte slice and
// returns the number of bytes written.
type Supplier func([]byte) (int, error)
// Buffer is a recyclable allocation of a byte array. Buffer.Release() recycles
// the buffer into an internal buffer pool, in order to recreate a buffer more
// quickly.
type Buffer struct {
v []byte
start int32
end int32
}
// Release recycles the buffer into an internal buffer pool.
func (b *Buffer) Release() {
if b == nil || b.v == nil {
return
}
bytespool.Free(b.v)
b.v = nil
b.Clear()
}
// Clear clears the content of the buffer, resulting in an empty buffer with
// Len() = 0.
func (b *Buffer) Clear() {
b.start = 0
b.end = 0
}
// AppendSupplier appends the content written by the given Supplier to the buffer.
func (b *Buffer) AppendSupplier(writer Supplier) error {
nBytes, err := writer(b.v[b.end:])
b.end += int32(nBytes)
return err
}
// Byte returns the byte at the given index.
func (b *Buffer) Byte(index int32) byte {
return b.v[b.start+index]
}
// SetByte sets the byte value at index.
func (b *Buffer) SetByte(index int32, value byte) {
b.v[b.start+index] = value
}
// Bytes returns the content bytes of this Buffer.
func (b *Buffer) Bytes() []byte {
return b.v[b.start:b.end]
}
// Reset resets the content of the Buffer with a supplier.
func (b *Buffer) Reset(writer Supplier) error {
nBytes, err := writer(b.v)
b.start = 0
b.end = int32(nBytes)
return err
}
// BytesRange returns a slice of this buffer with given from and to boundary.
func (b *Buffer) BytesRange(from, to int32) []byte {
if from < 0 {
from += b.Len()
}
if to < 0 {
to += b.Len()
}
return b.v[b.start+from : b.start+to]
}
// BytesFrom returns a slice of this Buffer starting from the given position.
func (b *Buffer) BytesFrom(from int32) []byte {
if from < 0 {
from += b.Len()
}
return b.v[b.start+from : b.end]
}
// BytesTo returns a slice of this Buffer from start to the given position.
func (b *Buffer) BytesTo(to int32) []byte {
if to < 0 {
to += b.Len()
}
return b.v[b.start : b.start+to]
}
// Resize cuts the buffer at the given position.
func (b *Buffer) Resize(from, to int32) {
if from < 0 {
from += b.Len()
}
if to < 0 {
to += b.Len()
}
if to < from {
panic("Invalid slice")
}
b.end = b.start + to
b.start += from
}
// Advance cuts the buffer at the given position.
func (b *Buffer) Advance(from int32) {
if from < 0 {
from += b.Len()
}
b.start += from
}
// Len returns the length of the buffer content.
func (b *Buffer) Len() int32 {
|
}
return b.end - b.start
}
// IsEmpty returns true if the buffer is empty.
func (b *Buffer) IsEmpty() bool {
return b.Len() == 0
}
// IsFull returns true if the buffer has no more room to grow.
func (b *Buffer) IsFull() bool {
return b.end == int32(len(b.v))
}
// Write implements Write method in io.Writer.
func (b *Buffer) Write(data []byte) (int, error) {
nBytes := copy(b.v[b.end:], data)
b.end += int32(nBytes)
return nBytes, nil
}
// WriteBytes appends one or more bytes to the end of the buffer.
func (b *Buffer) WriteBytes(bytes ...byte) (int, error) {
return b.Write(bytes)
}
// Read implements io.Reader.Read().
func (b *Buffer) Read(data []byte) (int, error) {
if b.Len() == 0 {
return 0, io.EOF
}
nBytes := copy(data, b.v[b.start:b.end])
if int32(nBytes) == b.Len() {
b.Clear()
} else {
b.start += int32(nBytes)
}
return nBytes, nil
}
// String returns the string form of this Buffer.
func (b *Buffer) String() string {
return string(b.Bytes())
}
// New creates a Buffer with 0 length and 2K capacity.
func New() *Buffer {
return &Buffer{
v: bytespool.Alloc(Size),
}
}
|
if b == nil {
return 0
|
conf.py
|
"""Sphinx configuration file"""
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'Carbon Black Cloud Python SDK'
copyright = '2020-2021, Developer Relations'
author = 'Developer Relations'
# The full version, including alpha/beta/rc tags
release = '1.3.4'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.napoleon', 'sphinx.ext.autodoc', 'sphinx.ext.autosectionlabel']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'tango'
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True,
'display_version': False,
'style_external_links': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/cbc-sdk-thumbnail.png"
# Output file base name for HTML help builder.
htmlhelp_basename = 'CarbonBlackAPI-PythonBindingsdoc'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings.tex', u'Carbon Black Cloud Python API Documentation',
u'Carbon Black Developer Network', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'carbonblackcloud-pythonbindings', u'Carbon Black Cloud Python API Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'CarbonBlackCloud-PythonBindings', u'Carbon Black Cloud Python API Documentation',
author, 'CarbonBlackCloud-PythonBindings', 'Python bindings for the Carbon Black Cloud API',
'Miscellaneous'),
]
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
'preamble': "".join((
'\\DeclareUnicodeCharacter{25A0}{=}', # Solid box
)),
}
autoclass_content = 'both'
def setup(app):
|
"""Setup Sphinx."""
app.add_css_file('css/custom.css')
|
|
internal_game_state.rs
|
use ::{BOARD_HEIGHT, BOARD_WIDTH, GameState, Move, Tile};
use ai::bitboard::{BB_INVALID, BB_TARGET, Bitboard, BitIndex, pos_to_index, index_to_pos};
pub type Ply = u32;
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct InternalGameState {
pub pieces: [Bitboard; 2],
pub ply: Ply,
pub current_player: u8,
}
impl InternalGameState {
fn is_valid_location(index: BitIndex) -> bool {
!BB_INVALID.get_bit(index)
}
fn empty_bb(&self) -> Bitboard {
!self.occupied_bb()
}
fn occupied_bb(&self) -> Bitboard {
self.pieces[0] | self.pieces[1]
}
pub fn won(&self, player: u8) -> bool {
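        // A player wins once the target zone is completely filled and at
        // least one of the occupying pieces is their own; this prevents a
        // player from blocking the opponent's zone indefinitely.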
(self.pieces[0] | self.pieces[1]) & BB_TARGET[player as usize] == BB_TARGET[player as usize] && !(self.pieces[player as usize] & BB_TARGET[player as usize]).is_empty()
}
pub fn reachable_from(&self, from: BitIndex) -> Bitboard {
let mut jumping_targets = Bitboard::default();
let mut next_jumping_targets = Bitboard::bit(from);
let occupied = self.occupied_bb();
let empty = !BB_INVALID & self.empty_bb();
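        // Expand the set of squares reachable by chained jumps until no new
        // squares are added (a fixed-point iteration).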
while jumping_targets != next_jumping_targets {
jumping_targets = next_jumping_targets;
// shift left
for &(skip, jump) in &[
( 1, 2), // east
( 13, 26), // south west
( 14, 28), // south east
] {
next_jumping_targets |= (occupied << skip) & (jumping_targets << jump);
}
// shift right
for &(skip, jump) in &[
( 1, 2), // west
(13, 26), // north east
(14, 28), // north west
] {
next_jumping_targets |= (occupied >> skip) & (jumping_targets >> jump);
}
next_jumping_targets &= empty;
}
for &slide in &[
255, // west
1, // east
13, // south west
14, // south east
            243, // north east
            242, // north west
] {
let to = from.wrapping_add(slide);
jumping_targets.set_bit(to);
}
jumping_targets & empty
}
pub fn possible_moves(&self) -> Vec<InternalMove> {
let board = self.pieces[self.current_player as usize];
let mut result = Vec::with_capacity(256);
for from in board.ones() {
result.extend(self.reachable_from(from).ones().map(|to| InternalMove { from, to } ));
}
result
}
pub fn make_move(&mut self, mov: InternalMove) {
self.pieces[self.current_player as usize].set_bit(mov.to);
self.pieces[self.current_player as usize].unset_bit(mov.from);
self.current_player = 1-self.current_player;
}
pub fn unmake_move(&mut self, mov: InternalMove) {
self.current_player = 1-self.current_player;
self.pieces[self.current_player as usize].set_bit(mov.from);
self.pieces[self.current_player as usize].unset_bit(mov.to);
}
}
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct InternalMove {
pub from: BitIndex,
pub to: BitIndex,
}
impl InternalMove {
pub fn inverse(&self) -> Self {
InternalMove {
from: self.to,
to: self.from,
}
}
pub fn to_move(&self) -> Move {
Move {
from: index_to_pos(self.from),
to: index_to_pos(self.to),
}
}
}
impl From<Move> for InternalMove {
fn from(mov: Move) -> InternalMove {
let from = pos_to_index(mov.from.0 as u8, mov.from.1 as u8);
let to = pos_to_index(mov.to.0 as u8, mov.to.1 as u8);
InternalMove {
from,
to,
}
}
}
impl From<GameState> for InternalGameState {
fn from(state: GameState) -> Self {
let mut pieces: [Bitboard; 2] = Default::default();
for x in 0..BOARD_WIDTH {
for y in 0..BOARD_HEIGHT {
if let Tile::Player(player) = state.get(x as i8, y as i8) {
pieces[player as usize].set_bit(pos_to_index(x, y));
}
}
}
assert!((pieces[0] & BB_INVALID).is_empty());
assert!((pieces[1] & BB_INVALID).is_empty());
InternalGameState {
pieces,
ply: state.ply as Ply,
current_player: state.current_player,
}
}
}
#[cfg(test)]
mod tests {
#[test]
fn test_pos_to_index()
|
}
|
{
use ai::internal_game_state::pos_to_index;
assert_eq!(pos_to_index(6, 0), 0x06);
assert_eq!(pos_to_index(6, 1), 0x13);
assert_eq!(pos_to_index(7, 1), 0x14);
assert_eq!(pos_to_index(5, 2), 0x20);
assert_eq!(pos_to_index(6, 2), 0x21);
assert_eq!(pos_to_index(7, 2), 0x22);
assert_eq!(pos_to_index(6, 16), 0xDE);
}
|
m3u.py
|
import re
from dataclasses import dataclass
from logging import getLogger
from typing import List
logger = getLogger('M3U')
@dataclass
class M3UMedia:
title: str
tvg_name: str
tvg_ID: str
tvg_logo: str
tvg_group: str
link: str
class M3UParser:
"""Mod from https://github.com/Timmy93/M3uParser/blob/master/M3uParser.py"""
def __init__(self, content: str = None):
self.lines: List[str] = []
self.data: List[M3UMedia] = []
if content is not None:
self.read_data(content)
if self.lines:
self.scan_all()
def read_data(self, content: str):
self.lines = [line.rstrip('\n') for line in content.splitlines()]
def scan_all(self):
for index, line in enumerate(self.lines):
if line.startswith('#EXTINF'):
self.process_ext_inf(index)
def
|
(self, n):
line_info = self.lines[n]
line_link = self.lines[n + 1]
m = re.search("tvg-id=\"(.*?)\"", line_info)
tid = m.group(1)
m = re.search("tvg-logo=\"(.*?)\"", line_info)
logo = m.group(1)
m = re.search("group-title=\"(.*?)\"", line_info)
group = m.group(1)
m = re.search("[,](?!.*[,])(.*?)$", line_info)
title = m.group(1)
self.data.append(M3UMedia(title, '', tid, logo, group, line_link))
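# Editorial usage sketch (the sample playlist is illustrative, not from the
# original project): feed raw playlist text to the parser, then read entries.
if __name__ == '__main__':
    sample = (
        '#EXTM3U\n'
        '#EXTINF:-1 tvg-id="news.1" tvg-logo="http://example.com/logo.png" '
        'group-title="News",Example News HD\n'
        'http://example.com/stream.m3u8\n'
    )
    for media in M3UParser(sample).data:
        print(media.title, media.tvg_group, media.link)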
|
process_ext_inf
|
bug388.go
|
// errorcheck
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Issue 2231
package main
import "runtime"
func foo(runtime.UintType, i int) { // ERROR "cannot declare name runtime.UintType|mixed named and unnamed|undefined identifier"
println(i, runtime.UintType) // GCCGO_ERROR "undefined identifier"
}
func bar(i int) {
runtime.UintType := i // ERROR "cannot declare name runtime.UintType|non-name on left side|undefined identifier"
println(runtime.UintType) // GCCGO_ERROR "invalid use of type|undefined identifier"
}
func baz() {
main.i := 1 // ERROR "non-name main.i|non-name on left side"
println(main.i) // GCCGO_ERROR "no fields or methods"
}
func qux() {
var main.i // ERROR "unexpected [.]|expected type"
println(main.i)
}
func corge() {
var foo.i int // ERROR "unexpected [.]|expected type"
println(foo.i)
}
func
|
() {
foo(42,43)
bar(1969)
}
|
main
|
elasticsearchservice.py
|
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_tags(ARN=None, TagList=None):
"""
Attaches tags to an existing Elasticsearch domain. Tags are a set of case-sensitive key value pairs. An Elasticsearch domain may have up to 10 tags. See Tagging Amazon Elasticsearch Service Domains for more information.
See also: AWS API Documentation
:example: response = client.add_tags(
ARN='string',
TagList=[
{
'Key': 'string',
'Value': 'string'
},
]
)
:type ARN: string
:param ARN: [REQUIRED]
Specify the ARN for which you want to add the tags.
:type TagList: list
:param TagList: [REQUIRED]
List of Tag that need to be added for the Elasticsearch domain.
(dict) --Specifies a key value pair for a resource tag.
Key (string) -- [REQUIRED]Specifies the TagKey , the name of the tag. Tag keys must be unique for the Elasticsearch domain to which they are attached.
Value (string) -- [REQUIRED]Specifies the TagValue , the value assigned to the corresponding tag key. Tag values can be null and do not have to be unique in a tag set. For example, you can have a key value pair in a tag set of project : Trinity and cost-center : Trinity
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
"""
pass
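# Editorial usage sketch for can_paginate/get_paginator (assumes boto3 is
# installed and AWS credentials are configured; the operation name is a
# placeholder -- check your boto3 version for the paginators it ships):
#
#   import boto3
#   client = boto3.client('es')
#   if client.can_paginate('list_domain_names'):
#       for page in client.get_paginator('list_domain_names').paginate():
#           print(page)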
def create_elasticsearch_domain(DomainName=None, ElasticsearchVersion=None, ElasticsearchClusterConfig=None, EBSOptions=None, AccessPolicies=None, SnapshotOptions=None, AdvancedOptions=None):
"""
Creates a new Elasticsearch domain. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide .
See also: AWS API Documentation
:example: response = client.create_elasticsearch_domain(
DomainName='string',
ElasticsearchVersion='string',
ElasticsearchClusterConfig={
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
EBSOptions={
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
AccessPolicies='string',
SnapshotOptions={
'AutomatedSnapshotStartHour': 123
},
AdvancedOptions={
'string': 'string'
}
)
:type DomainName: string
:param DomainName: [REQUIRED]
The name of the Elasticsearch domain that you are creating. Domain names are unique across the domains owned by an account within an AWS region. Domain names must start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:type ElasticsearchVersion: string
:param ElasticsearchVersion: String of format X.Y to specify the version for the Elasticsearch domain, e.g. '1.5' or '2.3'. For more information, see Creating Elasticsearch Domains in the Amazon Elasticsearch Service Developer Guide .
:type ElasticsearchClusterConfig: dict
:param ElasticsearchClusterConfig: Configuration options for an Elasticsearch domain. Specifies the instance type and number of instances in the domain cluster.
InstanceType (string) --The instance type for an Elasticsearch cluster.
InstanceCount (integer) --The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --A boolean value to indicate whether a dedicated master node is enabled. See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --A boolean value to indicate whether zone awareness is enabled. See About Zone Awareness for more information.
DedicatedMasterType (string) --The instance type for a dedicated master node.
DedicatedMasterCount (integer) --Total number of dedicated master nodes, active and on standby, for the cluster.
:type EBSOptions: dict
:param EBSOptions: Options to enable, disable and specify the type and size of EBS storage volumes.
EBSEnabled (boolean) --Specifies whether EBS-based storage is enabled.
VolumeType (string) --Specifies the volume type for EBS-based storage.
VolumeSize (integer) --Integer to specify the size of an EBS volume.
Iops (integer) --Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
:type AccessPolicies: string
:param AccessPolicies: IAM access policy as a JSON-formatted string.
:type SnapshotOptions: dict
:param SnapshotOptions: Option to set time, in UTC format, of the daily automated snapshot. Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.
:type AdvancedOptions: dict
:param AdvancedOptions: Option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true . See Configuration Advanced Options for more information.
(string) --
(string) --
:rtype: dict
:return: {
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Processing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'AdvancedOptions': {
'string': 'string'
}
}
}
:returns:
(string) --
(string) --
"""
pass
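# A hedged usage sketch (not part of the generated stubs): one plausible way
# to create a small domain with the API documented above. The service name
# 'es', the domain name, and all option values are illustrative assumptions.
def _example_create_domain():
    import boto3
    client = boto3.client('es')  # assumption: credentials/region configured
    return client.create_elasticsearch_domain(
        DomainName='my-test-domain',  # hypothetical name
        ElasticsearchVersion='5.5',
        ElasticsearchClusterConfig={
            'InstanceType': 't2.small.elasticsearch',
            'InstanceCount': 1,
        },
        EBSOptions={'EBSEnabled': True, 'VolumeType': 'gp2', 'VolumeSize': 10},
    )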
def delete_elasticsearch_domain(DomainName=None):
"""
Permanently deletes the specified Elasticsearch domain and all of its data. Once a domain is deleted, it cannot be recovered.
See also: AWS API Documentation
:example: response = client.delete_elasticsearch_domain(
DomainName='string'
)
:type DomainName: string
:param DomainName: [REQUIRED]
The name of the Elasticsearch domain that you want to permanently delete.
:rtype: dict
:return: {
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Processing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'AdvancedOptions': {
'string': 'string'
}
}
}
"""
pass
def describe_elasticsearch_domain(DomainName=None):
"""
Returns domain configuration information about the specified Elasticsearch domain, including the domain ID, domain endpoint, and domain ARN.
See also: AWS API Documentation
:example: response = client.describe_elasticsearch_domain(
DomainName='string'
)
:type DomainName: string
:param DomainName: [REQUIRED]
The name of the Elasticsearch domain for which you want information.
:rtype: dict
:return: {
'DomainStatus': {
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Processing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'AdvancedOptions': {
'string': 'string'
}
}
}
"""
pass
def describe_elasticsearch_domain_config(DomainName=None):
"""
Provides cluster configuration information about the specified Elasticsearch domain, such as the state, creation date, update version, and update date for cluster options.
See also: AWS API Documentation
:example: response = client.describe_elasticsearch_domain_config(
DomainName='string'
)
:type DomainName: string
:param DomainName: [REQUIRED]
The Elasticsearch domain that you want to get information about.
:rtype: dict
:return: {
'DomainConfig': {
'ElasticsearchVersion': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'ElasticsearchClusterConfig': {
'Options': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EBSOptions': {
'Options': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AccessPolicies': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'SnapshotOptions': {
'Options': {
'AutomatedSnapshotStartHour': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AdvancedOptions': {
'Options': {
'string': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
}
}
}
"""
pass
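# Hedged sketch: each entry in DomainConfig pairs the current 'Options' with
# a 'Status' record, so the state of every option can be inspected in one
# loop. Client setup and the domain name are illustrative assumptions.
def _example_inspect_domain_config():
    import boto3
    client = boto3.client('es')
    config = client.describe_elasticsearch_domain_config(
        DomainName='my-test-domain')['DomainConfig']  # hypothetical domain
    for option_name, option in config.items():
        status = option['Status']
        print(option_name, status['State'], status['UpdateVersion'])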
def describe_elasticsearch_domains(DomainNames=None):
"""
Returns domain configuration information about the specified Elasticsearch domains, including the domain ID, domain endpoint, and domain ARN.
See also: AWS API Documentation
:example: response = client.describe_elasticsearch_domains(
DomainNames=[
'string',
]
)
:type DomainNames: list
:param DomainNames: [REQUIRED]
The Elasticsearch domains for which you want information.
(string) --The name of an Elasticsearch domain. Domain names are unique across the domains owned by an account within an AWS region. Domain names start with a letter or number and can contain the following characters: a-z (lowercase), 0-9, and - (hyphen).
:rtype: dict
:return: {
'DomainStatusList': [
{
'DomainId': 'string',
'DomainName': 'string',
'ARN': 'string',
'Created': True|False,
'Deleted': True|False,
'Endpoint': 'string',
'Processing': True|False,
'ElasticsearchVersion': 'string',
'ElasticsearchClusterConfig': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'EBSOptions': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'AccessPolicies': 'string',
'SnapshotOptions': {
'AutomatedSnapshotStartHour': 123
},
'AdvancedOptions': {
'string': 'string'
}
},
]
}
"""
pass
def describe_elasticsearch_instance_type_limits(DomainName=None, InstanceType=None, ElasticsearchVersion=None):
"""
Describe Elasticsearch Limits for a given InstanceType and ElasticsearchVersion. When modifying an existing domain, specify the `` DomainName `` to learn which Limits apply to that domain.
See also: AWS API Documentation
:example: response = client.describe_elasticsearch_instance_type_limits(
DomainName='string',
InstanceType='m3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
ElasticsearchVersion='string'
)
:type DomainName: string
:param DomainName: DomainName represents the name of the domain that you are trying to modify. This should be present only if you are querying for Elasticsearch `` Limits `` for an existing domain.
:type InstanceType: string
:param InstanceType: [REQUIRED]
The instance type for an Elasticsearch cluster for which Elasticsearch `` Limits `` are needed.
:type ElasticsearchVersion: string
:param ElasticsearchVersion: [REQUIRED]
Version of Elasticsearch for which `` Limits `` are needed.
:rtype: dict
:return: {
'LimitsByRole': {
'string': {
'StorageTypes': [
{
'StorageTypeName': 'string',
'StorageSubTypeName': 'string',
'StorageTypeLimits': [
{
'LimitName': 'string',
'LimitValues': [
'string',
]
},
]
},
],
'InstanceLimits': {
'InstanceCountLimits': {
'MinimumInstanceCount': 123,
'MaximumInstanceCount': 123
}
},
'AdditionalLimits': [
{
'LimitName': 'string',
'LimitValues': [
'string',
]
},
]
}
}
}
:returns:
Data: If the given InstanceType is used as a data node
Master: If the given InstanceType is used as a master node
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
def get_waiter():
"""
"""
pass
def list_domain_names():
"""
Returns the names of all Elasticsearch domains owned by the current user's account.
See also: AWS API Documentation
:example: response = client.list_domain_names()
:rtype: dict
:return: {
'DomainNames': [
{
'DomainName': 'string'
},
]
}
"""
pass
def list_elasticsearch_instance_types(ElasticsearchVersion=None, DomainName=None, MaxResults=None, NextToken=None):
"""
List all Elasticsearch instance types that are supported for a given ElasticsearchVersion
See also: AWS API Documentation
:example: response = client.list_elasticsearch_instance_types(
ElasticsearchVersion='string',
DomainName='string',
MaxResults=123,
NextToken='string'
)
:type ElasticsearchVersion: string
:param ElasticsearchVersion: [REQUIRED]
Version of Elasticsearch for which the list of supported Elasticsearch instance types is needed.
:type DomainName: string
:param DomainName: DomainName represents the name of the domain that you are trying to modify. This should be present only if you are querying for the list of available Elasticsearch instance types when modifying an existing domain.
:type MaxResults: integer
:param MaxResults: Set this value to limit the number of results returned. The value provided must be greater than 30, or it will not be honored.
:type NextToken: string
:param NextToken: NextToken should be sent if an earlier API call produced a result containing NextToken. It is used for pagination.
:rtype: dict
:return: {
'ElasticsearchInstanceTypes': [
'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
def list_elasticsearch_versions(MaxResults=None, NextToken=None):
"""
List all supported Elasticsearch versions
See also: AWS API Documentation
:example: response = client.list_elasticsearch_versions(
MaxResults=123,
NextToken='string'
)
:type MaxResults: integer
:param MaxResults: Set this value to limit the number of results returned. The value provided must be greater than 10, or it will not be honored.
:type NextToken: string
:param NextToken: Paginated APIs accept a NextToken input to return next-page results and provide a NextToken output in the response, which the client can use to retrieve more results.
:rtype: dict
:return: {
'ElasticsearchVersions': [
'string',
],
'NextToken': 'string'
}
:returns:
(string) --
"""
pass
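# Hedged sketch of the NextToken pagination protocol described above: keep
# calling until the response no longer carries a NextToken. Client setup is
# an illustrative assumption.
def _example_list_all_versions():
    import boto3
    client = boto3.client('es')
    versions, token = [], None
    while True:
        kwargs = {'NextToken': token} if token else {}
        resp = client.list_elasticsearch_versions(**kwargs)
        versions.extend(resp['ElasticsearchVersions'])
        token = resp.get('NextToken')
        if not token:
            return versions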
def list_tags(ARN=None):
"""
Returns all tags for the given Elasticsearch domain.
See also: AWS API Documentation
:example: response = client.list_tags(
ARN='string'
)
:type ARN: string
:param ARN: [REQUIRED]
Specify the ARN for the Elasticsearch domain to which the tags are attached that you want to view.
:rtype: dict
:return: {
'TagList': [
{
'Key': 'string',
'Value': 'string'
},
]
}
"""
pass
def remove_tags(ARN=None, TagKeys=None):
"""
Removes the specified set of tags from the specified Elasticsearch domain.
See also: AWS API Documentation
:example: response = client.remove_tags(
ARN='string',
TagKeys=[
'string',
]
)
:type ARN: string
:param ARN: [REQUIRED]
Specifies the ARN for the Elasticsearch domain from which you want to delete the specified tags.
:type TagKeys: list
:param TagKeys: [REQUIRED]
Specifies the TagKey list which you want to remove from the Elasticsearch domain.
(string) --
"""
pass
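# Hedged sketch combining list_tags and remove_tags: drop every tag whose key
# starts with a given prefix. The ARN and the prefix are illustrative
# assumptions, not values from this module.
def _example_prune_tags(domain_arn, prefix='temp-'):
    import boto3
    client = boto3.client('es')
    tags = client.list_tags(ARN=domain_arn)['TagList']
    doomed = [t['Key'] for t in tags if t['Key'].startswith(prefix)]
    if doomed:
        client.remove_tags(ARN=domain_arn, TagKeys=doomed)
    return doomed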
def update_elasticsearch_domain_config(DomainName=None, ElasticsearchClusterConfig=None, EBSOptions=None, SnapshotOptions=None, AdvancedOptions=None, AccessPolicies=None):
"""
Modifies the cluster configuration of the specified Elasticsearch domain, such as setting the instance type and the number of instances.
See also: AWS API Documentation
:example: response = client.update_elasticsearch_domain_config(
DomainName='string',
ElasticsearchClusterConfig={
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
EBSOptions={
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
SnapshotOptions={
'AutomatedSnapshotStartHour': 123
},
AdvancedOptions={
'string': 'string'
},
AccessPolicies='string'
)
:type DomainName: string
:param DomainName: [REQUIRED]
The name of the Elasticsearch domain that you are updating.
:type ElasticsearchClusterConfig: dict
:param ElasticsearchClusterConfig: The type and number of instances to instantiate for the domain cluster.
InstanceType (string) --The instance type for an Elasticsearch cluster.
InstanceCount (integer) --The number of instances in the specified domain cluster.
DedicatedMasterEnabled (boolean) --A boolean value to indicate whether a dedicated master node is enabled. See About Dedicated Master Nodes for more information.
ZoneAwarenessEnabled (boolean) --A boolean value to indicate whether zone awareness is enabled. See About Zone Awareness for more information.
DedicatedMasterType (string) --The instance type for a dedicated master node.
DedicatedMasterCount (integer) --Total number of dedicated master nodes, active and on standby, for the cluster.
:type EBSOptions: dict
:param EBSOptions: Specify the type and size of the EBS volume that you want to use.
EBSEnabled (boolean) --Specifies whether EBS-based storage is enabled.
VolumeType (string) --Specifies the volume type for EBS-based storage.
VolumeSize (integer) --Integer to specify the size of an EBS volume.
Iops (integer) --Specifies the IOPS for a Provisioned IOPS EBS volume (SSD).
:type SnapshotOptions: dict
:param SnapshotOptions: Option to set the time, in UTC format, for the daily automated snapshot. Default value is 0 hours.
AutomatedSnapshotStartHour (integer) --Specifies the time, in UTC format, when the service takes a daily automated snapshot of the specified Elasticsearch domain. Default value is 0 hours.
:type AdvancedOptions: dict
:param AdvancedOptions: Modifies the advanced option to allow references to indices in an HTTP request body. Must be false when configuring access to individual sub-resources. By default, the value is true . See Configuration Advanced Options for more information.
(string) --
(string) --
:type AccessPolicies: string
:param AccessPolicies: IAM access policy as a JSON-formatted string.
:rtype: dict
:return: {
'DomainConfig': {
'ElasticsearchVersion': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'ElasticsearchClusterConfig': {
'Options': {
'InstanceType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'InstanceCount': 123,
'DedicatedMasterEnabled': True|False,
'ZoneAwarenessEnabled': True|False,
'DedicatedMasterType': 'm3.medium.elasticsearch'|'m3.large.elasticsearch'|'m3.xlarge.elasticsearch'|'m3.2xlarge.elasticsearch'|'m4.large.elasticsearch'|'m4.xlarge.elasticsearch'|'m4.2xlarge.elasticsearch'|'m4.4xlarge.elasticsearch'|'m4.10xlarge.elasticsearch'|'t2.micro.elasticsearch'|'t2.small.elasticsearch'|'t2.medium.elasticsearch'|'r3.large.elasticsearch'|'r3.xlarge.elasticsearch'|'r3.2xlarge.elasticsearch'|'r3.4xlarge.elasticsearch'|'r3.8xlarge.elasticsearch'|'i2.xlarge.elasticsearch'|'i2.2xlarge.elasticsearch'|'d2.xlarge.elasticsearch'|'d2.2xlarge.elasticsearch'|'d2.4xlarge.elasticsearch'|'d2.8xlarge.elasticsearch'|'c4.large.elasticsearch'|'c4.xlarge.elasticsearch'|'c4.2xlarge.elasticsearch'|'c4.4xlarge.elasticsearch'|'c4.8xlarge.elasticsearch'|'r4.large.elasticsearch'|'r4.xlarge.elasticsearch'|'r4.2xlarge.elasticsearch'|'r4.4xlarge.elasticsearch'|'r4.8xlarge.elasticsearch'|'r4.16xlarge.elasticsearch',
'DedicatedMasterCount': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'EBSOptions': {
'Options': {
'EBSEnabled': True|False,
'VolumeType': 'standard'|'gp2'|'io1',
'VolumeSize': 123,
'Iops': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AccessPolicies': {
'Options': 'string',
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'SnapshotOptions': {
'Options': {
'AutomatedSnapshotStartHour': 123
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
},
'AdvancedOptions': {
'Options': {
'string': 'string'
},
'Status': {
'CreationDate': datetime(2015, 1, 1),
'UpdateDate': datetime(2015, 1, 1),
'UpdateVersion': 123,
'State': 'RequiresIndexDocuments'|'Processing'|'Active',
'PendingDeletion': True|False
}
}
}
}
:returns:
(string) --
(string) --
"""
pass
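# Hedged sketch: grow the data-node count. It is assumed (not stated above)
# that unspecified options keep their current values, so only a partial
# ElasticsearchClusterConfig is passed; all names are illustrative
# assumptions.
def _example_scale_out(domain_name='my-test-domain', count=3):
    import boto3
    client = boto3.client('es')
    return client.update_elasticsearch_domain_config(
        DomainName=domain_name,
        ElasticsearchClusterConfig={'InstanceCount': count},
    )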
admin.py
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.forms import UserChangeForm
from .models import (
Assignment,
AudioAsset,
Call,
Comment,
ContentLicense,
ContractorProfile,
ContractorSubscription,
Discussion,
DocumentAsset,
Event,
Facet,
FacetTemplate,
ImageAsset,
Network,
Note,
Organization,
OrganizationContractorAffiliation,
OrganizationSubscription,
OrganizationPublicProfile,
Pitch,
Platform,
PlatformAccount,
PrivateMessage,
Project,
SimpleAudio,
SimpleDocument,
SimpleImage,
SimpleVideo,
Story,
TalentEditorProfile,
Task,
User,
VideoAsset,
)
from .forms import FacetTemplateForm
class FacetUserChangeForm(UserChangeForm):
class Meta(UserChangeForm.Meta):
model = User
fields = "__all__"
@admin.register(User)
class FacetUserAdmin(UserAdmin):
form = FacetUserChangeForm
fieldsets = UserAdmin.fieldsets + (("Facet", {'fields': ('organization',
'user_type', 'credit_name', 'title', 'phone', 'bio', 'location', 'expertise',
'notes', 'photo'
)}), )
@admin.register(FacetTemplate)
class FacetTemplateAdmin(admin.ModelAdmin):
list_display = ['name', 'description', 'organization']
form = FacetTemplateForm
admin.site.register(ContractorProfile)
admin.site.register(TalentEditorProfile)
admin.site.register(OrganizationContractorAffiliation)
admin.site.register(Organization)
admin.site.register(OrganizationPublicProfile)
admin.site.register(OrganizationSubscription)
admin.site.register(ContractorSubscription)
admin.site.register(Network)
admin.site.register(Platform)
admin.site.register(PlatformAccount)
admin.site.register(Project)
admin.site.register(Story)
admin.site.register(Facet)
admin.site.register(Task)
admin.site.register(Event)
admin.site.register(Pitch)
admin.site.register(Call)
admin.site.register(Assignment)
admin.site.register(ContentLicense)
admin.site.register(Note)
admin.site.register(ImageAsset)
admin.site.register(DocumentAsset)
admin.site.register(AudioAsset)
admin.site.register(VideoAsset)
admin.site.register(SimpleImage)
admin.site.register(SimpleDocument)
admin.site.register(SimpleAudio)
admin.site.register(SimpleVideo)
admin.site.register(Comment)
admin.site.register(Discussion)
admin.site.register(PrivateMessage)
repository.go
// Copyright Project Harbor Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package registry
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
// "time"
"github.com/docker/distribution/manifest/schema1"
"github.com/docker/distribution/manifest/schema2"
commonhttp "github.com/goharbor/harbor/src/common/http"
"github.com/goharbor/harbor/src/common/utils"
)
// Repository holds information of a repository entity
type Repository struct {
Name string
Endpoint *url.URL
client *http.Client
}
// NewRepository returns an instance of Repository
func NewRepository(name, endpoint string, client *http.Client) (*Repository, error) {
name = strings.TrimSpace(name)
u, err := utils.ParseEndpoint(endpoint)
if err != nil {
return nil, err
}
repository := &Repository{
Name: name,
Endpoint: u,
client: client,
}
return repository, nil
}
func parseError(err error) error {
if urlErr, ok := err.(*url.Error); ok {
if regErr, ok := urlErr.Err.(*commonhttp.Error); ok {
return regErr
}
}
return err
}
// ListTag ...
func (r *Repository) ListTag() ([]string, error) {
tags := []string{}
req, err := http.NewRequest("GET", buildTagListURL(r.Endpoint.String(), r.Name), nil)
if err != nil {
return tags, err
}
resp, err := r.client.Do(req)
if err != nil {
return tags, parseError(err)
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return tags, err
}
if resp.StatusCode == http.StatusOK {
tagsResp := struct {
Tags []string `json:"tags"`
}{}
if err := json.Unmarshal(b, &tagsResp); err != nil {
return tags, err
}
tags = tagsResp.Tags
sort.Strings(tags)
return tags, nil
} else if resp.StatusCode == http.StatusNotFound {
// TODO remove the logic if the bug of registry is fixed
// It's a workaround for a bug of registry: when listing tags of
// a repository which is being pushed, a "NAME_UNKNOWN" error will
// be returned, while the catalog API can list this repository.
return tags, nil
}
return tags, &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
}
// ManifestExist ...
func (r *Repository) ManifestExist(reference string) (digest string, exist bool, err error) {
req, err := http.NewRequest("HEAD", buildManifestURL(r.Endpoint.String(), r.Name, reference), nil)
if err != nil {
return
}
req.Header.Add(http.CanonicalHeaderKey("Accept"), schema1.MediaTypeManifest)
req.Header.Add(http.CanonicalHeaderKey("Accept"), schema2.MediaTypeManifest)
resp, err := r.client.Do(req)
if err != nil {
err = parseError(err)
return
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
exist = true
digest = resp.Header.Get(http.CanonicalHeaderKey("Docker-Content-Digest"))
return
}
if resp.StatusCode == http.StatusNotFound {
return
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
return
}
// PullManifest ...
func (r *Repository) PullManifest(reference string, acceptMediaTypes []string) (digest, mediaType string, payload []byte, err error) {
req, err := http.NewRequest("GET", buildManifestURL(r.Endpoint.String(), r.Name, reference), nil)
if err != nil {
return
}
for _, mediaType := range acceptMediaTypes {
req.Header.Add(http.CanonicalHeaderKey("Accept"), mediaType)
}
resp, err := r.client.Do(req)
if err != nil {
err = parseError(err)
return
}
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
if resp.StatusCode == http.StatusOK {
digest = resp.Header.Get(http.CanonicalHeaderKey("Docker-Content-Digest"))
mediaType = resp.Header.Get(http.CanonicalHeaderKey("Content-Type"))
payload = b
return
}
err = &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
return
}
// PushManifest ...
func (r *Repository) PushManifest(reference, mediaType string, payload []byte) (digest string, err error) {
req, err := http.NewRequest("PUT", buildManifestURL(r.Endpoint.String(), r.Name, reference),
bytes.NewReader(payload))
if err != nil {
return
}
req.Header.Set(http.CanonicalHeaderKey("Content-Type"), mediaType)
resp, err := r.client.Do(req)
if err != nil {
err = parseError(err)
return
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusCreated || resp.StatusCode == http.StatusOK {
digest = resp.Header.Get(http.CanonicalHeaderKey("Docker-Content-Digest"))
return
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
return
}
// DeleteManifest ...
func (r *Repository) DeleteManifest(digest string) error {
req, err := http.NewRequest("DELETE", buildManifestURL(r.Endpoint.String(), r.Name, digest), nil)
if err != nil {
return err
}
resp, err := r.client.Do(req)
if err != nil {
return parseError(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusAccepted {
return nil
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
}
// MountBlob ...
func (r *Repository) MountBlob(digest, from string) error {
req, err := http.NewRequest("POST", buildMountBlobURL(r.Endpoint.String(), r.Name, digest, from), nil)
req.Header.Set(http.CanonicalHeaderKey("Content-Length"), "0")
resp, err := r.client.Do(req)
if err != nil {
return err
}
if resp.StatusCode/100 != 2 {
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return fmt.Errorf("status %d, body: %s", resp.StatusCode, string(b))
}
return nil
}
// DeleteTag ...
func (r *Repository) DeleteTag(tag string) error {
digest, exist, err := r.ManifestExist(tag)
if err != nil {
return err
}
if !exist {
return &commonhttp.Error{
Code: http.StatusNotFound,
}
}
return r.DeleteManifest(digest)
}
// BlobExist ...
func (r *Repository) BlobExist(digest string) (bool, error) {
req, err := http.NewRequest("HEAD", buildBlobURL(r.Endpoint.String(), r.Name, digest), nil)
if err != nil {
return false, err
}
resp, err := r.client.Do(req)
if err != nil {
return false, parseError(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
return true, nil
}
if resp.StatusCode == http.StatusNotFound {
return false, nil
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err
}
return false, &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
}
// PullBlob : client must close data if it is not nil
func (r *Repository) PullBlob(digest string) (size int64, data io.ReadCloser, err error) {
req, err := http.NewRequest("GET", buildBlobURL(r.Endpoint.String(), r.Name, digest), nil)
if err != nil {
return
}
resp, err := r.client.Do(req)
if err != nil {
err = parseError(err)
return
}
if resp.StatusCode == http.StatusOK {
contentLength := resp.Header.Get(http.CanonicalHeaderKey("Content-Length"))
size, err = strconv.ParseInt(contentLength, 10, 64)
if err != nil {
return
}
data = resp.Body
return
}
// do not close the body when the status code is 200; the caller reads and closes it
defer resp.Body.Close()
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
return
}
func (r *Repository) initiateBlobUpload(name string) (location, uploadUUID string, err error) {
req, err := http.NewRequest("POST", buildInitiateBlobUploadURL(r.Endpoint.String(), r.Name), nil)
req.Header.Set(http.CanonicalHeaderKey("Content-Length"), "0")
resp, err := r.client.Do(req)
if err != nil {
err = parseError(err)
return
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusAccepted {
location = resp.Header.Get(http.CanonicalHeaderKey("Location"))
uploadUUID = resp.Header.Get(http.CanonicalHeaderKey("Docker-Upload-UUID"))
return
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return
}
err = &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
return
}
func (r *Repository) monolithicBlobUpload(location, digest string, size int64, data io.Reader) error {
url, err := buildMonolithicBlobUploadURL(r.Endpoint.String(), location, digest)
if err != nil {
return err
}
req, err := http.NewRequest("PUT", url, data)
if err != nil {
return err
}
resp, err := r.client.Do(req)
if err != nil {
return parseError(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusCreated {
return nil
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
}
// PushBlob ...
func (r *Repository) PushBlob(digest string, size int64, data io.Reader) error {
location, _, err := r.initiateBlobUpload(r.Name)
if err != nil {
return err
}
return r.monolithicBlobUpload(location, digest, size, data)
}
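// PushBlobIfAbsent is a hedged convenience sketch, not part of the original
// Harbor API: it combines BlobExist and PushBlob so callers can skip uploads
// for content the registry already stores.
func (r *Repository) PushBlobIfAbsent(digest string, size int64, data io.Reader) error {
exist, err := r.BlobExist(digest)
if err != nil {
return err
}
if exist {
// the registry already has this blob; nothing to upload
return nil
}
return r.PushBlob(digest, size, data)
}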
// DeleteBlob ...
func (r *Repository) DeleteBlob(digest string) error {
req, err := http.NewRequest("DELETE", buildBlobURL(r.Endpoint.String(), r.Name, digest), nil)
if err != nil {
return err
}
resp, err := r.client.Do(req)
if err != nil {
return parseError(err)
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusAccepted {
return nil
}
b, err := ioutil.ReadAll(resp.Body)
if err != nil {
return err
}
return &commonhttp.Error{
Code: resp.StatusCode,
Message: string(b),
}
}
func buildPingURL(endpoint string) string {
return fmt.Sprintf("%s/v2/", endpoint)
}
func buildTagListURL(endpoint, repoName string) string {
return fmt.Sprintf("%s/v2/%s/tags/list", endpoint, repoName)
}
func buildManifestURL(endpoint, repoName, reference string) string {
return fmt.Sprintf("%s/v2/%s/manifests/%s", endpoint, repoName, reference)
}
func buildBlobURL(endpoint, repoName, reference string) string {
return fmt.Sprintf("%s/v2/%s/blobs/%s", endpoint, repoName, reference)
}
func buildMountBlobURL(endpoint, repoName, digest, from string) string {
return fmt.Sprintf("%s/v2/%s/blobs/uploads/?mount=%s&from=%s", endpoint, repoName, digest, from)
}
func buildInitiateBlobUploadURL(endpoint, repoName string) string {
return fmt.Sprintf("%s/v2/%s/blobs/uploads/", endpoint, repoName)
}
func buildMonolithicBlobUploadURL(endpoint, location, digest string) (string, error) {
relative, err := isRelativeURL(location)
if err != nil {
return "", err
}
// when the registry enables "relativeurls", the location returned
// has no scheme and host part
if relative {
location = endpoint + location
}
query := ""
if strings.ContainsRune(location, '?') {
query = "&"
} else {
query = "?"
}
query += fmt.Sprintf("digest=%s", digest)
return fmt.Sprintf("%s%s", location, query), nil
}
func isRelativeURL(endpoint string) (bool, error) {
u, err := url.Parse(endpoint)
if err != nil {
return false, err
}
return !u.IsAbs(), nil
}
numpy_backend_test.py
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for implementations of batched variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import hypothesis as hp
from hypothesis import strategies as hps
from hypothesis.extra import numpy as hpnp
import numpy as np
import tensorflow as tf
from tensorflow_probability.python.experimental.auto_batching import backend_test_lib as backend_test
from tensorflow_probability.python.experimental.auto_batching import instructions as inst
from tensorflow_probability.python.experimental.auto_batching import numpy_backend
NP_BACKEND = numpy_backend.NumpyBackend()
def var_init(max_stack_depth, initial_value):
type_ = inst.TensorType(initial_value.dtype, initial_value.shape[1:])
var = NP_BACKEND.create_variable(
None, inst.VariableAllocation.FULL, type_,
max_stack_depth, batch_size=initial_value.shape[0])
return var.update(
initial_value, NP_BACKEND.full_mask(initial_value.shape[0]))
# A TF test case for self.assertAllEqual, but doesn't use TF so doesn't care
# about Eager vs Graph mode.
class NumpyVariableTest(tf.test.TestCase, backend_test.VariableTestCase):
def testNumpySmoke(self):
"""Test the property on a specific example, without relying on Hypothesis."""
init = (12, np.random.randn(3, 2, 2).astype(np.float32))
ops = [('pop', [False, False, True]),
('push', [True, False, True]),
('update', np.ones((3, 2, 2), dtype=np.float32),
[True, True, False]),
('pop', [True, False, True])]
self.check_same_results(init, ops, var_init)
@hp.given(hps.data())
@hp.settings(
deadline=None,
max_examples=100)
def testNumpyVariableRandomOps(self, data):
# Hypothesis strategy:
# Generate a random max stack depth and value shape
# Deduce the batch size from the value shape
# Make a random dtype
# Generate a random initial value of that dtype and shape
# Generate ops, some of which write random values of that dtype and shape
max_stack_depth = data.draw(hps.integers(min_value=1, max_value=1000))
value_shape = data.draw(hpnp.array_shapes(min_dims=1))
batch_size = value_shape[0]
dtype = data.draw(hpnp.scalar_dtypes())
masks = hpnp.arrays(dtype=np.bool_, shape=[batch_size])
values = hpnp.arrays(dtype, value_shape)
init_val = data.draw(values)
ops = data.draw(
hps.lists(
hps.one_of(
hps.tuples(hps.just('update'), values, masks),
hps.tuples(hps.just('push'), masks),
hps.tuples(hps.just('pop'), masks), # preserve line break
hps.tuples(hps.just('read')))))
self.check_same_results((max_stack_depth, init_val), ops, var_init)
if __name__ == '__main__':
tf.test.main()
_funnel.py
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Funnel(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "funnel"
_valid_props = {
"alignmentgroup",
"cliponaxis",
"connector",
"constraintext",
"customdata",
"customdatasrc",
"dx",
"dy",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextanchor",
"insidetextfont",
"legendgroup",
"legendgrouptitle",
"legendrank",
"marker",
"meta",
"metasrc",
"name",
"offset",
"offsetgroup",
"opacity",
"orientation",
"outsidetextfont",
"selectedpoints",
"showlegend",
"stream",
"text",
"textangle",
"textfont",
"textinfo",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"type",
"uid",
"uirevision",
"visible",
"width",
"x",
"x0",
"xaxis",
"xhoverformat",
"xperiod",
"xperiod0",
"xperiodalignment",
"xsrc",
"y",
"y0",
"yaxis",
"yhoverformat",
"yperiod",
"yperiod0",
"yperiodalignment",
"ysrc",
}
# alignmentgroup
# --------------
@property
def alignmentgroup(self):
"""
Set several traces linked to the same position axis or matching
axes to the same alignmentgroup. This controls whether bars
compute their positional range dependently or independently.
The 'alignmentgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["alignmentgroup"]
@alignmentgroup.setter
def alignmentgroup(self, val):
self["alignmentgroup"] = val
# cliponaxis
# ----------
@property
def cliponaxis(self):
"""
Determines whether the text nodes are clipped about the subplot
axes. To show the text nodes above axis lines and tick labels,
make sure to set `xaxis.layer` and `yaxis.layer` to *below
traces*.
The 'cliponaxis' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cliponaxis"]
@cliponaxis.setter
def cliponaxis(self, val):
self["cliponaxis"] = val
# connector
# ---------
@property
def connector(self):
"""
The 'connector' property is an instance of Connector
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Connector`
- A dict of string/value properties that will be passed
to the Connector constructor
Supported dict properties:
fillcolor
Sets the fill color.
line
:class:`plotly.graph_objects.funnel.connector.Line`
instance or dict with compatible properties
visible
Determines if connector regions and lines are
drawn.
Returns
-------
plotly.graph_objs.funnel.Connector
"""
return self["connector"]
@connector.setter
def connector(self, val):
self["connector"] = val
# constraintext
# -------------
@property
def constraintext(self):
"""
Constrain the size of text inside or outside a bar to be no
larger than the bar itself.
The 'constraintext' property is an enumeration that may be specified as:
- One of the following enumeration values:
['inside', 'outside', 'both', 'none']
Returns
-------
Any
"""
return self["constraintext"]
@constraintext.setter
def constraintext(self, val):
self["constraintext"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note that
"scatter" traces also append customdata items to the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`customdata`.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# dx
# --
@property
def dx(self):
"""
Sets the x coordinate step. See `x0` for more info.
The 'dx' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dx"]
@dx.setter
def dx(self, val):
self["dx"] = val
# dy
# --
@property
def dy(self):
"""
Sets the y coordinate step. See `y0` for more info.
The 'dy' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dy"]
@dy.setter
def dy(self, val):
self["dy"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['name', 'x', 'y', 'text', 'percent initial', 'percent previous', 'percent total'] joined with '+' characters
(e.g. 'name+x')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within the hover label box. Has an effect
only if the hover label text spans two or more
lines
alignsrc
Sets the source reference on Chart Studio Cloud
for `align`.
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for `bgcolor`.
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for `bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for `namelength`.
Returns
-------
plotly.graph_objs.funnel.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y: %{y}"
as well as %{xother}, {%_xother}, {%_xother_}, {%xother_}. When
showing info for several points, "xother" will be added to
those with different x positions from the first point. An
underscore before or after "(x|y)other" will add a space on
that side, only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. The variables available in `hovertemplate`
are the ones emitted as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attribute that can be specified per-point
(the ones that are `arrayOk: true`) is available, as are the
variables `percentInitial`, `percentPrevious` and `percentTotal`.
Anything contained in tag `<extra>` is displayed in the
secondary box, for example "<extra>{fullData.name}</extra>". To
hide the secondary box completely, use an empty tag
`<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each (x,y) pair. If a
single string, the same string appears over all the data
points. If an array of strings, the items are mapped in order to
this trace's (x,y) coordinates. To be seen, trace
`hoverinfo` must contain a "text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`hovertext`.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids provide object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ids`.
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# insidetextanchor
# ----------------
@property
def insidetextanchor(self):
"""
Determines if texts are kept at center or start/end points in
`textposition` "inside" mode.
The 'insidetextanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['end', 'middle', 'start']
Returns
-------
Any
"""
return self["insidetextanchor"]
@insidetextanchor.setter
def insidetextanchor(self, val):
self["insidetextanchor"] = val
# insidetextfont
# --------------
@property
def insidetextfont(self):
"""
Sets the font used for `text` lying inside the bar.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.funnel.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
# legendgrouptitle
# ----------------
@property
def legendgrouptitle(self):
"""
The 'legendgrouptitle' property is an instance of Legendgrouptitle
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Legendgrouptitle`
- A dict of string/value properties that will be passed
to the Legendgrouptitle constructor
Supported dict properties:
font
Sets this legend group's title font.
text
Sets the title of the legend group.
Returns
-------
plotly.graph_objs.funnel.Legendgrouptitle
"""
return self["legendgrouptitle"]
@legendgrouptitle.setter
def legendgrouptitle(self, val):
self["legendgrouptitle"] = val
# legendrank
# ----------
@property
def legendrank(self):
"""
Sets the legend rank for this trace. Items and groups with
smaller ranks are presented on top/left side while with
with *reversed* `legend.traceorder` they are on bottom/right side.
The default legendrank is 1000, so that you can use ranks less
than 1000 to place certain items before all unranked items, and
ranks greater than 1000 to go after all unranked items.
The 'legendrank' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["legendrank"]
@legendrank.setter
def legendrank(self, val):
self["legendrank"] = val
# marker
# ------
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Supported dict properties:
autocolorscale
Determines whether the colorscale is a default
palette (`autocolorscale: true`) or the palette
determined by `marker.colorscale`. Has an
effect only if `marker.color` is set to a
numerical array. In case `colorscale` is
unspecified or `autocolorscale` is true, the
default palette will be chosen according to
whether numbers in the `color` array are all
positive, all negative or mixed.
cauto
Determines whether or not the color domain is
computed with respect to the input data (here
in `marker.color`) or the bounds set in
`marker.cmin` and `marker.cmax`. Has an effect
only if `marker.color` is set to a numerical
array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by
scaling `marker.cmin` and/or `marker.cmax` to
be equidistant to this point. Has an effect
only if `marker.color` is set to a numerical
array. Value should have the same units as in
`marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has
an effect only if `marker.color` is set to a
numerical array. Value should have the same
units as in `marker.color` and if set,
`marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a
specific color or an array of numbers that are
mapped to the colorscale relative to the max
and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
coloraxis
Sets a reference to a shared color axis.
References to these shared color axes are
"coloraxis", "coloraxis2", "coloraxis3", etc.
Settings for these shared color axes are set in
the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple
color scales can be linked to the same color
axis.
colorbar
:class:`plotly.graph_objects.funnel.marker.Colo
rBar` instance or dict with compatible
properties
colorscale
Sets the colorscale. Has an effect only if
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays
mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At
minimum, a mapping for the lowest (0) and
highest (1) values are required. For example,
`[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in
color space, use `marker.cmin` and
`marker.cmax`. Alternatively, `colorscale` may
be a palette name string of the following list:
Blackbody,Bluered,Blues,Cividis,Earth,Electric,
Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
line
:class:`plotly.graph_objects.funnel.marker.Line
` instance or dict with compatible properties
opacity
Sets the opacity of the bars.
opacitysrc
Sets the source reference on Chart Studio Cloud
for `opacity`.
reversescale
Reverses the color mapping if true. Has an
effect only if `marker.color` is set to a
numerical array. If true, `marker.cmin` will
correspond to the last color in the array and
`marker.cmax` will correspond to the first
color.
showscale
Determines whether or not a colorbar is
displayed for this trace. Has an effect only if
`marker.color` is set to a numerical array.
Returns
-------
plotly.graph_objs.funnel.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
# meta
# ----
@property
def meta(self):
"""
Assigns extra meta information associated with this trace that
can be used in various text attributes. Attributes such as
trace `name`, graph, axis and colorbar `title.text`, annotation
`text`, `rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta` values in
an attribute in the same trace, simply use `%{meta[i]}` where
`i` is the index or key of the `meta` item in question. To
access trace `meta` in layout attributes, use
`%{data[n].meta[i]}` where `i` is the index or key of the
`meta` and `n` is the trace index.
The 'meta' property accepts values of any type
Returns
-------
Any|numpy.ndarray
"""
return self["meta"]
@meta.setter
def meta(self, val):
self["meta"] = val
# metasrc
# -------
@property
def metasrc(self):
"""
Sets the source reference on Chart Studio Cloud for `meta`.
The 'metasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["metasrc"]
@metasrc.setter
def metasrc(self, val):
self["metasrc"] = val
# name
# ----
@property
def name(self):
"""
Sets the trace name. The trace name appears as the legend item
and on hover.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# offset
# ------
@property
def offset(self):
"""
Shifts the position where the bar is drawn (in position axis
units). In "group" barmode, traces that set "offset" will be
excluded and drawn in "overlay" mode instead.
The 'offset' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["offset"]
@offset.setter
def offset(self, val):
self["offset"] = val
# offsetgroup
# -----------
@property
def offsetgroup(self):
"""
Set several traces linked to the same position axis or matching
axes to the same offsetgroup where bars of the same position
coordinate will line up.
The 'offsetgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["offsetgroup"]
@offsetgroup.setter
def offsetgroup(self, val):
self["offsetgroup"] = val
# opacity
# -------
@property
def opacity(self):
"""
Sets the opacity of the trace.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
# orientation
# -----------
@property
def orientation(self):
"""
Sets the orientation of the funnels. With "v" ("h"), the value
of the each bar spans along the vertical (horizontal). By
default funnels are tend to be oriented horizontally; unless
only "y" array is presented or orientation is set to "v". Also
regarding graphs including only 'horizontal' funnels,
"autorange" on the "y-axis" are set to "reversed".
The 'orientation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['v', 'h']
Returns
-------
Any
"""
return self["orientation"]
@orientation.setter
def orientation(self, val):
self["orientation"] = val
# outsidetextfont
# ---------------
@property
def outsidetextfont(self):
"""
Sets the font used for `text` lying outside the bar.
The 'outsidetextfont' property is an instance of Outsidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Outsidetextfont`
- A dict of string/value properties that will be passed
to the Outsidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.funnel.Outsidetextfont
"""
return self["outsidetextfont"]
@outsidetextfont.setter
def outsidetextfont(self, val):
self["outsidetextfont"] = val
# selectedpoints
# --------------
@property
def selectedpoints(self):
"""
Array containing integer indices of selected points. Has an
effect only for traces that support selections. Note that an
empty array means an empty selection where the `unselected` are
turned on for all points, whereas any other non-array value
means no selection at all, where the `selected` and `unselected`
styles have no effect.
The 'selectedpoints' property accepts values of any type
Returns
-------
Any
"""
return self["selectedpoints"]
@selectedpoints.setter
def selectedpoints(self, val):
self["selectedpoints"] = val
# showlegend
# ----------
@property
def showlegend(self):
"""
Determines whether or not an item corresponding to this trace
is shown in the legend.
The 'showlegend' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlegend"]
@showlegend.setter
def showlegend(self, val):
self["showlegend"] = val
# stream
# ------
@property
def stream(self):
"""
The 'stream' property is an instance of Stream
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Stream`
- A dict of string/value properties that will be passed
to the Stream constructor
Supported dict properties:
maxpoints
Sets the maximum number of points to keep on
the plots from an incoming stream. If
`maxpoints` is set to 50, only the newest 50
points will be displayed on the plot.
token
The stream id number links a data trace on a
plot with a stream. See https://chart-
studio.plotly.com/settings for more details.
Returns
-------
plotly.graph_objs.funnel.Stream
"""
return self["stream"]
@stream.setter
def stream(self, val):
self["stream"] = val
# text
# ----
@property
def text(self):
"""
Sets text elements associated with each (x,y) pair. If a single
string, the same string appears over all the data points. If an
array of strings, the items are mapped in order to this
trace's (x,y) coordinates. If trace `hoverinfo` contains a
"text" flag and "hovertext" is not set, these elements will be
seen in the hover labels.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# textangle
# ---------
@property
def textangle(self):
"""
Sets the angle of the tick labels with respect to the bar. For
example, a `tickangle` of -90 draws the tick labels vertically.
With "auto" the texts may automatically be rotated to fit with
the maximum size in bars.
The 'textangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["textangle"]
@textangle.setter
def textangle(self, val):
self["textangle"] = val
# textfont
# --------
@property
def textfont(self):
"""
Sets the font used for `text`.
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnel.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
Returns
-------
plotly.graph_objs.funnel.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
# textinfo
# --------
@property
def textinfo(self):
"""
Determines which trace information appear on the graph. In the
case of having multiple funnels, percentages & totals are
computed separately (per trace).
The 'textinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'percent initial', 'percent previous', 'percent total', 'value'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["textinfo"]
@textinfo.setter
def textinfo(self, val):
self["textinfo"] = val
# textposition
# ------------
@property
def textposition(self):
"""
Specifies the location of the `text`. "inside" positions `text`
inside, next to the bar end (rotated and scaled if needed).
"outside" positions `text` outside, next to the bar end (scaled
if needed), unless there is another bar stacked on this one,
then the text gets pushed inside. "auto" tries to position
`text` inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If "none", no
text appears.
The 'textposition' property is an enumeration that may be specified as:
- One of the following enumeration values:
['inside', 'outside', 'auto', 'none']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textposition"]
@textposition.setter
def textposition(self, val):
self["textposition"] = val
# textpositionsrc
# ---------------
@property
def textpositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`textposition`.
The 'textpositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textpositionsrc"]
@textpositionsrc.setter
def textpositionsrc(self, val):
self["textpositionsrc"] = val
# textsrc
# -------
@property
def textsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `text`.
The 'textsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textsrc"]
@textsrc.setter
def textsrc(self, val):
self["textsrc"] = val
# texttemplate
# ------------
@property
def texttemplate(self):
"""
Template string used for rendering the information text that
appear on points. Note that this will override `textinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format for
details on the formatting syntax. Dates are formatted using
d3-time-format's syntax %{variable|d3-time-format}, for example
"Day: %{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the date
formatting syntax. Every attribute that can be specified per-
point (the ones that are `arrayOk: true`) is available, as are
the variables `percentInitial`, `percentPrevious`,
`percentTotal`, `label` and `value`.
The 'texttemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["texttemplate"]
@texttemplate.setter
def texttemplate(self, val):
self["texttemplate"] = val
# texttemplatesrc
# ---------------
@property
def texttemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
The 'texttemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["texttemplatesrc"]
@texttemplatesrc.setter
def texttemplatesrc(self, val):
self["texttemplatesrc"] = val
# uid
# ---
@property
def uid(self):
"""
Assign an id to this trace. Use this to provide object
constancy between traces during animations and transitions.
The 'uid' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["uid"]
@uid.setter
def uid(self, val):
self["uid"] = val
# uirevision
# ----------
@property
def uirevision(self):
"""
Controls persistence of some user-driven changes to the trace:
`constraintrange` in `parcoords` traces, as well as some
`editable: true` modifications such as `name` and
`colorbar.title`. Defaults to `layout.uirevision`. Note that
other user-driven trace attribute changes are controlled by
`layout` attributes: `trace.visible` is controlled by
`layout.legend.uirevision`, `selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)` (accessible
with `config: {editable: true}`) is controlled by
`layout.editrevision`. Trace changes are tracked by `uid`,
which only falls back on trace index if no `uid` is provided.
So if your app can add/remove traces before the end of the
`data` array, such that the same trace has a different index,
you can still preserve user-driven changes if you give each
trace a `uid` that stays with it as it moves.
The 'uirevision' property accepts values of any type
Returns
-------
Any
"""
return self["uirevision"]
@uirevision.setter
def uirevision(self, val):
self["uirevision"] = val
# visible
# -------
@property
def visible(self):
"""
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as a
legend item (provided that the legend itself is visible).
The 'visible' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'legendonly']
Returns
-------
Any
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
# width
# -----
@property
def width(self):
"""
Sets the bar width (in position axis units).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
# x
# -
@property
def x(self):
"""
Sets the x coordinates.
The 'x' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# x0
# --
@property
def x0(self):
"""
Alternate to `x`. Builds a linear space of x coordinates. Use
with `dx` where `x0` is the starting coordinate and `dx` the
step.
The 'x0' property accepts values of any type
Returns
-------
Any
"""
return self["x0"]
@x0.setter
def x0(self, val):
self["x0"] = val
# xaxis
# -----
@property
def xaxis(self):
"""
Sets a reference between this trace's x coordinates and a 2D
cartesian x axis. If "x" (the default value), the x coordinates
refer to `layout.xaxis`. If "x2", the x coordinates refer to
`layout.xaxis2`, and so on.
The 'xaxis' property is an identifier of a particular
subplot, of type 'x', that may be specified as the string 'x'
optionally followed by an integer >= 1
(e.g. 'x', 'x1', 'x2', 'x3', etc.)
Returns
-------
str
"""
return self["xaxis"]
@xaxis.setter
def xaxis(self, val):
self["xaxis"] = val
# xhoverformat
# ------------
@property
def xhoverformat(self):
"""
Sets the hover text formatting rule for `x` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `xaxis.hoverformat`.
The 'xhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["xhoverformat"]
@xhoverformat.setter
def xhoverformat(self, val):
self["xhoverformat"] = val
# xperiod
# -------
@property
def xperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the x axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'xperiod' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod"]
@xperiod.setter
def xperiod(self, val):
self["xperiod"] = val
# xperiod0
# --------
@property
def xperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the x0
axis. When `x0period` is a round number of weeks, the `x0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'xperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["xperiod0"]
@xperiod0.setter
def xperiod0(self, val):
self["xperiod0"] = val
# xperiodalignment
# ----------------
@property
def xperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
The 'xperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["xperiodalignment"]
@xperiodalignment.setter
def xperiodalignment(self, val):
self["xperiodalignment"] = val
# xsrc
# ----
@property
def xsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `x`.
The 'xsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["xsrc"]
@xsrc.setter
def xsrc(self, val):
self["xsrc"] = val
# y
# -
@property
def y(self):
"""
Sets the y coordinates.
The 'y' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# y0
# --
@property
def y0(self):
"""
Alternate to `y`. Builds a linear space of y coordinates. Use
with `dy` where `y0` is the starting coordinate and `dy` the
step.
The 'y0' property accepts values of any type
Returns
-------
Any
"""
return self["y0"]
@y0.setter
def y0(self, val):
self["y0"] = val
# yaxis
# -----
@property
def yaxis(self):
"""
Sets a reference between this trace's y coordinates and a 2D
cartesian y axis. If "y" (the default value), the y coordinates
refer to `layout.yaxis`. If "y2", the y coordinates refer to
`layout.yaxis2`, and so on.
The 'yaxis' property is an identifier of a particular
subplot, of type 'y', that may be specified as the string 'y'
optionally followed by an integer >= 1
(e.g. 'y', 'y1', 'y2', 'y3', etc.)
Returns
-------
str
"""
return self["yaxis"]
@yaxis.setter
def yaxis(self, val):
self["yaxis"] = val
# yhoverformat
# ------------
@property
def yhoverformat(self):
"""
Sets the hover text formatting rule for `y` using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display *09~15~23.46*By default the values
are formatted using `yaxis.hoverformat`.
The 'yhoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["yhoverformat"]
@yhoverformat.setter
def yhoverformat(self, val):
self["yhoverformat"] = val
# yperiod
# -------
@property
def yperiod(self):
"""
Only relevant when the axis `type` is "date". Sets the period
positioning in milliseconds or "M<n>" on the y axis. Special
values in the form of "M<n>" could be used to declare the
number of months. In this case `n` must be a positive integer.
The 'yperiod' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod"]
@yperiod.setter
def yperiod(self, val):
self["yperiod"] = val
# yperiod0
# --------
@property
def yperiod0(self):
"""
Only relevant when the axis `type` is "date". Sets the base for
period positioning in milliseconds or date string on the y0
axis. When `y0period` is a round number of weeks, the `y0period0`
by default would be on a Sunday i.e. 2000-01-02, otherwise it
would be at 2000-01-01.
The 'yperiod0' property accepts values of any type
Returns
-------
Any
"""
return self["yperiod0"]
@yperiod0.setter
def yperiod0(self, val):
self["yperiod0"] = val
# yperiodalignment
# ----------------
@property
def yperiodalignment(self):
"""
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
The 'yperiodalignment' property is an enumeration that may be specified as:
- One of the following enumeration values:
['start', 'middle', 'end']
Returns
-------
Any
"""
return self["yperiodalignment"]
@yperiodalignment.setter
def yperiodalignment(self, val):
self["yperiodalignment"] = val
# ysrc
# ----
@property
def ysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `y`.
The 'ysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ysrc"]
@ysrc.setter
def ysrc(self, val):
self["ysrc"] = val
# type
# ----
@property
def type(self):
return self._props["type"]
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
connector
:class:`plotly.graph_objects.funnel.Connector` instance
or dict with compatible properties
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnel.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available, as are the variables `percentInitial`,
`percentPrevious` and `percentTotal`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnel.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.funnel.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the funnels. With "v" ("h"),
the value of the each bar spans along the vertical
(horizontal). By default funnels are tend to be
oriented horizontally; unless only "y" array is
presented or orientation is set to "v". Also regarding
graphs including only 'horizontal' funnels, "autorange"
on the "y-axis" are set to "reversed".
outsidetextfont
Sets the font used for `text` lying outside the bar.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnel.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on the graph.
In the case of having multiple funnels, percentages &
totals are computed separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available, as are the variables `percentInitial`,
`percentPrevious`, `percentTotal`, `label` and `value`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the x0 axis. When `x0period` is a round number
of weeks, the `x0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or date
string on the y0 axis. When `y0period` is a round number
of weeks, the `y0period0` by default would be on a
Sunday i.e. 2000-01-02, otherwise it would be at
2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
"""
def __init__(
self,
arg=None,
alignmentgroup=None,
cliponaxis=None,
connector=None,
constraintext=None,
customdata=None,
customdatasrc=None,
dx=None,
dy=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextanchor=None,
insidetextfont=None,
legendgroup=None,
legendgrouptitle=None,
legendrank=None,
marker=None,
meta=None,
metasrc=None,
name=None,
offset=None,
offsetgroup=None,
opacity=None,
orientation=None,
outsidetextfont=None,
selectedpoints=None,
showlegend=None,
stream=None,
text=None,
textangle=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
uid=None,
uirevision=None,
visible=None,
width=None,
x=None,
x0=None,
xaxis=None,
xhoverformat=None,
xperiod=None,
xperiod0=None,
xperiodalignment=None,
xsrc=None,
y=None,
y0=None,
yaxis=None,
yhoverformat=None,
yperiod=None,
yperiod0=None,
yperiodalignment=None,
ysrc=None,
**kwargs,
):
"""
Construct a new Funnel object
Visualize stages in a process using length-encoded bars. This
trace can be used to show data in either a part-to-whole
representation wherein each item appears in a single stage, or
in a "drop-off" representation wherein each item appears in
each stage it traversed. See also the "funnelarea" trace type
for a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnel`
alignmentgroup
Set several traces linked to the same position axis or
matching axes to the same alignmentgroup. This controls
whether bars compute their positional range dependently
or independently.
cliponaxis
Determines whether the text nodes are clipped about the
subplot axes. To show the text nodes above axis lines
and tick labels, make sure to set `xaxis.layer` and
`yaxis.layer` to *below traces*.
connector
:class:`plotly.graph_objects.funnel.Connector` instance
or dict with compatible properties
constraintext
Constrain the size of text inside or outside a bar to
be no larger than the bar itself.
customdata
Assigns extra data to each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
`customdata`.
dx
Sets the x coordinate step. See `x0` for more info.
dy
Sets the y coordinate step. See `y0` for more info.
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
`hoverinfo`.
hoverlabel
:class:`plotly.graph_objects.funnel.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}" as well as %{xother}, {%_xother},
{%_xother_}, {%xother_}. When showing info for several
points, "xother" will be added to those with different
x positions from the first point. An underscore before
or after "(x|y)other" will add a space on that side,
only when this field is shown. Numbers are formatted
using d3-format's syntax %{variable:d3-format}, for
example "Price: %{y:$.2f}".
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. The variables available in
`hovertemplate` are the ones emitted as event data
described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available, as are the variables `percentInitial`,
`percentPrevious` and `percentTotal`. Anything
contained in tag `<extra>` is displayed in the
secondary box, for example
"<extra>{fullData.name}</extra>". To hide the secondary
box completely, use an empty tag `<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
`hovertemplate`.
hovertext
Sets hover text elements associated with each (x,y)
pair. If a single string, the same string appears over
all the data points. If an array of strings, the items
are mapped in order to this trace's (x,y)
coordinates. To be seen, trace `hoverinfo` must contain
a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
`hovertext`.
ids
Assigns id labels to each datum. These ids provide object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
`ids`.
insidetextanchor
Determines if texts are kept at center or start/end
points in `textposition` "inside" mode.
insidetextfont
Sets the font used for `text` lying inside the bar.
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
legendgrouptitle
:class:`plotly.graph_objects.funnel.Legendgrouptitle`
instance or dict with compatible properties
legendrank
Sets the legend rank for this trace. Items and groups
with smaller ranks are presented on top/left side while
with *reversed* `legend.traceorder` they are on
bottom/right side. The default legendrank is 1000, so
that you can use ranks less than 1000 to place certain
items before all unranked items, and ranks greater than
1000 to go after all unranked items.
marker
:class:`plotly.graph_objects.funnel.Marker` instance or
dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`,
`rangeselector`, `updatemenus` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n].meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
`meta`.
name
Sets the trace name. The trace name appears as the
legend item and on hover.
offset
Shifts the position where the bar is drawn (in position
axis units). In "group" barmode, traces that set
"offset" will be excluded and drawn in "overlay" mode
instead.
offsetgroup
Set several traces linked to the same position axis or
matching axes to the same offsetgroup where bars of the
same position coordinate will line up.
opacity
Sets the opacity of the trace.
orientation
Sets the orientation of the funnels. With "v" ("h"),
the value of the each bar spans along the vertical
(horizontal). By default funnels are tend to be
oriented horizontally; unless only "y" array is
presented or orientation is set to "v". Also regarding
graphs including only 'horizontal' funnels, "autorange"
on the "y-axis" are set to "reversed".
outsidetextfont
Sets the font used for `text` lying outside the bar.
selectedpoints
Array containing integer indices of selected points.
Has an effect only for traces that support selections.
Note that an empty array means an empty selection where
the `unselected` are turned on for all points, whereas
any other non-array value means no selection at all, where
the `selected` and `unselected` styles have no effect.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnel.Stream` instance or
dict with compatible properties
text
Sets text elements associated with each (x,y) pair. If
a single string, the same string appears over all the
data points. If an array of strings, the items are
mapped in order to this trace's (x,y) coordinates.
If trace `hoverinfo` contains a "text" flag and
"hovertext" is not set, these elements will be seen in
the hover labels.
textangle
Sets the angle of the tick labels with respect to the
bar. For example, a `tickangle` of -90 draws the tick
labels vertically. With "auto" the texts may
automatically be rotated to fit with the maximum size
in bars.
textfont
Sets the font used for `text`.
textinfo
Determines which trace information appear on the graph.
In the case of having multiple funnels, percentages &
totals are computed separately (per trace).
textposition
Specifies the location of the `text`. "inside"
positions `text` inside, next to the bar end (rotated
and scaled if needed). "outside" positions `text`
outside, next to the bar end (scaled if needed), unless
there is another bar stacked on this one, then the text
gets pushed inside. "auto" tries to position `text`
inside the bar, but if the bar is too small and no bar
is stacked on this one the text is moved outside. If
"none", no text appears.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
`textposition`.
textsrc
Sets the source reference on Chart Studio Cloud for
`text`.
texttemplate
Template string used for rendering the information text
that appears on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". See
https://github.com/d3/d3-format/tree/v1.4.5#d3-format
for details on the formatting syntax. Dates are
formatted using d3-time-format's syntax
%{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". See https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format for details on the
date formatting syntax. Every attribute that can be
specified per-point (the ones that are `arrayOk: true`)
is available. Additionally, the variables
`percentInitial`, `percentPrevious`, `percentTotal`,
`label` and `value` are available.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
`texttemplate`.
uid
Assign an id to this trace. Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
width
Sets the bar width (in position axis units).
x
Sets the x coordinates.
x0
Alternate to `x`. Builds a linear space of x
coordinates. Use with `dx` where `x0` is the starting
coordinate and `dx` the step.
xaxis
Sets a reference between this trace's x coordinates and
a 2D cartesian x axis. If "x" (the default value), the
x coordinates refer to `layout.xaxis`. If "x2", the x
coordinates refer to `layout.xaxis2`, and so on.
xhoverformat
Sets the hover text formatting rule for `x` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `xaxis.hoverformat`.
xperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the x
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
xperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or as a
date string on the x axis. When `xperiod` is a round
number of weeks, `xperiod0` defaults to a Sunday, i.e.
2000-01-02; otherwise it defaults to 2000-01-01.
xperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the x axis.
xsrc
Sets the source reference on Chart Studio Cloud for
`x`.
y
Sets the y coordinates.
y0
Alternate to `y`. Builds a linear space of y
coordinates. Use with `dy` where `y0` is the starting
coordinate and `dy` the step.
yaxis
Sets a reference between this trace's y coordinates and
a 2D cartesian y axis. If "y" (the default value), the
y coordinates refer to `layout.yaxis`. If "y2", the y
coordinates refer to `layout.yaxis2`, and so on.
yhoverformat
Sets the hover text formatting rule for `y` using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display *09~15~23.46*. By default the values are
formatted using `yaxis.hoverformat`.
yperiod
Only relevant when the axis `type` is "date". Sets the
period positioning in milliseconds or "M<n>" on the y
axis. Special values in the form of "M<n>" could be
used to declare the number of months. In this case `n`
must be a positive integer.
yperiod0
Only relevant when the axis `type` is "date". Sets the
base for period positioning in milliseconds or as a
date string on the y axis. When `yperiod` is a round
number of weeks, `yperiod0` defaults to a Sunday, i.e.
2000-01-02; otherwise it defaults to 2000-01-01.
yperiodalignment
Only relevant when the axis `type` is "date". Sets the
alignment of data points on the y axis.
ysrc
Sets the source reference on Chart Studio Cloud for
`y`.
Returns
-------
Funnel
"""
super(Funnel, self).__init__("funnel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Funnel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Funnel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("alignmentgroup", None)
_v = alignmentgroup if alignmentgroup is not None else _v
if _v is not None:
self["alignmentgroup"] = _v
_v = arg.pop("cliponaxis", None)
_v = cliponaxis if cliponaxis is not None else _v
if _v is not None:
self["cliponaxis"] = _v
_v = arg.pop("connector", None)
_v = connector if connector is not None else _v
if _v is not None:
self["connector"] = _v
_v = arg.pop("constraintext", None)
_v = constraintext if constraintext is not None else _v
if _v is not None:
self["constraintext"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dx", None)
_v = dx if dx is not None else _v
if _v is not None:
self["dx"] = _v
_v = arg.pop("dy", None)
_v = dy if dy is not None else _v
if _v is not None:
self["dy"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextanchor", None)
_v = insidetextanchor if insidetextanchor is not None else _v
if _v is not None:
self["insidetextanchor"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("legendgrouptitle", None)
_v = legendgrouptitle if legendgrouptitle is not None else _v
if _v is not None:
self["legendgrouptitle"] = _v
_v = arg.pop("legendrank", None)
_v = legendrank if legendrank is not None else _v
if _v is not None:
self["legendrank"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("offset", None)
_v = offset if offset is not None else _v
if _v is not None:
self["offset"] = _v
_v = arg.pop("offsetgroup", None)
_v = offsetgroup if offsetgroup is not None else _v
if _v is not None:
self["offsetgroup"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("orientation", None)
_v = orientation if orientation is not None else _v
if _v is not None:
self["orientation"] = _v
_v = arg.pop("outsidetextfont", None)
_v = outsidetextfont if outsidetextfont is not None else _v
if _v is not None:
self["outsidetextfont"] = _v
_v = arg.pop("selectedpoints", None)
_v = selectedpoints if selectedpoints is not None else _v
if _v is not None:
self["selectedpoints"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textangle", None)
_v = textangle if textangle is not None else _v
if _v is not None:
self["textangle"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
_v = arg.pop("width", None)
_v = width if width is not None else _v
if _v is not None:
self["width"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("x0", None)
_v = x0 if x0 is not None else _v
if _v is not None:
self["x0"] = _v
_v = arg.pop("xaxis", None)
_v = xaxis if xaxis is not None else _v
if _v is not None:
self["xaxis"] = _v
_v = arg.pop("xhoverformat", None)
_v = xhoverformat if xhoverformat is not None else _v
if _v is not None:
self["xhoverformat"] = _v
_v = arg.pop("xperiod", None)
_v = xperiod if xperiod is not None else _v
if _v is not None:
self["xperiod"] = _v
_v = arg.pop("xperiod0", None)
_v = xperiod0 if xperiod0 is not None else _v
if _v is not None:
self["xperiod0"] = _v
_v = arg.pop("xperiodalignment", None)
_v = xperiodalignment if xperiodalignment is not None else _v
if _v is not None:
self["xperiodalignment"] = _v
_v = arg.pop("xsrc", None)
_v = xsrc if xsrc is not None else _v
if _v is not None:
self["xsrc"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("y0", None)
_v = y0 if y0 is not None else _v
if _v is not None:
self["y0"] = _v
_v = arg.pop("yaxis", None)
_v = yaxis if yaxis is not None else _v
if _v is not None:
self["yaxis"] = _v
_v = arg.pop("yhoverformat", None)
_v = yhoverformat if yhoverformat is not None else _v
if _v is not None:
self["yhoverformat"] = _v
_v = arg.pop("yperiod", None)
_v = yperiod if yperiod is not None else _v
if _v is not None:
self["yperiod"] = _v
_v = arg.pop("yperiod0", None)
_v = yperiod0 if yperiod0 is not None else _v
if _v is not None:
self["yperiod0"] = _v
_v = arg.pop("yperiodalignment", None)
_v = yperiodalignment if yperiodalignment is not None else _v
if _v is not None:
self["yperiodalignment"] = _v
_v = arg.pop("ysrc", None)
_v = ysrc if ysrc is not None else _v
if _v is not None:
self["ysrc"] = _v
# Read-only literals
# ------------------
self._props["type"] = "funnel"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
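# A minimal usage sketch, assuming the standard plotly figure API:
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Funnel(
#       y=["Visit", "Sign-up", "Purchase"],
#       x=[1000, 400, 120],
#       textinfo="value+percent initial"))
#   fig.show()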
|
self["y"] = val
|
main.go
|
"net/http"
"reflect"
"strings"
"github.com/emicklei/go-restful"
"fmt"
"github.com/go-chassis/go-restful-swagger20"
"os"
"path/filepath"
)
type Book struct {
Id string
Title string
Author string
Student []Student
}
type ID string
type Age int64
type Student struct {
Id ID `swag:"string"`
Age Age
Name string
}
func modelTypeNameHandler(st reflect.Type) (string, bool) {
key := st.String()
if len(st.Name()) == 0 {
key = strings.Replace(key, "[]", "", -1)
}
if key == "main.Age" {
return "number", true
}
return key, true
}
func main() {
ws := new(restful.WebService)
ws.Path("/book")
ws.Consumes(restful.MIME_JSON, restful.MIME_XML)
ws.Produces(restful.MIME_JSON, restful.MIME_XML)
restful.Add(ws)
ws.Route(ws.GET("/{medium}").To(getBookById).
Doc("Search a books").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Param(ws.QueryParameter("language", "en,nl,de").DataType("string")).Metadata("tags", []string{"users", "desc"}).
Param(ws.HeaderParameter("If-Modified-Since", "last known timestamp").DataType("string").DataFormat("datetime")).
Returns(200, "haha", Book{}))
ws.Route(ws.PUT("/{medium}").To(modifyBookById).
Operation("modifyBookById").
Doc("modify a book").
Param(ws.PathParameter("medium", "digital or paperback").DataType("string")).
Reads(Book{Id: "2", Title: "go", Author: "lisi"}).
Do(returns200, returns500))
ws.Route(ws.POST("/add").To(addBook).
Notes("add a book").
Reads(Student{}).
Do(returns200, returns500))
ws.ApiVersion("1.0.1")
val := os.Getenv("SWAGGERFILEPATH")
fmt.Println(val)
if val == "" {
val, _ = filepath.Abs(filepath.Dir(os.Args[0]))
}
config := swagger.Config{
WebServices: restful.DefaultContainer.RegisteredWebServices(), // you control what services are visible
WebServicesUrl: "http://localhost:8080",
ApiPath: "/apidocs.json",
//FileStyle: "json",
OpenService: true,
SwaggerPath: "/apidocs/",
OutFilePath: filepath.Join(val, "api.yaml"),
ModelTypeNameHandler: modelTypeNameHandler}
config.Info.Description = "This is a sample server Book server"
config.Info.Title = "swagger Book"
swagger.RegisterSwaggerService(config, restful.DefaultContainer)
log.Print("start listening on localhost:8080")
server := &http.Server{Addr: ":8080", Handler: restful.DefaultContainer}
log.Fatal(server.ListenAndServe())
}
func getBookById(req *restful.Request, resp *restful.Response) {
book := Book{Id: "1", Title: "java", Author: "zhangsan"}
id := req.PathParameter("medium")
if id != book.Id {
resp.WriteErrorString(http.StatusNotFound, "Book could not be found.")
} else {
resp.WriteEntity(book)
}
}
func modifyBookById(req *restful.Request, resp *restful.Response) {}
func addBook(req *restful.Request, resp *restful.Response) {}
func returns200(b *restful.RouteBuilder) {
b.Returns(http.StatusOK, "OK", map[string]Book{})
}
func returns500(b *restful.RouteBuilder) {
b.Returns(http.StatusInternalServerError, "Bummer, something went wrong", nil)
}
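// A minimal usage sketch, assuming the server above is running on localhost:8080:
//
//   curl http://localhost:8080/apidocs.json   // fetch the generated Swagger document
//   curl http://localhost:8080/book/digital   // exercise the GET /book/{medium} route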
|
package main
import (
"log"
|
|
align_crabs.rs
|
use std::{
cmp::Ordering,
collections::HashMap,
fs::File,
io::{self, Read},
};
pub struct CrabSwarm {
positions: HashMap<usize, usize>,
}
impl CrabSwarm {
pub fn init_from_file(path: &str) -> io::Result<CrabSwarm> {
let mut file = File::open(path)?;
let mut buf = String::new();
file.read_to_string(&mut buf)?;
CrabSwarm::new(&buf)
}
pub fn new(input: &str) -> io::Result<CrabSwarm> {
let mut positions = HashMap::new();
for pos in input.trim().split(',').map(|s| s.parse::<usize>().unwrap()) {
*positions.entry(pos).or_default() += 1;
}
Ok(CrabSwarm { positions })
}
pub fn best_alignment(&self) -> usize {
let mut min = usize::MAX;
for &pos in self.positions.keys() {
let mut cand = 0;
for (p, v) in self.positions.iter() {
match pos.cmp(p) {
Ordering::Greater => cand += v * (pos - p),
Ordering::Less => cand += v * (p - pos),
Ordering::Equal => (),
}
}
if cand < min {
min = cand;
}
}
min
}
pub fn best_alignment_for_crab_engine(&self) -> usize {
let &left = self.positions.keys().min().unwrap();
let &right = self.positions.keys().max().unwrap();
|
let mut min = usize::MAX;
for pos in left..=right {
let mut cand = 0;
for (p, v) in self.positions.iter() {
match pos.cmp(p) {
Ordering::Greater => cand += v * crab_engine_fuel_cost(pos - p),
Ordering::Less => cand += v * crab_engine_fuel_cost(p - pos),
Ordering::Equal => (),
}
}
if cand < min {
min = cand;
}
}
min
}
}
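/// Fuel for the crab-engine model grows as a triangular number: moving
/// `changes` steps costs 1 + 2 + ... + changes = changes * (changes + 1) / 2.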
fn crab_engine_fuel_cost(changes: usize) -> usize {
((1 + changes) * changes) / 2
}
#[cfg(test)]
mod tests {
use super::{crab_engine_fuel_cost, CrabSwarm};
#[test]
fn test_day7() {
let crab_swarm = CrabSwarm::new("16,1,2,0,4,2,7,1,2,14").expect("parse error");
assert_eq!(37, crab_swarm.best_alignment());
assert_eq!(168, crab_swarm.best_alignment_for_crab_engine());
}
#[test]
fn test_crab_engine_fuel_cost() {
assert_eq!(66, crab_engine_fuel_cost(16 - 5));
assert_eq!(10, crab_engine_fuel_cost(5 - 1));
assert_eq!(6, crab_engine_fuel_cost(5 - 2));
assert_eq!(15, crab_engine_fuel_cost(5 - 0));
assert_eq!(45, crab_engine_fuel_cost(14 - 5));
}
}
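// A minimal usage sketch, assuming a hypothetical `input.txt` of
// comma-separated crab positions:
//
//   let swarm = CrabSwarm::init_from_file("input.txt")?;
//   println!("part 1: {}", swarm.best_alignment());
//   println!("part 2: {}", swarm.best_alignment_for_crab_engine());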
| |
corpusdiagnostics.py
|
from server import app, format_decades
from model import Decade, Country, Book, connect_to_db, db
from textprocessor import unpickle_data
from random import sample
from collections import Counter
def measure_and_sample_corpus(data_type, want_sample):
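"""Print per-decade corpus sizes for data_type ("word_set" or "bigram_dict"),
optionally printing a random ten-item sample when want_sample is truthy."""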
with app.app_context():
decades = format_decades()
for decade in decades:
books_from_decade = Book.query.filter_by(decade=decade).all()
if data_type == "word_set":
num_books_from_decade = len(books_from_decade)
decade_set = set()
for book in books_from_decade:
decade_set.update(unpickle_data(book.word_set))
words_from_decade = len(decade_set)
print(f"The {decade} corpus contains {words_from_decade} unique words")
if want_sample:
decade_sample = sample(sorted(decade_set), k=10)  # sample() needs a sequence, not a set
print(f"Ten of those words are: {decade_sample}")
if data_type == "bigram_dict":
decade_dict = Counter()
for book in books_from_decade:
book_bigrams = Counter(unpickle_data(book.bigram_dict))
decade_dict += book_bigrams
decade_unique_bigrams = "{:,}".format(len(decade_dict))
decade_total = "{:,}".format(sum(decade_dict.values()))
print(f"The {decade} corpus contains {decade_unique_bigrams} unique and {decade_total} total bigrams")
if want_sample:
decade_sample = sample(list(decade_dict), k=10)  # sample a sequence of the bigram keys
print(f"Ten of those bigrams are {decade_sample}")
def
|
(data_type, decade):
with app.app_context():
books_from_decade = Book.query.filter_by(decade=decade).all()
if data_type == "word_set":
decade_set = set()
for book in books_from_decade:
decade_set.update(unpickle_data(book.word_set))
print(f"The {decade} word set:")
print(sorted(decade_set))
if data_type == "bigram_dict":
decade_dict = Counter()
for book in books_from_decade:
book_bigrams = Counter(unpickle_data(book.bigram_dict))
decade_dict += book_bigrams
print(f"The {decade} bigram dictionary:")
print(decade_dict)
if __name__ == "__main__":
connect_to_db(app)
measure_and_sample_corpus("word_set", True)
#measure_and_sample_corpus("bigram_dict", False)
#print_whole_decade_set("word_set", "1920s")
|
print_whole_decade_set
|
replay_test.go
|
package consensus
import (
"bytes"
"context"
"fmt"
"io"
"io/ioutil"
"math/rand"
"os"
"path/filepath"
"runtime"
"sort"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"
abciclient "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/proxy"
sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)
// These tests ensure we can always recover from failure at any part of the consensus process.
// There are two general failure scenarios: failure during consensus, and failure while applying the block.
// Only the latter interacts with the app and store,
// but the former has to deal with restrictions on re-use of priv_validator keys.
// The `WAL Tests` are for failures during the consensus;
// the `Handshake Tests` are for failures in applying the block.
// With the help of the WAL, we can recover from it all!
//------------------------------------------------------------------------------------------
// WAL Tests
// TODO: It would be better to verify explicitly which states we can recover from without the wal
// and which ones we need the wal for - then we'd also be able to only flush the
// wal writer when we need to, instead of with every message.
func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config,
lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
logger := log.TestingLogger()
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewApplication(),
blockStore,
)
cs.SetLogger(logger)
bytes, _ := ioutil.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)
err = cs.Start()
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {
t.Error(err)
}
}()
// This is just a signal that we haven't halted; it's not something contained
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
// block, or else something is wrong.
newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
require.NoError(t, err)
select {
case <-newBlockSub.Out():
case <-newBlockSub.Canceled():
t.Fatal("newBlockSub was canceled")
case <-time.After(120 * time.Second):
t.Fatal("Timed out waiting for new block (see trace above)")
}
}
func sendTxs(ctx context.Context, cs *State) {
for i := 0; i < 256; i++ {
select {
case <-ctx.Done():
return
default:
tx := []byte{byte(i)}
if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil {
panic(err)
}
}
}
}
// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
func TestWALCrash(t *testing.T) {
testCases := []struct {
name string
initFn func(dbm.DB, *State, context.Context)
heightToStop int64
}{
{"empty block",
func(stateDB dbm.DB, cs *State, ctx context.Context) {},
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *State, ctx context.Context) {
go sendTxs(ctx, cs)
},
3},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
consensusReplayConfig, err := ResetConfig(tc.name)
require.NoError(t, err)
crashWALandCheckLiveness(t, consensusReplayConfig, tc.initFn, tc.heightToStop)
})
}
}
func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config,
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
i := 1
LOOP:
for {
t.Logf("====== LOOP %d\n", i)
// create consensus state from a clean slate
logger := log.NewNopLogger()
blockDB := dbm.NewMemDB()
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
consensusReplayConfig,
state,
privValidator,
kvstore.NewApplication(),
blockStore,
)
cs.SetLogger(logger)
// start sending transactions
ctx, cancel := context.WithCancel(context.Background())
initFn(stateDB, cs, ctx)
// clean up WAL file from the previous iteration
walFile := cs.config.WalFile()
os.Remove(walFile)
// set crashing WAL
csWal, err := cs.OpenWAL(walFile)
require.NoError(t, err)
crashingWal.next = csWal
// reset the message counter
crashingWal.msgIndex = 1
cs.wal = crashingWal
// start consensus state
err = cs.Start()
require.NoError(t, err)
i++
select {
case err := <-walPanicked:
t.Logf("WAL panicked: %v", err)
// make sure we can make blocks after a crash
startNewStateAndWaitForBlock(t, consensusReplayConfig, cs.Height, blockDB, stateStore)
// stop consensus state and transactions sender (initFn)
cs.Stop() //nolint:errcheck // Logging this error causes failure
cancel()
// if we reached the required height, exit
if _, ok := err.(ReachedHeightToStopError); ok {
break LOOP
}
case <-time.After(10 * time.Second):
t.Fatal("WAL did not panic for 10 seconds (check the log)")
}
}
}
// crashingWAL is a WAL which crashes or rather simulates a crash during Save
// (before and after). It remembers a message for which we last panicked
// (lastPanickedForMsgIndex), so we don't panic for it in subsequent iterations.
type crashingWAL struct {
next WAL
panicCh chan error
heightToStop int64
msgIndex int // current message index
lastPanickedForMsgIndex int // last message for which we panicked
}
var _ WAL = &crashingWAL{}
// WALWriteError indicates a WAL crash.
type WALWriteError struct {
msg string
}
func (e WALWriteError) Error() string {
return e.msg
}
// ReachedHeightToStopError indicates we've reached the required consensus
// height and may exit.
type ReachedHeightToStopError struct {
height int64
}
func (e ReachedHeightToStopError) Error() string {
return fmt.Sprintf("reached height to stop %d", e.height)
}
// Write simulates a WAL crash by sending an error to the panicCh and then
// exiting the cs.receiveRoutine.
func (w *crashingWAL) Write(m WALMessage) error {
if endMsg, ok := m.(EndHeightMessage); ok {
if endMsg.Height == w.heightToStop {
w.panicCh <- ReachedHeightToStopError{endMsg.Height}
runtime.Goexit()
return nil
}
return w.next.Write(m)
}
if w.msgIndex > w.lastPanickedForMsgIndex {
w.lastPanickedForMsgIndex = w.msgIndex
_, file, line, _ := runtime.Caller(1)
w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)}
runtime.Goexit()
return nil
}
w.msgIndex++
return w.next.Write(m)
}
func (w *crashingWAL) WriteSync(m WALMessage) error {
return w.Write(m)
}
func (w *crashingWAL) FlushAndSync() error { return w.next.FlushAndSync() }
func (w *crashingWAL) SearchForEndHeight(
height int64,
options *WALSearchOptions) (rd io.ReadCloser, found bool, err error) {
return w.next.SearchForEndHeight(height, options)
}
func (w *crashingWAL) Start() error { return w.next.Start() }
func (w *crashingWAL) Stop() error { return w.next.Stop() }
func (w *crashingWAL) Wait() { w.next.Wait() }
//------------------------------------------------------------------------------------------
type simulatorTestSuite struct {
GenesisState sm.State
Config *config.Config
Chain []*types.Block
Commits []*types.Commit
CleanupFunc cleanupFunc
Mempool mempool.Mempool
Evpool sm.EvidencePool
}
const (
numBlocks = 6
)
//---------------------------------------
// Test handshake/replay
// 0 - all synced up
// 1 - saved block but app and state are behind
// 2 - save block and committed but state is behind
// 3 - save block and committed with truncated block store and state behind
var modes = []uint{0, 1, 2, 3}
// This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
func
|
(t *testing.T) *simulatorTestSuite {
t.Helper()
cfg := configSetup(t)
sim := &simulatorTestSuite{
Mempool: emptyMempool{},
Evpool: sm.EmptyEvidencePool{},
}
nPeers := 7
nVals := 4
css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
cfg,
nVals,
nPeers,
"replay_test",
newMockTickerFunc(true),
newPersistentKVStoreWithPath)
sim.Config = cfg
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
sim.CleanupFunc = cleanup
partSize := types.BlockPartSizeBytes
newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)
vss := make([]*validatorStub, nPeers)
for i := 0; i < nPeers; i++ {
vss[i] = newValidatorStub(css[i].privValidator, int32(i))
}
height, round := css[0].Height, css[0].Round
// start the machine
startTestRound(css[0], height, round)
incrementHeight(vss...)
ensureNewRound(newRoundCh, height, 0)
ensureNewProposal(proposalCh, height, round)
rs := css[0].GetRoundState()
signAddVotes(sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 2
height++
incrementHeight(vss...)
newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
require.NoError(t, err)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
p := proposal.ToProto()
if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 3
height++
incrementHeight(vss...)
updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
require.NoError(t, err)
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 4
height++
incrementHeight(vss...)
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
require.NoError(t, err)
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)
newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
require.NoError(t, err)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss := make([]*validatorStub, nVals+1)
copy(newVss, vss[:nVals+1])
sort.Sort(ValidatorStubsByPower(newVss))
valIndexFn := func(cssIdx int) int {
for i, vs := range newVss {
vsPubKey, err := vs.GetPubKey(context.Background())
require.NoError(t, err)
cssPubKey, err := css[cssIdx].privValidator.GetPubKey(context.Background())
require.NoError(t, err)
if vsPubKey.Equals(cssPubKey) {
return i
}
}
panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx))
}
selfIndex := valIndexFn(0)
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[3].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 5
height++
incrementHeight(vss...)
// Reflect the changes to vss[nVals] at height 3 and resort newVss.
newVssIdx := valIndexFn(nVals)
newVss[newVssIdx].VotingPower = 25
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
// HEIGHT 6
height++
incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss = make([]*validatorStub, nVals+3)
copy(newVss, vss[:nVals+3])
sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
}
proposal.Signature = p.Signature
// set the proposal block
if err := css[0].SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+3; i++ {
if i == selfIndex {
continue
}
signAddVotes(sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(newRoundCh, height+1, 0)
sim.Chain = make([]*types.Block, 0)
sim.Commits = make([]*types.Commit, 0)
for i := 1; i <= numBlocks; i++ {
sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
}
if sim.CleanupFunc != nil {
t.Cleanup(sim.CleanupFunc)
}
return sim
}
// Sync from scratch
func TestHandshakeReplayAll(t *testing.T) {
sim := setupSimulator(t)
for _, m := range modes {
testHandshakeReplay(t, sim, 0, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, sim, 0, m, true)
}
}
// Sync many, not from scratch
func TestHandshakeReplaySome(t *testing.T) {
sim := setupSimulator(t)
for _, m := range modes {
testHandshakeReplay(t, sim, 2, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, sim, 2, m, true)
}
}
// Sync from lagging by one
func TestHandshakeReplayOne(t *testing.T) {
sim := setupSimulator(t)
for _, m := range modes {
testHandshakeReplay(t, sim, numBlocks-1, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, sim, numBlocks-1, m, true)
}
}
// Sync from caught up
func TestHandshakeReplayNone(t *testing.T) {
sim := setupSimulator(t)
for _, m := range modes {
testHandshakeReplay(t, sim, numBlocks, m, false)
}
for _, m := range modes {
testHandshakeReplay(t, sim, numBlocks, m, true)
}
}
// Test that mockProxyApp does not panic when the app returns ABCIResponses with some empty ResponseDeliverTx.
func TestMockProxyApp(t *testing.T) {
sim := setupSimulator(t) // setup config and simulator
cfg := sim.Config
assert.NotNil(t, cfg)
logger := log.TestingLogger()
var validTxs, invalidTxs = 0, 0
txIndex := 0
assert.NotPanics(t, func() {
abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})
// called when saveABCIResponses:
bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)
require.NoError(t, err)
loadedAbciRes := new(tmstate.ABCIResponses)
// this also happens in sm.LoadABCIResponses
err = proto.Unmarshal(bytes, loadedAbciRes)
require.NoError(t, err)
mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)
abciRes := new(tmstate.ABCIResponses)
abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
// Execute transactions and get hash.
proxyCb := func(req *abci.Request, res *abci.Response) {
if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
// TODO: make use of res.Log
// TODO: make use of this info
// Blocks may include invalid txs.
txRes := r.DeliverTx
if txRes.Code == abci.CodeTypeOK {
validTxs++
} else {
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
invalidTxs++
}
abciRes.DeliverTxs[txIndex] = txRes
txIndex++
}
}
mock.SetResponseCallback(proxyCb)
someTx := []byte("tx")
_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
assert.NoError(t, err)
})
assert.True(t, validTxs == 1)
assert.True(t, invalidTxs == 0)
}
func tempWALWithData(data []byte) string {
walFile, err := ioutil.TempFile("", "wal")
if err != nil {
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
}
_, err = walFile.Write(data)
if err != nil {
panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
}
return walFile.Name()
}
// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
var store *mockBlockStore
var stateDB dbm.DB
var genesisState sm.State
cfg := sim.Config
if testValidatorsChange {
testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_m", t.Name(), mode))
require.NoError(t, err)
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
stateDB = dbm.NewMemDB()
genesisState = sim.GenesisState
cfg = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(cfg, genesisState.ConsensusParams)
} else { // test single node
testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
require.NoError(t, err)
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
walBody, err := WALWithNBlocks(t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(walBody)
cfg.Consensus.SetWalFile(walFile)
privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
require.NoError(t, err)
wal, err := NewWAL(walFile)
require.NoError(t, err)
wal.SetLogger(log.TestingLogger())
err = wal.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := wal.Stop(); err != nil {
t.Error(err)
}
})
chain, commits, err = makeBlockchainFromWAL(wal)
require.NoError(t, err)
pubKey, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion)
}
stateStore := sm.NewStore(stateDB)
store.chain = chain
store.commits = commits
state := genesisState.Copy()
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(cfg, sim.Mempool, sim.Evpool, stateStore, state, chain, nBlocks, mode, store)
latestAppHash := state.AppHash
// make a new client creator
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) })
clientCreator2 := abciclient.NewLocalCreator(kvstoreApp)
if nBlocks > 0 {
// run nBlocks against a new client to build up the app state.
// use a throwaway tendermint state
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
stateDB1 := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
}
// Prune block store if requested
expectError := false
if mode == 3 {
pruned, err := store.PruneBlocks(2)
require.NoError(t, err)
require.EqualValues(t, 1, pruned)
expectError = int64(nBlocks) < 2
}
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
handshaker := NewHandshaker(stateStore, state, store, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, proxy.NopMetrics())
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
t.Cleanup(func() {
if err := proxyApp.Stop(); err != nil {
t.Error(err)
}
})
err := handshaker.Handshake(proxyApp)
if expectError {
require.Error(t, err)
return
} else if err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// get the latest app hash from the app
res, err := proxyApp.Query().InfoSync(context.Background(), abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
// the app hash should be synced up
if !bytes.Equal(latestAppHash, res.LastBlockAppHash) {
t.Fatalf(
"Expected app hashes to match after handshake/replay. got %X, expected %X",
res.LastBlockAppHash,
latestAppHash)
}
expectedBlocksToSync := numBlocks - nBlocks
if nBlocks == numBlocks && mode > 0 {
expectedBlocksToSync++
} else if nBlocks > 0 && mode == 1 {
expectedBlocksToSync++
}
if handshaker.NBlocks() != expectedBlocksToSync {
t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks())
}
}
func applyBlock(stateStore sm.Store,
mempool mempool.Mempool,
evpool sm.EvidencePool,
st sm.State,
blk *types.Block,
proxyApp proxy.AppConns,
blockStore *mockBlockStore) sm.State {
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)
blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
newState, err := blockExec.ApplyBlock(st, blkID, blk)
if err != nil {
panic(err)
}
return newState
}
func buildAppStateFromChain(
proxyApp proxy.AppConns,
stateStore sm.Store,
mempool mempool.Mempool,
evpool sm.EvidencePool,
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) {
// start a new app without handshake, play nBlocks blocks
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop() //nolint:errcheck // ignore
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{
Validators: validators,
}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}
case 1, 2, 3:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}
if mode == 2 || mode == 3 {
// update the kvstore height and apphash,
// as if we ran commit but did not update the tendermint state
state = applyBlock(stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
}
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}
}
func buildTMStateFromChain(
cfg *config.Config,
mempool mempool.Mempool,
evpool sm.EvidencePool,
stateStore sm.Store,
state sm.State,
chain []*types.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) sm.State {
// run the whole chain against this client to build up the tendermint state
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
defer kvstoreApp.Close()
clientCreator := abciclient.NewLocalCreator(kvstoreApp)
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
if err := proxyApp.Start(); err != nil {
panic(err)
}
defer proxyApp.Stop() //nolint:errcheck
state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{
Validators: validators,
}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
switch mode {
case 0:
// sync right up
for _, block := range chain {
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}
case 1, 2, 3:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
state = applyBlock(stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}
// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
default:
panic(fmt.Sprintf("unknown mode %v", mode))
}
return state
}
func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
// 1. Initialize tendermint and commit 3 blocks with the following app hashes:
// - 0x01
// - 0x02
// - 0x03
cfg, err := ResetConfig("handshake_test_")
require.NoError(t, err)
t.Cleanup(func() { os.RemoveAll(cfg.RootDir) })
privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
require.NoError(t, err)
const appVersion = 0x0
pubKey, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
stateDB, state, store := stateAndStore(cfg, pubKey, appVersion)
stateStore := sm.NewStore(stateDB)
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
blocks := sf.MakeBlocks(3, &state, privVal)
store.chain = blocks
// 2. Tendermint must panic if app returns wrong hash for the first block
// - RANDOM HASH
// - 0x02
// - 0x03
{
app := &badApp{numBlocks: 3, allHashesAreWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
err := proxyApp.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := proxyApp.Stop(); err != nil {
t.Error(err)
}
})
assert.Panics(t, func() {
h := NewHandshaker(stateStore, state, store, genDoc)
if err = h.Handshake(proxyApp); err != nil {
t.Log(err)
}
})
}
// 3. Tendermint must panic if app returns wrong hash for the last block
// - 0x01
// - 0x02
// - RANDOM HASH
{
app := &badApp{numBlocks: 3, onlyLastHashIsWrong: true}
clientCreator := abciclient.NewLocalCreator(app)
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
err := proxyApp.Start()
require.NoError(t, err)
t.Cleanup(func() {
if err := proxyApp.Stop(); err != nil {
t.Error(err)
}
})
assert.Panics(t, func() {
h := NewHandshaker(stateStore, state, store, genDoc)
if err = h.Handshake(proxyApp); err != nil {
t.Log(err)
}
})
}
}
type badApp struct {
abci.BaseApplication
numBlocks byte
height byte
allHashesAreWrong bool
onlyLastHashIsWrong bool
}
func (app *badApp) Commit() abci.ResponseCommit {
app.height++
if app.onlyLastHashIsWrong {
if app.height == app.numBlocks {
return abci.ResponseCommit{Data: tmrand.Bytes(8)}
}
return abci.ResponseCommit{Data: []byte{app.height}}
} else if app.allHashesAreWrong {
return abci.ResponseCommit{Data: tmrand.Bytes(8)}
}
panic("either allHashesAreWrong or onlyLastHashIsWrong must be set")
}
//--------------------------
// utils for making blocks
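// makeBlockchainFromWAL replays the WAL from height 0, reassembling each block
// from its part-set pieces and pairing it with a commit built from the
// corresponding precommit vote.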
func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
var height int64
// Search for height marker
gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, fmt.Errorf("wal does not contain height %d", height)
}
defer gr.Close()
// log.Notice("Build a blockchain by reading from the WAL")
var (
blocks []*types.Block
commits []*types.Commit
thisBlockParts *types.PartSet
thisBlockCommit *types.Commit
)
dec := NewWALDecoder(gr)
for {
msg, err := dec.Decode()
if err == io.EOF {
break
} else if err != nil {
return nil, nil, err
}
piece := readPieceFromWAL(msg)
if piece == nil {
continue
}
switch p := piece.(type) {
case EndHeightMessage:
// if it's not the first one, we have a full block
if thisBlockParts != nil {
var pbb = new(tmproto.Block)
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Height
if commitHeight != height+1 {
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
height++
}
case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := thisBlockParts.AddPart(p)
if err != nil {
return nil, nil, err
}
case *types.Vote:
if p.Type == tmproto.PrecommitType {
thisBlockCommit = types.NewCommit(p.Height, p.Round,
p.BlockID, []types.CommitSig{p.CommitSig()})
}
}
}
// grab the last block too
bz, err := ioutil.ReadAll(thisBlockParts.GetReader())
if err != nil {
panic(err)
}
var pbb = new(tmproto.Block)
err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Height
if commitHeight != height+1 {
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
return blocks, commits, nil
}
func readPieceFromWAL(msg *TimedWALMessage) interface{} {
// for logging
switch m := msg.Msg.(type) {
case msgInfo:
switch msg := m.Msg.(type) {
case *ProposalMessage:
return &msg.Proposal.BlockID.PartSetHeader
case *BlockPartMessage:
return msg.Part
case *VoteMessage:
return msg.Vote
}
case EndHeightMessage:
return m
}
return nil
}
// fresh state and mock store
func stateAndStore(
cfg *config.Config,
pubKey crypto.PubKey,
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
state.Version.Consensus.App = appVersion
store := newMockBlockStore(cfg, state.ConsensusParams)
if err := stateStore.Save(state); err != nil {
panic(err)
}
return stateDB, state, store
}
//----------------------------------
// mock block store
type mockBlockStore struct {
cfg *config.Config
params types.ConsensusParams
chain []*types.Block
commits []*types.Commit
base int64
}
// TODO: NewBlockStore(db.NewMemDB) ...
func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
return &mockBlockStore{cfg, params, nil, nil, 0}
}
func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) }
func (bs *mockBlockStore) Base() int64 { return bs.base }
func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 }
func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
return bs.chain[int64(len(bs.chain))-1]
}
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit() *types.Commit {
return bs.commits[len(bs.commits)-1]
}
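// PruneBlocks mimics real pruning by nil-ing out every block and commit below
// the given height and advancing the store's base.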
func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {
pruned := uint64(0)
for i := int64(0); i < height-1; i++ {
bs.chain[i] = nil
bs.commits[i] = nil
pruned++
}
bs.base = height
return pruned, nil
}
//---------------------------------------
// Test handshake/init chain
func TestHandshakeUpdatesValidators(t *testing.T) {
val, _ := factory.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)
cfg, err := ResetConfig("handshake_test_")
require.NoError(t, err)
t.Cleanup(func() { _ = os.RemoveAll(cfg.RootDir) })
privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
require.NoError(t, err)
pubKey, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
stateDB, state, store := stateAndStore(cfg, pubKey, 0x0)
stateStore := sm.NewStore(stateDB)
oldValAddr := state.Validators.Validators[0].Address
// now start the app using the handshake - it should sync
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
handshaker := NewHandshaker(stateStore, state, store, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
if err := proxyApp.Start(); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}
t.Cleanup(func() {
if err := proxyApp.Stop(); err != nil {
t.Error(err)
}
})
if err := handshaker.Handshake(proxyApp); err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// reload the state, check the validator set was updated
state, err = stateStore.Load()
require.NoError(t, err)
newValAddr := state.Validators.Validators[0].Address
expectValAddr := val.Address
assert.NotEqual(t, oldValAddr, newValAddr)
assert.Equal(t, newValAddr, expectValAddr)
}
// returns the vals on InitChain
type initChainApp struct {
abci.BaseApplication
vals []abci.ValidatorUpdate
}
func (ica *initChainApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
return abci.ResponseInitChain{
Validators: ica.vals,
}
}
|
setupSimulator
|
asset-event.d.ts
|
import { CreateAssetInput, DeleteAssetInput, UpdateAssetInput } from '@vendure/common/lib/generated-types';
import { ID } from '@vendure/common/lib/shared-types';
import { RequestContext } from '../../api';
import { Asset } from '../../entity';
import { VendureEntityEvent } from '../vendure-entity-event';
declare type AssetInputTypes = CreateAssetInput | UpdateAssetInput | DeleteAssetInput | ID;
/**
* @description
* This event is fired whenever a {@link Asset} is added, updated or deleted.
*
* @docsCategory events
* @docsPage Event Types
* @since 1.4
*/
export declare class AssetEvent extends VendureEntityEvent<Asset, AssetInputTypes> {
constructor(ctx: RequestContext, entity: Asset, type: 'created' | 'updated' | 'deleted', input?: AssetInputTypes);
/**
 * Returns the asset field for compatibility with the
 * deprecated old version of AssetEvent
* @deprecated Use `entity` instead
|
}
export {};
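// Hypothetical usage sketch (the EventBus subscription below follows Vendure's
// documented event API, but the handler body is an illustrative assumption):
//
//   this.eventBus.ofType(AssetEvent).subscribe(event => {
//       if (event.type === 'created') {
//           // react to the newly created Asset, e.g. via event.entity.id
//       }
//   });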
|
* @since 1.4
*/
get asset(): Asset;
|
file.go
|
// Copyright 2020 Marius Wilms. All rights reserved.
// Copyright 2018 Atelier Disko. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package author
import (
"os"
"path/filepath"
)
const (
// CanonicalBasename is the canonical name of the file.
CanonicalBasename = "AUTHORS.txt"
)
func FindFile(path string) (bool, string, error) {
try := filepath.Join(path, CanonicalBasename)
_, err := os.Stat(try)
if err != nil {
if os.IsNotExist(err)
|
return false, try, err
}
return true, try, nil
}
|
{
return false, try, nil
}
|
http.go
|
package chef
import (
"bytes"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/json"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"log"
"net"
"net/http"
"net/url"
"path"
"strings"
"time"
)
// ChefVersion that we pretend to emulate
const ChefVersion = "14.0.0"
// Body wraps io.Reader and adds methods for calculating hashes and detecting content
type Body struct {
io.Reader
}
// AuthConfig representing a client and a private key used for encryption
// This is embedded in the Client type
type AuthConfig struct {
PrivateKey *rsa.PrivateKey
ClientName string
}
// Client is vessel for public methods used against the chef-server
type Client struct {
Auth *AuthConfig
BaseURL *url.URL
client *http.Client
ACLs *ACLService
Associations *AssociationService
AuthenticateUser *AuthenticateUserService
Clients *ApiClientService
Containers *ContainerService
Cookbooks *CookbookService
CookbookArtifacts *CBAService
DataBags *DataBagService
Environments *EnvironmentService
Groups *GroupService
License *LicenseService
Nodes *NodeService
Organizations *OrganizationService
Policies *PolicyService
PolicyGroups *PolicyGroupService
Principals *PrincipalService
RequiredRecipe *RequiredRecipeService
Roles *RoleService
Sandboxes *SandboxService
Search *SearchService
Stats *StatsService
Status *StatusService
Universe *UniverseService
UpdatedSince *UpdatedSinceService
Users *UserService
}
// Config contains the configuration options for a chef client. This structure is used primarily by the NewClient() constructor to set up a proper client object
type Config struct {
// This should be the user ID on the chef server
Name string
// This is the plain text private Key for the user
Key string
// BaseURL is the chef server URL to connect to. If using orgs, include your org in the URL and terminate the URL with a "/"
BaseURL string
// When set to false (default) this will enable SSL Cert Verification. If you need to disable Cert Verification set to true
SkipSSL bool
// RootCAs is a reference to x509.CertPool for TLS
RootCAs *x509.CertPool
// Time to wait in seconds before giving up on a request to the server
Timeout int
}
/*
An ErrorResponse reports one or more errors caused by an API request.
Thanks to https://github.com/google/go-github
The wrapped Response structure includes:
	Status     string
	StatusCode int
*/
type ErrorResponse struct {
Response *http.Response // HTTP response that caused this error
}
// Buffer creates a bytes.Buffer copy from an io.Reader and resets the reader to offset 0,0
func (body *Body) Buffer() *bytes.Buffer {
var b bytes.Buffer
if body.Reader == nil {
return &b
}
b.ReadFrom(body.Reader)
_, err := body.Reader.(io.Seeker).Seek(0, 0)
if err != nil {
log.Fatal(err)
}
return &b
}
// Hash calculates the body content hash
func (body *Body) Hash() (h string) {
	b := body.Buffer()
	// empty buffers hash as the empty string
	if b.Len() == 0 {
		return HashStr("")
	}
	return HashStr(b.String())
}
// ContentType returns the content-type string of Body as detected by http.DetectContentType()
func (body *Body) ContentType() string {
if json.Unmarshal(body.Buffer().Bytes(), &struct{}{}) == nil {
return "application/json"
}
return http.DetectContentType(body.Buffer().Bytes())
}
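// Hypothetical usage sketch (the JSON payload is an illustrative assumption):
//
//   b := &Body{strings.NewReader(`{"name":"node1"}`)}
//   sum := b.Hash()         // base64 SHA-1 of the body content
//   ct := b.ContentType()   // "application/json"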
func (r *ErrorResponse) Error() string {
return fmt.Sprintf("%v %v: %d",
r.Response.Request.Method, r.Response.Request.URL,
r.Response.StatusCode)
}
func (r *ErrorResponse) StatusCode() int {
return r.Response.StatusCode
}
func (r *ErrorResponse) StatusMethod() string {
return r.Response.Request.Method
}
func (r *ErrorResponse) StatusURL() *url.URL {
return r.Response.Request.URL
}
// NewClient is the client generator used to instantiate a client for talking to a chef-server
// It is a simple constructor for the Client struct intended as an easy interface for issuing
// signed requests
func NewClient(cfg *Config) (*Client, error) {
pk, err := PrivateKeyFromString([]byte(cfg.Key))
if err != nil {
return nil, err
}
baseUrl, _ := url.Parse(cfg.BaseURL)
tlsConfig := &tls.Config{InsecureSkipVerify: cfg.SkipSSL}
if cfg.RootCAs != nil {
tlsConfig.RootCAs = cfg.RootCAs
}
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSClientConfig: tlsConfig,
TLSHandshakeTimeout: 10 * time.Second,
}
c := &Client{
Auth: &AuthConfig{
PrivateKey: pk,
ClientName: cfg.Name,
},
client: &http.Client{
Transport: tr,
Timeout: time.Duration(cfg.Timeout) * time.Second,
},
BaseURL: baseUrl,
}
c.ACLs = &ACLService{client: c}
c.AuthenticateUser = &AuthenticateUserService{client: c}
c.Associations = &AssociationService{client: c}
c.Clients = &ApiClientService{client: c}
c.Containers = &ContainerService{client: c}
c.Cookbooks = &CookbookService{client: c}
c.CookbookArtifacts = &CBAService{client: c}
c.DataBags = &DataBagService{client: c}
c.Environments = &EnvironmentService{client: c}
c.Groups = &GroupService{client: c}
c.License = &LicenseService{client: c}
c.Nodes = &NodeService{client: c}
c.Organizations = &OrganizationService{client: c}
c.Policies = &PolicyService{client: c}
c.PolicyGroups = &PolicyGroupService{client: c}
c.RequiredRecipe = &RequiredRecipeService{client: c}
c.Principals = &PrincipalService{client: c}
c.Roles = &RoleService{client: c}
c.Sandboxes = &SandboxService{client: c}
c.Search = &SearchService{client: c}
c.Stats = &StatsService{client: c}
c.Status = &StatusService{client: c}
c.UpdatedSince = &UpdatedSinceService{client: c}
c.Universe = &UniverseService{client: c}
c.Users = &UserService{client: c}
return c, nil
}
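// Hypothetical usage sketch (key material, URL, org name, and the node lookup
// are illustrative assumptions, not part of this file):
//
//   client, err := chef.NewClient(&chef.Config{
//       Name:    "my-client",
//       Key:     string(pemBytes), // RSA private key, PEM encoded
//       BaseURL: "https://chef.example.com/organizations/acme/",
//       Timeout: 30, // seconds
//   })
//   if err != nil { /* handle error */ }
//   node, err := client.Nodes.Get("node1")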
// basicRequestDecoder performs a request on an endpoint, and decodes the response into the passed in Type
// basicRequestDecoder is the same code as magicRequestDecoder with the addition of a generated Authorization: Basic header
// added to the http request
func (c *Client) basicRequestDecoder(method, path string, body io.Reader, v interface{}, user string, password string) error {
req, err := c.NewRequest(method, path, body)
if err != nil {
return err
}
basicAuthHeader(req, user, password)
debug("\n\nRequest: %+v \n", req)
res, err := c.Do(req, v)
if res != nil {
defer res.Body.Close()
}
debug("Response: %+v\n", res)
if err != nil {
return err
}
return nil
}
// magicRequestDecoder performs a request on an endpoint, and decodes the response into the passed in Type
func (c *Client) magicRequestDecoder(method, path string, body io.Reader, v interface{}) error {
req, err := c.NewRequest(method, path, body)
if err != nil {
return err
}
debug("\n\nRequest: %+v \n", req)
res, err := c.Do(req, v)
if res != nil {
defer res.Body.Close()
}
debug("Response: %+v\n", res)
if err != nil {
return err
}
return nil
}
// NewRequest returns a signed request suitable for the chef server
func (c *Client) NewRequest(method string, requestUrl string, body io.Reader) (*http.Request, error) {
relativeUrl, err := url.Parse(requestUrl)
if err != nil {
return nil, err
}
u := c.BaseURL.ResolveReference(relativeUrl)
// build the request from the raw body reader; Body below wraps the same reader for hashing
req, err := http.NewRequest(method, u.String(), body)
if err != nil {
return nil, err
}
// parse and encode Querystring Values
values := req.URL.Query()
req.URL.RawQuery = values.Encode()
debug("Encoded url %+v\n", u)
myBody := &Body{body}
if body != nil {
// Detect Content-type
req.Header.Set("Content-Type", myBody.ContentType())
}
// Calculate the body hash
req.Header.Set("X-Ops-Content-Hash", myBody.Hash())
// No need to check the error here: SignRequest only fails when the signing hash is invalid, and we computed that hash above
c.Auth.SignRequest(req)
return req, nil
}
// basicAuth does base64 encoding of a user and password
func basicAuth(user string, password string) string {
creds := user + ":" + password
return base64.StdEncoding.EncodeToString([]byte(creds))
}
// basicAuthHeader adds an Authentication Basic header to the request
// The user and password values should be clear text. They will be
// base64 encoded for the header.
func basicAuthHeader(r *http.Request, user string, password string) {
r.Header.Add("authorization", "Basic "+basicAuth(user, password))
}
// CheckResponse receives a pointer to a http.Response and generates an Error via unmarshalling
func CheckResponse(r *http.Response) error {
if c := r.StatusCode; 200 <= c && c <= 299 {
return nil
}
errorResponse := &ErrorResponse{Response: r}
data, err := ioutil.ReadAll(r.Body)
if err == nil && data != nil {
json.Unmarshal(data, errorResponse)
}
return errorResponse
}
// ChefError tries to unwind a chef client err return embedded in an error
// Unwinding allows easy access the StatusCode, StatusMethod and StatusURL functions
func ChefError(err error) (cerr *ErrorResponse, nerr error) {
if err == nil {
return cerr, err
}
if cerr, ok := err.(*ErrorResponse); ok {
return cerr, err
}
return cerr, err
}
// Do executes a request; it is used internally by the magic request decoders and may also be called directly by a user
func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) {
res, err := c.client.Do(req)
if err != nil {
return nil, err
}
// BUG(fujin) tightly coupled
err = CheckResponse(res)
if err != nil
|
var resBuf bytes.Buffer
resTee := io.TeeReader(res.Body, &resBuf)
// no response interface specified
if v == nil {
if debug_on() {
// show the response body as a string
resbody, _ := ioutil.ReadAll(resTee)
debug("Response body: %+v\n", string(resbody))
}
debug("No response body requested\n")
return res, nil
}
// response interface, v, is an io writer
if w, ok := v.(io.Writer); ok {
debug("Response output desired is an io Writer\n")
_, err = io.Copy(w, resTee)
return res, err
}
// response content-type specifies JSON encoded - decode it
if hasJsonContentType(res) {
err = json.NewDecoder(resTee).Decode(v)
if debug_on() {
// show the response body as a string
resbody, _ := ioutil.ReadAll(&resBuf)
debug("Response body: %+v\n", string(resbody))
}
debug("Response body specifies content as JSON: %+v Err:\n", v, err)
if err != nil {
return res, err
}
return res, nil
}
// response interface, v, is type string and the content is plain text
if _, ok := v.(*string); ok && hasTextContentType(res) {
resbody, err := ioutil.ReadAll(resTee)
if err != nil {
return res, err
}
out := string(resbody)
debug("Response body parsed as string: %+v\n", out)
*v.(*string) = out
return res, nil
}
// Default response: Content-Type is not JSON. Assume v is a struct and decode the response as json
err = json.NewDecoder(resTee).Decode(v)
if debug_on() {
// show the response body as a string
resbody, _ := ioutil.ReadAll(&resBuf)
debug("Response body: %+v\n", string(resbody))
}
debug("Response body defaulted to JSON parsing: %+v Err:\n", v, err)
if err != nil {
return res, err
}
return res, nil
}
func hasJsonContentType(res *http.Response) bool {
contentType := res.Header.Get("Content-Type")
return contentType == "application/json"
}
func hasTextContentType(res *http.Response) bool {
contentType := res.Header.Get("Content-Type")
return contentType == "text/plain"
}
// SignRequest modifies headers of an http.Request
func (ac AuthConfig) SignRequest(request *http.Request) error {
// sanitize the path for the chef-server
// chef-server doesn't support '//' in the Hash Path.
var endpoint string
if request.URL.Path != "" {
endpoint = path.Clean(request.URL.Path)
request.URL.Path = endpoint
} else {
endpoint = request.URL.Path
}
vals := map[string]string{
"Method": request.Method,
"Hashed Path": HashStr(endpoint),
"Accept": "application/json",
"X-Chef-Version": ChefVersion,
"X-Ops-Server-API-Version": "1",
"X-Ops-Timestamp": time.Now().UTC().Format(time.RFC3339),
"X-Ops-UserId": ac.ClientName,
"X-Ops-Sign": "algorithm=sha1;version=1.0",
"X-Ops-Content-Hash": request.Header.Get("X-Ops-Content-Hash"),
}
for _, key := range []string{"Method", "Accept", "X-Chef-Version", "X-Ops-Server-API-Version", "X-Ops-Timestamp", "X-Ops-UserId", "X-Ops-Sign"} {
request.Header.Set(key, vals[key])
}
// The canonical string that gets signed is order-sensitive, so build it field by field
var content string
for _, key := range []string{"Method", "Hashed Path", "X-Ops-Content-Hash", "X-Ops-Timestamp", "X-Ops-UserId"} {
content += fmt.Sprintf("%s:%s\n", key, vals[key])
}
content = strings.TrimSuffix(content, "\n")
// generate signed string of headers
// Since we've gone through additional validation steps above,
// we shouldn't get an error at this point
signature, err := GenerateSignature(ac.PrivateKey, content)
if err != nil {
return err
}
// TODO: THIS IS CHEF PROTOCOL SPECIFIC
// Signature is made up of n 60 length chunks
base64sig := Base64BlockEncode(signature, 60)
// roll over the auth slice and add the appropriate header
for index, value := range base64sig {
request.Header.Set(fmt.Sprintf("X-Ops-Authorization-%d", index+1), string(value))
}
return nil
}
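// For reference, the canonical string signed above has this shape (all values
// below are illustrative assumptions):
//
//   Method:GET
//   Hashed Path:<base64 SHA-1 of the cleaned URL path>
//   X-Ops-Content-Hash:<base64 SHA-1 of the request body>
//   X-Ops-Timestamp:2020-01-01T00:00:00Z
//   X-Ops-UserId:my-client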
// PrivateKeyFromString parses an RSA private key from a string
func PrivateKeyFromString(key []byte) (*rsa.PrivateKey, error) {
block, _ := pem.Decode(key)
if block == nil {
return nil, fmt.Errorf("private key block size invalid")
}
rsaKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
return nil, err
}
return rsaKey, nil
}
|
{
return res, err
}
|
problem_1.py
|
""" If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9.
The sum of these multiples is 23. Find the sum of all the multiples of 3 or 5 below 1000.
"""
def
|
(a: int = 3, b: int = 5):
    max_num = 1000
    all_nums = [x for x in range(1, max_num) if (x % a == 0) or (x % b == 0)]
    return sum(all_nums)
if __name__ == "__main__":
result = mul_sum()
print(result)
|
mul_sum
|
whitebox_ml_pipe.py
|
"""Whitebox MLPipeline."""
import warnings
from typing import Union, Tuple, cast
from .base import MLPipeline
from ..features.wb_pipeline import WBFeatures
from ..selection.base import EmptySelector
from ...dataset.np_pd_dataset import NumpyDataset, PandasDataset
from ...ml_algo.tuning.base import ParamsTuner
from ...ml_algo.whitebox import WbMLAlgo
|
class WBPipeline(MLPipeline):
"""Special pipeline to handle whitebox model."""
@property
def whitebox(self) -> WbMLAlgo:
if len(self.ml_algos[0].models) > 1:
warnings.warn('More than 1 whitebox model was fitted during cross validation. Only the first is returned')
return self.ml_algos[0].models[0]
def __init__(self, whitebox: TunedWB):
"""Create whitebox MLPipeline.
Args:
whitebox: WbMLAlgo or tuple WbMLAlgo with params tuner.
"""
super().__init__([whitebox], True, features_pipeline=WBFeatures())
self._used_features = None
def fit_predict(self, train_valid: TrainValidIterator) -> NumpyDataset:
"""Fit whitebox.
Args:
train_valid: TrainValidIterator.
Returns:
Dataset.
"""
_subsamp_to_refit = train_valid.train[:5]
val_pred = super().fit_predict(train_valid)
self._prune_pipelines(_subsamp_to_refit)
return cast(NumpyDataset, val_pred)
def predict(self, dataset: PandasDataset, report: bool = False) -> NumpyDataset:
"""Predict whitebox.
The additional report param enables whitebox report generation.
Args:
dataset: PandasDataset of input features.
report: generate report.
Returns:
Dataset.
"""
dataset = self.features_pipeline.transform(dataset)
args = []
if self.ml_algos[0].params['report']:
args = [report]
pred = self.ml_algos[0].predict(dataset, *args)
return pred
def _prune_pipelines(self, subsamp: PandasDataset):
# update the used-features attribute from the list of fitted whiteboxes
feats_from_wb = set.union(*[set(list(x.features_fit.index)) for x in self.ml_algos[0].models])
# cols wo prefix - numerics and categories
raw_columns = list(set(subsamp.features).intersection(feats_from_wb))
diff_cols = list(set(feats_from_wb).difference(subsamp.features))
seasons = ['__'.join(x.split('__')[1:]) for x in diff_cols if x.startswith('season_')]
base_diff = [x.split('__') for x in diff_cols if x.startswith('basediff_')]
base_diff = [('_'.join(x[0].split('_')[1:]), '__'.join(x[1:])) for x in base_diff]
base_dates, compare_dates = [x[0] for x in base_diff], [x[1] for x in base_diff]
dates = list(set(base_dates + compare_dates + seasons))
raw_columns.extend(dates)
subsamp = subsamp[:, raw_columns]
self.features_pipeline = WBFeatures()
self.pre_selection = EmptySelector()
self.post_selection = EmptySelector()
train_valid = DummyIterator(subsamp)
train_valid = train_valid.apply_selector(self.pre_selection)
train_valid = train_valid.apply_feature_pipeline(self.features_pipeline)
train_valid.apply_selector(self.post_selection)
return
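# Hypothetical usage sketch (the constructor argument and the iterator are
# illustrative assumptions):
#
#   pipe = WBPipeline(whitebox=wb_algo)          # or (wb_algo, params_tuner)
#   oof_pred = pipe.fit_predict(train_valid)     # fit; returns OOF predictions
#   test_pred = pipe.predict(test_dataset, report=True)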
|
from ...validation.base import TrainValidIterator, DummyIterator
TunedWB = Union[WbMLAlgo, Tuple[WbMLAlgo, ParamsTuner]]
|
lib.rs
|
#![recursion_limit = "128"]
extern crate proc_macro;
use crate::proc_macro::TokenStream;
use quote::ToTokens;
use syn::{self, parse_macro_input, parse_quote, AttributeArgs, FnArg, ItemFn, Stmt};
/// Custom attribute for recursive parser
#[proc_macro_attribute]
pub fn recursive_parser(attr: TokenStream, item: TokenStream) -> TokenStream
|
fn impl_recursive_parser(_attr: &AttributeArgs, item: &ItemFn) -> TokenStream {
let before = impl_recursive_parser_bofore(&item);
let body = impl_recursive_parser_body(&item);
let mut item = item.clone();
item.block.stmts.clear();
item.block.stmts.push(before);
item.block.stmts.push(body);
item.into_token_stream().into()
}
fn impl_recursive_parser_bofore(item: &ItemFn) -> Stmt {
let ident = &item.sig.ident;
let input = if let Some(x) = &item.sig.inputs.first() {
match x {
FnArg::Typed(arg) => &arg.pat,
_ => panic!("function with #[recursive_parser] must have an argument"),
}
} else {
panic!("function with #[recursive_parser] must have an argument");
};
parse_quote! {
let #input = {
let id = nom_recursive::RECURSIVE_STORAGE.with(|storage| {
storage.borrow_mut().get(stringify!(#ident))
});
use nom_recursive::HasRecursiveInfo;
let mut info = #input.get_recursive_info();
use nom::AsBytes;
let ptr = #input.as_bytes().as_ptr();
if ptr != info.get_ptr() {
#[cfg(feature = "trace")]
{
use nom_tracable::Tracable;
nom_tracable::custom_trace(&#input, stringify!(#ident), "recursion flag clear", "\u{001b}[1;36m")
};
info.clear_flags();
info.set_ptr(ptr);
}
if info.check_flag(id) {
#[cfg(feature = "trace")]
{
use nom_tracable::Tracable;
nom_tracable::custom_trace(&#input, stringify!(#ident), "recursion detected", "\u{001b}[1;36m")
};
return Err(nom::Err::Error(nom::error::make_error(#input, nom::error::ErrorKind::Fix)));
}
#[cfg(feature = "trace")]
{
use nom_tracable::Tracable;
nom_tracable::custom_trace(&#input, stringify!(#ident), "recursion flag set", "\u{001b}[1;36m")
};
info.set_flag(id);
#input.set_recursive_info(info)
};
}
}
fn impl_recursive_parser_body(item: &ItemFn) -> Stmt {
let body = item.block.as_ref();
parse_quote! {
#body
}
}
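// Hypothetical usage sketch (parser name, Span type, and grammar are
// illustrative assumptions; the input type must implement HasRecursiveInfo):
//
// #[recursive_parser]
// pub fn expr(s: Span) -> IResult<Span, Expr> {
//     alt((binary_expr, term))(s)
// }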
|
{
let attr = parse_macro_input!(attr as AttributeArgs);
let item = parse_macro_input!(item as ItemFn);
impl_recursive_parser(&attr, &item)
}
|
app.py
|
from flask import Flask, render_template, request, jsonify
import base64
import logging
import numpy as np
from deepface import DeepFace
from PIL import Image
from io import BytesIO
import subprocess
import os
import cv2
import random
import webbrowser
app = Flask(__name__)
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
error_path = {'race': {'asian': 0, 'indian': 0, 'black': 0, 'white': 0,
'middle eastern': 0, 'latino hispanic': 0}, 'dominant_race': '?'}
directory = 'static/img'
if 'img' not in os.listdir('static/'):
os.mkdir(directory)
for f in os.listdir(directory):
os.remove(os.path.join(directory, f))
def generate_random_string():
numbers = '1234567890'
res = ''.join(random.choice(numbers) for _ in range(10))
return f'{directory}/{res}.png'
@app.route('/')
def main():
|
@app.route('/photocap')
def photo_cap():
photo_base64 = request.args.get('photo')
_, encoded = photo_base64.split(",", 1)
binary_data = base64.b64decode(encoded)
f = BytesIO()
f.write(binary_data)
f.seek(0)
image = Image.open(f)
image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
fn = generate_random_string()
cv2.imwrite(fn, image)
try:
    obj = DeepFace.analyze(image, actions=['race'])
    obj['filename'] = fn
    return jsonify(obj)
except Exception as e:
    # DeepFace raises ValueError when no face is detected; fall back to the
    # error payload in that case and for any other analysis failure.
    print(e)
    other_json = dict(error_path)  # copy so the module-level default isn't mutated
    other_json['filename'] = fn
    return jsonify(other_json)
if __name__ == "__main__":
# p = subprocess.Popen(['python -m SimpleHTTPServer'], shell=True) #Only for macOS
webbrowser.open_new('http://127.0.0.1:8000/')
app.run(host='localhost', port=8000, debug=True)
|
return render_template('index.html')
|
topSaved.js
|
const router = require("express").Router();
const { topSaved } = require("../../controllers");
|
module.exports = router;
|
router
.route("/")
.get(topSaved.topSaved);
|
KeepRandomBeaconService.go
|
// Code generated - DO NOT EDIT.
// This file is a generated command and any manual changes will be lost.
package cmd
import (
"fmt"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/core/types"
"github.com/keep-network/keep-common/pkg/chain/ethereum/ethutil"
"github.com/keep-network/keep-common/pkg/cmd"
"github.com/keep-network/keep-core/config"
"github.com/keep-network/keep-core/pkg/chain/gen/contract"
"github.com/urfave/cli"
)
var KeepRandomBeaconServiceCommand cli.Command
var keepRandomBeaconServiceDescription = `The keep-random-beacon-service command allows calling the KeepRandomBeaconService contract on an
Ethereum network. It has subcommands corresponding to each contract method,
each of which takes parameters based on the corresponding contract method's
parameters.
Subcommands will submit a non-mutating call to the network and output the
result.
All subcommands can be called against a specific block by passing the
-b/--block flag.
All subcommands can be used to investigate the result of a previous
transaction that called that same method by passing the -t/--transaction
flag with the transaction hash.
Subcommands for mutating methods may be submitted as a mutating transaction
by passing the -s/--submit flag. In this mode, this command will terminate
successfully once the transaction has been submitted, but will not wait for
the transaction to be included in a block. They return the transaction hash.
Calls that require ether to be paid will get 0 ether by default, which can
be changed by passing the -v/--value flag.`
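// Example invocations (illustrative assumptions: the binary name "keep-core"
// and all argument values are hypothetical; flag spellings follow the
// description above):
//
//   keep-core keep-random-beacon-service entry-fee-estimate 0x5208
//   keep-core keep-random-beacon-service request-relay-entry --value 100 --submit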
func init() {
AvailableCommands = append(AvailableCommands, cli.Command{
Name: "keep-random-beacon-service",
Usage: `Provides access to the KeepRandomBeaconService contract.`,
Description: keepRandomBeaconServiceDescription,
Subcommands: []cli.Command{{
Name: "entry-fee-breakdown",
Usage: "Calls the constant method entryFeeBreakdown on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsEntryFeeBreakdown,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "request-subsidy-fee-pool",
Usage: "Calls the constant method requestSubsidyFeePool on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsRequestSubsidyFeePool,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "version",
Usage: "Calls the constant method version on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsVersion,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "dkg-contribution-margin",
Usage: "Calls the constant method dkgContributionMargin on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsDkgContributionMargin,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "entry-fee-estimate",
Usage: "Calls the constant method entryFeeEstimate on the KeepRandomBeaconService contract.",
ArgsUsage: "[callbackGas] ",
Action: krbsEntryFeeEstimate,
Before: cmd.ArgCountChecker(1),
Flags: cmd.ConstFlags,
}, {
Name: "callback-surplus-recipient",
Usage: "Calls the constant method callbackSurplusRecipient on the KeepRandomBeaconService contract.",
ArgsUsage: "[requestId] ",
Action: krbsCallbackSurplusRecipient,
Before: cmd.ArgCountChecker(1),
Flags: cmd.ConstFlags,
}, {
Name: "initialized",
Usage: "Calls the constant method initialized on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsInitialized,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "select-operator-contract",
Usage: "Calls the constant method selectOperatorContract on the KeepRandomBeaconService contract.",
ArgsUsage: "[seed] ",
Action: krbsSelectOperatorContract,
Before: cmd.ArgCountChecker(1),
Flags: cmd.ConstFlags,
}, {
Name: "base-callback-gas",
Usage: "Calls the constant method baseCallbackGas on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsBaseCallbackGas,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "dkg-fee-pool",
Usage: "Calls the constant method dkgFeePool on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsDkgFeePool,
Before: cmd.ArgCountChecker(0),
Flags: cmd.ConstFlags,
}, {
Name: "fund-request-subsidy-fee-pool",
Usage: "Calls the payable method fundRequestSubsidyFeePool on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsFundRequestSubsidyFeePool,
Before: cli.BeforeFunc(cmd.PayableArgsChecker.AndThen(cmd.ArgCountChecker(0))),
Flags: cmd.PayableFlags,
}, {
Name: "remove-operator-contract",
Usage: "Calls the method removeOperatorContract on the KeepRandomBeaconService contract.",
ArgsUsage: "[operatorContract] ",
Action: krbsRemoveOperatorContract,
Before: cli.BeforeFunc(cmd.NonConstArgsChecker.AndThen(cmd.ArgCountChecker(1))),
Flags: cmd.NonConstFlags,
}, {
Name: "request-relay-entry",
Usage: "Calls the payable method requestRelayEntry on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsRequestRelayEntry,
Before: cli.BeforeFunc(cmd.PayableArgsChecker.AndThen(cmd.ArgCountChecker(0))),
Flags: cmd.PayableFlags,
}, {
Name: "request-relay-entry0",
Usage: "Calls the payable method requestRelayEntry0 on the KeepRandomBeaconService contract.",
ArgsUsage: "[callbackContract] [callbackGas] ",
Action: krbsRequestRelayEntry0,
Before: cli.BeforeFunc(cmd.PayableArgsChecker.AndThen(cmd.ArgCountChecker(2))),
Flags: cmd.PayableFlags,
}, {
Name: "add-operator-contract",
Usage: "Calls the method addOperatorContract on the KeepRandomBeaconService contract.",
ArgsUsage: "[operatorContract] ",
Action: krbsAddOperatorContract,
Before: cli.BeforeFunc(cmd.NonConstArgsChecker.AndThen(cmd.ArgCountChecker(1))),
Flags: cmd.NonConstFlags,
}, {
Name: "execute-callback",
Usage: "Calls the method executeCallback on the KeepRandomBeaconService contract.",
ArgsUsage: "[requestId] [entry] ",
Action: krbsExecuteCallback,
Before: cli.BeforeFunc(cmd.NonConstArgsChecker.AndThen(cmd.ArgCountChecker(2))),
Flags: cmd.NonConstFlags,
}, {
Name: "initialize",
Usage: "Calls the method initialize on the KeepRandomBeaconService contract.",
ArgsUsage: "[dkgContributionMargin] [registry] ",
Action: krbsInitialize,
Before: cli.BeforeFunc(cmd.NonConstArgsChecker.AndThen(cmd.ArgCountChecker(2))),
Flags: cmd.NonConstFlags,
}, {
Name: "entry-created",
Usage: "Calls the method entryCreated on the KeepRandomBeaconService contract.",
ArgsUsage: "[requestId] [entry] [submitter] ",
Action: krbsEntryCreated,
Before: cli.BeforeFunc(cmd.NonConstArgsChecker.AndThen(cmd.ArgCountChecker(3))),
Flags: cmd.NonConstFlags,
}, {
Name: "fund-dkg-fee-pool",
Usage: "Calls the payable method fundDkgFeePool on the KeepRandomBeaconService contract.",
ArgsUsage: "",
Action: krbsFundDkgFeePool,
Before: cli.BeforeFunc(cmd.PayableArgsChecker.AndThen(cmd.ArgCountChecker(0))),
Flags: cmd.PayableFlags,
}},
})
}
/// ------------------- Const methods -------------------
func krbsEntryFeeBreakdown(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.EntryFeeBreakdownAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsRequestSubsidyFeePool(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.RequestSubsidyFeePoolAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsVersion(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.VersionAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsDkgContributionMargin(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.DkgContributionMarginAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsEntryFeeEstimate(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
callbackGas, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter callbackGas, a uint256, from passed value %v",
c.Args()[0],
)
}
result, err := contract.EntryFeeEstimateAtBlock(
callbackGas,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsCallbackSurplusRecipient(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
requestId, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter requestId, a uint256, from passed value %v",
c.Args()[0],
)
}
result, err := contract.CallbackSurplusRecipientAtBlock(
requestId,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsInitialized(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.InitializedAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsSelectOperatorContract(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
seed, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter seed, a uint256, from passed value %v",
c.Args()[0],
)
}
result, err := contract.SelectOperatorContractAtBlock(
seed,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsBaseCallbackGas(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.BaseCallbackGasAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
func krbsDkgFeePool(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
result, err := contract.DkgFeePoolAtBlock(
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
return nil
}
/// ------------------- Non-const methods -------------------
func krbsFundRequestSubsidyFeePool(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.FundRequestSubsidyFeePool(
cmd.ValueFlagValue.Uint)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallFundRequestSubsidyFeePool(
cmd.ValueFlagValue.Uint, cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
func krbsRemoveOperatorContract(c *cli.Context) error
|
func krbsRequestRelayEntry(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
var (
transaction *types.Transaction
result *big.Int
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.RequestRelayEntry(
cmd.ValueFlagValue.Uint)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
result, err = contract.CallRequestRelayEntry(
cmd.ValueFlagValue.Uint, cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
}
return nil
}
func krbsRequestRelayEntry0(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
callbackContract, err := ethutil.AddressFromHex(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter callbackContract, a address, from passed value %v",
c.Args()[0],
)
}
callbackGas, err := hexutil.DecodeBig(c.Args()[1])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter callbackGas, a uint256, from passed value %v",
c.Args()[1],
)
}
var (
transaction *types.Transaction
result *big.Int
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.RequestRelayEntry0(
callbackContract,
callbackGas,
cmd.ValueFlagValue.Uint)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
result, err = contract.CallRequestRelayEntry0(
callbackContract,
callbackGas,
cmd.ValueFlagValue.Uint, cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(result)
}
return nil
}
func krbsAddOperatorContract(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
operatorContract, err := ethutil.AddressFromHex(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter operatorContract, a address, from passed value %v",
c.Args()[0],
)
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.AddOperatorContract(
operatorContract,
)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallAddOperatorContract(
operatorContract,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
func krbsExecuteCallback(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
requestId, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter requestId, a uint256, from passed value %v",
c.Args()[0],
)
}
entry, err := hexutil.DecodeBig(c.Args()[1])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter entry, a uint256, from passed value %v",
c.Args()[1],
)
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.ExecuteCallback(
requestId,
entry,
)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallExecuteCallback(
requestId,
entry,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
func krbsInitialize(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
dkgContributionMargin, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter dkgContributionMargin, a uint256, from passed value %v",
c.Args()[0],
)
}
registry, err := ethutil.AddressFromHex(c.Args()[1])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter registry, a address, from passed value %v",
c.Args()[1],
)
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.Initialize(
dkgContributionMargin,
registry,
)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallInitialize(
dkgContributionMargin,
registry,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
func krbsEntryCreated(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
requestId, err := hexutil.DecodeBig(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter requestId, a uint256, from passed value %v",
c.Args()[0],
)
}
entry, err := hexutil.Decode(c.Args()[1])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter entry, a bytes, from passed value %v",
c.Args()[1],
)
}
submitter, err := ethutil.AddressFromHex(c.Args()[2])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter submitter, a address, from passed value %v",
c.Args()[2],
)
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.EntryCreated(
requestId,
entry,
submitter,
)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallEntryCreated(
requestId,
entry,
submitter,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
func krbsFundDkgFeePool(c *cli.Context) error {
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.FundDkgFeePool(
cmd.ValueFlagValue.Uint)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallFundDkgFeePool(
cmd.ValueFlagValue.Uint, cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
/// ------------------- Initialization -------------------
func initializeKeepRandomBeaconService(c *cli.Context) (*contract.KeepRandomBeaconService, error) {
config, err := config.ReadEthereumConfig(c.GlobalString("config"))
if err != nil {
return nil, fmt.Errorf("error reading Ethereum config from file: [%v]", err)
}
client, _, _, err := ethutil.ConnectClients(config.URL, config.URLRPC)
if err != nil {
return nil, fmt.Errorf("error connecting to Ethereum node: [%v]", err)
}
key, err := ethutil.DecryptKeyFile(
config.Account.KeyFile,
config.Account.KeyFilePassword,
)
if err != nil {
return nil, fmt.Errorf(
"failed to read KeyFile: %s: [%v]",
config.Account.KeyFile,
err,
)
}
checkInterval := cmd.DefaultMiningCheckInterval
maxGasPrice := cmd.DefaultMaxGasPrice
if config.MiningCheckInterval != 0 {
checkInterval = time.Duration(config.MiningCheckInterval) * time.Second
}
if config.MaxGasPrice != nil {
maxGasPrice = config.MaxGasPrice.Int
}
miningWaiter := ethutil.NewMiningWaiter(client, checkInterval, maxGasPrice)
address := common.HexToAddress(config.ContractAddresses["KeepRandomBeaconService"])
return contract.NewKeepRandomBeaconService(
address,
key,
client,
ethutil.NewNonceManager(key.Address, client),
miningWaiter,
&sync.Mutex{},
)
}
|
{
contract, err := initializeKeepRandomBeaconService(c)
if err != nil {
return err
}
operatorContract, err := ethutil.AddressFromHex(c.Args()[0])
if err != nil {
return fmt.Errorf(
"couldn't parse parameter operatorContract, a address, from passed value %v",
c.Args()[0],
)
}
var (
transaction *types.Transaction
)
if c.Bool(cmd.SubmitFlag) {
// Do a regular submission. Take payable into account.
transaction, err = contract.RemoveOperatorContract(
operatorContract,
)
if err != nil {
return err
}
cmd.PrintOutput(transaction.Hash)
} else {
// Do a call.
err = contract.CallRemoveOperatorContract(
operatorContract,
cmd.BlockFlagValue.Uint,
)
if err != nil {
return err
}
cmd.PrintOutput(nil)
}
return nil
}
|
CustomOverlay.js
|
import React from 'react';
import { StyleSheet, View, Dimensions } from 'react-native';
import MapView, { ProviderPropType } from 'react-native-maps';
import XMarksTheSpot from './CustomOverlayXMarksTheSpot';
const { width, height } = Dimensions.get('window');
const ASPECT_RATIO = width / height;
const LATITUDE = 37.78825;
const LONGITUDE = -122.4324;
const LATITUDE_DELTA = 0.0922;
const LONGITUDE_DELTA = LATITUDE_DELTA * ASPECT_RATIO;
class CustomOverlay extends React.Component {
constructor(props) {
super(props);
this.state = {
region: {
latitude: LATITUDE,
longitude: LONGITUDE,
latitudeDelta: LATITUDE_DELTA,
longitudeDelta: LONGITUDE_DELTA,
},
coordinates: [
{
longitude: -122.442753,
latitude: 37.79879,
},
{
longitude: -122.424728,
latitude: 37.801232,
},
{
longitude: -122.422497,
latitude: 37.790651,
},
{
longitude: -122.440693,
latitude: 37.788209,
},
],
center: {
longitude: -122.4326648935676,
latitude: 37.79418561114521,
},
};
}
render() {
const { coordinates, center, region } = this.state;
return (
<View style={styles.container}>
<MapView
provider={this.props.provider}
style={styles.map}
|
</MapView>
</View>
);
}
}
CustomOverlay.propTypes = {
provider: ProviderPropType,
};
const styles = StyleSheet.create({
container: {
...StyleSheet.absoluteFillObject,
justifyContent: 'flex-end',
alignItems: 'center',
},
map: {
...StyleSheet.absoluteFillObject,
},
});
export default CustomOverlay;
|
initialRegion={region}
>
<XMarksTheSpot coordinates={coordinates} center={center} />
|
ast.go
|
// Package ast contains the definitions of the abstract-syntax tree
// that our parser produces, and our interpreter executes.
package ast
import (
"bytes"
"fmt"
"strings"
"github.com/skx/monkey/token"
)
// Node represents a node.
type Node interface {
// TokenLiteral returns the literal of the token.
TokenLiteral() string
// String returns this object as a string.
String() string
}
// Statement represents a single statement.
type Statement interface {
// Node is the node holding the actual statement
Node
statementNode()
}
// Expression represents a single expression.
type Expression interface {
// Node is the node holding the expression.
Node
expressionNode()
}
// Program represents a complete program.
type Program struct {
// Statements is the set of statements which the program is comprised
// of.
Statements []Statement
}
// TokenLiteral returns the literal token of our program.
func (p *Program) TokenLiteral() string {
if len(p.Statements) > 0 {
return p.Statements[0].TokenLiteral()
}
return ""
}
// String returns this object as a string.
func (p *Program) String() string {
var out bytes.Buffer
for _, stmt := range p.Statements {
out.WriteString(stmt.String())
}
return out.String()
}
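// Hypothetical construction sketch (token literals are illustrative
// assumptions):
//
//   prog := &Program{Statements: []Statement{
//       &ExpressionStatement{Expression: &InfixExpression{
//           Left:     &IntegerLiteral{Token: token.Token{Literal: "1"}, Value: 1},
//           Operator: "+",
//           Right:    &IntegerLiteral{Token: token.Token{Literal: "2"}, Value: 2},
//       }},
//   }}
//   fmt.Println(prog.String()) // -> (1 + 2)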
// LetStatement holds a let-statement
type LetStatement struct {
|
Name *Identifier
// Value is the thing we're storing in the variable.
Value Expression
}
func (ls *LetStatement) statementNode() {}
// TokenLiteral returns the literal token.
func (ls *LetStatement) TokenLiteral() string { return ls.Token.Literal }
// String returns this object as a string.
func (ls *LetStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.TokenLiteral())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
// ConstStatement is the same as let-statement, but the value
// can't be changed later.
type ConstStatement struct {
// Token is the token
Token token.Token
// Name is the name of the variable we're setting
Name *Identifier
// Value contains the value which is to be set
Value Expression
}
func (ls *ConstStatement) statementNode() {}
// TokenLiteral returns the literal token.
func (ls *ConstStatement) TokenLiteral() string { return ls.Token.Literal }
// String returns this object as a string.
func (ls *ConstStatement) String() string {
var out bytes.Buffer
out.WriteString(ls.TokenLiteral() + " ")
out.WriteString(ls.Name.TokenLiteral())
out.WriteString(" = ")
if ls.Value != nil {
out.WriteString(ls.Value.String())
}
out.WriteString(";")
return out.String()
}
// Identifier holds a single identifier.
type Identifier struct {
// Token is the literal token
Token token.Token
// Value is the name of the identifier
Value string
}
func (i *Identifier) expressionNode() {}
// TokenLiteral returns the literal token.
func (i *Identifier) TokenLiteral() string { return i.Token.Literal }
// String returns this object as a string.
func (i *Identifier) String() string {
return i.Value
}
// ReturnStatement stores a return-statement
type ReturnStatement struct {
// Token contains the literal token.
Token token.Token
// ReturnValue is the value which is to be returned.
ReturnValue Expression
}
func (rs *ReturnStatement) statementNode() {}
// TokenLiteral returns the literal token.
func (rs *ReturnStatement) TokenLiteral() string { return rs.Token.Literal }
// String returns this object as a string.
func (rs *ReturnStatement) String() string {
var out bytes.Buffer
out.WriteString(rs.TokenLiteral() + " ")
if rs.ReturnValue != nil {
out.WriteString(rs.ReturnValue.TokenLiteral())
}
out.WriteString(";")
return out.String()
}
// ExpressionStatement is an expression
type ExpressionStatement struct {
// Token is the literal token
Token token.Token
// Expression holds the expression
Expression Expression
}
func (es *ExpressionStatement) statementNode() {}
// TokenLiteral returns the literal token.
func (es *ExpressionStatement) TokenLiteral() string { return es.Token.Literal }
// String returns this object as a string.
func (es *ExpressionStatement) String() string {
if es.Expression != nil {
return es.Expression.String()
}
return ""
}
// IntegerLiteral holds an integer
type IntegerLiteral struct {
// Token is the literal token
Token token.Token
// Value holds the integer.
Value int64
}
func (il *IntegerLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (il *IntegerLiteral) TokenLiteral() string { return il.Token.Literal }
// String returns this object as a string.
func (il *IntegerLiteral) String() string { return il.Token.Literal }
// FloatLiteral holds a floating-point number
type FloatLiteral struct {
// Token is the literal token
Token token.Token
// Value holds the floating-point number.
Value float64
}
func (fl *FloatLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (fl *FloatLiteral) TokenLiteral() string { return fl.Token.Literal }
// String returns this object as a string.
func (fl *FloatLiteral) String() string { return fl.Token.Literal }
// PrefixExpression holds a prefix-based expression
type PrefixExpression struct {
// Token holds the token. e.g. "!"
Token token.Token
// Operator holds the operator being invoked (e.g. "!" ).
Operator string
// Right holds the thing to be operated upon
Right Expression
}
func (pe *PrefixExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (pe *PrefixExpression) TokenLiteral() string { return pe.Token.Literal }
// String returns this object as a string.
func (pe *PrefixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Operator)
out.WriteString(pe.Right.String())
out.WriteString(")")
return out.String()
}
// InfixExpression stores an infix expression.
type InfixExpression struct {
// Token holds the literal expression
Token token.Token
// Left holds the left-most argument
Left Expression
// Operator holds the operation to be carried out (e.g. "+", "-" )
Operator string
// Right holds the right-most argument
Right Expression
}
func (ie *InfixExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (ie *InfixExpression) TokenLiteral() string { return ie.Token.Literal }
// String returns this object as a string.
func (ie *InfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString(" " + ie.Operator + " ")
out.WriteString(ie.Right.String())
out.WriteString(")")
return out.String()
}
// PostfixExpression holds a postfix-based expression
type PostfixExpression struct {
// Token holds the token we're operating upon
Token token.Token
// Operator holds the postfix token, e.g. ++
Operator string
}
func (pe *PostfixExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (pe *PostfixExpression) TokenLiteral() string { return pe.Token.Literal }
// String returns this object as a string.
func (pe *PostfixExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(pe.Token.Literal)
out.WriteString(pe.Operator)
out.WriteString(")")
return out.String()
}
// NullLiteral represents a literal null
type NullLiteral struct {
// Token holds the actual token
Token token.Token
}
func (n *NullLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (n *NullLiteral) TokenLiteral() string { return n.Token.Literal }
// String returns this object as a string.
func (n *NullLiteral) String() string { return n.Token.Literal }
// Boolean holds a boolean type
type Boolean struct {
// Token holds the actual token
Token token.Token
// Value stores the bool's value: true, or false.
Value bool
}
func (b *Boolean) expressionNode() {}
// TokenLiteral returns the literal token.
func (b *Boolean) TokenLiteral() string { return b.Token.Literal }
// String returns this object as a string.
func (b *Boolean) String() string { return b.Token.Literal }
// BlockStatement holds a group of statements, which are treated
// as a block. (For example the body of an `if` expression.)
type BlockStatement struct {
// Token holds the actual token
Token token.Token
// Statements contain the set of statements within the block
Statements []Statement
}
func (bs *BlockStatement) statementNode() {}
// TokenLiteral returns the literal token.
func (bs *BlockStatement) TokenLiteral() string { return bs.Token.Literal }
// String returns this object as a string.
func (bs *BlockStatement) String() string {
var out bytes.Buffer
for _, s := range bs.Statements {
out.WriteString(s.String())
}
return out.String()
}
// IfExpression holds an if-statement
type IfExpression struct {
// Token is the actual token
Token token.Token
// Condition is the thing that is evaluated to determine
// which block should be executed.
Condition Expression
// Consequence is the set of statements executed if the
// condition is true.
Consequence *BlockStatement
// Alternative is the set of statements executed if the
// condition is not true (optional).
Alternative *BlockStatement
}
func (ie *IfExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (ie *IfExpression) TokenLiteral() string { return ie.Token.Literal }
// String returns this object as a string.
func (ie *IfExpression) String() string {
var out bytes.Buffer
out.WriteString("if")
out.WriteString(ie.Condition.String())
out.WriteString(" ")
out.WriteString(ie.Consequence.String())
if ie.Alternative != nil {
out.WriteString("else")
out.WriteString(ie.Alternative.String())
}
return out.String()
}
// TernaryExpression holds a ternary-expression.
type TernaryExpression struct {
// Token is the actual token.
Token token.Token
// Condition is the thing that is evaluated to determine
// which expression should be returned
Condition Expression
// IfTrue is the expression to return if the condition is true.
IfTrue Expression
// IfFalse is the expression to return if the condition is not true.
IfFalse Expression
}
// ForeachStatement holds a foreach-statement.
type ForeachStatement struct {
// Token is the actual token
Token token.Token
// Index is the variable we'll set with the index, for the block's scope
//
// This is optional.
Index string
// Ident is the variable we'll set with each item, for the block's scope
Ident string
// Value is the thing we'll range over.
Value Expression
// Body is the block we'll execute.
Body *BlockStatement
}
func (fes *ForeachStatement) expressionNode() {}
// TokenLiteral returns the literal token.
func (fes *ForeachStatement) TokenLiteral() string { return fes.Token.Literal }
// String returns this object as a string.
func (fes *ForeachStatement) String() string {
var out bytes.Buffer
out.WriteString("foreach ")
out.WriteString(fes.Ident)
out.WriteString(" ")
out.WriteString(fes.Value.String())
out.WriteString(fes.Body.String())
return out.String()
}
func (te *TernaryExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (te *TernaryExpression) TokenLiteral() string { return te.Token.Literal }
// String returns this object as a string.
func (te *TernaryExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(te.Condition.String())
out.WriteString(" ? ")
out.WriteString(te.IfTrue.String())
out.WriteString(" : ")
out.WriteString(te.IfFalse.String())
out.WriteString(")")
return out.String()
}
// ForLoopExpression holds a for-loop
type ForLoopExpression struct {
// Token is the actual token
Token token.Token
// Condition is the expression used to determine if the loop
// is still running.
Condition Expression
// Consequence is the set of statements to be executed for the
// loop body.
Consequence *BlockStatement
}
func (fle *ForLoopExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (fle *ForLoopExpression) TokenLiteral() string { return fle.Token.Literal }
// String returns this object as a string.
func (fle *ForLoopExpression) String() string {
var out bytes.Buffer
out.WriteString("for (")
out.WriteString(fle.Condition.String())
out.WriteString(" ) {")
out.WriteString(fle.Consequence.String())
out.WriteString("}")
return out.String()
}
// FunctionLiteral holds a function-definition
//
// See-also FunctionDefineLiteral.
type FunctionLiteral struct {
// Token is the actual token
Token token.Token
// Parameters is the list of parameters the function receives.
Parameters []*Identifier
// Defaults holds any default values for arguments which aren't
// specified
Defaults map[string]Expression
// Body contains the set of statements within the function.
Body *BlockStatement
}
func (fl *FunctionLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (fl *FunctionLiteral) TokenLiteral() string { return fl.Token.Literal }
// String returns this object as a string.
func (fl *FunctionLiteral) String() string {
var out bytes.Buffer
params := make([]string, 0)
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
// FunctionDefineLiteral holds a function-definition.
//
// See-also FunctionLiteral.
type FunctionDefineLiteral struct {
// Token holds the token
Token token.Token
	// Parameters holds the function parameters.
Parameters []*Identifier
// Defaults holds any default-arguments.
Defaults map[string]Expression
	// Body holds the set of statements in the function's body.
Body *BlockStatement
}
func (fl *FunctionDefineLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (fl *FunctionDefineLiteral) TokenLiteral() string {
return fl.Token.Literal
}
// String returns this object as a string.
func (fl *FunctionDefineLiteral) String() string {
var out bytes.Buffer
params := make([]string, 0)
for _, p := range fl.Parameters {
params = append(params, p.String())
}
out.WriteString(fl.TokenLiteral())
out.WriteString("(")
out.WriteString(strings.Join(params, ", "))
out.WriteString(") ")
out.WriteString(fl.Body.String())
return out.String()
}
// CallExpression holds the invocation of a method-call.
type CallExpression struct {
// Token stores the literal token
Token token.Token
// Function is the function to be invoked.
Function Expression
// Arguments are the arguments to be applied
Arguments []Expression
}
func (ce *CallExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (ce *CallExpression) TokenLiteral() string { return ce.Token.Literal }
// String returns this object as a string.
func (ce *CallExpression) String() string {
var out bytes.Buffer
args := make([]string, 0)
for _, a := range ce.Arguments {
args = append(args, a.String())
}
out.WriteString(ce.Function.String())
out.WriteString("(")
out.WriteString(strings.Join(args, ", "))
out.WriteString(")")
return out.String()
}
// ObjectCallExpression is used when calling a method on an object.
type ObjectCallExpression struct {
// Token is the literal token
Token token.Token
// Object is the object against which the call is invoked.
Object Expression
// Call is the method-name.
Call Expression
}
func (oce *ObjectCallExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (oce *ObjectCallExpression) TokenLiteral() string {
return oce.Token.Literal
}
// String returns this object as a string.
func (oce *ObjectCallExpression) String() string {
var out bytes.Buffer
out.WriteString(oce.Object.String())
out.WriteString(".")
out.WriteString(oce.Call.String())
return out.String()
}
// StringLiteral holds a string
type StringLiteral struct {
// Token is the token
Token token.Token
// Value is the value of the string.
Value string
}
func (sl *StringLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (sl *StringLiteral) TokenLiteral() string { return sl.Token.Literal }
// String returns this object as a string.
func (sl *StringLiteral) String() string { return sl.Token.Literal }
// RegexpLiteral holds a regular-expression.
type RegexpLiteral struct {
// Token is the token
Token token.Token
// Value is the value of the regular expression.
Value string
// Flags contains any flags associated with the regexp.
Flags string
}
func (rl *RegexpLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (rl *RegexpLiteral) TokenLiteral() string { return rl.Token.Literal }
// String returns this object as a string.
func (rl *RegexpLiteral) String() string {
return (fmt.Sprintf("/%s/%s", rl.Value, rl.Flags))
}
// BacktickLiteral holds details of a command to be executed
type BacktickLiteral struct {
// Token is the actual token
Token token.Token
// Value is the name of the command to execute.
Value string
}
func (bl *BacktickLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (bl *BacktickLiteral) TokenLiteral() string { return bl.Token.Literal }
// String returns this object as a string.
func (bl *BacktickLiteral) String() string { return bl.Token.Literal }
// ArrayLiteral holds an inline array
type ArrayLiteral struct {
// Token is the token
Token token.Token
// Elements holds the members of the array.
Elements []Expression
}
func (al *ArrayLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (al *ArrayLiteral) TokenLiteral() string { return al.Token.Literal }
// String returns this object as a string.
func (al *ArrayLiteral) String() string {
var out bytes.Buffer
elements := make([]string, 0)
for _, el := range al.Elements {
elements = append(elements, el.String())
}
out.WriteString("[")
out.WriteString(strings.Join(elements, ", "))
out.WriteString("]")
return out.String()
}
// IndexExpression holds an index-expression
type IndexExpression struct {
// Token is the actual token
Token token.Token
// Left is the thing being indexed.
Left Expression
// Index is the value we're indexing
Index Expression
}
func (ie *IndexExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (ie *IndexExpression) TokenLiteral() string { return ie.Token.Literal }
// String returns this object as a string.
func (ie *IndexExpression) String() string {
var out bytes.Buffer
out.WriteString("(")
out.WriteString(ie.Left.String())
out.WriteString("[")
out.WriteString(ie.Index.String())
out.WriteString("])")
return out.String()
}
// HashLiteral holds a hash definition
type HashLiteral struct {
// Token holds the token
Token token.Token // the '{' token
// Pairs stores the name/value sets of the hash-content
Pairs map[Expression]Expression
}
func (hl *HashLiteral) expressionNode() {}
// TokenLiteral returns the literal token.
func (hl *HashLiteral) TokenLiteral() string { return hl.Token.Literal }
// String returns this object as a string.
func (hl *HashLiteral) String() string {
var out bytes.Buffer
pairs := make([]string, 0)
for key, value := range hl.Pairs {
pairs = append(pairs, key.String()+":"+value.String())
}
out.WriteString("{")
out.WriteString(strings.Join(pairs, ", "))
out.WriteString("}")
return out.String()
}
// AssignStatement is generally used for a (let-less) assignment,
// such as "x = y", however we allow an operator to be stored ("=" in that
// example), such that we can do self-operations.
//
// Specifically "x += y" is defined as an assignment-statement with
// the operator set to "+=". The same applies for "-=", "*=", and
// "/=".
type AssignStatement struct {
Token token.Token
Name *Identifier
Operator string
Value Expression
}
func (as *AssignStatement) expressionNode() {}
// TokenLiteral returns the literal token.
func (as *AssignStatement) TokenLiteral() string { return as.Token.Literal }
// String returns this object as a string.
func (as *AssignStatement) String() string {
var out bytes.Buffer
out.WriteString(as.Name.String())
out.WriteString(as.Operator)
out.WriteString(as.Value.String())
return out.String()
}
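// exampleAssignString is a minimal sketch of the "+=" form described above
// (not part of the original source); it assumes the package's Identifier
// node carries Token and Value fields with String() returning the value,
// which this excerpt does not show.
func exampleAssignString() string {
	as := &AssignStatement{
		Name:     &Identifier{Token: token.Token{Literal: "x"}, Value: "x"},
		Operator: "+=",
		Value:    &StringLiteral{Token: token.Token{Literal: "3"}, Value: "3"},
	}
	// Renders as "x+=3".
	return as.String()
}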
// CaseExpression handles the case within a switch statement
type CaseExpression struct {
// Token is the actual token
Token token.Token
// Default branch?
Default bool
// The thing we match
Expr []Expression
// The code to execute if there is a match
Block *BlockStatement
}
func (ce *CaseExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (ce *CaseExpression) TokenLiteral() string { return ce.Token.Literal }
// String returns this object as a string.
func (ce *CaseExpression) String() string {
var out bytes.Buffer
if ce.Default {
out.WriteString("default ")
} else {
out.WriteString("case ")
tmp := []string{}
for _, exp := range ce.Expr {
tmp = append(tmp, exp.String())
}
out.WriteString(strings.Join(tmp, ","))
}
out.WriteString(ce.Block.String())
return out.String()
}
// SwitchExpression handles a switch statement
type SwitchExpression struct {
// Token is the actual token
Token token.Token
// Value is the thing that is evaluated to determine
// which block should be executed.
Value Expression
// The branches we handle
Choices []*CaseExpression
}
func (se *SwitchExpression) expressionNode() {}
// TokenLiteral returns the literal token.
func (se *SwitchExpression) TokenLiteral() string { return se.Token.Literal }
// String returns this object as a string.
func (se *SwitchExpression) String() string {
var out bytes.Buffer
out.WriteString("\nswitch (")
out.WriteString(se.Value.String())
out.WriteString(")\n{\n")
for _, tmp := range se.Choices {
if tmp != nil {
out.WriteString(tmp.String())
}
}
out.WriteString("}\n")
return out.String()
}
|
// Token holds the token
Token token.Token
// Name is the name of the variable to which we're assigning
|
relay.go
|
package relayer
import (
"io"
"net/http"
"github.com/mailchain/mailchain/errs"
)
type RelayFunc func(req *http.Request) (*http.Request, error)
// HandleRequest calls f(req) and relays the response.
func (f RelayFunc) HandleRequest(w http.ResponseWriter, req *http.Request) {
r, err := f(req)
if err != nil {
errs.JSONWriter(w, http.StatusBadRequest, err)
return
}
client := http.Client{}
resp, err := client.Do(r)
if err != nil {
errs.JSONWriter(w, http.StatusBadGateway, err)
return
}
defer resp.Body.Close()
copyHeader(resp.Header, w.Header())
w.WriteHeader(resp.StatusCode)
_, _ = io.Copy(w, resp.Body)
}
func ChangeURL(url string) RelayFunc {
return func(req *http.Request) (*http.Request, error) {
proxyReq, err := http.NewRequest(req.Method, url, req.Body)
		if err != nil {
			return nil, err
		}
copyHeader(req.Header, proxyReq.Header)
return proxyReq, nil
}
}
func copyHeader(src, dst http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
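// exampleServer is a minimal wiring sketch (the URL and port are
// illustrative assumptions, not part of this package): ChangeURL builds a
// RelayFunc whose HandleRequest method has the http.HandlerFunc signature.
func exampleServer() {
	relay := ChangeURL("https://backend.example.com/v1/messages")
	http.HandleFunc("/relay", relay.HandleRequest)
	_ = http.ListenAndServe(":8080", nil)
}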
|
RcsbQuery.ts
|
import ApolloClient from 'apollo-boost';
import * as configBorregoGraphQL from "../RcsbServerConfig/codegen.borrego.json";
import * as configYosemiteGraphQL from "../RcsbServerConfig/codegen.yosemite.json";
export class RcsbQuery{
borregoClient: ApolloClient<any> = new ApolloClient({
uri: (<any>configBorregoGraphQL).schema
});
yosemiteClient: ApolloClient<any> = new ApolloClient({
uri: (<any>configYosemiteGraphQL).schema
});
}
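// Minimal usage sketch (illustrative only: the real query shape depends on
// the Borrego GraphQL schema configured above; apollo-boost re-exports gql).
import { gql } from 'apollo-boost';
export function exampleBorregoQuery(): void {
    new RcsbQuery().borregoClient
        .query({ query: gql`query { __typename }` })
        .then(result => console.log(result.data));
}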
|
translatable-set-of-normalized-string-editor.component.ts
|
// Copyright 2020 The Oppia Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS-IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @fileoverview Directive for translatable set of normalized string editor.
*/
import { ChangeDetectorRef, Component, EventEmitter, Input, Output } from '@angular/core';
import { downgradeComponent } from '@angular/upgrade/static';
export interface TranslatableSetOfStringSchema {
type: string;
items: { type: string };
validators: { id: string }[];
}
@Component({
selector: 'translatable-set-of-normalized-string-editor',
templateUrl: './translatable-set-of-normalized-string-editor.component.html'
})
export class TranslatableSetOfNormalizedStringEditorComponent {
// This property is initialized using Angular lifecycle hooks
// and we need to do non-null assertion, for more information see
// https://github.com/oppia/oppia/wiki/Guide-on-defining-types#ts-7-1
@Input() value!: { normalizedStrSet: string };
@Output() valueChanged = new EventEmitter();
schema: TranslatableSetOfStringSchema = {
type: 'list',
items: {
type: 'unicode'
},
validators: [{
id: 'is_uniquified'
}]
};
constructor(private changeDetectorRef: ChangeDetectorRef) { }
updateValue(val: string): void {
if (this.value.normalizedStrSet === val) {
return;
}
this.value.normalizedStrSet = val;
this.valueChanged.emit(this.value);
this.changeDetectorRef.detectChanges();
}
getSchema(): TranslatableSetOfStringSchema {
return this.schema;
}
}
angular.module('oppia').directive(
'translatableSetOfNormalizedStringEditor',
downgradeComponent({
component: TranslatableSetOfNormalizedStringEditorComponent
}) as angular.IDirectiveFactory);
|
clothingBot.py
|
import sys
import json
from time import sleep
from selenium import webdriver
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
site_000 = 'https://apolina-kids.com/collections/all'
def findItem(prodName):
elem = driver.find_element_by_xpath('//img[contains(@alt,"'+prodName+'")]')
action = webdriver.common.action_chains.ActionChains(driver)
action.move_to_element_with_offset(elem, 5, 5)
action.click()
action.perform()
def selectSize():
try:
select = driver.find_element_by_xpath(
"//select[@id=\"product-select-4540644753485-option-0\"]")
all_options = select.find_elements_by_tag_name("option")
for option in all_options:
value = option.get_attribute("value")
if value == "5-7Y":
print("Value is: %s" % value)
option.click()
    except NoSuchElementException:
        print('No select found')
def addToCart():
    try:
        addToCart = driver.find_element_by_xpath(
            "//input[@value='Add to Cart']")
    except NoSuchElementException:
        print('Add To Cart button not found')
        return
try:
addToCart.send_keys(webdriver.common.keys.Keys.RETURN)
    except Exception:
        try:
            addToCart.click()
        except Exception:
            print("Could not click 'Add to cart'")
sleep(2)
checkout = driver.find_element_by_xpath(
"//input[@value='Check Out']")
checkout.send_keys(webdriver.common.keys.Keys.RETURN)
def clickButton(id):
if (id is None):
cont = driver.find_element_by_name("button")
else:
cont = driver.find_element_by_id(id)
cont.send_keys(webdriver.common.keys.Keys.RETURN)
def shippingDetails():
with open('info.json') as f:
data = json.load(f)
email = driver.find_element_by_id("checkout_email")
email.send_keys(data['email'])
firstName = driver.find_element_by_id(
"checkout_shipping_address_first_name")
firstName.send_keys(data['firstName'])
lastName = driver.find_element_by_id("checkout_shipping_address_last_name")
lastName.send_keys(data['lastName'])
address = driver.find_element_by_id(
"checkout_shipping_address_address1")
address.send_keys(data['address'])
    try:
        apartment = driver.find_element_by_id(
            "checkout_shipping_address_address2")
        apartment.send_keys(data['apartment'])
    except NoSuchElementException:
        print('Not an apartment')
city = driver.find_element_by_id(
"checkout_shipping_address_city")
city.send_keys(data['city'])
country = driver.find_element_by_id(
"checkout_shipping_address_country")
all_options = country.find_elements_by_tag_name("option")
for option in all_options:
value = option.get_attribute("value")
if value == "United States":
option.click()
break
state = driver.find_element_by_id(
"checkout_shipping_address_province")
state_options = state.find_elements_by_tag_name("option")
for states in state_options:
value1 = states.get_attribute("value")
print("Value1 is: %s" % value1)
if value1 == data['state']:
print("Value is: %s" % value1)
states.click()
break
zipcode = driver.find_element_by_id(
"checkout_shipping_address_zip")
zipcode.send_keys(data['zipcode'])
phone = driver.find_element_by_id("checkout_shipping_address_phone")
phone.send_keys(data['phone'])
# def inputPayment():
# driver.switch_to.frame(driver.find_element_by_xpath(
# "//*[contains(@id,'card-fields-number')]"))
# wait = WebDriverWait(driver, 10)
# wait.until(EC.frame_to_be_available_and_switch_to_it(
# (By.CLASS_NAME, "card-fields-iframe")))
# cardNumber = driver.find_element_by_id("number")
# cardNumber.send_keys('4930 0000 0000 0000')
# name = WebDriverWait(driver, 20).until(EC.element_to_be_clickable(
# (By.XPATH, "//input[@id='name']")))
# driver.execute_script("arguments[0].setAttribute(arguments[1], arguments[2]);",
# name,
# "value",
# "NNAAAAMME")
# name.send_keys('NAME')
# name.send_keys(webdriver.common.keys.Keys.RETURN)
# js = "arguments[0].setAttribute('value','\"+NAME+\"')"
# expiry = driver.find_element_by_id("expiry")
# driver.execute_script("arguments[0].setAttribute(arguments[1], arguments[2]);",
# expiry,
# "value",
# "04 / 34")
# verification_value = driver.find_element_by_id("verification_value")
# driver.execute_script("arguments[0].setAttribute(arguments[1], arguments[2]);",
# verification_value,
# "value",
# "123")
# sleep(10)
# driver.switch_to.default_content()
if __name__ == '__main__':
# setting the site and driver
driver = webdriver.Firefox()
# load the site
URL = site_000
driver.get(URL)
sleep(1)
findItem('POL DRESS - FARM CHECK / HAY')
sleep(1)
selectSize()
addToCart()
sleep(3)
shippingDetails()
clickButton(None)
sleep(2.5)
clickButton(None)
sleep(3)
|
MOPS_Timings.py
|
'''
Timings Class
Arrival and departure times for all Route Sections on a Route on a particular
schedule and shows the time into a section and the time out of a section
Model Operations Processing System. Copyright Brian Fairbairn 2009-2010. Licenced under the EUPL.
You may not use this work except in compliance with the Licence. You may obtain a copy of the
Licence at http://ec.europa.eu/idabc/eupl or as attached with this application (see Licence file).
Unless required by applicable law or agreed to in writing, software distributed under the Licence
is distributed on an 'AS IS' basis WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either expressed
or implied. See the Licence governing permissions and limitations under the Licence.
Changes:
15/08/2010 Ver 1 Removed unused variables
Added handling of bad database return codes
'''
import MOPS_Element
class cTimings(MOPS_Element.cElement):
"""Details about Timings. Inherits from ListHandler class.
Timings are contained in fixed-length data records.
Id 10 Automatically generated reference
Section 10 link to Section that timing is for
Schedule 10 Link to Schedule
DepartStation 10 Copied from Route Section.
ArrivalStation 10 Copied from Route Section.
PlannedDepartTime 12 Planned departure time from station
PlannedArriveTime 12 Planned arrival time at station
"""
extract_code = 'select * from timings'
extract_header = 'id|section|schedule|depart_station|arrive_station|planned_depart|planned_arrive\n'
def adtims(self, message):
"""add timings to a section. this is a basic addition process;
other facilities will help copy/duplicate timings. this process is a special
process as, having been given a route, it will prompt for subsequent departure
and arrival times until the route is complete. the process can be abandoned by
entering an x at the input prompt
"""
if self.show_access(message, 'ADTIMS schedule', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#check it exists
data = (schedule, 'I')
sql = 'select id, direction, route from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE CODE DOES NOT EXIST OR NOT IN INACTIVE STATUS')
return
print('SCHEDULE ENTRY MODE: ENTER TIME HHMM OR <X> TO QUIT')
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station from timings ' +\
'where schedule = ? order by id'
count, ds_timings = self.db_read(sql, data)
if count < 0:
return
last_time = '0000'
for timing_row in ds_timings:
#build the input prompt strings
depart_station = timing_row[2]
arrive_station = timing_row[3]
t2 = (depart_station,)
sql = 'select short_name from station where station = ?'
count, ds_departs = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_departs:
depart_name = station_row[0]
t2 = (arrive_station,)
sql = 'select short_name from station where station = ?'
count, ds_arrives = self.db_read(sql, t2)
if count < 0:
return
for station_row in ds_arrives:
arrive_name = station_row[0]
#get the departing time
re_enter = True
while re_enter:
new_time = raw_input('TIME DEPARTING ' + depart_station + ' ' + depart_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
departure_time = new_time
last_time = new_time
re_enter = False
#get the arriving time
re_enter = True
while re_enter:
new_time = raw_input('TIME ARRIVING ' + arrive_station + ' ' + arrive_name + ' >')
if new_time == 'x':
print('EXITING INPUT OF TIMINGS FOR SCHEDULE')
return
if self.validate_time(new_time, last_time) == 0:
arrival_time = new_time
last_time = new_time
re_enter = False
data = (departure_time, arrival_time, timing_row[0])
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
print('UPDATE OF SCHEDULE TIMINGS FOR ' + schedule + ' COMPLETED')
return
    def chtims(self, message):
"""allows changes to the timings of an individual section. This routine can also
be used for batch loading times from a file. Enter the route, section and depart
and arrive times. note that there is no validation on timings on previous or
following sections, only within the section itself.
"""
if self.show_access(message, 'CHTIMS schedule;section;depart;arrive', 'S') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#read the database
data = (schedule, 'I')
sql = 'select id from schedule where schedule = ? and status = ?'
count, dummy = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE DOES NOT EXIST OR IS ACTIVE AND CANNOT BE AMENDED')
return
#section code-------------------------------------------------------------------------------
section, rc = self.extract_field(message, 1, 'SECTION CODE')
if rc > 0:
return
#read the database
data = (schedule, section)
sql = 'select depart_station, arrive_station, id from timings ' +\
'where schedule = ? and section = ?'
count, ds_sections = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('* SCHEDULE/SECTION DOES NOT EXIST')
return
for row in ds_sections:
departing = row[0]
arriving = row[1]
timings_id = row[2]
#depart time -----------------------------------------------------------------
depart_time, rc = self.extract_field(message, 2, 'DEPARTURE TIME')
if rc > 0:
return
if len(depart_time) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return
hours = int(depart_time[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return
minutes = int(depart_time[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return
#arrival time -----------------------------------------------------------------
arrive_time, rc = self.extract_field(message, 3, 'ARRIVAL TIME')
if rc > 0:
return
if self.validate_time(arrive_time, depart_time) != 0:
return
#carry out the update and report ----------------------------------------------
data = (depart_time, arrive_time, timings_id)
sql = 'update timings set planned_depart = ?, planned_arrive = ? where id = ?'
if self.db_update(sql, data) != 0:
return
        print('SCHEDULE TIMINGS CHANGED FOR ' + schedule + ' ' + departing + ':' + depart_time + ' ' + arriving + ':' + arrive_time)
return
def validate_time(self, hhmm, prev_time):
"""internal routine to validate a given time to make sure it corresponds
to an hhmm format. if a previous_time is entered then it makes sure that the
        new time is later, unless the previous time is after 2100 (9pm) and the new time is
        before 0300 (3am), in which case a new day is assumed
"""
if len(hhmm) != 4:
print('* TIME MUST BE ENTERED IN FORMAT HHMM')
return 1
try:
hours = int(hhmm[0:2])
if hours < 0 or hours > 23:
print('* HOURS MUST BE ENTERED IN RANGE 00-23')
return 2
minutes = int(hhmm[2:4])
if minutes < 0 or minutes > 59:
print('* MINUTES MUST BE ENTERED IN RANGE 00-59')
return 3
except:
print('* TIME MUST BE ENTERED IN MINUTES AND HOURS')
return 5
if prev_time > '2100':
if hhmm < '0300':
return 0
if hhmm < prev_time:
            print('* NEW TIME MUST BE LATER THAN PREVIOUS TIME')
return 4
return 0
def timing(self, message):
"""Lists times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'TIMING schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route, run_days from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
schedule_days = row[4]
data = (schedule_route,)
sql = 'select default_direction from route where route = ?'
count, ds_routes = self.db_read(sql, data)
if count < 0:
return
for row in ds_routes:
default_direction = row[0]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
rundays = ''
if schedule_days[0:1] == '1':
rundays = ' MON'
if schedule_days[1:2] == '2':
rundays = rundays + ' TUE'
if schedule_days[2:3] == '3':
rundays = rundays + ' WED'
if schedule_days[3:4] == '4':
rundays = rundays + ' THU'
if schedule_days[4:5] == '5':
rundays = rundays + ' FRI'
if schedule_days[5:6] == '6':
rundays = rundays + ' SAT'
if schedule_days[6:7] == '7':
rundays = rundays + ' SUN'
if schedule_days[7:8] == '8':
rundays = rundays + ' HOL'
print('SCHEDULE:', schedule, schedule_name,' (SCHEDULE STATUS:' + status + ')')
print('DIRECTION:',direction, ' RUNS:', rundays)
data = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
data = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
print(' - ', row[0])
print(' ' )
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
if default_direction == schedule_dirn:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
else:
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section DESC'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
arrival = ' '
depart_station = ''
arrive_station = ''
arrive_name = ''
depart_name = ''
station_type = ''
planned_arrive = ''
dummy = ''
instructions = ''
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print(self.x_field(row[2], self.staxsize) + " " +
self.x_field(depart_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(arrival, 4) + " " +
self.x_field(row[4], 4) + " " +
self.x_field(instructions, 40))
arrival = planned_arrive
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print(self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(arrive_name, 8) + " " +
self.x_field(station_type, self.statsize) + " " +
self.x_field(planned_arrive, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print(self.x_field(dummy, self.staxsize) + " " +
self.x_field(dummy, 8) + " " +
self.x_field(dummy, self.statsize) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(dummy, 4) + " " +
self.x_field(instructions, 40))
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def ldtims(self, message):
"""Gives detail of Timing records for checking timetables vs routes
"""
if self.show_access(message, 'LDTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
                elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print('SCHEDULE: ', schedule, schedule_name,' (SCHEDULE STATUS: ' + status + ')')
print(' DIRECTION:',direction)
# build the column titles ------------------------------------------
titles = self.x_field('SECTION===', 10) + ' ' + \
self.x_field('DEPARTS===', self.staxsize) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('ARRIVES===', self.staxsize) + ' ' +\
self.x_field('=ARR', 4)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by section'
timing_count, ds_timings = self.db_read(sql, data)
        if timing_count < 0:
return
#report the extracted data -----------------------------------------
line_count = 0
for row in ds_timings:
section = row[1]
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
if line_count == 0:
print(titles)
print(self.x_field(section , 10) + " " +
self.x_field(depart_station, self.staxsize) + " " +
self.x_field(planned_depart, 4) + " " +
self.x_field(arrive_station, self.staxsize) + " " +
self.x_field(planned_arrive, 4))
line_count = line_count + 1
if line_count > 20:
line_count = 0
reply = raw_input('+')
if reply == 'x':
break
print(' ** END OF DATA: ' + str(timing_count) + ' RECORDS DISPLAYED **')
return
def prtims(self, message, Params):
"""Prints times and associated information for a schedule, including station type,
instructions
"""
if self.show_access(message, 'PRTIMS schedule', 'R') != 0:
return
#schedule code -----------------------------------------------------------------------------
schedule, rc = self.extract_field(message, 0, 'SCHEDULE CODE')
if rc > 0:
return
self.temp = {}
i = 0
#get the schedule detail to display
data = (schedule,)
sql = 'select name, direction, status, route from schedule where schedule = ?'
count, ds_schedules = self.db_read(sql, data)
if count < 0:
return
if count == 0:
print('NO SCHEDULE TO DISPLAY')
return
else:
for row in ds_schedules:
schedule_name = row[0]
schedule_dirn = row[1]
schedule_stat = row[2]
schedule_route = row[3]
if schedule_dirn == 'N':
direction = 'NORTH'
elif schedule_dirn == 'S':
direction = 'SOUTH'
elif schedule_dirn == 'E':
direction = 'EAST'
                elif schedule_dirn == 'W':
direction = 'WEST'
elif schedule_dirn == 'U':
direction = 'UP'
elif schedule_dirn == 'D':
direction = 'DOWN'
else:
direction = 'NOT KNOWN'
if schedule_stat == 'I':
status = 'INACTIVE'
elif schedule_stat == 'A':
status = 'ACTIVE'
elif schedule_stat == 'R':
status = 'RUNNING'
else:
status = 'NOT KNOWN'
print_line = ('SCHEDULE: ' + schedule + ' ' + schedule_name +' (SCHEDULE STATUS:' + status + ')')
self.temp[i]= print_line
i = i + 1
print_line = (' DIRECTION: ' + direction)
self.temp[i]= print_line
i = i + 1
t = (schedule,)
sql = 'select instruction from instructions where schedule = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
t = (schedule_route,)
sql = 'select instruction from instructions where route = ?'
count, ds_instructions = self.db_read(sql, t)
for row in ds_instructions:
print_line = (' - ' + row[0])
self.temp[i]= print_line
i = i + 1
print_line = (' ' )
self.temp[i]= print_line
i = i + 1
# build the column titles ------------------------------------------
titles = self.x_field('STATION===', self.staxsize) + ' ' + \
self.x_field('NAME====', 8) + ' ' +\
self.x_field('TYPE======', self.statsize) + ' ' +\
self.x_field('=ARR', 4) + ' ' +\
self.x_field('=DEP', 4) + ' ' +\
self.x_field('INSTRUCTIONS =========================', 40)
data = (schedule,)
sql = 'select id, section, depart_station, arrive_station, planned_depart, ' +\
'planned_arrive from timings where schedule = ? order by id'
timing_count, ds_timings = self.db_read(sql, data)
if timing_count < 0:
return
#report the extracted data -----------------------------------------
arrival = ' '
for row in ds_timings:
depart_station = row[2]
arrive_station = row[3]
planned_depart = row[4]
planned_arrive = row[5]
#get the name for the departure station
data = (depart_station,)
sql = 'select short_name, stationtype from station where station = ?'
stax_count, ds_departs = self.db_read(sql, data)
if stax_count < 0:
return
for stax_row in ds_departs:
depart_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
count, ds_instructions = self.db_read(sql, data)
instructions = ' '
for inst_row in ds_instructions:
instructions = inst_row[0]
if not(planned_depart.strip() == '' and planned_arrive.strip() == ''):
print_line = (self.x_field(depart_station, self.staxsize) + ' ' +
self.x_field(depart_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(arrival, 4) + ' ' +
self.x_field(planned_depart, 4) + ' ' +
self.x_field(instructions, 40))
arrival = planned_arrive
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
dummy = ' '
for inst_row in ds_instructions:
line = line + 1
instructions = inst_row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get the long name for the arrive station (for the last entry)
sql = 'select short_name, stationtype from station where station = ?'
data = (arrive_station,)
stax_count, ds_arrives = self.db_read(sql, data)
for stax_row in ds_arrives:
arrive_name = stax_row[0]
station_type = stax_row[1]
#get any station instructions - just print the first one
sql = 'select instruction from instructions where station = ? limit 1'
instructions = ' '
count, ds_instructions = self.db_read(sql, data)
for row in ds_instructions:
instructions = row[0]
print_line = (self.x_field(arrive_station, self.staxsize) + ' ' +
self.x_field(arrive_name, 8) + ' ' +
self.x_field(station_type, self.statsize) + ' ' +
self.x_field(planned_arrive, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#get any station instructions - now print the rest
sql = 'select instruction from instructions where station = ?'
count, ds_instructions = self.db_read(sql, data)
line = 0
for row in ds_instructions:
line = line + 1
instructions = row[0]
if line != 1:
print_line = (self.x_field(dummy, self.staxsize) + ' ' +
self.x_field(dummy, 8) + ' ' +
self.x_field(dummy, self.statsize) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(dummy, 4) + ' ' +
self.x_field(instructions, 40))
self.temp[i]= print_line
i = i + 1
#report the extracted data ---------------------------------------
self.print_report (titles = titles,
report_id = 'PRTIMS',
report_name = 'TIMETABLE FOR ' + schedule,
Params = Params)
return
|
app.py
|
import streamlit as st
import json
import requests
import matplotlib.pyplot as plt
import numpy as np

URI = 'http://neural-net-viz-flask.herokuapp.com'
st.title('Neural Network Visualizer')
st.sidebar.markdown('## Input Image')
if st.button('Get Random Prediction'):
response = requests.post(URI, data={})
response = json.loads(response.text)
preds = response.get('prediction')
image = response.get('image')
image = np.reshape(image, (28, 28))
st.sidebar.image(image, width=150)
for layer, p in enumerate(preds):
numbers = np.squeeze(np.array(p))
plt.figure(figsize=(32, 4))
if layer == 2:
row = 1
col = 10
else:
row = 2
col = 16
for i, number in enumerate(numbers):
plt.subplot(row, col, i+1)
plt.imshow(number * np.ones((8, 8, 3)).astype('float32'))
plt.xticks([])
plt.yticks([])
if layer == 2:
plt.xlabel(str(i), fontsize=40)
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.tight_layout()
st.text('Layer {}'.format(layer + 1))
st.pyplot()
|
DescribeRegionsRequest.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkoos.endpoint import endpoint_data
class DescribeRegionsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'oos', '2019-06-01', 'DescribeRegions','oos')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_AcceptLanguage(self):
return self.get_query_params().get('AcceptLanguage')
	def set_AcceptLanguage(self,AcceptLanguage):
self.add_query_param('AcceptLanguage',AcceptLanguage)
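# Minimal usage sketch (not part of the generated file): assumes
# aliyunsdkcore's AcsClient with placeholder credentials; the region id and
# language value are illustrative.
def _example_describe_regions():
	from aliyunsdkcore.client import AcsClient
	client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
	request = DescribeRegionsRequest()
	request.set_AcceptLanguage('en-US')
	return client.do_action_with_exception(request)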
|
DomainImage.ts
|
/* tslint:disable */
/* eslint-disable */
/**
* CrowdStrike API Specification
* Use this API specification as a reference for the API endpoints you can use to interact with your Falcon environment. These endpoints support authentication via OAuth2 and interact with detections and network containment. For detailed usage guides and more information about API endpoints that don\'t yet support OAuth2, see our [documentation inside the Falcon console](https://falcon.crowdstrike.com/support/documentation). To use the APIs described below, combine the base URL with the path shown for each API endpoint. For commercial cloud customers, your base URL is `https://api.crowdstrike.com`. Each API endpoint requires authorization via an OAuth2 token. Your first API request should retrieve an OAuth2 token using the `oauth2/token` endpoint, such as `https://api.crowdstrike.com/oauth2/token`. For subsequent requests, include the OAuth2 token in an HTTP authorization header. Tokens expire after 30 minutes, after which you should make a new token request to continue making API requests.
*
* The version of the OpenAPI document: rolling
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
import { exists, mapValues } from "../runtime";
/**
*
* @export
* @interface DomainImage
*/
export interface DomainImage {
/**
*
* @type {number}
* @memberof DomainImage
*/
height?: number;
/**
*
* @type {string}
* @memberof DomainImage
*/
url: string;
/**
*
* @type {number}
* @memberof DomainImage
*/
width?: number;
}
/**
* Check if a given object implements the DomainImage interface.
*/
export function instanceOfDomainImage(value: object): boolean {
let isInstance = true;
isInstance = isInstance && "url" in value;
return isInstance;
}
export function DomainImageFromJSON(json: any): DomainImage {
return DomainImageFromJSONTyped(json, false);
}
export function DomainImageFromJSONTyped(json: any, ignoreDiscriminator: boolean): DomainImage {
if (json === undefined || json === null) {
return json;
}
return {
height: !exists(json, "height") ? undefined : json["height"],
url: json["url"],
width: !exists(json, "width") ? undefined : json["width"],
};
}
export function DomainImageToJSON(value?: DomainImage | null): any {
if (value === undefined) {
return undefined;
}
if (value === null) {
return null;
}
return {
height: value.height,
url: value.url,
width: value.width,
};
}
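// Small round-trip sketch (values are illustrative, not from the spec):
// FromJSON leaves absent optional fields undefined and ToJSON mirrors them.
export function exampleDomainImageRoundTrip(): void {
  const img: DomainImage = DomainImageFromJSON({ url: "https://example.com/a.png", width: 64 });
  console.log(instanceOfDomainImage(img)); // true, since "url" is present
  console.log(DomainImageToJSON(img)); // { height: undefined, url: "https://example.com/a.png", width: 64 }
}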
|
dog_grpc.py
|
from .generated import dog_pb2, dog_pb2_grpc
from .owner_client import GrpcOwnerClient
class Dog(dog_pb2_grpc.dogServicer):
    def __init__(self):
self.owner_client = GrpcOwnerClient()
def CallDog(self, request, context):
dog_name = request.dogName
res = f"{dog_name} is going to bark!"
print(self.owner_client.notify_owner(dog_name))
print(res)
return dog_pb2.DogResponse(dogBark=res)
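# Minimal server bootstrap sketch (assumptions: the generated registration
# helper is named add_dogServicer_to_server, and the port is arbitrary).
def serve():
    import grpc
    from concurrent import futures
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    dog_pb2_grpc.add_dogServicer_to_server(Dog(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()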
|
eds_test.go
|
// Copyright Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package xds_test
import (
"encoding/json"
"errors"
"fmt"
"io"
"log"
"net/http"
"net/http/httptest"
"os"
"path/filepath"
"reflect"
"runtime"
"strconv"
"strings"
"sync"
"testing"
"time"
endpoint "github.com/envoyproxy/go-control-plane/envoy/config/endpoint/v3"
uatomic "go.uber.org/atomic"
"istio.io/istio/pilot/pkg/model"
"istio.io/istio/pilot/pkg/networking"
"istio.io/istio/pilot/pkg/xds"
v3 "istio.io/istio/pilot/pkg/xds/v3"
"istio.io/istio/pkg/adsc"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schema/gvk"
"istio.io/istio/pkg/test/env"
)
// The connect and reconnect tests are removed - ADS already has coverage, and
// StreamEndpoints is not used in 1.0+.
const (
asdcLocality = "region1/zone1/subzone1"
asdc2Locality = "region2/zone2/subzone2"
edsIncSvc = "eds.test.svc.cluster.local"
edsIncVip = "10.10.1.2"
)
func TestIncrementalPush(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{ConfigString: mustReadFile(t, "tests/testdata/config/destination-rule-all.yaml")})
ads := s.Connect(nil, nil, watchAll)
t.Run("Full Push", func(t *testing.T) {
s.Discovery.Push(&model.PushRequest{Full: true})
if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
t.Fatal(err)
}
})
t.Run("Incremental Push", func(t *testing.T) {
ads.WaitClear()
s.Discovery.Push(&model.PushRequest{Full: false})
if err := ads.WaitSingle(time.Second*5, v3.EndpointType, v3.ClusterType); err != nil {
t.Fatal(err)
}
})
t.Run("Incremental Push with updated services", func(t *testing.T) {
ads.WaitClear()
s.Discovery.Push(&model.PushRequest{
Full: false,
ConfigsUpdated: map[model.ConfigKey]struct{}{
{Name: "destall.default.svc.cluster.local", Namespace: "testns", Kind: gvk.ServiceEntry}: {},
},
})
if err := ads.WaitSingle(time.Second*5, v3.EndpointType, v3.ClusterType); err != nil {
t.Fatal(err)
}
})
t.Run("Full Push with updated services", func(t *testing.T) {
ads.WaitClear()
s.Discovery.Push(&model.PushRequest{
Full: true,
ConfigsUpdated: map[model.ConfigKey]struct{}{
{Name: "foo.bar", Namespace: "default", Kind: gvk.ServiceEntry}: {},
{Name: "destall", Namespace: "testns", Kind: gvk.DestinationRule}: {},
},
})
if _, err := ads.Wait(time.Second*5, watchAll...); err != nil {
t.Fatal(err)
}
if len(ads.GetEndpoints()) < 3 {
t.Fatalf("Expected a full EDS update, but got: %v", ads.GetEndpoints())
}
})
t.Run("Full Push without updated services", func(t *testing.T) {
ads.WaitClear()
s.Discovery.Push(&model.PushRequest{
Full: true,
ConfigsUpdated: map[model.ConfigKey]struct{}{
{Name: "destall", Namespace: "testns", Kind: gvk.DestinationRule}: {},
},
})
if _, err := ads.Wait(time.Second*5, v3.ClusterType, v3.EndpointType); err != nil {
t.Fatal(err)
}
if len(ads.GetEndpoints()) < 3 {
t.Fatalf("Expected a full EDS update, but got: %v", ads.GetEndpoints())
}
})
}
func TestEds(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{
ConfigString: mustReadFile(t, "tests/testdata/config/destination-rule-locality.yaml"),
DiscoveryServerModifier: func(s *xds.DiscoveryServer) {
addUdsEndpoint(s)
// enable locality load balancing and add relevant endpoints in order to test
addLocalityEndpoints(s, "locality.cluster.local")
addLocalityEndpoints(s, "locality-no-outlier-detection.cluster.local")
// Add the test ads clients to list of service instances in order to test the context dependent locality coloring.
addTestClientEndpoints(s)
s.MemRegistry.AddHTTPService(edsIncSvc, edsIncVip, 8080)
s.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.1", "hello-sa", "v1"))
},
})
adscConn := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.10"}}, nil, watchAll)
adscConn2 := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.11"}}, nil, watchAll)
t.Run("TCPEndpoints", func(t *testing.T) {
testTCPEndpoints("127.0.0.1", adscConn, t)
})
t.Run("edsz", func(t *testing.T) {
testEdsz(t, s, "test-1.default")
})
t.Run("LocalityPrioritizedEndpoints", func(t *testing.T) {
testLocalityPrioritizedEndpoints(adscConn, adscConn2, t)
})
t.Run("UDSEndpoints", func(t *testing.T) {
testUdsEndpoints(adscConn, t)
})
t.Run("PushIncremental", func(t *testing.T) {
edsUpdateInc(s, adscConn, t)
})
t.Run("Push", func(t *testing.T) {
edsUpdates(s, adscConn, t)
})
t.Run("MultipleRequest", func(t *testing.T) {
multipleRequest(s, false, 20, 5, 25*time.Second, nil, t)
})
// 5 pushes for 100 clients, using EDS incremental only.
t.Run("MultipleRequestIncremental", func(t *testing.T) {
multipleRequest(s, true, 20, 5, 25*time.Second, nil, t)
})
t.Run("CDSSave", func(t *testing.T) {
// Moved from cds_test, using new client
clusters := adscConn.GetClusters()
if len(clusters) == 0 {
t.Error("No clusters in ADS response")
}
strResponse, _ := json.MarshalIndent(clusters, " ", " ")
_ = os.WriteFile(env.IstioOut+"/cdsv2_sidecar.json", strResponse, 0o644)
})
}
// newEndpointWithAccount is a helper for IstioEndpoint creation. Creates endpoints with
// port name "http-main", with the given IP, service account and a 'version' label.
// nolint: unparam
func newEndpointWithAccount(ip, account, version string) []*model.IstioEndpoint {
return []*model.IstioEndpoint{
{
Address: ip,
ServicePortName: "http-main",
EndpointPort: 80,
Labels: map[string]string{"version": version},
ServiceAccount: account,
},
}
}
func TestTunnelServerEndpointEds(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
s.Discovery.MemRegistry.AddHTTPService(edsIncSvc, edsIncVip, 8080)
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
[]*model.IstioEndpoint{
{
Address: "127.0.0.1",
ServicePortName: "http-main",
EndpointPort: 80,
// Labels: map[string]string{"version": version},
ServiceAccount: "hello-sa",
TunnelAbility: networking.MakeTunnelAbility(networking.H2Tunnel),
},
})
t.Run("TestClientWantsTunnelEndpoints", func(t *testing.T) {
t.Helper()
adscConn1 := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.10"}, Metadata: &model.NodeMetadata{
ProxyConfig: &model.NodeMetaProxyConfig{
ProxyMetadata: map[string]string{
"tunnel": networking.H2TunnelTypeName,
},
},
}}, nil, watchAll)
testTunnelEndpoints("127.0.0.1", 15009, adscConn1, t)
})
t.Run("TestClientWantsNoTunnelEndpoints", func(t *testing.T) {
t.Helper()
adscConn2 := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.11"}, Metadata: &model.NodeMetadata{
ProxyConfig: &model.NodeMetaProxyConfig{},
}}, nil, watchAll)
testTunnelEndpoints("127.0.0.1", 80, adscConn2, t)
})
}
func TestNoTunnelServerEndpointEds(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
// Add the test ads clients to list of service instances in order to test the context dependent locality coloring.
addTestClientEndpoints(s.Discovery)
s.Discovery.MemRegistry.AddHTTPService(edsIncSvc, edsIncVip, 8080)
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
[]*model.IstioEndpoint{
{
Address: "127.0.0.1",
ServicePortName: "http-main",
EndpointPort: 80,
// Labels: map[string]string{"version": version},
ServiceAccount: "hello-sa",
// No Tunnel Support at this endpoint.
TunnelAbility: networking.MakeTunnelAbility(),
},
})
t.Run("TestClientWantsTunnelEndpoints", func(t *testing.T) {
adscConn := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.10"}, Metadata: &model.NodeMetadata{
ProxyConfig: &model.NodeMetaProxyConfig{
ProxyMetadata: map[string]string{
"tunnel": networking.H2TunnelTypeName,
},
},
}}, nil, watchAll)
testTunnelEndpoints("127.0.0.1", 80, adscConn, t)
})
t.Run("TestClientWantsNoTunnelEndpoints", func(t *testing.T) {
adscConn := s.Connect(&model.Proxy{IPAddresses: []string{"10.10.10.11"}, Metadata: &model.NodeMetadata{}}, nil, watchAll)
testTunnelEndpoints("127.0.0.1", 80, adscConn, t)
})
}
func mustReadFile(t *testing.T, fpaths ...string) string {
result := ""
for _, fpath := range fpaths {
if !strings.HasPrefix(fpath, ".") {
fpath = filepath.Join(env.IstioSrc, fpath)
}
bytes, err := os.ReadFile(fpath)
if err != nil {
t.Fatal(err)
}
result += "---\n"
result += string(bytes)
}
return result
}
func mustReadfolder(t *testing.T, folder string) string {
result := ""
fpathRoot := folder
if !strings.HasPrefix(fpathRoot, ".") {
fpathRoot = filepath.Join(env.IstioSrc, folder)
}
f, err := os.ReadDir(fpathRoot)
if err != nil {
t.Fatal(err)
}
for _, fpath := range f {
bytes, err := os.ReadFile(filepath.Join(fpathRoot, fpath.Name()))
if err != nil {
t.Fatal(err)
}
result += "---\n"
result += string(bytes)
}
return result
}
func TestEdsWeightedServiceEntry(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{ConfigString: mustReadFile(t, "tests/testdata/config/static-weighted-se.yaml")})
adscConn := s.Connect(nil, nil, watchEds)
endpoints := adscConn.GetEndpoints()
lbe, f := endpoints["outbound|80||weighted.static.svc.cluster.local"]
if !f || len(lbe.Endpoints) == 0 {
t.Fatalf("No lb endpoints for %v, %v", "outbound|80||weighted.static.svc.cluster.local", adscConn.EndpointsJSON())
}
expected := map[string]uint32{
"a": 9, // sum of 1 and 8
"b": 3,
"3.3.3.3": 1, // no weight provided is normalized to 1
"2.2.2.2": 8,
"1.1.1.1": 3,
}
got := make(map[string]uint32)
for _, lbe := range lbe.Endpoints {
got[lbe.Locality.Region] = lbe.LoadBalancingWeight.Value
for _, e := range lbe.LbEndpoints {
got[e.GetEndpoint().Address.GetSocketAddress().Address] = e.LoadBalancingWeight.Value
}
}
if !reflect.DeepEqual(expected, got) {
t.Errorf("Expected LB weights %v got %v", expected, got)
}
}
var (
watchEds = []string{v3.ClusterType, v3.EndpointType}
watchAll = []string{v3.ClusterType, v3.EndpointType, v3.ListenerType, v3.RouteType}
)
func TestEDSOverlapping(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
addOverlappingEndpoints(s)
adscon := s.Connect(nil, nil, watchEds)
testOverlappingPorts(s, adscon, t)
}
// Validates the behavior when Service resolution type is updated after initial EDS push.
// See https://github.com/istio/istio/issues/18355 for more details.
func TestEDSServiceResolutionUpdate(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
addEdsCluster(s, "edsdns.svc.cluster.local", "http", "10.0.0.53", 8080)
addEdsCluster(s, "other.local", "http", "1.1.1.1", 8080)
adscConn := s.Connect(nil, nil, watchAll)
// Validate that endpoints are pushed correctly.
testEndpoints("10.0.0.53", "outbound|8080||edsdns.svc.cluster.local", adscConn, t)
// Now update the service resolution to DNSLB with a DNS endpoint.
updateServiceResolution(s)
if _, err := adscConn.Wait(5*time.Second, v3.EndpointType); err != nil {
t.Fatal(err)
}
// Validate that endpoints are skipped.
lbe := adscConn.GetEndpoints()["outbound|8080||edsdns.svc.cluster.local"]
if lbe != nil && len(lbe.Endpoints) > 0 {
t.Fatalf("endpoints not expected for %s, but got %v", "edsdns.svc.cluster.local", adscConn.EndpointsJSON())
}
}
// Validate that flip-flopping a service's endpoints between 1 and 0 does not trigger a full push.
func TestEndpointFlipFlops(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
addEdsCluster(s, "flipflop.com", "http", "10.0.0.53", 8080)
adscConn := s.Connect(nil, nil, watchAll)
// Validate that endpoints are pushed correctly.
testEndpoints("10.0.0.53", "outbound|8080||flipflop.com", adscConn, t)
// Clear the endpoint and validate it does not trigger a full push.
s.Discovery.MemRegistry.SetEndpoints("flipflop.com", "", []*model.IstioEndpoint{})
upd, _ := adscConn.Wait(5*time.Second, v3.EndpointType)
if contains(upd, "cds") {
t.Fatalf("Expecting only EDS update as part of a partial push. But received CDS also %v", upd)
}
if len(upd) > 0 && !contains(upd, v3.EndpointType) {
t.Fatalf("Expecting EDS push as part of a partial push. But received %v", upd)
}
lbe := adscConn.GetEndpoints()["outbound|8080||flipflop.com"]
if len(lbe.Endpoints) != 0 {
t.Fatalf("There should be no endpoints for outbound|8080||flipflop.com. Endpoints:\n%v", adscConn.EndpointsJSON())
}
// Validate that keys in service still exist in EndpointShardsByService - this prevents full push.
if len(s.Discovery.EndpointShardsByService["flipflop.com"]) == 0 {
t.Fatalf("Expected service key %s to be present in EndpointShardsByService. But missing %v", "flipflop.com", s.Discovery.EndpointShardsByService)
}
// Set the endpoints again and validate it does not trigger full push.
s.Discovery.MemRegistry.SetEndpoints("flipflop.com", "",
[]*model.IstioEndpoint{
{
Address: "10.10.1.1",
ServicePortName: "http",
EndpointPort: 8080,
},
})
upd, _ = adscConn.Wait(5*time.Second, v3.EndpointType)
if contains(upd, v3.ClusterType) {
t.Fatalf("expecting only EDS update as part of a partial push. But received CDS also %+v", upd)
}
if len(upd) > 0 && !contains(upd, v3.EndpointType) {
t.Fatalf("expecting EDS push as part of a partial push. But did not receive %+v", upd)
}
testEndpoints("10.10.1.1", "outbound|8080||flipflop.com", adscConn, t)
}
// Validate that deleting a service clears entries from EndpointShardsByService.
func TestDeleteService(t *testing.T) {
s := xds.NewFakeDiscoveryServer(t, xds.FakeOptions{})
addEdsCluster(s, "removeservice.com", "http", "10.0.0.53", 8080)
adscConn := s.Connect(nil, nil, watchEds)
// Validate that endpoints are pushed correctly.
testEndpoints("10.0.0.53", "outbound|8080||removeservice.com", adscConn, t)
s.Discovery.MemRegistry.RemoveService("removeservice.com")
if len(s.Discovery.EndpointShardsByService["removeservice.com"]) != 0 {
t.Fatalf("Expected service key %s to be deleted in EndpointShardsByService. But is still there %v",
"removeservice.com", s.Discovery.EndpointShardsByService)
}
}
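// TestUpdateServiceAccount validates that UpdateServiceAccount only reports a change when the
// set of service accounts backing a shard actually changes.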
func TestUpdateServiceAccount(t *testing.T) {
cluster1Endpoints := []*model.IstioEndpoint{
{Address: "10.172.0.1", ServiceAccount: "sa1"},
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
}
testCases := []struct {
name string
shardKey model.ShardKey
endpoints []*model.IstioEndpoint
expect bool
}{
{
name: "added new endpoint",
shardKey: "c1",
endpoints: append(cluster1Endpoints, &model.IstioEndpoint{Address: "10.172.0.3", ServiceAccount: "sa1"}),
expect: false,
},
{
name: "added new sa",
shardKey: "c1",
endpoints: append(cluster1Endpoints, &model.IstioEndpoint{Address: "10.172.0.3", ServiceAccount: "sa2"}),
expect: true,
},
{
name: "updated endpoints address",
shardKey: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.5", ServiceAccount: "sa1"},
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
},
expect: false,
},
{
name: "deleted one endpoint with unique sa",
shardKey: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.1", ServiceAccount: "sa1"},
},
expect: true,
},
{
name: "deleted one endpoint with duplicate sa",
shardKey: "c1",
endpoints: []*model.IstioEndpoint{
{Address: "10.172.0.2", ServiceAccount: "sa-vm1"},
},
expect: false,
},
{
name: "deleted endpoints",
shardKey: "c1",
endpoints: nil,
expect: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
s := new(xds.DiscoveryServer)
originalEndpointsShard := &xds.EndpointShards{
Shards: map[model.ShardKey][]*model.IstioEndpoint{
"c1": cluster1Endppoints,
"c2": {{Address: "10.244.0.1", ServiceAccount: "sa1"}, {Address: "10.244.0.2", ServiceAccount: "sa-vm2"}},
},
ServiceAccounts: map[string]struct{}{
"sa1": {},
"sa-vm1": {},
"sa-vm2": {},
},
}
originalEndpointsShard.Shards[tc.shardKey] = tc.endpoints
ret := s.UpdateServiceAccount(originalEndpointsShard, "test-svc")
if ret != tc.expect {
t.Errorf("expect UpdateServiceAccount %v, but got %v", tc.expect, ret)
}
})
}
}
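// fullPush triggers a full push on the fake discovery server.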
func fullPush(s *xds.FakeDiscoveryServer) {
s.Discovery.Push(&model.PushRequest{Full: true})
}
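// addTestClientEndpoints registers the test-1.default service with one endpoint in each of two localities.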
func addTestClientEndpoints(server *xds.DiscoveryServer) {
server.MemRegistry.AddService("test-1.default", &model.Service{
Hostname: "test-1.default",
Ports: model.PortList{
{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
},
})
server.MemRegistry.AddInstance("test-1.default", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: "10.10.10.10",
ServicePortName: "http",
EndpointPort: 80,
Locality: model.Locality{Label: asdcLocality},
},
ServicePort: &model.Port{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
})
server.MemRegistry.AddInstance("test-1.default", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: "10.10.10.11",
ServicePortName: "http",
EndpointPort: 80,
Locality: model.Locality{Label: asdc2Locality},
},
ServicePort: &model.Port{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
})
}
// Verify server sends the endpoint. This checks for a single endpoint with the given
// address.
func testTCPEndpoints(expected string, adsc *adsc.ADSC, t *testing.T) {
t.Helper()
testEndpoints(expected, "outbound|8080||eds.test.svc.cluster.local", adsc, t)
}
// Verify server sends the endpoint. This checks for a single endpoint with the given
// address.
func testEndpoints(expected string, cluster string, adsc *adsc.ADSC, t *testing.T) {
t.Helper()
lbe, f := adsc.GetEndpoints()[cluster]
if !f || len(lbe.Endpoints) == 0 {
t.Fatalf("No lb endpoints for %v, %v", cluster, adsc.EndpointsJSON())
}
var found []string
for _, lbe := range lbe.Endpoints {
for _, e := range lbe.LbEndpoints {
addr := e.GetEndpoint().Address.GetSocketAddress().Address
found = append(found, addr)
if expected == addr {
return
}
}
}
t.Fatalf("Expecting %s got %v", expected, found)
}
// Verify server sends the tunneled endpoints.
// nolint: unparam
func testTunnelEndpoints(expectIP string, expectPort uint32, adsc *adsc.ADSC, t *testing.T) {
t.Helper()
cluster := "outbound|8080||eds.test.svc.cluster.local"
allClusters := adsc.GetEndpoints()
cla, f := allClusters[cluster]
if !f || len(cla.Endpoints) == 0 {
t.Fatalf("No lb endpoints for %v, %v", cluster, adsc.EndpointsJSON())
}
var found []string
for _, lbe := range cla.Endpoints {
for _, e := range lbe.LbEndpoints {
addr := e.GetEndpoint().Address.GetSocketAddress().Address
port := e.GetEndpoint().Address.GetSocketAddress().GetPortValue()
found = append(found, fmt.Sprintf("%s:%d", addr, port))
if expectIP == addr && expectPort == port {
return
}
}
}
t.Errorf("REACH HERE cannot find %s:%d", expectIP, expectPort)
t.Fatalf("Expecting address %s:%d got %v", expectIP, expectPort, found)
}
func testLocalityPrioritizedEndpoints(adsc *adsc.ADSC, adsc2 *adsc.ADSC, t *testing.T)
|
// Tests that Services with multiple ports sharing the same port number properly receive endpoints.
// Real world use case for this is kube-dns, which uses port 53 for TCP and UDP.
func testOverlappingPorts(s *xds.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
// Test initial state
testEndpoints("10.0.0.53", "outbound|53||overlapping.cluster.local", adsc, t)
s.Discovery.Push(&model.PushRequest{
Full: true,
ConfigsUpdated: map[model.ConfigKey]struct{}{{
Kind: gvk.ServiceEntry,
Name: "overlapping.cluster.local",
}: {}},
})
_, _ = adsc.Wait(5 * time.Second)
// After the incremental push, we should still see the endpoint
testEndpoints("10.0.0.53", "outbound|53||overlapping.cluster.local", adsc, t)
}
func verifyNoLocalityPriorities(eps []*endpoint.LocalityLbEndpoints, t *testing.T) {
for _, ep := range eps {
if ep.GetPriority() != 0 {
t.Errorf("expected no locality priorities to apply, got priority %v.", ep.GetPriority())
}
}
}
func verifyLocalityPriorities(proxyLocality string, eps []*endpoint.LocalityLbEndpoints, t *testing.T) {
items := strings.SplitN(proxyLocality, "/", 3)
region, zone, subzone := items[0], items[1], items[2]
for _, ep := range eps {
if ep.GetLocality().Region == region {
if ep.GetLocality().Zone == zone {
if ep.GetLocality().SubZone == subzone {
if ep.GetPriority() != 0 {
t.Errorf("expected endpoint pool from same locality to have priority of 0, got %v", ep.GetPriority())
}
} else if ep.GetPriority() != 1 {
t.Errorf("expected endpoint pool from a different subzone to have priority of 1, got %v", ep.GetPriority())
}
} else {
if ep.GetPriority() != 2 {
t.Errorf("expected endpoint pool from a different zone to have priority of 2, got %v", ep.GetPriority())
}
}
} else {
if ep.GetPriority() != 3 {
t.Errorf("expected endpoint pool from a different region to have priority of 3, got %v", ep.GetPriority())
}
}
}
}
// Verify server sends UDS endpoints
func testUdsEndpoints(adsc *adsc.ADSC, t *testing.T) {
// Check the UDS endpoint (used to be a separate test, but it used an old unused GRPC method).
// The new test also verifies CDS is pushing the UDS cluster, since adsc.eds is
// populated using the CDS response.
lbe, f := adsc.GetEndpoints()["outbound|0||localuds.cluster.local"]
if !f || len(lbe.Endpoints) == 0 {
t.Error("No UDS lb endpoints")
} else {
ep0 := lbe.Endpoints[0]
if len(ep0.LbEndpoints) != 1 {
t.Fatalf("expected 1 LB endpoint but got %d", len(ep0.LbEndpoints))
}
lbep := ep0.LbEndpoints[0]
path := lbep.GetEndpoint().GetAddress().GetPipe().GetPath()
if path != udsPath {
t.Fatalf("expected Pipe to %s, got %s", udsPath, path)
}
}
}
// edsUpdates performs an old-style (full) EDS update and verifies the endpoint change is pushed.
func edsUpdates(s *xds.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
// Old style (non-incremental)
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.3", "hello-sa", "v1"))
xds.AdsPushAll(s.Discovery)
// will trigger recompute and push
if _, err := adsc.Wait(5*time.Second, v3.EndpointType); err != nil {
t.Fatal("EDS push failed", err)
}
testTCPEndpoints("127.0.0.3", adsc, t)
}
// edsFullUpdateCheck checks for updates required in a full push after the CDS update
func edsFullUpdateCheck(adsc *adsc.ADSC, t *testing.T) {
t.Helper()
if upd, err := adsc.Wait(15*time.Second, watchAll...); err != nil {
t.Fatal("Expecting CDS, EDS, LDS, and RDS update as part of a full push", err, upd)
}
}
// This test must be run in isolation, can't be parallelized with any other v2 test.
// It makes different kind of updates, and checks that incremental or full push happens.
// In particular:
// - just endpoint changes -> incremental
// - service account changes -> full ( in future: CDS only )
// - label changes -> full
func edsUpdateInc(s *xds.FakeDiscoveryServer, adsc *adsc.ADSC, t *testing.T) {
// TODO: set endpoints for a different cluster (new shard)
// Verify initial state
testTCPEndpoints("127.0.0.1", adsc, t)
adsc.WaitClear() // make sure there are no pending pushes.
// Equivalent with the event generated by K8S watching the Service.
// Will trigger a push.
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.2", "hello-sa", "v1"))
upd, err := adsc.Wait(5*time.Second, v3.EndpointType)
if err != nil {
t.Fatal("Incremental push failed", err)
}
if contains(upd, v3.ClusterType) {
t.Fatal("Expecting EDS only update, got", upd)
}
testTCPEndpoints("127.0.0.2", adsc, t)
// Update the endpoint with different SA - expect full
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.3", "account2", "v1"))
edsFullUpdateCheck(adsc, t)
testTCPEndpoints("127.0.0.3", adsc, t)
// Update the endpoint again, no SA change - expect incremental
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.4", "account2", "v1"))
upd, err = adsc.Wait(5 * time.Second)
if err != nil {
t.Fatal("Incremental push failed", err)
}
if !reflect.DeepEqual(upd, []string{v3.EndpointType}) {
t.Fatal("Expecting EDS only update, got", upd)
}
testTCPEndpoints("127.0.0.4", adsc, t)
// Update the endpoint to original SA - expect full
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.2", "hello-sa", "v1"))
edsFullUpdateCheck(adsc, t)
testTCPEndpoints("127.0.0.2", adsc, t)
// Update the endpoint again, no label change - expect incremental
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "",
newEndpointWithAccount("127.0.0.5", "hello-sa", "v1"))
upd, err = adsc.Wait(5 * time.Second)
if err != nil {
t.Fatal("Incremental push failed", err)
}
if !reflect.DeepEqual(upd, []string{v3.EndpointType}) {
t.Fatal("Expecting EDS only update, got", upd)
}
testTCPEndpoints("127.0.0.5", adsc, t)
// Wipe out all endpoints - expect full
s.Discovery.MemRegistry.SetEndpoints(edsIncSvc, "", []*model.IstioEndpoint{})
if upd, err := adsc.Wait(15*time.Second, v3.EndpointType); err != nil {
t.Fatal("Expecting EDS update as part of a partial push", err, upd)
}
lbe := adsc.GetEndpoints()["outbound|8080||eds.test.svc.cluster.local"]
if len(lbe.Endpoints) != 0 {
t.Fatalf("There should be no endpoints for outbound|8080||eds.test.svc.cluster.local. Endpoints:\n%v", adsc.EndpointsJSON())
}
}
// Make a direct EDS grpc request to pilot, verify the result is as expected.
// This test includes a 'bad client' regression test, which fails to read on the
// stream.
func multipleRequest(s *xds.FakeDiscoveryServer, inc bool, nclients,
nPushes int, to time.Duration, _ map[string]string, t *testing.T) {
wgConnect := &sync.WaitGroup{}
wg := &sync.WaitGroup{}
errChan := make(chan error, nclients)
// Bad client - will not read any response. This triggers Write to block, which should
// be detected
// This is not using adsc, which consumes the events automatically.
ads := s.ConnectADS()
ads.Request(t, nil)
n := nclients
wg.Add(n)
wgConnect.Add(n)
rcvPush := uatomic.NewInt32(0)
rcvClients := uatomic.NewInt32(0)
for i := 0; i < n; i++ {
current := i
go func(id int) {
defer wg.Done()
// Connect and get initial response
adscConn := s.Connect(&model.Proxy{IPAddresses: []string{fmt.Sprintf("1.1.1.%d", id)}}, nil, nil)
_, err := adscConn.Wait(15*time.Second, v3.RouteType)
if err != nil {
errChan <- errors.New("failed to get initial rds: " + err.Error())
wgConnect.Done()
return
}
if len(adscConn.GetEndpoints()) == 0 {
errChan <- errors.New("no endpoints")
wgConnect.Done()
return
}
wgConnect.Done()
// Check we received all pushes
log.Println("Waiting for pushes ", id)
// Pushes may be merged so we may not get nPushes pushes
got, err := adscConn.Wait(15*time.Second, v3.EndpointType)
// If in incremental mode, shouldn't receive cds|rds|lds here
if inc {
for _, g := range got {
if g == "cds" || g == "rds" || g == "lds" {
errChan <- fmt.Errorf("should be eds incremental but received cds. %v %v",
err, id)
return
}
}
}
rcvPush.Inc()
if err != nil {
log.Println("Recv failed", err, id)
errChan <- fmt.Errorf("failed to receive a response in 15 s %v %v",
err, id)
return
}
log.Println("Received all pushes ", id)
rcvClients.Inc()
adscConn.Close()
}(current)
}
ok := waitTimeout(wgConnect, to)
if !ok {
t.Fatal("Failed to connect")
}
log.Println("Done connecting")
// All clients are connected - this can start pushing changes.
for j := 0; j < nPushes; j++ {
if inc {
// This will be throttled - we want to trigger a single push
s.Discovery.AdsPushAll(strconv.Itoa(j), &model.PushRequest{
Full: false,
ConfigsUpdated: map[model.ConfigKey]struct{}{{
Kind: gvk.ServiceEntry,
Name: edsIncSvc,
}: {}},
Push: s.Discovery.Env.PushContext,
})
} else {
xds.AdsPushAll(s.Discovery)
}
log.Println("Push done ", j)
}
ok = waitTimeout(wg, to)
if !ok {
t.Errorf("Failed to receive all responses %d %d", rcvClients.Load(), rcvPush.Load())
buf := make([]byte, 1<<16)
runtime.Stack(buf, true)
fmt.Printf("%s", buf)
}
close(errChan)
// moved from ads_test, which had a duplicated test.
for e := range errChan {
t.Error(e)
}
}
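// waitTimeout waits for the WaitGroup to complete, returning false if the timeout expires first.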
func waitTimeout(wg *sync.WaitGroup, timeout time.Duration) bool {
c := make(chan struct{})
go func() {
defer close(c)
wg.Wait()
}()
select {
case <-c:
return true
case <-time.After(timeout):
return false
}
}
const udsPath = "/var/run/test/socket"
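// addUdsEndpoint registers a mesh-external service backed by a Unix domain socket and triggers a config update.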
func addUdsEndpoint(s *xds.DiscoveryServer) {
s.MemRegistry.AddService("localuds.cluster.local", &model.Service{
Hostname: "localuds.cluster.local",
Ports: model.PortList{
{
Name: "grpc",
Port: 0,
Protocol: protocol.GRPC,
},
},
MeshExternal: true,
Resolution: model.ClientSideLB,
})
s.MemRegistry.AddInstance("localuds.cluster.local", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: udsPath,
EndpointPort: 0,
ServicePortName: "grpc",
Locality: model.Locality{Label: "localhost"},
Labels: map[string]string{"socket": "unix"},
},
ServicePort: &model.Port{
Name: "grpc",
Port: 0,
Protocol: protocol.GRPC,
},
})
pushReq := &model.PushRequest{
Full: true,
Reason: []model.TriggerReason{model.ConfigUpdate},
}
s.ConfigUpdate(pushReq)
}
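// addLocalityEndpoints registers a service with one endpoint per locality, spread across regions, zones and subzones.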
func addLocalityEndpoints(server *xds.DiscoveryServer, hostname host.Name) {
server.MemRegistry.AddService(hostname, &model.Service{
Hostname: hostname,
Ports: model.PortList{
{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
},
})
localities := []string{
"region1/zone1/subzone1",
"region1/zone1/subzone2",
"region1/zone2/subzone1",
"region2/zone1/subzone1",
"region2/zone1/subzone2",
"region2/zone2/subzone1",
"region2/zone2/subzone2",
}
for i, locality := range localities {
server.MemRegistry.AddInstance(hostname, &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: fmt.Sprintf("10.0.0.%v", i),
EndpointPort: 80,
ServicePortName: "http",
Locality: model.Locality{Label: locality},
},
ServicePort: &model.Port{
Name: "http",
Port: 80,
Protocol: protocol.HTTP,
},
})
}
}
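// addEdsCluster registers an HTTP service with a single endpoint and triggers a full push.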
// nolint: unparam
func addEdsCluster(s *xds.FakeDiscoveryServer, hostName string, portName string, address string, port int) {
s.Discovery.MemRegistry.AddService(host.Name(hostName), &model.Service{
Hostname: host.Name(hostName),
Ports: model.PortList{
{
Name: portName,
Port: port,
Protocol: protocol.HTTP,
},
},
})
s.Discovery.MemRegistry.AddInstance(host.Name(hostName), &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: address,
EndpointPort: uint32(port),
ServicePortName: portName,
},
ServicePort: &model.Port{
Name: portName,
Port: port,
Protocol: protocol.HTTP,
},
})
fullPush(s)
}
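// updateServiceResolution switches edsdns.svc.cluster.local to DNS load balancing with a hostname endpoint.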
func updateServiceResolution(s *xds.FakeDiscoveryServer) {
s.Discovery.MemRegistry.AddService("edsdns.svc.cluster.local", &model.Service{
Hostname: "edsdns.svc.cluster.local",
Ports: model.PortList{
{
Name: "http",
Port: 8080,
Protocol: protocol.HTTP,
},
},
Resolution: model.DNSLB,
})
s.Discovery.MemRegistry.AddInstance("edsdns.svc.cluster.local", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: "somevip.com",
EndpointPort: 8080,
ServicePortName: "http",
},
ServicePort: &model.Port{
Name: "http",
Port: 8080,
Protocol: protocol.HTTP,
},
})
fullPush(s)
}
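// addOverlappingEndpoints registers a service that exposes TCP and UDP on the same port number (53), mirroring kube-dns.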
func addOverlappingEndpoints(s *xds.FakeDiscoveryServer) {
s.Discovery.MemRegistry.AddService("overlapping.cluster.local", &model.Service{
Hostname: "overlapping.cluster.local",
Ports: model.PortList{
{
Name: "dns",
Port: 53,
Protocol: protocol.UDP,
},
{
Name: "tcp-dns",
Port: 53,
Protocol: protocol.TCP,
},
},
})
s.Discovery.MemRegistry.AddInstance("overlapping.cluster.local", &model.ServiceInstance{
Endpoint: &model.IstioEndpoint{
Address: "10.0.0.53",
EndpointPort: 53,
ServicePortName: "tcp-dns",
},
ServicePort: &model.Port{
Name: "tcp-dns",
Port: 53,
Protocol: protocol.TCP,
},
})
fullPush(s)
}
// Verify the endpoint debug interface is installed and returns some string.
// TODO: parse response, check if data captured matches what we expect.
// TODO: use this in integration tests.
// TODO: refine the output
// TODO: dump the ServiceInstances as well
func testEdsz(t *testing.T, s *xds.FakeDiscoveryServer, proxyID string) {
req, err := http.NewRequest("GET", "/debug/edsz?proxyID="+proxyID, nil)
if err != nil {
t.Fatal(err)
}
rr := httptest.NewRecorder()
debug := http.HandlerFunc(s.Discovery.Edsz)
debug.ServeHTTP(rr, req)
data, err := io.ReadAll(rr.Body)
if err != nil {
t.Fatalf("Failed to read /edsz")
}
statusStr := string(data)
if !strings.Contains(statusStr, "\"outbound|8080||eds.test.svc.cluster.local\"") {
t.Fatal("Mock eds service not found ", statusStr)
}
}
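// contains reports whether e is present in s.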
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
|
{
endpoints1 := adsc.GetEndpoints()
endpoints2 := adsc2.GetEndpoints()
verifyLocalityPriorities(asdcLocality, endpoints1["outbound|80||locality.cluster.local"].GetEndpoints(), t)
verifyLocalityPriorities(asdc2Locality, endpoints2["outbound|80||locality.cluster.local"].GetEndpoints(), t)
// No outlier detection specified for this cluster, so we shouldn't apply priority.
verifyNoLocalityPriorities(endpoints1["outbound|80||locality-no-outlier-detection.cluster.local"].GetEndpoints(), t)
verifyNoLocalityPriorities(endpoints2["outbound|80||locality-no-outlier-detection.cluster.local"].GetEndpoints(), t)
}
|
_embedded_ram28lu.rs
|
#[doc = "Register `_EmbeddedRAM28LU` reader"]
|
type Target = crate::R<_EMBEDDEDRAM28LU_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl From<crate::R<_EMBEDDEDRAM28LU_SPEC>> for R {
#[inline(always)]
fn from(reader: crate::R<_EMBEDDEDRAM28LU_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `_EmbeddedRAM28LU` writer"]
pub struct W(crate::W<_EMBEDDEDRAM28LU_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<_EMBEDDEDRAM28LU_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl From<crate::W<_EMBEDDEDRAM28LU_SPEC>> for W {
#[inline(always)]
fn from(writer: crate::W<_EMBEDDEDRAM28LU_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Field `RAM_LU` reader - RAM_LU stores the second 8 bits of the 32 bit CRC"]
pub struct RAM_LU_R(crate::FieldReader<u8, u8>);
impl RAM_LU_R {
#[inline(always)]
pub(crate) fn new(bits: u8) -> Self {
RAM_LU_R(crate::FieldReader::new(bits))
}
}
impl core::ops::Deref for RAM_LU_R {
type Target = crate::FieldReader<u8, u8>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `RAM_LU` writer - RAM_LU stores the second 8 bits of the 32 bit CRC"]
pub struct RAM_LU_W<'a> {
w: &'a mut W,
}
impl<'a> RAM_LU_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = value;
self.w
}
}
impl R {
#[doc = "Bits 0:7 - RAM_LU stores the second 8 bits of the 32 bit CRC"]
#[inline(always)]
pub fn ram_lu(&self) -> RAM_LU_R {
RAM_LU_R::new(self.bits)
}
}
impl W {
#[doc = "Bits 0:7 - RAM_LU stores the second 8 bits of the 32 bit CRC"]
#[inline(always)]
pub fn ram_lu(&mut self) -> RAM_LU_W {
RAM_LU_W { w: self }
}
#[doc = "Writes raw bits to the register."]
#[inline(always)]
pub unsafe fn bits(&mut self, bits: u8) -> &mut Self {
self.0.bits(bits);
self
}
}
#[doc = "CSE PRAM28LU register.\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [_embedded_ram28lu](index.html) module"]
pub struct _EMBEDDEDRAM28LU_SPEC;
impl crate::RegisterSpec for _EMBEDDEDRAM28LU_SPEC {
type Ux = u8;
}
#[doc = "`read()` method returns [_embedded_ram28lu::R](R) reader structure"]
impl crate::Readable for _EMBEDDEDRAM28LU_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [_embedded_ram28lu::W](W) writer structure"]
impl crate::Writable for _EMBEDDEDRAM28LU_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets _EmbeddedRAM28LU to value 0"]
impl crate::Resettable for _EMBEDDEDRAM28LU_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
}
|
pub struct R(crate::R<_EMBEDDEDRAM28LU_SPEC>);
impl core::ops::Deref for R {
|
main.rs
|
fn
|
() {
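// Demonstrates char literals, tuple destructuring/indexing, and array initialization.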
let c = 'z';
println!("c is: {}", c);
let heart_eyed_cat = '😻';
println!("heart_eyed_cat is: {}", heart_eyed_cat);
let tup: (i32, f64, u8) = (500, 6.4, 1);
let (x, y, _) = tup;
println!("x, y is: {}, {}", x, y);
println!("tup is: {}, {}, {}", tup.0, tup.1, tup.2);
let arr = [1, 2, 3, 4, 5];
println!("arr is: {}", arr[0]);
let arr = [3;5];
println!("arr[4] is: {}", arr[4]);
}
|
main
|
validator.go
|
package host
import (
"context"
"encoding/json"
"fmt"
"math"
"net"
"strings"
"time"
"github.com/coreos/ignition/v2/config/v3_2"
"github.com/coreos/ignition/v2/config/v3_2/types"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
"github.com/openshift/assisted-service/internal/common"
"github.com/openshift/assisted-service/internal/constants"
"github.com/openshift/assisted-service/internal/hardware"
"github.com/openshift/assisted-service/internal/host/hostutil"
"github.com/openshift/assisted-service/internal/network"
"github.com/openshift/assisted-service/internal/operators"
"github.com/openshift/assisted-service/internal/provider/registry"
"github.com/openshift/assisted-service/models"
"github.com/openshift/assisted-service/pkg/conversions"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/thoas/go-funk"
"gorm.io/gorm"
)
type ValidationStatus string
const (
ValidationSuccess ValidationStatus = "success"
ValidationSuccessSuppressOutput ValidationStatus = "success-suppress-output"
ValidationFailure ValidationStatus = "failure"
ValidationPending ValidationStatus = "pending"
ValidationError ValidationStatus = "error"
ValidationDisabled ValidationStatus = "disabled"
)
const OpenStackPlatform = "OpenStack Compute"
var (
ImageStatusDownloadRateThreshold = 0.001
invalidPlatforms = []string{
OpenStackPlatform,
}
forbiddenHostnames = []string{
"localhost",
}
)
func (v ValidationStatus) String() string {
return string(v)
}
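// validationContext aggregates everything needed to evaluate a single host validation.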
type validationContext struct {
host *models.Host
cluster *common.Cluster
infraEnv *common.InfraEnv
inventory *models.Inventory
db *gorm.DB
clusterHostRequirements *models.ClusterHostRequirements
minCPUCoresRequirement int64
minRAMMibRequirement int64
}
type validationCondition func(context *validationContext) ValidationStatus
type validationStringFormatter func(context *validationContext, status ValidationStatus) string
type validation struct {
id validationID
condition validationCondition
formatter validationStringFormatter
skippedStates []models.HostStage
}
func (c *validationContext) loadCluster() error {
var err error
if c.cluster == nil {
c.cluster, err = common.GetClusterFromDBWithHosts(c.db, *c.host.ClusterID)
}
return err
}
func (c *validationContext) loadInfraEnv() error {
var err error
if c.infraEnv == nil {
c.infraEnv, err = common.GetInfraEnvFromDB(c.db, c.host.InfraEnvID)
}
return err
}
func (c *validationContext) loadInventory() error {
if c.host.Inventory != "" {
var inventory models.Inventory
err := json.Unmarshal([]byte(c.host.Inventory), &inventory)
if err != nil {
return err
}
if inventory.CPU == nil || inventory.Memory == nil || len(inventory.Disks) == 0 {
return errors.Errorf("Inventory is not valid")
}
c.inventory = &inventory
}
return nil
}
func (v *validator) getBootDeviceInfo(host *models.Host) (*models.DiskInfo, error) {
bootDevice, err := hardware.GetBootDevice(v.hwValidator, host)
if err != nil {
return nil, err
}
info, err := common.GetDiskInfo(host.DisksInfo, bootDevice)
if err != nil {
return nil, err
}
return info, nil
}
func (c *validationContext) validateRole() error {
switch common.GetEffectiveRole(c.host) {
case models.HostRoleMaster, models.HostRoleWorker, models.HostRoleAutoAssign:
return nil
default:
return errors.Errorf("Illegal role defined: %s", common.GetEffectiveRole(c.host))
}
}
func (c *validationContext) validateMachineCIDR() error {
var err error
for _, machineNetwork := range c.cluster.MachineNetworks {
_, _, err = net.ParseCIDR(string(machineNetwork.Cidr))
if err != nil {
return err
}
}
return nil
}
func (c *validationContext) loadClusterHostRequirements(hwValidator hardware.Validator) error {
requirements, err := hwValidator.GetClusterHostRequirements(context.TODO(), c.cluster, c.host)
c.clusterHostRequirements = requirements
return err
}
func (c *validationContext) loadInfraEnvHostRequirements(hwValidator hardware.Validator) error {
requirements, err := hwValidator.GetInfraEnvHostRequirements(context.TODO(), c.infraEnv)
c.clusterHostRequirements = requirements
return err
}
func (c *validationContext) loadGeneralMinRequirements(hwValidator hardware.Validator) error {
requirements, err := hwValidator.GetPreflightHardwareRequirements(context.TODO(), c.cluster)
if err != nil {
return err
}
c.minCPUCoresRequirement = int64(math.Min(float64(requirements.Ocp.Master.Quantitative.CPUCores), float64(requirements.Ocp.Worker.Quantitative.CPUCores)))
c.minRAMMibRequirement = int64(math.Min(float64(requirements.Ocp.Master.Quantitative.RAMMib), float64(requirements.Ocp.Worker.Quantitative.RAMMib)))
return err
}
func (c *validationContext) loadGeneralInfraEnvMinRequirements(hwValidator hardware.Validator) error {
requirements, err := hwValidator.GetPreflightInfraEnvHardwareRequirements(context.TODO(), c.infraEnv)
if err != nil {
return err
}
c.minCPUCoresRequirement = int64(math.Min(float64(requirements.Ocp.Master.Quantitative.CPUCores), float64(requirements.Ocp.Worker.Quantitative.CPUCores)))
c.minRAMMibRequirement = int64(math.Min(float64(requirements.Ocp.Master.Quantitative.RAMMib), float64(requirements.Ocp.Worker.Quantitative.RAMMib)))
return err
}
func newValidationContext(host *models.Host, c *common.Cluster, i *common.InfraEnv, db *gorm.DB, hwValidator hardware.Validator) (*validationContext, error) {
ret := &validationContext{
host: host,
db: db,
cluster: c,
infraEnv: i,
}
if host.ClusterID != nil {
err := ret.loadCluster()
if err != nil {
return nil, err
}
err = ret.loadInventory()
if err != nil {
return nil, err
}
err = ret.validateRole()
if err != nil {
return nil, err
}
err = ret.validateMachineCIDR()
if err != nil {
return nil, err
}
err = ret.loadClusterHostRequirements(hwValidator)
if err != nil {
return nil, err
}
err = ret.loadGeneralMinRequirements(hwValidator)
if err != nil {
return nil, err
}
} else {
err := ret.loadInfraEnv()
if err != nil {
return nil, err
}
err = ret.loadInventory()
if err != nil {
return nil, err
}
err = ret.validateRole()
if err != nil {
return nil, err
}
err = ret.loadInfraEnvHostRequirements(hwValidator)
if err != nil {
return nil, err
}
err = ret.loadGeneralInfraEnvMinRequirements(hwValidator)
if err != nil {
return nil, err
}
}
return ret, nil
}
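// boolValue maps a boolean check result to ValidationSuccess or ValidationFailure.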
func boolValue(b bool) ValidationStatus {
if b {
return ValidationSuccess
} else {
return ValidationFailure
}
}
type validator struct {
log logrus.FieldLogger
hwValidatorCfg *hardware.ValidatorCfg
hwValidator hardware.Validator
operatorsAPI operators.API
providerRegistry registry.ProviderRegistry
}
func (v *validator) isConnected(c *validationContext) ValidationStatus {
return boolValue(c.host.CheckedInAt.String() == "" || time.Since(time.Time(c.host.CheckedInAt)) <= MaxHostDisconnectionTime)
}
func (v *validator) printConnected(context *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Host is connected"
case ValidationFailure:
return "Host is disconnected"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasInventory(c *validationContext) ValidationStatus {
return boolValue(c.inventory != nil)
}
func (v *validator) printHasInventory(context *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Valid inventory exists for the host"
case ValidationFailure:
return "Inventory has not been received for the host"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasMinCpuCores(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
return boolValue(c.inventory.CPU.Count >= c.minCPUCoresRequirement)
}
func (v *validator) printHasMinCpuCores(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Sufficient CPU cores"
case ValidationFailure:
return fmt.Sprintf("The host is not eligible to participate in Openshift Cluster because the minimum required CPU cores for any role is %d, found only %d", c.minCPUCoresRequirement, c.inventory.CPU.Count)
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasMinMemory(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
return boolValue(c.inventory.Memory.PhysicalBytes >= conversions.MibToBytes(c.minRAMMibRequirement))
}
func (v *validator) compatibleWithClusterPlatform(c *validationContext) ValidationStatus {
// Late binding
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if *c.cluster.Kind == models.ClusterKindAddHostsCluster {
return ValidationSuccess
}
if c.inventory == nil || common.PlatformTypeValue(c.cluster.Platform.Type) == "" {
return ValidationPending
}
supported, err := v.providerRegistry.IsHostSupported(common.PlatformTypeValue(c.cluster.Platform.Type), c.host)
if err != nil {
return ValidationError
}
if supported {
return ValidationSuccess
}
return ValidationFailure
}
func (v *validator) printCompatibleWithClusterPlatform(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Host is compatible with cluster platform %s", common.PlatformTypeValue(c.cluster.Platform.Type))
case ValidationFailure:
hostAvailablePlatforms, _ := v.providerRegistry.GetSupportedProvidersByHosts([]*models.Host{c.host})
return fmt.Sprintf("Host is not compatible with cluster platform %s; either disable this host or choose a compatible cluster platform (%v)",
common.PlatformTypeValue(c.cluster.Platform.Type), hostAvailablePlatforms)
case ValidationPending:
return "Missing inventory or platform isn't set"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
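// isDiskEncryptionEnabledForRole reports whether the cluster's disk encryption policy applies to the given host role.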
func isDiskEncryptionEnabledForRole(encryption models.DiskEncryption, role models.HostRole) bool {
switch swag.StringValue(encryption.EnableOn) {
case models.DiskEncryptionEnableOnAll:
return true
case models.DiskEncryptionEnableOnMasters:
return role == models.HostRoleMaster || role == models.HostRoleBootstrap
case models.DiskEncryptionEnableOnWorkers:
return role == models.HostRoleWorker
default:
return false
}
}
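// getDiskEncryptionForDay2 extracts the LUKS disk encryption configuration from the ignition returned by the day-1 cluster, if present.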
func (v *validator) getDiskEncryptionForDay2(host *models.Host) (*types.Luks, error) {
var response models.APIVipConnectivityResponse
if err := json.Unmarshal([]byte(host.APIVipConnectivity), &response); err != nil {
// APIVipConnectivityResponse is not available yet - retrying.
return nil, err
}
// Parse ignition from APIVipConnectivity (LUKS is supported in version >= 3.2)
config, _, err := v3_2.Parse([]byte(response.Ignition))
if err != nil {
v.log.WithError(err).Warn("Ignition is empty or invalid - can't get disk encryption")
return nil, nil
}
// Checks if LUKS (disk encryption) exists
if len(config.Storage.Luks) == 0 {
// Disk encryption is disabled
return nil, nil
}
// Return LUKS object
return &config.Storage.Luks[0], nil
}
func (v *validator) diskEncryptionRequirementsSatisfied(c *validationContext) ValidationStatus {
if c.infraEnv != nil || swag.StringValue(c.cluster.DiskEncryption.EnableOn) == models.DiskEncryptionEnableOnNone {
return ValidationSuccessSuppressOutput
}
if c.inventory == nil {
return ValidationPending
}
// Day 2 validation takes the disk encryption data solely from
// the host inventory and sets the diskEncryption field on the cluster
// according to that information.
if hostutil.IsDay2Host(c.host) {
luks, err := v.getDiskEncryptionForDay2(c.host)
if err != nil {
return ValidationPending
}
if luks == nil {
// Disk encryption is disabled for workers on day1 cluster
return ValidationSuccessSuppressOutput
}
c.cluster.DiskEncryption = &models.DiskEncryption{}
if swag.BoolValue(luks.Clevis.Tpm2) {
c.cluster.DiskEncryption.Mode = swag.String(models.DiskEncryptionModeTpmv2)
// If Tpm2 is enabled for workers, check whether supported by the host.
return boolValue(c.inventory.TpmVersion == models.InventoryTpmVersionNr20)
} else if len(luks.Clevis.Tang) != 0 {
c.cluster.DiskEncryption.Mode = swag.String(models.DiskEncryptionModeTang)
// No need to validate Tang
return ValidationSuccessSuppressOutput
} else {
// Only Tpm2 and Tang are available for disk encryption
return ValidationFailure
}
}
// Day 1 validation relies on the host's role and the user
// configuration to check whether the disk encryption setup is valid.
role := common.GetEffectiveRole(c.host)
if role == models.HostRoleAutoAssign {
return ValidationPending
}
if !isDiskEncryptionEnabledForRole(*c.cluster.DiskEncryption, role) {
return ValidationSuccessSuppressOutput
}
if *c.cluster.DiskEncryption.Mode != models.DiskEncryptionModeTpmv2 {
return ValidationSuccess
}
return boolValue(c.inventory.TpmVersion == models.InventoryTpmVersionNr20)
}
func (v *validator) printDiskEncryptionRequirementsSatisfied(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Installation disk can be encrypted using %s", *c.cluster.DiskEncryption.Mode)
case ValidationFailure:
if c.inventory.TpmVersion == models.InventoryTpmVersionNone {
return "TPM version could not be found, make sure TPM is enabled in host's BIOS"
} else if c.cluster.DiskEncryption.Mode == nil {
return "Invalid LUKS object in ignition - both TPM2 and Tang are not available"
} else {
return fmt.Sprintf("The host's TPM version is not supported, expected-version: %s, actual-version: %s",
models.InventoryTpmVersionNr20, c.inventory.TpmVersion)
}
case ValidationPending:
if c.inventory == nil {
return "Missing host inventory"
}
return "Missing role assignment"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) printHasMinMemory(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Sufficient minimum RAM"
case ValidationFailure:
return fmt.Sprintf("The host is not eligible to participate in Openshift Cluster because the minimum required RAM for any role is %s, found only %s",
conversions.BytesToString(conversions.MibToBytes(c.minRAMMibRequirement)), conversions.BytesToString(c.inventory.Memory.PhysicalBytes))
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasMinValidDisks(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
disks := v.hwValidator.ListEligibleDisks(c.inventory)
return boolValue(len(disks) > 0)
}
func (v *validator) printHasMinValidDisks(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Sufficient disk capacity"
case ValidationFailure:
return "No eligible disks were found, please check specific disks to see why they are not eligible"
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) isMachineCidrDefined(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
return boolValue(swag.BoolValue(c.cluster.UserManagedNetworking) || swag.StringValue(c.cluster.Kind) == models.ClusterKindAddHostsCluster || network.IsMachineCidrAvailable(c.cluster))
}
func (v *validator) printIsMachineCidrDefined(context *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
if swag.BoolValue(context.cluster.UserManagedNetworking) {
return "No Machine Network CIDR needed: User Managed Networking"
}
if swag.StringValue(context.cluster.Kind) == models.ClusterKindAddHostsCluster {
return "No Machine Network CIDR needed: Day2 cluster"
}
return "Machine Network CIDR is defined"
case ValidationFailure:
if swag.BoolValue(context.cluster.VipDhcpAllocation) {
return "Machine Network CIDR is undefined"
} else {
return "Machine Network CIDR is undefined; the Machine Network CIDR can be defined by setting either the API or Ingress virtual IPs"
}
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasCPUCoresForRole(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
return boolValue(c.inventory.CPU.Count >= c.clusterHostRequirements.Total.CPUCores)
}
func (v *validator) printHasCPUCoresForRole(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Sufficient CPU cores for role %s", common.GetEffectiveRole(c.host))
case ValidationFailure:
return fmt.Sprintf("Require at least %d CPU cores for %s role, found only %d", c.clusterHostRequirements.Total.CPUCores, common.GetEffectiveRole(c.host), c.inventory.CPU.Count)
case ValidationPending:
return "Missing inventory or role"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasMemoryForRole(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
requiredBytes := conversions.MibToBytes(c.clusterHostRequirements.Total.RAMMib)
return boolValue(c.inventory.Memory.PhysicalBytes >= requiredBytes)
}
func (v *validator) isValidPlatformNetworkSettings(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
if c.inventory.SystemVendor == nil {
return ValidationError
}
if funk.ContainsString(invalidPlatforms, c.inventory.SystemVendor.ProductName) {
// Without a bound cluster (late binding) there is nothing to check yet, so suppress the output
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
} else {
// In case userManagedNetworking is true, we don't care about the platform
return boolValue(swag.BoolValue(c.cluster.UserManagedNetworking))
}
}
return ValidationSuccess
}
func (v *validator) printValidPlatformNetworkSettings(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Platform %s is allowed", c.inventory.SystemVendor.ProductName)
case ValidationFailure:
return fmt.Sprintf("Platform %s is allowed only for Single Node OpenShift or user-managed networking", c.inventory.SystemVendor.ProductName)
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) printHasMemoryForRole(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Sufficient RAM for role %s", common.GetEffectiveRole(c.host))
case ValidationFailure:
return fmt.Sprintf("Require at least %s RAM for role %s, found only %s",
conversions.BytesToString(conversions.MibToBytes(c.clusterHostRequirements.Total.RAMMib)), common.GetEffectiveRole(c.host), conversions.BytesToString(c.inventory.Memory.PhysicalBytes))
case ValidationPending:
return "Missing inventory or role"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) belongsToMachineCidr(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if swag.StringValue(c.cluster.Kind) == models.ClusterKindAddHostsCluster || (swag.BoolValue(c.cluster.UserManagedNetworking) && !common.IsSingleNodeCluster(c.cluster)) {
return ValidationSuccess
}
if c.inventory == nil || !network.IsMachineCidrAvailable(c.cluster) {
return ValidationPending
}
return boolValue(network.IsHostInPrimaryMachineNetCidr(v.log, c.cluster, c.host))
}
func (v *validator) printBelongsToMachineCidr(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
if swag.BoolValue(c.cluster.UserManagedNetworking) {
return "No machine network CIDR validation needed: User Managed Networking"
}
if swag.StringValue(c.cluster.Kind) == models.ClusterKindAddHostsCluster {
return "No machine network CIDR validation needed: Day2 cluster"
}
return "Host belongs to all machine network CIDRs"
case ValidationFailure:
return "Host does not belong to machine network CIDRs. Verify that the host belongs to every CIDR listed under machine networks"
case ValidationPending:
return "Missing inventory or machine network CIDR"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
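// getRealHostname prefers the user-requested hostname and falls back to the hostname discovered in the inventory.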
func getRealHostname(host *models.Host, inventory *models.Inventory) string {
if host.RequestedHostname != "" {
return host.RequestedHostname
}
return inventory.Hostname
}
func (v *validator) isHostnameUnique(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if c.inventory == nil {
return ValidationPending
}
realHostname := getRealHostname(c.host, c.inventory)
for _, h := range c.cluster.Hosts {
if h.ID.String() != c.host.ID.String() && h.Inventory != "" {
var otherInventory models.Inventory
if err := json.Unmarshal([]byte(h.Inventory), &otherInventory); err != nil {
v.log.WithError(err).Warnf("Illegal inventory for host %s", h.ID.String())
// Skip hosts whose inventory cannot be parsed
continue
}
if realHostname == getRealHostname(h, &otherInventory) {
return ValidationFailure
}
}
}
return ValidationSuccess
}
func (v *validator) printHostnameUnique(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Hostname %s is unique in cluster", getRealHostname(c.host, c.inventory))
case ValidationFailure:
return fmt.Sprintf("Hostname %s is not unique in cluster", getRealHostname(c.host, c.inventory))
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) isHostnameValid(c *validationContext) ValidationStatus {
if c.inventory == nil {
return ValidationPending
}
return boolValue(!funk.ContainsString(forbiddenHostnames, getRealHostname(c.host, c.inventory)))
}
func (v *validator) printHostnameValid(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return fmt.Sprintf("Hostname %s is allowed", getRealHostname(c.host, c.inventory))
case ValidationFailure:
return fmt.Sprintf("Hostname %s is forbidden", getRealHostname(c.host, c.inventory))
case ValidationPending:
return "Missing inventory"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) isIgnitionDownloadable(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if !hostutil.IsDay2Host(c.host) || swag.BoolValue(c.cluster.UserManagedNetworking) {
return ValidationSuccessSuppressOutput
}
if c.host.APIVipConnectivity == "" {
return ValidationPending
}
var response models.APIVipConnectivityResponse
if err := json.Unmarshal([]byte(c.host.APIVipConnectivity), &response); err != nil {
return ValidationFailure
}
return boolValue(response.IsSuccess)
}
func (v *validator) printIgnitionDownloadable(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
if swag.BoolValue(c.cluster.UserManagedNetworking) {
return "No API VIP needed: User Managed Networking"
}
return "Ignition is downloadable"
case ValidationFailure:
return "Ignition is not downloadable. Please ensure host connectivity to the cluster's API VIP."
case ValidationPending:
return "Ignition is not ready, pending API VIP connectivity."
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) belongsToL2MajorityGroup(c *validationContext, majorityGroups map[string][]strfmt.UUID) ValidationStatus {
if !network.IsMachineCidrAvailable(c.cluster) {
return ValidationPending
}
// TODO(mko) This rule should be revised as soon as OCP supports multiple machineNetwork
// entries using the same IP stack.
ret := true
for _, machineNet := range c.cluster.MachineNetworks {
ret = ret && funk.Contains(majorityGroups[string(machineNet.Cidr)], *c.host.ID)
}
return boolValue(ret)
}
func (v *validator) belongsToL3MajorityGroup(c *validationContext, majorityGroups map[string][]strfmt.UUID) ValidationStatus {
ipv4, ipv6, err := network.GetConfiguredAddressFamilies(c.cluster)
if err != nil {
v.log.WithError(err).Warn("Get configured address families")
return ValidationError
}
if !(ipv4 || ipv6) {
return ValidationFailure
}
ret := true
if ipv4 {
ret = ret && funk.Contains(majorityGroups[network.IPv4.String()], *c.host.ID)
}
if ipv6 {
ret = ret && funk.Contains(majorityGroups[network.IPv6.String()], *c.host.ID)
}
return boolValue(ret)
}
func (v *validator) belongsToMajorityGroup(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if hostutil.IsDay2Host(c.host) || common.IsSingleNodeCluster(c.cluster) {
return ValidationSuccess
}
if c.cluster.ConnectivityMajorityGroups == "" {
return ValidationPending
}
var majorityGroups map[string][]strfmt.UUID
err := json.Unmarshal([]byte(c.cluster.ConnectivityMajorityGroups), &majorityGroups)
if err != nil {
v.log.WithError(err).Warn("Parse majority group")
return ValidationError
}
var ret ValidationStatus
if swag.BoolValue(c.cluster.UserManagedNetworking) {
ret = v.belongsToL3MajorityGroup(c, majorityGroups)
} else {
ret = v.belongsToL2MajorityGroup(c, majorityGroups)
}
if ret == ValidationFailure && len(c.cluster.Hosts) < 3 {
return ValidationPending
}
return ret
}
func (v *validator) printBelongsToMajorityGroup(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
if hostutil.IsDay2Host(c.host) {
return "Day2 host is not required to be connected to other hosts in the cluster"
}
return "Host has connectivity to the majority of hosts in the cluster"
case ValidationFailure:
return "No connectivity to the majority of hosts in the cluster"
case ValidationError:
return "Parse error for connectivity majority group"
case ValidationPending:
if !network.IsMachineCidrAvailable(c.cluster) || c.cluster.ConnectivityMajorityGroups == "" {
return "Machine Network CIDR or Connectivity Majority Groups missing"
} else if len(c.cluster.Hosts) < 3 {
return "Not enough hosts in cluster to calculate connectivity groups"
}
// Shouldn't happen
return "Not enough information to calculate host majority groups"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
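// missingNTPSyncResult determines the NTP validation result for hosts without synced sources:
// unbound hosts with no configured sources pass silently, everything else fails.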
func (v *validator) missingNTPSyncResult(db *gorm.DB, host *models.Host) ValidationStatus {
unboundStatuses := []string{
models.HostStatusInsufficientUnbound,
models.HostStatusDisconnectedUnbound,
models.HostStatusDiscoveringUnbound,
models.HostStatusKnownUnbound,
}
if funk.ContainsString(unboundStatuses, swag.StringValue(host.Status)) {
sources, err := common.GetHostNTPSources(db, host)
if err != nil {
v.log.WithError(err).Errorf("Failed to get sources for host %s", host.ID.String())
return ValidationError
}
if sources == "" {
return ValidationSuccessSuppressOutput
}
}
return ValidationFailure
}
func (v *validator) isNTPSynced(c *validationContext) ValidationStatus {
var sources []*models.NtpSource
if c.host.NtpSources == "" {
return v.missingNTPSyncResult(c.db, c.host)
}
if err := json.Unmarshal([]byte(c.host.NtpSources), &sources); err != nil {
v.log.WithError(err).Warn("Parse NTP sources")
return ValidationError
}
for _, source := range sources {
if source.SourceState == models.SourceStateSynced {
return ValidationSuccess
}
}
return v.missingNTPSyncResult(c.db, c.host)
}
func (v *validator) printNTPSynced(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Host NTP is synced"
case ValidationFailure:
return "Host couldn't synchronize with any NTP server"
case ValidationError:
return "Parse error for NTP sources"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) sucessfullOrUnknownContainerImagesAvailability(c *validationContext) ValidationStatus {
imageStatuses, err := common.UnmarshalImageStatuses(c.host.ImagesStatus)
if err != nil {
v.log.WithError(err).Warn("Parse container image statuses")
return ValidationError
}
return boolValue(allImagesValid(imageStatuses))
}
func (v *validator) printSucessfullOrUnknownContainerImagesAvailability(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "All required container images were either pulled successfully or no attempt was made to pull them"
case ValidationFailure:
images, err := v.getFailedImagesNames(c.host)
if err == nil {
return fmt.Sprintf("Failed to fetch container images needed for installation from %s. "+
"This may be due to a network hiccup. Retry to install again. If this problem persists, "+
"check your network settings to make sure you’re not blocked.", strings.Join(images, ","))
}
fallthrough
case ValidationError:
return "Parse error for container image statuses"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) getFailedImagesNames(host *models.Host) ([]string, error) {
imageStatuses, err := common.UnmarshalImageStatuses(host.ImagesStatus)
if err != nil {
return nil, err
}
imageNames := make([]string, 0)
for _, imageStatus := range imageStatuses {
if isInvalidImageStatus(imageStatus) {
imageNames = append(imageNames, imageStatus.Name)
}
}
return imageNames, nil
}
func isInvalidImageStatus(imageStatus *models.ContainerImageAvailability) bool {
return imageStatus.Result == models.ContainerImageAvailabilityResultFailure ||
(imageStatus.SizeBytes > 0 && imageStatus.DownloadRate < ImageStatusDownloadRateThreshold)
}
func allImagesValid(imageStatuses common.ImageStatuses) bool {
for _, imageStatus := range imageStatuses {
if isInvalidImageStatus(imageStatus) {
return false
}
}
return true
}
/*
This is a pre-install validation that checks that the boot device either was not tested for sufficient disk speed
or the disk speed check has been successful. Since the disk speed test is performed after installation has started,
any available result must come from a previous installation attempt.
Since all pre-install validations have to pass before starting installation, this validation must pass when
installation on the current boot device has not been attempted yet.
*/
func (v *validator) sufficientOrUnknownInstallationDiskSpeed(c *validationContext) ValidationStatus {
info, err := v.getBootDeviceInfo(c.host)
if err != nil {
return ValidationError
}
return boolValue(info == nil || info.DiskSpeed == nil || !info.DiskSpeed.Tested || info.DiskSpeed.ExitCode == 0)
}
func (v *validator) printSufficientOrUnknownInstallationDiskSpeed(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
info, _ := v.getBootDeviceInfo(c.host)
if info == nil || info.DiskSpeed == nil || !info.DiskSpeed.Tested {
return "Speed of installation disk has not yet been measured"
}
return "Speed of installation disk is sufficient"
case ValidationFailure:
return "While preparing the previous installation the installation disk speed measurement failed or was found to be insufficient"
case ValidationError:
return "Error occurred while getting boot device"
default:
return fmt.Sprintf("Unexpected status %s", status.String())
}
}
func (v *validator) hasSufficientNetworkLatencyRequirementForRole(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if len(c.cluster.Hosts) == 1 || c.clusterHostRequirements.Total.NetworkLatencyThresholdMs == nil || common.GetEffectiveRole(c.host) == models.HostRoleAutoAssign || hostutil.IsDay2Host(c.host) {
// Single Node use case || no requirements defined || role is auto assign || day2 host
return ValidationSuccess
}
if len(c.host.Connectivity) == 0 {
return ValidationPending
}
status, _, _ := v.validateNetworkLatencyForRole(c.host, c.clusterHostRequirements, c.cluster.Hosts)
return status
}
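// validateNetworkLatencyForRole checks the measured RTT to every peer with the same effective role
// against the role's latency threshold and collects the offending peers.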
func (v *validator) validateNetworkLatencyForRole(host *models.Host, clusterRoleReqs *models.ClusterHostRequirements, hosts []*models.Host) (ValidationStatus, []string, error) {
connectivityReport, err := hostutil.UnmarshalConnectivityReport(host.Connectivity)
if err != nil {
v.log.Errorf("Unable to unmarshall host connectivity for %s:%s", host.ID, err)
return ValidationError, nil, nil
}
failedHostIPs := map[string]struct{}{}
failedHostLatencies := []string{}
for _, r := range connectivityReport.RemoteHosts {
for _, l3 := range r.L3Connectivity {
if l3.AverageRTTMs > *clusterRoleReqs.Total.NetworkLatencyThresholdMs {
if _, ok := failedHostIPs[l3.RemoteIPAddress]; !ok {
hostname, role, err := GetHostnameAndEffectiveRoleByIP(l3.RemoteIPAddress, hosts)
if err != nil {
v.log.Error(err)
return ValidationFailure, nil, err
}
if role == common.GetEffectiveRole(host) {
failedHostIPs[l3.RemoteIPAddress] = struct{}{}
failedHostLatencies = append(failedHostLatencies, fmt.Sprintf(" %s (%.2f ms)", hostname, l3.AverageRTTMs))
}
}
}
}
}
if len(failedHostLatencies) > 0 {
return ValidationFailure, failedHostLatencies, nil
}
return ValidationSuccess, nil, nil
}
const (
lessThanOr = "less than or"
equals = "equals"
)
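// comparisonBuilder renders the comparison phrase used in requirement messages:
// "less than or equals" for positive thresholds, "equals" otherwise.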
func comparisonBuilder(value float64) string {
if value > 0 {
return fmt.Sprintf("%s %s", lessThanOr, equals)
}
return equals
}
func (v *validator) printSufficientNetworkLatencyRequirementForRole(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Network latency requirement has been satisfied."
case ValidationFailure:
_, hostLatencies, err := v.validateNetworkLatencyForRole(c.host, c.clusterHostRequirements, c.cluster.Hosts)
if err != nil {
return fmt.Sprintf("Error while attempting to validate network latency: %s", err)
}
return fmt.Sprintf("Network latency requirements of %s %.2f ms not met for connectivity between %s and%s.", comparisonBuilder(*c.clusterHostRequirements.Total.NetworkLatencyThresholdMs), *c.clusterHostRequirements.Total.NetworkLatencyThresholdMs, c.host.ID, strings.Join(hostLatencies, ","))
case ValidationPending:
return "Missing network latency information."
case ValidationError:
return "Parse error while attempting to process the connectivity report"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasSufficientPacketLossRequirementForRole(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if len(c.cluster.Hosts) == 1 || c.clusterHostRequirements.Total.PacketLossPercentage == nil || common.GetEffectiveRole(c.host) == models.HostRoleAutoAssign || hostutil.IsDay2Host(c.host) {
		// Single Node use case || no requirements defined || role is auto assign || day-2 host
return ValidationSuccess
}
if len(c.host.Connectivity) == 0 {
return ValidationPending
}
status, _, _ := v.validatePacketLossForRole(c.host, c.clusterHostRequirements, c.cluster.Hosts)
return status
}
func (v *validator) validatePacketLossForRole(host *models.Host, clusterRoleReqs *models.ClusterHostRequirements, hosts []*models.Host) (ValidationStatus, []string, error) {
connectivityReport, err := hostutil.UnmarshalConnectivityReport(host.Connectivity)
if err != nil {
v.log.Errorf("Unable to unmarshall host connectivity for %s:%s", host.ID, err)
return ValidationError, nil, nil
}
failedHostIPs := map[string]struct{}{}
failedHostPacketLoss := []string{}
for _, r := range connectivityReport.RemoteHosts {
for _, l3 := range r.L3Connectivity {
if l3.PacketLossPercentage > *clusterRoleReqs.Total.PacketLossPercentage {
if _, ok := failedHostIPs[l3.RemoteIPAddress]; !ok {
hostname, role, err := GetHostnameAndEffectiveRoleByIP(l3.RemoteIPAddress, hosts)
if err != nil {
v.log.Error(err)
return ValidationFailure, nil, err
}
if role == common.GetEffectiveRole(host) {
failedHostIPs[l3.RemoteIPAddress] = struct{}{}
failedHostPacketLoss = append(failedHostPacketLoss, fmt.Sprintf(" %s (%.2f%%)", hostname, l3.PacketLossPercentage))
}
}
}
}
}
if len(failedHostPacketLoss) > 0 {
return ValidationFailure, failedHostPacketLoss, nil
}
return ValidationSuccess, nil, nil
}
func (v *validator) printSufficientPacketLossRequirementForRole(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Packet loss requirement has been satisfied."
case ValidationFailure:
_, hostPacketLoss, err := v.validatePacketLossForRole(c.host, c.clusterHostRequirements, c.cluster.Hosts)
if err != nil {
return fmt.Sprintf("Error while attempting to validate packet loss validation: %s", err)
}
return fmt.Sprintf("Packet loss percentage requirement of %s %.2f%% not met for connectivity between %s and%s.", comparisonBuilder(*c.clusterHostRequirements.Total.PacketLossPercentage), *c.clusterHostRequirements.Total.PacketLossPercentage, c.host.ID, strings.Join(hostPacketLoss, ","))
case ValidationPending:
return "Missing packet loss information."
case ValidationError:
return "Parse error while attempting to process the connectivity report"
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func (v *validator) hasDefaultRoute(c *validationContext) ValidationStatus {
if len(c.host.Inventory) == 0 {
return ValidationPending
}
inv, err := common.UnmarshalInventory(c.host.Inventory)
if err != nil || len(inv.Routes) == 0 {
return ValidationFailure
}
if v.validateDefaultRoute(inv.Routes) {
return ValidationSuccess
}
return ValidationFailure
}
func (v *validator) validateDefaultRoute(routes []*models.Route) bool {
for _, r := range routes {
if len(r.Destination) == 0 || len(r.Gateway) == 0 {
continue
}
dst := net.ParseIP(r.Destination)
if dst == nil {
v.log.Errorf("unable to parse destination IP: %s", r.Destination)
continue
}
gw := net.ParseIP(r.Gateway)
if gw == nil {
v.log.Errorf("unable to parse gateway IP: %s", r.Gateway)
continue
}
if dst.IsUnspecified() && !gw.IsUnspecified() {
return true
}
}
return false
}
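// Illustrative sketch (not used by the validator): the shape of a route entry that
// validateDefaultRoute accepts, i.e. an unspecified destination together with a
// concrete gateway. The field values here are made up.
func exampleDefaultRoute() []*models.Route {
	return []*models.Route{
		{Destination: "0.0.0.0", Gateway: "192.168.1.1"},
	}
}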
func (v *validator) printDefaultRoute(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Host has been configured with at least one default route."
case ValidationFailure:
return "Host has not yet been configured with a default route."
case ValidationPending:
return "Missing default routing information."
default:
return fmt.Sprintf("Unexpected status %s", status)
}
}
func shouldValidateDnsResolution(c *validationContext) bool {
// Skip DNS resolution checks in IPI network mode
if !swag.BoolValue(c.cluster.UserManagedNetworking) {
return false
}
	// If it's an SNO cluster with the DNSMasq manifests enabled, the check should be skipped
networkCfg, err := network.NewConfig()
if err != nil {
return false
}
return !(common.IsSingleNodeCluster(c.cluster) && networkCfg.EnableSingleNodeDnsmasq)
}
func domainNameToResolve(c *validationContext, name string) string {
return fmt.Sprintf("%s.%s.%s", name, c.cluster.Name, c.cluster.BaseDNSDomain)
}
func (v *validator) isAPIDomainNameResolvedCorrectly(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if !shouldValidateDnsResolution(c) {
return ValidationSuccess
}
apiDomainName := domainNameToResolve(c, constants.APIName)
return checkDomainNameResolution(c, apiDomainName)
}
func (v *validator) printIsAPIDomainNameResolvedCorrectly(c *validationContext, status ValidationStatus) string {
apiDomainName := domainNameToResolve(c, constants.APIName)
return printIsDomainNameResolvedCorrectly(c, status, apiDomainName, "API load balancer")
}
func (v *validator) isAPIInternalDomainNameResolvedCorrectly(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if !shouldValidateDnsResolution(c) {
return ValidationSuccess
}
apiInternalDomainName := domainNameToResolve(c, constants.APIInternalName)
return checkDomainNameResolution(c, apiInternalDomainName)
}
func (v *validator) printIsAPIInternalDomainNameResolvedCorrectly(c *validationContext, status ValidationStatus) string {
apiInternalDomainName := domainNameToResolve(c, constants.APIInternalName)
return printIsDomainNameResolvedCorrectly(c, status, apiInternalDomainName, "API load balancer")
}
func (v *validator) isAppsDomainNameResolvedCorrectly(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if !shouldValidateDnsResolution(c) {
return ValidationSuccess
}
appsDomainName := fmt.Sprintf("%s.apps.%s.%s", constants.AppsSubDomainNameHostDNSValidation, c.cluster.Name, c.cluster.BaseDNSDomain)
return checkDomainNameResolution(c, appsDomainName)
}
func (v *validator) printIsAppsDomainNameResolvedCorrectly(c *validationContext, status ValidationStatus) string {
appsDomainName := domainNameToResolve(c, "*.apps")
return printIsDomainNameResolvedCorrectly(c, status, appsDomainName, "application Ingress load balancer")
}
func checkDomainNameResolution(c *validationContext, domainName string) ValidationStatus {
var response *models.DomainResolutionResponse
if err := json.Unmarshal([]byte(c.host.DomainNameResolutions), &response); err != nil {
return ValidationError
}
for _, domain := range response.Resolutions {
if domain.DomainName != nil && *domain.DomainName == domainName {
if len(domain.IPV4Addresses) != 0 || len(domain.IPV6Addresses) != 0 {
return ValidationSuccess
}
}
}
return ValidationFailure
}
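// Example payload (hedged sketch; JSON field names assumed from the models package)
// that checkDomainNameResolution counts as a success for "api.mycluster.example.com",
// i.e. a resolution entry for the requested name with at least one address:
//
//	{"resolutions":[{"domain_name":"api.mycluster.example.com",
//	                 "ipv4_addresses":["10.0.0.5"],"ipv6_addresses":[]}]}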
func printIsDomainNameResolvedCorrectly(c *validationContext, status ValidationStatus, domainName string, destination string) string {
switch status {
case ValidationSuccess:
if !swag.BoolValue(c.cluster.UserManagedNetworking) {
return "Domain name resolution is not required (managed networking)"
}
return fmt.Sprintf("Domain name resolution was successful for domain %s", domainName)
case ValidationFailure:
return fmt.Sprintf("Couldn't resolve domain name %s on the host. To continue installation, create the necessary DNS entries to resolve this domain name to your %s.", domainName, destination)
case ValidationError:
return "Parse error for domain name resolutions result"
default:
return "Unexpected status"
}
}
func (v *validator) isDNSWildcardNotConfigured(c *validationContext) ValidationStatus {
if c.infraEnv != nil {
return ValidationSuccessSuppressOutput
}
if hostutil.IsDay2Host(c.host) {
return ValidationSuccess
}
var response *models.DomainResolutionResponse
if err := json.Unmarshal([]byte(c.host.DomainNameResolutions), &response); err != nil {
return ValidationError
}
dnsWildcardName := domainNameToResolve(c, constants.DNSWildcardFalseDomainName)
// Note that we're validating that the wildcard DNS *.<cluster_name>.<base_domain> is NOT configured, since this causes known problems for OpenShift
for _, domain := range response.Resolutions {
if domain.DomainName != nil && *domain.DomainName == dnsWildcardName {
if len(domain.IPV4Addresses) == 0 && len(domain.IPV6Addresses) == 0 {
return ValidationSuccess
}
}
}
return ValidationFailure
}
func (v *validator) printIsDNSWildcardNotConfigured(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
if hostutil.IsDay2Host(c.host) {
return "DNS wildcard check is not required for day2"
}
return "DNS wildcard check was successful"
case ValidationFailure:
return fmt.Sprintf("DNS wildcard configuration was detected for domain *.%s.%s The installation will not be able to complete while the entry exists. Please remove it to proceed.", c.cluster.Name, c.cluster.BaseDNSDomain)
case ValidationError:
return "Parse error for domain name resolutions result"
default:
return "Unexpected status"
}
}
func areNetworksOverlapping(c *validationContext) (ValidationStatus, error) {
if c.inventory == nil || c.cluster == nil {
return ValidationPending, nil
}
families, err := network.GetClusterAddressFamilies(c.cluster)
if err != nil {
return ValidationError, err
}
for _, family := range families {
var networks []string
switch family {
case network.IPv4:
networks, err = network.GetIPv4Networks(c.inventory)
case network.IPv6:
networks, err = network.GetIPv6Networks(c.inventory)
}
if err != nil {
return ValidationError, err
}
for i := 0; i < len(networks); i++ {
for j := i + 1; j < len(networks); j++ {
if err = network.NetworksOverlap(networks[i], networks[j]); err != nil {
return ValidationFailure, err
}
}
}
}
return ValidationSuccess, nil
}
func (v *validator) nonOverlappingSubnets(c *validationContext) ValidationStatus {
ret, err := areNetworksOverlapping(c)
if err != nil {
v.log.WithError(err).Errorf("Failed to check if CIDRs are overlapping for host %s infra-env %s", c.host.ID.String(), c.host.InfraEnvID.String())
}
return ret
}
func (v *validator) printNonOverlappingSubnets(c *validationContext, status ValidationStatus) string {
switch status {
case ValidationSuccess:
return "Host subnets are not overlapping"
case ValidationPending:
return "Missing inventory, or missing cluster"
case ValidationFailure:
_, err := areNetworksOverlapping(c)
return fmt.Sprintf("Address networks are overlapping: %s", err.Error())
case ValidationError:
_, err := areNetworksOverlapping(c)
return fmt.Sprintf("Unexpected error: %s", err.Error())
}
return fmt.Sprintf("Unexpected status %s", status)
}
AppListItem.tsx
|
import * as React from "react";
import helmIcon from "../../icons/helm.svg";
import placeholder from "../../placeholder.png";
import { IAppOverview } from "../../shared/types";
import * as url from "../../shared/url";
import InfoCard from "../InfoCard/InfoCard";
import Tooltip from "components/js/Tooltip";
import "./AppListItem.css";
export interface IAppListItemProps {
app: IAppOverview;
cluster: string;
}
function AppListItem(props: IAppListItemProps) {
const { app, cluster } = props;
const icon = app.icon ? app.icon : placeholder;
const appStatus = app.status.toLocaleLowerCase();
let tooltip = <></>;
const updateAvailable = app.updateInfo && !app.updateInfo.error && !app.updateInfo.upToDate;
if (app.updateInfo && updateAvailable) {
if (app.updateInfo.appLatestVersion !== app.chartMetadata.appVersion) {
tooltip = (
<div className="color-icon-info">
<Tooltip
label="update-tooltip"
id={`${app.releaseName}-update-tooltip`}
icon="circle-arrow"
position="top-left"
iconProps={{ solid: true, size: "md", color: "blue" }}
>
New App Version: {app.updateInfo.appLatestVersion}
</Tooltip>
</div>
);
} else {
tooltip = (
<div className="color-icon-info">
<Tooltip
label="update-tooltip"
id={`${app.releaseName}-update-tooltip`}
icon="circle-arrow"
position="top-left"
iconProps={{ solid: true, size: "md" }}
>
New Chart Version: {app.updateInfo.chartLatestVersion}
</Tooltip>
</div>
);
}
}
return (
<InfoCard
      key={`${app.namespace}/${app.releaseName}`}
link={url.app.apps.get(cluster, app.namespace, app.releaseName)}
title={app.releaseName}
icon={icon}
info={
<div>
<span>
App: {app.chartMetadata.name}{" "}
{app.chartMetadata.appVersion ? `v${app.chartMetadata.appVersion}` : ""}
</span>
<br />
<span>Chart: {app.chartMetadata.version}</span>
</div>
}
description={app.chartMetadata.description}
tag1Content={appStatus}
tag1Class={appStatus === "deployed" ? "label-success" : "label-warning"}
tooltip={tooltip}
bgIcon={helmIcon}
/>
);
}
export default AppListItem;
cmake.py
|
import os
import platform
from collections import OrderedDict
from itertools import chain
from conans.client import defs_to_string, join_arguments
from conans.client.build.cppstd_flags import cppstd_flag
from conans.client.tools import cross_building
from conans.client.tools.oss import get_cross_building_settings
from conans.errors import ConanException
from conans.model.conan_file import ConanFile
from conans.model.version import Version
from conans.util.env_reader import get_env
from conans.util.files import mkdir, get_abs_path
from conans.tools import cpu_count, args_to_string
from conans import tools
from conans.util.log import logger
from conans.util.config_parser import get_bool_from_text
from conans.client.build.compiler_flags import architecture_flag
def _get_env_cmake_system_name():
env_system_name = get_env("CONAN_CMAKE_SYSTEM_NAME", "")
return {"False": False, "True": True, "": None}.get(env_system_name, env_system_name)
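# Illustrative mapping (read off the dict above): CONAN_CMAKE_SYSTEM_NAME="" yields
# None (auto-detect), "False" yields False (disabled), and any other value, such as
# "Linux", is passed through verbatim as the system name.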
class CMake(object):
def __init__(self, conanfile, generator=None, cmake_system_name=True,
parallel=True, build_type=None, toolset=None, make_program=None,
set_cmake_flags=False):
"""
        :param conanfile: ConanFile instance (plain settings objects are no longer accepted)
        :param generator: Generator name to use, or None to autodetect
        :param cmake_system_name: False to not use the CMAKE_SYSTEM_NAME variable,
               True for auto-detection, or directly a string with the system name
        :param parallel: Try to build with multiple cores if available
        :param build_type: Overrides the default build type coming from settings
        :param toolset: Toolset name to use (such as llvm-vs2014), or None for the default one;
               applies only to certain generators (e.g. Visual Studio)
        :param set_cmake_flags: whether or not to set CMake flags like CMAKE_CXX_FLAGS,
               CMAKE_C_FLAGS, etc. It's vital to set these for certain projects
               (e.g. those using CMAKE_SIZEOF_VOID_P or CMAKE_LIBRARY_ARCHITECTURE)
"""
if not isinstance(conanfile, ConanFile):
raise ConanException("First argument of CMake() has to be ConanFile. Use CMake(self)")
self._settings = conanfile.settings
self._conanfile = conanfile
self._os = self._settings.get_safe("os")
self._os_build, _, self._os_host, _ = get_cross_building_settings(self._settings)
self._compiler = self._settings.get_safe("compiler")
self._compiler_version = self._settings.get_safe("compiler.version")
self._arch = self._settings.get_safe("arch")
os_ver_str = "os.api_level" if self._os == "Android" else "os.version"
self._op_system_version = self._settings.get_safe(os_ver_str)
self._libcxx = self._settings.get_safe("compiler.libcxx")
self._runtime = self._settings.get_safe("compiler.runtime")
self._build_type = self._settings.get_safe("build_type")
self._cppstd = self._settings.get_safe("cppstd")
self.generator = generator or self._generator()
self.toolset = self._toolset(toolset)
self.build_dir = None
self._cmake_system_name = _get_env_cmake_system_name()
if self._cmake_system_name is None: # Not overwritten using environment
self._cmake_system_name = cmake_system_name
self.parallel = parallel
self._set_cmake_flags = set_cmake_flags
self.definitions = self._get_cmake_definitions()
if build_type and build_type != self._build_type:
# Call the setter to warn and update the definitions if needed
self.build_type = build_type
make_program = os.getenv("CONAN_MAKE_PROGRAM") or make_program
if make_program:
if not tools.which(make_program):
self._conanfile.output.warn("The specified make program '%s' cannot be found"
"and will be ignored" % make_program)
else:
self._conanfile.output.info("Using '%s' as CMAKE_MAKE_PROGRAM" % make_program)
self.definitions["CMAKE_MAKE_PROGRAM"] = make_program
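    # Typical driving pattern from a conanfile's build() method (sketch; see also the
    # patch_config_paths() docstring below for the same sequence):
    #
    #     cmake = CMake(self)
    #     cmake.configure()
    #     cmake.build()
    #     cmake.install()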
@property
def build_folder(self):
return self.build_dir
@build_folder.setter
def build_folder(self, value):
self.build_dir = value
@property
def build_type(self):
return self._build_type
@build_type.setter
def build_type(self, build_type):
settings_build_type = self._settings.get_safe("build_type")
if build_type != settings_build_type:
self._conanfile.output.warn(
'Set CMake build type "%s" is different than the settings build_type "%s"'
% (build_type, settings_build_type))
self._build_type = build_type
self.definitions.update(self._build_type_definition())
@property
    def flags(self):
return defs_to_string(self.definitions)
def _generator(self):
if "CONAN_CMAKE_GENERATOR" in os.environ:
return os.environ["CONAN_CMAKE_GENERATOR"]
if not self._compiler or not self._compiler_version or not self._arch:
if self._os_build == "Windows":
# Not enough settings to set a generator in Windows
return None
return "Unix Makefiles"
if self._compiler == "Visual Studio":
_visuals = {'8': '8 2005',
'9': '9 2008',
'10': '10 2010',
'11': '11 2012',
'12': '12 2013',
'14': '14 2015',
'15': '15 2017'}
base = "Visual Studio %s" % _visuals.get(self._compiler_version,
"UnknownVersion %s" % self._compiler_version)
if self._arch == "x86_64":
return base + " Win64"
elif "arm" in self._arch:
return base + " ARM"
else:
return base
# The generator depends on the build machine, not the target
if self._os_build == "Windows":
return "MinGW Makefiles" # it is valid only under Windows
return "Unix Makefiles"
def _toolset(self, toolset=None):
if toolset:
return toolset
elif self._settings.get_safe("compiler") == "Visual Studio":
subs_toolset = self._settings.get_safe("compiler.toolset")
if subs_toolset:
return subs_toolset
return None
def _cmake_compiler_options(self):
cmake_definitions = OrderedDict()
if str(self._os).lower() == "macos":
if self._arch == "x86":
cmake_definitions["CMAKE_OSX_ARCHITECTURES"] = "i386"
return cmake_definitions
def _cmake_cross_build_defines(self):
ret = OrderedDict()
os_ver = get_env("CONAN_CMAKE_SYSTEM_VERSION", self._op_system_version)
toolchain_file = get_env("CONAN_CMAKE_TOOLCHAIN_FILE", "")
if toolchain_file != "":
logger.info("Setting Cross build toolchain file: %s" % toolchain_file)
ret["CMAKE_TOOLCHAIN_FILE"] = toolchain_file
return ret
if self._cmake_system_name is False:
return ret
# System name and system version
if self._cmake_system_name is not True: # String not empty
ret["CMAKE_SYSTEM_NAME"] = self._cmake_system_name
ret["CMAKE_SYSTEM_VERSION"] = os_ver
        else:  # auto-detect whether we are cross building and, if so, the system name and version
if cross_building(self._conanfile.settings): # We are cross building
if self._os != self._os_build:
if self._os: # the_os is the host (regular setting)
ret["CMAKE_SYSTEM_NAME"] = "Darwin" if self._os in ["iOS", "tvOS",
"watchOS"] else self._os
if os_ver:
ret["CMAKE_SYSTEM_VERSION"] = os_ver
else:
ret["CMAKE_SYSTEM_NAME"] = "Generic"
# system processor
cmake_system_processor = os.getenv("CONAN_CMAKE_SYSTEM_PROCESSOR", None)
if cmake_system_processor:
ret["CMAKE_SYSTEM_PROCESSOR"] = cmake_system_processor
if ret: # If enabled cross compile
for env_var in ["CONAN_CMAKE_FIND_ROOT_PATH",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_PROGRAM",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_LIBRARY",
"CONAN_CMAKE_FIND_ROOT_PATH_MODE_INCLUDE"]:
value = os.getenv(env_var, None)
if value:
ret[env_var] = value
if self._conanfile and self._conanfile.deps_cpp_info.sysroot:
sysroot_path = self._conanfile.deps_cpp_info.sysroot
else:
sysroot_path = os.getenv("CONAN_CMAKE_FIND_ROOT_PATH", None)
if sysroot_path:
# Needs to be set here, can't be managed in the cmake generator, CMake needs
# to know about the sysroot before any other thing
ret["CMAKE_SYSROOT"] = sysroot_path.replace("\\", "/")
# Adjust Android stuff
if self._os == "Android":
arch_abi_settings = {"armv8": "arm64-v8a",
"armv7": "armeabi-v7a",
"armv7hf": "armeabi-v7a",
"armv6": "armeabi-v6",
"armv5": "armeabi"
}.get(self._arch,
self._arch)
if arch_abi_settings:
ret["CMAKE_ANDROID_ARCH_ABI"] = arch_abi_settings
logger.info("Setting Cross build flags: %s"
% ", ".join(["%s=%s" % (k, v) for k, v in ret.items()]))
return ret
@property
def is_multi_configuration(self):
""" some IDEs are multi-configuration, as Visual. Makefiles or Ninja are single-conf
"""
if "Visual" in self.generator or "Xcode" in self.generator:
return True
# TODO: complete logic
return False
@property
def command_line(self):
args = ['-G "%s"' % self.generator] if self.generator else []
args.append(self.flags)
args.append('-Wno-dev')
if self.toolset:
args.append('-T "%s"' % self.toolset)
return join_arguments(args)
def _build_type_definition(self):
if self._build_type and not self.is_multi_configuration:
return {'CMAKE_BUILD_TYPE': self._build_type}
return {}
@property
def runtime(self):
return defs_to_string(self._runtime_definition())
def _runtime_definition(self):
if self._runtime:
return {"CONAN_LINK_RUNTIME": "/%s" % self._runtime}
return {}
@property
def build_config(self):
""" cmake --build tool have a --config option for Multi-configuration IDEs
"""
if self._build_type and self.is_multi_configuration:
return "--config %s" % self._build_type
return ""
def _get_cmake_definitions(self):
        def add_cmake_flag(cmake_flags, name, flag):
            """
            Appends the flag to an existing definition (if already present), or just sets it
            """
            if flag:
                if name not in cmake_flags:
                    cmake_flags[name] = flag
                else:
                    cmake_flags[name] += ' ' + flag
            return cmake_flags
ret = OrderedDict()
ret.update(self._build_type_definition())
ret.update(self._runtime_definition())
ret.update(self._cmake_compiler_options())
ret.update(self._cmake_cross_build_defines())
ret.update(self._get_cpp_standard_vars())
ret["CONAN_EXPORTED"] = "1"
if self._compiler:
ret["CONAN_COMPILER"] = self._compiler
if self._compiler_version:
ret["CONAN_COMPILER_VERSION"] = str(self._compiler_version)
# Force compiler flags -- TODO: give as environment/setting parameter?
arch_flag = architecture_flag(compiler=self._compiler, arch=self._arch)
ret = add_cmake_flag(ret, 'CONAN_CXX_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CONAN_SHARED_LINKER_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CONAN_C_FLAGS', arch_flag)
if self._set_cmake_flags:
ret = add_cmake_flag(ret, 'CMAKE_CXX_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CMAKE_SHARED_LINKER_FLAGS', arch_flag)
ret = add_cmake_flag(ret, 'CMAKE_C_FLAGS', arch_flag)
if self._libcxx:
ret["CONAN_LIBCXX"] = self._libcxx
# Shared library
try:
ret["BUILD_SHARED_LIBS"] = "ON" if self._conanfile.options.shared else "OFF"
except ConanException:
pass
# Install to package folder
try:
if self._conanfile.package_folder:
ret["CMAKE_INSTALL_PREFIX"] = self._conanfile.package_folder
except AttributeError:
pass
if str(self._os) in ["Windows", "WindowsStore"] and self._compiler == "Visual Studio":
if self.parallel:
cpus = tools.cpu_count()
ret["CONAN_CXX_FLAGS"] = "/MP%s" % cpus
ret["CONAN_C_FLAGS"] = "/MP%s" % cpus
# fpic
if str(self._os) not in ["Windows", "WindowsStore"]:
fpic = self._conanfile.options.get_safe("fPIC")
if fpic is not None:
shared = self._conanfile.options.get_safe("shared")
ret["CONAN_CMAKE_POSITION_INDEPENDENT_CODE"] = "ON" if (fpic or shared) else "OFF"
        # Automatically adjust the module path when the conanfile uses the cmake_find_package generator
if "cmake_find_package" in self._conanfile.generators:
ret["CMAKE_MODULE_PATH"] = self._conanfile.install_folder.replace("\\", "/")
        # Disable the CMake export registry #3070 (CMake installing modules in the user's home)
ret["CMAKE_EXPORT_NO_PACKAGE_REGISTRY"] = "ON"
return ret
def _get_dirs(self, source_folder, build_folder, source_dir, build_dir, cache_build_folder):
if (source_folder or build_folder) and (source_dir or build_dir):
raise ConanException("Use 'build_folder'/'source_folder' arguments")
def get_dir(folder, origin):
if folder:
if os.path.isabs(folder):
return folder
return os.path.join(origin, folder)
return origin
if source_dir or build_dir: # OLD MODE
build_ret = build_dir or self.build_dir or self._conanfile.build_folder
source_ret = source_dir or self._conanfile.source_folder
else:
build_ret = get_dir(build_folder, self._conanfile.build_folder)
source_ret = get_dir(source_folder, self._conanfile.source_folder)
if self._conanfile.in_local_cache and cache_build_folder:
build_ret = get_dir(cache_build_folder, self._conanfile.build_folder)
return source_ret, build_ret
def _run(self, command):
if self._compiler == 'Visual Studio' and self.generator in ['Ninja', 'NMake Makefiles', 'NMake Makefiles JOM']:
with tools.vcvars(self._settings, force=True, filter_known_paths=False):
self._conanfile.run(command)
else:
self._conanfile.run(command)
def configure(self, args=None, defs=None, source_dir=None, build_dir=None,
source_folder=None, build_folder=None, cache_build_folder=None,
pkg_config_paths=None):
# TODO: Deprecate source_dir and build_dir in favor of xxx_folder
if not self._conanfile.should_configure:
return
args = args or []
defs = defs or {}
source_dir, self.build_dir = self._get_dirs(source_folder, build_folder,
source_dir, build_dir,
cache_build_folder)
mkdir(self.build_dir)
arg_list = join_arguments([
self.command_line,
args_to_string(args),
defs_to_string(defs),
args_to_string([source_dir])
])
if pkg_config_paths:
pkg_env = {"PKG_CONFIG_PATH":
os.pathsep.join(get_abs_path(f, self._conanfile.install_folder)
for f in pkg_config_paths)}
else:
            # If we are using the pkg_config generator, automate the pc-files location;
            # otherwise it could read the wrong files
set_env = "pkg_config" in self._conanfile.generators \
and "PKG_CONFIG_PATH" not in os.environ
pkg_env = {"PKG_CONFIG_PATH": self._conanfile.install_folder} if set_env else {}
with tools.environment_append(pkg_env):
command = "cd %s && cmake %s" % (args_to_string([self.build_dir]), arg_list)
if platform.system() == "Windows" and self.generator == "MinGW Makefiles":
with tools.remove_from_path("sh"):
self._conanfile.run(command)
else:
self._conanfile.run(command)
def build(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_build:
return
args = args or []
build_dir = build_dir or self.build_dir or self._conanfile.build_folder
if target is not None:
args = ["--target", target] + args
if self.generator and self.parallel:
if "Makefiles" in self.generator and "NMake" not in self.generator:
if "--" not in args:
args.append("--")
args.append("-j%i" % cpu_count())
elif "Visual Studio" in self.generator and \
self._compiler_version and Version(self._compiler_version) >= "10":
if "--" not in args:
args.append("--")
args.append("/m:%i" % cpu_count())
arg_list = join_arguments([
args_to_string([build_dir]),
self.build_config,
args_to_string(args)
])
command = "cmake --build %s" % arg_list
self._run(command)
def install(self, args=None, build_dir=None):
if not self._conanfile.should_install:
return
mkdir(self._conanfile.package_folder)
if not self.definitions.get("CMAKE_INSTALL_PREFIX"):
raise ConanException("CMAKE_INSTALL_PREFIX not defined for 'cmake.install()'\n"
"Make sure 'package_folder' is defined")
self.build(args=args, build_dir=build_dir, target="install")
def test(self, args=None, build_dir=None, target=None):
if not self._conanfile.should_test:
return
if not target:
target = "RUN_TESTS" if self.is_multi_configuration else "test"
self.build(args=args, build_dir=build_dir, target=target)
@property
def verbose(self):
try:
verbose = self.definitions["CMAKE_VERBOSE_MAKEFILE"]
return get_bool_from_text(str(verbose))
except KeyError:
return False
@verbose.setter
def verbose(self, value):
self.definitions["CMAKE_VERBOSE_MAKEFILE"] = "ON" if value else "OFF"
def patch_config_paths(self):
"""
changes references to the absolute path of the installed package and its dependencies in
exported cmake config files to the appropriate conan variable. This makes
most (sensible) cmake config files portable.
For example, if a package foo installs a file called "fooConfig.cmake" to
be used by cmake's find_package method, normally this file will contain
absolute paths to the installed package folder, for example it will contain
a line such as:
SET(Foo_INSTALL_DIR /home/developer/.conan/data/Foo/1.0.0/...)
This will cause cmake find_package() method to fail when someone else
installs the package via conan.
This function will replace such mentions to
SET(Foo_INSTALL_DIR ${CONAN_FOO_ROOT})
which is a variable that is set by conanbuildinfo.cmake, so that find_package()
now correctly works on this conan package.
For dependent packages, if a package foo installs a file called "fooConfig.cmake" to
        be used by cmake's find_package method and if it depends on a package bar,
normally this file will contain absolute paths to the bar package folder,
for example it will contain a line such as:
SET_TARGET_PROPERTIES(foo PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES
"/home/developer/.conan/data/Bar/1.0.0/user/channel/id/include")
This function will replace such mentions to
SET_TARGET_PROPERTIES(foo PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES
"${CONAN_BAR_ROOT}/include")
If the install() method of the CMake object in the conan file is used, this
function should be called _after_ that invocation. For example:
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
cmake.install()
cmake.patch_config_paths()
"""
if not self._conanfile.should_install:
return
if not self._conanfile.name:
raise ConanException("cmake.patch_config_paths() can't work without package name. "
"Define name in your recipe")
pf = self.definitions.get("CMAKE_INSTALL_PREFIX")
replstr = "${CONAN_%s_ROOT}" % self._conanfile.name.upper()
allwalk = chain(os.walk(self._conanfile.build_folder), os.walk(self._conanfile.package_folder))
for root, _, files in allwalk:
for f in files:
if f.endswith(".cmake"):
path = os.path.join(root, f)
tools.replace_in_file(path, pf, replstr, strict=False)
# patch paths of dependent packages that are found in any cmake files of the current package
path_content = tools.load(path)
for dep in self._conanfile.deps_cpp_info.deps:
from_str = self._conanfile.deps_cpp_info[dep].rootpath
# try to replace only if from str is found
if path_content.find(from_str) != -1:
dep_str = "${CONAN_%s_ROOT}" % dep.upper()
self._conanfile.output.info("Patching paths for %s: %s to %s" % (dep, from_str, dep_str))
tools.replace_in_file(path, from_str, dep_str, strict=False)
def _get_cpp_standard_vars(self):
if not self._cppstd:
return {}
ret = {}
if self._cppstd.startswith("gnu"):
ret["CONAN_CMAKE_CXX_STANDARD"] = self._cppstd[3:]
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "ON"
else:
ret["CONAN_CMAKE_CXX_STANDARD"] = self._cppstd
ret["CONAN_CMAKE_CXX_EXTENSIONS"] = "OFF"
ret["CONAN_STD_CXX_FLAG"] = cppstd_flag(self._compiler, self._compiler_version,
self._cppstd)
return ret
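    # Worked example (sketch): cppstd "gnu14" yields CONAN_CMAKE_CXX_STANDARD=14 with
    # CONAN_CMAKE_CXX_EXTENSIONS=ON, while plain "14" yields the same standard with
    # extensions OFF; CONAN_STD_CXX_FLAG carries the raw compiler flag computed by
    # cppstd_flag() (e.g. -std=gnu++14 for gcc).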
other_features.py
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
def categorical_features(data, features):
features['vehicleType'] = data['vehicleType']
features['vehicleOption'] = data['vehicleOption']
features['vehicleTypeOption'] = [a + '_' + b for a, b in zip(data['vehicleType'].values,
data['vehicleOption'].values)]
cat_columns_clusters = ['cluster_dest_db', 'cluster_src_db', 'cluster_src_km', 'cluster_dest_km']
cat_columns_date = ['day', 'month']
cat_columns = ['vehicleType', 'vehicleOption', 'vehicleTypeOption']
# cat_columns += cat_columns_clusters
# cat_columns += cat_columns_date
features = pd.get_dummies(features, columns=cat_columns, drop_first=True)
features['day'] = LabelEncoder().fit_transform(features['day'])
features['month'] = LabelEncoder().fit_transform(features['month'])
return features
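def _example_feature_pipeline():
    """Hedged usage sketch (not called anywhere): column names follow the functions
    above; the values, and the 'day'/'month' columns assumed to be produced by an
    earlier date-feature step, are made up."""
    data = pd.DataFrame({'vehicleType': ['truck', 'van'],
                         'vehicleOption': ['open', 'closed']})
    features = pd.DataFrame({'day': ['Mon', 'Tue'], 'month': ['Jan', 'Feb']})
    return categorical_features(data, features)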
def raw_features(data, features):
features['weight'] = data['weight']
    features['distanceKM'] = data['distanceKM']
    features['taxiDurationMin'] = data['taxiDurationMin']
features['sourceLatitude'] = data['sourceLatitude']
features['sourceLongitude'] = data['sourceLongitude']
features['destinationLatitude'] = data['destinationLatitude']
features['destinationLongitude'] = data['destinationLongitude']
features['src_dest'] = (data['SourceState'] == data['destinationState'])
features['ave_speed'] = data['distanceKM'] / data['taxiDurationMin']
    features['weight_dur'] = np.log(data['taxiDurationMin'] + 30 * data['weight'])
features['weight_dist_dur'] = np.log(1. + (10. + data['weight']) * (100. + data['distanceKM']) *
(1000. + data['taxiDurationMin']))
features['price'] = data['price']
return features
unwise.py
|
import os
import numpy as np
import fitsio
from astrometry.util.fits import fits_table
from astrometry.util.ttime import Time
from wise.unwise import get_unwise_tractor_image
import logging
logger = logging.getLogger('legacypipe.unwise')
def info(*args):
from legacypipe.utils import log_info
log_info(logger, args)
def debug(*args):
from legacypipe.utils import log_debug
log_debug(logger, args)
'''
This function was imported whole from the tractor repo:
wise/forcedphot.py because I figured we were doing enough
LegacySurvey-specific stuff in it that it was time to just import it
and edit it rather than build elaborate options.
'''
def unwise_forcedphot(cat, tiles, band=1, roiradecbox=None,
use_ceres=True, ceres_block=8,
save_fits=False, get_models=False, ps=None,
psf_broadening=None,
pixelized_psf=False,
get_masks=None,
move_crpix=False,
modelsky_dir=None):
|
class wphotduck(object):
pass
def radec_in_unique_area(rr, dd, ra1, ra2, dec1, dec2):
'''Are the given points within the given RA,Dec rectangle?
Returns a boolean array.'''
unique = (dd >= dec1) * (dd < dec2)
if ra1 < ra2:
# normal RA
unique *= (rr >= ra1) * (rr < ra2)
else:
# RA wrap-around
unique[rr > 180] *= (rr[rr > 180] >= ra1)
unique[rr < 180] *= (rr[rr < 180] < ra2)
return unique
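# Illustrative wrap-around case (sketch): with ra1=350 and ra2=10 the unique RA range
# crosses 0 deg, so
#   radec_in_unique_area(np.array([355., 5., 200.]), np.zeros(3), 350., 10., -1., 1.)
# returns array([ True,  True, False]).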
def unwise_phot(X):
'''
This is the entry-point from runbrick.py, called via mp.map()
'''
(key, (wcat, tiles, band, roiradec, wise_ceres, pixelized_psf, get_mods, get_masks, ps,
move_crpix, modelsky_dir)) = X
kwargs = dict(roiradecbox=roiradec, band=band, pixelized_psf=pixelized_psf,
get_masks=get_masks, ps=ps, move_crpix=move_crpix,
modelsky_dir=modelsky_dir)
if get_mods:
kwargs.update(get_models=get_mods)
if wise_ceres and len(wcat) == 0:
wise_ceres = False
# DEBUG
#kwargs.update(save_fits=True)
W = None
try:
W = unwise_forcedphot(wcat, tiles, use_ceres=wise_ceres, **kwargs)
except:
import traceback
print('unwise_forcedphot failed:')
traceback.print_exc()
if wise_ceres:
print('Trying without Ceres...')
try:
W = unwise_forcedphot(wcat, tiles, use_ceres=False, **kwargs)
except:
print('unwise_forcedphot failed (2):')
traceback.print_exc()
return key,W
def collapse_unwise_bitmask(bitmask, band):
'''
Converts WISE mask bits (in the unWISE data products) into the
more compact codes reported in the tractor files as
WISEMASK_W[12], and the "maskbits" WISE extensions.
output bits :
# 2^0 = bright star core and wings
# 2^1 = PSF-based diffraction spike
# 2^2 = optical ghost
# 2^3 = first latent
# 2^4 = second latent
# 2^5 = AllWISE-like circular halo
# 2^6 = bright star saturation
# 2^7 = geometric diffraction spike
'''
assert((band == 1) or (band == 2))
from collections import OrderedDict
bits_w1 = OrderedDict([('core_wings', 2**0 + 2**1),
('psf_spike', 2**27),
('ghost', 2**25 + 2**26),
('first_latent', 2**13 + 2**14),
('second_latent', 2**17 + 2**18),
('circular_halo', 2**23),
('saturation', 2**4),
('geom_spike', 2**29)])
bits_w2 = OrderedDict([('core_wings', 2**2 + 2**3),
('psf_spike', 2**28),
('ghost', 2**11 + 2**12),
('first_latent', 2**15 + 2**16),
('second_latent', 2**19 + 2**20),
('circular_halo', 2**24),
('saturation', 2**5),
('geom_spike', 2**30)])
bits = (bits_w1 if (band == 1) else bits_w2)
# hack to handle both scalar and array inputs
result = 0*bitmask
for i, feat in enumerate(bits.keys()):
result += ((2**i)*(np.bitwise_and(bitmask, bits[feat]) != 0)).astype(np.uint8)
return result.astype('uint8')
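# Worked example (sketch): a W1 input mask with only bit 1 set falls in the
# 'core_wings' group (2**0 + 2**1 above), so it collapses to output bit 2**0:
#   collapse_unwise_bitmask(np.uint32(2), band=1)  -> 1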
###
# This is taken directly from tractor/wise.py, replacing only the filename.
###
def unwise_tiles_touching_wcs(wcs, polygons=True):
'''
Returns a FITS table (with RA,Dec,coadd_id) of unWISE tiles
'''
from astrometry.util.miscutils import polygons_intersect
from astrometry.util.starutil_numpy import degrees_between
from pkg_resources import resource_filename
atlasfn = resource_filename('legacypipe', 'data/wise-tiles.fits')
T = fits_table(atlasfn)
trad = wcs.radius()
wrad = np.sqrt(2.) / 2. * 2048 * 2.75 / 3600.
rad = trad + wrad
r, d = wcs.radec_center()
I, = np.nonzero(np.abs(T.dec - d) < rad)
I = I[degrees_between(T.ra[I], T.dec[I], r, d) < rad]
if not polygons:
return T[I]
# now check actual polygon intersection
tw, th = wcs.imagew, wcs.imageh
targetpoly = [(0.5, 0.5), (tw + 0.5, 0.5),
(tw + 0.5, th + 0.5), (0.5, th + 0.5)]
cd = wcs.get_cd()
tdet = cd[0] * cd[3] - cd[1] * cd[2]
if tdet > 0:
targetpoly = list(reversed(targetpoly))
targetpoly = np.array(targetpoly)
keep = []
for i in I:
wwcs = unwise_tile_wcs(T.ra[i], T.dec[i])
cd = wwcs.get_cd()
wdet = cd[0] * cd[3] - cd[1] * cd[2]
H, W = wwcs.shape
poly = []
for x, y in [(0.5, 0.5), (W + 0.5, 0.5), (W + 0.5, H + 0.5), (0.5, H + 0.5)]:
rr,dd = wwcs.pixelxy2radec(x, y)
_,xx,yy = wcs.radec2pixelxy(rr, dd)
poly.append((xx, yy))
if wdet > 0:
poly = list(reversed(poly))
poly = np.array(poly)
if polygons_intersect(targetpoly, poly):
keep.append(i)
I = np.array(keep)
return T[I]
### Also direct from tractor/wise.py
def unwise_tile_wcs(ra, dec, W=2048, H=2048, pixscale=2.75):
    '''
    Returns a Tan WCS object at the given RA,Dec center, axis aligned, with the
    given pixel W,H and pixel scale in arcsec/pixel.
    '''
    from astrometry.util.util import Tan
cowcs = Tan(ra, dec, (W + 1) / 2., (H + 1) / 2.,
-pixscale / 3600., 0., 0., pixscale / 3600., W, H)
return cowcs
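# Scale check (sketch): a 2048-pixel tile at 2.75 arcsec/pix spans
# 2048 * 2.75 / 3600 ~= 1.56 deg on a side, so the sqrt(2)/2 half-diagonal used as
# the tile radius in unwise_tiles_touching_wcs above is ~1.11 deg.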
|
'''
Given a list of tractor sources *cat*
and a list of unWISE tiles *tiles* (a fits_table with RA,Dec,coadd_id)
runs forced photometry, returning a FITS table the same length as *cat*.
*get_masks*: the WCS to resample mask bits into.
'''
from tractor import PointSource, Tractor, ExpGalaxy, DevGalaxy
from tractor.sersic import SersicGalaxy
if not pixelized_psf and psf_broadening is None:
# PSF broadening in post-reactivation data, by band.
# Newer version from Aaron's email to decam-chatter, 2018-06-14.
broadening = { 1: 1.0405, 2: 1.0346, 3: None, 4: None }
psf_broadening = broadening[band]
if False:
from astrometry.util.plotutils import PlotSequence
ps = PlotSequence('wise-forced-w%i' % band)
plots = (ps is not None)
if plots:
import pylab as plt
wantims = (plots or save_fits or get_models)
wanyband = 'w'
if get_models:
models = []
wband = 'w%i' % band
Nsrcs = len(cat)
phot = fits_table()
# Filled in based on unique tile overlap
phot.wise_coadd_id = np.array([' '] * Nsrcs, dtype='U8')
phot.wise_x = np.zeros(Nsrcs, np.float32)
phot.wise_y = np.zeros(Nsrcs, np.float32)
phot.set('psfdepth_%s' % wband, np.zeros(Nsrcs, np.float32))
nexp = np.zeros(Nsrcs, np.int16)
mjd = np.zeros(Nsrcs, np.float64)
central_flux = np.zeros(Nsrcs, np.float32)
ra = np.array([src.getPosition().ra for src in cat])
dec = np.array([src.getPosition().dec for src in cat])
fskeys = ['prochi2', 'profracflux']
fitstats = {}
if get_masks:
mh,mw = get_masks.shape
maskmap = np.zeros((mh,mw), np.uint32)
tims = []
for tile in tiles:
info('Reading WISE tile', tile.coadd_id, 'band', band)
tim = get_unwise_tractor_image(tile.unwise_dir, tile.coadd_id, band,
bandname=wanyband, roiradecbox=roiradecbox)
if tim is None:
debug('Actually, no overlap with WISE coadd tile', tile.coadd_id)
continue
if plots:
sig1 = tim.sig1
plt.clf()
plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)
plt.colorbar()
tag = '%s W%i' % (tile.coadd_id, band)
plt.title('%s: tim data' % tag)
ps.savefig()
plt.clf()
plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),
range=(-5,10), bins=100)
plt.xlabel('Per-pixel intensity (Sigma)')
plt.title(tag)
ps.savefig()
if move_crpix and band in [1, 2]:
realwcs = tim.wcs.wcs
x,y = realwcs.crpix
tile_crpix = tile.get('crpix_w%i' % band)
dx = tile_crpix[0] - 1024.5
dy = tile_crpix[1] - 1024.5
realwcs.set_crpix(x+dx, y+dy)
debug('unWISE', tile.coadd_id, 'band', band, 'CRPIX', x,y,
'shift by', dx,dy, 'to', realwcs.crpix)
if modelsky_dir and band in [1, 2]:
fn = os.path.join(modelsky_dir, '%s.%i.mod.fits' % (tile.coadd_id, band))
if not os.path.exists(fn):
                raise RuntimeError('WARNING: does not exist: %s' % fn)
x0,x1,y0,y1 = tim.roi
bg = fitsio.FITS(fn)[2][y0:y1, x0:x1]
assert(bg.shape == tim.shape)
if plots:
plt.clf()
plt.subplot(1,2,1)
plt.imshow(tim.getImage(), interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)
plt.subplot(1,2,2)
plt.imshow(bg, interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=5 * sig1)
tag = '%s W%i' % (tile.coadd_id, band)
plt.suptitle(tag)
ps.savefig()
plt.clf()
ha = dict(range=(-5,10), bins=100, histtype='step')
plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),
color='b', label='Original', **ha)
plt.hist(((tim.getImage()-bg) * tim.inverr)[tim.inverr > 0].ravel(),
color='g', label='Minus Background', **ha)
plt.axvline(0, color='k', alpha=0.5)
plt.xlabel('Per-pixel intensity (Sigma)')
plt.legend()
plt.title(tag + ': background')
ps.savefig()
# Actually subtract the background!
tim.data -= bg
# Floor the per-pixel variances,
# and add Poisson contribution from sources
if band in [1,2]:
# in Vega nanomaggies per pixel
floor_sigma = {1: 0.5, 2: 2.0}
poissons = {1: 0.15, 2: 0.3}
with np.errstate(divide='ignore'):
new_ie = 1. / np.sqrt(
(1./tim.inverr)**2 +
floor_sigma[band]**2 +
poissons[band]**2 * np.maximum(0., tim.data))
new_ie[tim.inverr == 0] = 0.
if plots:
plt.clf()
plt.plot((1. / tim.inverr[tim.inverr>0]).ravel(),
(1./new_ie[tim.inverr>0]).ravel(), 'b.')
plt.title('unWISE per-pixel error: %s band %i' %
(tile.coadd_id, band))
plt.xlabel('original')
plt.ylabel('floored')
ps.savefig()
assert(np.all(np.isfinite(new_ie)))
assert(np.all(new_ie >= 0.))
tim.inverr = new_ie
# Expand a 3-pixel radius around weight=0 (saturated) pixels
# from Eddie via crowdsource
# https://github.com/schlafly/crowdsource/blob/7069da3e7d9d3124be1cbbe1d21ffeb63fc36dcc/python/wise_proc.py#L74
## FIXME -- W3/W4 ??
satlimit = 85000
msat = ((tim.data > satlimit) | ((tim.nims == 0) & (tim.nuims > 1)))
from scipy.ndimage.morphology import binary_dilation
xx, yy = np.mgrid[-3:3+1, -3:3+1]
dilate = xx**2+yy**2 <= 3**2
msat = binary_dilation(msat, dilate)
nbefore = np.sum(tim.inverr == 0)
tim.inverr[msat] = 0
nafter = np.sum(tim.inverr == 0)
debug('Masking an additional', (nafter-nbefore), 'near-saturated pixels in unWISE',
tile.coadd_id, 'band', band)
# Read mask file?
if get_masks:
from astrometry.util.resample import resample_with_wcs, OverlapError
# unwise_dir can be a colon-separated list of paths
tilemask = None
for d in tile.unwise_dir.split(':'):
fn = os.path.join(d, tile.coadd_id[:3], tile.coadd_id,
'unwise-%s-msk.fits.gz' % tile.coadd_id)
if os.path.exists(fn):
debug('Reading unWISE mask file', fn)
x0,x1,y0,y1 = tim.roi
tilemask = fitsio.FITS(fn)[0][y0:y1,x0:x1]
break
if tilemask is None:
info('unWISE mask file for tile', tile.coadd_id, 'does not exist')
else:
try:
tanwcs = tim.wcs.wcs
assert(tanwcs.shape == tilemask.shape)
Yo,Xo,Yi,Xi,_ = resample_with_wcs(get_masks, tanwcs,
intType=np.int16)
# Only deal with mask pixels that are set.
I, = np.nonzero(tilemask[Yi,Xi] > 0)
# Trim to unique area for this tile
rr,dd = get_masks.pixelxy2radec(Xo[I]+1, Yo[I]+1)
good = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2,
tile.dec1, tile.dec2)
I = I[good]
maskmap[Yo[I],Xo[I]] = tilemask[Yi[I], Xi[I]]
except OverlapError:
# Shouldn't happen by this point
print('Warning: no overlap between WISE tile', tile.coadd_id, 'and brick')
if plots:
plt.clf()
plt.imshow(tilemask, interpolation='nearest', origin='lower')
plt.title('Tile %s: mask' % tile.coadd_id)
ps.savefig()
plt.clf()
plt.imshow(maskmap, interpolation='nearest', origin='lower')
plt.title('Tile %s: accumulated maskmap' % tile.coadd_id)
ps.savefig()
# The tiles have some overlap, so zero out pixels outside the
# tile's unique area.
th,tw = tim.shape
xx,yy = np.meshgrid(np.arange(tw), np.arange(th))
rr,dd = tim.wcs.wcs.pixelxy2radec(xx+1, yy+1)
unique = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2,
tile.dec1, tile.dec2)
debug('Tile', tile.coadd_id, '- total of', np.sum(unique),
'unique pixels out of', len(unique.flat), 'total pixels')
if get_models:
# Save the inverr before blanking out non-unique pixels, for making coadds with no gaps!
# (actually, slightly more subtly, expand unique area by 1 pixel)
from scipy.ndimage.morphology import binary_dilation
du = binary_dilation(unique)
tim.coadd_inverr = tim.inverr * du
tim.inverr[unique == False] = 0.
del xx,yy,rr,dd,unique
if plots:
sig1 = tim.sig1
plt.clf()
plt.imshow(tim.getImage() * (tim.inverr > 0),
interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=10 * sig1)
plt.colorbar()
tag = '%s W%i' % (tile.coadd_id, band)
plt.title('%s: tim data (unique)' % tag)
ps.savefig()
if pixelized_psf:
from unwise_psf import unwise_psf
if (band == 1) or (band == 2):
# we only have updated PSFs for W1 and W2
psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id,
modelname='neo6_unwisecat')
else:
psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id)
if band == 4:
                # oversample (the unwise_psf models are at native W4 5.5"/pix,
                # while the unWISE coadds are made at 2.75"/pix).
ph,pw = psfimg.shape
subpsf = np.zeros((ph*2-1, pw*2-1), np.float32)
from astrometry.util.util import lanczos3_interpolate
xx,yy = np.meshgrid(np.arange(0., pw-0.51, 0.5, dtype=np.float32),
np.arange(0., ph-0.51, 0.5, dtype=np.float32))
xx = xx.ravel()
yy = yy.ravel()
ix = xx.astype(np.int32)
iy = yy.astype(np.int32)
dx = (xx - ix).astype(np.float32)
dy = (yy - iy).astype(np.float32)
psfimg = psfimg.astype(np.float32)
rtn = lanczos3_interpolate(ix, iy, dx, dy, [subpsf.flat], [psfimg])
if plots:
plt.clf()
plt.imshow(psfimg, interpolation='nearest', origin='lower')
plt.title('Original PSF model')
ps.savefig()
plt.clf()
plt.imshow(subpsf, interpolation='nearest', origin='lower')
plt.title('Subsampled PSF model')
ps.savefig()
psfimg = subpsf
del xx, yy, ix, iy, dx, dy
from tractor.psf import PixelizedPSF
psfimg /= psfimg.sum()
fluxrescales = {1: 1.04, 2: 1.005, 3: 1.0, 4: 1.0}
psfimg *= fluxrescales[band]
tim.psf = PixelizedPSF(psfimg)
if psf_broadening is not None and not pixelized_psf:
# psf_broadening is a factor by which the PSF FWHMs
# should be scaled; the PSF is a little wider
# post-reactivation.
psf = tim.getPsf()
from tractor import GaussianMixturePSF
if isinstance(psf, GaussianMixturePSF):
debug('Broadening PSF: from', psf)
p0 = psf.getParams()
pnames = psf.getParamNames()
p1 = [p * psf_broadening**2 if 'var' in name else p
for (p, name) in zip(p0, pnames)]
psf.setParams(p1)
debug('Broadened PSF:', psf)
else:
print('WARNING: cannot apply psf_broadening to WISE PSF of type', type(psf))
wcs = tim.wcs.wcs
_,fx,fy = wcs.radec2pixelxy(ra, dec)
x = np.round(fx - 1.).astype(int)
y = np.round(fy - 1.).astype(int)
good = (x >= 0) * (x < tw) * (y >= 0) * (y < th)
# Which sources are in this brick's unique area?
usrc = radec_in_unique_area(ra, dec, tile.ra1, tile.ra2, tile.dec1, tile.dec2)
I, = np.nonzero(good * usrc)
nexp[I] = tim.nuims[y[I], x[I]]
if hasattr(tim, 'mjdmin') and hasattr(tim, 'mjdmax'):
mjd[I] = (tim.mjdmin + tim.mjdmax) / 2.
phot.wise_coadd_id[I] = tile.coadd_id
phot.wise_x[I] = fx[I] - 1.
phot.wise_y[I] = fy[I] - 1.
central_flux[I] = tim.getImage()[y[I], x[I]]
del x,y,good,usrc
# PSF norm for depth
psf = tim.getPsf()
h,w = tim.shape
patch = psf.getPointSourcePatch(h//2, w//2).patch
psfnorm = np.sqrt(np.sum(patch**2))
# To handle zero-depth, we return 1/nanomaggies^2 units rather than mags.
# In the small empty patches of the sky (eg W4 in 0922p702), we get sig1 = NaN
if np.isfinite(tim.sig1):
phot.get('psfdepth_%s' % wband)[I] = 1. / (tim.sig1 / psfnorm)**2
tim.tile = tile
tims.append(tim)
if plots:
plt.clf()
mn,mx = 0.1, 20000
plt.hist(np.log10(np.clip(central_flux, mn, mx)), bins=100,
range=(np.log10(mn), np.log10(mx)))
logt = np.arange(0, 5)
plt.xticks(logt, ['%i' % i for i in 10.**logt])
plt.title('Central fluxes (W%i)' % band)
plt.axvline(np.log10(20000), color='k')
plt.axvline(np.log10(1000), color='k')
ps.savefig()
# Eddie's non-secret recipe:
#- central pixel <= 1000: 19x19 pix box size
#- central pixel in 1000 - 20000: 59x59 box size
#- central pixel > 20000 or saturated: 149x149 box size
#- object near "bright star": 299x299 box size
nbig = nmedium = nsmall = 0
for src,cflux in zip(cat, central_flux):
if cflux > 20000:
R = 100
nbig += 1
elif cflux > 1000:
R = 30
nmedium += 1
else:
R = 15
nsmall += 1
if isinstance(src, PointSource):
src.fixedRadius = R
else:
### FIXME -- sizes for galaxies..... can we set PSF size separately?
galrad = 0
# RexGalaxy is a subclass of ExpGalaxy
if isinstance(src, (ExpGalaxy, DevGalaxy, SersicGalaxy)):
galrad = src.shape.re
pixscale = 2.75
src.halfsize = int(np.hypot(R, galrad * 5 / pixscale))
debug('Set WISE source sizes:', nbig, 'big', nmedium, 'medium', nsmall, 'small')
tractor = Tractor(tims, cat)
if use_ceres:
from tractor.ceres_optimizer import CeresOptimizer
tractor.optimizer = CeresOptimizer(BW=ceres_block, BH=ceres_block)
tractor.freezeParamsRecursive('*')
tractor.thawPathsTo(wanyband)
t0 = Time()
R = tractor.optimize_forced_photometry(
fitstats=True, variance=True, shared_params=False, wantims=wantims)
info('unWISE forced photometry took', Time() - t0)
if use_ceres:
term = R.ceres_status['termination']
# Running out of memory can cause failure to converge and term
# status = 2. Fail completely in this case.
if term != 0:
info('Ceres termination status:', term)
raise RuntimeError('Ceres terminated with status %i' % term)
if wantims:
ims1 = R.ims1
# can happen if empty source list (we still want to generate coadds)
if ims1 is None:
ims1 = R.ims0
flux_invvars = R.IV
if R.fitstats is not None:
for k in fskeys:
x = getattr(R.fitstats, k)
fitstats[k] = np.array(x).astype(np.float32)
if save_fits:
for i,tim in enumerate(tims):
tile = tim.tile
(dat, mod, _, chi, _) = ims1[i]
wcshdr = fitsio.FITSHDR()
tim.wcs.wcs.add_to_header(wcshdr)
tag = 'fit-%s-w%i' % (tile.coadd_id, band)
fitsio.write('%s-data.fits' %
tag, dat, clobber=True, header=wcshdr)
fitsio.write('%s-mod.fits' % tag, mod,
clobber=True, header=wcshdr)
fitsio.write('%s-chi.fits' % tag, chi,
clobber=True, header=wcshdr)
if plots:
# Create models for just the brightest sources
bright_cat = [src for src in cat
if src.getBrightness().getBand(wanyband) > 1000]
        debug('Bright sources:', len(bright_cat))
btr = Tractor(tims, bright_cat)
for tim in tims:
mod = btr.getModelImage(tim)
tile = tim.tile
tag = '%s W%i' % (tile.coadd_id, band)
sig1 = tim.sig1
plt.clf()
plt.imshow(mod, interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)
plt.colorbar()
plt.title('%s: bright-star models' % tag)
ps.savefig()
if get_models:
for i,tim in enumerate(tims):
tile = tim.tile
(dat, mod, _, _, _) = ims1[i]
models.append((tile.coadd_id, band, tim.wcs.wcs, dat, mod,
tim.coadd_inverr))
if plots:
for i,tim in enumerate(tims):
tile = tim.tile
tag = '%s W%i' % (tile.coadd_id, band)
(dat, mod, _, chi, _) = ims1[i]
sig1 = tim.sig1
plt.clf()
plt.imshow(dat, interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)
plt.colorbar()
plt.title('%s: data' % tag)
ps.savefig()
plt.clf()
plt.imshow(mod, interpolation='nearest', origin='lower',
cmap='gray', vmin=-3 * sig1, vmax=25 * sig1)
plt.colorbar()
plt.title('%s: model' % tag)
ps.savefig()
plt.clf()
plt.imshow(chi, interpolation='nearest', origin='lower',
cmap='gray', vmin=-5, vmax=+5)
plt.colorbar()
plt.title('%s: chi' % tag)
ps.savefig()
nm = np.array([src.getBrightness().getBand(wanyband) for src in cat])
nm_ivar = flux_invvars
    # Sources that were out of bounds (and hence never fit) keep their initial
    # fluxes; zero them out instead.
nm[nm_ivar == 0] = 0.
phot.set('flux_%s' % wband, nm.astype(np.float32))
phot.set('flux_ivar_%s' % wband, nm_ivar.astype(np.float32))
for k in fskeys:
phot.set(k + '_' + wband, fitstats.get(k, np.zeros(len(phot), np.float32)))
phot.set('nobs_%s' % wband, nexp)
phot.set('mjd_%s' % wband, mjd)
rtn = wphotduck()
rtn.phot = phot
rtn.models = None
rtn.maskmap = None
if get_models:
rtn.models = models
if get_masks:
rtn.maskmap = maskmap
return rtn
|
mod.rs
|
#![allow(unneeded_field_pattern)]
#![allow(private_in_public)]
use self::addressing_modes::NoTickMode;
use cpu::CPU;
use cpu::CYCLE_TABLE;
use cpu::IRQ_VECTOR;
use cpu::JitInterrupt;
use cpu::Registers;
use cpu::dispatcher::Dispatcher;
use cpu::nes_analyst::Analyst;
use cpu::nes_analyst::BlockAnalysis;
use cpu::nes_analyst::InstructionAnalysis;
use dynasmrt::{AssemblyOffset, DynamicLabel, DynasmApi, DynasmLabelApi, ExecutableBuffer};
use fnv::FnvHashMap;
use memory::MemSegment;
use std::mem;
use std::rc::Rc;
const CARRY: u8 = 0b0000_0001;
const ZERO: u8 = 0b0000_0010;
const SUPPRESS_IRQ: u8 = 0b0000_0100;
const DECIMAL: u8 = 0b0000_1000;
const BREAK: u8 = 0b0001_0000;
const OVERFLOW: u8 = 0b0100_0000;
const SIGN: u8 = 0b1000_0000;
const HIGH_BIT: u8 = 0b1000_0000;
const LOW_BIT: u8 = 0b0000_0001;
macro_rules! offset_of {
($ty:ty, $field:ident) => {
&(*(0 as *const $ty)).$field as *const _ as usize
}
}
macro_rules! offset_of_2 {
($ty:ty, $field:ident, $field2:ident) => {
&(*(0 as *const $ty)).$field.$field2 as *const _ as usize
}
}
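// Hedged note on the two macros above: they compute field byte offsets via the classic
// null-pointer idiom (form a reference to `field` within a struct "located" at address
// 0 and cast its address to usize). For a #[repr(C)] struct with fields (a: u8, x: u8),
// offset_of!(Ty, x) would evaluate to 1. Newer toolchains provide std::mem::offset_of!
// for the same purpose.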
pub struct ExecutableBlock {
offset: AssemblyOffset,
buffer: Rc<ExecutableBuffer>,
}
impl ExecutableBlock {
pub fn call(&self, cpu: &mut CPU) {
let cpu: *mut CPU = cpu as _;
let ptr = self.get_ptr();
let f: fn(*mut CPU, *mut [u8; 0x800]) -> () = unsafe { mem::transmute(ptr) };
let ram = unsafe { &mut (*cpu).ram };
trampoline_to_nes(f, cpu, ram);
}
pub fn get_ptr(&self) -> *const u8 {
self.buffer.ptr(self.offset)
}
}
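// Usage sketch (illustrative): `compile` below returns blocks keyed by entry address,
// so a dispatcher would run one roughly like
//
//     let blocks = compile(0x8000, &mut cpu, &mut dispatcher);
//     blocks[&0x8000].call(&mut cpu);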
pub fn compile(
addr: u16,
cpu: &mut CPU,
dispatcher: &mut Dispatcher,
) -> FnvHashMap<u16, ExecutableBlock> {
let analysis = Analyst::new(cpu).analyze(addr);
Compiler::new(cpu, dispatcher, analysis).compile_block()
}
// rcx and sub-sections thereof are the general-purpose scratch register.
// Sometimes r8 and rax are used as scratch registers as well
dynasm!(this
; .alias cpu, rbx
; .alias ram, rdx
; .alias arg, r8b
; .alias arg_w, r8w
; .alias n_a, r9b
; .alias n_x, r10b
; .alias n_y, r11b
; .alias n_p, r12b
; .alias n_sp, r13b
; .alias n_pc, r14w
; .alias cyc, r15
);
macro_rules! load_registers {
($this:ident) => {{
dynasm!($this.asm
; lea rcx, cpu => CPU.regs
; xor r8, r8
; movzx r9, BYTE rcx => Registers.a
; movzx r10, BYTE rcx => Registers.x
; movzx r11, BYTE rcx => Registers.y
; movzx r12, BYTE rcx => Registers.p
; movzx r13, BYTE rcx => Registers.sp
; movzx r14, WORD rcx => Registers.pc
; mov cyc, QWORD cpu => CPU.cycle
);
}};
}
macro_rules! store_registers {
($this:ident) => {{
dynasm!($this.asm
; lea rcx, cpu => CPU.regs
; mov BYTE rcx => Registers.a, n_a
; mov BYTE rcx => Registers.x, n_x
; mov BYTE rcx => Registers.y, n_y
; mov BYTE rcx => Registers.p, n_p
; mov BYTE rcx => Registers.sp, n_sp
; mov WORD rcx => Registers.pc, n_pc
; mov QWORD cpu => CPU.cycle, cyc
);
}};
}
#[allow(unused_macros)]
macro_rules! debug_call {
($this:ident, $func:ident) => {dynasm!($this.asm
; mov n_pc, WORD $this.pc as _
;; store_registers!($this)
; push rcx
; push rdx
; push r9
; push r10
; push r11
; mov rax, QWORD $func as _
; sub rsp, 0x20
; call rax
; add rsp, 0x20
; pop r11
; pop r10
; pop r9
; pop rdx
; pop rcx
);};
}
#[cfg(feature = "debug_features")]
macro_rules! call_trace {
($this:ident) => {dynasm!($this.asm
; mov n_pc, WORD $this.pc as _
;; store_registers!($this)
; push rax
; push rcx
; push rdx
; push r9
; push r10
; push r11
; mov rax, QWORD ::cpu::x86_64_compiler::trace as _
; mov rcx, rbx //Pointer to CPU is first arg
; sub rsp, 0x28
; call rax
; add rsp, 0x28
; pop r11
; pop r10
; pop r9
; pop rdx
; pop rcx
; pop rax
);};
}
#[cfg(not(feature = "debug_features"))]
macro_rules! call_trace {
($this:ident) => {};
}
#[cfg(feature = "debug_features")]
pub extern "win64" fn trace(cpu: *mut CPU) {
unsafe { (*cpu).trace() }
}
macro_rules! call_naked {
($this:ident, $addr:expr) => {dynasm!($this.asm
; mov rax, QWORD $addr as _
; call rax
);};
}
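/// Bridges from Rust into JIT-compiled code: loads the emulated registers out
/// of the `CPU` struct into the pinned host registers described above, calls
/// the generated block, then writes the (possibly updated) state back.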
fn trampoline_to_nes(
f: fn(*mut CPU, *mut [u8; 0x800]) -> (),
cpu: *mut CPU,
ram: *mut [u8; 0x800],
) {
unsafe {
asm!(
"
        ${:comment} Receive the function pointer, CPU and RAM
mov rax, $0
mov rbx, $1
mov rdx, $2
${:comment} Load registers, etc. from struct
xor r8, r8
movzx r9, byte ptr [rbx+$3]
movzx r10, byte ptr [rbx+$4]
movzx r11, byte ptr [rbx+$5]
movzx r12, byte ptr [rbx+$6]
movzx r13, byte ptr [rbx+$7]
movzx r14, word ptr [rbx+$8]
mov r15, qword ptr [rbx+$9]
${:comment} Call generated code block
call rax
${:comment} Store registers, etc. back in struct
mov qword ptr [rbx+$9], r15
mov word ptr [rbx+$8], r14w
mov byte ptr [rbx+$7], r13b
mov byte ptr [rbx+$6], r12b
mov byte ptr [rbx+$5], r11b
mov byte ptr [rbx+$4], r10b
mov byte ptr [rbx+$3], r9b
"
:
: "r"(f),
"r"(cpu),
"r"(ram),
"n"(offset_of_2!(CPU, regs, a))
"n"(offset_of_2!(CPU, regs, x))
"n"(offset_of_2!(CPU, regs, y))
"n"(offset_of_2!(CPU, regs, p))
"n"(offset_of_2!(CPU, regs, sp))
"n"(offset_of_2!(CPU, regs, pc))
"n"(offset_of!(CPU, cycle))
: "rax", "rbx", "rdx", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
: "intel", "alignstack"
)
}
}
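// The two #[naked] helpers below are shared, out-of-line flag routines called
// from generated code via `call_naked!`. They follow the JIT's internal
// register convention rather than a normal ABI: the value to test arrives in
// r8b (the `arg` alias) and the status byte lives in r12b (`n_p`).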
#[naked]
extern "C" fn set_zero_flag() {
unsafe {
asm!("
cmp r8b, 0
jz 1f
and r12b, 0FDH
ret
1:
or r12b, 2H
"
:
:
: "r12"
: "intel");
};
}
#[naked]
extern "C" fn set_sign_flag() {
unsafe {
asm!("
test r8b, 80H
jz 1f
or r12b, 80H
ret
1:
and r12b, 7FH
"
:
:
: "r12"
: "intel");
};
}
#[macro_use]
mod addressing_modes;
use self::addressing_modes::AddressingMode;
struct Compiler<'a> {
asm: ::dynasmrt::x64::Assembler,
cpu: &'a mut CPU,
dispatcher: &'a mut Dispatcher,
analysis: BlockAnalysis,
entry_point: u16,
pc: u16,
current_instruction: u16,
current_instr_analysis: InstructionAnalysis,
branch_targets: FnvHashMap<u16, DynamicLabel>,
}
impl<'a> Compiler<'a> {
fn new(
cpu: &'a mut CPU,
dispatcher: &'a mut Dispatcher,
analysis: BlockAnalysis,
) -> Compiler<'a> {
let entry_point = analysis.entry_point;
Compiler {
asm: ::dynasmrt::x64::Assembler::new(),
cpu: cpu,
dispatcher: dispatcher,
analysis: analysis,
entry_point: entry_point,
pc: entry_point,
current_instruction: entry_point,
current_instr_analysis: Default::default(),
branch_targets: FnvHashMap::default(),
}
}
fn compile_block(mut self) -> FnvHashMap<u16, ExecutableBlock> {
let mut addr_to_offset = FnvHashMap::default();
while self.pc <= self.analysis.exit_point {
self.current_instruction = self.pc;
let temp = self.current_instruction;
addr_to_offset.insert(temp, self.asm.offset());
self.current_instr_analysis = self.analysis.instructions.get(&temp).unwrap().clone();
self.emit_branch_target();
self.check_for_interrupt();
if self.cpu.settings.trace_cpu {
call_trace!(self);
}
let opcode = self.read_incr_pc();
self.emit_cycle_count(opcode);
decode_opcode!(opcode, self);
}
let buffer = Rc::new(self.asm.finalize().unwrap());
let result: FnvHashMap<_, _> = addr_to_offset
.iter()
.map(|(addr, offset)| {
(
*addr,
ExecutableBlock {
offset: offset.clone(),
buffer: buffer.clone(),
},
)
})
.collect();
result
}
fn emit_branch_target(&mut self) {
if self.current_instr_analysis.is_branch_target {
let temp_pc = self.current_instruction;
let target_label = self.get_dynamic_label(temp_pc);
dynasm!{self.asm
; => target_label
}
}
}
fn emit_cycle_count(&mut self, opcode: u8) {
let cycles = CYCLE_TABLE[opcode as usize];
dynasm!(self.asm
; add cyc, cycles as _
)
}
fn check_for_interrupt(&mut self) {
dynasm!{self.asm
; lea rcx, cpu => CPU.interrupt
; mov rcx, rcx => JitInterrupt.next_interrupt
; cmp cyc, rcx
; jnae >next
// If the next_interrupt is zero, assume that other code has already updated the
// program counter and don't overwrite it.
; test rcx, rcx
; mov rcx, WORD self.pc as _
; cmovnz n_pc, cx
; ret
; next:
}
}
// Stores
fn stx<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; mov arg, n_x
;; mode.write_from_arg(self)
}
}
fn sty<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; mov arg, n_y
;; mode.write_from_arg(self)
}
}
fn sta<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; mov arg, n_a
;; mode.write_from_arg(self)
}
}
// Loads
fn ldx<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; mov n_x, arg
;; self.set_sign_zero_from_arg()
}
}
fn lda<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; mov n_a, arg
;; self.set_sign_zero_from_arg()
}
}
fn ldy<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; mov n_y, arg
;; self.set_sign_zero_from_arg()
}
}
// Logic/Math Ops
fn bit<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, false)
}
// Set the sign flag
if self.current_instr_analysis.sign_flag_used {
call_naked!(self, set_sign_flag);
}
if self.current_instr_analysis.overflow_flag_used {
dynasm!{self.asm
//Set the overflow flag
; test arg, BYTE 0b0100_0000
; jz >clear
; or n_p, BYTE OVERFLOW as _
; jmp >next
; clear:
; and n_p, BYTE (!OVERFLOW) as _
; next:
}
}
if self.current_instr_analysis.zero_flag_used {
dynasm!{self.asm
//Set the zero flag
; and arg, n_a
;; call_naked!(self, set_zero_flag)
}
}
}
fn and<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; and arg, n_a
;; self.set_sign_zero_from_arg()
; mov n_a, arg
}
}
fn ora<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; or arg, n_a
;; self.set_sign_zero_from_arg()
; mov n_a, arg
}
}
fn eor<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
; xor arg, n_a
;; self.set_sign_zero_from_arg()
; mov n_a, arg
}
}
fn adc<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; xor r8, r8
;; mode.read_to_arg(self, true)
;; self.do_adc()
}
}
fn sbc<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; xor r8, r8
;; mode.read_to_arg(self, true)
; not arg
;; self.do_adc()
}
}
fn do_adc(&mut self) {
dynasm!{self.asm
; dec rsp
; mov [rsp], arg //Save original arg
; add r8w, r9w //Add arg + a
; test n_p, CARRY as _
; jz >next
; inc r8w // add the carry flag
; next:
}
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
//Set carry based on result
; cmp r8w, 0xFF
; ja >set_carry
; and n_p, (!CARRY) as _
; jmp >next
; set_carry:
; or n_p, CARRY as _
; next:
}
}
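        // Signed overflow (V) is set only when the accumulator and the
        // original operand agree in sign while the 8-bit result differs; the
        // copy of the operand saved on the stack makes that check possible
        // after `arg` has been overwritten with the sum.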
if self.current_instr_analysis.overflow_flag_used {
dynasm!{self.asm
//Calculate the overflow flag
; mov al, n_a
; xor al, [rsp]
; test al, BYTE HIGH_BIT as _
; jnz >clear_overflow
; mov al, n_a
; xor al, arg
; test al, BYTE HIGH_BIT as _
; jz >clear_overflow
; or n_p, OVERFLOW as _
; jmp >next
; clear_overflow:
; and n_p, (!OVERFLOW) as _
; next:
}
}
dynasm!{self.asm
; mov n_a, arg
; inc rsp
;; self.set_sign_zero_from_arg()
}
}
fn cmp<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, true);
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; cmp n_a, arg
; jb >clear
; or n_p, BYTE CARRY as _
; jmp >next
; clear:
; and n_p, BYTE (!CARRY) as _
; next:
}
}
dynasm!{self.asm
; mov cl, n_a
; sub cl, arg
; mov arg, cl
;; self.set_sign_zero_from_arg()
}
}
fn cpx<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, false);
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; cmp n_x, arg
; jb >clear
; or n_p, BYTE CARRY as _
; jmp >next
; clear:
; and n_p, BYTE (!CARRY) as _
; next:
}
}
dynasm!{self.asm
; mov cl, n_x
; sub cl, arg
; mov arg, cl
;; self.set_sign_zero_from_arg()
}
}
fn cpy<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, false);
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; cmp n_y, arg
; jb >clear
; or n_p, BYTE CARRY as _
; jmp >next
; clear:
; and n_p, BYTE (!CARRY) as _
; next:
}
}
dynasm!{self.asm
; mov cl, n_y
; sub cl, arg
; mov arg, cl
;; self.set_sign_zero_from_arg()
}
}
fn inc<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, false)
; inc arg
;; self.set_sign_zero_from_arg()
;; mode.write_from_arg(self)
}
}
fn iny(&mut self) {
dynasm!{self.asm
; inc n_y
; mov arg, n_y
;; self.set_sign_zero_from_arg()
}
}
fn inx(&mut self) {
dynasm!{self.asm
; inc n_x
; mov arg, n_x
;; self.set_sign_zero_from_arg()
}
}
fn dec<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, false)
; dec arg
;; self.set_sign_zero_from_arg()
;; mode.write_from_arg(self)
}
}
fn dey(&mut self) {
dynasm!{self.asm
; dec n_y
; mov arg, n_y
;; self.set_sign_zero_from_arg()
}
}
fn dex(&mut self) {
dynasm!{self.asm
; dec n_x
; mov arg, n_x
;; self.set_sign_zero_from_arg()
}
}
fn lsr<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, false);
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; test arg, BYTE 0x01
; jz >clear_carry
; or n_p, CARRY as _
; jmp >next
; clear_carry:
; and n_p, (!CARRY) as _
; next:
}
}
dynasm!{self.asm
; shr arg, BYTE 1
;; self.set_sign_zero_from_arg()
;; mode.write_from_arg(self)
}
}
fn asl<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, false);
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; test arg, BYTE HIGH_BIT as _
; jz >clear_carry
; or n_p, CARRY as _
; jmp >next
; clear_carry:
; and n_p, (!CARRY) as _
; next:
}
}
dynasm!{self.asm
; shl arg, BYTE 1
;; self.set_sign_zero_from_arg()
;; mode.write_from_arg(self)
}
}
fn ror<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, false)
; mov al, arg //save original arg
; shr arg, BYTE 1
; test n_p, CARRY as _
; jz >next
; or arg, BYTE HIGH_BIT as _
; next:
}
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; test al, BYTE LOW_BIT as _
; jz >clear_carry
; or n_p, CARRY as _
; jmp >next
; clear_carry:
; and n_p, (!CARRY) as _
; next:
}
}
self.set_sign_zero_from_arg();
mode.write_from_arg(self);
}
fn rol<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, false)
; mov al, arg //save original arg
; shl arg, BYTE 1
; test n_p, CARRY as _
; jz >next
; or arg, BYTE LOW_BIT as _
; next:
}
if self.current_instr_analysis.carry_flag_used {
dynasm!{self.asm
; test al, BYTE HIGH_BIT as _
; jz >clear_carry
; or n_p, CARRY as _
; jmp >next
; clear_carry:
; and n_p, (!CARRY) as _
; next:
}
}
self.set_sign_zero_from_arg();
mode.write_from_arg(self);
}
// Jumps
fn jmp(&mut self) {
let target = self.read_w_incr_pc();
let link = self.dispatcher
.lock_block(target, self.entry_point, self.cpu);
match link {
Some(block) => {
let ptr = block.get_ptr();
dynasm!(self.asm
; mov rax, QWORD ptr as _
; jmp rax
)
}
None => dynasm!(self.asm
; mov n_pc, WORD target as _
; ret
),
}
}
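    // Indirect JMP reproduces the 6502's page-wrap quirk: when the pointer
    // ends in $FF, the high byte is fetched from the start of the *same*
    // page, which is why only the low byte gets the wrapping increment below.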
fn jmpi(&mut self) {
let mut target = self.read_w_incr_pc();
if target <= 0x1FFF {
target %= 0x800;
}
let page = target & 0xFF00;
let page_idx = target as u8;
let lo_addr = target;
let hi_addr = page | page_idx.wrapping_add(1) as u16;
if target <= 0x1FFF {
dynasm!{self.asm
; mov al, BYTE [ram + lo_addr as _]
; mov ah, BYTE [ram + hi_addr as _]
; mov n_pc, ax
}
} else {
self.jmpi_slow(lo_addr, hi_addr);
}
dynasm!{self.asm
; ret
}
}
fn jmpi_slow(&mut self, lo_addr: u16, hi_addr: u16) {
dynasm!{self.asm
; mov rdx, QWORD hi_addr as _
;; call_read!(self)
; mov al, arg
; mov ah, al
; mov rdx, QWORD lo_addr as _
;; call_read!(self)
; mov al, arg
; mov n_pc, ax
}
}
fn jsr(&mut self) {
let target = self.read_w_incr_pc();
let ret_addr = self.pc - 1;
self.stack_push_w(ret_addr);
let link = self.dispatcher
.lock_block(target, self.entry_point, self.cpu);
match link {
Some(block) => {
let ptr = block.get_ptr();
dynasm!(self.asm
; mov rax, QWORD ptr as _
; jmp rax
)
}
None => dynasm!(self.asm
; mov n_pc, WORD target as _
; ret
),
}
}
fn rts(&mut self) {
dynasm!{self.asm
; add n_sp, BYTE 2
; mov ax, WORD [ram + r13 + 0xFF]
; inc ax
; mov n_pc, ax
; ret
}
}
fn rti(&mut self) {
dynasm!{self.asm
; mov n_p, BYTE [ram + r13 + 0x101]
; inc n_sp
; or n_p, BYTE 0b0010_0000
; add n_sp, BYTE 2
; mov n_pc, WORD [ram + r13 + 0xFF]
; ret
}
}
fn brk(&mut self) {
let return_addr = self.pc - 1;
let target = self.cpu.read_w(IRQ_VECTOR);
dynasm!{ self.asm
; mov n_pc, target as _
;; self.stack_push_w(return_addr)
; mov arg, n_p
; or arg, BYTE 0b0011_0000
; dec n_sp
; mov BYTE [ram + r13 + 0x101], arg
; ret
}
}
fn unsupported(&mut self, _: u8) {
dynasm!(self.asm
; ret
)
}
fn unofficial(&self) {}
// Branches
fn bcs(&mut self) {
dynasm!{self.asm
; test n_p, CARRY as _
; jz >next
;; self.branch()
; next:
}
}
fn bcc(&mut self) {
dynasm!{self.asm
; test n_p, CARRY as _
; jnz >next
;; self.branch()
; next:
}
}
fn beq(&mut self) {
dynasm!{self.asm
; test n_p, ZERO as _
; jz >next
;; self.branch()
; next:
}
}
fn bne(&mut self) {
dynasm!{self.asm
; test n_p, ZERO as _
; jnz >next
;; self.branch()
; next:
}
}
fn bvs(&mut self) {
dynasm!{self.asm
; test n_p, OVERFLOW as _
; jz >next
;; self.branch()
; next:
}
}
fn bvc(&mut self) {
dynasm!{self.asm
; test n_p, OVERFLOW as _
; jnz >next
;; self.branch()
; next:
}
}
fn bmi(&mut self) {
dynasm!{self.asm
; test n_p, SIGN as _
; jz >next
;; self.branch()
; next:
}
}
fn bpl(&mut self) {
dynasm!{self.asm
; test n_p, SIGN as _
; jnz >next
;; self.branch()
; next:
}
}
fn branch(&mut self) {
let (target, cycle) = self.get_branch_target();
dynasm! {self.asm
; inc cyc
;; self.branch_page_cycle(cycle)
}
if self.analysis.instructions.contains_key(&target) {
// Target is an instruction in this block
let target_label = self.get_dynamic_label(target);
dynasm!{self.asm
; jmp =>target_label
}
} else {
// Target may be before this block, or misaligned with the instructions in this
// block. Either way, safest to treat it as a conditional JMP.
let link = self.dispatcher
.lock_block(target, self.entry_point, self.cpu);
match link {
Some(block) => {
let ptr = block.get_ptr();
dynasm!(self.asm
; mov rax, QWORD ptr as _
; jmp rax
)
}
None => dynasm!(self.asm
; mov n_pc, WORD target as _
; ret
),
}
}
}
// Stack
fn plp(&mut self) {
dynasm!{self.asm
; mov n_p, BYTE [ram + r13 + 0x101]
; inc n_sp
; or n_p, BYTE 0b0010_0000
; and n_p, BYTE (!BREAK) as _
}
}
fn php(&mut self) {
dynasm!{self.asm
; mov arg, n_p
; or arg, BYTE 0b0011_0000
; dec n_sp
; mov BYTE [ram + r13 + 0x101], arg
}
}
fn pla(&mut self) {
dynasm!{self.asm
; mov n_a, BYTE [ram + r13 + 0x101]
; inc n_sp
; mov arg, n_a
;; self.set_sign_zero_from_arg()
}
}
fn pha(&mut self) {
dynasm!{self.asm
; dec n_sp
; mov BYTE [ram + r13 + 0x101], n_a
}
}
// Misc
fn nop(&mut self) {}
fn sec(&mut self) {
dynasm!{self.asm
; or n_p, BYTE CARRY as _
}
}
fn clc(&mut self) {
dynasm!{self.asm
; and n_p, BYTE (!CARRY) as _
}
}
fn sei(&mut self) {
dynasm!{self.asm
; or n_p, BYTE SUPPRESS_IRQ as _
}
}
fn cli(&mut self) {
dynasm!{self.asm
; and n_p, BYTE (!SUPPRESS_IRQ) as _
}
}
fn sed(&mut self) {
dynasm!{self.asm
; or n_p, BYTE DECIMAL as _
}
}
fn cld(&mut self) {
dynasm!{self.asm
; and n_p, BYTE (!DECIMAL) as _
}
}
fn clv(&mut self) {
dynasm!{self.asm
; and n_p, BYTE (!OVERFLOW) as _
}
}
fn tax(&mut self) {
dynasm!{self.asm
; mov n_x, n_a
; mov arg, n_a
;; self.set_sign_zero_from_arg()
}
}
fn tay(&mut self) {
dynasm!{self.asm
; mov n_y, n_a
; mov arg, n_a
;; self.set_sign_zero_from_arg()
}
}
fn tsx(&mut self) {
dynasm!{self.asm
; mov n_x, n_sp
; mov arg, n_sp
;; self.set_sign_zero_from_arg()
}
}
fn txa(&mut self) {
dynasm!{self.asm
; mov n_a, n_x
; mov arg, n_x
;; self.set_sign_zero_from_arg()
}
}
fn txs(&mut self) {
dynasm!{self.asm
; mov n_sp, n_x
}
}
    fn tya(&mut self) {
        dynasm!{self.asm
            ; mov n_a, n_y
            ; mov arg, n_y
            ;; self.set_sign_zero_from_arg()
        }
    }
// Unofficial instructions
fn u_nop<M: AddressingMode>(&mut self, mode: M) {
mode.read_to_arg(self, true);
}
fn lax<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
;; mode.read_to_arg(self, true)
;; self.set_sign_zero_from_arg()
; mov n_a, arg
; mov n_x, arg
}
}
fn sax<M: AddressingMode>(&mut self, mode: M) {
dynasm!{self.asm
; mov arg, n_a
; and arg, n_x
;; mode.write_from_arg(self)
}
}
fn dcp<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.dec(mode);
self.cmp(mode);
}
fn isc<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.inc(mode);
self.sbc(mode);
}
fn slo<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.asl(mode);
self.ora(mode);
}
fn rla<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.rol(mode);
self.and(mode);
}
fn sre<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.lsr(mode);
self.eor(mode);
}
fn rra<M: AddressingMode>(&mut self, mode: M) {
let mode = NoTickMode { mode: mode };
self.ror(mode);
self.adc(mode);
}
fn kil(&mut self) {
dynasm!{self.asm
; mov BYTE cpu => CPU.halted, BYTE true as _
; ret
}
}
fn stack_push_w(&mut self, val: u16) {
let low = (val & 0x00FF) as u8;
let high = ((val & 0xFF00) >> 8) as u8;
dynasm!( self.asm
; sub n_sp, BYTE 2
; mov BYTE [ram + r13 + 0x101], BYTE low as _
; mov BYTE [ram + r13 + 0x102], BYTE high as _
)
}
fn set_sign_zero_from_arg(&mut self) {
if self.current_instr_analysis.zero_flag_used {
call_naked!(self, set_zero_flag);
}
if self.current_instr_analysis.sign_flag_used {
call_naked!(self, set_sign_flag);
}
}
fn relative_addr(&self, disp: u8) -> u16 {
let disp = (disp as i8) as i16; // We want to sign-extend here.
let pc = self.pc as i16;
pc.wrapping_add(disp) as u16
}
fn read_incr_pc(&mut self) -> u8 {
let pc = self.pc;
let val: u8 = self.cpu.read(pc);
self.pc = self.pc.wrapping_add(1);
val
}
fn read_w_incr_pc(&mut self) -> u16 {
self.read_incr_pc() as u16 | ((self.read_incr_pc() as u16) << 8)
}
fn get_branch_target(&mut self) -> (u16, bool) {
let arg = self.read_incr_pc();
let target = self.relative_addr(arg);
let do_page_cycle = (self.pc & 0xFF00) != (target & 0xFF00);
(target, do_page_cycle)
}
fn branch_page_cycle(&mut self, do_page_cycle: bool) {
if do_page_cycle {
dynasm!{self.asm
; inc cyc
}
}
}
fn get_dynamic_label(&mut self, address: u16) -> DynamicLabel {
match self.branch_targets.get(&address).cloned() {
Some(label) => label,
None => {
let label = self.asm.new_dynamic_label();
self.branch_targets.insert(address, label);
label
}
}
}
}
|
test_sqldb.py
|
# Copyright 2019 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SQLDB specific tests, common tests should be in test_dbs.py"""
from collections import defaultdict
from contextlib import contextmanager
from datetime import datetime, timedelta
from unittest.mock import Mock
import pytest
from sqlalchemy.orm import Session
from mlrun.api.db.sqldb.db import SQLDB
from mlrun.api.db.sqldb.models import _tagged
from tests.conftest import new_run
@contextmanager
def patch(obj, **kw):
old = {}
for k, v in kw.items():
old[k] = getattr(obj, k)
setattr(obj, k, v)
try:
yield obj
finally:
for k, v in old.items():
setattr(obj, k, v)
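# Typical usage (mirroring test_cache_projects below): temporarily swap an
# attribute for the length of a block, e.g.
#
#     with patch(db, add_project=Mock()):
#         ...  # db.add_project is the Mock here; the original is restored on exit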
def test_list_artifact_tags(db: SQLDB, db_session: Session):
db.store_artifact(db_session, "k1", {}, "1", tag="t1", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p1")
db.store_artifact(db_session, "k1", {}, "2", tag="t2", project="p2")
tags = db.list_artifact_tags(db_session, "p1")
assert {"t1", "t2"} == set(tags), "bad tags"
def test_list_artifact_date(db: SQLDB, db_session: Session):
t1 = datetime(2020, 2, 16)
t2 = t1 - timedelta(days=7)
t3 = t2 - timedelta(days=7)
prj = "p7"
db.store_artifact(db_session, "k1", {"updated": t1}, "u1", project=prj)
db.store_artifact(db_session, "k2", {"updated": t2}, "u2", project=prj)
db.store_artifact(db_session, "k3", {"updated": t3}, "u3", project=prj)
arts = db.list_artifacts(db_session, project=prj, since=t3, tag="*")
assert 3 == len(arts), "since t3"
arts = db.list_artifacts(db_session, project=prj, since=t2, tag="*")
assert 2 == len(arts), "since t2"
arts = db.list_artifacts(
db_session, project=prj, since=t1 + timedelta(days=1), tag="*"
)
assert not arts, "since t1+"
arts = db.list_artifacts(db_session, project=prj, until=t2, tag="*")
assert 2 == len(arts), "until t2"
arts = db.list_artifacts(db_session, project=prj, since=t2, until=t2, tag="*")
assert 1 == len(arts), "since/until t2"
def test_list_projects(db: SQLDB, db_session: Session):
for i in range(10):
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
db.store_run(db_session, run, "u7", project=f"prj{i % 3}", iter=i)
assert {"prj0", "prj1", "prj2"} == {p.name for p in db.list_projects(db_session)}
def test_run_iter0(db: SQLDB, db_session: Session):
uid, prj = "uid39", "lemon"
run = new_run("s1", {"l1": "v1", "l2": "v2"}, x=1)
for i in range(7):
db.store_run(db_session, run, uid, prj, i)
db._get_run(db_session, uid, prj, 0) # See issue 140
def test_artifacts_latest(db: SQLDB, db_session: Session):
k1, u1, art1 = "k1", "u1", {"a": 1}
prj = "p38"
db.store_artifact(db_session, k1, art1, u1, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert art1["a"] == arts[0]["a"], "bad artifact"
u2, art2 = "u2", {"a": 17}
db.store_artifact(db_session, k1, art2, u2, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 2 == len(arts), "count"
assert art2["a"] == arts[1]["a"], "bad artifact"
k2, u3, art3 = "k2", "u3", {"a": 99}
db.store_artifact(db_session, k2, art3, u3, project=prj)
arts = db.list_artifacts(db_session, project=prj, tag="latest")
assert 3 == len(arts), "number"
assert {1, 17, 99} == set(art["a"] for art in arts), "latest"
@pytest.mark.parametrize("cls", _tagged)
def test_tags(db: SQLDB, db_session: Session, cls):
    p1, n1 = "prj1", "name1"
    obj1, obj2, obj3 = cls(), cls(), cls()
    db_session.add(obj1)
    db_session.add(obj2)
    db_session.add(obj3)
    db_session.commit()
    db.tag_objects(db_session, [obj1, obj2], p1, n1)
    objs = db.find_tagged(db_session, p1, n1)
    assert {obj1, obj2} == set(objs), "find tags"
    db.del_tag(db_session, p1, n1)
    objs = db.find_tagged(db_session, p1, n1)
    assert [] == objs, "find tags after del"
def _tag_objs(db: SQLDB, db_session: Session, count, project, tags):
by_tag = defaultdict(list)
for i in range(count):
cls = _tagged[i % len(_tagged)]
obj = cls()
by_tag[tags[i % len(tags)]].append(obj)
db_session.add(obj)
db_session.commit()
for tag, objs in by_tag.items():
db.tag_objects(db_session, objs, project, tag)
def test_list_tags(db: SQLDB, db_session: Session):
p1, tags1 = "prj1", ["a", "b", "c"]
_tag_objs(db, db_session, 17, p1, tags1)
p2, tags2 = "prj2", ["b", "c", "d", "e"]
_tag_objs(db, db_session, 11, p2, tags2)
tags = db.list_tags(db_session, p1)
assert set(tags) == set(tags1), "tags"
def test_projects(db: SQLDB, db_session: Session):
prj1 = {
"name": "p1",
"description": "banana",
# 'users': ['u1', 'u2'],
"spec": {"company": "ACME"},
"state": "active",
"created": datetime.now(),
}
pid1 = db.add_project(db_session, prj1)
p1 = db.get_project(db_session, project_id=pid1)
assert p1, f"project {pid1} not found"
out = {
"name": p1.name,
"description": p1.description,
# 'users': sorted(u.name for u in p1.users),
"spec": p1.spec,
"state": p1.state,
"created": p1.created,
}
assert prj1 == out, "bad project"
data = {"description": "lemon"}
db.update_project(db_session, p1.name, data)
p1 = db.get_project(db_session, project_id=pid1)
assert data["description"] == p1.description, "bad update"
prj2 = {"name": "p2"}
db.add_project(db_session, prj2)
prjs = {p.name for p in db.list_projects(db_session)}
assert {prj1["name"], prj2["name"]} == prjs, "list"
def test_cache_projects(db: SQLDB, db_session: Session):
assert 0 == len(db._projects), "empty cache"
name = "prj348"
db.add_project(db_session, {"name": name})
assert {name} == db._projects, "project"
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name + "-new")
mock.assert_called_once()
project_2_name = "project-2"
db.add_project(db_session, {"name": project_2_name})
db._projects = set()
mock = Mock()
with patch(db, add_project=mock):
db._ensure_project(db_session, name)
mock.assert_not_called()
# def test_function_latest(db: SQLDB, db_session: Session):
# fn1, t1 = {'x': 1}, 'u83'
# fn2, t2 = {'x': 2}, 'u23'
# prj, name = 'p388', 'n3023'
# db.store_function(db_session, fn1, name, prj, t1)
# db.store_function(db_session, fn2, name, prj, t2)
#
# fn = db.get_function(db_session, name, prj, 'latest')
# assert fn2 == fn, 'latest'
|
update_service_command_test.go
|
package isolated
import (
"code.cloudfoundry.org/cli/api/cloudcontroller/ccversion"
"code.cloudfoundry.org/cli/integration/helpers"
"code.cloudfoundry.org/cli/integration/helpers/fakeservicebroker"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
. "github.com/onsi/gomega/gbytes"
. "github.com/onsi/gomega/gexec"
)
var _ = Describe("update-service command", func() {
Describe("help", func() {
When("--help flag is set", func() {
It("displays command usage to output", func() {
session := helpers.CF("update-service", "--help")
Eventually(session).Should(Say("NAME:"))
Eventually(session).Should(Say(`\s+update-service - Update a service instance`))
Eventually(session).Should(Say(`USAGE:`))
Eventually(session).Should(Say(`\s+cf update-service SERVICE_INSTANCE \[-p NEW_PLAN\] \[-c PARAMETERS_AS_JSON\] \[-t TAGS\] \[--upgrade\]`))
Eventually(session).Should(Say(`\s+Optionally provide service-specific configuration parameters in a valid JSON object in-line:`))
Eventually(session).Should(Say(`\s+cf update-service SERVICE_INSTANCE -c '{\"name\":\"value\",\"name\":\"value\"}'`))
Eventually(session).Should(Say(`\s+Optionally provide a file containing service-specific configuration parameters in a valid JSON object\.`))
Eventually(session).Should(Say(`\s+The path to the parameters file can be an absolute or relative path to a file:`))
Eventually(session).Should(Say(`\s+cf update-service SERVICE_INSTANCE -c PATH_TO_FILE`))
Eventually(session).Should(Say(`\s+Example of valid JSON object:`))
Eventually(session).Should(Say(`\s+{`))
Eventually(session).Should(Say(`\s+\"cluster_nodes\": {`))
Eventually(session).Should(Say(`\s+\"count\": 5,`))
Eventually(session).Should(Say(`\s+\"memory_mb\": 1024`))
Eventually(session).Should(Say(`\s+}`))
Eventually(session).Should(Say(`\s+}`))
Eventually(session).Should(Say(`\s+ Optionally provide a list of comma-delimited tags that will be written to the VCAP_SERVICES environment variable for any bound applications.`))
Eventually(session).Should(Say(`EXAMPLES:`))
Eventually(session).Should(Say(`\s+cf update-service mydb -p gold`))
Eventually(session).Should(Say(`\s+cf update-service mydb -c '{\"ram_gb\":4}'`))
Eventually(session).Should(Say(`\s+cf update-service mydb -c ~/workspace/tmp/instance_config.json`))
Eventually(session).Should(Say(`\s+cf update-service mydb -t "list, of, tags"`))
Eventually(session).Should(Say(`\s+cf update-service mydb --upgrade`))
Eventually(session).Should(Say(`\s+cf update-service mydb --upgrade --force`))
Eventually(session).Should(Say(`OPTIONS:`))
Eventually(session).Should(Say(`\s+-c\s+Valid JSON object containing service-specific configuration parameters, provided either in-line or in a file\. For a list of supported configuration parameters, see documentation for the particular service offering\.`))
Eventually(session).Should(Say(`\s+-p\s+Change service plan for a service instance`))
Eventually(session).Should(Say(`\s+-t\s+User provided tags`))
Eventually(session).Should(Say(`\s+--upgrade, -u\s+Upgrade the service instance to the latest version of the service plan available. It cannot be combined with flags: -c, -p, -t.`))
Eventually(session).Should(Say(`\s+--force, -f\s+Force the upgrade to the latest available version of the service plan. It can only be used with: -u, --upgrade.`))
Eventually(session).Should(Say(`SEE ALSO:`))
Eventually(session).Should(Say(`\s+rename-service, services, update-user-provided-service`))
Eventually(session).Should(Exit(0))
})
})
})
When("the environment is not setup correctly", func() {
BeforeEach(func() {
helpers.SkipIfVersionLessThan(ccversion.MinVersionUpdateServiceInstanceMaintenanceInfoV2)
})
It("fails with the appropriate errors", func() {
// the upgrade flag is passed here to exercise a particular code path before refactoring
helpers.CheckEnvironmentTargetedCorrectly(true, true, ReadOnlyOrg, "update-service", "foo", "--upgrade")
})
})
When("an api is targeted, the user is logged in, and an org and space are targeted", func() {
var (
orgName string
)
BeforeEach(func() {
orgName = helpers.NewOrgName()
var spaceName = helpers.NewSpaceName()
helpers.SetupCF(orgName, spaceName)
})
AfterEach(func() {
helpers.QuickDeleteOrg(orgName)
})
When("there are no service instances", func() {
When("upgrading", func() {
BeforeEach(func() {
helpers.SkipIfVersionLessThan(ccversion.MinVersionUpdateServiceInstanceMaintenanceInfoV2)
})
It("displays an informative error before prompting and exits 1", func() {
session := helpers.CF("update-service", "non-existent-service", "--upgrade")
Eventually(session.Err).Should(Say("Service instance non-existent-service not found"))
Eventually(session).Should(Exit(1))
})
})
})
When("providing other arguments while upgrading", func() {
It("displays an informative error message and exits 1", func() {
session := helpers.CF("update-service", "irrelevant", "--upgrade", "-c", "{\"hello\": \"world\"}")
Eventually(session.Err).Should(Say("Incorrect Usage: The following arguments cannot be used together: --upgrade, -t, -c, -p"))
Eventually(session).Should(Say("FAILED"))
Eventually(session).Should(Say("USAGE:"))
Eventually(session).Should(Exit(1))
})
})
When("there is a service instance", func() {
var (
broker *fakeservicebroker.FakeServiceBroker
serviceInstanceName string
username string
)
BeforeEach(func() {
broker = fakeservicebroker.New().Register()
Eventually(helpers.CF("enable-service-access", broker.ServiceName())).Should(Exit(0))
serviceInstanceName = helpers.PrefixedRandomName("SI")
Eventually(helpers.CF("create-service", broker.ServiceName(), broker.ServicePlanName(), serviceInstanceName)).Should(Exit(0))
username, _ = helpers.GetCredentials()
})
AfterEach(func() {
Eventually(helpers.CF("delete-service", serviceInstanceName, "-f")).Should(Exit(0))
broker.Destroy()
})
When("updating to a service plan that does not exist", func() {
It("displays an informative error message, exits 1", func() {
session := helpers.CF("update-service", serviceInstanceName, "-p", "non-existing-service-plan")
Eventually(session).Should(Say("Plan does not exist for the %s service", broker.ServiceName()))
Eventually(session).Should(Exit(1))
})
			})
When("updating to the same service plan (no-op)", func() {
It("displays an informative success message, exits 0", func() {
session := helpers.CF("update-service", serviceInstanceName, "-p", broker.ServicePlanName())
Eventually(session).Should(Say("Updating service instance %s as %s...", serviceInstanceName, username))
Eventually(session).Should(Say("OK"))
Eventually(session).Should(Say("No changes were made"))
Eventually(session).Should(Exit(0))
})
})
When("upgrading", func() {
var buffer *Buffer
BeforeEach(func() {
buffer = NewBuffer()
})
When("the user provides --upgrade in an unsupported CAPI version", func() {
BeforeEach(func() {
helpers.SkipIfVersionAtLeast(ccversion.MinVersionUpdateServiceInstanceMaintenanceInfoV2)
})
It("should report that the version of CAPI is too low", func() {
session := helpers.CF("update-service", serviceInstanceName, "--upgrade")
Eventually(session.Err).Should(Say(`Option '--upgrade' requires CF API version %s or higher. Your target is 2\.\d+\.\d+`, ccversion.MinVersionUpdateServiceInstanceMaintenanceInfoV2))
Eventually(session).Should(Exit(1))
})
})
When("when CAPI supports service instance maintenance_info updates", func() {
BeforeEach(func() {
helpers.SkipIfVersionLessThan(ccversion.MinVersionUpdateServiceInstanceMaintenanceInfoV2)
})
When("cancelling the update", func() {
BeforeEach(func() {
_, err := buffer.Write([]byte("n\n"))
Expect(err).ToNot(HaveOccurred())
})
It("does not proceed", func() {
session := helpers.CFWithStdin(buffer, "update-service", serviceInstanceName, "--upgrade")
Eventually(session).Should(Say("You are about to update %s", serviceInstanceName))
Eventually(session).Should(Say("Warning: This operation may be long running and will block further operations on the service until complete."))
Eventually(session).Should(Say("Really update service %s\\? \\[yN\\]:", serviceInstanceName))
Eventually(session).Should(Say("Update cancelled"))
Eventually(session).Should(Exit(0))
})
})
When("proceeding with the update", func() {
BeforeEach(func() {
_, err := buffer.Write([]byte("y\n"))
Expect(err).ToNot(HaveOccurred())
})
When("upgrade is available", func() {
BeforeEach(func() {
broker.Services[0].Plans[0].MaintenanceInfo.Version = "9.1.2"
broker.Update()
})
It("updates the service", func() {
session := helpers.CFWithStdin(buffer, "update-service", serviceInstanceName, "--upgrade")
By("displaying an informative message")
Eventually(session).Should(Say("You are about to update %s", serviceInstanceName))
Eventually(session).Should(Say("Warning: This operation may be long running and will block further operations on the service until complete."))
Eventually(session).Should(Say("Really update service %s\\? \\[yN\\]:", serviceInstanceName))
Eventually(session).Should(Say("Updating service instance %s as %s...", serviceInstanceName, username))
Eventually(session).Should(Exit(0))
By("requesting an upgrade from the platform")
session = helpers.CF("service", serviceInstanceName)
Eventually(session).Should(Say("status:\\s+update succeeded"))
})
})
When("no upgrade is available", func() {
It("does not update the service and outputs informative message", func() {
session := helpers.CFWithStdin(buffer, "update-service", serviceInstanceName, "--upgrade")
Eventually(session).Should(Say("You are about to update %s", serviceInstanceName))
Eventually(session).Should(Say("Warning: This operation may be long running and will block further operations on the service until complete."))
Eventually(session).Should(Say("Really update service %s\\? \\[yN\\]:", serviceInstanceName))
Eventually(session).Should(Say("Updating service instance %s as %s...", serviceInstanceName, username))
Eventually(session.Err).Should(Say("No upgrade is available."))
Eventually(session.Err).Should(Say("TIP: To find out if upgrade is available run `cf service %s`.", serviceInstanceName))
Eventually(session).Should(Exit(1))
})
})
})
When("providing --force argument and upgrade is available", func() {
BeforeEach(func() {
broker.Services[0].Plans[0].MaintenanceInfo.Version = "9.1.2"
broker.Update()
})
It("updates the service without prompting", func() {
session := helpers.CFWithStdin(buffer, "update-service", serviceInstanceName, "--upgrade", "--force")
By("displaying an informative message")
Eventually(session).Should(Say("Updating service instance %s as %s...", serviceInstanceName, username))
Eventually(session).Should(Exit(0))
By("requesting an upgrade from the platform")
session = helpers.CF("service", serviceInstanceName)
Eventually(session).Should(Say("status:\\s+update succeeded"))
})
})
})
})
})
})
})
|
base.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import urllib
import time
import hashlib
import StringIO
import ssl
import os
import socket
import struct
from pipes import quote as pquote
import libcloud
from libcloud.httplib_ssl import LibcloudHTTPSConnection
from httplib import HTTPConnection as LibcloudHTTPConnection
class RawResponse(object):
def __init__(self, response=None):
self._status = None
self._response = None
self._headers = {}
self._error = None
self._reason = None
@property
def response(self):
if not self._response:
self._response = self.connection.connection.getresponse()
return self._response
@property
def status(self):
if not self._status:
self._status = self.response.status
return self._status
@property
def headers(self):
if not self._headers:
self._headers = dict(self.response.getheaders())
return self._headers
@property
def reason(self):
if not self._reason:
self._reason = self.response.reason
return self._reason
class Response(object):
"""
A Base Response class to derive from.
"""
NODE_STATE_MAP = {}
object = None
body = None
status = httplib.OK
headers = {}
error = None
connection = None
def __init__(self, response):
self.body = response.read()
self.status = response.status
self.headers = dict(response.getheaders())
self.error = response.reason
if not self.success():
raise Exception(self.parse_error())
self.object = self.parse_body()
def parse_body(self):
"""
Parse response body.
Override in a provider's subclass.
@return: Parsed body.
"""
return self.body
def parse_error(self):
"""
Parse the error messages.
Override in a provider's subclass.
@return: Parsed error.
"""
return self.body
def success(self):
"""
Determine if our request was successful.
The meaning of this can be arbitrary; did we receive OK status? Did
the node get created? Were we authenticated?
@return: C{True} or C{False}
"""
return self.status == httplib.OK or self.status == httplib.CREATED
#TODO: Move this to a better location/package
class LoggingConnection():
"""
Debug class to log all HTTP(s) requests as they could be made
with the C{curl} command.
@cvar log: file-like object that logs entries are written to.
"""
log = None
def _log_response(self, r):
rv = "# -------- begin %d:%d response ----------\n" % (id(self), id(r))
ht = ""
v = r.version
if r.version == 10:
v = "HTTP/1.0"
if r.version == 11:
v = "HTTP/1.1"
ht += "%s %s %s\r\n" % (v, r.status, r.reason)
body = r.read()
for h in r.getheaders():
ht += "%s: %s\r\n" % (h[0].title(), h[1])
ht += "\r\n"
# this is evil. laugh with me. ha arharhrhahahaha
class fakesock:
def __init__(self, s):
self.s = s
            def makefile(self, mode, foo):
return StringIO.StringIO(self.s)
rr = r
if r.chunked:
ht += "%x\r\n" % (len(body))
ht += body
ht += "\r\n0\r\n"
else:
ht += body
rr = httplib.HTTPResponse(fakesock(ht),
method=r._method,
debuglevel=r.debuglevel)
rr.begin()
rv += ht
rv += ("\n# -------- end %d:%d response ----------\n"
% (id(self), id(r)))
return (rr, rv)
def _log_curl(self, method, url, body, headers):
cmd = ["curl", "-i"]
cmd.extend(["-X", pquote(method)])
for h in headers:
cmd.extend(["-H", pquote("%s: %s" % (h, headers[h]))])
# TODO: in python 2.6, body can be a file-like object.
if body is not None and len(body) > 0:
cmd.extend(["--data-binary", pquote(body)])
cmd.extend([pquote("https://%s:%d%s" % (self.host, self.port, url))])
return " ".join(cmd)
class LoggingHTTPSConnection(LoggingConnection, LibcloudHTTPSConnection):
"""
Utility Class for logging HTTPS connections
"""
def getresponse(self):
r = LibcloudHTTPSConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
    def request(self, method, url, body=None, headers=None):
        headers = headers or {}
        headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
return LibcloudHTTPSConnection.request(self, method, url, body, headers)
class LoggingHTTPConnection(LoggingConnection, LibcloudHTTPConnection):
"""
Utility Class for logging HTTP connections
"""
def getresponse(self):
r = LibcloudHTTPConnection.getresponse(self)
if self.log is not None:
r, rv = self._log_response(r)
self.log.write(rv + "\n")
self.log.flush()
return r
    def request(self, method, url, body=None, headers=None):
        headers = headers or {}
        headers.update({'X-LC-Request-ID': str(id(self))})
if self.log is not None:
pre = "# -------- begin %d request ----------\n" % id(self)
self.log.write(pre +
self._log_curl(method, url, body, headers) + "\n")
self.log.flush()
return LibcloudHTTPConnection.request(self, method, url,
body, headers)
class ConnectionKey(object):
"""
A Base Connection class to derive from.
"""
#conn_classes = (LoggingHTTPSConnection)
conn_classes = (LibcloudHTTPConnection, LibcloudHTTPSConnection)
responseCls = Response
rawResponseCls = RawResponse
connection = None
host = '127.0.0.1'
port = (80, 443)
secure = 1
driver = None
action = None
def __init__(self, key, secure=True, host=None, force_port=None):
"""
Initialize `user_id` and `key`; set `secure` to an C{int} based on
passed value.
"""
self.key = key
self.secure = secure and 1 or 0
self.ua = []
if host:
self.host = host
if force_port:
self.port = (force_port, force_port)
def connect(self, host=None, port=None):
"""
Establish a connection with the API server.
@type host: C{str}
@param host: Optional host to override our default
@type port: C{int}
@param port: Optional port to override our default
@returns: A connection
"""
host = host or self.host
port = port or self.port[self.secure]
kwargs = {'host': host, 'port': port}
connection = self.conn_classes[self.secure](**kwargs)
        # You can uncomment this line if you set up a reverse proxy server
        # which proxies to your endpoint, and lets you easily capture
        # connections in cleartext when you set up the proxy to do SSL
        # for you
#connection = self.conn_classes[False]("127.0.0.1", 8080)
self.connection = connection
def _user_agent(self):
return 'libcloud/%s (%s)%s' % (
libcloud.__version__,
self.driver.name,
"".join([" (%s)" % x for x in self.ua]))
def user_agent_append(self, token):
"""
Append a token to a user agent string.
        Users of the library should call this to uniquely identify their requests
to a provider.
@type token: C{str}
@param token: Token to add to the user agent.
"""
self.ua.append(token)
def request(self,
action,
params=None,
data='',
headers=None,
method='GET',
raw=False):
"""
Request a given `action`.
Basically a wrapper around the connection
object's `request` that does some helpful pre-processing.
@type action: C{str}
@param action: A path
@type params: C{dict}
@param params: Optional mapping of additional parameters to send. If
None, leave as an empty C{dict}.
@type data: C{unicode}
@param data: A body of data to send with the request.
@type headers: C{dict}
@param headers: Extra headers to add to the request
None, leave as an empty C{dict}.
@type method: C{str}
@param method: An HTTP method such as "GET" or "POST".
@return: An instance of type I{responseCls}
"""
if params is None:
params = {}
if headers is None:
headers = {}
self.action = action
self.method = method
# Extend default parameters
params = self.add_default_params(params)
# Extend default headers
headers = self.add_default_headers(headers)
# We always send a content length and user-agent header
headers.update({'User-Agent': self._user_agent()})
headers.update({'Host': self.host})
# Encode data if necessary
        if data != '' and data is not None:
data = self.encode_data(data)
if data is not None:
headers.update({'Content-Length': str(len(data))})
if params:
url = '?'.join((action, urllib.urlencode(params)))
else:
url = action
# Removed terrible hack...this a less-bad hack that doesn't execute a
# request twice, but it's still a hack.
self.connect()
try:
# @TODO: Should we just pass File object as body to request method
# instead of dealing with splitting and sending the file ourselves?
if raw:
self.connection.putrequest(method, action)
for key, value in headers.iteritems():
self.connection.putheader(key, value)
self.connection.endheaders()
else:
self.connection.request(method=method, url=url, body=data,
headers=headers)
except ssl.SSLError, e:
raise ssl.SSLError(str(e))
if raw:
response = self.rawResponseCls()
else:
response = self.responseCls(self.connection.getresponse())
response.connection = self
return response
def add_default_params(self, params):
"""
Adds default parameters (such as API key, version, etc.)
to the passed `params`
Should return a dictionary.
"""
return params
def add_default_headers(self, headers):
"""
Adds default headers (such as Authorization, X-Foo-Bar)
to the passed `headers`
Should return a dictionary.
"""
return headers
def encode_data(self, data):
"""
Encode body data.
Override in a provider's subclass.
"""
return data
class ConnectionUserAndKey(ConnectionKey):
"""
Base connection which accepts a user_id and key
"""
user_id = None
def __init__(self, user_id, key, secure=True, host=None, port=None):
super(ConnectionUserAndKey, self).__init__(key, secure, host, port)
self.user_id = user_id
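# A minimal sketch of how a provider driver is expected to specialize these
# base classes (names are hypothetical, not part of libcloud itself):
#
#     class ExampleResponse(Response):
#         def parse_body(self):
#             return self.body.splitlines()
#
#     class ExampleConnection(ConnectionUserAndKey):
#         host = 'api.example.com'
#         responseCls = ExampleResponse
#
#         def add_default_headers(self, headers):
#             headers['Authorization'] = 'Basic %s' % self.key
#             return headers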
|
test_onset.py
|
#!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <[email protected]>
# unit tests for librosa.onset
from __future__ import print_function
import pytest
from contextlib2 import nullcontext as dnr
# Disable cache
import os
try:
    os.environ.pop("LIBROSA_CACHE_DIR")
except KeyError:
    pass
import warnings
import numpy as np
import librosa
from test_core import srand
__EXAMPLE_FILE = os.path.join("tests", "data", "test1_22050.wav")
@pytest.fixture(scope="module")
def ysr():
return librosa.load(__EXAMPLE_FILE)
@pytest.mark.parametrize(
"feature", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]
)
@pytest.mark.parametrize("n_fft", [512, 2048])
@pytest.mark.parametrize("hop_length", [256, 512])
@pytest.mark.parametrize("lag", [1, 2])
@pytest.mark.parametrize("max_size", [1, 2])
@pytest.mark.parametrize("detrend", [False, True])
@pytest.mark.parametrize("center", [False, True])
@pytest.mark.parametrize("aggregate", [None, np.mean, np.max])
def test_onset_strength_audio(
ysr, feature, n_fft, hop_length, lag, max_size, detrend, center, aggregate
):
y, sr = ysr
oenv = librosa.onset.onset_strength(
y=y,
sr=sr,
S=None,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length,
lag=lag,
max_size=max_size,
)
assert oenv.ndim == 1
S = librosa.feature.melspectrogram(y=y, n_fft=n_fft, hop_length=hop_length)
target_shape = S.shape[-1]
if not detrend:
assert np.all(oenv >= 0)
assert oenv.shape[-1] == target_shape
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badlag(ysr):
y, sr = ysr
librosa.onset.onset_strength(y=y, sr=sr, lag=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badmax(ysr):
y, sr = ysr
librosa.onset.onset_strength(y=y, sr=sr, max_size=0)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_noinput():
librosa.onset.onset_strength(y=None, S=None)
@pytest.fixture(scope="module")
def melspec_sr(ysr):
    y, sr = ysr
    S = librosa.feature.melspectrogram(y=y, sr=sr)
    return S, sr
@pytest.mark.parametrize(
"feature", [None, librosa.feature.melspectrogram, librosa.feature.chroma_stft]
)
@pytest.mark.parametrize("n_fft", [512, 2048])
@pytest.mark.parametrize("hop_length", [256, 512])
@pytest.mark.parametrize("detrend", [False, True])
@pytest.mark.parametrize("center", [False, True])
@pytest.mark.parametrize("aggregate", [None, np.mean, np.max])
def test_onset_strength_spectrogram(
melspec_sr, feature, n_fft, hop_length, detrend, center, aggregate
):
S, sr = melspec_sr
oenv = librosa.onset.onset_strength(
y=None,
sr=sr,
S=S,
detrend=detrend,
center=center,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length,
)
assert oenv.ndim == 1
target_shape = S.shape[-1]
if not detrend:
assert np.all(oenv >= 0)
assert oenv.shape[-1] == target_shape
@pytest.mark.parametrize("lag", [1, 2, 3])
@pytest.mark.parametrize("aggregate", [np.mean, np.max])
def test_onset_strength_multi_noagg(melspec_sr, lag, aggregate):
S, sr = melspec_sr
# We only test with max_size=1 here to make the sub-band slicing test simple
odf_multi = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, aggregate=False
)
odf_mean = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, aggregate=aggregate
)
# With no aggregation, output shape should = input shape
assert odf_multi.shape == S.shape
# Result should average out to the same as mean aggregation
assert np.allclose(odf_mean, aggregate(odf_multi, axis=0))
@pytest.fixture(scope="module")
def channels(melspec_sr):
S, _ = melspec_sr
return np.linspace(0, S.shape[0], num=5, dtype=int)
@pytest.mark.parametrize("lag", [1, 2, 3])
def test_onset_strength_multi(melspec_sr, lag, channels):
S, sr = melspec_sr
# We only test with max_size=1 here to make the sub-band slicing test simple
odf_multi = librosa.onset.onset_strength_multi(
S=S, lag=lag, max_size=1, channels=channels
)
assert len(odf_multi) == len(channels) - 1
for i, (s, t) in enumerate(zip(channels, channels[1:])):
odf_single = librosa.onset.onset_strength(S=S[s:t], lag=lag, max_size=1)
assert np.allclose(odf_single, odf_multi[i])
@pytest.fixture(scope="module", params=[64, 512, 2048])
def hop(request):
return request.param
@pytest.fixture(scope="module", params=[False, True], ids=["audio", "oenv"])
def oenv(ysr, hop, request):
if request.param:
y, sr = ysr
return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)
else:
return None
@pytest.mark.parametrize("bt", [False, True])
@pytest.mark.parametrize("normalize", [False, True])
def test_onset_detect_real(ysr, oenv, hop, bt, normalize):
y, sr = ysr
onsets = librosa.onset.onset_detect(
y=y,
sr=sr,
onset_envelope=oenv,
hop_length=hop,
backtrack=bt,
normalize=normalize,
)
if bt:
assert np.all(onsets >= 0)
else:
assert np.all(onsets > 0)
assert np.all(onsets < len(y) * sr // hop)
if oenv is not None:
assert np.all(onsets < len(oenv))
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_detect_nosignal():
librosa.onset.onset_detect(y=None, onset_envelope=None)
@pytest.mark.parametrize("sr", [4000])
@pytest.mark.parametrize("y", [np.zeros(4000), np.ones(4000), -np.ones(4000)])
@pytest.mark.parametrize("hop_length", [64, 512, 2048])
def test_onset_detect_const(y, sr, hop_length):
# Disable padding here
onsets = librosa.onset.onset_detect(
y=y, sr=sr, onset_envelope=None, hop_length=hop_length,
)
# We'll allow one onset at the start of the signal for these examples
# when y is all-ones, zero-padding induces an onset at the beginning of the
# signal
assert len(onsets) == 0 or (y[0] != 0 and len(onsets) == 1)
@pytest.mark.parametrize(
"units, ctx",
[
("frames", dnr()),
("time", dnr()),
("samples", dnr()),
("bad units", pytest.raises(librosa.ParameterError)),
],
)
@pytest.mark.parametrize("hop_length", [512, 1024])
def test_onset_units(ysr, hop_length, units, ctx):
y, sr = ysr
with ctx:
b1 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length)
b2 = librosa.onset.onset_detect(y=y, sr=sr, hop_length=hop_length, units=units)
t1 = librosa.frames_to_time(b1, sr=sr, hop_length=hop_length)
if units == "time":
t2 = b2
elif units == "samples":
t2 = librosa.samples_to_time(b2, sr=sr)
elif units == "frames":
t2 = librosa.frames_to_time(b2, sr=sr, hop_length=hop_length)
assert np.allclose(t1, t2)
@pytest.fixture(scope="module", params=[False, True], ids=["oenv", "rms"])
def energy(ysr, hop, request):
y, sr = ysr
if request.param:
return librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop)
else:
return librosa.feature.rms(y=y, hop_length=hop)
def test_onset_backtrack(ysr, oenv, hop, energy):
y, sr = ysr
onsets = librosa.onset.onset_detect(
y=y, sr=sr, onset_envelope=oenv, hop_length=hop, backtrack=False
)
# Test backtracking
onsets_bt = librosa.onset.onset_backtrack(onsets, energy)
# Make sure there are no negatives
assert np.all(onsets_bt >= 0)
# And that we never roll forward
assert np.all(onsets_bt <= onsets)
# And that the detected peaks are actually minima
assert np.all(energy[onsets_bt] <= energy[np.maximum(0, onsets_bt - 1)])
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_noagg():
S = np.zeros((3, 3))
librosa.onset.onset_strength(S=S, aggregate=False)
@pytest.mark.xfail(raises=librosa.ParameterError)
def test_onset_strength_badref():
S = np.zeros((3, 3))
librosa.onset.onset_strength(S=S, ref=S[:, :2])
def test_onset_strength_multi_ref():
srand()
# Make a random positive spectrum
S = 1 + np.abs(np.random.randn(1025, 10))
# Test with a null reference
null_ref = np.zeros_like(S)
onsets = librosa.onset.onset_strength_multi(
S=S, ref=null_ref, aggregate=False, center=False
)
# since the reference is zero everywhere, S - ref = S
# past the setup phase (first frame)
assert np.allclose(onsets[:, 1:], S[:, 1:])
def test_onset_detect_inplace_normalize():
# This test will fail if the in-place normalization modifies
# the input onset envelope
oenv_in = np.ones(50)
oenv_in[10] = 2
oenv_orig = oenv_in.copy()
librosa.onset.onset_detect(onset_envelope=oenv_in, normalize=True)
assert np.allclose(oenv_in, oenv_orig) and oenv_in is not oenv_orig
|
profile_thumbnail.py
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import template
from social.apps.django_app.default.models import UserSocialAuth
register = template.Library()
@register.filter(name='profile_thumbnail')
def profile_thumbnail(user):
    # Returns the URL of the user's Google OAuth2 profile image, falling back
    # to a local placeholder when the social auth record or image is missing.
    try:
        return UserSocialAuth.get_social_auth('google-oauth2', user.email).extra_data['image']['url']
    except Exception:
        return '/static/global/images/placeholder/user.png'
|
manufactureGAP_patches.py
|
import polyadcirc.run_framework.domain as dom
import polyadcirc.pyGriddata.manufacture_gap as manu
grid_dir = '.'
domain = dom.domain(grid_dir)
domain.read_spatial_grid()
x_values = [n.x for n in domain.node.values()]
y_values = [n.y for n in domain.node.values()]
xr = max(x_values)
xl = min(x_values)
yu = max(y_values)
yl = min(y_values)
p = [[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
[0.7, 0.2, 0.1, 0],
[0.1, 0.1, 0.8, 0],
[0.8, 0.2, 0, 0],
[0.2, 0.4, 0.4, 0],
[0.1, 0.2, 0.7, 0],
[0.2, 0.4, 0.4, 0],
[0.7, 0.3, 0, 0]]
x_points = (xl, 750, xr)
y_points = (yl, -1225, -750, 100, 500, 1150, 1300, yu)
rand_rect = manu.random_patches(x_points, y_points, [1, 2, 3, 4], p_sections=p)
manu.write_gapfile(rand_rect, xl, yl, 'band_sections.asc')
|
unittest_utils.py
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to make unit testing easier."""
import StringIO
import numpy as np
from PIL import Image as PILImage
import tensorflow as tf
def create_random_image(image_format, shape):
"""Creates an image with random values.
Args:
image_format: An image format (PNG or JPEG).
shape: A tuple with image shape (including channels).
Returns:
A tuple (<numpy ndarray>, <a string with encoded image>)
"""
  image = np.random.randint(low=0, high=256, size=shape, dtype='uint8')  # high is exclusive; 256 covers the full uint8 range
io = StringIO.StringIO()
image_pil = PILImage.fromarray(image)
image_pil.save(io, image_format, subsampling=0, quality=100)
return image, io.getvalue()
def create_serialized_example(name_to_values):
"""Creates a tf.Example proto using a dictionary.
It automatically detects type of values and define a corresponding feature.
Args:
name_to_values: A dictionary.
Returns:
tf.Example proto.
"""
example = tf.train.Example()
for name, values in name_to_values.items():
feature = example.features.feature[name]
if isinstance(values[0], str):
add = feature.bytes_list.value.extend
elif isinstance(values[0], float):
      add = feature.float_list.value.extend  # the tf.train.Feature proto field is float_list
elif isinstance(values[0], int):
add = feature.int64_list.value.extend
else:
raise AssertionError('Unsupported type: %s' % type(values[0]))
add(values)
return example.SerializeToString()
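# Illustrative usage (hypothetical feature names):
#   image, encoded = create_random_image('PNG', (4, 4, 3))
#   serialized = create_serialized_example({'image/encoded': [encoded],
#                                           'image/format': ['PNG']})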
|
mod.rs
|
mod coils;
mod data;
pub(crate) mod rtu;
pub(crate) mod tcp;
pub use self::{coils::*, data::*};
use byteorder::{BigEndian, ByteOrder};
use core::fmt;
/// A Modbus function code.
///
/// It is represented by an unsigned 8 bit integer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FnCode {
ReadCoils,
ReadDiscreteInputs,
WriteSingleCoil,
WriteMultipleCoils,
ReadInputRegisters,
ReadHoldingRegisters,
WriteSingleRegister,
WriteMultipleRegisters,
ReadWriteMultipleRegisters,
#[cfg(feature = "rtu")]
ReadExceptionStatus,
#[cfg(feature = "rtu")]
Diagnostics,
#[cfg(feature = "rtu")]
GetCommEventCounter,
#[cfg(feature = "rtu")]
GetCommEventLog,
#[cfg(feature = "rtu")]
ReportServerId,
//TODO:
//- ReadFileRecord
//- WriteFileRecord
    //- MaskWriteRegister
    //TODO:
    //- ReadFifoQueue
//- EncapsulatedInterfaceTransport
//- CanOpenGeneralReferenceRequestAndResponsePdu
//- ReadDeviceIdentification
Custom(u8),
}
impl From<u8> for FnCode {
fn from(c: u8) -> Self {
use FnCode::*;
match c {
0x01 => ReadCoils,
0x02 => ReadDiscreteInputs,
0x05 => WriteSingleCoil,
0x0F => WriteMultipleCoils,
0x04 => ReadInputRegisters,
0x03 => ReadHoldingRegisters,
0x06 => WriteSingleRegister,
0x10 => WriteMultipleRegisters,
0x17 => ReadWriteMultipleRegisters,
#[cfg(feature = "rtu")]
0x07 => ReadExceptionStatus,
#[cfg(feature = "rtu")]
0x08 => Diagnostics,
#[cfg(feature = "rtu")]
0x0B => GetCommEventCounter,
#[cfg(feature = "rtu")]
0x0C => GetCommEventLog,
#[cfg(feature = "rtu")]
0x11 => ReportServerId,
_ => Custom(c),
}
}
}
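// Illustrative round-trip (mirrors the unit tests at the bottom of this file):
//   assert_eq!(FnCode::from(0x0F), FnCode::WriteMultipleCoils);
//   assert_eq!(u8::from(FnCode::WriteMultipleCoils), 0x0F);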
impl From<FnCode> for u8 {
fn from(code: FnCode) -> Self {
use FnCode::*;
match code {
ReadCoils => 0x01,
ReadDiscreteInputs => 0x02,
WriteSingleCoil => 0x05,
WriteMultipleCoils => 0x0F,
ReadInputRegisters => 0x04,
ReadHoldingRegisters => 0x03,
WriteSingleRegister => 0x06,
WriteMultipleRegisters => 0x10,
ReadWriteMultipleRegisters => 0x17,
#[cfg(feature = "rtu")]
ReadExceptionStatus => 0x07,
#[cfg(feature = "rtu")]
Diagnostics => 0x08,
#[cfg(feature = "rtu")]
GetCommEventCounter => 0x0B,
#[cfg(feature = "rtu")]
GetCommEventLog => 0x0C,
#[cfg(feature = "rtu")]
ReportServerId => 0x11,
Custom(c) => c,
}
}
}
/// A Modbus sub-function code is represented by an unsigned 16 bit integer.
#[cfg(feature = "rtu")]
pub(crate) type SubFnCode = u16;
/// A Modbus address is represented by 16 bit (from `0` to `65535`).
pub(crate) type Address = u16;
/// A Coil represents a single bit.
///
/// - `true` is equivalent to `ON`, `1` and `0xFF00`.
/// - `false` is equivalent to `OFF`, `0` and `0x0000`.
pub(crate) type Coil = bool;
/// Modbus uses 16 bit for its data items (big-endian representation).
pub(crate) type Word = u16;
/// Number of items to process (`0` - `65535`).
pub(crate) type Quantity = u16;
/// Raw PDU data
type RawData<'r> = &'r [u8];
/// A request represents a message from the client (master) to the server (slave).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Request<'r> {
ReadCoils(Address, Quantity),
ReadDiscreteInputs(Address, Quantity),
WriteSingleCoil(Address, Coil),
WriteMultipleCoils(Address, Coils<'r>),
ReadInputRegisters(Address, Quantity),
ReadHoldingRegisters(Address, Quantity),
WriteSingleRegister(Address, Word),
WriteMultipleRegisters(Address, Data<'r>),
ReadWriteMultipleRegisters(Address, Quantity, Address, Data<'r>),
#[cfg(feature = "rtu")]
ReadExceptionStatus,
#[cfg(feature = "rtu")]
Diagnostics(SubFnCode, Data<'r>),
#[cfg(feature = "rtu")]
GetCommEventCounter,
#[cfg(feature = "rtu")]
GetCommEventLog,
#[cfg(feature = "rtu")]
ReportServerId,
//TODO:
//- ReadFileRecord
//- WriteFileRecord
    //- MaskWriteRegister
    //TODO:
    //- ReadFifoQueue
//- EncapsulatedInterfaceTransport
//- CanOpenGeneralReferenceRequestAndResponsePdu
//- ReadDeviceIdentification
Custom(FnCode, &'r [u8]),
}
/// A server (slave) exception response.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ExceptionResponse {
pub function: FnCode,
pub exception: Exception,
}
/// Represents a message from the client (master) to the server (slave).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct RequestPdu<'r>(pub Request<'r>);
/// Represents a message from the server (slave) to the client (master).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct ResponsePdu<'r>(pub Result<Response<'r>, ExceptionResponse>);
#[cfg(feature = "rtu")]
type Status = u16;
#[cfg(feature = "rtu")]
type EventCount = u16;
#[cfg(feature = "rtu")]
type MessageCount = u16;
/// The response data of a successful request.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Response<'r> {
ReadCoils(Coils<'r>),
ReadDiscreteInputs(Coils<'r>),
WriteSingleCoil(Address),
WriteMultipleCoils(Address, Quantity),
ReadInputRegisters(Data<'r>),
ReadHoldingRegisters(Data<'r>),
WriteSingleRegister(Address, Word),
WriteMultipleRegisters(Address, Quantity),
ReadWriteMultipleRegisters(Data<'r>),
#[cfg(feature = "rtu")]
ReadExceptionStatus(u8),
#[cfg(feature = "rtu")]
Diagnostics(Data<'r>),
#[cfg(feature = "rtu")]
GetCommEventCounter(Status, EventCount),
#[cfg(feature = "rtu")]
GetCommEventLog(Status, EventCount, MessageCount, &'r [u8]),
#[cfg(feature = "rtu")]
ReportServerId(&'r [u8], bool),
//TODO:
//- ReadFileRecord
//- WriteFileRecord
    //- MaskWriteRegister
    //TODO:
    //- ReadFifoQueue
//- EncapsulatedInterfaceTransport
//- CanOpenGeneralReferenceRequestAndResponsePdu
//- ReadDeviceIdentification
Custom(FnCode, &'r [u8]),
}
impl<'r> From<Request<'r>> for FnCode {
fn from(r: Request<'r>) -> Self {
use FnCode as c;
use Request::*;
match r {
ReadCoils(_, _) => c::ReadCoils,
ReadDiscreteInputs(_, _) => c::ReadDiscreteInputs,
WriteSingleCoil(_, _) => c::WriteSingleCoil,
WriteMultipleCoils(_, _) => c::WriteMultipleCoils,
ReadInputRegisters(_, _) => c::ReadInputRegisters,
ReadHoldingRegisters(_, _) => c::ReadHoldingRegisters,
WriteSingleRegister(_, _) => c::WriteSingleRegister,
WriteMultipleRegisters(_, _) => c::WriteMultipleRegisters,
ReadWriteMultipleRegisters(_, _, _, _) => c::ReadWriteMultipleRegisters,
#[cfg(feature = "rtu")]
ReadExceptionStatus => c::ReadExceptionStatus,
#[cfg(feature = "rtu")]
Diagnostics(_, _) => c::Diagnostics,
#[cfg(feature = "rtu")]
GetCommEventCounter => c::GetCommEventCounter,
#[cfg(feature = "rtu")]
GetCommEventLog => c::GetCommEventLog,
#[cfg(feature = "rtu")]
ReportServerId => c::ReportServerId,
Custom(code, _) => code,
}
}
}
impl<'r> From<Response<'r>> for FnCode {
fn from(r: Response<'r>) -> Self {
use FnCode as c;
use Response::*;
match r {
ReadCoils(_) => c::ReadCoils,
ReadDiscreteInputs(_) => c::ReadDiscreteInputs,
WriteSingleCoil(_) => c::WriteSingleCoil,
WriteMultipleCoils(_, _) => c::WriteMultipleCoils,
ReadInputRegisters(_) => c::ReadInputRegisters,
ReadHoldingRegisters(_) => c::ReadHoldingRegisters,
WriteSingleRegister(_, _) => c::WriteSingleRegister,
WriteMultipleRegisters(_, _) => c::WriteMultipleRegisters,
ReadWriteMultipleRegisters(_) => c::ReadWriteMultipleRegisters,
#[cfg(feature = "rtu")]
ReadExceptionStatus(_) => c::ReadExceptionStatus,
#[cfg(feature = "rtu")]
Diagnostics(_) => c::Diagnostics,
#[cfg(feature = "rtu")]
GetCommEventCounter(_, _) => c::GetCommEventCounter,
#[cfg(feature = "rtu")]
GetCommEventLog(_, _, _, _) => c::GetCommEventLog,
#[cfg(feature = "rtu")]
ReportServerId(_, _) => c::ReportServerId,
Custom(code, _) => code,
}
}
}
/// A server (slave) exception.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Exception {
IllegalFunction = 0x01,
IllegalDataAddress = 0x02,
IllegalDataValue = 0x03,
ServerDeviceFailure = 0x04,
Acknowledge = 0x05,
ServerDeviceBusy = 0x06,
MemoryParityError = 0x08,
GatewayPathUnavailable = 0x0A,
GatewayTargetDevice = 0x0B,
}
impl fmt::Display for Exception {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::Exception::*;
let desc = match *self {
IllegalFunction => "Illegal function",
IllegalDataAddress => "Illegal data address",
IllegalDataValue => "Illegal data value",
ServerDeviceFailure => "Server device failure",
Acknowledge => "Acknowledge",
ServerDeviceBusy => "Server device busy",
MemoryParityError => "Memory parity error",
GatewayPathUnavailable => "Gateway path unavailable",
GatewayTargetDevice => "Gateway target device failed to respond",
};
write!(f, "{}", desc)
}
}
impl<'r> Request<'r> {
/// Number of bytes required for a serialized PDU frame.
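    /// E.g. `ReadCoils` = 1 (function code) + 2 (address) + 2 (quantity) = 5 bytes.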
pub fn pdu_len(&self) -> usize {
use Request::*;
match *self {
ReadCoils(_, _)
| ReadDiscreteInputs(_, _)
| ReadInputRegisters(_, _)
| ReadHoldingRegisters(_, _)
| WriteSingleRegister(_, _)
| WriteSingleCoil(_, _) => 5,
WriteMultipleCoils(_, coils) => 6 + coils.packed_len(),
WriteMultipleRegisters(_, words) => 6 + words.data.len(),
ReadWriteMultipleRegisters(_, _, _, words) => 10 + words.data.len(),
Custom(_, data) => 1 + data.len(),
#[cfg(feature = "rtu")]
_ => unimplemented!(), // TODO
}
}
}
impl<'r> Response<'r> {
/// Number of bytes required for a serialized PDU frame.
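    /// E.g. a `ReadCoils` response = 1 (function code) + 1 (byte count) + the packed coil bytes.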
pub fn pdu_len(&self) -> usize {
use Response::*;
match *self {
ReadCoils(coils) | ReadDiscreteInputs(coils) => 2 + coils.packed_len(),
WriteSingleCoil(_) => 3,
WriteMultipleCoils(_, _) | WriteMultipleRegisters(_, _) | WriteSingleRegister(_, _) => {
5
}
ReadInputRegisters(words)
| ReadHoldingRegisters(words)
| ReadWriteMultipleRegisters(words) => 2 + words.len() * 2,
Custom(_, data) => 1 + data.len(),
#[cfg(feature = "rtu")]
_ => unimplemented!(), // TODO
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn function_code_into_u8() {
let x: u8 = FnCode::WriteMultipleCoils.into();
assert_eq!(x, 15);
let x: u8 = FnCode::Custom(0xBB).into();
assert_eq!(x, 0xBB);
}
#[test]
fn function_code_from_u8() {
assert_eq!(FnCode::from(15), FnCode::WriteMultipleCoils);
assert_eq!(FnCode::from(0xBB), FnCode::Custom(0xBB));
}
#[test]
fn function_code_from_request() {
use Request::*;
let requests = &[
(ReadCoils(0, 0), 1),
(ReadDiscreteInputs(0, 0), 2),
(WriteSingleCoil(0, true), 5),
(
WriteMultipleCoils(
0,
Coils {
quantity: 0,
data: &[],
},
),
0x0F,
),
(ReadInputRegisters(0, 0), 0x04),
(ReadHoldingRegisters(0, 0), 0x03),
(WriteSingleRegister(0, 0), 0x06),
(
WriteMultipleRegisters(
0,
Data {
quantity: 0,
data: &[],
},
),
0x10,
),
(
ReadWriteMultipleRegisters(
0,
0,
0,
Data {
quantity: 0,
data: &[],
},
),
0x17,
),
(Custom(FnCode::Custom(88), &[]), 88),
];
for (req, expected) in requests {
let code: u8 = FnCode::from(*req).into();
assert_eq!(*expected, code);
}
}
#[test]
fn function_code_from_response() {
use Response::*;
let responses = &[
(
ReadCoils(Coils {
quantity: 0,
data: &[],
}),
1,
),
(
ReadDiscreteInputs(Coils {
quantity: 0,
data: &[],
}),
2,
),
(WriteSingleCoil(0x0), 5),
(WriteMultipleCoils(0x0, 0x0), 0x0F),
(
ReadInputRegisters(Data {
quantity: 0,
data: &[],
}),
0x04,
),
(
ReadHoldingRegisters(Data {
quantity: 0,
data: &[],
}),
0x03,
),
(WriteSingleRegister(0, 0), 0x06),
(WriteMultipleRegisters(0, 0), 0x10),
(
ReadWriteMultipleRegisters(Data {
quantity: 0,
data: &[],
}),
0x17,
),
(Custom(FnCode::Custom(99), &[]), 99),
];
for (req, expected) in responses {
let code: u8 = FnCode::from(*req).into();
assert_eq!(*expected, code);
}
}
#[test]
fn test_request_pdu_len() {
assert_eq!(Request::ReadCoils(0x12, 5).pdu_len(), 5);
assert_eq!(Request::WriteSingleRegister(0x12, 0x33).pdu_len(), 5);
let buf = &mut [0, 0];
assert_eq!(
Request::WriteMultipleCoils(0, Coils::from_bools(&[true, false], buf).unwrap())
.pdu_len(),
7
);
// TODO: extend test
}
#[test]
fn test_response_pdu_len() {
let buf = &mut [0, 0];
assert_eq!(
Response::ReadCoils(Coils::from_bools(&[true], buf).unwrap()).pdu_len(),
3
);
// TODO: extend test
}
}
|
zopetestbrowser.py
|
# -*- coding: utf-8 -*-
# Copyright 2012 splinter authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import re
from lxml.cssselect import CSSSelector
from zope.testbrowser.browser import Browser, ListControl
from splinter.element_list import ElementList
from splinter.exceptions import ElementDoesNotExist
from splinter.driver import DriverAPI, ElementAPI
from splinter.cookie_manager import CookieManagerAPI
import mimetypes
import lxml.html
import mechanize
import time
class CookieManager(CookieManagerAPI):
def __init__(self, browser_cookies):
self._cookies = browser_cookies
def add(self, cookies):
if isinstance(cookies, list):
for cookie in cookies:
for key, value in cookie.items():
self._cookies[key] = value
return
for key, value in cookies.items():
self._cookies[key] = value
def delete(self, *cookies):
if cookies:
for cookie in cookies:
try:
del self._cookies[cookie]
except KeyError:
pass
else:
self._cookies.clearAll()
def all(self, verbose=False):
cookies = {}
for key, value in self._cookies.items():
cookies[key] = value
return cookies
def __getitem__(self, item):
return self._cookies[item]
    def __eq__(self, other_object):
        if isinstance(other_object, dict):
            return dict(self._cookies) == other_object
        return NotImplemented
class ZopeTestBrowser(DriverAPI):
driver_name = "zope.testbrowser"
def __init__(self, user_agent=None, wait_time=2):
self.wait_time = wait_time
mech_browser = self._get_mech_browser(user_agent)
self._browser = Browser(mech_browser=mech_browser)
self._cookie_manager = CookieManager(self._browser.cookies)
self._last_urls = []
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
pass
def visit(self, url):
self._browser.open(url)
def back(self):
self._last_urls.insert(0, self.url)
self._browser.goBack()
def forward(self):
try:
self.visit(self._last_urls.pop())
except IndexError:
pass
def reload(self):
self._browser.reload()
def quit(self):
pass
@property
def htmltree(self):
return lxml.html.fromstring(self.html.decode('utf-8'))
@property
def title(self):
return self._browser.title
@property
def html(self):
return self._browser.contents
@property
def url(self):
return self._browser.url
def find_option_by_value(self, value):
html = self.htmltree
element = html.xpath('//option[@value="%s"]' % value)[0]
control = self._browser.getControl(element.text)
return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="value", query=value)
def find_option_by_text(self, text):
html = self.htmltree
element = html.xpath('//option[normalize-space(text())="%s"]' % text)[0]
control = self._browser.getControl(element.text)
return ElementList([ZopeTestBrowserOptionElement(control, self)], find_by="text", query=text)
def find_by_css(self, selector):
xpath = CSSSelector(selector).path
return self.find_by_xpath(xpath, original_find="css", original_selector=selector)
def find_by_xpath(self, xpath, original_find=None, original_selector=None):
html = self.htmltree
elements = []
for xpath_element in html.xpath(xpath):
if self._element_is_link(xpath_element):
return self._find_links_by_xpath(xpath)
elif self._element_is_control(xpath_element):
return self.find_by_name(xpath_element.name)
else:
elements.append(xpath_element)
find_by = original_find or "xpath"
query = original_selector or xpath
return ElementList([ZopeTestBrowserElement(element, self) for element in elements], find_by=find_by, query=query)
def find_by_tag(self, tag):
return self.find_by_xpath('//%s' % tag, original_find="tag", original_selector=tag)
def find_by_value(self, value):
return self.find_by_xpath('//*[@value="%s"]' % value, original_find="value", original_selector=value)
def find_by_id(self, id_value):
return self.find_by_xpath('//*[@id="%s"][1]' % id_value, original_find="id", original_selector=id_value)
def find_by_name(self, name):
elements = []
index = 0
while True:
try:
control = self._browser.getControl(name=name, index=index)
elements.append(control)
index += 1
except LookupError:
break
return ElementList([ZopeTestBrowserControlElement(element, self) for element in elements], find_by="name", query=name)
def find_link_by_text(self, text):
return self._find_links_by_xpath("//a[text()='%s']" % text)
def find_link_by_href(self, href):
return self._find_links_by_xpath("//a[@href='%s']" % href)
def find_link_by_partial_href(self, partial_href):
return self._find_links_by_xpath("//a[contains(@href, '%s')]" % partial_href)
def find_link_by_partial_text(self, partial_text):
return self._find_links_by_xpath("//a[contains(normalize-space(.), '%s')]" % partial_text)
def fill(self, name, value):
self.find_by_name(name=name).first._control.value = value
def fill_form(self, field_values):
for name, value in field_values.items():
element = self.find_by_name(name)
control = element.first._control
if control.type == 'checkbox':
if value:
control.value = control.options
else:
control.value = []
elif control.type == 'radio':
control.value = [option for option in control.options if option == value]
elif control.type == 'select':
control.value = [value]
else:
# text, textarea, password, tel
control.value = value
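    # Illustrative usage (hypothetical form fields):
    #   browser.fill_form({'name': 'Ada', 'subscribe': True, 'color': 'blue'})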
def choose(self, name, value):
control = self._browser.getControl(name=name)
control.value = [option for option in control.options if option == value]
def check(self, name):
control = self._browser.getControl(name=name)
control.value = control.options
def uncheck(self, name):
control = self._browser.getControl(name=name)
control.value = []
def attach_file(self, name, file_path):
filename = file_path.split('/')[-1]
control = self._browser.getControl(name=name)
content_type, _ = mimetypes.guess_type(file_path)
        control.add_file(open(file_path, 'rb'), content_type, filename)
def _find_links_by_xpath(self, xpath):
html = self.htmltree
links = html.xpath(xpath)
return ElementList([ZopeTestBrowserLinkElement(link, self) for link in links], find_by="xpath", query=xpath)
def select(self, name, value):
self.find_by_name(name).first._control.value = [value]
def is_text_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if self._is_text_present(text):
return True
return False
def _is_text_present(self, text):
try:
body = self.find_by_tag('body').first
return text in body.text
except ElementDoesNotExist:
# This exception will be thrown if the body tag isn't present
# This has occasionally been observed. Assume that the
# page isn't fully loaded yet
            return False
def is_text_not_present(self, text, wait_time=None):
wait_time = wait_time or self.wait_time
end_time = time.time() + wait_time
while time.time() < end_time:
if not self._is_text_present(text):
return True
return False
def _element_is_link(self, element):
return element.tag == 'a'
def _element_is_control(self, element):
return hasattr(element, 'type')
def _get_mech_browser(self, user_agent):
mech_browser = mechanize.Browser()
if user_agent is not None:
mech_browser.addheaders = [("User-agent", user_agent), ]
return mech_browser
@property
def cookies(self):
return self._cookie_manager
re_extract_inner_html = re.compile(r'^<[^<>]+>(.*)</[^<>]+>$')
class ZopeTestBrowserElement(ElementAPI):
def __init__(self, element, parent):
self._element = element
self.parent = parent
def __getitem__(self, attr):
return self._element.attrib[attr]
def find_by_css(self, selector):
elements = self._element.cssselect(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_xpath(self, selector):
elements = self._element.xpath(selector)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_name(self, name):
elements = self._element.cssselect('[name="%s"]' % name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_tag(self, name):
elements = self._element.cssselect(name)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_value(self, value):
elements = self._element.cssselect('[value="%s"]' % value)
return ElementList([self.__class__(element, self) for element in elements])
def find_by_id(self, id):
elements = self._element.cssselect('#%s' % id)
return ElementList([self.__class__(element, self) for element in elements])
@property
def value(self):
return self._element.text_content()
@property
def text(self):
return self.value
@property
def outer_html(self):
return lxml.html.tostring(self._element, encoding='unicode').strip()
@property
def html(self):
return re_extract_inner_html.match(self.outer_html).group(1)
def has_class(self, class_name):
return len(self._element.find_class(class_name)) > 0
class ZopeTestBrowserLinkElement(ZopeTestBrowserElement):
def __init__(self, element, parent):
super(ZopeTestBrowserLinkElement, self).__init__(element, parent)
self._browser = parent._browser
def __getitem__(self, attr):
return super(ZopeTestBrowserLinkElement, self).__getitem__(attr)
def click(self):
return self._browser.open(self["href"])
class ZopeTestBrowserControlElement(ZopeTestBrowserElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.mech_control.attrs[attr]
@property
def value(self):
value = self._control.value
if isinstance(self._control, ListControl) and len(value) == 1:
return value[0]
return value
@property
def checked(self):
return bool(self._control.value)
def click(self):
return self._control.click()
def fill(self, value):
self._control.value = value
def select(self, value):
self._control.value = [value]
class ZopeTestBrowserOptionElement(ZopeTestBrowserElement):
def __init__(self, control, parent):
self._control = control
self.parent = parent
def __getitem__(self, attr):
return self._control.mech_item.attrs[attr]
@property
def text(self):
return self._control.mech_item.get_labels()[0]._text
@property
def value(self):
return self._control.optionValue
@property
def selected(self):
return self._control.mech_item._selected
|
change_detection_jit_generator.js
|
import { assertionsEnabled, isBlank } from 'angular2/src/facade/lang';
import { BaseException } from 'angular2/src/facade/exceptions';
import { ListWrapper } from 'angular2/src/facade/collection';
import { AbstractChangeDetector } from './abstract_change_detector';
import { ChangeDetectionUtil } from './change_detection_util';
import { RecordType } from './proto_record';
import { CodegenNameUtil, sanitizeName } from './codegen_name_util';
import { CodegenLogicUtil } from './codegen_logic_util';
import { codify } from './codegen_facade';
import { ChangeDetectorState } from './constants';
import { createPropertyRecords, createEventRecords } from './proto_change_detector';
/**
* The code generator takes a list of proto records and creates a function/class
* that "emulates" what the developer would write by hand to implement the same
* kind of behaviour.
*
* This code should be kept in sync with the Dart transformer's
* `angular2.transform.template_compiler.change_detector_codegen` library. If you make updates
* here, please make equivalent changes there.
*/
const IS_CHANGED_LOCAL = "isChanged";
const CHANGES_LOCAL = "changes";
export class ChangeDetectorJITGenerator {
constructor(definition, changeDetectionUtilVarName, abstractChangeDetectorVarName, changeDetectorStateVarName) {
this.changeDetectionUtilVarName = changeDetectionUtilVarName;
this.abstractChangeDetectorVarName = abstractChangeDetectorVarName;
this.changeDetectorStateVarName = changeDetectorStateVarName;
var propertyBindingRecords = createPropertyRecords(definition);
var eventBindingRecords = createEventRecords(definition);
var propertyBindingTargets = definition.bindingRecords.map(b => b.target);
this.id = definition.id;
this.changeDetectionStrategy = definition.strategy;
this.genConfig = definition.genConfig;
this.records = propertyBindingRecords;
this.propertyBindingTargets = propertyBindingTargets;
this.eventBindings = eventBindingRecords;
this.directiveRecords = definition.directiveRecords;
this._names = new CodegenNameUtil(this.records, this.eventBindings, this.directiveRecords, this.changeDetectionUtilVarName);
this._logic =
new CodegenLogicUtil(this._names, this.changeDetectionUtilVarName, this.changeDetectorStateVarName, this.changeDetectionStrategy);
this.typeName = sanitizeName(`ChangeDetector_${this.id}`);
}
generate() {
var factorySource = `
${this.generateSource()}
return function() {
return new ${this.typeName}();
}
`;
return new Function(this.abstractChangeDetectorVarName, this.changeDetectionUtilVarName, this.changeDetectorStateVarName, factorySource)(AbstractChangeDetector, ChangeDetectionUtil, ChangeDetectorState);
}
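    // Illustrative use (hypothetical `definition` object and constructor arguments):
    //   var factory = new ChangeDetectorJITGenerator(definition, 'util', 'abs', 'state').generate();
    //   var detector = factory();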
generateSource() {
return `
var ${this.typeName} = function ${this.typeName}() {
${this.abstractChangeDetectorVarName}.call(
this, ${JSON.stringify(this.id)}, ${this.records.length},
${this.typeName}.gen_propertyBindingTargets, ${this.typeName}.gen_directiveIndices,
${codify(this.changeDetectionStrategy)});
this.dehydrateDirectives(false);
}
${this.typeName}.prototype = Object.create(${this.abstractChangeDetectorVarName}.prototype);
${this.typeName}.prototype.detectChangesInRecordsInternal = function(throwOnChange) {
${this._names.genInitLocals()}
var ${IS_CHANGED_LOCAL} = false;
var ${CHANGES_LOCAL} = null;
${this._genAllRecords(this.records)}
}
${this._maybeGenHandleEventInternal()}
${this._maybeGenAfterContentLifecycleCallbacks()}
${this._maybeGenAfterViewLifecycleCallbacks()}
${this._maybeGenHydrateDirectives()}
${this._maybeGenDehydrateDirectives()}
${this._genPropertyBindingTargets()}
${this._genDirectiveIndices()}
`;
}
/** @internal */
_genPropertyBindingTargets() {
var targets = this._logic.genPropertyBindingTargets(this.propertyBindingTargets, this.genConfig.genDebugInfo);
return `${this.typeName}.gen_propertyBindingTargets = ${targets};`;
}
/** @internal */
_genDirectiveIndices() {
var indices = this._logic.genDirectiveIndices(this.directiveRecords);
return `${this.typeName}.gen_directiveIndices = ${indices};`;
}
/** @internal */
_maybeGenHandleEventInternal() {
if (this.eventBindings.length > 0) {
var handlers = this.eventBindings.map(eb => this._genEventBinding(eb)).join("\n");
return `
${this.typeName}.prototype.handleEventInternal = function(eventName, elIndex, locals) {
var ${this._names.getPreventDefaultAccesor()} = false;
${this._names.genInitEventLocals()}
${handlers}
return ${this._names.getPreventDefaultAccesor()};
}
`;
}
else {
return '';
}
}
/** @internal */
_genEventBinding(eb) {
let codes = [];
this._endOfBlockIdxs = [];
ListWrapper.forEachWithIndex(eb.records, (r, i) => {
let code;
if (r.isConditionalSkipRecord()) {
code = this._genConditionalSkip(r, this._names.getEventLocalName(eb, i));
}
else if (r.isUnconditionalSkipRecord()) {
code = this._genUnconditionalSkip(r);
}
else {
code = this._genEventBindingEval(eb, r);
}
code += this._genEndOfSkipBlock(i);
codes.push(code);
});
return `
if (eventName === "${eb.eventName}" && elIndex === ${eb.elIndex}) {
${codes.join("\n")}
}`;
}
/** @internal */
_genEventBindingEval(eb, r) {
if (r.lastInBinding) {
var evalRecord = this._logic.genEventBindingEvalValue(eb, r);
var markPath = this._genMarkPathToRootAsCheckOnce(r);
var prevDefault = this._genUpdatePreventDefault(eb, r);
return `${evalRecord}\n${markPath}\n${prevDefault}`;
}
else {
return this._logic.genEventBindingEvalValue(eb, r);
}
}
/** @internal */
_genMarkPathToRootAsCheckOnce(r) {
var br = r.bindingRecord;
if (br.isDefaultChangeDetection()) {
return "";
}
else {
return `${this._names.getDetectorName(br.directiveRecord.directiveIndex)}.markPathToRootAsCheckOnce();`;
}
}
/** @internal */
_genUpdatePreventDefault(eb, r) {
var local = this._names.getEventLocalName(eb, r.selfIndex);
return `if (${local} === false) { ${this._names.getPreventDefaultAccesor()} = true};`;
}
/** @internal */
_maybeGenDehydrateDirectives() {
var destroyPipesCode = this._names.genPipeOnDestroy();
var destroyDirectivesCode = this._logic.genDirectivesOnDestroy(this.directiveRecords);
var dehydrateFieldsCode = this._names.genDehydrateFields();
if (!destroyPipesCode && !destroyDirectivesCode && !dehydrateFieldsCode)
return '';
return `${this.typeName}.prototype.dehydrateDirectives = function(destroyPipes) {
if (destroyPipes) {
${destroyPipesCode}
${destroyDirectivesCode}
}
${dehydrateFieldsCode}
}`;
}
/** @internal */
_maybeGenHydrateDirectives() {
var hydrateDirectivesCode = this._logic.genHydrateDirectives(this.directiveRecords);
var hydrateDetectorsCode = this._logic.genHydrateDetectors(this.directiveRecords);
if (!hydrateDirectivesCode && !hydrateDetectorsCode)
return '';
return `${this.typeName}.prototype.hydrateDirectives = function(directives) {
${hydrateDirectivesCode}
${hydrateDetectorsCode}
}`;
}
/** @internal */
_maybeGenAfterContentLifecycleCallbacks() {
var notifications = this._logic.genContentLifecycleCallbacks(this.directiveRecords);
if (notifications.length > 0) {
var directiveNotifications = notifications.join("\n");
return `
${this.typeName}.prototype.afterContentLifecycleCallbacksInternal = function() {
${directiveNotifications}
}
`;
}
else {
return '';
}
}
/** @internal */
_maybeGenAfterViewLifecycleCallbacks() {
var notifications = this._logic.genViewLifecycleCallbacks(this.directiveRecords);
if (notifications.length > 0) {
var directiveNotifications = notifications.join("\n");
return `
${this.typeName}.prototype.afterViewLifecycleCallbacksInternal = function() {
${directiveNotifications}
}
`;
}
else {
return '';
}
}
/** @internal */
_genAllRecords(rs) {
var codes = [];
this._endOfBlockIdxs = [];
for (let i = 0; i < rs.length; i++) {
let code;
let r = rs[i];
if (r.isLifeCycleRecord()) {
code = this._genDirectiveLifecycle(r);
}
else if (r.isPipeRecord()) {
code = this._genPipeCheck(r);
}
else if (r.isConditionalSkipRecord()) {
code = this._genConditionalSkip(r, this._names.getLocalName(r.contextIndex));
}
else if (r.isUnconditionalSkipRecord()) {
code = this._genUnconditionalSkip(r);
}
else {
code = this._genReferenceCheck(r);
}
code = `
${this._maybeFirstInBinding(r)}
${code}
${this._maybeGenLastInDirective(r)}
${this._genEndOfSkipBlock(i)}
`;
codes.push(code);
}
return codes.join("\n");
}
/** @internal */
_genConditionalSkip(r, condition) {
let maybeNegate = r.mode === RecordType.SkipRecordsIf ? '!' : '';
this._endOfBlockIdxs.push(r.fixedArgs[0] - 1);
return `if (${maybeNegate}${condition}) {`;
}
/** @internal */
_genUnconditionalSkip(r) {
this._endOfBlockIdxs.pop();
this._endOfBlockIdxs.push(r.fixedArgs[0] - 1);
return `} else {`;
}
/** @internal */
_genEndOfSkipBlock(protoIndex) {
if (!ListWrapper.isEmpty(this._endOfBlockIdxs)) {
let endOfBlock = ListWrapper.last(this._endOfBlockIdxs);
if (protoIndex === endOfBlock) {
this._endOfBlockIdxs.pop();
return '}';
}
}
return '';
}
/** @internal */
_genDirectiveLifecycle(r) {
if (r.name === "DoCheck") {
return this._genOnCheck(r);
}
else if (r.name === "OnInit") {
return this._genOnInit(r);
}
else if (r.name === "OnChanges") {
return this._genOnChange(r);
}
else {
throw new BaseException(`Unknown lifecycle event '${r.name}'`);
}
}
/** @internal */
_genPipeCheck(r) {
var context = this._names.getLocalName(r.contextIndex);
var argString = r.args.map((arg) => this._names.getLocalName(arg)).join(", ");
var oldValue = this._names.getFieldName(r.selfIndex);
var newValue = this._names.getLocalName(r.selfIndex);
var pipe = this._names.getPipeName(r.selfIndex);
var pipeName = r.name;
var init = `
if (${pipe} === ${this.changeDetectionUtilVarName}.uninitialized) {
${pipe} = ${this._names.getPipesAccessorName()}.get('${pipeName}');
}
`;
var read = `${newValue} = ${pipe}.pipe.transform(${context}, [${argString}]);`;
        var contextOrArgCheck = r.args.map((a) => this._names.getChangeName(a));
        contextOrArgCheck.push(this._names.getChangeName(r.contextIndex));
        var condition = `!${pipe}.pure || (${contextOrArgCheck.join(" || ")})`;
var check = `
if (${this.changeDetectionUtilVarName}.looseNotIdentical(${oldValue}, ${newValue})) {
${newValue} = ${this.changeDetectionUtilVarName}.unwrapValue(${newValue})
${this._genChangeMarker(r)}
${this._genUpdateDirectiveOrElement(r)}
${this._genAddToChanges(r)}
${oldValue} = ${newValue};
}
`;
var genCode = r.shouldBeChecked() ? `${read}${check}` : read;
if (r.isUsedByOtherRecord()) {
return `${init} if (${condition}) { ${genCode} } else { ${newValue} = ${oldValue}; }`;
}
else {
return `${init} if (${condition}) { ${genCode} }`;
}
}
/** @internal */
_genReferenceCheck(r) {
var oldValue = this._names.getFieldName(r.selfIndex);
var newValue = this._names.getLocalName(r.selfIndex);
var read = `
${this._logic.genPropertyBindingEvalValue(r)}
`;
var check = `
if (${this.changeDetectionUtilVarName}.looseNotIdentical(${oldValue}, ${newValue})) {
${this._genChangeMarker(r)}
${this._genUpdateDirectiveOrElement(r)}
${this._genAddToChanges(r)}
${oldValue} = ${newValue};
}
`;
var genCode = r.shouldBeChecked() ? `${read}${check}` : read;
if (r.isPureFunction()) {
var condition = r.args.map((a) => this._names.getChangeName(a)).join(" || ");
if (r.isUsedByOtherRecord()) {
return `if (${condition}) { ${genCode} } else { ${newValue} = ${oldValue}; }`;
}
else {
return `if (${condition}) { ${genCode} }`;
}
}
else {
return genCode;
}
}
/** @internal */
_genChangeMarker(r) {
return r.argumentToPureFunction ? `${this._names.getChangeName(r.selfIndex)} = true` : ``;
}
/** @internal */
_genUpdateDirectiveOrElement(r) {
if (!r.lastInBinding)
return "";
var newValue = this._names.getLocalName(r.selfIndex);
var oldValue = this._names.getFieldName(r.selfIndex);
var notifyDebug = this.genConfig.logBindingUpdate ? `this.logBindingUpdate(${newValue});` : "";
var br = r.bindingRecord;
if (br.target.isDirective()) {
var directiveProperty = `${this._names.getDirectiveName(br.directiveRecord.directiveIndex)}.${br.target.name}`;
return `
${this._genThrowOnChangeCheck(oldValue, newValue)}
${directiveProperty} = ${newValue};
${notifyDebug}
${IS_CHANGED_LOCAL} = true;
`;
}
else {
return `
${this._genThrowOnChangeCheck(oldValue, newValue)}
this.notifyDispatcher(${newValue});
${notifyDebug}
`;
}
}
/** @internal */
_genThrowOnChangeCheck(oldValue, newValue) {
if (assertionsEnabled()) {
return `
if(throwOnChange) {
this.throwOnChangeError(${oldValue}, ${newValue});
}
`;
}
else {
return '';
}
}
/** @internal */
_genAddToChanges(r) {
var newValue = this._names.getLocalName(r.selfIndex);
var oldValue = this._names.getFieldName(r.selfIndex);
if (!r.bindingRecord.callOnChanges())
return "";
return `${CHANGES_LOCAL} = this.addChange(${CHANGES_LOCAL}, ${oldValue}, ${newValue});`;
}
/** @internal */
_maybeFirstInBinding(r) {
var prev = ChangeDetectionUtil.protoByIndex(this.records, r.selfIndex - 1);
var firstInBinding = isBlank(prev) || prev.bindingRecord !== r.bindingRecord;
return firstInBinding && !r.bindingRecord.isDirectiveLifecycle() ?
`${this._names.getPropertyBindingIndex()} = ${r.propertyBindingIndex};` :
'';
}
/** @internal */
_maybeGenLastInDirective(r) {
if (!r.lastInDirective)
return "";
return `
${CHANGES_LOCAL} = null;
${this._genNotifyOnPushDetectors(r)}
${IS_CHANGED_LOCAL} = false;
`;
}
/** @internal */
_genOnCheck(r) {
var br = r.bindingRecord;
return `if (!throwOnChange) ${this._names.getDirectiveName(br.directiveRecord.directiveIndex)}.ngDoCheck();`;
}
/** @internal */
_genOnInit(r) {
var br = r.bindingRecord;
return `if (!throwOnChange && ${this._names.getStateName()} === ${this.changeDetectorStateVarName}.NeverChecked) ${this._names.getDirectiveName(br.directiveRecord.directiveIndex)}.ngOnInit();`;
}
/** @internal */
_genOnChange(r) {
var br = r.bindingRecord;
return `if (!throwOnChange && ${CHANGES_LOCAL}) ${this._names.getDirectiveName(br.directiveRecord.directiveIndex)}.ngOnChanges(${CHANGES_LOCAL});`;
}
/** @internal */
_genNotifyOnPushDetectors(r) {
var br = r.bindingRecord;
if (!r.lastInDirective || br.isDefaultChangeDetection())
return "";
var retVal = `
if(${IS_CHANGED_LOCAL}) {
${this._names.getDetectorName(br.directiveRecord.directiveIndex)}.markAsCheckOnce();
}
`;
return retVal;
}
}
|
distriopt_.py
|
import json
import os
import tempfile
from mininet.topo import Topo
import subprocess
class Mapper(object):
def __init__(self, topo, physical_network_file, mapper):
self.topo = topo
self.physical_network_file = physical_network_file
self.mapper = mapper
self.topo_file = self.create_topo_file(topo)
self.mapping_json_path = self._run_python3_distriopt(virtual_topo_file=self.topo_file,
physical_topo_file=physical_network_file,
mapper=mapper)
@staticmethod
def check_valid_path(physical_network_file):
pass
    def create_topo_file(self, topo):
assert isinstance(topo, Topo), "Invalid Network Format"
        # os.tempnam() is unsafe and removed in Python 3; mkstemp is a drop-in here.
        fd, filename = tempfile.mkstemp(suffix=".json")
        os.close(fd)
json_topo={"nodes": {}, "links": {}}
for node in topo.nodes():
attrs = {"cores": topo.nodeInfo(node).get("cores", 1),
"memory": topo.nodeInfo(node).get("memory", 100)}
json_topo["nodes"][node] = attrs
for (u, v, attrs) in topo.iterLinks(withInfo=True):
rate = attrs["bw"]
edge_attrs = {"rate": rate}
json_topo["links"][" ".join((u,v))]= edge_attrs
with open(filename, "w") as f:
json.dump(json_topo, f)
return filename
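    # Illustrative file contents (assumed shape, matching the loops above):
    #   {"nodes": {"h1": {"cores": 1, "memory": 100}},
    #    "links": {"h1 s1": {"rate": 10}}}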
def create_mapping(self):
with open(self.mapping_json_path, "r") as f:
mapping = json.load(f)
if "Infeasible" in mapping:
            print("MAPPING INFEASIBLE")
            exit(1)
elif "mapping" in mapping:
mapping = mapping["mapping"]
return mapping
else:
raise ValueError("Returned value by the script not managed {}".format(mapping))
def _run_python3_distriopt(self,virtual_topo_file, physical_topo_file, mapper, python3_script="/root/MaxiNet/MaxiNet/Frontend/distriopt_runner.py"):
python3_command = "python3 {} {} {} {}".format(python3_script,virtual_topo_file,physical_topo_file,mapper) # launch python3 script using bash
process = subprocess.Popen(python3_command.split(), stdout=subprocess.PIPE)
output, error = process.communicate()
        # return the temporary path for the mapping (strip the trailing newline)
        return output.strip()
|
daemon_args.rs
|
use std::{
net::{AddrParseError, SocketAddr},
path::PathBuf,
};
use rafx::renderer::daemon::AssetDaemonOpt;
use structopt::StructOpt;
/// Parameters to the asset daemon.
///
/// # Examples
///
/// ```bash
/// asset_daemon --db .assets_db --address "127.0.0.1:9999" assets
/// ```
#[derive(StructOpt, Debug, Clone)]
pub struct AssetDaemonArgs {
/// Path to the asset metadata database directory.
#[structopt(name = "db", long, parse(from_os_str), default_value = ".assets_db")]
pub db_dir: PathBuf,
/// Socket address for the daemon to listen for connections, e.g. "127.0.0.1:9999".
#[structopt(
short,
long,
parse(try_from_str = parse_socket_addr),
default_value = "127.0.0.1:9999"
)]
pub address: SocketAddr,
/// Directories to watch for assets.
#[structopt(parse(from_os_str), default_value = "assets")]
pub asset_dirs: Vec<PathBuf>,
}
impl Into<AssetDaemonOpt> for AssetDaemonArgs {
fn into(self) -> AssetDaemonOpt {
        AssetDaemonOpt {
            db_dir: self.db_dir,
address: self.address,
asset_dirs: self.asset_dirs,
}
}
}
/// Parses a string as a socket address.
fn parse_socket_addr(s: &str) -> std::result::Result<SocketAddr, AddrParseError> {
s.parse()
}
|
[cutz]mergekarr.py
|
from typing import List
import heapq
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def mergeKLists(self, lists: List[ListNode]) -> ListNode:
heap = []
root = res = ListNode(None)
        for i in range(len(lists)):
            if lists[i]:  # skip empty lists so we never push a None head
                heapq.heappush(heap, (lists[i].val, i, lists[i]))
while heap:
m = heapq.heappop(heap)
idx = m[1]
res.next = m[2]
res = res.next
if res.next:
heapq.heappush(heap, (res.next.val, idx, res.next))
return root.next
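# Illustrative usage (assumes l1, l2, l3 are heads of sorted lists); the index
# in each heap tuple breaks ties so ListNode objects are never compared:
#   merged = Solution().mergeKLists([l1, l2, l3])  # O(N log k)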
|
oauth2configs.go
|
package oauth
// Configs :
var Configs map[string]string
// vars
const (
RandomStateString = `
package oauth
// getRandomStateString :
func getRandomStateString() string {
return "pseudo-random"
}
`
)
func init() {
Configs = map[string]string{
"google": `
package oauth
import (
"gitlab.com/aifaniyi/go-libs/conf"
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
)
var googleOauthConfig *oauth2.Config
func init() {
googleOauthConfig = &oauth2.Config{
RedirectURL: conf.GetStringEnv("CALLBACK_URL", "http://localhost:8080/google/callback"),
ClientID: conf.GetStringEnv("GOOGLE_CLIENT_ID", "google_client_id"),
ClientSecret: conf.GetStringEnv("GOOGLE_CLIENT_SECRET", "google_client_secret"),
Scopes: []string{"https://www.googleapis.com/auth/userinfo.email"},
Endpoint: google.Endpoint,
}
}
// getGoogleOauthConfig :
func getGoogleOauthConfig() *oauth2.Config {
return googleOauthConfig
}
`,
"facebook": `
package oauth
import (
"gitlab.com/aifaniyi/go-libs/conf"
"golang.org/x/oauth2"
"golang.org/x/oauth2/facebook"
)
var facebookOauthConfig *oauth2.Config
func init() {
facebookOauthConfig = &oauth2.Config{
RedirectURL: conf.GetStringEnv("CALLBACK_URL", "http://localhost:8080/facebook/callback"),
ClientID: conf.GetStringEnv("FACEBOOK_CLIENT_ID", "facebook_client_id"),
ClientSecret: conf.GetStringEnv("FACEBOOK_CLIENT_SECRET", "facebook_client_secret"),
Scopes: []string{"public_profile"},
Endpoint: facebook.Endpoint,
}
}
// getFacebookOauthConfig :
func getFacebookOauthConfig() *oauth2.Config {
return facebookOauthConfig
}
`,
}
}
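// Illustrative lookup (hypothetical caller that writes the generated source to disk):
//   src := Configs["google"] // Go source text for the Google oauth2.Config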
|
index.tsx
|
import config from '@setitheme/core';
import React, { useState } from 'react';
import ReactDOM from 'react-dom';
import Icon from '../../src/react/Icon';
import iconList from '../../lib/icon-list.json';
import '@styles/seti.scss';
// Remove color keys we don't want
delete config.colors.cyan;
delete config.colors.grey;
delete config.colors.magenta;
delete config.colors.misc;
function ExampleIcon() {
const [ icon, setIcon ] = useState('react');
const [ color, setColor ] = useState('blue');
    function onInputChange(e: React.ChangeEvent<HTMLInputElement>) {
setIcon(e.target.value);
}
return (
        <main className="seti-theme-example">
<header>
<h1>React Icon Component</h1>
<p>This is an example of the react icon component.</p>
<p>Type an icon name in the input below and you'll see it rendered to the right.</p>
</header>
<div className="seti-theme-example__inputs">
<input onChange={onInputChange} value={icon} className='icon-input' />
<Icon
icon={icon}
color={color}
wrapper={false}
wrapperClass={null}
wrapperType="span"
/>
</div>
<section className="section">
<h2>Colors</h2>
<ul className="seti-theme-example__list">
{Object.keys(config.colors).map((key, value) => {
return (
<li className="seti-theme-example__list-item" key={key}>
<button className="seti-btn" onClick={() => { setColor(key) }}>
{key}
</button>
</li>
)
})}
</ul>
</section>
<section className="section">
<h2>Icons</h2>
<ul className="seti-theme-example__list">
{Object.keys(iconList).map((key, value) => {
return (
<li className="seti-theme-example__list-item" key={key}>
<button className="seti-btn" onClick={() => { setIcon(key) }}>
{key}
</button>
</li>
)
})}
</ul>
</section>
</main>
);
}
const root = document.getElementById('seti-react');
ReactDOM.render(<ExampleIcon />, root);
|