prompt
large_stringlengths 70
991k
| completion
large_stringlengths 0
1.02k
|
---|---|
<|file_name|>suggest-labels.rs<|end_file_name|><|fim▁begin|>#[allow(unreachable_code, unused_labels)]
fn main() {
'foo: loop {
break 'fo; //~ ERROR use of undeclared label
}<|fim▁hole|> }
'longlabel: loop {
'longlabel1: loop {
break 'longlable; //~ ERROR use of undeclared label
}
}
}<|fim▁end|>
|
'bar: loop {
continue 'bor; //~ ERROR use of undeclared label
|
<|file_name|>monitor.go<|end_file_name|><|fim▁begin|>package monitor
const (
// RedisPrefix 参数:business。
RedisPrefix = "monitor_stats_%d_"
// SuffixVideo 视频停留统计。参数:state
SuffixVideo = "%d"
// SuffixArc 稿件停留统计。参数:round。参数:state。
SuffixArc = "%d_%d"
BusVideo = 1
BusArc = 2
NotifyTypeEmail = 1
NotityTypeSms = 2
RuleStateOK = 1
RuleStateDisable = 0
)
type RuleResultRes struct {
Code int `json:"code"`
Data []*RuleResultData `json:"data"`
}
type RuleResultData struct {
Rule *Rule `json:"rule"`
Stats *Stats `json:"stats"`
}
// Rule 监控规则信息
type Rule struct {
ID int64 `json:"id"`
Type int8 `json:"type"`
Business int8 `json:"business"`
Name string `json:"name"`
State int8 `json:"state"`
STime string `json:"s_time"`
ETime string `json:"e_time"`
RuleConf *RuleConf `json:"rule_conf"`
}
// RuleConf 监控方案配置结构体
type RuleConf struct {
Name string `json:"name"`
MoniCdt map[string]struct { //监控方案的监控条件
Comp string `json:"comparison"`
Value int64 `json:"value"`
} `json:"moni_cdt"`
NotifyCdt map[string]struct { //达到发送通知的条件
Comp string `json:"comparison"`
Value int64 `json:"value"`
} `json:"notify_cdt"`
Notify struct { //通知类型配置
Way int8 `json:"way"`
Member []string `json:"member"`
} `json:"notify"`<|fim▁hole|> TotalCount int `json:"total_count"`
MoniCount int `json:"moni_count"`
MaxTime int `json:"max_time"`
}<|fim▁end|>
|
}
type Stats struct {
|
<|file_name|>app.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python2.5
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#<|fim▁hole|># WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple wave robot WSGI application and forwarding middleware."""
import webob
import webob.exc
from api import robot_abstract
import logging
class RobotMiddleware(object):
"""WSGI middleware that routes /_wave/ requests to a robot wsgi app."""
def __init__(self, robot_app, main_app):
self._robot_app = robot_app
self._main_app = main_app
def __call__(self, environ, start_response):
path = environ['PATH_INFO']
if path.startswith('/_wave/'):
return self._robot_app(environ, start_response)
return self._main_app(environ, start_response)
class SimpleRobotApp(object):
"""WSGI application for serving an abstract robot.
This is just like the Robot class in the Wave api, but it uses the plain WebOb
request/response objects instead of the analogous AppEngine objects.
"""
def __init__(self, robot):
self._robot = robot
def capabilities(self):
xml = self._robot.GetCapabilitiesXml()
response = webob.Response(content_type='text/xml', body=xml)
response.cache_control = 'Private' # XXX
return response
def profile(self):
xml = self._robot.GetProfileJson()
response = webob.Response(content_type='application/json', body=xml)
response.cache_control = 'Private' # XXX
return response
def jsonrpc(self, req):
json_body = req.body
logging.info('Incoming: %s', json_body)
context, events = robot_abstract.ParseJSONBody(json_body)
for event in events:
self._robot.HandleEvent(event, context)
json_response = robot_abstract.SerializeContext(
context, self._robot.version)
logging.info('Outgoing: %s', json_response)
return webob.Response(content_type='application/json',
body=json_response)
def __call__(self, environ, start_response):
req = webob.Request(environ)
if req.path_info == '/_wave/capabilities.xml' and req.method == 'GET':
response = self.capabilities()
elif req.path_info == '/_wave/robot/profile' and req.method == 'GET':
response = self.profile()
elif req.path_info == '/_wave/robot/jsonrpc' and req.method == 'POST':
response = self.jsonrpc(req)
else:
response = webob.exc.HTTPNotFound()
return response(environ, start_response)<|fim▁end|>
|
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
|
<|file_name|>CommonIO.cpp<|end_file_name|><|fim▁begin|>/*
This file is part of solidity.
solidity is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
solidity is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with solidity. If not, see <http://www.gnu.org/licenses/>.
*/
// SPDX-License-Identifier: GPL-3.0
/** @file CommonIO.cpp
* @author Gav Wood <[email protected]>
* @date 2014
*/
#include <libsolutil/CommonIO.h>
#include <libsolutil/Assertions.h>
#include <fstream><|fim▁hole|>#else
#include <unistd.h>
#include <termios.h>
#endif
using namespace std;
using namespace solidity::util;
namespace
{
template <typename T>
inline T readFile(boost::filesystem::path const& _file)
{
assertThrow(boost::filesystem::exists(_file), FileNotFound, _file.string());
// ifstream does not always fail when the path leads to a directory. Instead it might succeed
// with tellg() returning a nonsensical value so that std::length_error gets raised in resize().
assertThrow(boost::filesystem::is_regular_file(_file), NotAFile, _file.string());
T ret;
size_t const c_elementSize = sizeof(typename T::value_type);
std::ifstream is(_file.string(), std::ifstream::binary);
// Technically, this can still fail even though we checked above because FS content can change at any time.
assertThrow(is, FileNotFound, _file.string());
// get length of file:
is.seekg(0, is.end);
streamoff length = is.tellg();
if (length == 0)
return ret; // do not read empty file (MSVC does not like it)
is.seekg(0, is.beg);
ret.resize((static_cast<size_t>(length) + c_elementSize - 1) / c_elementSize);
is.read(const_cast<char*>(reinterpret_cast<char const*>(ret.data())), static_cast<streamsize>(length));
return ret;
}
}
string solidity::util::readFileAsString(boost::filesystem::path const& _file)
{
return readFile<string>(_file);
}
string solidity::util::readUntilEnd(istream& _stdin)
{
ostringstream ss;
ss << _stdin.rdbuf();
return ss.str();
}
string solidity::util::readBytes(istream& _input, size_t _length)
{
string output;
output.resize(_length);
_input.read(output.data(), static_cast<streamsize>(_length));
// If read() reads fewer bytes it sets failbit in addition to eofbit.
if (_input.fail())
output.resize(static_cast<size_t>(_input.gcount()));
return output;
}
#if defined(_WIN32)
class DisableConsoleBuffering
{
public:
DisableConsoleBuffering()
{
m_stdin = GetStdHandle(STD_INPUT_HANDLE);
GetConsoleMode(m_stdin, &m_oldMode);
SetConsoleMode(m_stdin, m_oldMode & (~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT)));
}
~DisableConsoleBuffering()
{
SetConsoleMode(m_stdin, m_oldMode);
}
private:
HANDLE m_stdin;
DWORD m_oldMode;
};
#else
class DisableConsoleBuffering
{
public:
DisableConsoleBuffering()
{
tcgetattr(0, &m_termios);
m_termios.c_lflag &= ~tcflag_t(ICANON);
m_termios.c_lflag &= ~tcflag_t(ECHO);
m_termios.c_cc[VMIN] = 1;
m_termios.c_cc[VTIME] = 0;
tcsetattr(0, TCSANOW, &m_termios);
}
~DisableConsoleBuffering()
{
m_termios.c_lflag |= ICANON;
m_termios.c_lflag |= ECHO;
tcsetattr(0, TCSADRAIN, &m_termios);
}
private:
struct termios m_termios;
};
#endif
int solidity::util::readStandardInputChar()
{
DisableConsoleBuffering disableConsoleBuffering;
return cin.get();
}
string solidity::util::absolutePath(string const& _path, string const& _reference)
{
boost::filesystem::path p(_path);
// Anything that does not start with `.` is an absolute path.
if (p.begin() == p.end() || (*p.begin() != "." && *p.begin() != ".."))
return _path;
boost::filesystem::path result(_reference);
// If filename is "/", then remove_filename() throws.
// See: https://github.com/boostorg/filesystem/issues/176
if (result.filename() != boost::filesystem::path("/"))
result.remove_filename();
for (boost::filesystem::path::iterator it = p.begin(); it != p.end(); ++it)
if (*it == "..")
result = result.parent_path();
else if (*it != ".")
result /= *it;
return result.generic_string();
}
string solidity::util::sanitizePath(string const& _path) {
return boost::filesystem::path(_path).generic_string();
}<|fim▁end|>
|
#if defined(_WIN32)
#include <windows.h>
|
<|file_name|>alertify.py<|end_file_name|><|fim▁begin|># @Author: ganeshkumarm
# @Date: 2016-11-19T19:20:11+05:30
# @Last modified by: ganeshkumarm
# @Last modified time: 2016-11-19T19:20:45+05:30
#Built in modules
import os
import sys
import time
import subprocess
import datetime
import platform
from win10toast import ToastNotifier
#Used defined module
import exception
class Notify(object):
def __init__(self):
self.title = 'Alert From Alertify'
self.platform = platform.system()
self.toaster = ToastNotifier()
def counter(self, notify_time, message):
s = 00
m = notify_time
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s = 59
m -= 1
while s >= 00:
if m == -1:
print "Completed"
print "Bye"
return
if self.platform == 'Linux':
os.system('clear')
elif self.platform == 'Windows':
os.system('cls');
print "Alertify"
print "-------"
print message
print "-" * len(message)
print "Alerts in %d minutes %d seconds ..." % (m, s)
time.sleep(1)
s -= 1
if s == 0:
s = 59
m -= 1
<|fim▁hole|> try:
time.sleep(notify_time * 60)
except Exception, e:
print e
def sendNotification(self, message, start_time):
try:
end_time = datetime.datetime.now()
diff_time_in_delta = end_time - start_time
diff_time_in_mins = divmod(diff_time_in_delta.days * 86400 + diff_time_in_delta.seconds, 60)
diff_time_msg = ' (Set ' + str(diff_time_in_mins[0]) + ' minutes ' + str(diff_time_in_mins[1]) + ' seconds ago)'
if self.platform == 'Linux':
os.system('notify-send "'+self.title+'" "'+message+'\r'+diff_time_msg+'"')
elif self.platform == 'Windows':
self.toaster.show_toast(self.title, message+'\n'+str(diff_time_msg), duration=300)
except Exception, e:
print e
def main():
try:
counter_flag = True
notify = Notify()
if len(sys.argv) <= 2:
try:
raise exception.PassArgument("Please pass Time and Message as arguments")
except exception.PassArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = sys.argv[1]
if not notify_time.isdigit():
try:
raise exception.InvalidArgument("Time parameter must be a positive integer value")
except exception.InvalidArgument, e:
print e.args
print "Exiting ...."
sys.exit()
notify_time = int(sys.argv[1])
if sys.argv[len(sys.argv) - 1] == '--no-counter':
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv) - 1)])
counter_flag = False
else:
message = ' '.join([sys.argv[i] for i in range(2, len(sys.argv))])
start_time = datetime.datetime.now()
if counter_flag:
notify.counter(notify_time, message)
else:
notify.sleep_time(notify_time)
notify.sendNotification(message, start_time)
except KeyboardInterrupt:
print "\nQuitting ..."
print "Bye"
if __name__ == "__main__":
main()<|fim▁end|>
|
def sleep_time(self, notify_time):
|
<|file_name|>RParser.java<|end_file_name|><|fim▁begin|>// Generated from /mnt/hdd/Programming/Projects/Groovy/intellidots/src/main/antlr/R.g4 by ANTLR 4.2.2
package ua.edu.hneu.ast.parsers;
import org.antlr.v4.runtime.atn.*;
import org.antlr.v4.runtime.dfa.DFA;
import org.antlr.v4.runtime.*;
import org.antlr.v4.runtime.tree.*;
import java.util.List;
@SuppressWarnings({"all", "warnings", "unchecked", "unused", "cast"})
public class RParser extends Parser {
protected static final DFA[] _decisionToDFA;
protected static final PredictionContextCache _sharedContextCache =
new PredictionContextCache();
public static final int
T__53=1, T__52=2, T__51=3, T__50=4, T__49=5, T__48=6, T__47=7, T__46=8,
T__45=9, T__44=10, T__43=11, T__42=12, T__41=13, T__40=14, T__39=15, T__38=16,
T__37=17, T__36=18, T__35=19, T__34=20, T__33=21, T__32=22, T__31=23,
T__30=24, T__29=25, T__28=26, T__27=27, T__26=28, T__25=29, T__24=30,
T__23=31, T__22=32, T__21=33, T__20=34, T__19=35, T__18=36, T__17=37,
T__16=38, T__15=39, T__14=40, T__13=41, T__12=42, T__11=43, T__10=44,
T__9=45, T__8=46, T__7=47, T__6=48, T__5=49, T__4=50, T__3=51, T__2=52,
T__1=53, T__0=54, HEX=55, INT=56, FLOAT=57, COMPLEX=58, STRING=59, ID=60,
USER_OP=61, NL=62, WS=63;
public static final String[] tokenNames = {
"<INVALID>", "'->>'", "'!='", "'while'", "'{'", "'&&'", "'::'", "'='",
"'for'", "'^'", "'$'", "'('", "'Inf'", "','", "'repeat'", "'NA'", "'<-'",
"'FALSE'", "':::'", "'>='", "'[['", "'<'", "']'", "'~'", "'@'", "'function'",
"'NULL'", "'+'", "'TRUE'", "'/'", "'||'", "';'", "'}'", "'if'", "'?'",
"':='", "'<='", "'break'", "'&'", "'*'", "'->'", "'...'", "'NaN'", "':'",
"'['", "'|'", "'=='", "'>'", "'!'", "'in'", "'else'", "'next'", "')'",
"'-'", "'<<-'", "HEX", "INT", "FLOAT", "COMPLEX", "STRING", "ID", "USER_OP",
"NL", "WS"
};
public static final int
RULE_prog = 0, RULE_expr = 1, RULE_exprlist = 2, RULE_formlist = 3, RULE_form = 4,
RULE_sublist = 5, RULE_sub = 6;
public static final String[] ruleNames = {
"prog", "expr", "exprlist", "formlist", "form", "sublist", "sub"
};
@Override
public String getGrammarFileName() { return "R.g4"; }
@Override
public String[] getTokenNames() { return tokenNames; }
@Override
public String[] getRuleNames() { return ruleNames; }
@Override
public String getSerializedATN() { return _serializedATN; }
@Override
public ATN getATN() { return _ATN; }
public RParser(TokenStream input) {
super(input);
_interp = new ParserATNSimulator(this,_ATN,_decisionToDFA,_sharedContextCache);
}
public static class ProgContext extends ParserRuleContext {
public List<TerminalNode> NL() { return getTokens(RParser.NL); }
public List<ExprContext> expr() {
return getRuleContexts(ExprContext.class);
}
public TerminalNode EOF() { return getToken(RParser.EOF, 0); }
public ExprContext expr(int i) {
return getRuleContext(ExprContext.class,i);
}
public TerminalNode NL(int i) {
return getToken(RParser.NL, i);
}
public ProgContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_prog; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterProg(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitProg(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitProg(this);
else return visitor.visitChildren(this);
}
}
public final ProgContext prog() throws RecognitionException {
ProgContext _localctx = new ProgContext(_ctx, getState());
enterRule(_localctx, 0, RULE_prog);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
setState(20);
_errHandler.sync(this);
_la = _input.LA(1);
while ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 3) | (1L << 4) | (1L << 8) | (1L << 11) | (1L << 12) | (1L << 14) | (1L << 15) | (1L << 17) | (1L << 23) | (1L << 25) | (1L << 26) | (1L << 27) | (1L << 28) | (1L << 33) | (1L << 34) | (1L << 37) | (1L << 42) | (1L << 48) | (1L << 51) | (1L << 53) | (1L << HEX) | (1L << INT) | (1L << FLOAT) | (1L << COMPLEX) | (1L << STRING) | (1L << ID) | (1L << NL))) != 0)) {
{
setState(18);
switch (_input.LA(1)) {
case 3:
case 4:
case 8:
case 11:
case 12:
case 14:
case 15:
case 17:
case 23:
case 25:
case 26:
case 27:
case 28:
case 33:
case 34:
case 37:
case 42:
case 48:
case 51:
case 53:
case HEX:
case INT:
case FLOAT:
case COMPLEX:
case STRING:
case ID:
{
setState(14); expr(0);
setState(15);
_la = _input.LA(1);
if ( !(_la==31 || _la==NL) ) {
_errHandler.recoverInline(this);
}
consume();
}
break;
case NL:
{
setState(17); match(NL);
}
break;
default:
throw new NoViableAltException(this);
}
}
setState(22);
_errHandler.sync(this);
_la = _input.LA(1);
}
setState(23); match(EOF);
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public static class ExprContext extends ParserRuleContext {
public TerminalNode ID() { return getToken(RParser.ID, 0); }
public TerminalNode HEX() { return getToken(RParser.HEX, 0); }
public List<ExprContext> expr() {
return getRuleContexts(ExprContext.class);
}
public TerminalNode USER_OP() { return getToken(RParser.USER_OP, 0); }
public ExprContext expr(int i) {
return getRuleContext(ExprContext.class,i);
}
public SublistContext sublist() {
return getRuleContext(SublistContext.class,0);
}
public FormlistContext formlist() {
return getRuleContext(FormlistContext.class,0);
}
public TerminalNode STRING() { return getToken(RParser.STRING, 0); }
public ExprlistContext exprlist() {
return getRuleContext(ExprlistContext.class,0);
}
public TerminalNode INT() { return getToken(RParser.INT, 0); }
public TerminalNode COMPLEX() { return getToken(RParser.COMPLEX, 0); }
public TerminalNode FLOAT() { return getToken(RParser.FLOAT, 0); }
public ExprContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_expr; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterExpr(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitExpr(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitExpr(this);
else return visitor.visitChildren(this);
}
}
public final ExprContext expr() throws RecognitionException {
return expr(0);
}
private ExprContext expr(int _p) throws RecognitionException {
ParserRuleContext _parentctx = _ctx;
int _parentState = getState();
ExprContext _localctx = new ExprContext(_ctx, _parentState);
ExprContext _prevctx = _localctx;
int _startState = 2;
enterRecursionRule(_localctx, 2, RULE_expr, _p);
int _la;
try {
int _alt;
enterOuterAlt(_localctx, 1);
{
setState(93);
switch ( getInterpreter().adaptivePredict(_input,3,_ctx) ) {
case 1:
{
setState(26);
_la = _input.LA(1);
if ( !(_la==27 || _la==53) ) {
_errHandler.recoverInline(this);
}
consume();
setState(27); expr(36);
}
break;
case 2:
{
setState(28); match(48);
setState(29); expr(30);
}
break;
case 3:
{
setState(30); match(23);
setState(31); expr(27);
}
break;
case 4:
{
setState(32); match(25);
setState(33); match(11);
setState(35);
_la = _input.LA(1);
if (_la==41 || _la==ID) {
{
setState(34); formlist();
}
}
setState(37); match(52);
setState(38); expr(24);
}
break;
case 5:
{
setState(39); match(14);
setState(40); expr(17);
}
break;
case 6:
{
setState(41); match(34);
setState(42); expr(16);
}
break;
case 7:
{
setState(43); match(4);
setState(44); exprlist();
setState(45); match(32);
}
break;
case 8:
{
setState(47); match(33);
setState(48); match(11);
setState(49); expr(0);
setState(50); match(52);
setState(51); expr(0);
}
break;
case 9:
{
setState(53); match(33);
setState(54); match(11);
setState(55); expr(0);
setState(56); match(52);
setState(57); expr(0);
setState(58); match(50);
setState(59); expr(0);
}
break;
case 10:
{
setState(61); match(8);
setState(62); match(11);
setState(63); match(ID);
setState(64); match(49);
setState(65); expr(0);
setState(66); match(52);
setState(67); expr(0);
}
break;
case 11:
{
setState(69); match(3);
setState(70); match(11);
setState(71); expr(0);
setState(72); match(52);
setState(73); expr(0);
}
break;
case 12:
{
setState(75); match(51);
}
break;
case 13:
{
setState(76); match(37);
}
break;
case 14:
{
setState(77); match(11);
setState(78); expr(0);
setState(79); match(52);
}
break;
case 15:
{
setState(81); match(ID);
}
break;
case 16:
{
setState(82); match(STRING);
}
break;
case 17:
{
setState(83); match(HEX);
}
break;
case 18:
{
setState(84); match(INT);
}
break;
case 19:
{
setState(85); match(FLOAT);
}
break;
case 20:
{
setState(86); match(COMPLEX);
}
break;
case 21:
{
setState(87); match(26);
}
break;
case 22:
{
setState(88); match(15);
}
break;
case 23:
{
setState(89); match(12);
}
break;
case 24:
{
setState(90); match(42);
}
break;
case 25:
{
setState(91); match(28);
}
break;
case 26:
{
setState(92); match(17);
}
break;
}
_ctx.stop = _input.LT(-1);
setState(149);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,5,_ctx);
while ( _alt!=2 && _alt!=ATN.INVALID_ALT_NUMBER ) {
if ( _alt==1 ) {
if ( _parseListeners!=null ) triggerExitRuleEvent();
_prevctx = _localctx;
{
setState(147);
switch ( getInterpreter().adaptivePredict(_input,4,_ctx) ) {
case 1:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(95);
if (!(precpred(_ctx, 39))) throw new FailedPredicateException(this, "precpred(_ctx, 39)");
setState(96);
_la = _input.LA(1);
if ( !(_la==6 || _la==18) ) {
_errHandler.recoverInline(this);
}
consume();
setState(97); expr(40);
}
break;
case 2:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(98);
if (!(precpred(_ctx, 38))) throw new FailedPredicateException(this, "precpred(_ctx, 38)");
setState(99);
_la = _input.LA(1);
if ( !(_la==10 || _la==24) ) {
_errHandler.recoverInline(this);
}
consume();
setState(100); expr(39);<|fim▁hole|>
case 3:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(101);
if (!(precpred(_ctx, 37))) throw new FailedPredicateException(this, "precpred(_ctx, 37)");
setState(102); match(9);
setState(103); expr(38);
}
break;
case 4:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(104);
if (!(precpred(_ctx, 35))) throw new FailedPredicateException(this, "precpred(_ctx, 35)");
setState(105); match(43);
setState(106); expr(36);
}
break;
case 5:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(107);
if (!(precpred(_ctx, 34))) throw new FailedPredicateException(this, "precpred(_ctx, 34)");
setState(108); match(USER_OP);
setState(109); expr(35);
}
break;
case 6:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(110);
if (!(precpred(_ctx, 33))) throw new FailedPredicateException(this, "precpred(_ctx, 33)");
setState(111);
_la = _input.LA(1);
if ( !(_la==29 || _la==39) ) {
_errHandler.recoverInline(this);
}
consume();
setState(112); expr(34);
}
break;
case 7:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(113);
if (!(precpred(_ctx, 32))) throw new FailedPredicateException(this, "precpred(_ctx, 32)");
setState(114);
_la = _input.LA(1);
if ( !(_la==27 || _la==53) ) {
_errHandler.recoverInline(this);
}
consume();
setState(115); expr(33);
}
break;
case 8:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(116);
if (!(precpred(_ctx, 31))) throw new FailedPredicateException(this, "precpred(_ctx, 31)");
setState(117);
_la = _input.LA(1);
if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 2) | (1L << 19) | (1L << 21) | (1L << 36) | (1L << 46) | (1L << 47))) != 0)) ) {
_errHandler.recoverInline(this);
}
consume();
setState(118); expr(32);
}
break;
case 9:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(119);
if (!(precpred(_ctx, 29))) throw new FailedPredicateException(this, "precpred(_ctx, 29)");
setState(120);
_la = _input.LA(1);
if ( !(_la==5 || _la==38) ) {
_errHandler.recoverInline(this);
}
consume();
setState(121); expr(30);
}
break;
case 10:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(122);
if (!(precpred(_ctx, 28))) throw new FailedPredicateException(this, "precpred(_ctx, 28)");
setState(123);
_la = _input.LA(1);
if ( !(_la==30 || _la==45) ) {
_errHandler.recoverInline(this);
}
consume();
setState(124); expr(29);
}
break;
case 11:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(125);
if (!(precpred(_ctx, 26))) throw new FailedPredicateException(this, "precpred(_ctx, 26)");
setState(126); match(23);
setState(127); expr(27);
}
break;
case 12:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(128);
if (!(precpred(_ctx, 25))) throw new FailedPredicateException(this, "precpred(_ctx, 25)");
setState(129);
_la = _input.LA(1);
if ( !((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 1) | (1L << 7) | (1L << 16) | (1L << 35) | (1L << 40) | (1L << 54))) != 0)) ) {
_errHandler.recoverInline(this);
}
consume();
setState(130); expr(26);
}
break;
case 13:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(131);
if (!(precpred(_ctx, 41))) throw new FailedPredicateException(this, "precpred(_ctx, 41)");
setState(132); match(20);
setState(133); sublist();
setState(134); match(22);
setState(135); match(22);
}
break;
case 14:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(137);
if (!(precpred(_ctx, 40))) throw new FailedPredicateException(this, "precpred(_ctx, 40)");
setState(138); match(44);
setState(139); sublist();
setState(140); match(22);
}
break;
case 15:
{
_localctx = new ExprContext(_parentctx, _parentState);
pushNewRecursionContext(_localctx, _startState, RULE_expr);
setState(142);
if (!(precpred(_ctx, 23))) throw new FailedPredicateException(this, "precpred(_ctx, 23)");
setState(143); match(11);
setState(144); sublist();
setState(145); match(52);
}
break;
}
}
}
setState(151);
_errHandler.sync(this);
_alt = getInterpreter().adaptivePredict(_input,5,_ctx);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
unrollRecursionContexts(_parentctx);
}
return _localctx;
}
public static class ExprlistContext extends ParserRuleContext {
public List<TerminalNode> NL() { return getTokens(RParser.NL); }
public List<ExprContext> expr() {
return getRuleContexts(ExprContext.class);
}
public ExprContext expr(int i) {
return getRuleContext(ExprContext.class,i);
}
public TerminalNode NL(int i) {
return getToken(RParser.NL, i);
}
public ExprlistContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_exprlist; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterExprlist(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitExprlist(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitExprlist(this);
else return visitor.visitChildren(this);
}
}
public final ExprlistContext exprlist() throws RecognitionException {
ExprlistContext _localctx = new ExprlistContext(_ctx, getState());
enterRule(_localctx, 4, RULE_exprlist);
int _la;
try {
setState(163);
switch (_input.LA(1)) {
case 3:
case 4:
case 8:
case 11:
case 12:
case 14:
case 15:
case 17:
case 23:
case 25:
case 26:
case 27:
case 28:
case 33:
case 34:
case 37:
case 42:
case 48:
case 51:
case 53:
case HEX:
case INT:
case FLOAT:
case COMPLEX:
case STRING:
case ID:
enterOuterAlt(_localctx, 1);
{
setState(152); expr(0);
setState(159);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==31 || _la==NL) {
{
{
setState(153);
_la = _input.LA(1);
if ( !(_la==31 || _la==NL) ) {
_errHandler.recoverInline(this);
}
consume();
setState(155);
_la = _input.LA(1);
if ((((_la) & ~0x3f) == 0 && ((1L << _la) & ((1L << 3) | (1L << 4) | (1L << 8) | (1L << 11) | (1L << 12) | (1L << 14) | (1L << 15) | (1L << 17) | (1L << 23) | (1L << 25) | (1L << 26) | (1L << 27) | (1L << 28) | (1L << 33) | (1L << 34) | (1L << 37) | (1L << 42) | (1L << 48) | (1L << 51) | (1L << 53) | (1L << HEX) | (1L << INT) | (1L << FLOAT) | (1L << COMPLEX) | (1L << STRING) | (1L << ID))) != 0)) {
{
setState(154); expr(0);
}
}
}
}
setState(161);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
break;
case 32:
enterOuterAlt(_localctx, 2);
{
}
break;
default:
throw new NoViableAltException(this);
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public static class FormlistContext extends ParserRuleContext {
public FormContext form(int i) {
return getRuleContext(FormContext.class,i);
}
public List<FormContext> form() {
return getRuleContexts(FormContext.class);
}
public FormlistContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_formlist; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterFormlist(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitFormlist(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitFormlist(this);
else return visitor.visitChildren(this);
}
}
public final FormlistContext formlist() throws RecognitionException {
FormlistContext _localctx = new FormlistContext(_ctx, getState());
enterRule(_localctx, 6, RULE_formlist);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
setState(165); form();
setState(170);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==13) {
{
{
setState(166); match(13);
setState(167); form();
}
}
setState(172);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public static class FormContext extends ParserRuleContext {
public TerminalNode ID() { return getToken(RParser.ID, 0); }
public ExprContext expr() {
return getRuleContext(ExprContext.class,0);
}
public FormContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_form; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterForm(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitForm(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitForm(this);
else return visitor.visitChildren(this);
}
}
public final FormContext form() throws RecognitionException {
FormContext _localctx = new FormContext(_ctx, getState());
enterRule(_localctx, 8, RULE_form);
try {
setState(178);
switch ( getInterpreter().adaptivePredict(_input,10,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
setState(173); match(ID);
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
setState(174); match(ID);
setState(175); match(7);
setState(176); expr(0);
}
break;
case 3:
enterOuterAlt(_localctx, 3);
{
setState(177); match(41);
}
break;
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public static class SublistContext extends ParserRuleContext {
public SubContext sub(int i) {
return getRuleContext(SubContext.class,i);
}
public List<SubContext> sub() {
return getRuleContexts(SubContext.class);
}
public SublistContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_sublist; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterSublist(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitSublist(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitSublist(this);
else return visitor.visitChildren(this);
}
}
public final SublistContext sublist() throws RecognitionException {
SublistContext _localctx = new SublistContext(_ctx, getState());
enterRule(_localctx, 10, RULE_sublist);
int _la;
try {
enterOuterAlt(_localctx, 1);
{
setState(180); sub();
setState(185);
_errHandler.sync(this);
_la = _input.LA(1);
while (_la==13) {
{
{
setState(181); match(13);
setState(182); sub();
}
}
setState(187);
_errHandler.sync(this);
_la = _input.LA(1);
}
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public static class SubContext extends ParserRuleContext {
public TerminalNode ID() { return getToken(RParser.ID, 0); }
public ExprContext expr() {
return getRuleContext(ExprContext.class,0);
}
public TerminalNode STRING() { return getToken(RParser.STRING, 0); }
public SubContext(ParserRuleContext parent, int invokingState) {
super(parent, invokingState);
}
@Override public int getRuleIndex() { return RULE_sub; }
@Override
public void enterRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).enterSub(this);
}
@Override
public void exitRule(ParseTreeListener listener) {
if ( listener instanceof RListener ) ((RListener)listener).exitSub(this);
}
@Override
public <T> T accept(ParseTreeVisitor<? extends T> visitor) {
if ( visitor instanceof RVisitor ) return ((RVisitor<? extends T>)visitor).visitSub(this);
else return visitor.visitChildren(this);
}
}
public final SubContext sub() throws RecognitionException {
SubContext _localctx = new SubContext(_ctx, getState());
enterRule(_localctx, 12, RULE_sub);
try {
setState(206);
switch ( getInterpreter().adaptivePredict(_input,12,_ctx) ) {
case 1:
enterOuterAlt(_localctx, 1);
{
setState(188); expr(0);
}
break;
case 2:
enterOuterAlt(_localctx, 2);
{
setState(189); match(ID);
setState(190); match(7);
}
break;
case 3:
enterOuterAlt(_localctx, 3);
{
setState(191); match(ID);
setState(192); match(7);
setState(193); expr(0);
}
break;
case 4:
enterOuterAlt(_localctx, 4);
{
setState(194); match(STRING);
setState(195); match(7);
}
break;
case 5:
enterOuterAlt(_localctx, 5);
{
setState(196); match(STRING);
setState(197); match(7);
setState(198); expr(0);
}
break;
case 6:
enterOuterAlt(_localctx, 6);
{
setState(199); match(26);
setState(200); match(7);
}
break;
case 7:
enterOuterAlt(_localctx, 7);
{
setState(201); match(26);
setState(202); match(7);
setState(203); expr(0);
}
break;
case 8:
enterOuterAlt(_localctx, 8);
{
setState(204); match(41);
}
break;
case 9:
enterOuterAlt(_localctx, 9);
{
}
break;
}
}
catch (RecognitionException re) {
_localctx.exception = re;
_errHandler.reportError(this, re);
_errHandler.recover(this, re);
}
finally {
exitRule();
}
return _localctx;
}
public boolean sempred(RuleContext _localctx, int ruleIndex, int predIndex) {
switch (ruleIndex) {
case 1: return expr_sempred((ExprContext)_localctx, predIndex);
}
return true;
}
private boolean expr_sempred(ExprContext _localctx, int predIndex) {
switch (predIndex) {
case 0: return precpred(_ctx, 39);
case 1: return precpred(_ctx, 38);
case 2: return precpred(_ctx, 37);
case 3: return precpred(_ctx, 35);
case 4: return precpred(_ctx, 34);
case 5: return precpred(_ctx, 33);
case 6: return precpred(_ctx, 32);
case 7: return precpred(_ctx, 31);
case 8: return precpred(_ctx, 29);
case 9: return precpred(_ctx, 28);
case 10: return precpred(_ctx, 26);
case 11: return precpred(_ctx, 25);
case 12: return precpred(_ctx, 41);
case 13: return precpred(_ctx, 40);
case 14: return precpred(_ctx, 23);
}
return true;
}
public static final String _serializedATN =
"\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3A\u00d3\4\2\t\2\4"+
"\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\3\2\3\2\3\2\3\2\7\2\25"+
"\n\2\f\2\16\2\30\13\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\5\3&\n\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\5\3`\n\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3"+
"\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\3\7\3\u0096\n\3\f\3\16\3\u0099\13\3\3\4"+
"\3\4\3\4\5\4\u009e\n\4\7\4\u00a0\n\4\f\4\16\4\u00a3\13\4\3\4\5\4\u00a6"+
"\n\4\3\5\3\5\3\5\7\5\u00ab\n\5\f\5\16\5\u00ae\13\5\3\6\3\6\3\6\3\6\3\6"+
"\5\6\u00b5\n\6\3\7\3\7\3\7\7\7\u00ba\n\7\f\7\16\7\u00bd\13\7\3\b\3\b\3"+
"\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\3\b\5\b\u00d1"+
"\n\b\3\b\2\3\4\t\2\4\6\b\n\f\16\2\13\4\2!!@@\4\2\35\35\67\67\4\2\b\b\24"+
"\24\4\2\f\f\32\32\4\2\37\37))\7\2\4\4\25\25\27\27&&\60\61\4\2\7\7((\4"+
"\2 //\b\2\3\3\t\t\22\22%%**88\u0105\2\26\3\2\2\2\4_\3\2\2\2\6\u00a5\3"+
"\2\2\2\b\u00a7\3\2\2\2\n\u00b4\3\2\2\2\f\u00b6\3\2\2\2\16\u00d0\3\2\2"+
"\2\20\21\5\4\3\2\21\22\t\2\2\2\22\25\3\2\2\2\23\25\7@\2\2\24\20\3\2\2"+
"\2\24\23\3\2\2\2\25\30\3\2\2\2\26\24\3\2\2\2\26\27\3\2\2\2\27\31\3\2\2"+
"\2\30\26\3\2\2\2\31\32\7\2\2\3\32\3\3\2\2\2\33\34\b\3\1\2\34\35\t\3\2"+
"\2\35`\5\4\3&\36\37\7\62\2\2\37`\5\4\3 !\7\31\2\2!`\5\4\3\35\"#\7\33"+
"\2\2#%\7\r\2\2$&\5\b\5\2%$\3\2\2\2%&\3\2\2\2&\'\3\2\2\2\'(\7\66\2\2(`"+
"\5\4\3\32)*\7\20\2\2*`\5\4\3\23+,\7$\2\2,`\5\4\3\22-.\7\6\2\2./\5\6\4"+
"\2/\60\7\"\2\2\60`\3\2\2\2\61\62\7#\2\2\62\63\7\r\2\2\63\64\5\4\3\2\64"+
"\65\7\66\2\2\65\66\5\4\3\2\66`\3\2\2\2\678\7#\2\289\7\r\2\29:\5\4\3\2"+
":;\7\66\2\2;<\5\4\3\2<=\7\64\2\2=>\5\4\3\2>`\3\2\2\2?@\7\n\2\2@A\7\r\2"+
"\2AB\7>\2\2BC\7\63\2\2CD\5\4\3\2DE\7\66\2\2EF\5\4\3\2F`\3\2\2\2GH\7\5"+
"\2\2HI\7\r\2\2IJ\5\4\3\2JK\7\66\2\2KL\5\4\3\2L`\3\2\2\2M`\7\65\2\2N`\7"+
"\'\2\2OP\7\r\2\2PQ\5\4\3\2QR\7\66\2\2R`\3\2\2\2S`\7>\2\2T`\7=\2\2U`\7"+
"9\2\2V`\7:\2\2W`\7;\2\2X`\7<\2\2Y`\7\34\2\2Z`\7\21\2\2[`\7\16\2\2\\`\7"+
",\2\2]`\7\36\2\2^`\7\23\2\2_\33\3\2\2\2_\36\3\2\2\2_ \3\2\2\2_\"\3\2\2"+
"\2_)\3\2\2\2_+\3\2\2\2_-\3\2\2\2_\61\3\2\2\2_\67\3\2\2\2_?\3\2\2\2_G\3"+
"\2\2\2_M\3\2\2\2_N\3\2\2\2_O\3\2\2\2_S\3\2\2\2_T\3\2\2\2_U\3\2\2\2_V\3"+
"\2\2\2_W\3\2\2\2_X\3\2\2\2_Y\3\2\2\2_Z\3\2\2\2_[\3\2\2\2_\\\3\2\2\2_]"+
"\3\2\2\2_^\3\2\2\2`\u0097\3\2\2\2ab\f)\2\2bc\t\4\2\2c\u0096\5\4\3*de\f"+
"(\2\2ef\t\5\2\2f\u0096\5\4\3)gh\f\'\2\2hi\7\13\2\2i\u0096\5\4\3(jk\f%"+
"\2\2kl\7-\2\2l\u0096\5\4\3&mn\f$\2\2no\7?\2\2o\u0096\5\4\3%pq\f#\2\2q"+
"r\t\6\2\2r\u0096\5\4\3$st\f\"\2\2tu\t\3\2\2u\u0096\5\4\3#vw\f!\2\2wx\t"+
"\7\2\2x\u0096\5\4\3\"yz\f\37\2\2z{\t\b\2\2{\u0096\5\4\3 |}\f\36\2\2}~"+
"\t\t\2\2~\u0096\5\4\3\37\177\u0080\f\34\2\2\u0080\u0081\7\31\2\2\u0081"+
"\u0096\5\4\3\35\u0082\u0083\f\33\2\2\u0083\u0084\t\n\2\2\u0084\u0096\5"+
"\4\3\34\u0085\u0086\f+\2\2\u0086\u0087\7\26\2\2\u0087\u0088\5\f\7\2\u0088"+
"\u0089\7\30\2\2\u0089\u008a\7\30\2\2\u008a\u0096\3\2\2\2\u008b\u008c\f"+
"*\2\2\u008c\u008d\7.\2\2\u008d\u008e\5\f\7\2\u008e\u008f\7\30\2\2\u008f"+
"\u0096\3\2\2\2\u0090\u0091\f\31\2\2\u0091\u0092\7\r\2\2\u0092\u0093\5"+
"\f\7\2\u0093\u0094\7\66\2\2\u0094\u0096\3\2\2\2\u0095a\3\2\2\2\u0095d"+
"\3\2\2\2\u0095g\3\2\2\2\u0095j\3\2\2\2\u0095m\3\2\2\2\u0095p\3\2\2\2\u0095"+
"s\3\2\2\2\u0095v\3\2\2\2\u0095y\3\2\2\2\u0095|\3\2\2\2\u0095\177\3\2\2"+
"\2\u0095\u0082\3\2\2\2\u0095\u0085\3\2\2\2\u0095\u008b\3\2\2\2\u0095\u0090"+
"\3\2\2\2\u0096\u0099\3\2\2\2\u0097\u0095\3\2\2\2\u0097\u0098\3\2\2\2\u0098"+
"\5\3\2\2\2\u0099\u0097\3\2\2\2\u009a\u00a1\5\4\3\2\u009b\u009d\t\2\2\2"+
"\u009c\u009e\5\4\3\2\u009d\u009c\3\2\2\2\u009d\u009e\3\2\2\2\u009e\u00a0"+
"\3\2\2\2\u009f\u009b\3\2\2\2\u00a0\u00a3\3\2\2\2\u00a1\u009f\3\2\2\2\u00a1"+
"\u00a2\3\2\2\2\u00a2\u00a6\3\2\2\2\u00a3\u00a1\3\2\2\2\u00a4\u00a6\3\2"+
"\2\2\u00a5\u009a\3\2\2\2\u00a5\u00a4\3\2\2\2\u00a6\7\3\2\2\2\u00a7\u00ac"+
"\5\n\6\2\u00a8\u00a9\7\17\2\2\u00a9\u00ab\5\n\6\2\u00aa\u00a8\3\2\2\2"+
"\u00ab\u00ae\3\2\2\2\u00ac\u00aa\3\2\2\2\u00ac\u00ad\3\2\2\2\u00ad\t\3"+
"\2\2\2\u00ae\u00ac\3\2\2\2\u00af\u00b5\7>\2\2\u00b0\u00b1\7>\2\2\u00b1"+
"\u00b2\7\t\2\2\u00b2\u00b5\5\4\3\2\u00b3\u00b5\7+\2\2\u00b4\u00af\3\2"+
"\2\2\u00b4\u00b0\3\2\2\2\u00b4\u00b3\3\2\2\2\u00b5\13\3\2\2\2\u00b6\u00bb"+
"\5\16\b\2\u00b7\u00b8\7\17\2\2\u00b8\u00ba\5\16\b\2\u00b9\u00b7\3\2\2"+
"\2\u00ba\u00bd\3\2\2\2\u00bb\u00b9\3\2\2\2\u00bb\u00bc\3\2\2\2\u00bc\r"+
"\3\2\2\2\u00bd\u00bb\3\2\2\2\u00be\u00d1\5\4\3\2\u00bf\u00c0\7>\2\2\u00c0"+
"\u00d1\7\t\2\2\u00c1\u00c2\7>\2\2\u00c2\u00c3\7\t\2\2\u00c3\u00d1\5\4"+
"\3\2\u00c4\u00c5\7=\2\2\u00c5\u00d1\7\t\2\2\u00c6\u00c7\7=\2\2\u00c7\u00c8"+
"\7\t\2\2\u00c8\u00d1\5\4\3\2\u00c9\u00ca\7\34\2\2\u00ca\u00d1\7\t\2\2"+
"\u00cb\u00cc\7\34\2\2\u00cc\u00cd\7\t\2\2\u00cd\u00d1\5\4\3\2\u00ce\u00d1"+
"\7+\2\2\u00cf\u00d1\3\2\2\2\u00d0\u00be\3\2\2\2\u00d0\u00bf\3\2\2\2\u00d0"+
"\u00c1\3\2\2\2\u00d0\u00c4\3\2\2\2\u00d0\u00c6\3\2\2\2\u00d0\u00c9\3\2"+
"\2\2\u00d0\u00cb\3\2\2\2\u00d0\u00ce\3\2\2\2\u00d0\u00cf\3\2\2\2\u00d1"+
"\17\3\2\2\2\17\24\26%_\u0095\u0097\u009d\u00a1\u00a5\u00ac\u00b4\u00bb"+
"\u00d0";
public static final ATN _ATN =
new ATNDeserializer().deserialize(_serializedATN.toCharArray());
static {
_decisionToDFA = new DFA[_ATN.getNumberOfDecisions()];
for (int i = 0; i < _ATN.getNumberOfDecisions(); i++) {
_decisionToDFA[i] = new DFA(_ATN.getDecisionState(i), i);
}
}
}<|fim▁end|>
|
}
break;
|
<|file_name|>sql_types.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2008--2010 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
# Database types we support for out variables
#
# Data types
class DatabaseDataType:<|fim▁hole|> self.set_value(value)
def get_value(self):
return self.value
def set_value(self, value):
self.value = value
def __str__(self):
return self.type_name
class NUMBER(DatabaseDataType):
type_name = "NUMBER"
class STRING(DatabaseDataType):
type_name = "STRING"
def __init__(self, value=None, size=None):
DatabaseDataType.__init__(self, value=value, size=size)
if not size:
self.size = 4000
class BINARY(DatabaseDataType):
type_name = "BINARY"
class LONG_BINARY(DatabaseDataType):
type_name = "LONG_BINARY"
# XXX More data types to be added as we find need for them<|fim▁end|>
|
type_name = None
def __init__(self, value=None, size=None):
self.size = size or 1
|
<|file_name|>util.py<|end_file_name|><|fim▁begin|>import contextlib
import gzip
import hashlib
import io
import mmap
from builtins import (
map as imap,
)
def gzip_compress(data, compresslevel=6):
compressed = io.BytesIO()
with gzip.GzipFile(fileobj=compressed,
mode="wb",<|fim▁hole|>
def hash_file(fn, hn):
h = hashlib.new(hn)
with open(fn, "r") as fh:
with contextlib.closing(mmap.mmap(fh.fileno(), 0, prot=mmap.PROT_READ)) as mm:
h.update(mm)
return h.digest()
def indent(text, spaces):
spaces = " " * int(spaces)
return "\n".join(imap(lambda l: spaces + l, text.splitlines()))<|fim▁end|>
|
compresslevel=compresslevel) as compressor:
compressor.write(data)
return compressed.getvalue()
|
<|file_name|>MiSeLoR.java<|end_file_name|><|fim▁begin|>package de.tkprog.MiSeLoR;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;<|fim▁hole|>import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Collections;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import de.tkprog.log.Logger;
public class MiSeLoR {
private Database currentDatabase;
public static MiSeLoR THIS;
public static Logger log;
public static void main(String[] args){
(new File("log/")).mkdir();
log = new Logger("log/MiSeLoR_"+System.currentTimeMillis()+".log");
log.setLogToCLI(true);
log.setLogToFile(true);
log.logAll(true);
new MiSeLoR();
}
public MiSeLoR(){
try {
THIS = this;
currentDatabase = new Database("MiSeLoR_current.db");
Message.initialise(cD());
SimpleMessage.initialise(cD());
ChatMessage.initialise(cD());
LoginMessage.initialise(cD());
LeftGameMessage.initialise(cD());
ServerOverloadMessage.initialise(cD());
DeathMessage.initialise(cD());
EarnedAchievementMessage.initialise(cD());
JoinMessage.initialise(cD());
LostConnectionMessage.initialise(cD());
MovedWronglyMessage.initialise(cD());
PlayerOnlineMessage.initialise(cD());
SavingWorldDataMessage.initialise(cD());
ServerChatMessage.initialise(cD());
UUIDofPlayerIsMessage.initialise(cD());
cmd();
} catch (Exception e) {
e.printStackTrace();
}
}
private void cmd() {
boolean running = true;
BufferedReader bf = new BufferedReader(new InputStreamReader(System.in));
System.out.println();
System.out.println();
do{
try {
System.out.print("> ");
String input = bf.readLine();
if(input != null){
if(Pattern.matches("help|\\?|\\\\\\?|\\\\help", input)){
System.out.println("Commands:\r\n"+
"\thelp|?|\\?|\\help - Shows this help\r\n"+
"\tparse <dd> <mm> <yyyy> <file name> - parses the given file for the given day // file name can use Regex\r\n"+
"\tparses <file name> - parses the given file (\"yyyy-mm-dd-x.log\") for the given day // file name can use Regex\r\n"+
"\tuptime <player name> - shows the uptime for the player\r\n"+
"\tall - shows a summary\r\n"+
"\tgetAllPlayer - shows all saved player\r\n"+
"\tleaderboard - Shows some leaderboard\r\n"+
"\texit - exits the program");
}
else if(Pattern.matches("parse\\s\\d{2}\\s\\d{2}\\s\\d{4}\\s(\\S*)", input)){
Pattern p = Pattern.compile("parse\\s(\\d{2})\\s(\\d{2})\\s(\\d{4})\\s(\\S*)");
Matcher mm = p.matcher(input);
mm.find();
File[] f = getFiles(mm.group(4));
for(File ff : f){
Commands.parse(Integer.parseInt(mm.group(1)), Integer.parseInt(mm.group(2)), Integer.parseInt(mm.group(3)), ff.getAbsolutePath());
}
}
else if(Pattern.matches("parses\\s(\\S*)", input)){
Pattern p = Pattern.compile("parses\\s(\\S*)");
Matcher mm = p.matcher(input);
mm.find();
String filename = mm.group(1);
File[] f = getFiles(filename);
for(File ff : f){
p = Pattern.compile("(\\d{4})\\-(\\d{2})\\-(\\d{2})\\-\\d*\\.log");
mm = p.matcher(ff.getName());
mm.find();
Commands.parse(Integer.parseInt(mm.group(3)), Integer.parseInt(mm.group(2)), Integer.parseInt(mm.group(1)), ff.getAbsolutePath());
}
}
else if(Pattern.matches("uptime\\s(\\S*)", input)){
Pattern p = Pattern.compile("uptime\\s(\\S*)");
Matcher mm = p.matcher(input);
mm.find();
Commands.uptime(mm.group(1));
}
else if(Pattern.matches("all", input)){
Commands.all();
}
else if(Pattern.matches("exit", input)){
Commands.exit();
}
else if(Pattern.matches("getAllPlayer", input)){
String[] s = Commands.getAllPlayers();
for(String ss : s){
System.out.println(ss);
}
}
else if(Pattern.matches("leaderboard", input)){
Commands.showLeaderboard(cD());
}
else{
System.out.println("The command \""+input+"\" wasn't recognized. Type in \"help\".");
}
}
} catch (Exception e) {
e.printStackTrace();
}
} while(running);
}
private File[] getFiles(String fileregex) {
File f = new File(".");
if(!f.isDirectory()){
f = f.getParentFile();
}
if(!f.isDirectory()){
System.err.println("Sollte ned passieren...");
}
File[] ff = f.listFiles();
ArrayList<File> o = new ArrayList<File>();
for(File fff : ff){
if(Pattern.matches(fileregex, fff.getName())){
o.add(fff);
}
else{
System.out.println("\""+fileregex+"\" does not match \""+fff.getName()+"\"");
}
}
File[] ffff = new File[o.size()];
int i = 0;
for(File fff : o){
ffff[i] = fff;
i++;
}
return ffff;
}
public Database cD() {
return getCurrentDatabase();
}
public Database getCurrentDatabase() {
return currentDatabase;
}
public void setCurrentDatabase(Database currentDatabase) {
this.currentDatabase = currentDatabase;
}
}<|fim▁end|>
|
import java.io.InputStreamReader;
|
<|file_name|>PickleSaver.py<|end_file_name|><|fim▁begin|># Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
An object that makes some of the attributes of your class persistent, pickling
them and lazily writing them to a file.
"""
# from the Python Standard Library
import os
import cPickle as pickle
import warnings
# from the pyutil library
import fileutil
import nummedobj
import twistedutil
# from the Twisted library
from twisted.python import log
class PickleSaver(nummedobj.NummedObj):
"""
This makes some of the attributes of your class persistent, saving
them in a pickle and saving them lazily.
The general idea: You are going to tell PickleSaver which of your
attributes ought to be persistently saved, and the name of a file to
save them in. Those attributes will get saved to disk, and when
your object is instantiated those attributes will get set to the
values loaded from the file.
Usage: inherit from PickleSaver and call PickleSaver.__init__() in your
constructor. You will pass arguments to PickleSaver.__init__()
telling it which attributes to save, which file to save them in, and
what values they should have if there is no value stored for them in
the file.
Note: do *not* assign values to your persistent attributes in your
constructor, because you might thus overwrite their persistent
values.
Then whenever you change one of the persistent attributes, call
self.lazy_save() (it won't *really* save -- it'll just schedule a
save for DELAY minutes later.) If you update an attribute and
forget to call self.lazy_save() then the change will not be saved,
unless you later call self.lazy_save() before you shut down.
Data could be lost if the Python interpreter were to die
unexpectedly (for example, due to a segfault in a compiled machine
code module or due to the Python process being killed without
warning via SIGKILL) before the delay passes. However if the Python
interpreter shuts down cleanly (i.e., if it garbage collects and
invokes the __del__ methods of the collected objects), then the data
will be saved at that time (unless your class has the "not-collectable"
problem: http://python.org/doc/current/lib/module-gc.html -- search
in text for "uncollectable").
Note: you can pass DELAY=0 to make PickleSaver a not-so-lazy saver.
The advantage of laziness is that you don't touch the disk as
often -- touching disk is a performance cost.
To cleanly shutdown, invoke shutdown(). Further operations after that
will result in exceptions.
"""
class ExtRes:
"""
This is for holding things (external resources) that PickleSaver needs
to finalize after PickleSaver is killed. (post-mortem finalization)
In particular, this holds the names and values of all attributes
that have been changed, so that after the PickleSaver is
garbage-collected those values will be saved to the persistent file.
"""
def __init__(self, fname, objname):
self.fname = fname
self.objname = objname
self.dirty = False # True iff the attrs have been changed and need to be saved to disk; When you change this flag from False to True, you schedule a save task for 10 minutes later. When the save task goes off it changes the flag from True to False.
self.savertask = None
self.valstr = None # the pickled (serialized, string) contents of the attributes that should be saved
def _save_to_disk(self):
if self.valstr is not None:
log.msg("%s._save_to_disk(): fname: %s" % (self.objname, self.fname,))
of = open(self.fname + ".tmp", "wb")
of.write(self.valstr)
of.flush()
of.close()
of = None
fileutil.remove_if_possible(self.fname)
fileutil.rename(self.fname + ".tmp", self.fname)
log.msg("%s._save_to_disk(): now, having finished write(), os.path.isfile(%s): %s" % (self, self.fname, os.path.isfile(self.fname),))
self.valstr = None
self.dirty = False
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def shutdown(self):
if self.dirty:
self._save_to_disk()
if self.savertask:
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def __del__(self):
self.shutdown()
def __init__(self, fname, attrs, DELAY=60*60, savecb=None):
"""
@param attrs: a dict whose keys are the names of all the attributes to be persistently stored and whose values are the initial default value that the attribute gets set to the first time it is ever used; After this first initialization, the value will be persistent so the initial default value will never be used again.
@param savecb: if not None, then it is a callable that will be called after each save completes (useful for unit tests) (savecb doesn't get called after a shutdown-save, only after a scheduled save)
"""
warnings.warn("deprecated", DeprecationWarning)
nummedobj.NummedObj.__init__(self)
self._DELAY = DELAY
self._attrnames = attrs.keys()
self._extres = PickleSaver.ExtRes(fname=fname, objname=self.__repr__())
self._savecb = savecb
for attrname, defaultval in attrs.items():
setattr(self, attrname, defaultval)
try:
attrdict = pickle.loads(open(self._extres.fname, "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le:
try:<|fim▁hole|> log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le2:
log.msg("Got exception attempting to load attrs. (This is normal if this is the first time you've used this persistent %s object.) fname: %s, le: %s, le2: %s" % (self.__class__, self._extres.fname, le, le2,))
self.lazy_save()
def _store_attrs_in_extres(self):
d = {}
for attrname in self._attrnames:
d[attrname] = getattr(self, attrname)
# log.msg("%s._store_attrs_in_extres: attrname: %s, val: %s" % (self, attrname, getattr(self, attrname),))
# pickle the attrs now, to ensure that there are no reference cycles
self._extres.valstr = pickle.dumps(d, True)
# log.msg("%s._store_attrs_in_extres: valstr: %s" % (self, self._extres.valstr,))
self._extres.dirty = True
def _save_to_disk(self):
log.msg("%s._save_to_disk()" % (self,))
self._extres._save_to_disk()
if self._savecb:
self._savecb()
def _lazy_save(self, delay=None):
""" @deprecated: use lazy_save() instead """
return self.lazy_save(delay)
def lazy_save(self, delay=None):
"""
@param delay: how long from now before the data gets saved to disk, or `None' in order to use the default value provided in the constructor
"""
if delay is None:
delay=self._DELAY
# copy the values into extres so that if `self' gets garbage-collected the values will be written to disk during post-mortem finalization. (This also marks it as dirty.)
self._store_attrs_in_extres()
newsavetask = twistedutil.callLater_weakly(delay, self._save_to_disk)
if self._extres.savertask:
if self._extres.savertask.callId.getTime() < newsavetask.callId.getTime():
try:
newsavetask.callId.cancel()
except:
pass
else:
try:
self._extres.savertask.callId.cancel()
except:
pass
self._extres.savertask = newsavetask
else:
self._extres.savertask = newsavetask
def shutdown(self):
self.extres.shutdown()
self.extres = None<|fim▁end|>
|
attrdict = pickle.loads(open(self._extres.fname + ".tmp", "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
|
<|file_name|>backendTests.ts<|end_file_name|><|fim▁begin|>/* Copyright 2015 The TensorFlow Authors. All Rights Reserved.<|fim▁hole|>You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an 'AS IS' BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
module TF.Backend {
describe('urlPathHelpers', function() {
let demoify = TF.Backend.demoify;
let encode = TF.Backend.queryEncoder;
it('demoify works as expected', function() {
let demoified = demoify(BAD_CHARACTERS);
let all_clean = '';
for (let i = 0; i < BAD_CHARACTERS.length; i++) {
all_clean += '_';
}
chai.assert.equal(
demoified, all_clean, 'cleaning the BAD_CHARACTERS works');
chai.assert.equal(
demoify('foozod'), 'foozod', 'doesnt change safe string');
chai.assert.equal(demoify('foo zod (2)'), 'foo_zod__2_', 'simple case');
});
it('queryEncoder works with demoify on spaces and parens', function() {
let params = {foo: 'something with spaces and (parens)'};
let actual = demoify(encode(params));
let expected = '_foo_something_with_spaces_and__28parens_29';
chai.assert.equal(actual, expected);
});
});
function assertIsDatum(x) {
chai.assert.isNumber(x.step);
chai.assert.instanceOf(x.wall_time, Date);
}
describe('backend tests', function() {
let backend: Backend;
let rm: RequestManager;
let base = 'data';
let demoRouter = TF.Backend.router(base, true);
beforeEach(function() {
// Construct a demo Backend (third param is true)
backend = new Backend(demoRouter);
rm = new RequestManager();
});
it('runs are loaded properly', function(done) {
let runsResponse = backend.runs();
let actualRuns = rm.request(demoRouter.runs());
Promise.all([runsResponse, actualRuns]).then((values) => {
chai.assert.deepEqual(values[0], values[1]);
done();
});
});
it('scalars are loaded properly', function(done) {
backend.scalar('cross_entropy (1)', 'run1').then((s) => {
// just check the data got reformatted properly
let aScalar = s[s.length - 1];
assertIsDatum(aScalar);
chai.assert.isNumber(aScalar.scalar);
// verify date conversion works
chai.assert.equal(aScalar.wall_time.valueOf(), 40000);
done();
});
});
it('histograms are loaded properly', function(done) {
backend.histogram('histo1', 'run1').then((histos) => {
let histo = histos[0];
assertIsDatum(histo);
chai.assert.instanceOf(histo.bins, Array);
done();
});
});
it('all registered types have handlers', function() {
TYPES.forEach((t: string) => {
chai.assert.isDefined(backend[t], t);
chai.assert.isDefined(backend[t + 'Runs'], t + 'Runs');
});
});
it('images are loaded properly', function(done) {
backend.image('im1', 'run1').then((images) => {
let image = images[0];
assertIsDatum(image);
chai.assert.isNumber(image.width);
chai.assert.isNumber(image.height);
let nonDemoQuery = 'index=0&tag=im1&run=run1';
let expectedUrl = demoRouter.individualImage(nonDemoQuery, 10.0);
chai.assert.equal(image.url, expectedUrl);
done();
});
});
it('audio is loaded properly', function(done) {
backend.audio('audio1', 'run1').then((audio_clips) => {
let audio = audio_clips[0];
assertIsDatum(audio);
chai.assert.equal(audio.content_type, 'audio/wav');
let nonDemoQuery = 'index=0&tag=audio1&run=run1';
let expectedUrl = demoRouter.individualAudio(nonDemoQuery);
chai.assert.equal(audio.url, expectedUrl);
done();
});
});
it('trailing slash removed from base route', function() {
let r = TF.Backend.router('foo/');
chai.assert.equal(r.runs(), 'foo/runs');
});
it('run helper methods work', function(done) {
let scalar = {run1: ['cross_entropy (1)'], fake_run_no_data: ['scalar2']};
let image = {run1: ['im1'], fake_run_no_data: ['im1', 'im2']};
let audio = {run1: ['audio1'], fake_run_no_data: ['audio1', 'audio2']};
let runMetadata = {run1: ['step99'], fake_run_no_data: ['step99']};
let graph = ['fake_run_no_data'];
let count = 0;
function next() {
count++;
if (count === 4) {
done();
}
}
backend.scalarRuns().then((x) => {
chai.assert.deepEqual(x, scalar);
next();
});
backend.imageRuns().then((x) => {
chai.assert.deepEqual(x, image);
next();
});
backend.audioRuns().then((x) => {
chai.assert.deepEqual(x, audio);
next();
});
backend.runMetadataRuns().then((x) => {
chai.assert.deepEqual(x, runMetadata);
next();
});
backend.graphRuns().then((x) => {
chai.assert.deepEqual(x, graph);
next();
});
});
it('runToTag helpers work', function() {
let r2t: RunToTag = {
run1: ['foo', 'bar', 'zod'],
run2: ['zod', 'zoink'],
a: ['foo', 'zod']
};
let empty1: RunToTag = {};
let empty2: RunToTag = {run1: [], run2: []};
chai.assert.deepEqual(getRuns(r2t), ['a', 'run1', 'run2']);
chai.assert.deepEqual(getTags(r2t), ['bar', 'foo', 'zod', 'zoink']);
chai.assert.deepEqual(filterTags(r2t, ['run1', 'run2']), getTags(r2t));
chai.assert.deepEqual(filterTags(r2t, ['run1']), ['bar', 'foo', 'zod']);
chai.assert.deepEqual(
filterTags(r2t, ['run2', 'a']), ['foo', 'zod', 'zoink']);
chai.assert.deepEqual(getRuns(empty1), []);
chai.assert.deepEqual(getTags(empty1), []);
chai.assert.deepEqual(getRuns(empty2), ['run1', 'run2']);
chai.assert.deepEqual(getTags(empty2), []);
});
});
// Unit tests for convertBins(), which reshapes backend histogram data
// (right bucket edges + parallel counts) into equal-width {x, dx, y}
// render bins over the range [min, max].
describe('Verify that the histogram format conversion works.', function() {
  // Compares two histograms bin-by-bin with a small floating-point
  // tolerance; assumes both histograms have the same number of bins.
  function assertHistogramEquality(h1, h2) {
    h1.forEach(function(b1, i) {
      let b2 = h2[i];
      chai.assert.closeTo(b1.x, b2.x, 1e-10);
      chai.assert.closeTo(b1.dx, b2.dx, 1e-10);
      chai.assert.closeTo(b1.y, b2.y, 1e-10);
    });
  }
  it('Throws and error if the inputs are of different lengths', function() {
    chai.assert.throws(function() {
      convertBins(
          {bucketRightEdges: [0], bucketCounts: [1, 2], min: 1, max: 2}, 1, 2,
          2);
    }, 'Edges and counts are of different lengths.');
  });
  it('Handles data with no bins', function() {
    chai.assert.deepEqual(
        convertBins(
            {bucketRightEdges: [], bucketCounts: [], min: 0, max: 0}, 0, 0,
            0),
        []);
  });
  it('Handles data with one bin', function() {
    let counts = [1];
    let rightEdges = [1.21e-12];
    let histogram = [{x: 1.1e-12, dx: 1.21e-12 - 1.1e-12, y: 1}];
    let newHistogram = convertBins(
        {
          bucketRightEdges: rightEdges,
          bucketCounts: counts,
          min: 1.1e-12,
          max: 1.21e-12
        },
        1.1e-12, 1.21e-12, 1);
    assertHistogramEquality(newHistogram, histogram);
  });
  it('Handles data with two bins.', function() {
    let counts = [1, 2];
    let rightEdges = [1.1e-12, 1.21e-12];
    // Counts get redistributed proportionally across the two equal-width
    // output bins.
    let histogram = [
      {x: 1.0e-12, dx: 1.05e-13, y: 1.09090909090909},
      {x: 1.105e-12, dx: 1.05e-13, y: 1.9090909090909}
    ];
    let newHistogram = convertBins(
        {
          bucketRightEdges: rightEdges,
          bucketCounts: counts,
          min: 1.0e-12,
          max: 1.21e-12
        },
        1.0e-12, 1.21e-12, 2);
    assertHistogramEquality(newHistogram, histogram);
  });
  it('Handles a domain that crosses zero, but doesn\'t include zero as ' +
         'an edge.',
     function() {
       let counts = [1, 2];
       let rightEdges = [-1.0e-12, 1.0e-12];
       let histogram = [
         {x: -1.1e-12, dx: 1.05e-12, y: 1.95},
         {x: -0.5e-13, dx: 1.05e-12, y: 1.05}
       ];
       let newHistogram = convertBins(
           {
             bucketRightEdges: rightEdges,
             bucketCounts: counts,
             min: -1.1e-12,
             max: 1.0e-12
           },
           -1.1e-12, 1.0e-12, 2);
       assertHistogramEquality(newHistogram, histogram);
     });
  it('Handles a histogram of all zeros', function() {
    // Degenerate min == max == 0 input: all mass should land in the middle
    // bin of a synthetic [-1, 1] domain.
    let h = {
      min: 0,
      max: 0,
      nItems: 51200,
      sum: 0,
      sumSquares: 0,
      bucketRightEdges: [0, 1e-12, 1.7976931348623157e+308],
      bucketCounts: [0, 51200, 0],
      wall_time: '2017-01-25T02:30:11.257Z',
      step: 0
    };
    let newHistogram = convertBins(h, 0, 0, 5);
    let expectedHistogram = [
      {x: -1, dx: 0.4, y: 0}, {x: -0.6, dx: 0.4, y: 0},
      {x: -0.2, dx: 0.4, y: 51200}, {x: 0.2, dx: 0.4, y: 0},
      {x: 0.6, dx: 0.4, y: 0}
    ];
    assertHistogramEquality(newHistogram, expectedHistogram);
  });
  it('Handles a right-most right edge that extends to very large number.',
     function() {
       let counts = [1, 2, 3];
       let rightEdges = [0, 1.0e-12, 1.0e14];
       let histogram = [
         {x: -1.0e-12, dx: 0.7e-12, y: 0.7},
         {x: -0.3e-12, dx: 0.7e-12, y: 1.1},
         {x: 0.4e-12, dx: 0.7e-12, y: 4.2}
       ];
       let newHistogram = convertBins(
           {
             bucketRightEdges: rightEdges,
             bucketCounts: counts,
             min: -1.0e-12,
             max: 1.1e-12
           },
           -1.0e-12, 1.1e-12, 3);
       assertHistogramEquality(newHistogram, histogram);
     });
});
}<|fim▁end|>
|
Licensed under the Apache License, Version 2.0 (the 'License');
you may not use this file except in compliance with the License.
|
<|file_name|>models.hpp<|end_file_name|><|fim▁begin|>//
// models.hpp
// animeloop-cli
//
// Created by ShinCurry on 2017/4/3.
// Copyright © 2017年 ShinCurry. All rights reserved.
//
#ifndef models_hpp
#define models_hpp
#include <iostream>
#include <string>
#include <tuple>
#include <vector>

#include <opencv2/opencv.hpp>
namespace al {
    /// Start/end of one detected loop, as a (begin, end) pair.
    /// NOTE(review): units look like frame indices — confirm against callers.
    using LoopDuration = std::tuple<long, long>;
    /// Collection of loop candidates.
    using LoopDurations = std::vector<LoopDuration>;
    /// One hash string per frame (presumably perceptual hashes — verify).
    using HashVector = std::vector<std::string>;
    /// Decoded video frames.
    using FrameVector = std::vector<cv::Mat>;
    /// Frame indices at which scene cuts occur.
    using CutVector = std::vector<int>;

    /// Basic properties of an opened video stream.
    struct VideoInfo {
        double fps;         ///< Frames per second.
        double fourcc;      ///< Codec FOURCC code as reported by OpenCV.
        cv::Size size;      ///< Frame dimensions.
        int frame_count;    ///< Total number of frames.
    };
}
<|fim▁hole|>
#endif /* models_hpp */<|fim▁end|>
| |
<|file_name|>PaperSheet.js<|end_file_name|><|fim▁begin|>/**
* @file
* @copyright 2020 WarlockD (https://github.com/warlockd)
* @author Original WarlockD (https://github.com/warlockd)
* @author Changes stylemistake
* @author Changes ThePotato97
* @author Changes Ghommie
* @author Changes Timberpoes
* @license MIT
*/
import { classes } from 'common/react';
import { Component } from 'inferno';
import { marked } from 'marked';
import { useBackend } from '../backend';
import { Box, Flex, Tabs, TextArea } from '../components';
import { Window } from '../layouts';
import { clamp } from 'common/math';
import { sanitizeText } from '../sanitize';
const MAX_PAPER_LENGTH = 5000; // Question, should we send this with ui_data?

// Measures the rendered pixel width of `text` in the given font family and
// size by drawing into an off-screen canvas. Returns a number (px).
const textWidth = (text, font, fontsize) => {
  // default font height is 12 in tgui
  // BUGFIX: the canvas font shorthand must be "<size>px <family>"; the old
  // "<size>x <family>" string was invalid CSS, so ctx.font silently kept
  // the canvas default font and widths were measured with the wrong font.
  const cssFont = fontsize + "px " + font;
  const c = document.createElement('canvas');
  const ctx = c.getContext("2d");
  ctx.font = cssFont;
  return ctx.measureText(text).width;
};
// Wraps `text` in a <span> applying the given color and font family, and
// optionally bold. Returns the resulting HTML string.
const setFontinText = (text, font, color, bold=false) => {
  const styleParts = [
    "color:" + color + ";",
    "font-family:'" + font + "';",
  ];
  if (bold) {
    styleParts.push("font-weight: bold;");
  }
  return "<span style=\"" + styleParts.join("") + "\">" + text + "</span>";
};
// Builds the DOM id used for the Nth generated paper input field.
const createIDHeader = index => `paperfield_${index}`;
// To make a field you do a [_______] or however long the field is
// we will then output a TEXT input for it that hopefully covers
// the exact amount of spaces
// Matches a blank fill-in field written as [___]; group 1 is the run of
// underscores (its length sizes the generated input).
const field_regex = /\[(_+)\]/g;
// Matches an already-rendered, still-editable input tag and captures its
// paperfield_N id in the named group "id"; disabled inputs are skipped.
const field_tag_regex = /\[<input\s+(?!disabled)(.*?)\s+id="(?<id>paperfield_\d+)"(.*?)\/>\]/gm;
// Matches a %s / %sign signature token. NOTE(review): the (?=\\s|$)?
// lookahead is optional AND escapes the backslash (matching a literal
// "\s"), so it is effectively a no-op — presumably (?=\s|$) was intended;
// confirm before changing.
const sign_regex = /%s(?:ign)?(?=\\s|$)?/igm;
// Renders one fill-in form field as an HTML <input> wrapped in the
// original [ ] brackets.
//   length   - maximum characters the field accepts
//   width    - CSS width string (e.g. "40px") so it covers the blank exactly
//   font     - font family used inside the field
//   fontsize - font size in px
//   color    - text color
//   id       - DOM id (see createIDHeader) used to read the value back later
const createInputField = (length, width, font,
  fontsize, color, id) => {
  // BUGFIX: was "font:'12x Verdana';" — quoting the whole value and using
  // "x" instead of "px" is invalid CSS, so the field silently fell back to
  // the default font. Valid shorthand: font:<size>px '<family>';
  return "[<input "
    + "type=\"text\" "
    + "style=\""
    + "font:" + fontsize + "px '" + font + "';"
    + "color:" + color + ";"
    + "min-width:" + width + ";"
    + "max-width:" + width + ";"
    + "\" "
    + "id=\"" + id + "\" "
    + "maxlength=" + length + " "
    + "size=" + length + " "
    + "/>]";
};
// Replaces every [___] blank in `txt` with a correspondingly-sized <input>
// element. `counter` seeds the generated field ids; the returned object
// carries the transformed text plus the next unused counter value.
const createFields = (txt, font, fontsize, color, counter) => {
  let nextId = counter;
  const text = txt.replace(field_regex, (match, underscores) => {
    const fieldWidth = textWidth(match, font, fontsize) + "px";
    const id = createIDHeader(nextId);
    nextId += 1;
    return createInputField(
      underscores.length, fieldWidth, font, fontsize, color, id);
  });
  return { counter: nextId, text };
};
// Replaces every %s / %sign token in `txt` with the signing user's name,
// rendered as a bold Times New Roman "signature" in the given pen color.
const signDocument = (txt, color, user) =>
  txt.replace(sign_regex,
    () => setFontinText(user, "Times New Roman", color, true));
// Runs markdown -> HTML conversion with the paper defaults, downgrading
// every link/image token to plain text so user input cannot embed URLs
// or images.
const run_marked_default = value => {
  // Token types that must be neutralised before rendering.
  const STRIPPED_TYPES = ['url', 'autolink', 'reflink', 'link', 'image'];
  const walkTokens = token => {
    if (STRIPPED_TYPES.includes(token.type)) {
      token.type = 'text';
      // Once asset system is up change to some default image
      // or rewrite for icon images
      token.href = "";
    }
  };
  return marked(value, {
    breaks: true,
    smartypants: true,
    smartLists: true,
    walkTokens,
    // Once assets are fixed might need to change this for them
    baseUrl: 'thisshouldbreakhttp',
  });
};
/*
** This gets the field, and finds the dom object and sees if
** the user has typed something in. If so, it replaces
** the dom object in txt with the value, spaced so it
** fits the [] format, and saves the value into an object.
** There may be ways to optimize this in javascript but
** doing this in byond is nightmarish.
**
** Returns { text, fields }: the (possibly rewritten) html and a map of
** field id -> sanitized user input for every field that was filled in.
*/
const checkAllFields = (txt, font, color, user_name, bold=false) => {
  let matches;
  let values = {};
  let replace = [];
  // I know its tempting to wrap ALL this in a .replace
  // HOWEVER the user might not have entered anything
  // if thats the case we are rebuilding the entire string
  // for nothing, if nothing is entered, txt is just returned
  // (field_tag_regex is /g, so exec() advances lastIndex each iteration)
  while ((matches = field_tag_regex.exec(txt)) !== null) {
    const full_match = matches[0];
    const id = matches.groups.id;
    if (id) {
      const dom = document.getElementById(id);
      // make sure we got data, and kill any html that might
      // be in it
      const dom_text = dom && dom.value ? dom.value : "";
      if (dom_text.length === 0) {
        continue;
      }
      const sanitized_text = sanitizeText(dom.value.trim(), []);
      if (sanitized_text.length === 0) {
        continue;
      }
      // this is easier than doing a bunch of text manipulations
      const target = dom.cloneNode(true);
      // in case they sign in a field
      // NOTE(review): `bold` stays true for every later field once one
      // signature is seen in the loop — confirm that is intended.
      if (sanitized_text.match(sign_regex)) {
        target.style.fontFamily = "Times New Roman";
        bold = true;
        target.defaultValue = user_name;
      }
      else {
        target.style.fontFamily = font;
        target.defaultValue = sanitized_text;
      }
      if (bold) {
        target.style.fontWeight = "bold";
      }
      target.style.color = color;
      target.disabled = true;
      // serialize the (now disabled) input back to html via a wrapper div
      const wrap = document.createElement('div');
      wrap.appendChild(target);
      values[id] = sanitized_text; // save the data
      replace.push({ value: "[" + wrap.innerHTML + "]", raw_text: full_match });
    }
  }
  if (replace.length > 0) {
    for (const o of replace) {
      txt = txt.replace(o.raw_text, o.value);
    }
  }
  return { text: txt, fields: values };
};
// Fully suppresses an event using both the modern (stopPropagation /
// preventDefault) and legacy IE (cancelBubble / returnValue) APIs.
// Always returns false so it can be used directly as a handler result.
const pauseEvent = e => {
  e.stopPropagation && e.stopPropagation();
  e.preventDefault && e.preventDefault();
  e.cancelBubble = true;
  e.returnValue = false;
  return false;
};
// Renders one stamp sprite at an absolute position/rotation on the paper.
// `image` is { sprite, x, y, rotate }; `opacity` defaults to fully opaque
// (the live placement ghost passes 0.5).
const Stamp = (props, context) => {
  const {
    image,
    opacity,
  } = props;
  // absolute positioning within the paper page
  const stamp_transform = {
    'left': image.x + 'px',
    'top': image.y + 'px',
    'transform': 'rotate(' + image.rotate + 'deg)',
    'opacity': opacity || 1.0,
  };
  return (
    <div
      id="stamp"
      className={classes([
        'Paper__Stamp',
        image.sprite,
      ])}
      style={stamp_transform} />
  );
};
// Toggles the `disabled` attribute on every generated <input> tag in the
// rendered paper html. `readonly` true adds it, false strips it.
// BUGFIX: the old pattern /<input\s[^d]/g consumed (and therefore DELETED)
// the first character after the space, turning `<input type=...` into
// `<input disabled ype=...`. A negative lookahead matches without
// consuming that character.
const setInputReadonly = (text, readonly) => {
  return readonly
    ? text.replace(/<input\s(?!disabled\s)/g, '<input disabled ')
    : text.replace(/<input\sdisabled\s/g, '<input ');
};
// got to make this a full component if we
// want to control updates
// Read-only / stamp-mode rendering of the paper: the pre-generated HTML
// plus any stamps layered on top.
// NOTE: dangerouslySetInnerHTML assumes `value` was sanitized upstream
// (sanitizeText is applied where the html is built) — keep it that way.
const PaperSheetView = (props, context) => {
  const {
    value = "",
    stamps = [],
    backgroundColor,
    readOnly,
  } = props;
  const stamp_list = stamps;
  const text_html = {
    __html: '<span class="paper-text">'
      + setInputReadonly(value, readOnly)
      + '</span>',
  };
  return (
    <Box
      position="relative"
      backgroundColor={backgroundColor}
      width="100%"
      height="100%" >
      <Box
        className="Paper__Page"
        fillPositionedParent
        width="100%"
        height="100%"
        dangerouslySetInnerHTML={text_html}
        p="10px" />
      {/* each stamp tuple is [sprite, x, y, rotation] */}
      {stamp_list.map((o, i) => (
        <Stamp key={o[0] + i}
          image={{ sprite: o[0], x: o[1], y: o[2], rotate: o[3] }} />
      ))}
    </Box>
  );
};
// again, need the states for dragging and such
// Stamp-placement mode: tracks the mouse to preview the stamp position
// (shift-drag rotates), and commits the stamp to the server on click.
class PaperSheetStamper extends Component {
  constructor(props, context) {
    super(props, context);
    this.state = {
      x: 0,
      y: 0,
      rotate: 0,
    };
    this.style = null;
    // Follow the cursor with the stamp ghost.
    this.handleMouseMove = e => {
      const pos = this.findStampPosition(e);
      if (!pos) { return; }
      // center offset of stamp & rotate
      pauseEvent(e);
      this.setState({ x: pos[0], y: pos[1], rotate: pos[2] });
    };
    // Commit the stamp at the current position (ignoring clicks in the
    // window title-bar region, y <= 30).
    this.handleMouseClick = e => {
      if (e.pageY <= 30) { return; }
      const { act, data } = useBackend(this.context);
      const stamp_obj = {
        x: this.state.x, y: this.state.y, r: this.state.rotate,
        stamp_class: this.props.stamp_class,
        stamp_icon_state: data.stamp_icon_state,
      };
      act("stamp", stamp_obj);
    };
  }
  // Computes the clamped [x, y, rotation] for the stamp ghost from a mouse
  // event; holding shift freezes the position and rotates instead.
  // Returns undefined if the #stamp element is not rendered yet.
  findStampPosition(e) {
    let rotating;
    const windowRef = document.querySelector('.Layout__content');
    if (e.shiftKey) {
      rotating = true;
    }
    if (document.getElementById("stamp"))
    {
      const stamp = document.getElementById("stamp");
      const stampHeight = stamp.clientHeight;
      const stampWidth = stamp.clientWidth;
      const currentHeight = rotating ? this.state.y : e.pageY
        - windowRef.scrollTop - stampHeight;
      const currentWidth = rotating ? this.state.x : e.pageX - (stampWidth / 2);
      const widthMin = 0;
      const heightMin = 0;
      const widthMax = (windowRef.clientWidth) - (
        stampWidth);
      const heightMax = (windowRef.clientHeight - windowRef.scrollTop) - (
        stampHeight);
      // angle from the stamp's anchor point to the cursor
      const radians = Math.atan2(
        e.pageX - currentWidth,
        e.pageY - currentHeight
      );
      const rotate = rotating ? (radians * (180 / Math.PI) * -1)
        : this.state.rotate;
      const pos = [
        clamp(currentWidth, widthMin, widthMax),
        clamp(currentHeight, heightMin, heightMax),
        rotate,
      ];
      return pos;
    }
  }
  componentDidMount() {
    document.addEventListener("mousemove", this.handleMouseMove);
    document.addEventListener("click", this.handleMouseClick);
  }
  componentWillUnmount() {
    document.removeEventListener("mousemove", this.handleMouseMove);
    document.removeEventListener("click", this.handleMouseClick);
  }
  render() {
    const {
      value,
      stamp_class,
      stamps,
    } = this.props;
    const stamp_list = stamps || [];
    // the translucent ghost stamp that follows the cursor
    const current_pos = {
      sprite: stamp_class,
      x: this.state.x,
      y: this.state.y,
      rotate: this.state.rotate,
    };
    return (
      <>
        <PaperSheetView
          readOnly
          value={value}
          stamps={stamp_list} />
        <Stamp
          active_stamp
          opacity={0.5} image={current_pos} />
      </>
    );
  }
}
// This creates the html from marked text as well as the form fields
const createPreview = (
value,
text,
do_fields = false,
field_counter,
color,
font,
user_name,
is_crayon = false,
) => {
const out = { text: text };
// check if we are adding to paper, if not
// we still have to check if someone entered something
// into the fields
value = value.trim();
if (value.length > 0) {
// First lets make sure it ends in a new line
value += value[value.length] === "\n" ? " \n" : "\n \n";
// Second, we sanitize the text of html
const sanitized_text = sanitizeText(value);
const signed_text = signDocument(sanitized_text, color, user_name);
// Third we replace the [__] with fields as markedjs fucks them up
const fielded_text = createFields(
signed_text, font, 12, color, field_counter);
// Fourth, parse the text using markup<|fim▁hole|> // crayon is bold (<b> tags), maybe make fountain pin italic?
const fonted_text = setFontinText(
formatted_text, font, color, is_crayon);
out.text += fonted_text;
out.field_counter = fielded_text.counter;
}
if (do_fields) {
// finally we check all the form fields to see
// if any data was entered by the user and
// if it was return the data and modify the text
const final_processing = checkAllFields(
out.text, font, color, user_name, is_crayon);
out.text = final_processing.text;
out.form_fields = final_processing.fields;
}
return out;
};
// ugh. So have to turn this into a full
// component too if I want to keep updates
// low and keep the weird flashing down
// Edit mode: Edit / Preview / Save tab strip plus either the raw textarea
// or the rendered preview. State:
//   old_text      - html already committed to the paper
//   textarea_text - raw markdown currently being typed
//   combined_text - html shown in the Preview tab
//   counter       - next free input-field id
class PaperSheetEdit extends Component {
  constructor(props, context) {
    super(props, context);
    this.state = {
      previewSelected: "Preview",
      old_text: props.value || "",
      counter: props.counter || 0,
      textarea_text: "",
      combined_text: props.value || "",
    };
  }
  // Runs the shared createPreview pipeline with the pen settings supplied
  // by the backend. Returns { text, field_counter, [form_fields] }.
  createPreviewFromData(value, do_fields = false) {
    const { data } = useBackend(this.context);
    return createPreview(value,
      this.state.old_text,
      do_fields,
      this.state.counter,
      data.pen_color,
      data.pen_font,
      data.edit_usr,
      data.is_crayon,
    );
  }
  // Textarea input handler; trims the new text so the committed text plus
  // the draft never exceeds MAX_PAPER_LENGTH.
  onInputHandler(e, value) {
    if (value !== this.state.textarea_text) {
      const combined_length = this.state.old_text.length
        + this.state.textarea_text.length;
      if (combined_length > MAX_PAPER_LENGTH) {
        if ((combined_length - MAX_PAPER_LENGTH) >= value.length) {
          // Basically we cannot add any more text to the paper
          value = '';
        } else {
          value = value.substr(0, value.length
            - (combined_length - MAX_PAPER_LENGTH));
        }
        // we check again to save an update
        if (value === this.state.textarea_text) {
          // Do nothing
          return;
        }
      }
      // NOTE(review): every other call site stores
      // createPreviewFromData(...).text, but this stores the whole
      // { text, field_counter } object in combined_text — looks like a
      // bug; confirm before relying on Preview while typing.
      this.setState(() => ({
        textarea_text: value,
        combined_text: this.createPreviewFromData(value),
      }));
    }
  }
  // the final update send to byond, final upkeep
  finalUpdate(new_text) {
    const { act } = useBackend(this.context);
    const final_processing = this.createPreviewFromData(new_text, true);
    act('save', final_processing);
    this.setState(() => { return {
      textarea_text: "",
      previewSelected: "save",
      combined_text: final_processing.text,
      old_text: final_processing.text,
      counter: final_processing.field_counter,
    }; });
    // byond should switch us to readonly mode from here
  }
  render() {
    const {
      textColor,
      fontFamily,
      stamps,
      backgroundColor,
    } = this.props;
    return (
      <Flex
        direction="column"
        fillPositionedParent>
        <Flex.Item>
          <Tabs>
            <Tabs.Tab
              key="marked_edit"
              textColor={'black'}
              backgroundColor={this.state.previewSelected === "Edit"
                ? "grey"
                : "white"}
              selected={this.state.previewSelected === "Edit"}
              onClick={() => this.setState({ previewSelected: "Edit" })}>
              Edit
            </Tabs.Tab>
            <Tabs.Tab
              key="marked_preview"
              textColor={'black'}
              backgroundColor={this.state.previewSelected === "Preview"
                ? "grey"
                : "white"}
              selected={this.state.previewSelected === "Preview"}
              onClick={() => this.setState(() => {
                const new_state = {
                  previewSelected: "Preview",
                  textarea_text: this.state.textarea_text,
                  combined_text: this.createPreviewFromData(
                    this.state.textarea_text).text,
                };
                return new_state;
              })}>
              Preview
            </Tabs.Tab>
            {/* Save is two-step: first click shows "Confirm", second commits */}
            <Tabs.Tab
              key="marked_done"
              textColor={'black'}
              backgroundColor={this.state.previewSelected === "confirm"
                ? "red"
                : this.state.previewSelected === "save"
                  ? "grey"
                  : "white"}
              selected={this.state.previewSelected === "confirm"
                || this.state.previewSelected === "save"}
              onClick={() => {
                if (this.state.previewSelected === "confirm") {
                  this.finalUpdate(this.state.textarea_text);
                }
                else if (this.state.previewSelected === "Edit") {
                  this.setState(() => {
                    const new_state = {
                      previewSelected: "confirm",
                      textarea_text: this.state.textarea_text,
                      combined_text: this.createPreviewFromData(
                        this.state.textarea_text).text,
                    };
                    return new_state;
                  });
                }
                else {
                  this.setState({ previewSelected: "confirm" });
                }
              }}>
              {this.state.previewSelected === "confirm" ? "Confirm" : "Save"}
            </Tabs.Tab>
          </Tabs>
        </Flex.Item>
        <Flex.Item
          grow={1}
          basis={1}>
          {this.state.previewSelected === "Edit" && (
            <TextArea
              value={this.state.textarea_text}
              textColor={textColor}
              fontFamily={fontFamily}
              height={(window.innerHeight - 80) + "px"}
              backgroundColor={backgroundColor}
              onInput={this.onInputHandler.bind(this)} />
          ) || (
            <PaperSheetView
              value={this.state.combined_text}
              stamps={stamps}
              fontFamily={fontFamily}
              textColor={textColor} />
          )}
        </Flex.Item>
      </Flex>
    );
  }
}
// Top-level paper window. Merges any externally-added text into the
// rendered html, then delegates to the read-only view, the editor, or the
// stamper depending on edit_mode (0 / 1 / 2 respectively).
export const PaperSheet = (props, context) => {
  const { data } = useBackend(context);
  const {
    edit_mode,
    text,
    paper_color = "white",
    pen_color = "black",
    pen_font = "Verdana",
    stamps,
    stamp_class,
    sizeX,
    sizeY,
    name,
    add_text,
    add_font,
    add_color,
    add_sign,
    field_counter,
  } = data;
  // some features can add text to a paper sheet outside of this ui
  // we need to parse, sanitize and add any of it to the text value.
  // add_text/add_font/add_color/add_sign are parallel arrays, one entry
  // per externally-added snippet.
  const values = { text: text, field_counter: field_counter };
  if (add_text) {
    for (let index = 0; index < add_text.length; index++) {
      const used_color = add_color[index];
      const used_font = add_font[index];
      const used_sign = add_sign[index];
      const processing = createPreview(
        add_text[index],
        values.text,
        false,
        values.field_counter,
        used_color,
        used_font,
        used_sign
      );
      values.text = processing.text;
      values.field_counter = processing.field_counter;
    }
  }
  const stamp_list = !stamps
    ? []
    : stamps;
  // 0 = read-only, 1 = edit, 2 = stamp placement
  const decide_mode = mode => {
    switch (mode) {
      case 0:
        return (
          <PaperSheetView
            value={values.text}
            stamps={stamp_list}
            readOnly />
        );
      case 1:
        return (
          <PaperSheetEdit
            value={values.text}
            counter={values.field_counter}
            textColor={pen_color}
            fontFamily={pen_font}
            stamps={stamp_list}
            backgroundColor={paper_color} />
        );
      case 2:
        return (
          <PaperSheetStamper
            value={values.text}
            stamps={stamp_list}
            stamp_class={stamp_class} />
        );
      default:
        return "ERROR ERROR WE CANNOT BE HERE!!";
    }
  };
  return (
    <Window
      title={name}
      theme="paper"
      width={sizeX || 400}
      height={sizeY || 500}>
      <Window.Content
        backgroundColor={paper_color}
        scrollable>
        <Box
          id="page"
          fitted
          fillPositionedParent>
          {decide_mode(edit_mode)}
        </Box>
      </Window.Content>
    </Window>
  );
};
|
const formatted_text = run_marked_default(fielded_text.text);
// Fifth, we wrap the created text in the pin color, and font.
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
##############################################################################
#
# Desarrollado por rNet Soluciones
# Jefe de Proyecto: Ing. Ulises Tlatoani Vidal Rieder
# Desarrollador: Ing. Salvador Daniel Pelayo Gómez.
# Analista: Lic. David Padilla Bobadilla
#
##############################################################################<|fim▁hole|># OpenERP, Open Source Management Solution
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_cp<|fim▁end|>
|
#
|
<|file_name|>RegistryService.java<|end_file_name|><|fim▁begin|>/*
* Copyright (C) 2005-2010 Alfresco Software Limited.
*
* This file is part of Alfresco
*
* Alfresco is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Alfresco is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with Alfresco. If not, see <http://www.gnu.org/licenses/>.
*/
package org.alfresco.repo.admin.registry;
import java.io.Serializable;
import java.util.Collection;
/**
 * Interface for service providing access to key-value pairs for storage
 * of system-controlled metadata.
 *
 * @author Derek Hulley
 */
public interface RegistryService
{
    /**
     * Assign a value to the registry key, which must be of the form <b>/a/b/c</b>.
     * 
     * @param key           the registry key.
     * @param value         any value that can be stored in the repository.
     */
    void addProperty(RegistryKey key, Serializable value);
    /**
     * Retrieve the value previously stored against the given key.
     *
     * @param key           the registry key.
     * @return              Returns the value stored in the key or <tt>null</tt> if
     *                      no value exists at the path and name provided
     *
     * @see #addProperty(RegistryKey, Serializable)
     */
    Serializable getProperty(RegistryKey key);
    /**
     * Fetches all child elements for the given path.  The key's property should be
     * <tt>null</tt> as it is completely ignored.
     * <code><pre>
     * ...
     * registryService.addValue(KEY_A_B_C_1, VALUE_ONE);
     * registryService.addValue(KEY_A_B_C_2, VALUE_TWO);
     * ...
     * assertTrue(registryService.getChildElements(KEY_A_B_null).contains("C"));
     * ...
     * </pre></code>
     * 
     * @param key           the registry key with the path.  The last element in the path
     *                      will be ignored, and can be any acceptable value localname or <tt>null</tt>.
     * @return              Returns all child elements (not values) for the given key, ignoring
     *                      the last element in the key.
     * 
     * @see RegistryKey#getPath()
     */
    Collection<String> getChildElements(RegistryKey key);
    /**
     * Copies the path or value from the source to the target location.  The source and target
     * keys <b>must</b> be both either path-specific or property-specific.  If the source doesn't
     * exist, then nothing will be done; there is no guarantee that the target will exist after
     * the call.
     * <p>
     * This is essentially a merge operation.  Use {@link #delete(RegistryKey) delete} first
     * if the target must be cleaned.
     * 
     * @param sourceKey     the source registry key to take values from
     * @param targetKey     the target registry key to move the path or value to
     */
    void copy(RegistryKey sourceKey, RegistryKey targetKey);
    /**
     * Delete the path element or value described by the key.  If the key points to nothing,
     * then nothing is done.
     * <code>delete(/a/b/c)</code> will remove value <b>c</b> from path <b>/a/b</b>.<br/>
     * <code>delete(/a/b/null)</code> will remove node <b>/a/b</b> along with all values and child
     * elements.
     * 
     * @param key           the path or value to delete
     */
    void delete(RegistryKey key);
}
|
* no value exists at the path and name provided
*
|
<|file_name|>doc.rs<|end_file_name|><|fim▁begin|>// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
Region inference module.
# Terminology
Note that we use the terms region and lifetime interchangeably,
though the term `lifetime` is preferred.
# Introduction
Region inference uses a somewhat more involved algorithm than type
inference. It is not the most efficient thing ever written though it
seems to work well enough in practice (famous last words). The reason
that we use a different algorithm is because, unlike with types, it is
impractical to hand-annotate with regions (in some cases, there aren't
even the requisite syntactic forms). So we have to get it right, and
it's worth spending more time on a more involved analysis. Moreover,
regions are a simpler case than types: they don't have aggregate
structure, for example.
Unlike normal type inference, which is similar in spirit to H-M and thus
works progressively, the region type inference works by accumulating
constraints over the course of a function. Finally, at the end of
processing a function, we process and solve the constraints all at
once.
The constraints are always of one of three possible forms:
- ConstrainVarSubVar(R_i, R_j) states that region variable R_i
must be a subregion of R_j
- ConstrainRegSubVar(R, R_i) states that the concrete region R
    (which must not be a variable) must be a subregion of the variable R_i
- ConstrainVarSubReg(R_i, R) is the inverse
# Building up the constraints
Variables and constraints are created using the following methods:
- `new_region_var()` creates a new, unconstrained region variable;
- `make_subregion(R_i, R_j)` states that R_i is a subregion of R_j
- `lub_regions(R_i, R_j) -> R_k` returns a region R_k which is
the smallest region that is greater than both R_i and R_j
- `glb_regions(R_i, R_j) -> R_k` returns a region R_k which is
the greatest region that is smaller than both R_i and R_j
The actual region resolution algorithm is not entirely
obvious, though it is also not overly complex.
## Snapshotting
It is also permitted to try (and rollback) changes to the graph. This
is done by invoking `start_snapshot()`, which returns a value. Then
later you can call `rollback_to()` which undoes the work.
Alternatively, you can call `commit()` which ends all snapshots.
Snapshots can be recursive---so you can start a snapshot when another
is in progress, but only the root snapshot can "commit".
# Resolving constraints
The constraint resolution algorithm is not super complex but also not
entirely obvious. Here I describe the problem somewhat abstractly,
then describe how the current code works. There may be other, smarter
ways of doing this with which I am unfamiliar and can't be bothered to
research at the moment. - NDM
## The problem
Basically our input is a directed graph where nodes can be divided
into two categories: region variables and concrete regions. Each edge
`R -> S` in the graph represents a constraint that the region `R` is a
subregion of the region `S`.
Region variable nodes can have arbitrary degree. There is one region
variable node per region variable.
Each concrete region node is associated with some, well, concrete
region: e.g., a free lifetime, or the region for a particular scope.
Note that there may be more than one concrete region node for a
particular region value. Moreover, because of how the graph is built,
we know that all concrete region nodes have either in-degree 1 or
out-degree 1.
Before resolution begins, we build up the constraints in a hashmap
that maps `Constraint` keys to spans. During resolution, we construct
the actual `Graph` structure that we describe here.
## Our current algorithm
We divide region variables into two groups: Expanding and Contracting.
Expanding region variables are those that have a concrete region
predecessor (direct or indirect). Contracting region variables are
all others.
We first resolve the values of Expanding region variables and then
process Contracting ones. We currently use an iterative, fixed-point
procedure (but read on, I believe this could be replaced with a linear
walk). Basically we iterate over the edges in the graph, ensuring
that, if the source of the edge has a value, then this value is a
subregion of the target value. If the target does not yet have a
value, it takes the value from the source. If the target already had
a value, then the resulting value is Least Upper Bound of the old and
new values. When we are done, each Expanding node will have the
smallest region that it could possibly have and still satisfy the
constraints.
We next process the Contracting nodes. Here we again iterate over the
edges, only this time we move values from target to source (if the
source is a Contracting node). For each contracting node, we compute
its value as the GLB of all its successors. Basically contracting
nodes ensure that there is overlap between their successors; we will
ultimately infer the largest overlap possible.
# The Region Hierarchy
## Without closures
Let's first consider the region hierarchy without thinking about
closures, because they add a lot of complications. The region
hierarchy *basically* mirrors the lexical structure of the code.
There is a region for every piece of 'evaluation' that occurs, meaning
every expression, block, and pattern (patterns are considered to
"execute" by testing the value they are applied to and creating any
relevant bindings). So, for example:
fn foo(x: int, y: int) { // -+
// +------------+ // |
// | +-----+ // |
// | +-+ +-+ +-+ // |
// | | | | | | | // |
// v v v v v v v // |
let z = x + y; // |
... // |
} // -+
fn bar() { ... }
In this example, there is a region for the fn body block as a whole,
and then a subregion for the declaration of the local variable.
Within that, there are sublifetimes for the assignment pattern and
also the expression `x + y`. The expression itself has sublifetimes
for evaluating `x` and `y`.
## Function calls
Function calls are a bit tricky. I will describe how we handle them
*now* and then a bit about how we can improve them (Issue #6268).
Consider a function call like `func(expr1, expr2)`, where `func`,
`arg1`, and `arg2` are all arbitrary expressions. Currently,
we construct a region hierarchy like:
+----------------+
| |
+--+ +---+ +---+|
v v v v v vv
func(expr1, expr2)
Here you can see that the call as a whole has a region and the
function plus arguments are subregions of that. As a side-effect of
this, we get a lot of spurious errors around nested calls, in
particular when combined with `&mut` functions. For example, a call
like this one
self.foo(self.bar())
where both `foo` and `bar` are `&mut self` functions will always yield
an error.
Here is a more involved example (which is safe) so we can see what's
going on:
struct Foo { f: uint, g: uint }
...
fn add(p: &mut uint, v: uint) {
*p += v;
}
...
fn inc(p: &mut uint) -> uint {
*p += 1; *p
}
fn weird() {
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f,
'b: inc(&mut (*x).f)) // (..)
}
The important part is the line marked `(..)` which contains a call to
`add()`. The first argument is a mutable borrow of the field `f`. The
second argument also borrows the field `f`. Now, in the current borrow
checker, the first borrow is given the lifetime of the call to
`add()`, `'a`. The second borrow is given the lifetime of `'b` of the
call to `inc()`. Because `'b` is considered to be a sublifetime of
`'a`, an error is reported since there are two co-existing mutable
borrows of the same data.
However, if we were to examine the lifetimes a bit more carefully, we
can see that this error is unnecessary. Let's examine the lifetimes
involved with `'a` in detail. We'll break apart all the steps involved
in a call expression:
'a: {
'a_arg1: let a_temp1: ... = add;
'a_arg2: let a_temp2: &'a mut uint = &'a mut (*x).f;
'a_arg3: let a_temp3: uint = {
let b_temp1: ... = inc;
let b_temp2: &'b = &'b mut (*x).f;
'b_call: b_temp1(b_temp2)
};
'a_call: a_temp1(a_temp2, a_temp3) // (**)
}
Here we see that the lifetime `'a` includes a number of substatements.
In particular, there is this lifetime I've called `'a_call` that
corresponds to the *actual execution of the function `add()`*, after
all arguments have been evaluated. There is a corresponding lifetime
`'b_call` for the execution of `inc()`. If we wanted to be precise
about it, the lifetime of the two borrows should be `'a_call` and
`'b_call` respectively, since the references that were created
will not be dereferenced except during the execution itself.
However, this model by itself is not sound. The reason is that
while the two references that are created will never be used
simultaneously, it is still true that the first reference is
*created* before the second argument is evaluated, and so even though
it will not be *dereferenced* during the evaluation of the second
argument, it can still be *invalidated* by that evaluation. Consider
this similar but unsound example:
struct Foo { f: uint, g: uint }
...
fn add(p: &mut uint, v: uint) {
*p += v;
}
...
fn consume(x: Box<Foo>) -> uint {
x.f + x.g
}
fn weird() {
let mut x: Box<Foo> = box Foo { ... };
'a: add(&mut (*x).f, consume(x)) // (..)
}
In this case, the second argument to `add` actually consumes `x`, thus
invalidating the first argument.
So, for now, we exclude the `call` lifetimes from our model.
Eventually I would like to include them, but we will have to make the
borrow checker handle this situation correctly. In particular, if
there is a reference created whose lifetime does not enclose
the borrow expression, we must issue sufficient restrictions to ensure
that the pointee remains valid.
## Adding closures
The other significant complication to the region hierarchy is
closures. I will describe here how closures should work, though some
of the work to implement this model is ongoing at the time of this
writing.
The body of closures are type-checked along with the function that
creates them. However, unlike other expressions that appear within the
function body, it is not entirely obvious when a closure body executes
with respect to the other expressions. This is because the closure
body will execute whenever the closure is called; however, we can
never know precisely when the closure will be called, especially
without some sort of alias analysis.
However, we can place some sort of limits on when the closure
executes. In particular, the type of every closure `fn:'r K` includes
a region bound `'r`. This bound indicates the maximum lifetime of that
closure; once we exit that region, the closure cannot be called
anymore. Therefore, we say that the lifetime of the closure body is a
sublifetime of the closure bound, but the closure body itself is unordered
with respect to other parts of the code.
For example, consider the following fragment of code:
'a: {
let closure: fn:'a() = || 'b: {
'c: ...
};
'd: ...
}
Here we have four lifetimes, `'a`, `'b`, `'c`, and `'d`. The closure
`closure` is bounded by the lifetime `'a`. The lifetime `'b` is the
lifetime of the closure body, and `'c` is some statement within the
closure body. Finally, `'d` is a statement within the outer block that
created the closure.
We can say that the closure body `'b` is a sublifetime of `'a` due to
the closure bound. By the usual lexical scoping conventions, the
statement `'c` is clearly a sublifetime of `'b`, and `'d` is a
sublifetime of `'a`. However, there is no ordering between `'c` and
`'d` per se (this kind of ordering between statements is actually only
an issue for dataflow; passes like the borrow checker must assume that
closures could execute at any time from the moment they are created
until they go out of scope).
### Complications due to closure bound inference
There is only one problem with the above model: in general, we do not
actually *know* the closure bounds during region inference! In fact,
closure bounds are almost always region variables! This is very tricky
because the inference system implicitly assumes that we can do things
like compute the LUB of two scoped lifetimes without needing to know
the values of any variables.
Here is an example to illustrate the problem:
    fn identity<T>(x: T) -> T { x }
fn foo() { // 'foo is the function body
'a: {
let closure = identity(|| 'b: {
'c: ...
});
'd: closure();
}
'e: ...;
}
In this example, the closure bound is not explicit. At compile time,
we will create a region variable (let's call it `V0`) to represent the
closure bound.
The primary difficulty arises during the constraint propagation phase.
Imagine there is some variable with incoming edges from `'c` and `'d`.
This means that the value of the variable must be `LUB('c,
'd)`. However, without knowing what the closure bound `V0` is, we
can't compute the LUB of `'c` and `'d`! And we don't know the closure
bound until inference is done.
The solution is to rely on the fixed point nature of inference.
Basically, when we must compute `LUB('c, 'd)`, we just use the current
value for `V0` as the closure's bound. If `V0`'s binding should
change, then we will do another round of inference, and the result of
`LUB('c, 'd)` will change.
One minor implication of this is that the graph does not in fact track
the full set of dependencies between edges. We cannot easily know
whether the result of a LUB computation will change, since there may
be indirect dependencies on other variables that are not reflected on
the graph. Therefore, we must *always* iterate over all edges when
doing the fixed point calculation, not just those adjacent to nodes
whose values have changed.
Were it not for this requirement, we could in fact avoid fixed-point
iteration altogether. In that universe, we could instead first
identify and remove strongly connected components (SCC) in the graph.
Note that such components must consist solely of region variables; all
of these variables can effectively be unified into a single variable.
Once SCCs are removed, we are left with a DAG. At this point, we
could walk the DAG in topological order once to compute the expanding
nodes, and again in reverse topological order to compute the
contracting nodes. However, as I said, this does not work given the
current treatment of closure bounds, but perhaps in the future we can
address this problem somehow and make region inference somewhat more
efficient. Note that this is solely a matter of performance, not
expressiveness.
### Skolemization
For a discussion on skolemization and higher-ranked subtyping, please
see the module `middle::typeck::infer::higher_ranked::doc`.
*/<|fim▁end|>
|
executes. In particular, the type of every closure `fn:'r K` includes
a region bound `'r`. This bound indicates the maximum lifetime of that
closure; once we exit that region, the closure cannot be called
|
<|file_name|>test_filters.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
"""
unit test for filters module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import unittest
import numpy as np
from evo.core import filters
from evo.core import lie_algebra as lie
# TODO: clean these up and use proper fixtures.
POSES_1 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 1]))
]
POSES_2 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 0.5])),
lie.se3(np.eye(3), np.array([0, 0, 0.99])),
lie.se3(np.eye(3), np.array([0, 0, 1.0]))
]
# Eight poses converging geometrically toward z = 1.0; the convergence test
# expects the accepted pair to span the whole sequence, i.e. (0, 7).
POSES_3 = [
    lie.se3(np.eye(3), np.array([0, 0, 0.0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]
POSES_4 = [
lie.se3(np.eye(3), np.array([0, 0, 0])),
lie.se3(np.eye(3), np.array([0, 0, 1])),
lie.se3(np.eye(3), np.array([0, 0, 1])),
lie.se3(np.eye(3), np.array([0, 0, 1]))
]
class TestFilterPairsByPath(unittest.TestCase):
    """filters.filter_pairs_by_path: pick index pairs whose accumulated
    path length matches a target distance within a tolerance."""
    def test_poses1_all_pairs(self):
        # POSES_1 moves 0 -> 0.5 -> 0 -> 1 along z, so exactly two pairs
        # accumulate a path length of 1.0: (0, 2) and (2, 3).
        target_path = 1.0
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 2), (2, 3)])
    def test_poses1_wrong_target(self):
        # No pair in POSES_1 sums to 2.5, so the result must be empty.
        target_path = 2.5
        tol = 0.0
        id_pairs = filters.filter_pairs_by_path(POSES_1, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [])
    def test_poses2_all_pairs_low_tolerance(self):
        # With a tight tolerance only the full span (0, 3) reaches 1.0.
        target_path = 1.0
        tol = 0.001
        id_pairs = filters.filter_pairs_by_path(POSES_2, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 3)])
    def test_convergence_all_pairs(self):
        # POSES_3 converges toward z = 1; with tol = 0.2 the only accepted
        # pair spans the whole sequence.
        target_path = 1.0
        tol = 0.2
        id_pairs = filters.filter_pairs_by_path(POSES_3, target_path, tol,
                                                all_pairs=True)
        self.assertEqual(id_pairs, [(0, 7)])
axis = np.array([1, 0, 0])
POSES_5 = [
lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])),
lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0]))
]
TRANSFORM = lie.random_se3()
POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5]
axis = np.array([1, 0, 0])
p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0]))
pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3]))
p1 = np.dot(p0, pd)
p2 = np.dot(p1, pd)
p3 = np.dot(p2, pd)
POSES_6 = [p0, p1, p2, p3, p3]
POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6]
class TestFilterPairsByAngle(unittest.TestCase):
    """filters.filter_pairs_by_angle: pick index pairs by relative rotation
    angle; results must be invariant to a global SE(3) transformation."""
    def test_poses5(self):
        tol = 0.001
        expected_result = [(0, 1), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi - tol
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)
            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)
    def test_poses5_all_pairs(self):
        tol = 0.01
        expected_result = [(0, 1), (0, 4), (1, 2), (2, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            target_angle = math.pi
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)
            # Check for same result when using degrees:
            target_angle = np.rad2deg(target_angle)
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True,
                                                     degrees=True)
            self.assertEqual(id_pairs, expected_result)
    def test_poses6(self):
        tol = 0.001
        target_angle = math.pi - tol
        expected_result = [(0, 3)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=False)
            self.assertEqual(id_pairs, expected_result)
    def test_poses6_all_pairs(self):
        target_angle = math.pi
        tol = 0.001
        expected_result = [(0, 3), (0, 4)]
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, expected_result)
if __name__ == '__main__':
unittest.main(verbosity=2)<|fim▁end|>
|
lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]
|
<|file_name|>config.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
""" Copyright (C) 2012 mountainpenguin ([email protected])
<http://github.com/mountainpenguin/pyrt>
This file is part of pyRT.
pyRT is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
pyRT is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with pyRT. If not, see <http://www.gnu.org/licenses/>.
"""
import cPickle as pickle
import os
try:
import json
except ImportError:
import simplejson as json
class ConfigError(Exception):
    """Raised when the pyRT configuration file is missing or malformed."""

    def __init__(self, value):
        # Keep the offending value so callers can inspect what failed.
        self.parameter = value

    def __str__(self):
        return repr(self.parameter)

    def __repr__(self):
        return str(self)
class ConfigStore(object):
    """Value object holding every pyRT runtime setting.

    Attribute names intentionally differ from some parameter names
    (e.g. ``sockpath`` -> ``rtorrent_socket``): callers read attributes,
    while the ``.pyrtrc`` parser supplies the constructor arguments.
    """

    def __init__(self, sockpath, serverhost, serverport, password, ssl_certificate=None, ssl_private_key=None, ca_certs=None, root_directory="/", logfile="pyrt.log", refresh=10, scgi_username=None, scgi_password=None, scgi_method="Digest"):
        # Map each constructor argument onto its public attribute name.
        assignments = (
            ("rtorrent_socket", sockpath),
            ("host", serverhost),
            ("port", serverport),
            ("password", password),
            ("ssl_certificate", ssl_certificate),
            ("ssl_private_key", ssl_private_key),
            ("ssl_ca_certs", ca_certs),
            ("root_directory", root_directory),
            ("logfile", logfile),
            ("refresh", refresh),
            ("scgi_username", scgi_username),
            ("scgi_password", scgi_password),
            ("scgi_method", scgi_method),
        )
        for attribute, value in assignments:
            setattr(self, attribute, value)
class Config:
    def __init__(self):
        # Prefer the pickled snapshot of a previously-loaded config; fall
        # back to parsing config/.pyrtrc when the snapshot is absent or
        # unreadable.
        if os.path.exists(os.path.join("config", ".pyrtconfig")):
            try:
                self.CONFIG = pickle.load(open(os.path.join("config", ".pyrtconfig")))
            except:
                # Snapshot is corrupt: discard it and re-parse .pyrtrc.
                os.remove(os.path.join("config", ".pyrtconfig"))
                self.loadconfig()
        else:
            self.loadconfig()
    def set(self, key, value):
        # Update a single setting and persist it; returns False for unknown
        # keys so typos cannot silently grow the config object, otherwise
        # returns the stored value.
        if key not in self.CONFIG.__dict__:
            return False
        else:
            self.CONFIG.__dict__[key] = value
            self._flush()  # persist immediately so restarts see the change
            return self.CONFIG.__dict__[key]
    def _flush(self):
        # Snapshot the live ConfigStore to disk; __init__ reloads it on
        # start-up. (Text-mode handle is fine under Python 2's cPickle.)
        pickle.dump(self.CONFIG, open(os.path.join("config", ".pyrtconfig"), "w"))
def loadconfig(self):
if not os.path.exists(os.path.join("config", ".pyrtrc")):
raise ConfigError("Config File doesn't exist")
config_ = open(os.path.join("config", ".pyrtrc")).read()
config_stripped = ""
for line in config_.split("\n"):
if line == "":
pass
else:
for char in line:
if char == "#":
break
else:
config_stripped += char
config_stripped += "\n"
try:
configfile = json.loads(config_stripped)
if "ssl_certificate" in configfile.keys() and "ssl_private_key" in configfile.keys():
cert = configfile["ssl_certificate"]
pkey = configfile["ssl_private_key"]
else:
cert, pkey = None, None
if "ssl_ca_certs" in configfile.keys():
ca_certs = configfile["ssl_ca_certs"]
else:
ca_certs = None
if "root_directory" in configfile:
root_dir = configfile["root_directory"]
else:
root_dir = "/"
<|fim▁hole|> logfile = "pyrt.log"
try:
refresh = int(configfile["refresh"])
except:
refresh = 10
if "scgi_username" in configfile:
scgi_username = configfile["scgi_username"]
else:
scgi_username = None
if "scgi_password" in configfile:
scgi_password = configfile["scgi_password"]
else:
scgi_password = None
if "scgi_method" in configfile:
scgi_method = configfile["scgi_method"]
else:
scgi_method = "Digest"
self.CONFIG = ConfigStore(
sockpath=configfile["rtorrent_socket"],
serverhost=configfile["host"],
serverport=configfile["port"],
password=configfile["password"],
ssl_certificate=cert,
ssl_private_key=pkey,
ca_certs=ca_certs,
root_directory=root_dir,
logfile=logfile,
refresh=refresh,
scgi_username=scgi_username,
scgi_password=scgi_password,
scgi_method=scgi_method,
)
self._flush()
except KeyError:
raise ConfigError("Config File is malformed")
    def get(self, conf):
        # Return the named setting, or None when the key is unknown.
        if conf in self.CONFIG.__dict__.keys():
            return self.CONFIG.__dict__[conf]
        else:
            return None
|
if "logfile" in configfile:
logfile = configfile["logfile"]
else:
|
<|file_name|>tests.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
class MyTest(unittest.TestCase):
    """Smoke test: confirms the unittest harness discovers and runs tests."""
    def test(self):
        # Tautology on purpose; a failure means the harness itself is broken.
        self.assertEqual(4, 4)
|
import unittest
|
<|file_name|>083_remove_duplicates_from_sorted_list.cpp<|end_file_name|><|fim▁begin|>#include <iostream>
using namespace std;
/**
* Definition for singly-linked list.
*/
struct ListNode {
    int val;         // node payload
    ListNode *next;  // next node in the list; NULL terminates
    ListNode(int x) : val(x), next(NULL) {}  // leaf-node constructor
};
class Solution {
public:
ListNode* deleteDuplicates(ListNode* head) {
if (!head)
return head;
ListNode *ret = head;
ListNode *pos = head;
head = pos->next;
while (head) {
if (head->val == pos->val) {
pos->next = head->next;
delete head;
} else {
pos = pos->next;
}
head = pos->next;
}
return ret;
}<|fim▁hole|>
int main(void)
{
    // No driver needed: the judge exercises Solution directly.
    return 0;
}
|
};
|
<|file_name|>no_0037_sudoku_solver.rs<|end_file_name|><|fim▁begin|>struct Solution;
impl Solution {
pub fn solve_sudoku(board: &mut Vec<Vec<char>>) {
// 9 行,每个 u16 表示一行上的数据,u16 表示对应索引的数字在这一行上已经有了。
let mut line = vec![0_u16; 9];
// 9 列
let mut column = vec![0_u16; 9];
// 3 x 3 的块
let mut block = vec![vec![0_u16; 3]; 3];
// 如果不是空格,就将对应的位置设置成 1.
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b != '.' {
Self::flip(
&mut line,
&mut column,
&mut block,
i,
j,
*b as u8 - '1' as u8,
);
}
}
}
// 先把只有一个选择的位置标记上
loop {
let mut modified = false;
let m = board.len();
let n = board[0].len();
for i in 0..m {
for j in 0..n {
let b = board[i][j];
if b != '.' {
continue;
}
// 先取反,1 就表示空格了,然后 & 0x1ff 是为了把前面的高位的 1 去掉。
let mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
// mask&(mask-1) 是把最右侧的 1 置 0,如果结果是 0 说明 mask 中只有一个 1,也就是只有一个选择
if mask > 0 && mask & (mask - 1) == 0 {
// 右边 0 的个数,也就是右边 1 的位置
let digit = mask.trailing_zeros() as u8;
// 将 [i][j] 的位置放上数字
Self::flip(&mut line, &mut column, &mut block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
modified = true;
}
}
}
if !modified {
break;
}
}
// 将空格收集起来
let mut spaces = Vec::new();
for (i, row) in board.iter().enumerate() {
for (j, b) in row.iter().enumerate() {
if *b == '.' {
spaces.push((i, j));
}
}
}
Self::dfs(&spaces, 0, &mut line, &mut column, &mut block, board);
}
// 将 digit 对应的比特位取反
fn flip(
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
i: usize,
j: usize,
digit: u8,
) {
line[i] ^= 1 << digit;
column[j] ^= 1 << digit;
block[i / 3][j / 3] ^= 1 << digit;
}
fn dfs(
spaces: &Vec<(usize, usize)>,
pos: usize,
line: &mut Vec<u16>,
column: &mut Vec<u16>,
block: &mut Vec<Vec<u16>>,
board: &mut Vec<Vec<char>>,
) -> bool {
if pos == spaces.len() {
return true;
}
let (i, j) = spaces[pos];
let mut mask = !(line[i] | column[j] | block[i / 3][j / 3]) & 0x1ff;
while mask > 0 {
let digit = mask.trailing_zeros() as u8;
Self::flip(line, column, block, i, j, digit);
board[i][j] = (digit + '1' as u8) as char;
if Self::dfs(spaces, pos + 1, line, column, block, board) {
return true;
}
Self::flip(line, column, block, i, j, digit);
mask &= mask - 1;
}<|fim▁hole|>
false
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_solve_sudoku() {
        // Classic LeetCode #37 fixture: '.' marks an empty cell.
        let mut board = vec![
            vec!['5', '3', '.', '.', '7', '.', '.', '.', '.'],
            vec!['6', '.', '.', '1', '9', '5', '.', '.', '.'],
            vec!['.', '9', '8', '.', '.', '.', '.', '6', '.'],
            vec!['8', '.', '.', '.', '6', '.', '.', '.', '3'],
            vec!['4', '.', '.', '8', '.', '3', '.', '.', '1'],
            vec!['7', '.', '.', '.', '2', '.', '.', '.', '6'],
            vec!['.', '6', '.', '.', '.', '.', '2', '8', '.'],
            vec!['.', '.', '.', '4', '1', '9', '.', '.', '5'],
            vec!['.', '.', '.', '.', '8', '.', '.', '7', '9'],
        ];
        Solution::solve_sudoku(&mut board);
        // This puzzle has a unique solution, so exact comparison is valid.
        let want = vec![
            vec!['5', '3', '4', '6', '7', '8', '9', '1', '2'],
            vec!['6', '7', '2', '1', '9', '5', '3', '4', '8'],
            vec!['1', '9', '8', '3', '4', '2', '5', '6', '7'],
            vec!['8', '5', '9', '7', '6', '1', '4', '2', '3'],
            vec!['4', '2', '6', '8', '5', '3', '7', '9', '1'],
            vec!['7', '1', '3', '9', '2', '4', '8', '5', '6'],
            vec!['9', '6', '1', '5', '3', '7', '2', '8', '4'],
            vec!['2', '8', '7', '4', '1', '9', '6', '3', '5'],
            vec!['3', '4', '5', '2', '8', '6', '1', '7', '9'],
        ];
        assert_eq!(board, want);
    }
}
| |
<|file_name|>autocomp.js<|end_file_name|><|fim▁begin|>#!/usr/bin/env node
var fs = require('fs'),
path = require('path');
//kansorc = require('kanso/kansorc');
//kansorc.load(function (err, cfg) {
var commands = {
'clear-cache': null,
'create': null,
'createdb': null,
'deletedb': null,
'listdb': null,
'replicate': null,
'help': [{list: [
'clear-cache',
'create',
'createdb',
'deletedb',
'listdb',
'replicate',
'help',
'install',
'update',
'ls',
'pack',
'publish',
'push',
'show',
'transform',
'unpublish',
'upload',
'uuids'
]}],
'install': [{directories: true, filenames: /.*\.tar\.gz$/}],
'update': null,
'ls': [{directories: true}],<|fim▁hole|> 'publish': [{directories: true}],
// TODO: add lookup of environments in .kansorc
'push': [{environments: true, directories: true}, {environments: true}],
'show': [{directories: true}],
'transform': [
{list: ['clear-ids', 'add-ids', 'csv', 'map']},
{filenames: /.*/, directories: true}, // could be .json or .csv / .tsv
{filenames: /.*\.json$/, directories: true}
],
'unpublish': null,
'upload': [{filenames: /.*\.json$/, directories: true}, {environments: true}],
'uuids': null
};
// The shell invokes this script with its own completion words appended;
// slice(3) skips node, the script path and the top-level command name —
// presumably matching how the shell hook passes COMP_WORDS (TODO confirm).
var args = process.argv.slice(3);
// Count the non-flag words (flags start with '-') already on the line;
// this determines which argument position we are completing.
var arglen = 0;
for (var i = 0; i < args.length; i++) {
    if (args[i] && args[i][0] !== '-') {
        arglen++;
    }
}
// The first non-flag word is treated as the (sub)command being completed.
var command = null;
for (var j = 0; j < args.length; j++) {
    if (args[j] && args[j][0] !== '-') {
        command = args[j];
        break;
    }
}
// the current text being entered
var curr = args[args.length - 1];
function trim(str) {
return str.replace(/^\s+/, '').replace(/\s+$/, '');
}
/**
 * Return the entries of `list` that start with `curr`, each with a trailing
 * space appended (so the shell inserts a separator). When exactly one entry
 * matches and it already equals `curr` (ignoring surrounding whitespace),
 * completion for this word is done, so the candidates for the *next* word
 * (`nextlist`, optional) are returned instead.
 */
function matchList(list, curr, /*optional*/nextlist) {
    var prefixed = list.filter(function (entry) {
        return entry.indexOf(curr) === 0;
    }).map(function (entry) {
        return entry + ' ';
    });
    var exactSingle = prefixed.length === 1 &&
        prefixed[0].trim() === curr.trim();
    return exactSingle ? (nextlist || []) : prefixed;
}
/**
 * Build the full candidate list for one argument-position descriptor
 * ({list, directories, filenames}). Directory and filename candidates are
 * read synchronously from the current working directory.
 */
function completeList(argdef) {
    if (!argdef) {
        return [];
    }
    var l = [];
    if (argdef.list) {
        l = l.concat(argdef.list);
    }
    if (argdef.directories) {
        // Offer every directory in the current working directory.
        l = l.concat(
            fs.readdirSync('.').filter(function (f) {
                return fs.statSync(f).isDirectory();
            })
        );
    }
    if (argdef.filenames) {
        // Offer entries whose names match the descriptor's regex.
        l = l.concat(
            fs.readdirSync('.').filter(function (f) {
                return argdef.filenames.test(f);
            })
        );
    }
    return l;
}
// Dispatch on how many non-flag words are present and emit the candidates.
var matches = [];
// list all commands
if (arglen === 0) {
    matches = Object.keys(commands);
}
// complete first command
else if (arglen === 1) {
    matches = matchList(
        Object.keys(commands),
        curr,
        // Once the command is complete, offer its first argument's options.
        commands[curr] && completeList(commands[curr][0])
    );
}
// match command arguments
else if (arglen > 1) {
    // arglen - 2: descriptor for the argument currently being typed;
    // arglen - 1: descriptor for the argument that would follow it.
    if (commands[command] && commands[command][arglen - 2]) {
        var argdef = commands[command][arglen - 2];
        var next_argdef = commands[command][arglen - 1];
        if (argdef.list) {
            matches = matches.concat(
                matchList(
                    argdef.list, curr, completeList(next_argdef)
                )
            );
        }
        if (argdef.directories) {
            // Resolve the directory we should scan from the partial path:
            // a trailing '/' means curr itself is the directory.
            var wd = './';
            if (curr && /\/$/.test(curr)) {
                wd = curr;
            }
            else if (curr) {
                wd = path.dirname(curr) + '/';
            }
            var files = fs.readdirSync(wd);
            var dirs = files.filter(function (f) {
                return fs.statSync(wd === './' ? f: wd + f).isDirectory();
            }).map(function (d) {
                // Re-prefix with the directory so candidates stay valid paths.
                return wd === './' ? d: wd + d;
            });
            matches = matches.concat(
                matchList(dirs, curr, completeList(next_argdef))
            );
        }
        if (argdef.filenames) {
            // Same directory resolution, filtered by the filename regex.
            var wd = './';
            if (curr && /\/$/.test(curr)) {
                wd = curr;
            }
            else if (curr) {
                wd = path.dirname(curr) + '/';
            }
            var files = fs.readdirSync(wd);
            var dirs = files.filter(function (f) {
                return argdef.filenames.test(wd === './' ? f: wd + f);
            }).map(function (d) {
                return wd === './' ? d: wd + d;
            });
            matches = matches.concat(
                matchList(dirs, curr, completeList(next_argdef))
            );
        }
    }
}
// One candidate per line, consumed by the shell completion hook.
process.stdout.write(matches.join('\n'));
//});<|fim▁end|>
|
'pack': [{directories: true}],
|
<|file_name|>tfile.server.model.js<|end_file_name|><|fim▁begin|>'use strict';
/**
 * Module dependencies.
 */
var mongoose = require('mongoose'),
    Schema = mongoose.Schema;

/**
 * Tfile Schema: a user-owned test file with free-form content and tags.
 */
var TfileSchema = new Schema({
    name: {
        type: String,
        default: '',
        required: 'Please add a TestFile name',
        trim: true
    },
    created: {
        type: Date,
        default: Date.now
    },
    user: {
        type: Schema.ObjectId,
        ref: 'User'
    },
    content: {
        type: String,
        default: ''
    },
    tags: {
        type: [String],
        // NOTE(review): `{}` as the default for a [String] path looks odd —
        // kept as-is; confirm against mongoose array-default semantics.
        default: {}
    }
});

mongoose.model('Tfile', TfileSchema);
|
*/
var mongoose = require('mongoose'),
Schema = mongoose.Schema;
|
<|file_name|>main.module.ts<|end_file_name|><|fim▁begin|>import { TopWarningComponent } from './../top-warning/top-warning.component';
import { TrialExpiredComponent } from './../trial-expired/trial-expired.component';
import { EmptyDashboardComponent } from './empty-dashboard.component';
import { RouterModule } from '@angular/router';
import { TreeViewComponent } from './../tree-view/tree-view.component';
import { TranslateModule } from '@ngx-translate/core';
import { SideNavComponent } from './../side-nav/side-nav.component';
import { SharedModule } from './../shared/shared.module';
import { MainComponent } from './main.component';
import { NgModule, ModuleWithProviders } from '@angular/core';
const routing: ModuleWithProviders = RouterModule.forChild([
{
path: '',
component: MainComponent,
children: [
{
path: 'blank',
component: EmptyDashboardComponent,
},
{
path: 'apps',
loadChildren: 'app/apps-list/apps-list.module#AppsListModule',
},
{
path: 'subs',
loadChildren: 'app/subscription/subscription.module#SubscriptionComponentModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site',
loadChildren: 'app/site/site.module#SiteModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/functions',
loadChildren: 'app/functions.module#FunctionsModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/proxies',
loadChildren: 'app/proxies.module#ProxiesModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/slots',
loadChildren: 'app/slots-list/slots-list.module#SlotsListModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/slots/:slot',
loadChildren: 'app/site/site.module#SiteModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/slots/:slot/functions',
loadChildren: 'app/functions.module#FunctionsModule',
},
{
path: 'subscriptions/:subscriptionId/resourcegroups/:resourceGroup/sites/:site/slots/:slot/proxies',
loadChildren: 'app/proxies.module#ProxiesModule',
},
{
path: 'providers/microsoft.blueridge',
loadChildren: 'app/functions.module#FunctionsModule',
},
{
path: '**',
component: EmptyDashboardComponent,
},
],
},
]);
@NgModule({<|fim▁hole|>})
export class MainModule {}<|fim▁end|>
|
imports: [TranslateModule.forChild(), SharedModule, routing],
declarations: [MainComponent, SideNavComponent, TreeViewComponent, TrialExpiredComponent, TopWarningComponent],
providers: [],
|
<|file_name|>CloudFormationStackRecordSourceInfo.java<|end_file_name|><|fim▁begin|>/*
* Copyright 2014-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.lightsail.model;
<|fim▁hole|>import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* Describes the source of a CloudFormation stack record (i.e., the export snapshot record).
* </p>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/lightsail-2016-11-28/CloudFormationStackRecordSourceInfo"
* target="_top">AWS API Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class CloudFormationStackRecordSourceInfo implements Serializable, Cloneable, StructuredPojo {
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*/
private String resourceType;
/**
* <p>
* The name of the record.
* </p>
*/
private String name;
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*/
private String arn;
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @see CloudFormationStackRecordSourceType
*/
public void setResourceType(String resourceType) {
this.resourceType = resourceType;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @return The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @see CloudFormationStackRecordSourceType
*/
public String getResourceType() {
return this.resourceType;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @return Returns a reference to this object so that method calls can be chained together.
* @see CloudFormationStackRecordSourceType
*/
public CloudFormationStackRecordSourceInfo withResourceType(String resourceType) {
setResourceType(resourceType);
return this;
}
/**
* <p>
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* </p>
*
* @param resourceType
* The Lightsail resource type (e.g., <code>ExportSnapshotRecord</code>).
* @return Returns a reference to this object so that method calls can be chained together.
* @see CloudFormationStackRecordSourceType
*/
public CloudFormationStackRecordSourceInfo withResourceType(CloudFormationStackRecordSourceType resourceType) {
this.resourceType = resourceType.toString();
return this;
}
/**
* <p>
* The name of the record.
* </p>
*
* @param name
* The name of the record.
*/
public void setName(String name) {
this.name = name;
}
/**
* <p>
* The name of the record.
* </p>
*
* @return The name of the record.
*/
public String getName() {
return this.name;
}
/**
* <p>
* The name of the record.
* </p>
*
* @param name
* The name of the record.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CloudFormationStackRecordSourceInfo withName(String name) {
setName(name);
return this;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @param arn
* The Amazon Resource Name (ARN) of the export snapshot record.
*/
public void setArn(String arn) {
this.arn = arn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @return The Amazon Resource Name (ARN) of the export snapshot record.
*/
public String getArn() {
return this.arn;
}
/**
* <p>
* The Amazon Resource Name (ARN) of the export snapshot record.
* </p>
*
* @param arn
* The Amazon Resource Name (ARN) of the export snapshot record.
* @return Returns a reference to this object so that method calls can be chained together.
*/
public CloudFormationStackRecordSourceInfo withArn(String arn) {
setArn(arn);
return this;
}
/**
* Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be
* redacted from this string using a placeholder value.
*
* @return A string representation of this object.
*
* @see java.lang.Object#toString()
*/
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
sb.append("{");
if (getResourceType() != null)
sb.append("ResourceType: ").append(getResourceType()).append(",");
if (getName() != null)
sb.append("Name: ").append(getName()).append(",");
if (getArn() != null)
sb.append("Arn: ").append(getArn());
sb.append("}");
return sb.toString();
}
@Override
public boolean equals(Object obj) {
if (this == obj)
return true;
if (obj == null)
return false;
if (obj instanceof CloudFormationStackRecordSourceInfo == false)
return false;
CloudFormationStackRecordSourceInfo other = (CloudFormationStackRecordSourceInfo) obj;
if (other.getResourceType() == null ^ this.getResourceType() == null)
return false;
if (other.getResourceType() != null && other.getResourceType().equals(this.getResourceType()) == false)
return false;
if (other.getName() == null ^ this.getName() == null)
return false;
if (other.getName() != null && other.getName().equals(this.getName()) == false)
return false;
if (other.getArn() == null ^ this.getArn() == null)
return false;
if (other.getArn() != null && other.getArn().equals(this.getArn()) == false)
return false;
return true;
}
@Override
public int hashCode() {
final int prime = 31;
int hashCode = 1;
hashCode = prime * hashCode + ((getResourceType() == null) ? 0 : getResourceType().hashCode());
hashCode = prime * hashCode + ((getName() == null) ? 0 : getName().hashCode());
hashCode = prime * hashCode + ((getArn() == null) ? 0 : getArn().hashCode());
return hashCode;
}
@Override
public CloudFormationStackRecordSourceInfo clone() {
try {
return (CloudFormationStackRecordSourceInfo) super.clone();
} catch (CloneNotSupportedException e) {
throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
}
}
@com.amazonaws.annotation.SdkInternalApi
@Override
public void marshall(ProtocolMarshaller protocolMarshaller) {
com.amazonaws.services.lightsail.model.transform.CloudFormationStackRecordSourceInfoMarshaller.getInstance().marshall(this, protocolMarshaller);
}
}<|fim▁end|>
| |
<|file_name|>test_django.py<|end_file_name|><|fim▁begin|>from kaneda.backends import LoggerBackend, ElasticsearchBackend
from kaneda.queues import CeleryQueue
from django_kaneda import settings # NOQA
class TestDjango(object):
def test_django_kaneda_with_backend(self, mocker, django_settings_backend):
mocker.patch('django_kaneda.settings', django_settings_backend)
from django_kaneda import LazyMetrics
metrics = LazyMetrics()
assert isinstance(metrics.backend, ElasticsearchBackend)
result = metrics.gauge('test_gauge', 42)
assert result
def test_django_kaneda_with_debug(self, mocker, django_settings_debug):
mocker.patch('django_kaneda.settings', django_settings_debug)<|fim▁hole|> from django_kaneda import LazyMetrics
metrics = LazyMetrics()
metrics.gauge('test_gauge', 42)
assert isinstance(metrics.backend, LoggerBackend)
def test_django_kaneda_with_queue(self, mocker, django_settings_queue):
mocker.patch('django_kaneda.settings', django_settings_queue)
from django_kaneda import LazyMetrics
metrics = LazyMetrics()
assert isinstance(metrics.queue, CeleryQueue)
result = metrics.gauge('test_gauge', 42)
assert result<|fim▁end|>
| |
<|file_name|>forms.py<|end_file_name|><|fim▁begin|># This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from wtforms.ext.sqlalchemy.fields import QuerySelectField
from wtforms.fields import StringField
from wtforms.validators import DataRequired
from indico.core.db.sqlalchemy.descriptions import RenderMode
from indico.modules.events.sessions.models.sessions import Session
from indico.modules.events.tracks.models.groups import TrackGroup
from indico.util.i18n import _
from indico.web.forms.base import IndicoForm, generated_data<|fim▁hole|>
class TrackForm(IndicoForm):
title = StringField(_('Title'), [DataRequired()])
code = StringField(_('Code'))
track_group = QuerySelectField(_('Track group'), default='', allow_blank=True, get_label='title',
description=_('Select a track group to which this track should belong'))
default_session = QuerySelectField(_('Default session'), default='', allow_blank=True, get_label='title',
description=_('Indico will preselect this session whenever an abstract is '
'accepted for the track'))
description = IndicoMarkdownField(_('Description'), editor=True)
def __init__(self, *args, **kwargs):
event = kwargs.pop('event')
super(TrackForm, self).__init__(*args, **kwargs)
self.default_session.query = Session.query.with_parent(event)
self.track_group.query = TrackGroup.query.with_parent(event)
class ProgramForm(IndicoForm):
program = IndicoMarkdownField(_('Programme'), editor=True, mathjax=True)
@generated_data
def program_render_mode(self):
return RenderMode.markdown
class TrackGroupForm(IndicoForm):
title = StringField(_('Title'), [DataRequired()])
description = IndicoMarkdownField(_('Description'), editor=True)<|fim▁end|>
|
from indico.web.forms.fields import IndicoMarkdownField
|
<|file_name|>error.rs<|end_file_name|><|fim▁begin|>//
// imag - the personal information management suite for the commandline
// Copyright (C) 2015, 2016 Matthias Beyer <[email protected]> and contributors
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; version
// 2.1 of the License.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
//
error_chain! {
types {
NoteError, NoteErrorKind, ResultExt, Result;
}
errors {
StoreWriteError {
description("Error writing store")
display("Error writing store")
}
<|fim▁hole|>
HeaderTypeError {
description("Header type error")
display("Header type error")
}
NoteToEntryConversion {
description("Error converting Note instance to Entry instance")
display("Error converting Note instance to Entry instance")
}
}
}<|fim▁end|>
|
StoreReadError {
description("Error reading store")
display("Error reading store")
}
|
<|file_name|>killProcName.py<|end_file_name|><|fim▁begin|># Kills a process by process name
#
# Uses the Performance Data Helper to locate the PID, then kills it.
# Will only kill the process if there is only one process of that name
# (eg, attempting to kill "Python.exe" will only work if there is only
# one Python.exe running. (Note that the current process does not
# count - ie, if Python.exe is hosting this script, you can still kill
# another Python.exe (as long as there is only one other Python.exe)
# Really just a demo for the win32pdh(util) module, which allows you
# to get all sorts of information about a running process and many
# other aspects of your system.
import win32api, win32pdhutil, win32con, sys
def killProcName(procname):
# Change suggested by Dan Knierim, who found that this performed a
# "refresh", allowing us to kill processes created since this was run
# for the first time.
try:
win32pdhutil.GetPerformanceAttributes('Process','ID Process',procname)
except:
pass
pids = win32pdhutil.FindPerformanceAttributesByName(procname)
# If _my_ pid in there, remove it!
try:
pids.remove(win32api.GetCurrentProcessId())
except ValueError:
pass
if len(pids)==0:
result = "Can't find %s" % procname
elif len(pids)>1:
result = "Found too many %s's - pids=`%s`" % (procname,pids)
else:
handle = win32api.OpenProcess(win32con.PROCESS_TERMINATE, 0,pids[0])
win32api.TerminateProcess(handle,0)
win32api.CloseHandle(handle)
result = ""
return result
if __name__ == '__main__':
if len(sys.argv)>1:
for procname in sys.argv[1:]:<|fim▁hole|> result = killProcName(procname)
if result:
print result
print "Dumping all processes..."
win32pdhutil.ShowAllProcesses()
else:
print "Killed %s" % procname
else:
print "Usage: killProcName.py procname ..."<|fim▁end|>
| |
<|file_name|>primeng-tabview.js<|end_file_name|><|fim▁begin|>import { EventEmitter, Input, Output, Component, ViewContainerRef, ContentChildren, ElementRef, NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { TooltipModule } from 'primeng/tooltip';
import { PrimeTemplate, SharedModule } from 'primeng/api';
var __decorate = (this && this.__decorate) || function (decorators, target, key, desc) {
var c = arguments.length, r = c < 3 ? target : desc === null ? desc = Object.getOwnPropertyDescriptor(target, key) : desc, d;
if (typeof Reflect === "object" && typeof Reflect.decorate === "function") r = Reflect.decorate(decorators, target, key, desc);
else for (var i = decorators.length - 1; i >= 0; i--) if (d = decorators[i]) r = (c < 3 ? d(r) : c > 3 ? d(target, key, r) : d(target, key)) || r;
return c > 3 && r && Object.defineProperty(target, key, r), r;
};
let idx = 0;
let TabViewNav = class TabViewNav {
constructor() {
this.orientation = 'top';
this.onTabClick = new EventEmitter();
this.onTabCloseClick = new EventEmitter();
}
getDefaultHeaderClass(tab) {
let styleClass = 'ui-state-default ui-corner-' + this.orientation;
if (tab.headerStyleClass) {
styleClass = styleClass + " " + tab.headerStyleClass;
}
return styleClass;
}
clickTab(event, tab) {
this.onTabClick.emit({
originalEvent: event,
tab: tab
});
}
clickClose(event, tab) {
this.onTabCloseClick.emit({
originalEvent: event,
tab: tab
});
}
};
__decorate([
Input()
], TabViewNav.prototype, "tabs", void 0);
__decorate([
Input()
], TabViewNav.prototype, "orientation", void 0);
__decorate([
Output()
], TabViewNav.prototype, "onTabClick", void 0);
__decorate([
Output()
], TabViewNav.prototype, "onTabCloseClick", void 0);
TabViewNav = __decorate([
Component({
selector: '[p-tabViewNav]',
host: {
'[class.ui-tabview-nav]': 'true',
'[class.ui-helper-reset]': 'true',
'[class.ui-helper-clearfix]': 'true',
'[class.ui-widget-header]': 'true',
'[class.ui-corner-all]': 'true'
},
template: `
<ng-template ngFor let-tab [ngForOf]="tabs">
<li [class]="getDefaultHeaderClass(tab)" [ngStyle]="tab.headerStyle" role="presentation"
[ngClass]="{'ui-tabview-selected ui-state-active': tab.selected, 'ui-state-disabled': tab.disabled}"
(click)="clickTab($event,tab)" *ngIf="!tab.closed" tabindex="0" (keydown.enter)="clickTab($event,tab)">
<a [attr.id]="tab.id + '-label'" role="tab" [attr.aria-selected]="tab.selected" [attr.aria-controls]="tab.id" [pTooltip]="tab.tooltip" [tooltipPosition]="tab.tooltipPosition"
[attr.aria-selected]="tab.selected" [positionStyle]="tab.tooltipPositionStyle" [tooltipStyleClass]="tab.tooltipStyleClass">
<ng-container *ngIf="!tab.headerTemplate" >
<span class="ui-tabview-left-icon" [ngClass]="tab.leftIcon" *ngIf="tab.leftIcon"></span>
<span class="ui-tabview-title">{{tab.header}}</span>
<span class="ui-tabview-right-icon" [ngClass]="tab.rightIcon" *ngIf="tab.rightIcon"></span>
</ng-container>
<ng-container *ngIf="tab.headerTemplate">
<ng-container *ngTemplateOutlet="tab.headerTemplate"></ng-container>
</ng-container>
</a>
<span *ngIf="tab.closable" class="ui-tabview-close pi pi-times" (click)="clickClose($event,tab)"></span>
</li>
</ng-template>
`
})
], TabViewNav);
let TabPanel = class TabPanel {
constructor(viewContainer) {
this.viewContainer = viewContainer;
this.cache = true;
this.tooltipPosition = 'top';
this.tooltipPositionStyle = 'absolute';
this.id = `ui-tabpanel-${idx++}`;
}
ngAfterContentInit() {
this.templates.forEach((item) => {
switch (item.getType()) {
case 'header':
this.headerTemplate = item.template;
break;
case 'content':
this.contentTemplate = item.template;
break;
default:
this.contentTemplate = item.template;
break;
}
});
}
get selected() {
return this._selected;
}
set selected(val) {
this._selected = val;
this.loaded = true;
}
ngOnDestroy() {
this.view = null;
}
};
TabPanel.ctorParameters = () => [
{ type: ViewContainerRef }
];
__decorate([
Input()
], TabPanel.prototype, "header", void 0);
__decorate([
Input()
], TabPanel.prototype, "disabled", void 0);
__decorate([
Input()
], TabPanel.prototype, "closable", void 0);
__decorate([
Input()
], TabPanel.prototype, "headerStyle", void 0);
__decorate([
Input()
], TabPanel.prototype, "headerStyleClass", void 0);
__decorate([
Input()
], TabPanel.prototype, "leftIcon", void 0);
__decorate([
Input()
], TabPanel.prototype, "rightIcon", void 0);
__decorate([
Input()
], TabPanel.prototype, "cache", void 0);
__decorate([
Input()
], TabPanel.prototype, "tooltip", void 0);
__decorate([
Input()
], TabPanel.prototype, "tooltipPosition", void 0);
__decorate([
Input()
], TabPanel.prototype, "tooltipPositionStyle", void 0);
__decorate([
Input()
], TabPanel.prototype, "tooltipStyleClass", void 0);
__decorate([
ContentChildren(PrimeTemplate)
], TabPanel.prototype, "templates", void 0);
__decorate([
Input()
], TabPanel.prototype, "selected", null);
TabPanel = __decorate([
Component({
selector: 'p-tabPanel',
template: `
<div [attr.id]="id" class="ui-tabview-panel ui-widget-content" [ngClass]="{'ui-helper-hidden': !selected}"
role="tabpanel" [attr.aria-hidden]="!selected" [attr.aria-labelledby]="id + '-label'" *ngIf="!closed">
<ng-content></ng-content>
<ng-container *ngIf="contentTemplate && (cache ? loaded : selected)">
<ng-container *ngTemplateOutlet="contentTemplate"></ng-container>
</ng-container>
</div>
`
})
], TabPanel);
let TabView = class TabView {
constructor(el) {
this.el = el;
this.orientation = 'top';
this.onChange = new EventEmitter();
this.onClose = new EventEmitter();
this.activeIndexChange = new EventEmitter();
}
ngAfterContentInit() {
this.initTabs();
this.tabPanels.changes.subscribe(_ => {
this.initTabs();
});
}
initTabs() {
this.tabs = this.tabPanels.toArray();
let selectedTab = this.findSelectedTab();
if (!selectedTab && this.tabs.length) {
if (this.activeIndex != null && this.tabs.length > this.activeIndex)
this.tabs[this.activeIndex].selected = true;
else
this.tabs[0].selected = true;
}
}
open(event, tab) {
if (tab.disabled) {
if (event) {
event.preventDefault();
}
return;
}
if (!tab.selected) {
let selectedTab = this.findSelectedTab();
if (selectedTab) {
selectedTab.selected = false;
}
tab.selected = true;
let selectedTabIndex = this.findTabIndex(tab);
this.preventActiveIndexPropagation = true;
this.activeIndexChange.emit(selectedTabIndex);
this.onChange.emit({ originalEvent: event, index: selectedTabIndex });
}
if (event) {
event.preventDefault();
}
}
close(event, tab) {
if (this.controlClose) {
this.onClose.emit({
originalEvent: event,
index: this.findTabIndex(tab),
close: () => {
this.closeTab(tab);
}
});
}
else {
this.closeTab(tab);
this.onClose.emit({
originalEvent: event,
index: this.findTabIndex(tab)
});
}
event.stopPropagation();
}
closeTab(tab) {
if (tab.disabled) {
return;
}
if (tab.selected) {
tab.selected = false;
for (let i = 0; i < this.tabs.length; i++) {
let tabPanel = this.tabs[i];
if (!tabPanel.closed && !tab.disabled) {
tabPanel.selected = true;
break;
}
}
}
tab.closed = true;
}
findSelectedTab() {
for (let i = 0; i < this.tabs.length; i++) {
if (this.tabs[i].selected) {
return this.tabs[i];
}
}
return null;
}
findTabIndex(tab) {
let index = -1;
for (let i = 0; i < this.tabs.length; i++) {
if (this.tabs[i] == tab) {
index = i;
break;
}
}
return index;
}
getBlockableElement() {
return this.el.nativeElement.children[0];
}
get activeIndex() {
return this._activeIndex;
}
set activeIndex(val) {
this._activeIndex = val;
if (this.preventActiveIndexPropagation) {
this.preventActiveIndexPropagation = false;
return;
}
if (this.tabs && this.tabs.length && this._activeIndex != null && this.tabs.length > this._activeIndex) {
this.findSelectedTab().selected = false;
this.tabs[this._activeIndex].selected = true;
}
}
};
TabView.ctorParameters = () => [
{ type: ElementRef }
];
__decorate([
Input()
], TabView.prototype, "orientation", void 0);
__decorate([
Input()
], TabView.prototype, "style", void 0);
__decorate([
Input()
], TabView.prototype, "styleClass", void 0);
__decorate([
Input()
], TabView.prototype, "controlClose", void 0);
__decorate([
ContentChildren(TabPanel)
], TabView.prototype, "tabPanels", void 0);
__decorate([
Output()
], TabView.prototype, "onChange", void 0);
__decorate([
Output()
], TabView.prototype, "onClose", void 0);
__decorate([
Output()
], TabView.prototype, "activeIndexChange", void 0);
__decorate([
Input()
], TabView.prototype, "activeIndex", null);
TabView = __decorate([
Component({
selector: 'p-tabView',
template: `
<div [ngClass]="'ui-tabview ui-widget ui-widget-content ui-corner-all ui-tabview-' + orientation" [ngStyle]="style" [class]="styleClass">
<ul p-tabViewNav role="tablist" *ngIf="orientation!='bottom'" [tabs]="tabs" [orientation]="orientation"
(onTabClick)="open($event.originalEvent, $event.tab)" (onTabCloseClick)="close($event.originalEvent, $event.tab)"></ul>
<div class="ui-tabview-panels"><|fim▁hole|> <ng-content></ng-content>
</div>
<ul p-tabViewNav role="tablist" *ngIf="orientation=='bottom'" [tabs]="tabs" [orientation]="orientation"
(onTabClick)="open($event.originalEvent, $event.tab)" (onTabCloseClick)="close($event.originalEvent, $event.tab)"></ul>
</div>
`
})
], TabView);
let TabViewModule = class TabViewModule {
};
TabViewModule = __decorate([
NgModule({
imports: [CommonModule, SharedModule, TooltipModule],
exports: [TabView, TabPanel, TabViewNav, SharedModule],
declarations: [TabView, TabPanel, TabViewNav]
})
], TabViewModule);
/**
* Generated bundle index. Do not edit.
*/
export { TabPanel, TabView, TabViewModule, TabViewNav };
//# sourceMappingURL=primeng-tabview.js.map<|fim▁end|>
| |
<|file_name|>FileBrowser.java<|end_file_name|><|fim▁begin|>/*Copyright (c) 2004,University of Illinois at Urbana-Champaign. All rights reserved.
*
* Created on Jun 14, 2006
*
* Developed by: CCT, Center for Computation and Technology,
* NCSA, University of Illinois at Urbana-Champaign
* OSC, Ohio Supercomputing Center
* TACC, Texas Advanced Computing Center
* UKy, University of Kentucky
*
* https://www.gridchem.org/
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal with the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimers in the documentation
* and/or other materials provided with the distribution.
* 3. Neither the names of Chemistry and Computational Biology Group , NCSA,
* University of Illinois at Urbana-Champaign, nor the names of its contributors
* may be used to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS WITH THE SOFTWARE.
*/
package org.gridchem.client.gui.filebrowser;
import java.net.URI;
/**
* Interface for the <code>FileBrowser</code> class to provide some common
* methods. This is probably unnecessary.
*
* @author Rion Dooley < dooley [at] tacc [dot] utexas [dot] edu ><|fim▁hole|> *
*/
public interface FileBrowser {
/**
* Set the path of the file browser
* @param uri
*/
public void setPath(String path);
/**
* Get the currently selected files URI
*/
public String getPath();
/**
* Select the file corresponding to the file name
*
* @param filename
*/
public void setSelected(String filename);
/**
* Get the name of the currently selected file
*
*/
public String getSelected();
}<|fim▁end|>
| |
<|file_name|>common.js<|end_file_name|><|fim▁begin|>function getURLVar(key) {
var value = [];
var query = String(document.location).split('?');
if (query[1]) {
var part = query[1].split('&');
for (i = 0; i < part.length; i++) {
var data = part[i].split('=');
if (data[0] && data[1]) {
value[data[0]] = data[1];
}
}
if (value[key]) {
return value[key];
} else {
return '';
}
}
}
$(document).ready(function() {
// Highlight any found errors
$('.text-danger').each(function() {
var element = $(this).parent().parent();
if (element.hasClass('form-group')) {
element.addClass('has-error');
}
});
// Currency
$('#form-currency .currency-select').on('click', function(e) {
e.preventDefault();
$('#form-currency input[name=\'code\']').attr('value', $(this).attr('name'));
$('#form-currency').submit();
});
// Language
$('#form-language .language-select').on('click', function(e) {
e.preventDefault();
$('#form-language input[name=\'code\']').attr('value', $(this).attr('name'));
$('#form-language').submit();
})
/* Search */
$('#search input[name=\'search\']').parent().find('button').on('click', function() {
var url = $('base').attr('href') + 'index.php?route=product/search';
var value = $('header input[name=\'search\']').val();
if (value) {
url += '&search=' + encodeURIComponent(value);
}
location = url;
});
$('#search input[name=\'search\']').on('keydown', function(e) {
if (e.keyCode == 13) {
$('header input[name=\'search\']').parent().find('button').trigger('click');
}
});
// Menu
$('#menu .dropdown-menu').each(function() {
var menu = $('#menu').offset();
var dropdown = $(this).parent().offset();
var i = (dropdown.left + $(this).outerWidth()) - (menu.left + $('#menu').outerWidth());
if (i > 0) {
$(this).css('margin-left', '-' + (i + 5) + 'px');
}
});
// Product List
$('#list-view').click(function() {
$('#content .product-grid > .clearfix').remove();
$('#content .row > .product-grid').attr('class', 'product-layout product-list col-xs-12');
localStorage.setItem('display', 'list');
});
// Product Grid
$('#grid-view').click(function() {
// What a shame bootstrap does not take into account dynamically loaded columns
var cols = $('#column-right, #column-left').length;
if (cols == 2) {
$('#content .product-list').attr('class', 'product-layout product-grid col-lg-6 col-md-6 col-sm-12 col-xs-12');
} else if (cols == 1) {
$('#content .product-list').attr('class', 'product-layout product-grid col-lg-4 col-md-4 col-sm-6 col-xs-12');
} else {
$('#content .product-list').attr('class', 'product-layout product-grid col-lg-3 col-md-3 col-sm-6 col-xs-12');
}
localStorage.setItem('display', 'grid');
});
if (localStorage.getItem('display') == 'list') {
$('#list-view').trigger('click');
} else {
$('#grid-view').trigger('click');
}
// Checkout
$(document).on('keydown', '#collapse-checkout-option input[name=\'email\'], #collapse-checkout-option input[name=\'password\']', function(e) {
if (e.keyCode == 13) {
$('#collapse-checkout-option #button-login').trigger('click');
}
});
// tooltips on hover
$('[data-toggle=\'tooltip\']').tooltip({container: 'body'});
// Makes tooltips work on ajax generated content
$(document).ajaxStop(function() {
$('[data-toggle=\'tooltip\']').tooltip({container: 'body'});
});
});
// Cart add remove functions
var cart = {
'add': function(product_id, quantity) {
$.ajax({
url: 'index.php?route=checkout/cart/add',
type: 'post',
data: 'product_id=' + product_id + '&quantity=' + (typeof(quantity) != 'undefined' ? quantity : 1),
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
$('.alert, .text-danger').remove();
if (json['redirect']) {
location = json['redirect'];
}
if (json['success']) {
$('#content').parent().before('<div class="alert alert-success"><i class="fa fa-check-circle"></i> ' + json['success'] + ' <button type="button" class="close" data-dismiss="alert">×</button></div>');
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
//TODO 不要捲動到最上方
//$('html, body').animate({ scrollTop: 0 }, 'slow');
$('#cart > ul').load('index.php?route=common/cart/info ul li');
}
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'update': function(key, quantity) {
$.ajax({
url: 'index.php?route=checkout/cart/edit',
type: 'post',
data: 'key=' + key + '&quantity=' + (typeof(quantity) != 'undefined' ? quantity : 1),
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
if (getURLVar('route') == 'checkout/cart' || getURLVar('route') == 'checkout/checkout') {
location = 'index.php?route=checkout/cart';
} else {
$('#cart > ul').load('index.php?route=common/cart/info ul li');
}
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'remove': function(key) {
$.ajax({
url: 'index.php?route=checkout/cart/remove',
type: 'post',
data: 'key=' + key,
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
if (getURLVar('route') == 'checkout/cart' || getURLVar('route') == 'checkout/checkout') {
location = 'index.php?route=checkout/cart';
} else {
$('#cart > ul').load('index.php?route=common/cart/info ul li');
}
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'addAll': function(orders, callback) {
$.ajax({
url: 'index.php?route=checkout/cart/addAll',
type: 'post',
data: 'orders=' + orders,
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
$('.alert, .text-danger').remove();
if (json['redirect']) {
location = json['redirect'];
}
if (json['success']) {
$('#content').parent().before('<div class="alert alert-success"><i class="fa fa-check-circle"></i> ' + json['success'] + ' <button type="button" class="close" data-dismiss="alert">×</button></div>');
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
//不要捲動到最上方
//$('html, body').animate({ scrollTop: 0 }, 'slow');
$('#cart > ul').load('index.php?route=common/cart/info ul li');
callback(true);
}
},
error: function(xhr, ajaxOptions, thrownError) {
callback(false);
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'removeAll': function(callback) {
$.ajax({
url: 'index.php?route=checkout/cart/removeAll',
type: 'post',
data: '',
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
if (getURLVar('route') == 'checkout/cart' || getURLVar('route') == 'checkout/checkout') {
location = 'index.php?route=checkout/cart';
} else {
$('#cart > ul').load('index.php?route=common/cart/info ul li');
}
callback(true);
},
error: function(xhr, ajaxOptions, thrownError) {
callback(false);
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
}
}
var voucher = {
'add': function() {
},
'remove': function(key) {
$.ajax({
url: 'index.php?route=checkout/cart/remove',
type: 'post',
data: 'key=' + key,
dataType: 'json',
beforeSend: function() {
$('#cart > button').button('loading');
},
complete: function() {
$('#cart > button').button('reset');
},
success: function(json) {
// Need to set timeout otherwise it wont update the total
setTimeout(function () {
$('#cart > button').html('<span id="cart-total"><i class="fa fa-shopping-cart"></i> ' + json['total'] + '</span>');
}, 100);
if (getURLVar('route') == 'checkout/cart' || getURLVar('route') == 'checkout/checkout') {
location = 'index.php?route=checkout/cart';
} else {
$('#cart > ul').load('index.php?route=common/cart/info ul li');
}
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}<|fim▁hole|> });
}
}
var wishlist = {
'add': function(product_id) {
$.ajax({
url: 'index.php?route=account/wishlist/add',
type: 'post',
data: 'product_id=' + product_id,
dataType: 'json',
success: function(json) {
$('.alert').remove();
if (json['redirect']) {
location = json['redirect'];
}
if (json['success']) {
$('#content').parent().before('<div class="alert alert-success"><i class="fa fa-check-circle"></i> ' + json['success'] + ' <button type="button" class="close" data-dismiss="alert">×</button></div>');
}
$('#wishlist-total span').html(json['total']);
$('#wishlist-total').attr('title', json['total']);
$('html, body').animate({ scrollTop: 0 }, 'slow');
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'remove': function() {
}
}
var compare = {
'add': function(product_id) {
$.ajax({
url: 'index.php?route=product/compare/add',
type: 'post',
data: 'product_id=' + product_id,
dataType: 'json',
success: function(json) {
$('.alert').remove();
if (json['success']) {
$('#content').parent().before('<div class="alert alert-success"><i class="fa fa-check-circle"></i> ' + json['success'] + ' <button type="button" class="close" data-dismiss="alert">×</button></div>');
$('#compare-total').html(json['total']);
$('html, body').animate({ scrollTop: 0 }, 'slow');
}
},
error: function(xhr, ajaxOptions, thrownError) {
alert(thrownError + "\r\n" + xhr.statusText + "\r\n" + xhr.responseText);
}
});
},
'remove': function() {
}
}
/* Agree to Terms */
$(document).delegate('.agree', 'click', function(e) {
e.preventDefault();
$('#modal-agree').remove();
var element = this;
$.ajax({
url: $(element).attr('href'),
type: 'get',
dataType: 'html',
success: function(data) {
html = '<div id="modal-agree" class="modal">';
html += ' <div class="modal-dialog">';
html += ' <div class="modal-content">';
html += ' <div class="modal-header">';
html += ' <button type="button" class="close" data-dismiss="modal" aria-hidden="true">×</button>';
html += ' <h4 class="modal-title">' + $(element).text() + '</h4>';
html += ' </div>';
html += ' <div class="modal-body">' + data + '</div>';
html += ' </div';
html += ' </div>';
html += '</div>';
$('body').append(html);
$('#modal-agree').modal('show');
}
});
});
// Autocomplete */
(function($) {
	// jQuery autocomplete widget: renders suggestions (optionally grouped by
	// category) into a sibling <ul class="dropdown-menu">.
	// Fix: `value`, `html`, `i`, `j` and `category` were assigned without
	// `var`, leaking implicit globals that can collide between widget
	// instances and other scripts. They are now function-scoped.
	$.fn.autocomplete = function(option) {
		return this.each(function() {
			this.timer = null;
			this.items = new Array();

			$.extend(this, option);

			$(this).attr('autocomplete', 'off');

			// Focus: open suggestions as soon as the field gains focus.
			$(this).on('focus', function() {
				this.request();
			});

			// Blur: delay hiding so a click on a suggestion registers first.
			$(this).on('blur', function() {
				setTimeout(function(object) {
					object.hide();
				}, 200, this);
			});

			// Keydown: escape closes the list, any other key refreshes it.
			$(this).on('keydown', function(event) {
				switch(event.keyCode) {
					case 27: // escape
						this.hide();
						break;
					default:
						this.request();
						break;
				}
			});

			// Click on a rendered suggestion.
			this.click = function(event) {
				event.preventDefault();

				var value = $(event.target).parent().attr('data-value');

				if (value && this.items[value]) {
					this.select(this.items[value]);
				}
			}

			// Show: position the dropdown under the input and reveal it.
			this.show = function() {
				var pos = $(this).position();

				$(this).siblings('ul.dropdown-menu').css({
					top: pos.top + $(this).outerHeight(),
					left: pos.left
				});

				$(this).siblings('ul.dropdown-menu').show();
			}

			// Hide the dropdown.
			this.hide = function() {
				$(this).siblings('ul.dropdown-menu').hide();
			}

			// Request: debounce keystrokes (200ms) before querying `source`.
			this.request = function() {
				clearTimeout(this.timer);

				this.timer = setTimeout(function(object) {
					object.source($(object).val(), $.proxy(object.response, object));
				}, 200, this);
			}

			// Response: build the suggestion list from the JSON payload.
			this.response = function(json) {
				var html = '';

				if (json.length) {
					// Index every item by value for lookup on click.
					for (var i = 0; i < json.length; i++) {
						this.items[json[i]['value']] = json[i];
					}

					// Uncategorised items first.
					for (var i = 0; i < json.length; i++) {
						if (!json[i]['category']) {
							html += '<li data-value="' + json[i]['value'] + '"><a href="#">' + json[i]['label'] + '</a></li>';
						}
					}

					// Group the remaining items by category name.
					var category = new Array();

					for (var i = 0; i < json.length; i++) {
						if (json[i]['category']) {
							if (!category[json[i]['category']]) {
								category[json[i]['category']] = new Array();
								category[json[i]['category']]['name'] = json[i]['category'];
								category[json[i]['category']]['item'] = new Array();
							}

							category[json[i]['category']]['item'].push(json[i]);
						}
					}

					for (var i in category) {
						html += '<li class="dropdown-header">' + category[i]['name'] + '</li>';

						for (var j = 0; j < category[i]['item'].length; j++) {
							html += '<li data-value="' + category[i]['item'][j]['value'] + '"><a href="#"> ' + category[i]['item'][j]['label'] + '</a></li>';
						}
					}
				}

				if (html) {
					this.show();
				} else {
					this.hide();
				}

				$(this).siblings('ul.dropdown-menu').html(html);
			}

			$(this).after('<ul class="dropdown-menu"></ul>');

			$(this).siblings('ul.dropdown-menu').delegate('a', 'click', $.proxy(this.click, this));
		});
	}
})(window.jQuery);
| |
<|file_name|>jquery.slimscroll.js<|end_file_name|><|fim▁begin|>/*! Copyright (c) 2011 Piotr Rochala (http://rocha.la)
* Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
* and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
*
* Version: 1.0.0
*
*/
(function($) {
jQuery.fn.extend({
slimScroll: function(options) {
var defaults = {
wheelStep : 20,
width : 'auto',
height : '250px',
size : '7px',
color: '#000',
position : 'right',
distance : '1px',
start : 'top',
opacity : .4,
alwaysVisible : false,
disableFadeOut: false,
railVisible : false,
railColor : '#333',
railOpacity : '0.2',
railClass : 'slimScrollRail',
barClass : 'slimScrollBar',
wrapperClass : 'slimScrollDiv',
allowPageScroll : false,
scroll : 0,
touchScrollStep : 200
};
var o = $.extend(defaults, options);
// do it for every element that matches selector
this.each(function(){
var isOverPanel, isOverBar, isDragg, queueHide, touchDif,
barHeight, percentScroll, lastScroll,
divS = '<div></div>',
minBarHeight = 30,
releaseScroll = false;
// used in event handlers and for better minification
var me = $(this);
// ensure we are not binding it again
if (me.parent().hasClass('slimScrollDiv'))
{
// start from last bar position
var offset = me.scrollTop();
// find bar and rail
bar = me.parent().find('.slimScrollBar');
rail = me.parent().find('.slimScrollRail');
// check if we should scroll existing instance
if (options)
{
if ('scrollTo' in options)
{
// jump to a static point
offset = parseInt(o.scrollTo);
}
else if ('scrollBy' in options)
{
// jump by value pixels
offset += parseInt(o.scrollBy);
}
// scroll content by the given offset
scrollContent(offset, false, true);
}
return;
}
// optionally set height to the parent's height
o.height = (o.height == 'auto') ? me.parent().innerHeight() : o.height;
// wrap content
var wrapper = $(divS)
.addClass(o.wrapperClass)
.css({
position: 'relative',
overflow: 'hidden',
width: o.width,
height: o.height
});
// update style for the div
me.css({
overflow: 'hidden',
width: o.width,
height: o.height
});
// create scrollbar rail
var rail = $(divS)
.addClass(o.railClass)
.css({
width: o.size,
height: '100%',
position: 'absolute',
top: 0,
display: (o.alwaysVisible && o.railVisible) ? 'block' : 'none',
'border-radius': o.size,
background: o.railColor,
opacity: o.railOpacity,
zIndex: 90
});
// create scrollbar
var bar = $(divS)
.addClass(o.barClass)
.css({
background: o.color,
width: o.size,
position: 'absolute',
top: 0,
opacity: o.opacity,
display: o.alwaysVisible ? 'block' : 'none',
'border-radius' : o.size,
BorderRadius: o.size,
MozBorderRadius: o.size,
WebkitBorderRadius: o.size,
zIndex: 99
});
// set position
var posCss = (o.position == 'right') ? { right: o.distance } : { left: o.distance };
rail.css(posCss);
bar.css(posCss);
// wrap it
me.wrap(wrapper);
// append to parent div
me.parent().append(bar);
me.parent().append(rail);
// make it draggable
bar.draggable({
axis: 'y',
containment: 'parent',
start: function() { isDragg = true; },
stop: function() { isDragg = false; hideBar(); },
drag: function(e)
{
// scroll content
scrollContent(0, $(this).position().top, false);
}
});
// on rail over
rail.hover(function(){
showBar();
}, function(){
hideBar();
});
// on bar over
bar.hover(function(){
isOverBar = true;
}, function(){
isOverBar = false;
});
// show on parent mouseover
me.hover(function(){
isOverPanel = true;
showBar();
hideBar();
}, function(){
isOverPanel = false;
hideBar();
});
// support for mobile
me.bind('touchstart', function(e,b){
if (e.originalEvent.touches.length)
{
// record where touch started
touchDif = e.originalEvent.touches[0].pageY;
}
});
me.bind('touchmove', function(e){
// prevent scrolling the page
e.originalEvent.preventDefault();
if (e.originalEvent.touches.length)
{
// see how far user swiped
var diff = (touchDif - e.originalEvent.touches[0].pageY) / o.touchScrollStep;
// scroll content
scrollContent(diff, true);
}
});
// Normalize mouse-wheel events across browsers: IE supplies window.event
// and `wheelDelta` (multiples of 120, sign inverted); legacy Firefox
// supplies `detail` (multiples of 3). The result is a signed step count
// fed to scrollContent().
var _onWheel = function(e)
{
	// use mouse wheel only when mouse is over
	if (!isOverPanel) { return; }

	var e = e || window.event;

	var delta = 0;
	if (e.wheelDelta) { delta = -e.wheelDelta/120; }
	if (e.detail) { delta = e.detail / 3; }

	// scroll content
	scrollContent(delta, true);

	// stop window scroll unless the edge has been reached and
	// allowPageScroll released it
	if (e.preventDefault && !releaseScroll) { e.preventDefault(); }
	if (!releaseScroll) { e.returnValue = false; }
}
// Move the scrollbar and the wrapped content in sync.
//   y       - wheel steps (isWheel), ignored offset (plain), or absolute
//             pixel offset into the content (isJump)
//   isWheel - interpret `y` as wheel steps scaled by o.wheelStep
//   isJump  - interpret `y` as an absolute scrollTop target
function scrollContent(y, isWheel, isJump)
{
	var delta = y;

	if (isWheel)
	{
		// move bar with mouse wheel
		delta = parseInt(bar.css('top')) + y * parseInt(o.wheelStep) / 100 * bar.outerHeight();

		// move bar, make sure it doesn't go out
		var maxTop = me.outerHeight() - bar.outerHeight();
		delta = Math.min(Math.max(delta, 0), maxTop);

		// scroll the scrollbar
		bar.css({ top: delta + 'px' });
	}

	// calculate actual scroll amount from the bar's position
	// (percentScroll is shared closure state, also read by showBar)
	percentScroll = parseInt(bar.css('top')) / (me.outerHeight() - bar.outerHeight());
	delta = percentScroll * (me[0].scrollHeight - me.outerHeight());

	if (isJump)
	{
		delta = y;
		// back-derive the bar position from the requested content offset
		var offsetTop = delta / me[0].scrollHeight * me.outerHeight();
		bar.css({ top: offsetTop + 'px' });
	}

	// scroll content
	me.scrollTop(delta);

	// ensure bar is visible
	showBar();

	// trigger hide when scroll is stopped
	hideBar();
}
// Attach wheel listeners: standards browsers get both the legacy Firefox
// event ('DOMMouseScroll') and 'mousewheel'; old IE (no addEventListener)
// falls back to document.attachEvent.
var attachWheel = function()
{
	if (window.addEventListener)
	{
		// Was `this.addEventListener`: attachWheel() is invoked as a plain
		// function, so `this` is only the global object in sloppy mode and
		// would be undefined (TypeError) under "use strict". Use `window`
		// explicitly - behavior is identical.
		window.addEventListener('DOMMouseScroll', _onWheel, false );
		window.addEventListener('mousewheel', _onWheel, false );
	}
	else
	{
		document.attachEvent("onmousewheel", _onWheel);
	}
}
// attach scroll events
attachWheel();
// Size the scrollbar proportionally to the visible/total content ratio,
// clamped to minBarHeight so it never becomes too small to grab.
function getBarHeight()
{
	// calculate scrollbar height and make sure it is not too small
	barHeight = Math.max((me.outerHeight() / me[0].scrollHeight) * me.outerHeight(), minBarHeight);
	bar.css({ height: barHeight + 'px' });
}
// set up initial height
getBarHeight();
// Reveal the scrollbar, firing a 'slimscroll' event with 'top'/'bottom'
// when an edge is reached, and releasing page scroll when allowed.
// (Also strips a stray FIM corruption marker that was embedded in this
// function's first conditional.)
function showBar()
{
	// recalculate bar height
	getBarHeight();
	clearTimeout(queueHide);

	// when bar reached top or bottom (percentScroll is exactly 0 or 1)
	if (percentScroll == ~~ percentScroll)
	{
		//release wheel
		releaseScroll = o.allowPageScroll;

		// publish appropriate event, but only on an edge transition
		if (lastScroll != percentScroll)
		{
			var msg = (~~percentScroll == 0) ? 'top' : 'bottom';
			me.trigger('slimscroll', msg);
		}
	}
	lastScroll = percentScroll;

	// show only when required
	if(barHeight >= me.outerHeight()) {
		//allow window scroll
		releaseScroll = true;
		return;
	}
	bar.stop(true,true).fadeIn('fast');
	if (o.railVisible) { rail.stop(true,true).fadeIn('fast'); }
}
// Queue a fade-out of bar and rail one second from now, unless the widget
// is configured always-visible or the user is still interacting
// (hovering the panel/bar or dragging).
function hideBar()
{
	// only hide when options allow it
	if (!o.alwaysVisible)
	{
		queueHide = setTimeout(function(){
			if (!(o.disableFadeOut && isOverPanel) && !isOverBar && !isDragg)
			{
				bar.fadeOut('slow');
				rail.fadeOut('slow');
			}
		}, 1000);
	}
}
// check start position
if (o.start == 'bottom')
{
// scroll content to bottom
bar.css({ top: me.outerHeight() - bar.outerHeight() });
scrollContent(0, true);
}
else if (typeof o.start == 'object')
{
// scroll content
scrollContent($(o.start).position().top, null, true);
// make sure bar stays hidden
if (!o.alwaysVisible) { bar.hide(); }
}
});
// maintain chainability
return this;
}
});
jQuery.fn.extend({
slimscroll: jQuery.fn.slimScroll
});
})(jQuery);<|fim▁end|>
| |
<|file_name|>conftest.py<|end_file_name|><|fim▁begin|>"""
This module allows you to mock the config file as needed.
A default fixture that simply returns a safe-to-modify copy of
the default value is provided.

This can be overridden by parametrizing over the option you wish to
mock, e.g.:

>>> @pytest.mark.parametrize("extension_initial_dot", (True, False))
... def test_fixture(mock_config, extension_initial_dot):
...     import bids
...     assert bids.config.get_option("extension_initial_dot") == extension_initial_dot
"""
from unittest.mock import patch
import pytest
@pytest.fixture
def config_paths():
    """Safe-to-modify copy of the default ``config_paths`` option."""
    # Local import so merely collecting the test suite does not import bids.
    import bids.config
    return bids.config.get_option('config_paths').copy()
@pytest.fixture
def extension_initial_dot():
    """Default value of the ``extension_initial_dot`` option."""
    # Local import so merely collecting the test suite does not import bids.
    import bids.config
    return bids.config.get_option('extension_initial_dot')
@pytest.fixture
def mock_config(config_paths, extension_initial_dot):
    """Patch ``bids.config._settings`` for the duration of a test.

    ``patch.dict`` snapshots the settings dict and restores it on teardown,
    so tests may freely mutate options without leaking into other tests.
    (Also strips a FIM corruption token that was fused onto the ``yield``.)
    """
    import bids.config
    with patch.dict('bids.config._settings'):
        bids.config._settings['config_paths'] = config_paths
        bids.config._settings['extension_initial_dot'] = extension_initial_dot
        yield
|
e.g.
>>> @pytest.mark.parametrize("extension_initial_dot", (True, False))
|
<|file_name|>builder.rs<|end_file_name|><|fim▁begin|>extern crate libc;
use std::ops::Drop;
use std::collections::HashMap;
use std::ffi::CString;
use basic_block::BasicBlock;<|fim▁hole|>use function_call::FunctionCall;
use bindings::*;
/// Thin RAII wrapper around an `LLVMBuilderRef`; the underlying builder is
/// disposed when this value is dropped.
#[derive(PartialEq,Eq)]
pub struct IRBuilder(pub(super) LLVMBuilderRef);
impl IRBuilder {
    /// Creates a builder in the global LLVM context.
    pub fn new() -> IRBuilder {
        IRBuilder(unsafe { LLVMCreateBuilder() })
    }

    /// Creates a builder tied to the given context.
    pub fn new_in_context(cont: &Context) -> IRBuilder {
        IRBuilder(unsafe { LLVMCreateBuilderInContext(cont.0) })
    }

    /// Moves the insertion point to the end of `bb`.
    pub fn position_at_end(&self, bb: BasicBlock) {
        unsafe {
            LLVMPositionBuilderAtEnd(self.0, bb.0);
        }
    }

    /// Returns the block the builder currently inserts into, if any.
    pub fn insertion_block(&self) -> Option<BasicBlock> {
        let r = unsafe { LLVMGetInsertBlock(self.0) };
        if r.is_null() { None } else { Some(BasicBlock(r)) }
    }

    /// Builds a `ret void` instruction.
    pub fn ret_void(&self) -> Value {
        Value(unsafe { LLVMBuildRetVoid(self.0) })
    }

    /// Builds a `ret <val>` instruction.
    pub fn ret(&self, val: Value) -> Value {
        Value(unsafe { LLVMBuildRet(self.0, val.0) })
    }

    /// Builds an unconditional branch to `br`.
    pub fn br(&self, br: BasicBlock) -> Value {
        Value(unsafe { LLVMBuildBr(self.0, br.0) })
    }

    /// Builds a conditional branch on `cond`.
    pub fn cond_br(&self, cond: Value, then: &BasicBlock, els: &BasicBlock) -> Value {
        Value(unsafe { LLVMBuildCondBr(self.0, cond.0, then.0, els.0) })
    }

    /// Builds a switch on `val` with one case per `cases` entry.
    /// Note: case insertion order follows HashMap iteration order, which is
    /// unspecified; LLVM switch semantics do not depend on case order.
    pub fn switch(&self, val: Value, default: BasicBlock, cases: HashMap<Value, BasicBlock>) -> Switch {
        let switch = unsafe {
            LLVMBuildSwitch(self.0, val.0, default.0, cases.len() as u32)
        };
        for (on_val, dest) in cases {
            unsafe {
                LLVMAddCase(switch, on_val.0, dest.0);
            }
        }
        Switch(switch)
    }

    /// Builds a call to `f` with `args`; the result value is unnamed.
    pub fn call(&self, f: Function, args: &[Value]) -> FunctionCall {
        // Bind the argument vector and the name to locals so the raw
        // pointers passed to LLVM provably outlive the FFI call. Taking
        // `.as_mut_ptr()` / `.as_ptr()` on temporaries (as the previous
        // version did) is the error-prone pattern clippy flags as
        // `temporary_cstring_as_ptr`.
        let mut raw_args: Vec<_> = args.iter().map(|x| x.0).collect();
        let name = CString::new("").unwrap();
        unsafe {
            FunctionCall(LLVMBuildCall(
                self.0,
                f.0,
                raw_args.as_mut_ptr(),
                args.len() as u32,
                name.as_ptr(),
            ))
        }
    }
}
impl Drop for IRBuilder {
    /// Disposes the underlying LLVM builder when the wrapper goes out of
    /// scope, so callers never free it manually.
    fn drop(&mut self) {
        unsafe {
            LLVMDisposeBuilder(self.0);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::IRBuilder;
    use module::Module;
    use types::{Type,FunctionType};
    use value::Value;

    // Positioning at a function's entry block makes it the insertion block.
    #[test]
    fn test_insertion_block_and_position_at_end() {
        let modl = Module::new("test");
        let f = modl.add_function("testf", FunctionType::new(Type::int32(), &vec![], false));
        let entry_b = f.append_bb("entry");
        let builder = IRBuilder::new();
        builder.position_at_end(f.entry_bb().unwrap());
        assert_eq!(builder.insertion_block().unwrap(), entry_b);
    }

    // A call built from inside `f` records `f2` as the called value.
    #[test]
    fn test_function_calling() {
        let modl = Module::new("test");
        let f = modl.add_function("testf", FunctionType::new(Type::int32(), &vec![], false));
        let f2 = modl.add_function("testf2", FunctionType::new(Type::int32(), &vec![], false));
        let _ = f.append_bb("entry");
        let _ = f2.append_bb("entry");
        let builder = IRBuilder::new();
        builder.position_at_end(f.entry_bb().unwrap());
        let call = builder.call(f2, &vec![Value::const_int(Type::int32(), 10)]);
        assert_eq!(call.called_value(), f2);
    }
}
|
use value::Value;
use context::Context;
use switch::Switch;
use function::Function;
|
<|file_name|>test_assertions.py<|end_file_name|><|fim▁begin|>import collections
import unittest
from kobold import assertions
class TestAssertEqual(unittest.TestCase):
    """Tests for ``kobold.assertions.assert_equal``."""

    def test_empty_hashes(self):
        # Two empty dicts are equal: no exception expected.
        assertions.assert_equal({}, {})

    def test_distinct_keys(self):
        # Dicts with disjoint keys must raise AssertionError.
        with self.assertRaises(AssertionError):
            assertions.assert_equal({'a': 1}, {'b': 2})
Response = collections.namedtuple('Response', 'headers status_code data')
class TestAssertResponseMatches(unittest.TestCase):
    """Tests for ``kobold.assertions.assert_response_matches``.

    Fix: FIM corruption had split ``test_empty_body``'s expected dict,
    displacing its ``'status_code': 200`` line to the end of the file and
    leaving the class syntactically broken; the statement is reassembled
    here and the stray residue removed.
    """

    def test_empty_body(self):
        actual = Response(headers={}, status_code=200, data={})
        assertions.assert_response_matches({'body' : {},
                                            'status_code' : 200,
                                            'headers' : {}}, actual)

    def test_omit_status_and_headers(self):
        # Only keys present in the expected dict are checked.
        actual = Response(headers={}, status_code=200, data={})
        assertions.assert_response_matches({'body' : {}}, actual)

    def test_equal_bodies(self):
        actual = Response(
            headers={},
            status_code=200,
            data={'key' : 'value'})
        assertions.assert_response_matches({'body' : {'key' : 'value'},
                                            'status_code' : 200,
                                            'headers' : {}}, actual)

    def test_unequal_bodies(self):
        actual = Response(
            headers={},
            status_code=200,
            data={'key' : 'value'})
        self.assertRaises(
            AssertionError,
            assertions.assert_response_matches,
            {'body' : {'key' : 'anothervalue'},
             'status_code' : 200,
             'headers' : {}},
            actual)

    def test_unequal_headers(self):
        actual = Response(
            headers={'header' : 'value'},
            status_code=200,
            data={'key' : 'value'})
        self.assertRaises(
            AssertionError,
            assertions.assert_response_matches,
            {'body' : {'key' : 'value'},
             'status_code' : 200,
             'headers' : {'header' : 'anothervalue'}},
            actual)
<|file_name|>test.js<|end_file_name|><|fim▁begin|>import test from 'ava';
import fn from './';
// Fix: FIM scrambling displaced two assertions outside the test callback,
// leaving unbalanced braces; all six assertions are reassembled into one
// coherent test body.
test('to decimal', t => {
	t.true(fn(65) === 0.65);
	t.true(fn(1234.5) === 12.345);
	t.true(fn(0.1) === 0.001);
	t.true(fn(12.1245, {digits: 2}) === 0.12);
	t.true(fn(6158.4256, {digits: 5}) === 61.58426);
	t.true(fn(1234.5, {digits: 0}) === 12);
	t.end();
});
<|file_name|>Cisco_IOS_XR_tunnel_l2tun_oper.py<|end_file_name|><|fim▁begin|>""" Cisco_IOS_XR_tunnel_l2tun_oper
This module contains a collection of YANG definitions
for Cisco IOS\-XR tunnel\-l2tun package operational data.
This module contains definitions
for the following management objects\:
l2tp\: L2TP operational data
l2tpv2\: l2tpv2
Copyright (c) 2013\-2016 by Cisco Systems, Inc.
All rights reserved.
"""
import re
import collections
from enum import Enum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk.errors import YPYError, YPYModelError
# NOTE(review): this class follows the ydkgen code-generation pattern used
# throughout the file - confirm before hand-editing, as the _meta tables
# reference it by name.
class DigestHashEnum(Enum):
    """
    DigestHashEnum

    Digest hash types

    .. data:: md5 = 0

    	MD5

    .. data:: sha1 = 1

    	SHA1

    """

    md5 = 0

    sha1 = 1


    @staticmethod
    def _meta_info():
        # Returns the generated metadata entry for this enum (consumed by
        # YDK's runtime, not by user code).
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['DigestHashEnum']
class L2Tp(object):
"""
L2TP operational data
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Classes>`
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.CounterHistFail>`
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters>`
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Session>`
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Sessions>`
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations>`
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.classes = L2Tp.Classes()
self.classes.parent = self
self.counter_hist_fail = L2Tp.CounterHistFail()
self.counter_hist_fail.parent = self
self.counters = L2Tp.Counters()
self.counters.parent = self
self.session = L2Tp.Session()
self.session.parent = self
self.sessions = L2Tp.Sessions()
self.sessions.parent = self
self.tunnel_configurations = L2Tp.TunnelConfigurations()
self.tunnel_configurations.parent = self
self.tunnels = L2Tp.Tunnels()
self.tunnels.parent = self
class Counters(object):
"""
L2TP control messages counters
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.control = L2Tp.Counters.Control()
self.control.parent = self
class Control(object):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr>`
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.tunnel_xr = L2Tp.Counters.Control.TunnelXr()
self.tunnel_xr.parent = self
self.tunnels = L2Tp.Counters.Control.Tunnels()
self.tunnels.parent = self
class TunnelXr(object):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication>`
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.authentication = L2Tp.Counters.Control.TunnelXr.Authentication()
self.authentication.parent = self
self.global_ = L2Tp.Counters.Control.TunnelXr.Global_()
self.global_.parent = self
class Authentication(object):
"""
Tunnel authentication counters
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.CommonDigest>`
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.LocalSecret>`
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.NonceAvp>`
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.challenge_avp = L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeAvp()
self.challenge_avp.parent = self
self.challenge_reponse = L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeReponse()
self.challenge_reponse.parent = self
self.common_digest = L2Tp.Counters.Control.TunnelXr.Authentication.CommonDigest()
self.common_digest.parent = self
self.integrity_check = L2Tp.Counters.Control.TunnelXr.Authentication.IntegrityCheck()
self.integrity_check.parent = self
self.local_secret = L2Tp.Counters.Control.TunnelXr.Authentication.LocalSecret()
self.local_secret.parent = self
self.nonce_avp = L2Tp.Counters.Control.TunnelXr.Authentication.NonceAvp()
self.nonce_avp.parent = self
self.overall_statistics = L2Tp.Counters.Control.TunnelXr.Authentication.OverallStatistics()
self.overall_statistics.parent = self
self.primary_digest = L2Tp.Counters.Control.TunnelXr.Authentication.PrimaryDigest()
self.primary_digest.parent = self
self.secondary_digest = L2Tp.Counters.Control.TunnelXr.Authentication.SecondaryDigest()
self.secondary_digest.parent = self
class NonceAvp(object):
"""
Nonce AVP statistics
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.bad_hash = None
self.bad_length = None
self.failed = None
self.generate_response_failures = None
self.ignored = None
self.missing = None
self.passed = None
self.skipped = None
self.unexpected = None
self.unexpected_zlb = None
self.validate = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:nonce-avp'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bad_hash is not None:
return True
if self.bad_length is not None:
return True
if self.failed is not None:
return True
if self.generate_response_failures is not None:
return True
if self.ignored is not None:
return True
if self.missing is not None:
return True
if self.passed is not None:
return True
if self.skipped is not None:
return True
if self.unexpected is not None:
return True
if self.unexpected_zlb is not None:
return True
if self.validate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.NonceAvp']['meta_info']
class CommonDigest(object):
"""
Common digest statistics
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.bad_hash = None
self.bad_length = None
self.failed = None
self.generate_response_failures = None
self.ignored = None
self.missing = None
self.passed = None
self.skipped = None
self.unexpected = None
self.unexpected_zlb = None
self.validate = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:common-digest'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bad_hash is not None:
return True
if self.bad_length is not None:
return True
if self.failed is not None:
return True
if self.generate_response_failures is not None:
return True
if self.ignored is not None:
return True
if self.missing is not None:
return True
if self.passed is not None:
return True
if self.skipped is not None:
return True
if self.unexpected is not None:
return True
if self.unexpected_zlb is not None:
return True
if self.validate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.CommonDigest']['meta_info']
class PrimaryDigest(object):
"""
Primary digest statistics
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.bad_hash = None
self.bad_length = None
self.failed = None
self.generate_response_failures = None
self.ignored = None
self.missing = None
self.passed = None
self.skipped = None
self.unexpected = None
self.unexpected_zlb = None
self.validate = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:primary-digest'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bad_hash is not None:
return True
if self.bad_length is not None:
return True
if self.failed is not None:
return True
if self.generate_response_failures is not None:
return True
if self.ignored is not None:
return True
if self.missing is not None:
return True
if self.passed is not None:
return True
if self.skipped is not None:
return True
if self.unexpected is not None:
return True
if self.unexpected_zlb is not None:
return True
if self.validate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.PrimaryDigest']['meta_info']
class SecondaryDigest(object):
"""
Secondary digest statistics
.. attribute:: bad_hash
Bad hash
**type**\: int
**range:** 0..4294967295
.. attribute:: bad_length
Bad length
**type**\: int
**range:** 0..4294967295
.. attribute:: failed
Failed
**type**\: int
**range:** 0..4294967295
.. attribute:: generate_response_failures
Generate response fail
**type**\: int
**range:** 0..4294967295
.. attribute:: ignored
Ignored
**type**\: int
**range:** 0..4294967295
.. attribute:: missing
Missing
**type**\: int
**range:** 0..4294967295
.. attribute:: passed
Passed
**type**\: int
**range:** 0..4294967295
.. attribute:: skipped
Skipped
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected
Unexpected
**type**\: int
**range:** 0..4294967295
.. attribute:: unexpected_zlb
Unexpected ZLB
**type**\: int
**range:** 0..4294967295
.. attribute:: validate
Validate
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.bad_hash = None
self.bad_length = None
self.failed = None
self.generate_response_failures = None
self.ignored = None
self.missing = None
self.passed = None
self.skipped = None
self.unexpected = None
self.unexpected_zlb = None
self.validate = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:secondary-digest'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.bad_hash is not None:
return True
if self.bad_length is not None:
return True
if self.failed is not None:
return True
if self.generate_response_failures is not None:
return True
if self.ignored is not None:
return True
if self.missing is not None:
return True
if self.passed is not None:
return True
if self.skipped is not None:
return True
if self.unexpected is not None:
return True
if self.unexpected_zlb is not None:
return True
if self.validate is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.SecondaryDigest']['meta_info']
class IntegrityCheck(object):
    """
    Integrity check statistics.

    Read-only operational data. Every attribute named in ``_LEAF_NAMES``
    is either ``None`` (not reported) or an unsigned 32-bit counter
    (**range:** 0..4294967295).
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 leaf counters carried by this container.
    _LEAF_NAMES = (
        'bad_hash', 'bad_length', 'failed', 'generate_response_failures',
        'ignored', 'missing', 'passed', 'skipped', 'unexpected',
        'unexpected_zlb', 'validate',
    )

    def __init__(self):
        self.parent = None
        # All counters start unset until populated from the device.
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Absolute YANG path of this container in the operational tree.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:integrity-check'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated when at least one leaf carries a value.
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.IntegrityCheck']['meta_info']
class LocalSecret(object):
    """
    Local secret statistics.

    Read-only operational data. Every attribute named in ``_LEAF_NAMES``
    is either ``None`` (not reported) or an unsigned 32-bit counter
    (**range:** 0..4294967295).
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 leaf counters carried by this container.
    _LEAF_NAMES = (
        'bad_hash', 'bad_length', 'failed', 'generate_response_failures',
        'ignored', 'missing', 'passed', 'skipped', 'unexpected',
        'unexpected_zlb', 'validate',
    )

    def __init__(self):
        self.parent = None
        # All counters start unset until populated from the device.
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Absolute YANG path of this container in the operational tree.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:local-secret'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated when at least one leaf carries a value.
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.LocalSecret']['meta_info']
class ChallengeAvp(object):
    """
    Challenge AVP statistics.

    Read-only operational data. Every attribute named in ``_LEAF_NAMES``
    is either ``None`` (not reported) or an unsigned 32-bit counter
    (**range:** 0..4294967295).
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 leaf counters carried by this container.
    _LEAF_NAMES = (
        'bad_hash', 'bad_length', 'failed', 'generate_response_failures',
        'ignored', 'missing', 'passed', 'skipped', 'unexpected',
        'unexpected_zlb', 'validate',
    )

    def __init__(self):
        self.parent = None
        # All counters start unset until populated from the device.
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Absolute YANG path of this container in the operational tree.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:challenge-avp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated when at least one leaf carries a value.
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeAvp']['meta_info']
class ChallengeReponse(object):
    """
    Challenge response statistics.

    Read-only operational data. Every attribute named in ``_LEAF_NAMES``
    is either ``None`` (not reported) or an unsigned 32-bit counter
    (**range:** 0..4294967295).

    NOTE: the class name spelling ("Reponse") comes from the generated
    schema binding and must be preserved for compatibility.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 leaf counters carried by this container.
    _LEAF_NAMES = (
        'bad_hash', 'bad_length', 'failed', 'generate_response_failures',
        'ignored', 'missing', 'passed', 'skipped', 'unexpected',
        'unexpected_zlb', 'validate',
    )

    def __init__(self):
        self.parent = None
        # All counters start unset until populated from the device.
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Absolute YANG path of this container in the operational tree.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:challenge-reponse'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated when at least one leaf carries a value.
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.ChallengeReponse']['meta_info']
class OverallStatistics(object):
    """
    Overall statistics.

    Read-only operational data. Every attribute named in ``_LEAF_NAMES``
    is either ``None`` (not reported) or an unsigned 32-bit counter
    (**range:** 0..4294967295).
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 leaf counters carried by this container.
    _LEAF_NAMES = (
        'bad_hash', 'bad_length', 'failed', 'generate_response_failures',
        'ignored', 'missing', 'passed', 'skipped', 'unexpected',
        'unexpected_zlb', 'validate',
    )

    def __init__(self):
        self.parent = None
        # All counters start unset until populated from the device.
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Absolute YANG path of this container in the operational tree.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:overall-statistics'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated when at least one leaf carries a value.
        return any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication.OverallStatistics']['meta_info']
@property
def _common_path(self):
    # Absolute YANG path of the authentication container in the oper tree.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational (oper) statistics node — never configuration data.
    return False
def _has_data(self):
if self.challenge_avp is not None and self.challenge_avp._has_data():
return True
if self.challenge_reponse is not None and self.challenge_reponse._has_data():
return True
if self.common_digest is not None and self.common_digest._has_data():
return True
if self.integrity_check is not None and self.integrity_check._has_data():
return True
if self.local_secret is not None and self.local_secret._has_data():
return True
if self.nonce_avp is not None and self.nonce_avp._has_data():
return True
if self.overall_statistics is not None and self.overall_statistics._has_data():
return True
if self.primary_digest is not None and self.primary_digest._has_data():
return True
if self.secondary_digest is not None and self.secondary_digest._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Import at call time so the (large) generated meta tables are only
    # loaded when schema metadata is actually requested.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Authentication']['meta_info']
class Global_(object):
"""
Tunnel counters
.. attribute:: drop
Drop data
**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_.Drop>`
.. attribute:: received
Received data
**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_.Received>`
.. attribute:: retransmit
Re transmit data
**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_.Retransmit>`
.. attribute:: total_drop
Total drop
**type**\: int
**range:** 0..4294967295
.. attribute:: total_received
Total received
**type**\: int
**range:** 0..4294967295
.. attribute:: total_retransmit
Total retransmit
**type**\: int
**range:** 0..4294967295
.. attribute:: total_transmit
Total transmit
**type**\: int
**range:** 0..4294967295
.. attribute:: transmit
Transmit data
**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.TunnelXr.Global_.Transmit>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.drop = L2Tp.Counters.Control.TunnelXr.Global_.Drop()
self.drop.parent = self
self.received = L2Tp.Counters.Control.TunnelXr.Global_.Received()
self.received.parent = self
self.retransmit = L2Tp.Counters.Control.TunnelXr.Global_.Retransmit()
self.retransmit.parent = self
self.total_drop = None
self.total_received = None
self.total_retransmit = None
self.total_transmit = None
self.transmit = L2Tp.Counters.Control.TunnelXr.Global_.Transmit()
self.transmit.parent = self
class Transmit(object):
"""
Transmit data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:transmit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Global_.Transmit']['meta_info']
class Retransmit(object):
"""
Re transmit data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:retransmit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Global_.Retransmit']['meta_info']
class Received(object):
"""
Received data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:received'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Global_.Received']['meta_info']
class Drop(object):
"""
Drop data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
    # Absolute schema path; no list keys anywhere on the way down, so the
    # path is a fixed constant.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:drop'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # The node carries data iff at least one of its uint32 counter leaves
    # has been populated.
    counter_leaves = (
        self.acknowledgement_packets, self.call_disconnect_notify_packets,
        self.hello_packets, self.incoming_call_connected_packets,
        self.incoming_call_replies, self.incoming_call_requests,
        self.outgoing_call_connected_packets, self.outgoing_call_replies,
        self.outgoing_call_requests, self.service_relay_replies,
        self.service_relay_requests, self.set_link_info_packets,
        self.start_control_connection_notifications,
        self.start_control_connection_replies,
        self.start_control_connection_requests,
        self.stop_control_connection_notifications,
        self.unknown_packets, self.wan_error_notify_packets,
        self.zero_length_body_packets)
    return any(leaf is not None for leaf in counter_leaves)

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Global_.Drop']['meta_info']
@property
def _common_path(self):
    # Absolute schema path; fixed because no ancestor is a keyed list.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Child containers count only if they themselves hold data; the
    # total_* leaves count whenever they are set at all.
    for child in (self.drop, self.received, self.retransmit):
        if child is not None and child._has_data():
            return True
    for total in (self.total_drop, self.total_received,
                  self.total_retransmit, self.total_transmit):
        if total is not None:
            return True
    return self.transmit is not None and self.transmit._has_data()

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters.Control.TunnelXr.Global_']['meta_info']
@property
def _common_path(self):
    # Absolute schema path of the tunnel-xr container (no list keys above).
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Populated iff either child container holds data.
    for child in (self.authentication, self.global_):
        if child is not None and child._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters.Control.TunnelXr']['meta_info']
class Tunnels(object):
    """
    Table of tunnel IDs of control message counters.

    .. attribute:: tunnel
        L2TP tunnel control message counters, one list entry per tunnel
        **type**: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.Counters.Control.Tunnels.Tunnel>`
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.tunnel = YList()
        self.tunnel.parent = self
        self.tunnel.name = 'tunnel'

    class Tunnel(object):
        """
        L2TP tunnel control message counters.

        .. attribute:: tunnel_id <key>
            L2TP tunnel ID (int, -2147483648..2147483647)
        .. attribute:: brief
            Local and remote addresses of the control connection
        .. attribute:: global_
            Per-direction counter containers plus aggregate totals
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.tunnel_id = None
            self.brief = L2Tp.Counters.Control.Tunnels.Tunnel.Brief()
            self.brief.parent = self
            self.global_ = L2Tp.Counters.Control.Tunnels.Tunnel.Global_()
            self.global_.parent = self

        class Brief(object):
            """
            L2TP control message local and remote addresses.

            .. attribute:: local_address
                Local IPv4 address (dotted-quad string)
            .. attribute:: remote_address
                Remote IPv4 address (dotted-quad string)
            .. attribute:: remote_tunnel_id
                Remote tunnel ID (uint32)
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.local_address = None
                self.remote_address = None
                self.remote_tunnel_id = None

            @property
            def _common_path(self):
                # Path is relative to the keyed parent list entry.
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:brief'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                return any(leaf is not None for leaf in
                           (self.local_address,
                            self.remote_address,
                            self.remote_tunnel_id))

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Brief']['meta_info']

        class Global_(object):
            """
            Global data: per-direction counter containers (transmit,
            retransmit, received, drop) and their aggregate totals.

            .. attribute:: drop / received / retransmit / transmit
                Per-message-type counter containers
            .. attribute:: total_drop / total_received / total_retransmit / total_transmit
                Aggregate counts (uint32)
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.drop = L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Drop()
                self.drop.parent = self
                self.received = L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Received()
                self.received.parent = self
                self.retransmit = L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Retransmit()
                self.retransmit.parent = self
                self.total_drop = None
                self.total_received = None
                self.total_retransmit = None
                self.total_transmit = None
                self.transmit = L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Transmit()
                self.transmit.parent = self

            class Transmit(object):
                """
                Transmit data: one uint32 counter leaf per L2TP
                control-message type.
                """

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                # Names of every uint32 counter leaf carried by this node.
                _COUNTER_LEAVES = (
                    'acknowledgement_packets', 'call_disconnect_notify_packets',
                    'hello_packets', 'incoming_call_connected_packets',
                    'incoming_call_replies', 'incoming_call_requests',
                    'outgoing_call_connected_packets', 'outgoing_call_replies',
                    'outgoing_call_requests', 'service_relay_replies',
                    'service_relay_requests', 'set_link_info_packets',
                    'start_control_connection_notifications',
                    'start_control_connection_replies',
                    'start_control_connection_requests',
                    'stop_control_connection_notifications',
                    'unknown_packets', 'wan_error_notify_packets',
                    'zero_length_body_packets')

                def __init__(self):
                    self.parent = None
                    for leaf in self._COUNTER_LEAVES:
                        setattr(self, leaf, None)

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:transmit'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in self._COUNTER_LEAVES)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Transmit']['meta_info']

            class Retransmit(object):
                """
                Re-transmit data: one uint32 counter leaf per L2TP
                control-message type.
                """

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                # Names of every uint32 counter leaf carried by this node.
                _COUNTER_LEAVES = (
                    'acknowledgement_packets', 'call_disconnect_notify_packets',
                    'hello_packets', 'incoming_call_connected_packets',
                    'incoming_call_replies', 'incoming_call_requests',
                    'outgoing_call_connected_packets', 'outgoing_call_replies',
                    'outgoing_call_requests', 'service_relay_replies',
                    'service_relay_requests', 'set_link_info_packets',
                    'start_control_connection_notifications',
                    'start_control_connection_replies',
                    'start_control_connection_requests',
                    'stop_control_connection_notifications',
                    'unknown_packets', 'wan_error_notify_packets',
                    'zero_length_body_packets')

                def __init__(self):
                    self.parent = None
                    for leaf in self._COUNTER_LEAVES:
                        setattr(self, leaf, None)

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:retransmit'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in self._COUNTER_LEAVES)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Retransmit']['meta_info']

            class Received(object):
                """
                Received data: one uint32 counter leaf per L2TP
                control-message type.
                """

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                # Names of every uint32 counter leaf carried by this node.
                _COUNTER_LEAVES = (
                    'acknowledgement_packets', 'call_disconnect_notify_packets',
                    'hello_packets', 'incoming_call_connected_packets',
                    'incoming_call_replies', 'incoming_call_requests',
                    'outgoing_call_connected_packets', 'outgoing_call_replies',
                    'outgoing_call_requests', 'service_relay_replies',
                    'service_relay_requests', 'set_link_info_packets',
                    'start_control_connection_notifications',
                    'start_control_connection_replies',
                    'start_control_connection_requests',
                    'stop_control_connection_notifications',
                    'unknown_packets', 'wan_error_notify_packets',
                    'zero_length_body_packets')

                def __init__(self):
                    self.parent = None
                    for leaf in self._COUNTER_LEAVES:
                        setattr(self, leaf, None)

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:received'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in self._COUNTER_LEAVES)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Received']['meta_info']

            class Drop(object):
                """
                Drop data: one uint32 counter leaf per L2TP
                control-message type.
                """

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                # Names of every uint32 counter leaf carried by this node.
                _COUNTER_LEAVES = (
                    'acknowledgement_packets', 'call_disconnect_notify_packets',
                    'hello_packets', 'incoming_call_connected_packets',
                    'incoming_call_replies', 'incoming_call_requests',
                    'outgoing_call_connected_packets', 'outgoing_call_replies',
                    'outgoing_call_requests', 'service_relay_replies',
                    'service_relay_requests', 'set_link_info_packets',
                    'start_control_connection_notifications',
                    'start_control_connection_replies',
                    'start_control_connection_requests',
                    'stop_control_connection_notifications',
                    'unknown_packets', 'wan_error_notify_packets',
                    'zero_length_body_packets')

                def __init__(self):
                    self.parent = None
                    for leaf in self._COUNTER_LEAVES:
                        setattr(self, leaf, None)

                @property
                def _common_path(self):
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:drop'

                def is_config(self):
                    ''' Returns True if this instance represents config data else returns False '''
                    return False

                def _has_data(self):
                    return any(getattr(self, leaf) is not None
                               for leaf in self._COUNTER_LEAVES)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Global_.Drop']['meta_info']

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:global'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # Child containers count only if they hold data themselves.
                for child in (self.drop, self.received, self.retransmit):
                    if child is not None and child._has_data():
                        return True
                for total in (self.total_drop, self.total_received,
                              self.total_retransmit, self.total_transmit):
                    if total is not None:
                        return True
                return self.transmit is not None and self.transmit._has_data()

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel.Global_']['meta_info']

        @property
        def _common_path(self):
            # Keyed list entry: the key leaf must be set before a path
            # can be derived.
            if self.tunnel_id is None:
                raise YPYModelError('Key property tunnel_id is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:counters'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:control'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-id = '
                    + str(self.tunnel_id) + ']')

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            if self.tunnel_id is not None:
                return True
            for child in (self.brief, self.global_):
                if child is not None and child._has_data():
                    return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tp.Counters.Control.Tunnels.Tunnel']['meta_info']

    @property
    def _common_path(self):
        return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:counters'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:control'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels')

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        if self.tunnel is not None:
            for entry in self.tunnel:
                if entry._has_data():
                    return True
        return False

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Counters.Control.Tunnels']['meta_info']
@property
def _common_path(self):
    # Absolute schema path of the control container (no list keys above).
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Populated iff either child container holds data.
    for child in (self.tunnel_xr, self.tunnels):
        if child is not None and child._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters.Control']['meta_info']
@property
def _common_path(self):
    # Absolute schema path of the counters container.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counters'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # Only one child: the control container.
    return self.control is not None and self.control._has_data()

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.Counters']['meta_info']
class TunnelConfigurations(object):
"""
List of tunnel IDs
.. attribute:: tunnel_configuration
L2TP tunnel information
**type**\: list of :py:class:`TunnelConfiguration <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations.TunnelConfiguration>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # YDK list wrapper holding TunnelConfiguration entries
    # (keyed by local_tunnel_id, per the class docstring).
    self.tunnel_configuration = YList()
    self.tunnel_configuration.parent = self
    self.tunnel_configuration.name = 'tunnel_configuration'
class TunnelConfiguration(object):
"""
L2TP tunnel information
.. attribute:: local_tunnel_id <key>
Local tunnel ID
**type**\: int
**range:** \-2147483648..2147483647
.. attribute:: l2tp_class
L2Tp class data
**type**\: :py:class:`L2TpClass <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tp.TunnelConfigurations.TunnelConfiguration.L2TpClass>`
.. attribute:: remote_tunnel_id
Remote tunnel ID
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Key leaf of this list entry; must be set before path derivation.
    self.local_tunnel_id = None
    # Child container with the resolved L2TP class parameters.
    self.l2tp_class = L2Tp.TunnelConfigurations.TunnelConfiguration.L2TpClass()
    self.l2tp_class.parent = self
    self.remote_tunnel_id = None
class L2TpClass(object):
"""
L2Tp class data
.. attribute:: accounting_method_list
Accounting List
**type**\: str
**length:** 0..256
.. attribute:: class_name_xr
Class name
**type**\: str
**length:** 0..256
.. attribute:: digest_hash
Hash configured as MD5 or SHA1
**type**\: :py:class:`DigestHashEnum <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.DigestHashEnum>`
.. attribute:: encoded_password
Encoded password
**type**\: str
**length:** 0..256
.. attribute:: hello_timeout
Hello timeout value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: host_name
Host name
**type**\: str
**length:** 0..256
.. attribute:: initial_retransmit_maximum_timeout
Initial timeout maximum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_minimum_timeout
Initial timeout minimum in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: initial_retransmit_retries
Initial retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: ip_tos
IP TOS
**type**\: int
**range:** 0..255
.. attribute:: is_authentication_enabled
True if authentication is enabled
**type**\: bool
.. attribute:: is_congestion_control_enabled
True if congestion control is enabled
**type**\: bool
.. attribute:: is_digest_check_enabled
True if digest check is enabled
**type**\: bool
.. attribute:: is_digest_enabled
True if digest authentication is enabled
**type**\: bool
.. attribute:: is_hidden
True if class is hidden
**type**\: bool
.. attribute:: is_peer_address_checked
True if peer address is checked
**type**\: bool
.. attribute:: password
Password
**type**\: str
**length:** 0..25
.. attribute:: receive_window_size
Receive window size
**type**\: int
**range:** 0..65535
.. attribute:: retransmit_maximum_timeout
Retransmit maximum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_minimum_timeout
Retransmit minimum timeout in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: retransmit_retries
Retransmit retries
**type**\: int
**range:** 0..4294967295
.. attribute:: setup_timeout
Timeout setup value in seconds
**type**\: int
**range:** 0..4294967295
**units**\: second
.. attribute:: timeout_no_user
Timeout no user
**type**\: int
**range:** 0..4294967295
.. attribute:: vrf_name
VRF name
**type**\: str
**length:** 0..256
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'

def __init__(self):
    """Initialise every leaf of this L2TP class record to "not set" (None)."""
    self.parent = None
    # All scalar leaves start out unset; they are filled in when the
    # operational data is decoded from the device.
    for leaf in ('accounting_method_list', 'class_name_xr', 'digest_hash',
                 'encoded_password', 'hello_timeout', 'host_name',
                 'initial_retransmit_maximum_timeout',
                 'initial_retransmit_minimum_timeout',
                 'initial_retransmit_retries', 'ip_tos',
                 'is_authentication_enabled',
                 'is_congestion_control_enabled', 'is_digest_check_enabled',
                 'is_digest_enabled', 'is_hidden', 'is_peer_address_checked',
                 'password', 'receive_window_size',
                 'retransmit_maximum_timeout', 'retransmit_minimum_timeout',
                 'retransmit_retries', 'setup_timeout', 'timeout_no_user',
                 'vrf_name'):
        setattr(self, leaf, None)
@property
def _common_path(self):
    """Absolute XPath of this node, derived from the parent's path."""
    parent = self.parent
    if parent is None:
        raise YPYModelError('parent is not set . Cannot derive path.')
    return '%s/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp-class' % parent._common_path
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model is operational (read-only) data, so this is always False.
    return False
def _has_data(self):
    """Return True when at least one leaf of this node has been set."""
    leaves = (
        self.accounting_method_list, self.class_name_xr, self.digest_hash,
        self.encoded_password, self.hello_timeout, self.host_name,
        self.initial_retransmit_maximum_timeout,
        self.initial_retransmit_minimum_timeout,
        self.initial_retransmit_retries, self.ip_tos,
        self.is_authentication_enabled, self.is_congestion_control_enabled,
        self.is_digest_check_enabled, self.is_digest_enabled,
        self.is_hidden, self.is_peer_address_checked, self.password,
        self.receive_window_size, self.retransmit_maximum_timeout,
        self.retransmit_minimum_timeout, self.retransmit_retries,
        self.setup_timeout, self.timeout_no_user, self.vrf_name)
    return any(leaf is not None for leaf in leaves)
@staticmethod
def _meta_info():
    # Meta module is imported at call time (function scope), not at module load.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.TunnelConfigurations.TunnelConfiguration.L2TpClass']['meta_info']
@property
def _common_path(self):
    """Absolute keyed XPath; the list key ``local_tunnel_id`` must be set."""
    if self.local_tunnel_id is None:
        raise YPYModelError('Key property local_tunnel_id is None')
    return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configurations'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configuration'
            '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
            + str(self.local_tunnel_id) + ']')
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model is operational (read-only) data, so this is always False.
    return False
def _has_data(self):
    """True when the key, the remote ID, or the nested l2tp-class holds data."""
    if self.local_tunnel_id is not None or self.remote_tunnel_id is not None:
        return True
    return self.l2tp_class is not None and self.l2tp_class._has_data()
@staticmethod
def _meta_info():
    # Meta module is imported at call time (function scope), not at module load.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.TunnelConfigurations.TunnelConfiguration']['meta_info']
@property
def _common_path(self):
    # Fixed container path: no list keys at this level.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configurations'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # This model is operational (read-only) data, so this is always False.
    return False
def _has_data(self):
    """True when any tunnel-configuration list entry carries data."""
    if self.tunnel_configuration is None:
        return False
    return any(entry._has_data() for entry in self.tunnel_configuration)
@staticmethod
def _meta_info():
    # Meta module is imported at call time (function scope), not at module load.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp.TunnelConfigurations']['meta_info']
class CounterHistFail(object):
    """
    Failure events leading to disconnection.

    .. attribute:: pkt_timeout
    	timeout events by packet (leaf-list of int, 0..4294967295)
    .. attribute:: rx_counters
    	Receive side counters (hex byte string)
    .. attribute:: sess_down_tmout
    	sesions affected due to timeout (int, 0..4294967295)
    .. attribute:: tx_counters
    	Send side counters (hex byte string)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # pkt_timeout is a YANG leaf-list, modelled as a YLeafList.
        self.pkt_timeout = YLeafList()
        self.pkt_timeout.parent = self
        self.pkt_timeout.name = 'pkt_timeout'
        self.rx_counters = None
        self.sess_down_tmout = None
        self.tx_counters = None

    @property
    def _common_path(self):
        """Fixed absolute XPath of this container."""
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:counter-hist-fail'

    def is_config(self):
        """Operational (read-only) data, so always False."""
        return False

    def _has_data(self):
        """True when any leaf, or any element of pkt_timeout, is set."""
        if self.pkt_timeout is not None and any(
                item is not None for item in self.pkt_timeout):
            return True
        return any(leaf is not None for leaf in
                   (self.rx_counters, self.sess_down_tmout, self.tx_counters))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.CounterHistFail']['meta_info']
class Classes(object):
    """
    List of L2TP class names.

    .. attribute:: class_
    	L2TP class name (list of :py:class:`Class_`)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of class entries, modelled as a YList.
        self.class_ = YList()
        self.class_.parent = self
        self.class_.name = 'class_'

    class Class_(object):
        """
        A single L2TP class entry, keyed by ``class_name``.

        Scalar leaves cover authentication/digest settings, hello and
        retransmit timers, window size, VRF and accounting details.
        All leaves are ``None`` until populated from the device.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        # Every scalar leaf of this list entry; ``class_name`` (first) is the key.
        _leaves = ('class_name', 'accounting_method_list', 'class_name_xr',
                   'digest_hash', 'encoded_password', 'hello_timeout',
                   'host_name', 'initial_retransmit_maximum_timeout',
                   'initial_retransmit_minimum_timeout',
                   'initial_retransmit_retries', 'ip_tos',
                   'is_authentication_enabled',
                   'is_congestion_control_enabled',
                   'is_digest_check_enabled', 'is_digest_enabled',
                   'is_hidden', 'is_peer_address_checked', 'password',
                   'receive_window_size', 'retransmit_maximum_timeout',
                   'retransmit_minimum_timeout', 'retransmit_retries',
                   'setup_timeout', 'timeout_no_user', 'vrf_name')

        def __init__(self):
            self.parent = None
            for leaf in self._leaves:
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            """Absolute keyed XPath; the list key ``class_name`` must be set."""
            if self.class_name is None:
                raise YPYModelError('Key property class_name is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:classes'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:class'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:class-name = '
                    + str(self.class_name) + ']')

        def is_config(self):
            """Operational (read-only) data, so always False."""
            return False

        def _has_data(self):
            """True when at least one leaf (including the key) is set."""
            return any(getattr(self, leaf) is not None
                       for leaf in self._leaves)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tp.Classes.Class_']['meta_info']

    @property
    def _common_path(self):
        """Fixed absolute XPath of the classes container."""
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:classes'

    def is_config(self):
        """Operational (read-only) data, so always False."""
        return False

    def _has_data(self):
        """True when any class list entry carries data."""
        if self.class_ is None:
            return False
        return any(entry._has_data() for entry in self.class_)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Classes']['meta_info']
class Tunnels(object):
    """
    List of tunnel IDs.

    .. attribute:: tunnel
    	L2TP tunnel information (list of :py:class:`Tunnel`)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of tunnels, modelled as a YList.
        self.tunnel = YList()
        self.tunnel.parent = self
        self.tunnel.name = 'tunnel'

    class Tunnel(object):
        """
        L2TP tunnel information, keyed by ``local_tunnel_id``.

        Scalar leaves describe addressing (local/remote address, port,
        tunnel name and ID), window and queue sizes, retransmission
        statistics and state flags; ``retransmit_time`` is a leaf-list
        holding the retransmit time distribution in seconds.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        # All scalar leaves; ``local_tunnel_id`` (first) is the list key.
        _leaves = ('local_tunnel_id', 'active_sessions', 'class_name',
                   'digest_secrets', 'is_congestion_control_enabled',
                   'is_pmtu_enabled', 'is_tunnel_up', 'local_address',
                   'local_port', 'local_tunnel_name', 'local_window_size',
                   'maximum_retransmission_time', 'order_queue_size',
                   'packet_queue_check', 'protocol', 'remote_address',
                   'remote_port', 'remote_tunnel_id', 'remote_tunnel_name',
                   'remote_window_size', 'resend_maximum_queue_size',
                   'resend_queue_size', 'resends', 'retransmission_time',
                   'sequence_nr', 'sequence_ns',
                   'total_out_of_order_drop_packets',
                   'total_out_of_order_reorder_packets',
                   'total_peer_authentication_failures',
                   'unsent_maximum_queue_size', 'unsent_queue_size',
                   'zero_length_body_acknowledgement_sent')

        def __init__(self):
            self.parent = None
            for leaf in self._leaves:
                setattr(self, leaf, None)
            # Leaf-list of retransmit times (seconds).
            self.retransmit_time = YLeafList()
            self.retransmit_time.parent = self
            self.retransmit_time.name = 'retransmit_time'

        @property
        def _common_path(self):
            """Absolute keyed XPath; the list key ``local_tunnel_id`` must be set."""
            if self.local_tunnel_id is None:
                raise YPYModelError('Key property local_tunnel_id is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
                    + str(self.local_tunnel_id) + ']')

        def is_config(self):
            """Operational (read-only) data, so always False."""
            return False

        def _has_data(self):
            """True when any scalar leaf, or any retransmit_time element, is set."""
            if any(getattr(self, leaf) is not None for leaf in self._leaves):
                return True
            times = self.retransmit_time
            return times is not None and any(t is not None for t in times)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tp.Tunnels.Tunnel']['meta_info']

    @property
    def _common_path(self):
        """Fixed absolute XPath of the tunnels container."""
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'

    def is_config(self):
        """Operational (read-only) data, so always False."""
        return False

    def _has_data(self):
        """True when any tunnel list entry carries data."""
        if self.tunnel is None:
            return False
        return any(entry._has_data() for entry in self.tunnel)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Tunnels']['meta_info']
class Sessions(object):
    """
    List of session IDs.

    .. attribute:: session
    	L2TP information for a particular session (list of :py:class:`Session`)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YANG list of sessions, modelled as a YList.
        self.session = YList()
        self.session.parent = self
        self.session.name = 'session'

    class Session(object):
        """
        L2TP information for one session, keyed by ``local_tunnel_id``
        and ``local_session_id``.

        Scalar leaves describe addressing, UDP ports, state flags and
        tie-breaker data; ``session_application_data`` nests
        per-application (xconnect / VPDN) details.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        # All scalar leaves; the first two entries are the list keys.
        _leaves = ('local_tunnel_id', 'local_session_id',
                   'call_serial_number', 'interface_name',
                   'is_conditional_debug_enabled', 'is_sequencing_on',
                   'is_session_locally_initiated', 'is_session_manual',
                   'is_session_state_established', 'is_session_up',
                   'is_udp_checksum_enabled', 'l2tp_sh_sess_tie_breaker',
                   'l2tp_sh_sess_tie_breaker_enabled',
                   'l2tp_sh_sess_udp_lport', 'l2tp_sh_sess_udp_rport',
                   'local_ip_address', 'local_tunnel_name', 'protocol',
                   'remote_ip_address', 'remote_session_id',
                   'remote_tunnel_id', 'remote_tunnel_name', 'unique_id')

        def __init__(self):
            self.parent = None
            for leaf in self._leaves:
                setattr(self, leaf, None)
            # Composite child container is always present.
            self.session_application_data = L2Tp.Sessions.Session.SessionApplicationData()
            self.session_application_data.parent = self

        class SessionApplicationData(object):
            """
            Session application data: the application type plus the
            xconnect and VPDN sub-containers.
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.l2tp_sh_sess_app_type = None
                self.vpdn = L2Tp.Sessions.Session.SessionApplicationData.Vpdn()
                self.vpdn.parent = self
                self.xconnect = L2Tp.Sessions.Session.SessionApplicationData.Xconnect()
                self.xconnect.parent = self

            class Xconnect(object):
                """Xconnect data for the session: circuit name, VC ID and state flags."""

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                # All scalar leaves of the xconnect container.
                _leaves = ('circuit_name', 'ipv6_protocol_tunneling',
                           'is_circuit_state_up',
                           'is_local_circuit_state_up',
                           'is_remote_circuit_state_up', 'sessionvc_id')

                def __init__(self):
                    self.parent = None
                    for leaf in self._leaves:
                        setattr(self, leaf, None)

                @property
                def _common_path(self):
                    """XPath derived from the parent container's path."""
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:xconnect'

                def is_config(self):
                    """Operational (read-only) data, so always False."""
                    return False

                def _has_data(self):
                    """True when at least one leaf is set."""
                    return any(getattr(self, leaf) is not None
                               for leaf in self._leaves)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Sessions.Session.SessionApplicationData.Xconnect']['meta_info']

            class Vpdn(object):
                """VPDN data for the session: interface name and username."""

                _prefix = 'tunnel-l2tun-oper'
                _revision = '2015-11-09'

                def __init__(self):
                    self.parent = None
                    self.interface_name = None
                    self.username = None

                @property
                def _common_path(self):
                    """XPath derived from the parent container's path."""
                    if self.parent is None:
                        raise YPYModelError('parent is not set . Cannot derive path.')
                    return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:vpdn'

                def is_config(self):
                    """Operational (read-only) data, so always False."""
                    return False

                def _has_data(self):
                    """True when the interface name or the username is set."""
                    return (self.interface_name is not None
                            or self.username is not None)

                @staticmethod
                def _meta_info():
                    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                    return meta._meta_table['L2Tp.Sessions.Session.SessionApplicationData.Vpdn']['meta_info']

            @property
            def _common_path(self):
                """XPath derived from the parent session's path."""
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:session-application-data'

            def is_config(self):
                """Operational (read-only) data, so always False."""
                return False

            def _has_data(self):
                """True when the app type or either sub-container holds data."""
                if self.l2tp_sh_sess_app_type is not None:
                    return True
                if self.vpdn is not None and self.vpdn._has_data():
                    return True
                return self.xconnect is not None and self.xconnect._has_data()

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tp.Sessions.Session.SessionApplicationData']['meta_info']

        @property
        def _common_path(self):
            """Absolute XPath keyed by both session list keys."""
            if self.local_tunnel_id is None:
                raise YPYModelError('Key property local_tunnel_id is None')
            if self.local_session_id is None:
                raise YPYModelError('Key property local_session_id is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:sessions'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:session'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
                    + str(self.local_tunnel_id) + ']'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:local-session-id = '
                    + str(self.local_session_id) + ']')

        def is_config(self):
            """Operational (read-only) data, so always False."""
            return False

        def _has_data(self):
            """True when any scalar leaf or the application-data child is set."""
            if any(getattr(self, leaf) is not None for leaf in self._leaves):
                return True
            app_data = self.session_application_data
            return app_data is not None and app_data._has_data()

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tp.Sessions.Session']['meta_info']

    @property
    def _common_path(self):
        """Fixed absolute XPath of the sessions container."""
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:sessions'

    def is_config(self):
        """Operational (read-only) data, so always False."""
        return False

    def _has_data(self):
        """True when any session list entry carries data."""
        if self.session is None:
            return False
        return any(entry._has_data() for entry in self.session)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Sessions']['meta_info']
class Session(object):
    """
    L2TP control messages counters.

    .. attribute:: unavailable
    	L2TP session unavailable information (:py:class:`Unavailable`)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Child container is always instantiated.
        self.unavailable = L2Tp.Session.Unavailable()
        self.unavailable.parent = self

    class Unavailable(object):
        """
        L2TP session unavailable information.

        .. attribute:: sessions_on_hold
        	Number of session ID in hold database (int, 0..4294967295)
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.sessions_on_hold = None

        @property
        def _common_path(self):
            """Fixed absolute XPath of this container."""
            return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:session/Cisco-IOS-XR-tunnel-l2tun-oper:unavailable'

        def is_config(self):
            """Operational (read-only) data, so always False."""
            return False

        def _has_data(self):
            """True when the single leaf is set."""
            return self.sessions_on_hold is not None

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tp.Session.Unavailable']['meta_info']

    @property
    def _common_path(self):
        """Fixed absolute XPath of the session container."""
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp/Cisco-IOS-XR-tunnel-l2tun-oper:session'

    def is_config(self):
        """Operational (read-only) data, so always False."""
        return False

    def _has_data(self):
        """True when the unavailable child holds data."""
        return self.unavailable is not None and self.unavailable._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tp.Session']['meta_info']
@property
def _common_path(self):
return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp'
def is_config(self):
    """Return whether this node holds config data; always False (operational model)."""
    return False
def _has_data(self):
if self.classes is not None and self.classes._has_data():
return True
if self.counter_hist_fail is not None and self.counter_hist_fail._has_data():
return True
if self.counters is not None and self.counters._has_data():
return True
if self.session is not None and self.session._has_data():
return True
if self.sessions is not None and self.sessions._has_data():
return True
if self.tunnel_configurations is not None and self.tunnel_configurations._has_data():
return True
if self.tunnels is not None and self.tunnels._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Metadata lookup for the top-level L2Tp entity.
    # Lazy import avoids a circular dependency on the _meta tables module.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tp']['meta_info']
class L2Tpv2(object):
"""
l2tpv2
.. attribute:: classes
List of L2TP class names
**type**\: :py:class:`Classes <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Classes>`
.. attribute:: counter_hist_fail
Failure events leading to disconnection
**type**\: :py:class:`CounterHistFail <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.CounterHistFail>`
.. attribute:: counters
L2TP control messages counters
**type**\: :py:class:`Counters <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters>`
.. attribute:: session
L2TP control messages counters
**type**\: :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Session>`
.. attribute:: sessions
List of session IDs
**type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions>`
.. attribute:: statistics
L2TP v2 statistics information
**type**\: :py:class:`Statistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Statistics>`
.. attribute:: tunnel
L2TPv2 tunnel
**type**\: :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnel>`
.. attribute:: tunnel_configurations
List of tunnel IDs
**type**\: :py:class:`TunnelConfigurations <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.TunnelConfigurations>`
.. attribute:: tunnels
List of tunnel IDs
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    # Instantiate every top-level child container in declaration order
    # and back-link each one to this root node.
    child_specs = (
        ('classes', L2Tpv2.Classes),
        ('counter_hist_fail', L2Tpv2.CounterHistFail),
        ('counters', L2Tpv2.Counters),
        ('session', L2Tpv2.Session),
        ('sessions', L2Tpv2.Sessions),
        ('statistics', L2Tpv2.Statistics),
        ('tunnel', L2Tpv2.Tunnel),
        ('tunnel_configurations', L2Tpv2.TunnelConfigurations),
        ('tunnels', L2Tpv2.Tunnels),
    )
    for attr_name, attr_cls in child_specs:
        node = attr_cls()
        node.parent = self
        setattr(self, attr_name, node)
class Counters(object):
"""
L2TP control messages counters
.. attribute:: control
L2TP control messages counters
**type**\: :py:class:`Control <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control>`
.. attribute:: forwarding
L2TP forwarding messages counters
**type**\: :py:class:`Forwarding <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Build both child counter containers and back-link them to this node.
    for attr_name, attr_cls in (('control', L2Tpv2.Counters.Control),
                                ('forwarding', L2Tpv2.Counters.Forwarding)):
        node = attr_cls()
        node.parent = self
        setattr(self, attr_name, node)
class Forwarding(object):
    """
    L2TP forwarding messages counters.

    .. attribute:: sessions
        List of class and session IDs
        **type**\: :py:class:`Sessions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding.Sessions>`
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Single child container; back-link it to this node.
        child = L2Tpv2.Counters.Forwarding.Sessions()
        child.parent = self
        self.sessions = child

    class Sessions(object):
        """
        List of class and session IDs.

        .. attribute:: session
            L2TP information for a particular session
            **type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Forwarding.Sessions.Session>`
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            # YANG keyed list of per-session forwarding counters.
            self.session = YList()
            self.session.parent = self
            self.session.name = 'session'

        class Session(object):
            """
            L2TP forwarding counters for a particular session.

            List keys: ``tunnel_id`` and ``session_id`` (int32).
            Counter leaves (``None`` until populated): ``in_bytes``,
            ``in_packets``, ``out_bytes``, ``out_packets`` (uint64) and
            ``remote_session_id`` (uint32).
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                # List keys.
                self.tunnel_id = None
                self.session_id = None
                # Traffic counters, unset until read from the device.
                self.in_bytes = self.in_packets = None
                self.out_bytes = self.out_packets = None
                self.remote_session_id = None

            @property
            def _common_path(self):
                # Both list keys must be set before a keyed path can be built.
                if self.tunnel_id is None:
                    raise YPYModelError('Key property tunnel_id is None')
                if self.session_id is None:
                    raise YPYModelError('Key property session_id is None')
                return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:forwarding/Cisco-IOS-XR-tunnel-l2tun-oper:sessions/Cisco-IOS-XR-tunnel-l2tun-oper:session[Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-id = ' + str(self.tunnel_id) + '][Cisco-IOS-XR-tunnel-l2tun-oper:session-id = ' + str(self.session_id) + ']'

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                # Data exists once any key or counter leaf is populated.
                leaves = (self.tunnel_id, self.session_id, self.in_bytes,
                          self.in_packets, self.out_bytes, self.out_packets,
                          self.remote_session_id)
                return any(leaf is not None for leaf in leaves)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tpv2.Counters.Forwarding.Sessions.Session']['meta_info']

        @property
        def _common_path(self):
            return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:forwarding/Cisco-IOS-XR-tunnel-l2tun-oper:sessions'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when any session list entry carries data.
            if self.session is None:
                return False
            return any(entry._has_data() for entry in self.session)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Counters.Forwarding.Sessions']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:forwarding'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Delegate to the child container.
        return self.sessions is not None and self.sessions._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Forwarding']['meta_info']
class Control(object):
"""
L2TP control messages counters
.. attribute:: tunnel_xr
L2TP control tunnel messages counters
**type**\: :py:class:`TunnelXr <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr>`
.. attribute:: tunnels
Table of tunnel IDs of control message counters
**type**\: :py:class:`Tunnels <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Build both child containers and back-link them to this node.
    for attr_name, attr_cls in (('tunnel_xr', L2Tpv2.Counters.Control.TunnelXr),
                                ('tunnels', L2Tpv2.Counters.Control.Tunnels)):
        node = attr_cls()
        node.parent = self
        setattr(self, attr_name, node)
class TunnelXr(object):
"""
L2TP control tunnel messages counters
.. attribute:: authentication
Tunnel authentication counters
**type**\: :py:class:`Authentication <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication>`
.. attribute:: global_
Tunnel counters
**type**\: :py:class:`Global_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global_>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Build both child containers and back-link them to this node.
    for attr_name, attr_cls in (
            ('authentication', L2Tpv2.Counters.Control.TunnelXr.Authentication),
            ('global_', L2Tpv2.Counters.Control.TunnelXr.Global_)):
        node = attr_cls()
        node.parent = self
        setattr(self, attr_name, node)
class Authentication(object):
"""
Tunnel authentication counters
.. attribute:: challenge_avp
Challenge AVP statistics
**type**\: :py:class:`ChallengeAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeAvp>`
.. attribute:: challenge_reponse
Challenge response statistics
**type**\: :py:class:`ChallengeReponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeReponse>`
.. attribute:: common_digest
Common digest statistics
**type**\: :py:class:`CommonDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.CommonDigest>`
.. attribute:: integrity_check
Integrity check statistics
**type**\: :py:class:`IntegrityCheck <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.IntegrityCheck>`
.. attribute:: local_secret
Local secret statistics
**type**\: :py:class:`LocalSecret <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.LocalSecret>`
.. attribute:: nonce_avp
Nonce AVP statistics
**type**\: :py:class:`NonceAvp <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.NonceAvp>`
.. attribute:: overall_statistics
Overall statistics
**type**\: :py:class:`OverallStatistics <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.OverallStatistics>`
.. attribute:: primary_digest
Primary digest statistics
**type**\: :py:class:`PrimaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.PrimaryDigest>`
.. attribute:: secondary_digest
Secondary digest statistics
**type**\: :py:class:`SecondaryDigest <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Authentication.SecondaryDigest>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    self.parent = None
    # Instantiate every per-category authentication statistics container
    # in declaration order and back-link each one to this node.
    auth = L2Tpv2.Counters.Control.TunnelXr.Authentication
    child_specs = (
        ('challenge_avp', auth.ChallengeAvp),
        ('challenge_reponse', auth.ChallengeReponse),
        ('common_digest', auth.CommonDigest),
        ('integrity_check', auth.IntegrityCheck),
        ('local_secret', auth.LocalSecret),
        ('nonce_avp', auth.NonceAvp),
        ('overall_statistics', auth.OverallStatistics),
        ('primary_digest', auth.PrimaryDigest),
        ('secondary_digest', auth.SecondaryDigest),
    )
    for attr_name, attr_cls in child_specs:
        node = attr_cls()
        node.parent = self
        setattr(self, attr_name, node)
class NonceAvp(object):
    """
    Nonce AVP statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:nonce-avp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.NonceAvp']['meta_info']
class CommonDigest(object):
    """
    Common digest statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:common-digest'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.CommonDigest']['meta_info']
class PrimaryDigest(object):
    """
    Primary digest statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:primary-digest'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.PrimaryDigest']['meta_info']
class SecondaryDigest(object):
    """
    Secondary digest statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:secondary-digest'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.SecondaryDigest']['meta_info']
class IntegrityCheck(object):
    """
    Integrity check statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:integrity-check'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.IntegrityCheck']['meta_info']
class LocalSecret(object):
    """
    Local secret statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:local-secret'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.LocalSecret']['meta_info']
class ChallengeAvp(object):
    """
    Challenge AVP statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:challenge-avp'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeAvp']['meta_info']
class ChallengeReponse(object):
    """
    Challenge response statistics.

    Operational (read-only) counters, each a uint32 in 0..4294967295 and
    ``None`` until populated: bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # All counter leaves start unset until read from the device.
        self.bad_hash = self.bad_length = self.failed = None
        self.generate_response_failures = self.ignored = None
        self.missing = self.passed = self.skipped = None
        self.unexpected = self.unexpected_zlb = self.validate = None

    @property
    def _common_path(self):
        # Fixed absolute path: this is a container, not a keyed list.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:challenge-reponse'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has a value.
        counters = (self.bad_hash, self.bad_length, self.failed,
                    self.generate_response_failures, self.ignored,
                    self.missing, self.passed, self.skipped,
                    self.unexpected, self.unexpected_zlb, self.validate)
        return any(value is not None for value in counters)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.ChallengeReponse']['meta_info']
class OverallStatistics(object):
    """
    Overall statistics.

    Aggregate authentication counters across all AVP/digest checks.
    Leaf counters (each an int in the range 0..4294967295, or None
    while unset): bad_hash, bad_length, failed,
    generate_response_failures, ignored, missing, passed, skipped,
    unexpected, unexpected_zlb, validate.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Every leaf counter starts out unset (None).
        for leaf in ('bad_hash', 'bad_length', 'failed',
                     'generate_response_failures', 'ignored', 'missing',
                     'passed', 'skipped', 'unexpected', 'unexpected_zlb',
                     'validate'):
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Fixed absolute path: all ancestors of this container are keyless.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication/Cisco-IOS-XR-tunnel-l2tun-oper:overall-statistics'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        # Operational-state model: never configuration.
        return False

    def _has_data(self):
        # Populated as soon as any single counter has been assigned.
        return any(getattr(self, leaf) is not None
                   for leaf in ('bad_hash', 'bad_length', 'failed',
                                'generate_response_failures', 'ignored',
                                'missing', 'passed', 'skipped', 'unexpected',
                                'unexpected_zlb', 'validate'))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication.OverallStatistics']['meta_info']
@property
def _common_path(self):
    # Absolute YANG path of the authentication-statistics container;
    # constant because no ancestor in this branch is a keyed list.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:authentication'
def is_config(self):
    """Return False: this subtree models operational state, never configuration."""
    return False
def _has_data(self):
if self.challenge_avp is not None and self.challenge_avp._has_data():
return True
if self.challenge_reponse is not None and self.challenge_reponse._has_data():
return True
if self.common_digest is not None and self.common_digest._has_data():
return True
if self.integrity_check is not None and self.integrity_check._has_data():
return True
if self.local_secret is not None and self.local_secret._has_data():
return True
if self.nonce_avp is not None and self.nonce_avp._has_data():
return True
if self.overall_statistics is not None and self.overall_statistics._has_data():
return True
if self.primary_digest is not None and self.primary_digest._has_data():
return True
if self.secondary_digest is not None and self.secondary_digest._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazily imported so the (large) generated meta module is only loaded on demand.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Authentication']['meta_info']
class Global_(object):
    """
    Tunnel counters

    .. attribute:: drop

    	Drop data
    	**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global_.Drop>`

    .. attribute:: received

    	Received data
    	**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global_.Received>`

    .. attribute:: retransmit

    	Re transmit data
    	**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global_.Retransmit>`

    .. attribute:: total_drop

    	Total drop
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_received

    	Total received
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_retransmit

    	Total retransmit
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_transmit

    	Total transmit
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: transmit

    	Transmit data
    	**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.TunnelXr.Global_.Transmit>`

    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Child containers are eagerly instantiated and back-linked via .parent.
        self.drop = L2Tpv2.Counters.Control.TunnelXr.Global_.Drop()
        self.drop.parent = self
        self.received = L2Tpv2.Counters.Control.TunnelXr.Global_.Received()
        self.received.parent = self
        self.retransmit = L2Tpv2.Counters.Control.TunnelXr.Global_.Retransmit()
        self.retransmit.parent = self
        # Aggregate totals stay unset (None) until populated from the device.
        self.total_drop = None
        self.total_received = None
        self.total_retransmit = None
        self.total_transmit = None
        self.transmit = L2Tpv2.Counters.Control.TunnelXr.Global_.Transmit()
        self.transmit.parent = self
class Transmit(object):
    """
    Transmit data.

    Per-message-type counters for transmitted L2TP control packets.
    Every attribute is a leaf counter: an int in 0..4294967295, or None
    while unset. Counters: acknowledgement_packets,
    call_disconnect_notify_packets, hello_packets,
    incoming_call_connected_packets, incoming_call_replies,
    incoming_call_requests, outgoing_call_connected_packets,
    outgoing_call_replies, outgoing_call_requests, service_relay_replies,
    service_relay_requests, set_link_info_packets,
    start_control_connection_notifications,
    start_control_connection_replies, start_control_connection_requests,
    stop_control_connection_notifications, unknown_packets,
    wan_error_notify_packets, zero_length_body_packets.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Every leaf counter starts out unset (None).
        for leaf in ('acknowledgement_packets',
                     'call_disconnect_notify_packets',
                     'hello_packets',
                     'incoming_call_connected_packets',
                     'incoming_call_replies',
                     'incoming_call_requests',
                     'outgoing_call_connected_packets',
                     'outgoing_call_replies',
                     'outgoing_call_requests',
                     'service_relay_replies',
                     'service_relay_requests',
                     'set_link_info_packets',
                     'start_control_connection_notifications',
                     'start_control_connection_replies',
                     'start_control_connection_requests',
                     'stop_control_connection_notifications',
                     'unknown_packets',
                     'wan_error_notify_packets',
                     'zero_length_body_packets'):
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Fixed absolute path: all ancestors of this container are keyless.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:transmit'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has been assigned.
        return any(getattr(self, leaf) is not None
                   for leaf in ('acknowledgement_packets',
                                'call_disconnect_notify_packets',
                                'hello_packets',
                                'incoming_call_connected_packets',
                                'incoming_call_replies',
                                'incoming_call_requests',
                                'outgoing_call_connected_packets',
                                'outgoing_call_replies',
                                'outgoing_call_requests',
                                'service_relay_replies',
                                'service_relay_requests',
                                'set_link_info_packets',
                                'start_control_connection_notifications',
                                'start_control_connection_replies',
                                'start_control_connection_requests',
                                'stop_control_connection_notifications',
                                'unknown_packets',
                                'wan_error_notify_packets',
                                'zero_length_body_packets'))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Global_.Transmit']['meta_info']
class Retransmit(object):
    """
    Re transmit data.

    Per-message-type counters for retransmitted L2TP control packets.
    Every attribute is a leaf counter: an int in 0..4294967295, or None
    while unset. Counters: acknowledgement_packets,
    call_disconnect_notify_packets, hello_packets,
    incoming_call_connected_packets, incoming_call_replies,
    incoming_call_requests, outgoing_call_connected_packets,
    outgoing_call_replies, outgoing_call_requests, service_relay_replies,
    service_relay_requests, set_link_info_packets,
    start_control_connection_notifications,
    start_control_connection_replies, start_control_connection_requests,
    stop_control_connection_notifications, unknown_packets,
    wan_error_notify_packets, zero_length_body_packets.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Every leaf counter starts out unset (None).
        for leaf in ('acknowledgement_packets',
                     'call_disconnect_notify_packets',
                     'hello_packets',
                     'incoming_call_connected_packets',
                     'incoming_call_replies',
                     'incoming_call_requests',
                     'outgoing_call_connected_packets',
                     'outgoing_call_replies',
                     'outgoing_call_requests',
                     'service_relay_replies',
                     'service_relay_requests',
                     'set_link_info_packets',
                     'start_control_connection_notifications',
                     'start_control_connection_replies',
                     'start_control_connection_requests',
                     'stop_control_connection_notifications',
                     'unknown_packets',
                     'wan_error_notify_packets',
                     'zero_length_body_packets'):
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Fixed absolute path: all ancestors of this container are keyless.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:retransmit'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has been assigned.
        return any(getattr(self, leaf) is not None
                   for leaf in ('acknowledgement_packets',
                                'call_disconnect_notify_packets',
                                'hello_packets',
                                'incoming_call_connected_packets',
                                'incoming_call_replies',
                                'incoming_call_requests',
                                'outgoing_call_connected_packets',
                                'outgoing_call_replies',
                                'outgoing_call_requests',
                                'service_relay_replies',
                                'service_relay_requests',
                                'set_link_info_packets',
                                'start_control_connection_notifications',
                                'start_control_connection_replies',
                                'start_control_connection_requests',
                                'stop_control_connection_notifications',
                                'unknown_packets',
                                'wan_error_notify_packets',
                                'zero_length_body_packets'))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Global_.Retransmit']['meta_info']
class Received(object):
    """
    Received data.

    Per-message-type counters for received L2TP control packets.
    Every attribute is a leaf counter: an int in 0..4294967295, or None
    while unset. Counters: acknowledgement_packets,
    call_disconnect_notify_packets, hello_packets,
    incoming_call_connected_packets, incoming_call_replies,
    incoming_call_requests, outgoing_call_connected_packets,
    outgoing_call_replies, outgoing_call_requests, service_relay_replies,
    service_relay_requests, set_link_info_packets,
    start_control_connection_notifications,
    start_control_connection_replies, start_control_connection_requests,
    stop_control_connection_notifications, unknown_packets,
    wan_error_notify_packets, zero_length_body_packets.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Every leaf counter starts out unset (None).
        for leaf in ('acknowledgement_packets',
                     'call_disconnect_notify_packets',
                     'hello_packets',
                     'incoming_call_connected_packets',
                     'incoming_call_replies',
                     'incoming_call_requests',
                     'outgoing_call_connected_packets',
                     'outgoing_call_replies',
                     'outgoing_call_requests',
                     'service_relay_replies',
                     'service_relay_requests',
                     'set_link_info_packets',
                     'start_control_connection_notifications',
                     'start_control_connection_replies',
                     'start_control_connection_requests',
                     'stop_control_connection_notifications',
                     'unknown_packets',
                     'wan_error_notify_packets',
                     'zero_length_body_packets'):
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Fixed absolute path: all ancestors of this container are keyless.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:received'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has been assigned.
        return any(getattr(self, leaf) is not None
                   for leaf in ('acknowledgement_packets',
                                'call_disconnect_notify_packets',
                                'hello_packets',
                                'incoming_call_connected_packets',
                                'incoming_call_replies',
                                'incoming_call_requests',
                                'outgoing_call_connected_packets',
                                'outgoing_call_replies',
                                'outgoing_call_requests',
                                'service_relay_replies',
                                'service_relay_requests',
                                'set_link_info_packets',
                                'start_control_connection_notifications',
                                'start_control_connection_replies',
                                'start_control_connection_requests',
                                'stop_control_connection_notifications',
                                'unknown_packets',
                                'wan_error_notify_packets',
                                'zero_length_body_packets'))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Global_.Received']['meta_info']
class Drop(object):
    """
    Drop data.

    Per-message-type counters for dropped L2TP control packets.
    Every attribute is a leaf counter: an int in 0..4294967295, or None
    while unset. Counters: acknowledgement_packets,
    call_disconnect_notify_packets, hello_packets,
    incoming_call_connected_packets, incoming_call_replies,
    incoming_call_requests, outgoing_call_connected_packets,
    outgoing_call_replies, outgoing_call_requests, service_relay_replies,
    service_relay_requests, set_link_info_packets,
    start_control_connection_notifications,
    start_control_connection_replies, start_control_connection_requests,
    stop_control_connection_notifications, unknown_packets,
    wan_error_notify_packets, zero_length_body_packets.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Every leaf counter starts out unset (None).
        for leaf in ('acknowledgement_packets',
                     'call_disconnect_notify_packets',
                     'hello_packets',
                     'incoming_call_connected_packets',
                     'incoming_call_replies',
                     'incoming_call_requests',
                     'outgoing_call_connected_packets',
                     'outgoing_call_replies',
                     'outgoing_call_requests',
                     'service_relay_replies',
                     'service_relay_requests',
                     'set_link_info_packets',
                     'start_control_connection_notifications',
                     'start_control_connection_replies',
                     'start_control_connection_requests',
                     'stop_control_connection_notifications',
                     'unknown_packets',
                     'wan_error_notify_packets',
                     'zero_length_body_packets'):
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        # Fixed absolute path: all ancestors of this container are keyless.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global/Cisco-IOS-XR-tunnel-l2tun-oper:drop'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # Populated as soon as any single counter has been assigned.
        return any(getattr(self, leaf) is not None
                   for leaf in ('acknowledgement_packets',
                                'call_disconnect_notify_packets',
                                'hello_packets',
                                'incoming_call_connected_packets',
                                'incoming_call_replies',
                                'incoming_call_requests',
                                'outgoing_call_connected_packets',
                                'outgoing_call_replies',
                                'outgoing_call_requests',
                                'service_relay_replies',
                                'service_relay_requests',
                                'set_link_info_packets',
                                'start_control_connection_notifications',
                                'start_control_connection_replies',
                                'start_control_connection_requests',
                                'stop_control_connection_notifications',
                                'unknown_packets',
                                'wan_error_notify_packets',
                                'zero_length_body_packets'))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Global_.Drop']['meta_info']
@property
def _common_path(self):
    # Absolute YANG path of the global tunnel-counters container;
    # constant because no ancestor in this branch is a keyed list.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr/Cisco-IOS-XR-tunnel-l2tun-oper:global'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state model: always False.
    return False
def _has_data(self):
if self.drop is not None and self.drop._has_data():
return True
if self.received is not None and self.received._has_data():
return True
if self.retransmit is not None and self.retransmit._has_data():
return True
if self.total_drop is not None:
return True
if self.total_received is not None:
return True
if self.total_retransmit is not None:
return True
if self.total_transmit is not None:
return True
if self.transmit is not None and self.transmit._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazily imported so the (large) generated meta module is only loaded on demand.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr.Global_']['meta_info']
@property
def _common_path(self):
    # Absolute YANG path of the tunnel-xr counters container (keyless branch).
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counters/Cisco-IOS-XR-tunnel-l2tun-oper:control/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-xr'
def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    # Operational-state model: always False.
    return False
def _has_data(self):
if self.authentication is not None and self.authentication._has_data():
return True
if self.global_ is not None and self.global_._has_data():
return True
return False
@staticmethod
def _meta_info():
    # Lazily imported so the (large) generated meta module is only loaded on demand.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control.TunnelXr']['meta_info']
class Tunnels(object):
    """
    Table of tunnel IDs of control message counters

    .. attribute:: tunnel

    	L2TP tunnel control message counters
    	**type**\: list of :py:class:`Tunnel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel>`

    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # YList holds the keyed per-tunnel entries; name/parent are required
        # by the YDK runtime to build each child's XPath.
        self.tunnel = YList()
        self.tunnel.parent = self
        self.tunnel.name = 'tunnel'
class Tunnel(object):
    """
    L2TP tunnel control message counters

    .. attribute:: tunnel_id  <key>

    	L2TP tunnel ID
    	**type**\: int

    	**range:** \-2147483648..2147483647

    .. attribute:: brief

    	L2TP control message local and remote addresses
    	**type**\: :py:class:`Brief <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Brief>`

    .. attribute:: global_

    	Global data
    	**type**\: :py:class:`Global_ <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_>`

    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # List key: must be set before the entry's XPath can be derived.
        self.tunnel_id = None
        # Child containers are eagerly instantiated and back-linked via .parent.
        self.brief = L2Tpv2.Counters.Control.Tunnels.Tunnel.Brief()
        self.brief.parent = self
        self.global_ = L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_()
        self.global_.parent = self
class Brief(object):
    """
    L2TP control message local and remote addresses.

    Leaves: local_address / remote_address (IPv4 dotted-quad strings)
    and remote_tunnel_id (int, 0..4294967295); each is None while unset.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        self.local_address = None
        self.remote_address = None
        self.remote_tunnel_id = None

    @property
    def _common_path(self):
        # This container lives under a keyed list entry, so the path can
        # only be derived through the parent tunnel.
        if self.parent is None:
            raise YPYModelError('parent is not set . Cannot derive path.')
        return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:brief'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True as soon as any leaf has been assigned.
        return any(leaf is not None for leaf in (self.local_address,
                                                 self.remote_address,
                                                 self.remote_tunnel_id))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Brief']['meta_info']
class Global_(object):
    """
    Global data

    .. attribute:: drop

    	Drop data
    	**type**\: :py:class:`Drop <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Drop>`

    .. attribute:: received

    	Received data
    	**type**\: :py:class:`Received <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Received>`

    .. attribute:: retransmit

    	Re transmit data
    	**type**\: :py:class:`Retransmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Retransmit>`

    .. attribute:: total_drop

    	Total drop
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_received

    	Total received
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_retransmit

    	Total retransmit
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: total_transmit

    	Total transmit
    	**type**\: int

    	**range:** 0..4294967295

    .. attribute:: transmit

    	Transmit data
    	**type**\: :py:class:`Transmit <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Transmit>`

    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Child containers are eagerly instantiated and back-linked via .parent.
        self.drop = L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Drop()
        self.drop.parent = self
        self.received = L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Received()
        self.received.parent = self
        self.retransmit = L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Retransmit()
        self.retransmit.parent = self
        # Aggregate totals stay unset (None) until populated from the device.
        self.total_drop = None
        self.total_received = None
        self.total_retransmit = None
        self.total_transmit = None
        self.transmit = L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Transmit()
        self.transmit.parent = self
class Transmit(object):
"""
Transmit data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-l2tun-oper:transmit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Transmit']['meta_info']
class Retransmit(object):
"""
Re transmit data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-l2tun-oper:retransmit'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Retransmit']['meta_info']
class Received(object):
"""
Received data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-l2tun-oper:received'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Received']['meta_info']
class Drop(object):
"""
Drop data
.. attribute:: acknowledgement_packets
Packets acknowledgement
**type**\: int
**range:** 0..4294967295
.. attribute:: call_disconnect_notify_packets
Call disconnect notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: hello_packets
Keep alive messages
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_connected_packets
Incoming call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_replies
Incoming call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: incoming_call_requests
Incoming call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_connected_packets
Outgoing call connected packets
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_replies
Outgoing call replies
**type**\: int
**range:** 0..4294967295
.. attribute:: outgoing_call_requests
Outgoing call requests
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_replies
Service relay reply counts
**type**\: int
**range:** 0..4294967295
.. attribute:: service_relay_requests
Service relay request counts
**type**\: int
**range:** 0..4294967295
.. attribute:: set_link_info_packets
Set link info packets
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_notifications
Start control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_replies
Start control connection replies
**type**\: int
**range:** 0..4294967295
.. attribute:: start_control_connection_requests
Start control connection requests
**type**\: int
**range:** 0..4294967295
.. attribute:: stop_control_connection_notifications
Stop control connection notifications
**type**\: int
**range:** 0..4294967295
.. attribute:: unknown_packets
Unknown packets
**type**\: int
**range:** 0..4294967295
.. attribute:: wan_error_notify_packets
WAN error notify packets
**type**\: int
**range:** 0..4294967295
.. attribute:: zero_length_body_packets
Zero length body packets
**type**\: int
**range:** 0..4294967295
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
self.parent = None
self.acknowledgement_packets = None
self.call_disconnect_notify_packets = None
self.hello_packets = None
self.incoming_call_connected_packets = None
self.incoming_call_replies = None
self.incoming_call_requests = None
self.outgoing_call_connected_packets = None
self.outgoing_call_replies = None
self.outgoing_call_requests = None
self.service_relay_replies = None
self.service_relay_requests = None
self.set_link_info_packets = None
self.start_control_connection_notifications = None
self.start_control_connection_replies = None
self.start_control_connection_requests = None
self.stop_control_connection_notifications = None
self.unknown_packets = None
self.wan_error_notify_packets = None
self.zero_length_body_packets = None
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-l2tun-oper:drop'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.acknowledgement_packets is not None:
return True
if self.call_disconnect_notify_packets is not None:
return True
if self.hello_packets is not None:
return True
if self.incoming_call_connected_packets is not None:
return True
if self.incoming_call_replies is not None:
return True
if self.incoming_call_requests is not None:
return True
if self.outgoing_call_connected_packets is not None:
return True
if self.outgoing_call_replies is not None:
return True
if self.outgoing_call_requests is not None:
return True
if self.service_relay_replies is not None:
return True
if self.service_relay_requests is not None:
return True
if self.set_link_info_packets is not None:
return True
if self.start_control_connection_notifications is not None:
return True
if self.start_control_connection_replies is not None:
return True
if self.start_control_connection_requests is not None:
return True
if self.stop_control_connection_notifications is not None:
return True
if self.unknown_packets is not None:
return True
if self.wan_error_notify_packets is not None:
return True
if self.zero_length_body_packets is not None:
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_.Drop']['meta_info']
@property
def _common_path(self):
if self.parent is None:
raise YPYModelError('parent is not set . Cannot derive path.')
return self.parent._common_path +'/Cisco-IOS-XR-tunnel-l2tun-oper:global'
def is_config(self):
''' Returns True if this instance represents config data else returns False '''
return False
def _has_data(self):
if self.drop is not None and self.drop._has_data():
return True
if self.received is not None and self.received._has_data():
return True
if self.retransmit is not None and self.retransmit._has_data():
return True
if self.total_drop is not None:
return True
if self.total_received is not None:
return True
if self.total_retransmit is not None:
return True
if self.total_transmit is not None:
return True
if self.transmit is not None and self.transmit._has_data():
return True
return False
@staticmethod
def _meta_info():
from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel.Global_']['meta_info']
@property
def _common_path(self):
    """XPath of this tunnel list entry, keyed by the local tunnel ID."""
    if self.tunnel_id is None:
        raise YPYModelError('Key property tunnel_id is None')
    return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:counters'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:control'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
            '[Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-id = '
            + str(self.tunnel_id) + ']')

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    """True when the list key or any child container carries data."""
    if self.tunnel_id is not None:
        return True
    for child in (self.brief, self.global_):
        if child is not None and child._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control.Tunnels.Tunnel']['meta_info']
@property
def _common_path(self):
    """Fixed XPath of the tunnels list container."""
    return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:counters'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:control'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels')

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    """True when any tunnel list entry carries data."""
    if self.tunnel is None:
        return False
    return any(entry._has_data() for entry in self.tunnel)

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control.Tunnels']['meta_info']
@property
def _common_path(self):
    """Fixed XPath of the control-counters container."""
    return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:counters'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:control')

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    """True when either child container carries data."""
    for child in (self.tunnel_xr, self.tunnels):
        if child is not None and child._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters.Control']['meta_info']
@property
def _common_path(self):
    """Fixed XPath of the counters container."""
    return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
            '/Cisco-IOS-XR-tunnel-l2tun-oper:counters')

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    """True when either child container carries data."""
    for child in (self.control, self.forwarding):
        if child is not None and child._has_data():
            return True
    return False

@staticmethod
def _meta_info():
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Counters']['meta_info']
class Statistics(object):
    """
    Global L2TPv2 statistics (operational data).

    Plain uint32 leaves: packet/session/tunnel counts, reordering data and
    the average per-packet processing time (microseconds).  All leaves start
    unset (``None``) until populated from the device.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # uint32 statistic leaves carried by this container.
    _STAT_LEAFS = (
        'average_packet_processing_time',
        'buffered_packets',
        'incoming_dropped_packets',
        'netio_packets',
        'received_out_of_order_packets',
        'received_packets',
        'reorder_deviation_packets',
        'reorder_packets',
        'sent_packets',
        'sessions',
        'tunnels',
    )

    def __init__(self):
        self.parent = None
        # Every leaf starts unset, mirroring the generated bindings.
        for leaf in self._STAT_LEAFS:
            setattr(self, leaf, None)

    @property
    def _common_path(self):
        """Fixed XPath of the statistics container."""
        return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:statistics')

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        """True when at least one statistic leaf has been populated."""
        return any(getattr(self, leaf) is not None for leaf in self._STAT_LEAFS)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Statistics']['meta_info']
class Tunnel(object):
    """
    L2TPv2 tunnel subtree (operational data).

    Wraps the tunnel accounting counters container.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    class Accounting(object):
        """Tunnel accounting counters container."""

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        class Statistics(object):
            """
            Tunnel accounting statistics leaves.

            Mixture of uint32/uint64/int32 counters covering checkpointing,
            RADIUS-style accounting records and transport failures.  All
            leaves start unset (``None``).
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            # Statistic leaves carried by this container.
            _STAT_LEAFS = (
                'current_size',
                'memory_failures',
                'negative_acknowledgement',
                'positive_acknowledgement',
                'queue_statistics_size',
                'records_checkpointed',
                'records_fail_to_recover',
                'records_failed_to_checkpoint',
                'records_recovered_from_checkpoint',
                'records_sent_from_queue',
                'records_sent_successfully',
                'reject',
                'start',
                'stop',
                'transport_failures',
            )

            def __init__(self):
                self.parent = None
                # Every leaf starts unset, mirroring the generated bindings.
                for leaf in self._STAT_LEAFS:
                    setattr(self, leaf, None)

            @property
            def _common_path(self):
                """Fixed XPath of the accounting statistics container."""
                return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                        '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
                        '/Cisco-IOS-XR-tunnel-l2tun-oper:accounting'
                        '/Cisco-IOS-XR-tunnel-l2tun-oper:statistics')

            def is_config(self):
                ''' Returns True if this instance represents config data else returns False '''
                return False

            def _has_data(self):
                """True when at least one statistic leaf has been populated."""
                return any(getattr(self, leaf) is not None
                           for leaf in self._STAT_LEAFS)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tpv2.Tunnel.Accounting.Statistics']['meta_info']

        def __init__(self):
            self.parent = None
            self.statistics = L2Tpv2.Tunnel.Accounting.Statistics()
            self.statistics.parent = self

        @property
        def _common_path(self):
            """Fixed XPath of the accounting container."""
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:accounting')

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            """True when the statistics child carries data."""
            return self.statistics is not None and self.statistics._has_data()

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Tunnel.Accounting']['meta_info']

    def __init__(self):
        self.parent = None
        self.accounting = L2Tpv2.Tunnel.Accounting()
        self.accounting.parent = self

    @property
    def _common_path(self):
        """Fixed XPath of the tunnel container."""
        return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel')

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        """True when the accounting child carries data."""
        return self.accounting is not None and self.accounting._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Tunnel']['meta_info']
class TunnelConfigurations(object):
    """
    List of tunnel IDs.

    .. attribute:: tunnel_configuration

        L2TP tunnel information (one list entry per local tunnel ID)

        **type**: list of :py:class:`TunnelConfiguration`
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list container; YList wires child entries back to us.
        self.tunnel_configuration = YList()
        self.tunnel_configuration.parent = self
        self.tunnel_configuration.name = 'tunnel_configuration'

    class TunnelConfiguration(object):
        """
        L2TP tunnel information, keyed by ``local_tunnel_id``.

        Attributes: ``local_tunnel_id`` (key, int32),
        ``remote_tunnel_id`` (uint32) and the ``l2tp_class`` child
        container holding the applied class data.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.local_tunnel_id = None
            self.remote_tunnel_id = None
            self.l2tp_class = L2Tpv2.TunnelConfigurations.TunnelConfiguration.L2TpClass()
            self.l2tp_class.parent = self

        class L2TpClass(object):
            """
            L2Tp class data: operational view of the class template
            (timeouts, retries, authentication/digest flags, receive
            window, TOS, VRF name, ...).  Each attribute is an optional
            leaf that stays ``None`` until populated.
            """

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            # Every leaf of this container; shared by __init__/_has_data.
            _LEAF_NAMES = (
                'accounting_method_list', 'class_name_xr', 'digest_hash',
                'encoded_password', 'hello_timeout', 'host_name',
                'initial_retransmit_maximum_timeout',
                'initial_retransmit_minimum_timeout',
                'initial_retransmit_retries', 'ip_tos',
                'is_authentication_enabled',
                'is_congestion_control_enabled',
                'is_digest_check_enabled', 'is_digest_enabled',
                'is_hidden', 'is_peer_address_checked', 'password',
                'receive_window_size', 'retransmit_maximum_timeout',
                'retransmit_minimum_timeout', 'retransmit_retries',
                'setup_timeout', 'timeout_no_user', 'vrf_name',
            )

            def __init__(self):
                self.parent = None
                for leaf in self._LEAF_NAMES:
                    setattr(self, leaf, None)

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tp-class'

            def is_config(self):
                """Operational (read-only) data, so never config."""
                return False

            def _has_data(self):
                """True when at least one leaf carries a value."""
                return any(getattr(self, leaf) is not None
                           for leaf in self._LEAF_NAMES)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tpv2.TunnelConfigurations.TunnelConfiguration.L2TpClass']['meta_info']

        @property
        def _common_path(self):
            if self.local_tunnel_id is None:
                raise YPYModelError('Key property local_tunnel_id is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configurations'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configuration'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
                    + str(self.local_tunnel_id) + ']')

        def is_config(self):
            """Operational (read-only) data, so never config."""
            return False

        def _has_data(self):
            """True when a key/leaf is set or the class child has data."""
            if self.local_tunnel_id is not None:
                return True
            if self.remote_tunnel_id is not None:
                return True
            return self.l2tp_class is not None and self.l2tp_class._has_data()

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.TunnelConfigurations.TunnelConfiguration']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel-configurations'

    def is_config(self):
        """Operational (read-only) data, so never config."""
        return False

    def _has_data(self):
        """True when any list entry carries data."""
        if self.tunnel_configuration is None:
            return False
        return any(entry._has_data() for entry in self.tunnel_configuration)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.TunnelConfigurations']['meta_info']
class CounterHistFail(object):
    """
    Failure events leading to disconnection.

    Leaves:
      * ``pkt_timeout``     -- timeout events by packet (leaf-list of uint32)
      * ``rx_counters``     -- receive side counters (hex string)
      * ``sess_down_tmout`` -- sessions affected due to timeout (uint32)
      * ``tx_counters``     -- send side counters (hex string)
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Leaf-list of timeout events; YLeafList wires entries back to us.
        self.pkt_timeout = YLeafList()
        self.pkt_timeout.parent = self
        self.pkt_timeout.name = 'pkt_timeout'
        self.rx_counters = None
        self.sess_down_tmout = None
        self.tx_counters = None

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:counter-hist-fail'

    def is_config(self):
        """Operational (read-only) data, so never config."""
        return False

    def _has_data(self):
        """True when any leaf or leaf-list entry carries a value."""
        if self.pkt_timeout is not None:
            if any(item is not None for item in self.pkt_timeout):
                return True
        return any(leaf is not None for leaf in
                   (self.rx_counters, self.sess_down_tmout, self.tx_counters))

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.CounterHistFail']['meta_info']
class Classes(object):
    """
    List of L2TP class names.

    .. attribute:: class_

        L2TP class name entries

        **type**: list of :py:class:`Class_`
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list container; YList wires child entries back to us.
        self.class_ = YList()
        self.class_.parent = self
        self.class_.name = 'class_'

    class Class_(object):
        """
        One L2TP class, keyed by ``class_name`` (1..31 characters).

        The remaining attributes are optional operational leaves
        mirroring the class template: accounting list, digest and
        authentication flags, hello/setup timeouts, retransmit limits,
        receive window size, IP TOS, VRF name, ...
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        # Key first, then every plain leaf of this list entry.
        _LEAF_NAMES = (
            'class_name', 'accounting_method_list', 'class_name_xr',
            'digest_hash', 'encoded_password', 'hello_timeout',
            'host_name', 'initial_retransmit_maximum_timeout',
            'initial_retransmit_minimum_timeout',
            'initial_retransmit_retries', 'ip_tos',
            'is_authentication_enabled', 'is_congestion_control_enabled',
            'is_digest_check_enabled', 'is_digest_enabled', 'is_hidden',
            'is_peer_address_checked', 'password', 'receive_window_size',
            'retransmit_maximum_timeout', 'retransmit_minimum_timeout',
            'retransmit_retries', 'setup_timeout', 'timeout_no_user',
            'vrf_name',
        )

        def __init__(self):
            self.parent = None
            for leaf in self._LEAF_NAMES:
                setattr(self, leaf, None)

        @property
        def _common_path(self):
            if self.class_name is None:
                raise YPYModelError('Key property class_name is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:classes'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:class'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:class-name = '
                    + str(self.class_name) + ']')

        def is_config(self):
            """Operational (read-only) data, so never config."""
            return False

        def _has_data(self):
            """True when the key or any leaf carries a value."""
            return any(getattr(self, leaf) is not None
                       for leaf in self._LEAF_NAMES)

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Classes.Class_']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:classes'

    def is_config(self):
        """Operational (read-only) data, so never config."""
        return False

    def _has_data(self):
        """True when any list entry carries data."""
        if self.class_ is None:
            return False
        return any(entry._has_data() for entry in self.class_)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Classes']['meta_info']
class Tunnels(object):
    """
    List of tunnel IDs.

    .. attribute:: tunnel

        L2TP tunnel information (one list entry per local tunnel ID)

        **type**: list of :py:class:`Tunnel`
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Keyed list container; YList wires child entries back to us.
        self.tunnel = YList()
        self.tunnel.parent = self
        self.tunnel.name = 'tunnel'

    class Tunnel(object):
        """
        Operational state of one L2TP tunnel, keyed by
        ``local_tunnel_id``.

        The scalar leaves cover endpoint addresses/ports, tunnel names,
        window and queue sizes, retransmission timers/counters,
        sequence numbers and state flags; ``retransmit_time`` is a
        leaf-list holding the retransmit-time distribution in seconds.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        # Key first, then every plain (non-list) leaf of this entry.
        _LEAF_NAMES = (
            'local_tunnel_id', 'active_sessions', 'class_name',
            'digest_secrets', 'is_congestion_control_enabled',
            'is_pmtu_enabled', 'is_tunnel_up', 'local_address',
            'local_port', 'local_tunnel_name', 'local_window_size',
            'maximum_retransmission_time', 'order_queue_size',
            'packet_queue_check', 'protocol', 'remote_address',
            'remote_port', 'remote_tunnel_id', 'remote_tunnel_name',
            'remote_window_size', 'resend_maximum_queue_size',
            'resend_queue_size', 'resends', 'retransmission_time',
            'sequence_nr', 'sequence_ns',
            'total_out_of_order_drop_packets',
            'total_out_of_order_reorder_packets',
            'total_peer_authentication_failures',
            'unsent_maximum_queue_size', 'unsent_queue_size',
            'zero_length_body_acknowledgement_sent',
        )

        def __init__(self):
            self.parent = None
            for leaf in self._LEAF_NAMES:
                setattr(self, leaf, None)
            # Leaf-list; YLeafList wires entries back to this node.
            self.retransmit_time = YLeafList()
            self.retransmit_time.parent = self
            self.retransmit_time.name = 'retransmit_time'

        @property
        def _common_path(self):
            if self.local_tunnel_id is None:
                raise YPYModelError('Key property local_tunnel_id is None')
            return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'
                    '/Cisco-IOS-XR-tunnel-l2tun-oper:tunnel'
                    '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
                    + str(self.local_tunnel_id) + ']')

        def is_config(self):
            """Operational (read-only) data, so never config."""
            return False

        def _has_data(self):
            """True when any leaf or retransmit-time entry is present."""
            if any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES):
                return True
            if self.retransmit_time is not None:
                return any(item is not None for item in self.retransmit_time)
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Tunnels.Tunnel']['meta_info']

    @property
    def _common_path(self):
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:tunnels'

    def is_config(self):
        """Operational (read-only) data, so never config."""
        return False

    def _has_data(self):
        """True when any tunnel list entry carries data."""
        if self.tunnel is None:
            return False
        return any(entry._has_data() for entry in self.tunnel)

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Tunnels']['meta_info']
class Sessions(object):
"""
List of session IDs
.. attribute:: session
L2TP information for a particular session
**type**\: list of :py:class:`Session <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Sessions.Session>`
"""
_prefix = 'tunnel-l2tun-oper'
_revision = '2015-11-09'
def __init__(self):
    """Create an empty keyed list of per-session entries."""
    self.parent = None
    # YList wires child Session entries back to this container.
    session_list = YList()
    session_list.parent = self
    session_list.name = 'session'
    self.session = session_list
class Session(object):
    """
    L2TP information for one session, keyed by ``local_tunnel_id``
    and ``local_session_id``.

    Besides the two keys the entry carries optional scalar leaves
    (addresses, ports, state/debug flags, tie-breaker and UDP port
    info, ...) and a ``session_application_data`` child container.
    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    # Keys first, then every plain leaf of this list entry.
    _LEAF_NAMES = (
        'local_tunnel_id', 'local_session_id', 'call_serial_number',
        'interface_name', 'is_conditional_debug_enabled',
        'is_sequencing_on', 'is_session_locally_initiated',
        'is_session_manual', 'is_session_state_established',
        'is_session_up', 'is_udp_checksum_enabled',
        'l2tp_sh_sess_tie_breaker', 'l2tp_sh_sess_tie_breaker_enabled',
        'l2tp_sh_sess_udp_lport', 'l2tp_sh_sess_udp_rport',
        'local_ip_address', 'local_tunnel_name', 'protocol',
        'remote_ip_address', 'remote_session_id', 'remote_tunnel_id',
        'remote_tunnel_name', 'unique_id',
    )

    def __init__(self):
        self.parent = None
        for leaf in self._LEAF_NAMES:
            setattr(self, leaf, None)
        self.session_application_data = L2Tpv2.Sessions.Session.SessionApplicationData()
        self.session_application_data.parent = self

    class SessionApplicationData(object):
        """
        Session application data: the application type plus the
        VPDN and xconnect sub-containers.
        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.l2tp_sh_sess_app_type = None
            self.vpdn = L2Tpv2.Sessions.Session.SessionApplicationData.Vpdn()
            self.vpdn.parent = self
            self.xconnect = L2Tpv2.Sessions.Session.SessionApplicationData.Xconnect()
            self.xconnect.parent = self

        class Xconnect(object):
            """Xconnect data: circuit name, state flags and VC ID."""

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            # Every leaf of this container; shared by __init__/_has_data.
            _LEAF_NAMES = (
                'circuit_name', 'ipv6_protocol_tunneling',
                'is_circuit_state_up', 'is_local_circuit_state_up',
                'is_remote_circuit_state_up', 'sessionvc_id',
            )

            def __init__(self):
                self.parent = None
                for leaf in self._LEAF_NAMES:
                    setattr(self, leaf, None)

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:xconnect'

            def is_config(self):
                """Operational (read-only) data, so never config."""
                return False

            def _has_data(self):
                """True when at least one leaf carries a value."""
                return any(getattr(self, leaf) is not None
                           for leaf in self._LEAF_NAMES)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tpv2.Sessions.Session.SessionApplicationData.Xconnect']['meta_info']

        class Vpdn(object):
            """VPDN data: session username and bound interface name."""

            _prefix = 'tunnel-l2tun-oper'
            _revision = '2015-11-09'

            def __init__(self):
                self.parent = None
                self.interface_name = None
                self.username = None

            @property
            def _common_path(self):
                if self.parent is None:
                    raise YPYModelError('parent is not set . Cannot derive path.')
                return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:vpdn'

            def is_config(self):
                """Operational (read-only) data, so never config."""
                return False

            def _has_data(self):
                """True when at least one leaf carries a value."""
                return (self.interface_name is not None
                        or self.username is not None)

            @staticmethod
            def _meta_info():
                from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
                return meta._meta_table['L2Tpv2.Sessions.Session.SessionApplicationData.Vpdn']['meta_info']

        @property
        def _common_path(self):
            if self.parent is None:
                raise YPYModelError('parent is not set . Cannot derive path.')
            return self.parent._common_path + '/Cisco-IOS-XR-tunnel-l2tun-oper:session-application-data'

        def is_config(self):
            """Operational (read-only) data, so never config."""
            return False

        def _has_data(self):
            """True when the app type is set or either child has data."""
            if self.l2tp_sh_sess_app_type is not None:
                return True
            for child in (self.vpdn, self.xconnect):
                if child is not None and child._has_data():
                    return True
            return False

        @staticmethod
        def _meta_info():
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Sessions.Session.SessionApplicationData']['meta_info']

    @property
    def _common_path(self):
        if self.local_tunnel_id is None:
            raise YPYModelError('Key property local_tunnel_id is None')
        if self.local_session_id is None:
            raise YPYModelError('Key property local_session_id is None')
        return ('/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:sessions'
                '/Cisco-IOS-XR-tunnel-l2tun-oper:session'
                '[Cisco-IOS-XR-tunnel-l2tun-oper:local-tunnel-id = '
                + str(self.local_tunnel_id) + ']'
                '[Cisco-IOS-XR-tunnel-l2tun-oper:local-session-id = '
                + str(self.local_session_id) + ']')

    def is_config(self):
        """Operational (read-only) data, so never config."""
        return False

    def _has_data(self):
        """True when a key/leaf is set or the app-data child has data."""
        if any(getattr(self, leaf) is not None for leaf in self._LEAF_NAMES):
            return True
        child = self.session_application_data
        return child is not None and child._has_data()

    @staticmethod
    def _meta_info():
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Sessions.Session']['meta_info']
@property
def _common_path(self):
    # Top-level container: absolute XPath, no parent required.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:sessions'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # True when any child session entry carries data.
    if self.session is not None:
        for child_ref in self.session:
            if child_ref._has_data():
                return True

    return False

@staticmethod
def _meta_info():
    # Import deferred to avoid a circular import with the generated meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2.Sessions']['meta_info']
class Session(object):
    """
    L2TP control messages counters

    .. attribute:: unavailable

    	L2TP session unavailable information
    	**type**\: :py:class:`Unavailable <ydk.models.cisco_ios_xr.Cisco_IOS_XR_tunnel_l2tun_oper.L2Tpv2.Session.Unavailable>`

    """

    _prefix = 'tunnel-l2tun-oper'
    _revision = '2015-11-09'

    def __init__(self):
        self.parent = None
        # Child container is instantiated eagerly and linked back to self.
        self.unavailable = L2Tpv2.Session.Unavailable()
        self.unavailable.parent = self

    class Unavailable(object):
        """
        L2TP session unavailable information

        .. attribute:: sessions_on_hold

        	Number of session ID in hold database
        	**type**\: int

        	**range:** 0..4294967295

        """

        _prefix = 'tunnel-l2tun-oper'
        _revision = '2015-11-09'

        def __init__(self):
            self.parent = None
            self.sessions_on_hold = None

        @property
        def _common_path(self):
            # Fixed absolute XPath: this node has a single, non-keyed location.
            return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:session/Cisco-IOS-XR-tunnel-l2tun-oper:unavailable'

        def is_config(self):
            ''' Returns True if this instance represents config data else returns False '''
            return False

        def _has_data(self):
            # True when the single counter leaf has been populated.
            if self.sessions_on_hold is not None:
                return True

            return False

        @staticmethod
        def _meta_info():
            # Import deferred to avoid a circular import with the generated meta tables.
            from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
            return meta._meta_table['L2Tpv2.Session.Unavailable']['meta_info']

    @property
    def _common_path(self):
        # Fixed absolute XPath of the session container.
        return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2/Cisco-IOS-XR-tunnel-l2tun-oper:session'

    def is_config(self):
        ''' Returns True if this instance represents config data else returns False '''
        return False

    def _has_data(self):
        # True when the child container carries data.
        if self.unavailable is not None and self.unavailable._has_data():
            return True

        return False

    @staticmethod
    def _meta_info():
        # Import deferred to avoid a circular import with the generated meta tables.
        from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
        return meta._meta_table['L2Tpv2.Session']['meta_info']
@property
def _common_path(self):
    # Root of this model's operational data tree.
    return '/Cisco-IOS-XR-tunnel-l2tun-oper:l2tpv2'

def is_config(self):
    ''' Returns True if this instance represents config data else returns False '''
    return False

def _has_data(self):
    # True when any of the top-level child containers carries data.
    if self.classes is not None and self.classes._has_data():
        return True

    if self.counter_hist_fail is not None and self.counter_hist_fail._has_data():
        return True

    if self.counters is not None and self.counters._has_data():
        return True

    if self.session is not None and self.session._has_data():
        return True

    if self.sessions is not None and self.sessions._has_data():
        return True

    if self.statistics is not None and self.statistics._has_data():
        return True

    if self.tunnel is not None and self.tunnel._has_data():
        return True

    if self.tunnel_configurations is not None and self.tunnel_configurations._has_data():
        return True

    if self.tunnels is not None and self.tunnels._has_data():
        return True

    return False

@staticmethod
def _meta_info():
    # Import deferred to avoid a circular import with the generated meta tables.
    from ydk.models.cisco_ios_xr._meta import _Cisco_IOS_XR_tunnel_l2tun_oper as meta
    return meta._meta_table['L2Tpv2']['meta_info']
| |
package main
import (
"flag"
"github.com/katherinealbany/rodentia/logger"
"os"
"os/exec"
)
var (
log = logger.New("main")
dir string
repo string
build string
stable string
	release string
// init registers the command-line flags and their defaults; the values
// are filled in when main calls flag.Parse.
func init() {
	flag.StringVar(&dir, "dir", ".", "build directory")
	flag.StringVar(&repo, "repo", "katherinealbany", "docker registry repository")
	flag.StringVar(&build, "build", "latest", "build tag")
	flag.StringVar(&stable, "stable", "stable", "stable build tag")
	flag.StringVar(&release, "release", "v1.0.0", "release version")
	flag.StringVar(&force, "force", "false", "force the matter!")
	flag.StringVar(&push, "push", "true", "push after build")
}
// main parses the flags, logs the effective configuration, then shells
// out to `docker version`, streaming its output to this process'
// stdout/stderr and exiting fatally if the command fails to start or
// returns a non-zero status.
func main() {
	log.Info("Parsing...")
	flag.Parse()
	log.Debug("dir =", dir)
	log.Debug("repo =", repo)
	log.Debug("build =", build)
	log.Debug("stable =", stable)
	log.Debug("release =", release)
	log.Debug("force =", force)
	log.Debug("push =", push)

	cmd := exec.Command("docker", "version")
	// Forward the child's output directly to our own streams.
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	log.Debug("Starting...")
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}
	log.Info("Running...")
	// Wait reports a non-nil error for a non-zero exit status.
	if err := cmd.Wait(); err != nil {
		log.Fatal(err)
	}
}
var (
	force string
	push  string
)
from itertools import product
import numpy as np
from sympy import And
import pytest
from conftest import skipif, opts_tiling
from devito import (ConditionalDimension, Grid, Function, TimeFunction, SparseFunction, # noqa
Eq, Operator, Constant, Dimension, SubDimension, switchconfig,
SubDomain, Lt, Le, Gt, Ge, Ne, Buffer)
from devito.ir.iet import (Conditional, Expression, Iteration, FindNodes,
retrieve_iteration_tree)
from devito.symbolics import indexify, retrieve_functions, IntDiv
from devito.types import Array
class TestBufferedDimension(object):

    """Checks for TimeFunctions whose time buffer size is set via Buffer."""

    def test_multi_buffer(self):
        # One TimeFunction with the default (time_order-sized) buffer, one
        # saving into a 7-slot circular buffer, over the same grid.
        mesh = Grid((3, 3))
        plain = TimeFunction(name="f", grid=mesh)
        buffered = TimeFunction(name="g", grid=mesh, save=Buffer(7))

        stepper = Operator([Eq(plain.forward, 1), Eq(buffered, plain.forward)])
        stepper(time_M=3)

        # The default buffer wrapped around fully, so every slot holds 1
        assert np.allclose(plain.data, 1)
        # The 7-slot buffer was written only at indices 0..3; the rest stays 0
        assert np.allclose(buffered.data[0:4], 1)
        assert np.allclose(buffered.data[4:], 0)

    def test_multi_buffer_long_time(self):
        # Run for 21 timesteps so the 7-slot buffer wraps around twice.
        mesh = Grid((3, 3))
        t = mesh.time_dim
        plain = TimeFunction(name="f", grid=mesh)
        buffered = TimeFunction(name="g", grid=mesh, save=Buffer(7))

        stepper = Operator([Eq(plain.forward, t), Eq(buffered, t + 1)])
        stepper(time_M=20)

        # plain's two slots hold the last two timesteps: 19 and 20
        assert np.allclose(plain.data[0], 19)
        assert np.allclose(plain.data[1], 20)
        # buffered retains the last seven writes, i.e. times 15..21
        assert all(np.allclose(buffered.data[slot], 15 + slot)
                   for slot in range(7))
class TestSubDimension(object):

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_interior(self, opt):
        """
        Tests application of an Operator consisting of a single equation
        over the ``interior`` subdomain.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid)

        eqn = [Eq(u.forward, u + 2, subdomain=interior)]

        op = Operator(eqn, opt=opt)
        op.apply(time_M=2)
        # Interior points accumulated the +2 updates...
        assert np.all(u.data[1, 1:-1, 1:-1, 1:-1] == 6.)
        # ...while the boundary planes were never written
        assert np.all(u.data[1, :, 0] == 0.)
        assert np.all(u.data[1, :, -1] == 0.)
        assert np.all(u.data[1, :, :, 0] == 0.)
        assert np.all(u.data[1, :, :, -1] == 0.)

    def test_domain_vs_interior(self):
        """
        Tests application of an Operator consisting of two equations, one
        over the whole domain (default), and one over the ``interior`` subdomain.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim  # noqa

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid)  # noqa
        eqs = [Eq(u.forward, u + 1),
               Eq(u.forward, u.forward + 2, subdomain=interior)]

        op = Operator(eqs, opt='noop')
        trees = retrieve_iteration_tree(op)
        # The two equations must not be fused into a single loop nest
        assert len(trees) == 2

        op.apply(time_M=1)
        # Boundary points saw only the domain-wide +1
        assert np.all(u.data[1, 0, :, :] == 1)
        assert np.all(u.data[1, -1, :, :] == 1)
        assert np.all(u.data[1, :, 0, :] == 1)
        assert np.all(u.data[1, :, -1, :] == 1)
        assert np.all(u.data[1, :, :, 0] == 1)
        assert np.all(u.data[1, :, :, -1] == 1)
        # Interior points saw +1 followed by +2
        assert np.all(u.data[1, 1:3, 1:3, 1:3] == 3)

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_subdim_middle(self, opt):
        """
        Tests that instantiating SubDimensions using the classmethod
        constructors works correctly.
        """
        grid = Grid(shape=(4, 4, 4))
        x, y, z = grid.dimensions
        t = grid.stepping_dim  # noqa

        u = TimeFunction(name='u', grid=grid)  # noqa
        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=1,
                                 thickness_right=1)
        eqs = [Eq(u.forward, u + 1)]
        # Restrict the update to the middle of the x dimension
        eqs = [e.subs(x, xi) for e in eqs]

        op = Operator(eqs, opt=opt)

        u.data[:] = 1.0
        op.apply(time_M=1)
        # The one-point-thick outer x-planes are untouched
        assert np.all(u.data[1, 0, :, :] == 1)
        assert np.all(u.data[1, -1, :, :] == 1)
        assert np.all(u.data[1, 1:3, :, :] == 2)

    def test_symbolic_size(self):
        """Check the symbolic size of all possible SubDimensions is as expected."""
        grid = Grid(shape=(4,))
        x, = grid.dimensions
        thickness = 4

        xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
        assert xleft.symbolic_size == xleft.thickness.left[0]

        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)
        assert xi.symbolic_size == (x.symbolic_max - x.symbolic_min -
                                    xi.thickness.left[0] - xi.thickness.right[0] + 1)

        xright = SubDimension.right(name='xright', parent=x, thickness=thickness)
        assert xright.symbolic_size == xright.thickness.right[0]

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_bcs(self, opt):
        """
        Tests application of an Operator consisting of multiple equations
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xleft = SubDimension.left(name='xleft', parent=x, thickness=thickness)
        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)
        xright = SubDimension.right(name='xright', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # Set the centre, then build left/right BCs off already-written values
        t_in_centre = Eq(u[t+1, xi, yi], 1)
        leftbc = Eq(u[t+1, xleft, yi], u[t+1, xleft+1, yi] + 1)
        rightbc = Eq(u[t+1, xright, yi], u[t+1, xright-1, yi] + 1)

        op = Operator([t_in_centre, leftbc, rightbc], opt=opt)
        op.apply(time_m=1, time_M=1)

        # y-margins never written
        assert np.all(u.data[0, :, 0:thickness] == 0.)
        assert np.all(u.data[0, :, -thickness:] == 0.)
        # BCs ramp up towards the domain edges
        assert all(np.all(u.data[0, i, thickness:-thickness] == (thickness+1-i))
                   for i in range(thickness))
        assert all(np.all(u.data[0, -i, thickness:-thickness] == (thickness+2-i))
                   for i in range(1, thickness + 1))
        assert np.all(u.data[0, thickness:-thickness, thickness:-thickness] == 1.)
    def test_flow_detection_interior(self):
        """
        Test detection of flow directions when SubDimensions are used
        (in this test they are induced by the ``interior`` subdomain).

        Stencil uses values at new timestep as well as those at previous ones
        This forces an evaluation order onto x.

        Weights are:

               x=0     x=1     x=2     x=3
         t=N    2    ---3
                v   /
         t=N+1  o--+----4

        Flow dependency should traverse x in the negative direction

               x=2     x=3     x=4     x=5      x=6
        t=0             0   --- 0     -- 1    -- 0
                        v  /    v    /   v   /
        t=1            44 -+--- 11 -+--- 2--+ -- 0
        """
        grid = Grid(shape=(10, 10))
        x, y = grid.dimensions

        interior = grid.interior

        u = TimeFunction(name='u', grid=grid, save=10, time_order=1, space_order=0)

        step = Eq(u.forward, 2*u
                  + 3*u.subs(x, x+x.spacing)
                  + 4*u.forward.subs(x, x+x.spacing),
                  subdomain=interior)
        op = Operator(step)

        u.data[0, 5, 5] = 1.0
        op.apply(time_M=0)
        # Values propagate leftwards from the seed, as per the diagram above
        assert u.data[1, 5, 5] == 2
        assert u.data[1, 4, 5] == 11
        assert u.data[1, 3, 5] == 44
        assert u.data[1, 2, 5] == 4*44
        assert u.data[1, 1, 5] == 4*4*44

        # This point isn't updated because of the `interior` selection
        assert u.data[1, 0, 5] == 0

        assert np.all(u.data[1, 6:, :] == 0)
        assert np.all(u.data[1, :, 0:5] == 0)
        assert np.all(u.data[1, :, 6:] == 0)

    @pytest.mark.parametrize('exprs,expected,', [
        # Carried dependence in both /t/ and /x/
        (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y])'], 'y'),
        (['Eq(u[t+1, x, y], u[t+1, x-1, y] + u[t, x, y], subdomain=interior)'], 'i0y'),
        # Carried dependence in both /t/ and /y/
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y])'], 'x'),
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)'], 'i0x'),
        # Carried dependence in /y/, leading to separate /y/ loops, one
        # going forward, the other backward
        (['Eq(u[t+1, x, y], u[t+1, x, y-1] + u[t, x, y], subdomain=interior)',
          'Eq(u[t+1, x, y], u[t+1, x, y+1] + u[t, x, y], subdomain=interior)'], 'i0x'),
    ])
    def test_iteration_property_parallel(self, exprs, expected):
        """Tests detection of sequential and parallel Iterations when applying
        equations over different subdomains."""
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions  # noqa
        t = grid.time_dim  # noqa

        interior = grid.interior  # noqa

        u = TimeFunction(name='u', grid=grid, save=10, time_order=1)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        for i, e in enumerate(list(exprs)):
            exprs[i] = eval(e)

        op = Operator(exprs, opt='noop')
        iterations = FindNodes(Iteration).visit(op)
        # Only the dimension named `expected` may be parallel; all others sequential
        assert all(i.is_Sequential for i in iterations if i.dim.name != expected)
        assert all(i.is_Parallel for i in iterations if i.dim.name == expected)

    @skipif(['device'])
    @pytest.mark.parametrize('exprs,expected,', [
        # All parallel, the innermost Iteration gets vectorized
        (['Eq(u[time, x, yleft], u[time, x, yleft] + 1.)'], ['yleft']),
        # All outers are parallel, carried dependence in `yleft`, so the middle
        # Iteration over `x` gets vectorized
        (['Eq(u[time, x, yleft], u[time, x, yleft+1] + 1.)'], ['x']),
        # Only the middle Iteration is parallel, so no vectorization (the Iteration
        # is left non-vectorised for OpenMP parallelism)
        (['Eq(u[time+1, x, yleft], u[time, x, yleft+1] + u[time+1, x, yleft+1])'], [])
    ])
    def test_iteration_property_vector(self, exprs, expected):
        """Tests detection of vector Iterations when using subdimensions."""
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions  # noqa
        time = grid.time_dim  # noqa

        # The leftmost 10 elements
        yleft = SubDimension.left(name='yleft', parent=y, thickness=10)  # noqa

        u = TimeFunction(name='u', grid=grid, save=10, time_order=0, space_order=1)  # noqa

        # List comprehension would need explicit locals/globals mappings to eval
        for i, e in enumerate(list(exprs)):
            exprs[i] = eval(e)

        op = Operator(exprs, opt='simd')
        iterations = FindNodes(Iteration).visit(op)
        vectorized = [i.dim.name for i in iterations if i.is_Vectorized]
        assert set(vectorized) == set(expected)
@pytest.mark.parametrize('opt', opts_tiling)
def test_subdimmiddle_parallel(self, opt):
"""
Tests application of an Operator consisting of a subdimension
defined over different sub-regions, explicitly created through the
use of SubDimensions.
"""
grid = Grid(shape=(20, 20))
x, y = grid.dimensions
t = grid.stepping_dim
thickness = 4
u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)
xi = SubDimension.middle(name='xi', parent=x,
thickness_left=thickness, thickness_right=thickness)
yi = SubDimension.middle(name='yi', parent=y,
thickness_left=thickness, thickness_right=thickness)
# a 5 point stencil that can be computed in parallel
centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t, xi-1, yi]
+ u[t, xi+1, yi] + u[t, xi, yi-1] + u[t, xi, yi+1])
u.data[0, 10, 10] = 1.0
op = Operator([centre], opt=opt)
print(op.ccode)
iterations = FindNodes(Iteration).visit(op)
assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xi, yi])
op.apply(time_m=0, time_M=0)
assert np.all(u.data[1, 9:12, 10] == 1.0)
assert np.all(u.data[1, 10, 9:12] == 1.0)
# Other than those, it should all be 0
u.data[1, 9:12, 10] = 0.0
u.data[1, 10, 9:12] = 0.0
assert np.all(u.data[1, :] == 0)
    def test_subdimleft_parallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        This tests that flow direction is not being automatically inferred
        from whether the subdimension is on the left or right boundary.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xl = SubDimension.left(name='xl', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # Can be done in parallel
        eq = Eq(u[t+1, xl, yi], u[t, xl, yi] + 1)

        op = Operator([eq])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim in [xl, yi])

        op.apply(time_m=0, time_M=0)

        # Only the left x-strip, away from the y-margins, was written
        assert np.all(u.data[1, 0:thickness, 0:thickness] == 0)
        assert np.all(u.data[1, 0:thickness, -thickness:] == 0)
        assert np.all(u.data[1, 0:thickness, thickness:-thickness] == 1)
        assert np.all(u.data[1, thickness+1:, :] == 0)

    def test_subdimmiddle_notparallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        Different from ``test_subdimmiddle_parallel`` because an interior
        dimension cannot be evaluated in parallel.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=0, time_order=1)

        xi = SubDimension.middle(name='xi', parent=x,
                                 thickness_left=thickness, thickness_right=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # flow dependencies in x and y which should force serial execution
        # in reverse direction
        centre = Eq(u[t+1, xi, yi], u[t, xi, yi] + u[t+1, xi+1, yi+1])
        u.data[0, 10, 10] = 1.0

        op = Operator([centre])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xi)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)

        op.apply(time_m=0, time_M=0)

        # The seed propagates along the anti-diagonal down to the interior edge
        for i in range(4, 11):
            assert u.data[1, i, i] == 1.0
            u.data[1, i, i] = 0.0

        assert np.all(u.data[1, :] == 0)

    def test_subdimleft_notparallel(self):
        """
        Tests application of an Operator consisting of a subdimension
        defined over different sub-regions, explicitly created through the
        use of SubDimensions.

        This tests that flow direction is not being automatically inferred
        from whether the subdimension is on the left or right boundary.
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions
        t = grid.stepping_dim
        thickness = 4

        u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=0)

        xl = SubDimension.left(name='xl', parent=x, thickness=thickness)

        yi = SubDimension.middle(name='yi', parent=y,
                                 thickness_left=thickness, thickness_right=thickness)

        # Flows inward (i.e. forward) rather than outward
        eq = Eq(u[t+1, xl, yi], u[t+1, xl-1, yi] + 1)

        op = Operator([eq])

        iterations = FindNodes(Iteration).visit(op)
        assert all(i.is_Affine and i.is_Sequential for i in iterations if i.dim == xl)
        assert all(i.is_Affine and i.is_Parallel for i in iterations if i.dim == yi)

        op.apply(time_m=1, time_M=1)

        # Each x-row in the left strip accumulates 1, 2, 3, 4
        assert all(np.all(u.data[0, :thickness, thickness+i] == [1, 2, 3, 4])
                   for i in range(12))
        assert np.all(u.data[0, thickness:] == 0)
        assert np.all(u.data[0, :, thickness+12:] == 0)

    def test_subdim_fd(self):
        """
        Test that the FD shortcuts are handled correctly with SubDimensions
        """
        grid = Grid(shape=(20, 20))
        x, y = grid.dimensions

        u = TimeFunction(name='u', save=None, grid=grid, space_order=1, time_order=1)
        u.data[:] = 2.

        # Derivatives are taken over the interior subdomain only
        eq = [Eq(u.forward, u.dx + u.dy, subdomain=grid.interior)]

        op = Operator(eq)
        op.apply(time_M=0)

        # Boundary untouched; interior derivative of a constant field is 0
        assert np.all(u.data[1, -1, :] == 2.)
        assert np.all(u.data[1, :, 0] == 2.)
        assert np.all(u.data[1, :, -1] == 2.)
        assert np.all(u.data[1, 0, :] == 2.)
        assert np.all(u.data[1, 1:18, 1:18] == 0.)

    def test_arrays_defined_over_subdims(self):
        """
        Check code generation when an Array uses a SubDimension.
        """
        grid = Grid(shape=(3,))
        x, = grid.dimensions
        xi, = grid.interior.dimensions

        f = Function(name='f', grid=grid)
        a = Array(name='a', dimensions=(xi,), dtype=grid.dtype)
        op = Operator([Eq(a[xi], 1), Eq(f, f + a[xi + 1], subdomain=grid.interior)],
                      openmp=False)
        assert len(op.parameters) == 6
        # neither `x_size` nor `xi_size` are expected here
        assert not any(i.name in ('x_size', 'xi_size') for i in op.parameters)

        # Try running it -- regardless of what it will produce, this should run
        # ie, this checks this error isn't raised:
        # "ValueError: No value found for parameter xi_size"
        op()

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_expandingbox_like(self, opt):
        """
        Make sure SubDimensions aren't an obstacle to expanding boxes.
        """
        grid = Grid(shape=(8, 8))
        x, y = grid.dimensions

        u = TimeFunction(name='u', grid=grid)

        xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
        yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)

        eqn = Eq(u.forward, u + 1)
        eqn = eqn.subs({x: xi, y: yi})

        op = Operator(eqn, opt=opt)

        # Zero thicknesses passed explicitly; the iteration bounds come from x/y
        op.apply(time=3, x_m=2, x_M=5, y_m=2, y_M=5,
                 xi_ltkn=0, xi_rtkn=0, yi_ltkn=0, yi_rtkn=0)

        assert np.all(u.data[0, 2:-2, 2:-2] == 4.)
        assert np.all(u.data[1, 2:-2, 2:-2] == 3.)
        assert np.all(u.data[:, :2] == 0.)
        assert np.all(u.data[:, -2:] == 0.)
        assert np.all(u.data[:, :, :2] == 0.)
        assert np.all(u.data[:, :, -2:] == 0.)
class TestConditionalDimension(object):

    """
    A collection of tests to check the correct functioning of ConditionalDimensions.
    """

    def test_basic(self):
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert(time in u2.indices)

        # usave is written only every `factor` timesteps
        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)
        assert(time_subsampled in usave.indices)

        eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.), Eq(usave, u)]
        op = Operator(eqns)
        op.apply(t_M=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
        # Each usave slot holds the field at time i*factor
        assert np.all([np.allclose(usave.data[i], i*factor)
                       for i in range((nt+factor-1)//factor)])

    def test_basic_shuffles(self):
        """
        Like ``test_basic``, but with different equation orderings. Nevertheless,
        we assert against the same exact values as in ``test_basic``, since we
        save `u`, not `u.forward`.
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)

        # Shuffle 1
        eqns = [Eq(usave, u), Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.)]
        op = Operator(eqns)
        op.apply(t_M=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
        assert np.all([np.allclose(usave.data[i], i*factor)
                       for i in range((nt+factor-1)//factor)])

        # Shuffle 2
        usave.data[:] = 0.
        u.data[:] = 0.
        u2.data[:] = 0.
        eqns = [Eq(u.forward, u + 1.), Eq(usave, u), Eq(u2.forward, u2 + 1.)]
        op = Operator(eqns)
        op.apply(t_M=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
        assert np.all([np.allclose(usave.data[i], i*factor)
                       for i in range((nt+factor-1)//factor)])

    @pytest.mark.parametrize('opt', opts_tiling)
    def test_spacial_subsampling(self, opt):
        """
        Test conditional dimension for the spatial ones.
        This test saves u every two grid points :
        u2[x, y] = u[2*x, 2*y]
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid, save=nt)
        assert(grid.time_dim in u.indices)

        # Creates subsampled spatial dimensions and the corresponding Grid
        dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
                      for d in u.grid.dimensions])
        grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
        u2 = TimeFunction(name='u2', grid=grid2, save=nt)
        assert(time in u2.indices)

        eqns = [Eq(u.forward, u + 1.), Eq(u2, u)]
        op = Operator(eqns, opt=opt)
        op.apply(time_M=nt-2)
        # Verify that u2[x,y]= u[2*x, 2*y]
        assert np.allclose(u.data[:-1, 0::2, 0::2], u2.data[:-1, :, :])

    def test_time_subsampling_fd(self):
        nt = 19
        grid = Grid(shape=(11, 11))
        x, y = grid.dimensions
        time = grid.time_dim

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled, time_order=2)

        # The second time derivative must be expressed in the subsampled dimension
        dx2 = [indexify(i) for i in retrieve_functions(usave.dt2.evaluate)]
        assert dx2 == [usave[time_subsampled - 1, x, y],
                       usave[time_subsampled + 1, x, y],
                       usave[time_subsampled, x, y]]

    def test_issue_1592(self):
        grid = Grid(shape=(11, 11))
        time = grid.time_dim
        time_sub = ConditionalDimension('t_sub', parent=time, factor=2)
        v = TimeFunction(name="v", grid=grid, space_order=4, time_dim=time_sub, save=5)
        w = Function(name="w", grid=grid, space_order=4)
        Operator(Eq(w, v.dx))(time=6)
        op = Operator(Eq(v.forward, v.dx))
        op.apply(time=6)
        exprs = FindNodes(Expression).visit(op)
        # The write index must be expressed via integer division of `time`
        assert exprs[-1].expr.lhs.indices[0] == IntDiv(time, 2) + 1
    def test_subsampled_fd(self):
        """
        Test that the FD shortcuts are handled correctly with ConditionalDimensions
        """
        grid = Grid(shape=(11, 11))
        time = grid.time_dim
        # Creates subsampled spatial dimensions and the corresponding Grid
        dims = tuple([ConditionalDimension(d.name+'sub', parent=d, factor=2)
                      for d in grid.dimensions])
        grid2 = Grid((6, 6), dimensions=dims, time_dimension=time)
        u2 = TimeFunction(name='u2', grid=grid2, space_order=2, time_order=1)
        u2.data.fill(2.)
        eqns = [Eq(u2.forward, u2.dx + u2.dy)]
        op = Operator(eqns)
        op.apply(time_M=0, x_M=11, y_M=11)
        # Verify that u2 contains subsampled fd values
        assert np.all(u2.data[0, :, :] == 2.)
        assert np.all(u2.data[1, 0, 0] == 0.)
        assert np.all(u2.data[1, -1, -1] == -20.)
        assert np.all(u2.data[1, 0, -1] == -10.)
        assert np.all(u2.data[1, -1, 0] == -10.)
        assert np.all(u2.data[1, 1:-1, 0] == 0.)
        assert np.all(u2.data[1, 0, 1:-1] == 0.)
        assert np.all(u2.data[1, 1:-1, -1] == -10.)
        assert np.all(u2.data[1, -1, 1:-1] == -10.)
        assert np.all(u2.data[1, 1:4, 1:4] == 0.)

    # This test generates an openmp loop form which makes older gccs upset
    @switchconfig(openmp=False)
    def test_nothing_in_negative(self):
        """Test the case where, when the condition is false, there is nothing to do."""
        nt = 4
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', save=nt, grid=grid)
        assert(grid.time_dim in u.indices)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)
        assert(time_subsampled in usave.indices)

        eqns = [Eq(usave, u)]
        op = Operator(eqns)

        u.data[:] = 1.0
        usave.data[:] = 0.0
        # time=1 is not a multiple of the factor, so no snapshot is taken
        op.apply(time_m=1, time_M=1)
        assert np.allclose(usave.data, 0.0)

        # time=0 is, so the snapshot is taken
        op.apply(time_m=0, time_M=0)
        assert np.allclose(usave.data, 1.0)

    def test_laplace(self):
        grid = Grid(shape=(20, 20, 20))
        x, y, z = grid.dimensions
        time = grid.time_dim
        t = grid.stepping_dim
        tsave = ConditionalDimension(name='tsave', parent=time, factor=2)

        u = TimeFunction(name='u', grid=grid, save=None, time_order=2)
        usave = TimeFunction(name='usave', grid=grid, time_dim=tsave,
                             time_order=0, space_order=0)

        steps = []
        # save of snapshot
        steps.append(Eq(usave, u))
        # standard laplace-like thing
        steps.append(Eq(u[t+1, x, y, z],
                        u[t, x, y, z] - u[t-1, x, y, z]
                        + u[t, x-1, y, z] + u[t, x+1, y, z]
                        + u[t, x, y-1, z] + u[t, x, y+1, z]
                        + u[t, x, y, z-1] + u[t, x, y, z+1]))

        op = Operator(steps)

        u.data[:] = 0.0
        u.data[0, 10, 10, 10] = 1.0
        op.apply(time_m=0, time_M=0)
        # One step of the 6-point stencil spreads the unit spike to 7 points
        assert np.sum(u.data[0, :, :, :]) == 1.0
        assert np.sum(u.data[1, :, :, :]) == 7.0
        assert np.all(usave.data[0, :, :, :] == u.data[0, :, :, :])

    def test_as_expr(self):
        # Like test_basic, but uses the ConditionalDimension as a plain symbol
        # on the right-hand side as well.
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert(time in u2.indices)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)
        assert(time_subsampled in usave.indices)

        eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
                Eq(usave, time_subsampled * u)]
        op = Operator(eqns)
        op.apply(t=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        assert np.all([np.allclose(u2.data[i], i) for i in range(nt)])
        # Slot i holds (i) * u(i*factor) = i * i*factor
        assert np.all([np.allclose(usave.data[i], i*factor*i)
                       for i in range((nt+factor-1)//factor)])

    def test_shifted(self):
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        u2 = TimeFunction(name='u2', grid=grid, save=nt)
        assert(time in u2.indices)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        usave = TimeFunction(name='usave', grid=grid, save=2, time_dim=time_subsampled)
        assert(time_subsampled in usave.indices)

        t_sub_shift = Constant(name='t_sub_shift', dtype=np.int32)

        eqns = [Eq(u.forward, u + 1.), Eq(u2.forward, u2 + 1.),
                Eq(usave.subs(time_subsampled, time_subsampled - t_sub_shift), u)]
        op = Operator(eqns)

        # Starting at time_m=10, so time_subsampled - t_sub_shift is in range
        op.apply(time_m=10, time_M=nt-2, t_sub_shift=3)
        assert np.all(np.allclose(u.data[0], 8))
        assert np.all([np.allclose(u2.data[i], i - 10) for i in range(10, nt)])
        assert np.all([np.allclose(usave.data[i], 2+i*factor) for i in range(2)])

    def test_no_index(self):
        """Test behaviour when the ConditionalDimension is used as a symbol in
        an expression."""
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim

        u = TimeFunction(name='u', grid=grid)
        assert(grid.stepping_dim in u.indices)

        v = Function(name='v', grid=grid)

        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)

        eqns = [Eq(u.forward, u + 1), Eq(v, v + u*u*time_subsampled)]
        op = Operator(eqns)
        op.apply(t_M=nt-2)
        assert np.all(np.allclose(u.data[(nt-1) % 3], nt-1))
        # expected result is 1600
        # v = u[0]**2 * 0 + u[4]**2 * 1 + u[8]**2 * 2 + u[12]**2 * 3 + u[16]**2 * 4
        # with u[t] = t
        # v = 16 * 1 + 64 * 2 + 144 * 3 + 256 * 4 = 1600
        assert np.all(np.allclose(v.data, 1600))
    def test_no_index_sparse(self):
        """Test behaviour when the ConditionalDimension is used as a symbol in
        an expression over sparse data objects."""
        grid = Grid(shape=(4, 4), extent=(3.0, 3.0))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid, save=1)
        f.data[:] = 0.
        coordinates = [(0.5, 0.5), (0.5, 2.5), (2.5, 0.5), (2.5, 2.5)]
        sf = SparseFunction(name='sf', grid=grid, npoint=4, coordinates=coordinates)
        sf.data[:] = 1.
        sd = sf.dimensions[sf._sparse_position]
        # We want to write to `f` through `sf` so that we obtain the
        # following 4x4 grid (the '*' show the position of the sparse points)
        # We do that by emulating an injection
        #
        # 0 --- 0 --- 0 --- 0
        # |  *  |     |  *  |
        # 0 --- 1 --- 1 --- 0
        # |     |     |     |
        # 0 --- 1 --- 1 --- 0
        # |  *  |     |  *  |
        # 0 --- 0 --- 0 --- 0
        radius = 1
        indices = [(i, i+radius) for i in sf._coordinate_indices]
        bounds = [i.symbolic_size - radius for i in grid.dimensions]
        eqs = []
        for e, i in enumerate(product(*indices)):
            # Guard each write so it only happens strictly inside the domain
            args = [j > 0 for j in i]
            args.extend([j < k for j, k in zip(i, bounds)])
            condition = And(*args, evaluate=False)
            cd = ConditionalDimension('sfc%d' % e, parent=sd, condition=condition)
            index = [time] + list(i)
            eqs.append(Eq(f[index], f[index] + sf[cd]))
        op = Operator(eqs)
        op.apply(time=0)
        # Interior points received the injected value; the boundary stayed zero
        assert np.all(f.data[0, 1:-1, 1:-1] == 1.)
        assert np.all(f.data[0, 0] == 0.)
        assert np.all(f.data[0, -1] == 0.)
        assert np.all(f.data[0, :, 0] == 0.)
        assert np.all(f.data[0, :, -1] == 0.)
    def test_symbolic_factor(self):
        """
        Test ConditionalDimension with symbolic factor (provided as a Constant).
        """
        g = Grid(shape=(4, 4, 4))
        u = TimeFunction(name='u', grid=g, time_order=0)
        fact = Constant(name='fact', dtype=np.int32, value=4)
        tsub = ConditionalDimension(name='tsub', parent=g.time_dim, factor=fact)
        usave = TimeFunction(name='usave', grid=g, time_dim=tsub, save=4)
        op = Operator([Eq(u, u + 1), Eq(usave, u)])
        op.apply(time=7)  # Use `fact`'s default value, 4
        assert np.all(usave.data[0] == 1)
        assert np.all(usave.data[1] == 5)
        u.data[:] = 0.
        # Rerun with the subsampling factor overridden at apply-time
        op.apply(time=7, fact=2)
        assert np.all(usave.data[0] == 1)
        assert np.all(usave.data[1] == 3)
        assert np.all(usave.data[2] == 5)
        assert np.all(usave.data[3] == 7)
    def test_implicit_dims(self):
        """
        Test ConditionalDimension as an implicit dimension for an equation.
        """
        # This test makes an Operator that should create a vector of increasing
        # integers, but stop incrementing when a certain stop value is reached
        shape = (50,)
        stop_value = 20
        time = Dimension(name='time')
        f = TimeFunction(name='f', shape=shape, dimensions=[time])
        # The condition to stop incrementing
        cond = ConditionalDimension(name='cond',
                                    parent=time, condition=f[time] < stop_value)
        # First copy forward unconditionally, then increment only while below the cap
        eqs = [Eq(f.forward, f), Eq(f.forward, f.forward + 1, implicit_dims=[cond])]
        op = Operator(eqs)
        op.apply(time_M=shape[0] - 2)
        # Make the same calculation in python to assert the result
        F = np.zeros(shape[0])
        for i in range(shape[0]):
            F[i] = i if i < stop_value else stop_value
        assert np.all(f.data == F)
    def test_grouping(self):
        """
        Test that Clusters over the same set of ConditionalDimensions fall within
        the same Conditional. This is a follow up to issue #1610.
        """
        grid = Grid(shape=(10, 10))
        time = grid.time_dim
        cond = ConditionalDimension(name='cond', parent=time, condition=time < 5)
        u = TimeFunction(name='u', grid=grid, space_order=4)
        # We use a SubDomain only to keep the two Eqs separated
        eqns = [Eq(u.forward, u + 1, subdomain=grid.interior),
                Eq(u.forward, u.dx.dx + 1., implicit_dims=[cond])]
        op = Operator(eqns, opt=('advanced-fsg', {'cire-mincost-sops': 1}))
        # All guarded Clusters must be emitted under a single Conditional node
        conds = FindNodes(Conditional).visit(op)
        assert len(conds) == 1
        assert len(retrieve_iteration_tree(conds[0].then_body)) == 2
def test_stepping_dim_in_condition_lowering(self):
"""
Check that the compiler performs lowering on conditions
with TimeDimensions and generates the expected code::
if (g[t][x + 1][y + 1] <= 10){ if (g[t0][x + 1][y + 1] <= 10){
... --> ...
} }
This test increments a function by one at every timestep until it is
less-or-equal to 10 (g<=10) while although operator runs for 13 timesteps.
"""
grid = Grid(shape=(4, 4))
_, y = grid.dimensions
ths = 10
g = TimeFunction(name='g', grid=grid)
ci = ConditionalDimension(name='ci', parent=y, condition=Le(g, ths))
op = Operator(Eq(g.forward, g + 1, implicit_dims=ci))
op.apply(time_M=ths+3)
assert np.all(g.data[0, :, :] == ths)
assert np.all(g.data[1, :, :] == ths + 1)
assert 'if (g[t0][x + 1][y + 1] <= 10)\n'
'{\n g[t1][x + 1][y + 1] = g[t0][x + 1][y + 1] + 1' in str(op.ccode)
    def test_expr_like_lowering(self):
        """
        Test the lowering of an expr-like ConditionalDimension's condition.
        This test makes an Operator that should indexify and lower the condition
        passed in the Conditional Dimension
        """
        grid = Grid(shape=(3, 3))
        g1 = Function(name='g1', grid=grid)
        g2 = Function(name='g2', grid=grid)
        g1.data[:] = 0.49
        g2.data[:] = 0.49
        x, y = grid.dimensions
        # The condition holds everywhere for this (positive) data, so every
        # point of `f` gets written
        ci = ConditionalDimension(name='ci', parent=y, condition=Le((g1 + g2),
                                                                    1.01*(g1 + g2)))
        f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
        Operator(Eq(f, g1+g2)).apply()
        assert np.all(f.data[:] == g1.data[:] + g2.data[:])
    @pytest.mark.parametrize('setup_rel, rhs, c1, c2, c3, c4', [
        # Relation, RHS, c1 to c4 used as indexes in assert
        (Lt, 3, 2, 4, 4, -1), (Le, 2, 2, 4, 4, -1), (Ge, 3, 4, 6, 1, 4),
        (Gt, 2, 4, 6, 1, 4), (Ne, 5, 2, 6, 1, 2)
    ])
    def test_relational_classes(self, setup_rel, rhs, c1, c2, c3, c4):
        """
        Test ConditionalDimension using conditions based on Relations over SubDomains.
        """
        class InnerDomain(SubDomain):
            # 4x4 interior region, 2 points away from every boundary
            name = 'inner'

            def define(self, dimensions):
                return {d: ('middle', 2, 2) for d in dimensions}

        inner_domain = InnerDomain()
        grid = Grid(shape=(8, 8), subdomains=(inner_domain,))
        g = Function(name='g', grid=grid)
        g2 = Function(name='g2', grid=grid)
        # Fill the four 4x4 quadrants with the constants 1, 2, 3, 4
        for i in [g, g2]:
            i.data[:4, :4] = 1
            i.data[4:, :4] = 2
            i.data[4:, 4:] = 3
            i.data[:4, 4:] = 4
        xi, yi = grid.subdomains['inner'].dimensions
        cond = setup_rel(0.25*g + 0.75*g2, rhs, subdomain=grid.subdomains['inner'])
        ci = ConditionalDimension(name='ci', parent=yi, condition=cond)
        f = Function(name='f', shape=grid.shape, dimensions=(xi, ci))
        # eq2 overwrites with 5 only where the condition holds; elsewhere eq1's
        # (smaller) value survives
        eq1 = Eq(f, 0.4*g + 0.6*g2)
        eq2 = Eq(f, 5)
        Operator([eq1, eq2]).apply()
        assert np.all(f.data[2:6, c1:c2] == 5.)
        assert np.all(f.data[:, c3:c4] < 5.)
    def test_from_cond_to_param(self):
        """
        Test that Functions appearing in the condition of a ConditionalDimension
        but not explicitly in an Eq are actually part of the Operator input
        (stems from issue #1298).
        """
        grid = Grid(shape=(8, 8))
        x, y = grid.dimensions
        # `g` and `h` appear only inside the condition, never in an Eq
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)
        ci = ConditionalDimension(name='ci', parent=y, condition=Lt(g, 2 + h))
        f = Function(name='f', shape=grid.shape, dimensions=(x, ci))
        for _ in range(5):
            # issue #1298 was non deterministic
            Operator(Eq(f, 5)).apply()
    @skipif('device')
    def test_no_fusion_simple(self):
        """
        If ConditionalDimensions are present, then Clusters must not be fused so
        that ultimately Eqs get scheduled to different loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid)
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)
        # No ConditionalDimensions yet. Will be fused and optimized
        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1)]
        op = Operator(eqns)
        # One loop nest: a shared temporary feeds all three updates
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 4
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
        assert exprs[3].expr.rhs is exprs[0].output
        # Now with a ConditionalDimension. No fusion, no optimization
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1, implicit_dims=[ctime])]
        op = Operator(eqns)
        # The guarded Eq is split off into its own loop nest (bf1)
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 1
    @skipif('device')
    def test_no_fusion_convoluted(self):
        """
        Conceptually like `test_no_fusion_simple`, but with more expressions
        and non-trivial data flow.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid)
        g = Function(name='g', grid=grid)
        h = Function(name='h', grid=grid)
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
        # Alternating guarded/unguarded Eqs with read-after-write dependences on
        # `f`, forcing three separate loop nests (bf0, bf1, bf2)
        eqns = [Eq(f.forward, f + 1),
                Eq(h, f + 1),
                Eq(g, f + 1, implicit_dims=[ctime]),
                Eq(f.forward, f + 1, implicit_dims=[ctime]),
                Eq(f.forward, f + 1),
                Eq(g, f + 1)]
        op = Operator(eqns)
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3
        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 3
        assert exprs[1].expr.rhs is exprs[0].output
        assert exprs[2].expr.rhs is exprs[0].output
    def test_affiness(self):
        """
        Test for issue #1616.
        """
        nt = 19
        grid = Grid(shape=(11, 11))
        time = grid.time_dim
        factor = 4
        time_subsampled = ConditionalDimension('t_sub', parent=time, factor=factor)
        u = TimeFunction(name='u', grid=grid)
        usave = TimeFunction(name='usave', grid=grid, save=(nt+factor-1)//factor,
                             time_dim=time_subsampled)
        eqns = [Eq(u.forward, u + 1.), Eq(usave, u)]
        op = Operator(eqns)
        # Subsampled saving must not make the non-time Iterations non-affine
        iterations = [i for i in FindNodes(Iteration).visit(op) if i.dim is not time]
        assert all(i.is_Affine for i in iterations)
class TestMashup(object):

    """
    Check the correct functioning of the compiler in presence of many Dimension types.
    """

    def test_topofusion_w_subdims_conddims(self):
        """
        Check that topological fusion works across guarded Clusters over different
        iteration spaces and in presence of anti-dependences.

        This test uses both SubDimensions (via SubDomains) and ConditionalDimensions.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
        eqns = [Eq(f.forward, f + 1),
                Eq(g.forward, g + 1),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]
        op = Operator(eqns)
        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 3
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 2
        assert exprs[0].write is f
        assert exprs[1].write is g
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3
        assert exprs[1].write is fsave
        assert exprs[2].write is gsave
        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 1
        assert exprs[0].write is h

    def test_topofusion_w_subdims_conddims_v2(self):
        """
        Like `test_topofusion_w_subdims_conddims` but with more SubDomains,
        so we expect fewer loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
        # All unguarded Eqs now share the same SubDomain, so they can coalesce
        eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
                Eq(g.forward, g + 1, subdomain=grid.interior),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]
        op = Operator(eqns)
        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 2
        assert len(FindNodes(Expression).visit(op._func_table['bf0'].root)) == 3
        assert len(FindNodes(Expression).visit(op._func_table['bf1'].root)) == 2 + 1  # r0

    def test_topofusion_w_subdims_conddims_v3(self):
        """
        Like `test_topofusion_w_subdims_conddims_v2` but with an extra anti-dependence,
        which causes scheduling over more loop nests.
        """
        grid = Grid(shape=(4, 4, 4))
        time = grid.time_dim
        f = TimeFunction(name='f', grid=grid, time_order=2)
        g = TimeFunction(name='g', grid=grid, time_order=2)
        h = TimeFunction(name='h', grid=grid, time_order=2)
        fsave = TimeFunction(name='fsave', grid=grid, time_order=2, save=5)
        gsave = TimeFunction(name='gsave', grid=grid, time_order=2, save=5)
        ctime = ConditionalDimension(name='ctime', parent=time, condition=time > 4)
        # `h`'s RHS reads f.dt2.dx, introducing the extra anti-dependence on `f`
        eqns = [Eq(f.forward, f + 1, subdomain=grid.interior),
                Eq(g.forward, g + 1, subdomain=grid.interior),
                Eq(fsave, f.dt2, implicit_dims=[ctime]),
                Eq(h, f.dt2.dx + g, subdomain=grid.interior),
                Eq(gsave, g.dt2, implicit_dims=[ctime])]
        op = Operator(eqns)
        # Check generated code -- expect the gsave equation to be scheduled together
        # in the same loop nest with the fsave equation
        assert len(op._func_table) == 3
        exprs = FindNodes(Expression).visit(op._func_table['bf0'].root)
        assert len(exprs) == 2
        assert exprs[0].write is f
        assert exprs[1].write is g
        exprs = FindNodes(Expression).visit(op._func_table['bf1'].root)
        assert len(exprs) == 3
        assert exprs[1].write is fsave
        assert exprs[2].write is gsave
        exprs = FindNodes(Expression).visit(op._func_table['bf2'].root)
        assert len(exprs) == 2
        assert exprs[1].write is h
| |
<|file_name|>lsn.js<|end_file_name|><|fim▁begin|>$(document).ready(function() {
/* initialize the external events
-----------------------------------------------------------------*/
$('#external-events div.external-event').each(function() {
// create an Event Object (http://arshaw.com/fullcalendar/docs/event_data/Event_Object/)
// it doesn't need to have a start or end
var eventObject = {
title: $.trim($(this).text()), // use the element's text as the event title
className: $.trim($(this).attr("class").split(' ')[1]) // get the class name color[x]
};
// store the Event Object in the DOM element so we can get to it later<|fim▁hole|> $(this).data('eventObject', eventObject);
// make the event draggable using jQuery UI
$(this).draggable({
zIndex: 999,
revert: true, // will cause the event to go back to its
revertDuration: 0 // original position after the drag
});
});
/* initialize the calendar
-----------------------------------------------------------------*/
var calendar = $('#calendar').fullCalendar({
header: {
left: 'prev,next today',
center: 'title',
right: 'agendaWeek,agendaDay'
},
defaultView: 'agendaDay',
timeFormat: 'H:mm{ - H:mm}',
axisFormat: 'H:mm',
minTime: '8:00',
maxTime: '22:00',
allDaySlot: false,
monthNames: ['一月','二月','三月','四月','五月','六月','七月','八月','九月','十月','十一月','十二月'],
monthNamesShort: ['1月','2月','3月','4月','5月','6月','7月','8月','9月','10月','11月','12月'],
dayNames: ['星期日', '星期一', '星期二', '星期三', '星期四', '星期五', '星期六'],
dayNamesShort: ['周日','周一','周二','周三','周四','周五','周六'],
titleFormat: {
month: 'yyyy MMMM',
week: "yyyy'年' MMM d'日'{ '—'[ MMM] d'日' }",
day: "dddd, yyyy'年' MMM d'日'"
},
/*defaultEventMinutes: 120, */
selectable: true,
selectHelper: true,
select: function(start, end, allDay) {
var type = false;
var color = false;
var execute = function(){
$("input").each(function(){
(this.checked == true) ? type = $(this).val() : null;
(this.checked == true) ? color = $(this).attr('id') : null;
});
$("#dialog-form").dialog("close");
calendar.fullCalendar('renderEvent',
{
title: type,
start: start,
end: end,
allDay: allDay,
className: color
},
true // make the event "stick"
);
calendar.fullCalendar('unselect');
};
var cancel = function() {
$("#dialog-form").dialog("close");
}
var dialogOpts = {
modal: true,
position: "center",
buttons: {
"确定": execute,
"取消": cancel
}
};
$("#dialog-form").dialog(dialogOpts);
},
editable: true,
eventMouseover: function(event, domEvent) {
/*
for(var key in event){
$("<p>").text(key + ':' + event[key]).appendTo($("body"));
};
*/
var layer = '<div id="events-layer" class="fc-transparent" style="position:absolute; width:100%; height:100%; top:-1px; text-align:right; z-index:100"><a><img src="images/icon_edit.gif" title="edit" width="14" id="edbut'+event._id+'" border="0" style="padding-right:3px; padding-top:2px;" /></a><a><img src="images/icon_delete.png" title="delete" width="14" id="delbut'+event._id+'" border="0" style="padding-right:5px; padding-top:2px;" /></a></div>';
$(this).append(layer);
$("#delbut"+event._id).hide();
$("#delbut"+event._id).fadeIn(300);
$("#delbut"+event._id).click(function() {
calendar.fullCalendar('removeEvents', event._id);
//$.post("delete.php", {eventId: event._id});
calendar.fullCalendar('refetchEvents');
});
$("#edbut"+event._id).hide();
$("#edbut"+event._id).fadeIn(300);
$("#edbut"+event._id).click(function() {
//var title = prompt('Current Event Title: ' + event.title + '\n\nNew Event Title: ');
/*
if(title){
$.post("update_title.php", {eventId: event.id, eventTitle: title});
calendar.fullCalendar('refetchEvents');
}
*/
var type = false;
var color = false;
var execute = function(){
$("input").each(function(){
(this.checked == true) ? type = $(this).val() : null;
(this.checked == true) ? color = $(this).attr('id') : null;
});
$("#dialog-form").dialog("close");
event.title = type;
event.className = color;
calendar.fullCalendar('updateEvent', event);
calendar.fullCalendar('refetchEvents');
};
var cancel = function() {
$("#dialog-form").dialog("close");
}
var dialogOpts = {
modal: true,
position: "center",
buttons: {
"确定": execute,
"取消": cancel
}
};
$("#dialog-form").dialog(dialogOpts);
});
},
eventMouseout: function(calEvent, domEvent) {
$("#events-layer").remove();
},
droppable: true, // this allows things to be dropped onto the calendar !!!
drop: function(date, allDay) { // this function is called when something is dropped
// retrieve the dropped element's stored Event Object
var originalEventObject = $(this).data('eventObject');
// we need to copy it, so that multiple events don't have a reference to the same object
var copiedEventObject = $.extend({}, originalEventObject);
// assign it the date that was reported
copiedEventObject.start = date;
copiedEventObject.end = (date.getTime() + 7200000)/1000;
copiedEventObject.allDay = false;
// render the event on the calendar
// the last `true` argument determines if the event "sticks" (http://arshaw.com/fullcalendar/docs/event_rendering/renderEvent/)
$('#calendar').fullCalendar('renderEvent', copiedEventObject, true);
}
});
});<|fim▁end|>
| |
<|file_name|>Tron.java<|end_file_name|><|fim▁begin|>package nak.liblinear;
import static nak.liblinear.Linear.info;
/**
* Trust Region Newton Method optimization<|fim▁hole|>
private final double eps;
private final int max_iter;
    /** Builds a TRON solver with default tolerance 0.1 and 1000 max iterations. */
    public Tron( final Function fun_obj ) {
        this(fun_obj, 0.1);
    }

    /** Builds a TRON solver with the given stopping tolerance and 1000 max iterations. */
    public Tron( final Function fun_obj, double eps ) {
        this(fun_obj, eps, 1000);
    }

    /**
     * Builds a TRON solver.
     *
     * @param fun_obj objective providing value, gradient and Hessian-vector products
     * @param eps stopping tolerance on the gradient norm, relative to the initial norm
     * @param max_iter maximum number of outer Newton iterations
     */
    public Tron( final Function fun_obj, double eps, int max_iter ) {
        this.fun_obj = fun_obj;
        this.eps = eps;
        this.max_iter = max_iter;
    }
    /**
     * Minimizes the objective via the trust region Newton method, writing the
     * result into {@code w} (which is first zero-initialized).
     */
    void tron(double[] w) {
        // Parameters for updating the iterates.
        double eta0 = 1e-4, eta1 = 0.25, eta2 = 0.75;

        // Parameters for updating the trust region size delta.
        double sigma1 = 0.25, sigma2 = 0.5, sigma3 = 4;

        int n = fun_obj.get_nr_variable();
        int i, cg_iter;
        double delta, snorm, one = 1.0;
        double alpha, f, fnew, prered, actred, gs;
        int search = 1, iter = 1;
        double[] s = new double[n];
        double[] r = new double[n];
        double[] w_new = new double[n];
        double[] g = new double[n];

        for (i = 0; i < n; i++)
            w[i] = 0;

        f = fun_obj.fun(w);
        fun_obj.grad(w, g);
        delta = euclideanNorm(g);
        double gnorm1 = delta;
        double gnorm = gnorm1;

        if (gnorm <= eps * gnorm1) search = 0;

        iter = 1;

        while (iter <= max_iter && search != 0) {
            // Solve the trust region subproblem by conjugate gradient (step s, residual r)
            cg_iter = trcg(delta, g, s, r);

            System.arraycopy(w, 0, w_new, 0, n);
            daxpy(one, s, w_new);

            gs = dot(g, s);
            // Predicted reduction of the quadratic model
            prered = -0.5 * (gs - dot(s, r));
            fnew = fun_obj.fun(w_new);

            // Compute the actual reduction.
            actred = f - fnew;

            // On the first iteration, adjust the initial step bound.
            snorm = euclideanNorm(s);
            if (iter == 1) delta = Math.min(delta, snorm);

            // Compute prediction alpha*snorm of the step.
            if (fnew - f - gs <= 0)
                alpha = sigma3;
            else
                alpha = Math.max(sigma1, -0.5 * (gs / (fnew - f - gs)));

            // Update the trust region bound according to the ratio of actual to
            // predicted reduction.
            if (actred < eta0 * prered)
                delta = Math.min(Math.max(alpha, sigma1) * snorm, sigma2 * delta);
            else if (actred < eta1 * prered)
                delta = Math.max(sigma1 * delta, Math.min(alpha * snorm, sigma2 * delta));
            else if (actred < eta2 * prered)
                delta = Math.max(sigma1 * delta, Math.min(alpha * snorm, sigma3 * delta));
            else
                delta = Math.max(delta, Math.min(alpha * snorm, sigma3 * delta));

            info("iter %2d act %5.3e pre %5.3e delta %5.3e f %5.3e |g| %5.3e CG %3d%n", iter, actred, prered, delta, f, gnorm, cg_iter);

            // Accept the step only if it achieved a sufficient fraction of the
            // predicted reduction; otherwise retry with the shrunk region.
            if (actred > eta0 * prered) {
                iter++;
                System.arraycopy(w_new, 0, w, 0, n);
                f = fnew;
                fun_obj.grad(w, g);

                gnorm = euclideanNorm(g);
                if (gnorm <= eps * gnorm1) break;
            }
            if (f < -1.0e+32) {
                info("WARNING: f < -1.0e+32%n");
                break;
            }
            if (Math.abs(actred) <= 0 && prered <= 0) {
                info("WARNING: actred and prered <= 0%n");
                break;
            }
            if (Math.abs(actred) <= 1.0e-12 * Math.abs(f) && Math.abs(prered) <= 1.0e-12 * Math.abs(f)) {
                info("WARNING: actred and prered too small%n");
                break;
            }
        }
    }
    /**
     * Conjugate gradient solver for the trust region subproblem; fills the
     * step {@code s} and residual {@code r}, returning the CG iteration count.
     * If the step reaches the trust region boundary (|s| > delta), it is
     * projected back onto the sphere of radius delta and the loop terminates.
     */
    private int trcg(double delta, double[] g, double[] s, double[] r) {
        int n = fun_obj.get_nr_variable();
        double one = 1;
        double[] d = new double[n];
        double[] Hd = new double[n];
        double rTr, rnewTrnew, cgtol;

        // Start from s = 0, r = d = -g
        for (int i = 0; i < n; i++) {
            s[i] = 0;
            r[i] = -g[i];
            d[i] = r[i];
        }
        cgtol = 0.1 * euclideanNorm(g);

        int cg_iter = 0;
        rTr = dot(r, r);

        while (true) {
            if (euclideanNorm(r) <= cgtol) break;
            cg_iter++;
            fun_obj.Hv(d, Hd);

            double alpha = rTr / dot(d, Hd);
            daxpy(alpha, d, s);
            if (euclideanNorm(s) > delta) {
                info("cg reaches trust region boundary%n");
                // Undo the step, then solve for the alpha that puts s exactly
                // on the boundary (root of a quadratic in alpha)
                alpha = -alpha;
                daxpy(alpha, d, s);

                double std = dot(s, d);
                double sts = dot(s, s);
                double dtd = dot(d, d);
                double dsq = delta * delta;
                double rad = Math.sqrt(std * std + dtd * (dsq - sts));
                if (std >= 0)
                    alpha = (dsq - sts) / (std + rad);
                else
                    alpha = (rad - std) / dtd;
                daxpy(alpha, d, s);
                alpha = -alpha;
                daxpy(alpha, Hd, r);
                break;
            }
            alpha = -alpha;
            daxpy(alpha, Hd, r);
            rnewTrnew = dot(r, r);
            // Standard CG direction update: d = r + beta * d
            double beta = rnewTrnew / rTr;
            scale(beta, d);
            daxpy(one, r, d);
            rTr = rnewTrnew;
        }

        return (cg_iter);
    }
/**
* constant times a vector plus a vector
*
* <pre>
* vector2 += constant * vector1
* </pre>
*
* @since 1.8
*/
private static void daxpy(double constant, double vector1[], double vector2[]) {
if (constant == 0) return;
assert vector1.length == vector2.length;
for (int i = 0; i < vector1.length; i++) {
vector2[i] += constant * vector1[i];
}
}
/**
* returns the dot product of two vectors
*
* @since 1.8
*/
private static double dot(double vector1[], double vector2[]) {
double product = 0;
assert vector1.length == vector2.length;
for (int i = 0; i < vector1.length; i++) {
product += vector1[i] * vector2[i];
}
return product;
}
/**
* returns the euclidean norm of a vector
*
* @since 1.8
*/
private static double euclideanNorm(double vector[]) {
int n = vector.length;
if (n < 1) {
return 0;
}
if (n == 1) {
return Math.abs(vector[0]);
}
// this algorithm is (often) more accurate than just summing up the squares and taking the square-root afterwards
double scale = 0; // scaling factor that is factored out
double sum = 1; // basic sum of squares from which scale has been factored out
for (int i = 0; i < n; i++) {
if (vector[i] != 0) {
double abs = Math.abs(vector[i]);
// try to get the best scaling factor
if (scale < abs) {
double t = scale / abs;
sum = 1 + sum * (t * t);
scale = abs;
} else {
double t = abs / scale;
sum += t * t;
}
}
}
return scale * Math.sqrt(sum);
}
/**
* scales a vector by a constant
*
* @since 1.8
*/
private static void scale(double constant, double vector[]) {
if (constant == 1.0) return;
for (int i = 0; i < vector.length; i++) {
vector[i] *= constant;
}
}
}<|fim▁end|>
|
*/
class Tron {
private final Function fun_obj;
|
<|file_name|>home.py<|end_file_name|><|fim▁begin|><|fim▁hole|>
The home manual page is at:
http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home
==Operation==
The default 'Activate Home' checkbox is on. When it is on, the functions described below will work, when it is off, nothing will be done.
==Settings==
===Name of Home File===
Default: home.gcode
At the beginning of each layer, home will add the commands of a gcode script with the name of the "Name of Home File" setting, if one exists.  Home does not care if the text file names are capitalized, but some file systems do not handle file name cases properly, so to be on the safe side you should give them lower case names.  Home looks for those files in the alterations folder in the .skeinforge folder in the home directory.  If it doesn't find the file, it then looks in the alterations folder in the skeinforge_plugins folder.
==Examples==
The following examples home the file Screw Holder Bottom.stl. The examples are run in a terminal in the folder which contains Screw Holder Bottom.stl and home.py.
> python home.py
This brings up the home dialog.
> python home.py Screw Holder Bottom.stl
The home tool is parsing the file:
Screw Holder Bottom.stl
..
The home tool has created the file:
.. Screw Holder Bottom_home.gcode
"""
from __future__ import absolute_import
#Init has to be imported first because it has code to workaround the python bug where relative imports don't work if the module is imported as a main module.
import __init__
from fabmetheus_utilities.fabmetheus_tools import fabmetheus_interpret
from fabmetheus_utilities.vector3 import Vector3
from fabmetheus_utilities import archive
from fabmetheus_utilities import euclidean
from fabmetheus_utilities import gcodec
from fabmetheus_utilities import settings
from skeinforge_application.skeinforge_utilities import skeinforge_craft
from skeinforge_application.skeinforge_utilities import skeinforge_polyfile
from skeinforge_application.skeinforge_utilities import skeinforge_profile
import math
import os
import sys
__author__ = 'Enrique Perez ([email protected])'
__date__ = '$Date: 2008/21/04 $'
__license__ = 'GNU Affero General Public License http://www.gnu.org/licenses/agpl.html'
def getCraftedText( fileName, text, repository = None ):
	"Home a gcode linear move file or text."
	# `text` takes precedence; the file is only read when `text` is empty
	return getCraftedTextFromText(archive.getTextIfEmpty(fileName, text), repository)
def getCraftedTextFromText( gcodeText, repository = None ):
	"Home a gcode linear move text."
	# Nothing to do if this text has already been through the home procedure.
	if gcodec.isProcedureDoneOrFileIsEmpty( gcodeText, 'home'):
		return gcodeText
	activeRepository = repository
	if activeRepository == None:
		# Fall back to the settings stored on disk.
		activeRepository = settings.getReadRepository( HomeRepository() )
	if not activeRepository.activateHome.value:
		return gcodeText
	return HomeSkein().getCraftedGcode(gcodeText, activeRepository)
def getNewRepository():
	'Get new repository.'
	# Factory used by the skeinforge settings framework
	return HomeRepository()
def writeOutput(fileName, shouldAnalyze=True):
	"Home a gcode linear move file.  Chain home the gcode if it is not already homed."
	# Delegates to the craft chain so earlier procedures run first if needed
	skeinforge_craft.writeChainTextWithNounMessage(fileName, 'home', shouldAnalyze)
class HomeRepository:
	"A class to handle the home settings."
	def __init__(self):
		"Set the default settings, execute title & settings fileName."
		skeinforge_profile.addListsToCraftTypeRepository('skeinforge_application.skeinforge_plugins.craft_plugins.home.html', self)
		self.fileNameInput = settings.FileNameInput().getFromFileName( fabmetheus_interpret.getGNUTranslatorGcodeFileTypeTuples(), 'Open File for Home', self, '')
		self.openWikiManualHelpPage = settings.HelpPage().getOpenFromAbsolute('http://fabmetheus.crsndoo.com/wiki/index.php/Skeinforge_Home')
		# Master switch for the whole plugin
		self.activateHome = settings.BooleanSetting().getFromValue('Activate Home', self, True )
		# Name of the alteration gcode script inserted at each layer start
		self.nameOfHomeFile = settings.StringSetting().getFromValue('Name of Home File:', self, 'home.gcode')
		self.executeTitle = 'Home'
	def execute(self):
		"Home button has been clicked."
		fileNames = skeinforge_polyfile.getFileOrDirectoryTypesUnmodifiedGcode(self.fileNameInput.value, fabmetheus_interpret.getImportPluginFileNames(), self.fileNameInput.wasCancelled)
		for fileName in fileNames:
			writeOutput(fileName)
class HomeSkein:
	"A class to home a skein of extrusions."
	def __init__(self):
		self.distanceFeedRate = gcodec.DistanceFeedRate()  # accumulates the output gcode
		self.extruderActive = False  # True between M101 and M103
		self.highestZ = None  # highest z seen so far, used for safe hops
		self.homeLines = []  # lines of the alteration home file
		self.layerCount = settings.LayerCount()
		self.lineIndex = 0
		self.lines = None
		self.oldLocation = None  # last parsed G1 location
		self.shouldHome = False  # set at each layer start when homeLines exist
		self.travelFeedRateMinute = 957.0
	def addFloat( self, begin, end ):
		"Add dive to the original height."
		# Move to just short of `end` (one edge width away) at the highest z
		beginEndDistance = begin.distance(end)
		alongWay = self.absoluteEdgeWidth / beginEndDistance
		closeToEnd = euclidean.getIntermediateLocation( alongWay, end, begin )
		closeToEnd.z = self.highestZ
		self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, closeToEnd.dropAxis(), closeToEnd.z ) )
	def addHomeTravel( self, splitLine ):
		"Add the home travel gcode."
		location = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
		self.highestZ = max( self.highestZ, location.z )
		if not self.shouldHome:
			return
		self.shouldHome = False
		if self.oldLocation == None:
			return
		if self.extruderActive:
			self.distanceFeedRate.addLine('M103')  # extruder off while homing
		self.addHopUp( self.oldLocation )
		self.distanceFeedRate.addLinesSetAbsoluteDistanceMode(self.homeLines)
		self.addHopUp( self.oldLocation )
		self.addFloat( self.oldLocation, location )
		if self.extruderActive:
			self.distanceFeedRate.addLine('M101')  # extruder back on
	def addHopUp(self, location):
		"Add hop to highest point."
		locationUp = Vector3( location.x, location.y, self.highestZ )
		self.distanceFeedRate.addLine( self.distanceFeedRate.getLinearGcodeMovementWithFeedRate( self.travelFeedRateMinute, locationUp.dropAxis(), locationUp.z ) )
	def getCraftedGcode( self, gcodeText, repository ):
		"Parse gcode text and store the home gcode."
		self.repository = repository
		self.homeLines = settings.getAlterationFileLines(repository.nameOfHomeFile.value)
		if len(self.homeLines) < 1:
			# No alteration file found, so there is nothing to insert
			return gcodeText
		self.lines = archive.getTextLines(gcodeText)
		self.parseInitialization( repository )
		for self.lineIndex in xrange(self.lineIndex, len(self.lines)):
			line = self.lines[self.lineIndex]
			self.parseLine(line)
		return self.distanceFeedRate.output.getvalue()
	def parseInitialization( self, repository ):
		'Parse gcode initialization and store the parameters.'
		for self.lineIndex in xrange(len(self.lines)):
			line = self.lines[self.lineIndex]
			splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
			firstWord = gcodec.getFirstWord(splitLine)
			self.distanceFeedRate.parseSplitLine(firstWord, splitLine)
			if firstWord == '(</extruderInitialization>)':
				self.distanceFeedRate.addTagBracketedProcedure('home')
				return
			elif firstWord == '(<edgeWidth>':
				self.absoluteEdgeWidth = abs(float(splitLine[1]))
			elif firstWord == '(<travelFeedRatePerSecond>':
				self.travelFeedRateMinute = 60.0 * float(splitLine[1])
			self.distanceFeedRate.addLine(line)
	def parseLine(self, line):
		"Parse a gcode line and add it to the bevel gcode."
		splitLine = gcodec.getSplitLineBeforeBracketSemicolon(line)
		if len(splitLine) < 1:
			return
		firstWord = splitLine[0]
		if firstWord == 'G1':
			self.addHomeTravel(splitLine)
			self.oldLocation = gcodec.getLocationFromSplitLine(self.oldLocation, splitLine)
		elif firstWord == '(<layer>':
			self.layerCount.printProgressIncrement('home')
			if len(self.homeLines) > 0:
				self.shouldHome = True  # home at the start of every layer
		elif firstWord == 'M101':
			self.extruderActive = True
		elif firstWord == 'M103':
			self.extruderActive = False
		self.distanceFeedRate.addLine(line)
def main():
    "Display the home dialog."
    if len(sys.argv) > 1:
        # Arguments are joined into a single file name and crafted directly.
        writeOutput(' '.join(sys.argv[1 :]))
    else:
        # No arguments: open the interactive settings dialog.
        settings.startMainLoopFromConstructor(getNewRepository())

if __name__ == "__main__":
    main()
|
"""
This page is in the table of contents.
Plugin to home the tool at beginning of each layer.
|
<|file_name|>json11.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) 2013 Dropbox, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "json11.hpp"
#include <cassert>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <limits>
namespace json11 {
static const int max_depth = 200;
using std::string;
using std::vector;
using std::multimap;
using std::make_shared;
using std::initializer_list;
using std::move;
/* Helper for representing null - just a do-nothing struct, plus comparison
* operators so the helpers in JsonValue work. We can't use nullptr_t because
* it may not be orderable.
*/
// Do-nothing payload type for JSON null; the comparison operators let it be
// used as the T of Value<Json::NUL, NullStruct>.
struct NullStruct {
    bool operator==(NullStruct) const { return true; }
    bool operator<(NullStruct) const { return false; }
};
/* * * * * * * * * * * * * * * * * * * *
* Serialization
*/
// Serialize null.
static void dump(NullStruct, string &out) {
    out += "null";
}
// Serialize a double. "%.17g" emits enough digits to round-trip any IEEE-754
// double; NaN/Inf have no JSON representation and are emitted as null.
static void dump(double value, string &out) {
    if (std::isfinite(value)) {
        char buf[32];
        snprintf(buf, sizeof buf, "%.17g", value);
        out += buf;
    } else {
        out += "null";
    }
}
// Serialize an int as its decimal representation.
static void dump(int value, string &out) {
    out += std::to_string(value);
}
// Serialize a boolean.
static void dump(bool value, string &out) {
    out += value ? "true" : "false";
}
// Serialize a string with JSON escaping.
//
// Control characters (<= 0x1f) are emitted as \uXXXX escapes. The UTF-8
// encodings of U+2028/U+2029 (the JavaScript line separators) are escaped so
// the output can be safely embedded in JavaScript source.
//
// Bug fix: the original code read value[i+1] and value[i+2] without a bounds
// check when it saw a 0xE2 lead byte. For a string ending in 0xE2 that reads
// value[size()+1], which is undefined behavior (operator[] is only defined up
// to index size()). The `i + 2 < value.length()` guards below make the
// lookahead safe; for well-formed input the output is unchanged.
static void dump(const string &value, string &out) {
    out += '"';
    for (size_t i = 0; i < value.length(); i++) {
        const char ch = value[i];
        if (ch == '\\') {
            out += "\\\\";
        } else if (ch == '"') {
            out += "\\\"";
        } else if (ch == '\b') {
            out += "\\b";
        } else if (ch == '\f') {
            out += "\\f";
        } else if (ch == '\n') {
            out += "\\n";
        } else if (ch == '\r') {
            out += "\\r";
        } else if (ch == '\t') {
            out += "\\t";
        } else if (static_cast<uint8_t>(ch) <= 0x1f) {
            char buf[8];
            snprintf(buf, sizeof buf, "\\u%04x", ch);
            out += buf;
        } else if (i + 2 < value.length()
                   && static_cast<uint8_t>(ch) == 0xe2
                   && static_cast<uint8_t>(value[i+1]) == 0x80
                   && static_cast<uint8_t>(value[i+2]) == 0xa8) {
            out += "\\u2028";
            i += 2;
        } else if (i + 2 < value.length()
                   && static_cast<uint8_t>(ch) == 0xe2
                   && static_cast<uint8_t>(value[i+1]) == 0x80
                   && static_cast<uint8_t>(value[i+2]) == 0xa9) {
            out += "\\u2029";
            i += 2;
        } else {
            out += ch;
        }
    }
    out += '"';
}
// Serialize an array as "[e1, e2, ...]", delegating each element to
// Json::dump.
static void dump(const Json::array &values, string &out) {
    bool first = true;
    out += "[";
    for (const auto &value : values) {
        if (!first)
            out += ", ";
        value.dump(out);
        first = false;
    }
    out += "]";
}
// Serialize an object as "{k1: v1, ...}"; keys go through the string dump
// above so they are properly quoted and escaped.
static void dump(const Json::object &values, string &out) {
    bool first = true;
    out += "{";
    for (const auto &kv : values) {
        if (!first)
            out += ", ";
        dump(kv.first, out);
        out += ": ";
        kv.second.dump(out);
        first = false;
    }
    out += "}";
}
// Serialize this value by virtual dispatch to the concrete JsonValue.
void Json::dump(string &out) const {
    m_ptr->dump(out);
}
/* * * * * * * * * * * * * * * * * * * *
* Value wrappers
*/
/* Generic immutable wrapper: one payload of type T tagged with a Json::Type.
 * equals()/less() may static_cast `other` unchecked because Json's comparison
 * operators only call them after verifying the two tags match.
 */
template <Json::Type tag, typename T>
class Value : public JsonValue {
protected:

    // Constructors
    explicit Value(const T &value) : m_value(value) {}
    explicit Value(T &&value)      : m_value(move(value)) {}

    // Get type tag
    Json::Type type() const override {
        return tag;
    }

    // Comparisons
    bool equals(const JsonValue * other) const override {
        return m_value == static_cast<const Value<tag, T> *>(other)->m_value;
    }
    bool less(const JsonValue * other) const override {
        return m_value < static_cast<const Value<tag, T> *>(other)->m_value;
    }

    const T m_value;
    void dump(string &out) const override { json11::dump(m_value, out); }
};
// Double-backed number. equals/less are overridden to compare via
// number_value() so an int and a double holding the same value compare equal.
class JsonDouble final : public Value<Json::NUMBER, double> {
    double number_value() const override { return m_value; }
    int int_value() const override { return static_cast<int>(m_value); }
    bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
    bool less(const JsonValue * other)   const override { return m_value <  other->number_value(); }
public:
    explicit JsonDouble(double value) : Value(value) {}
};
// Int-backed number; shares the NUMBER tag with JsonDouble and compares
// through number_value() for cross-representation equality.
class JsonInt final : public Value<Json::NUMBER, int> {
    double number_value() const override { return m_value; }
    int int_value() const override { return m_value; }
    bool equals(const JsonValue * other) const override { return m_value == other->number_value(); }
    bool less(const JsonValue * other)   const override { return m_value <  other->number_value(); }
public:
    explicit JsonInt(int value) : Value(value) {}
};
// Boolean value.
class JsonBoolean final : public Value<Json::BOOL, bool> {
    bool bool_value() const override { return m_value; }
public:
    explicit JsonBoolean(bool value) : Value(value) {}
};
// String value.
class JsonString final : public Value<Json::STRING, string> {
    const string &string_value() const override { return m_value; }
public:
    explicit JsonString(const string &value) : Value(value) {}
    explicit JsonString(string &&value)      : Value(move(value)) {}
};
// Array value; operator[] (defined below) returns null for out-of-range.
class JsonArray final : public Value<Json::ARRAY, Json::array> {
    const Json::array &array_items() const override { return m_value; }
    const Json & operator[](size_t i) const override;
public:
    explicit JsonArray(const Json::array &value) : Value(value) {}
    explicit JsonArray(Json::array &&value)      : Value(move(value)) {}
};
// Object value; operator[] (defined below) returns null for missing keys.
class JsonObject final : public Value<Json::OBJECT, Json::object> {
    const Json::object &object_items() const override { return m_value; }
    const Json & operator[](const string &key) const override;
public:
    explicit JsonObject(const Json::object &value) : Value(value) {}
    explicit JsonObject(Json::object &&value)      : Value(move(value)) {}
};
// The null value; a single shared instance lives in Statics below.
class JsonNull final : public Value<Json::NUL, NullStruct> {
public:
    JsonNull() : Value({}) {}
};
/* * * * * * * * * * * * * * * * * * * *
* Static globals - static-init-safe
*/
// Shared singleton values (null, true, false, empty containers) constructed
// lazily inside statics(), so they are safe to use during static
// initialization of other translation units.
struct Statics {
    const std::shared_ptr<JsonValue> null = make_shared<JsonNull>();
    const std::shared_ptr<JsonValue> t = make_shared<JsonBoolean>(true);
    const std::shared_ptr<JsonValue> f = make_shared<JsonBoolean>(false);
    const string empty_string;
    const vector<Json> empty_vector;
    const multimap<string, Json> empty_map;
    Statics() {}
};
// Meyers singleton: the function-local static gives thread-safe lazy init.
static const Statics & statics() {
    static const Statics s {};
    return s;
}
// Shared null Json returned by the out-of-range/missing-key accessors.
static const Json & static_null() {
    // This has to be separate, not in Statics, because Json() accesses statics().null.
    static const Json json_null;
    return json_null;
}
/* * * * * * * * * * * * * * * * * * * *
* Constructors
*/
/* Each constructor wraps its argument in the matching JsonValue subclass;
 * null and booleans reuse the shared singletons instead of allocating.
 */
Json::Json() noexcept                  : m_ptr(statics().null) {}
Json::Json(std::nullptr_t) noexcept    : m_ptr(statics().null) {}
Json::Json(double value)               : m_ptr(make_shared<JsonDouble>(value)) {}
Json::Json(int value)                  : m_ptr(make_shared<JsonInt>(value)) {}
Json::Json(bool value)                 : m_ptr(value ? statics().t : statics().f) {}
Json::Json(const string &value)        : m_ptr(make_shared<JsonString>(value)) {}
Json::Json(string &&value)             : m_ptr(make_shared<JsonString>(move(value))) {}
Json::Json(const char * value)         : m_ptr(make_shared<JsonString>(value)) {}
Json::Json(const Json::array &values)  : m_ptr(make_shared<JsonArray>(values)) {}
Json::Json(Json::array &&values)       : m_ptr(make_shared<JsonArray>(move(values))) {}
Json::Json(const Json::object &values) : m_ptr(make_shared<JsonObject>(values)) {}
Json::Json(Json::object &&values)      : m_ptr(make_shared<JsonObject>(move(values))) {}
/* * * * * * * * * * * * * * * * * * * *
* Accessors
*/
/* Json accessors forward to the concrete JsonValue. The JsonValue defaults
 * below make a mismatched accessor (e.g. string_value() on a number) return
 * a harmless zero/empty/null value instead of failing.
 */
Json::Type Json::type()                           const { return m_ptr->type();         }
double Json::number_value()                       const { return m_ptr->number_value(); }
int Json::int_value()                             const { return m_ptr->int_value();    }
bool Json::bool_value()                           const { return m_ptr->bool_value();   }
const string & Json::string_value()               const { return m_ptr->string_value(); }
const vector<Json> & Json::array_items()          const { return m_ptr->array_items();  }
const multimap<string, Json> & Json::object_items() const { return m_ptr->object_items(); }
const Json & Json::operator[] (size_t i)          const { return (*m_ptr)[i];           }
const Json & Json::operator[] (const string &key) const { return (*m_ptr)[key];         }

// Fallbacks used when the dynamic type doesn't support the accessor.
double                         JsonValue::number_value()              const { return 0; }
int                            JsonValue::int_value()                 const { return 0; }
bool                           JsonValue::bool_value()                const { return false; }
const string &                 JsonValue::string_value()              const { return statics().empty_string; }
const vector<Json> &           JsonValue::array_items()               const { return statics().empty_vector; }
const multimap<string, Json> & JsonValue::object_items()              const { return statics().empty_map; }
const Json &                   JsonValue::operator[] (size_t)         const { return static_null(); }
const Json &                   JsonValue::operator[] (const string &) const { return static_null(); }
// Keyed lookup; missing keys yield the shared null value. Since the backing
// container is a multimap, the first entry for a duplicated key wins.
const Json & JsonObject::operator[] (const string &key) const {
    auto iter = m_value.find(key);
    return (iter == m_value.end()) ? static_null() : iter->second;
}
// Indexed lookup; out-of-range indices yield the shared null value.
const Json & JsonArray::operator[] (size_t i) const {
    if (i >= m_value.size()) return static_null();
    else return m_value[i];
}
/* * * * * * * * * * * * * * * * * * * *
* Comparison
*/
bool Json::operator== (const Json &other) const {
    // Values sharing the same heap object are trivially equal; values with
    // different type tags can never be equal. Otherwise defer to the
    // tag-specific comparison.
    if (m_ptr == other.m_ptr)
        return true;
    return m_ptr->type() == other.m_ptr->type()
        && m_ptr->equals(other.m_ptr.get());
}
bool Json::operator< (const Json &other) const {
    // A value never compares less than itself (same heap object).
    if (m_ptr == other.m_ptr)
        return false;
    // Order first by type tag, then by the tag-specific comparison.
    const Json::Type lhs = m_ptr->type();
    const Json::Type rhs = other.m_ptr->type();
    if (lhs != rhs)
        return lhs < rhs;
    return m_ptr->less(other.m_ptr.get());
}
/* * * * * * * * * * * * * * * * * * * *
* Parsing
*/
/* esc(c)
*
* Format char c suitable for printing in an error message.
*/
/* esc(c)
 *
 * Format char c suitable for printing in an error message: printable ASCII
 * is shown as the character plus its code, everything else as the code only.
 */
static inline string esc(char c) {
    char buf[12];
    const bool printable =
        static_cast<uint8_t>(c) >= 0x20 && static_cast<uint8_t>(c) <= 0x7f;
    if (printable)
        snprintf(buf, sizeof buf, "'%c' (%d)", c, c);
    else
        snprintf(buf, sizeof buf, "(%d)", c);
    return string(buf);
}
// True iff x lies in the inclusive range [lower, upper].
static inline bool in_range(long x, long lower, long upper) {
    return lower <= x && x <= upper;
}
namespace {
/* JsonParser
*
* Object that tracks all state of an in-progress parse.
*/
struct JsonParser final {

    /* State
     */
    const string &str;         // input text being parsed
    size_t i;                  // current read position in str
    string &err;               // receives the first error message on failure
    bool failed;               // sticky failure flag; checked after each step
    const JsonParse strategy;  // STANDARD, or COMMENTS to allow // and /* */
    /* fail(msg, err_ret = Json())
     *
     * Mark this parse as failed.
     */
    Json fail(string &&msg) {
        return fail(move(msg), Json());
    }

    // Only the first failure's message is kept; subsequent ones are dropped.
    template <typename T>
    T fail(string &&msg, const T err_ret) {
        if (!failed)
            err = std::move(msg);
        failed = true;
        return err_ret;
    }
    /* consume_whitespace()
     *
     * Advance until the current character is non-whitespace.
     * Safe at end of input: std::string::operator[](size()) returns '\0',
     * which is not one of the tested characters.
     */
    void consume_whitespace() {
        while (str[i] == ' ' || str[i] == '\r' || str[i] == '\n' || str[i] == '\t')
            i++;
    }
    /* consume_comment()
     *
     * Advance comments (c-style inline and multiline).
     * Returns true if a comment was consumed; on malformed or truncated
     * comments it calls fail() and returns false.
     */
    bool consume_comment() {
      bool comment_found = false;
      if (str[i] == '/') {
        i++;
        if (i == str.size())
          return fail("unexpected end of input after start of comment", false);
        if (str[i] == '/') { // inline comment
          i++;
          // advance until next line, or end of input
          while (i < str.size() && str[i] != '\n') {
            i++;
          }
          comment_found = true;
        }
        else if (str[i] == '*') { // multiline comment
          i++;
          // Needs at least two more characters for the closing "*/".
          if (i > str.size()-2)
            return fail("unexpected end of input inside multi-line comment", false);
          // advance until closing tokens
          while (!(str[i] == '*' && str[i+1] == '/')) {
            i++;
            if (i > str.size()-2)
              return fail(
                "unexpected end of input inside multi-line comment", false);
          }
          i += 2;
          comment_found = true;
        }
        else
          return fail("malformed comment", false);
      }
      return comment_found;
    }
    /* consume_garbage()
     *
     * Advance until the current character is non-whitespace and non-comment.
     * Comments are only recognized under the COMMENTS parse strategy.
     */
    void consume_garbage() {
      consume_whitespace();
      if(strategy == JsonParse::COMMENTS) {
        bool comment_found = false;
        do {
          comment_found = consume_comment();
          if (failed) return;
          consume_whitespace();
        }
        while(comment_found);
      }
    }
    /* get_next_token()
     *
     * Return the next non-whitespace character. If the end of the input is reached,
     * flag an error and return 0.
     */
    char get_next_token() {
        consume_garbage();
        if (failed) return (char)0;
        if (i == str.size())
            return fail("unexpected end of input", (char)0);

        return str[i++];
    }
    /* encode_utf8(pt, out)
     *
     * Encode pt as UTF-8 and add it to out. Negative codepoints (the "no
     * pending codepoint" sentinel) are ignored.
     */
    void encode_utf8(long pt, string & out) {
        if (pt < 0)
            return;

        if (pt < 0x80) {
            // 1 byte (ASCII)
            out += static_cast<char>(pt);
        } else if (pt < 0x800) {
            // 2-byte sequence
            out += static_cast<char>((pt >> 6) | 0xC0);
            out += static_cast<char>((pt & 0x3F) | 0x80);
        } else if (pt < 0x10000) {
            // 3-byte sequence
            out += static_cast<char>((pt >> 12) | 0xE0);
            out += static_cast<char>(((pt >> 6) & 0x3F) | 0x80);
            out += static_cast<char>((pt & 0x3F) | 0x80);
        } else {
            // 4-byte sequence (astral plane)
            out += static_cast<char>((pt >> 18) | 0xF0);
            out += static_cast<char>(((pt >> 12) & 0x3F) | 0x80);
            out += static_cast<char>(((pt >> 6) & 0x3F) | 0x80);
            out += static_cast<char>((pt & 0x3F) | 0x80);
        }
    }
    /* parse_string()
     *
     * Parse a string, starting at the current position (just past the
     * opening quote).
     *
     * last_escaped_codepoint buffers a pending \uXXXX codepoint so that a
     * following low surrogate can be combined with a preceding high
     * surrogate before being UTF-8 encoded.
     */
    string parse_string() {
        string out;
        long last_escaped_codepoint = -1;
        while (true) {
            if (i == str.size())
                return fail("unexpected end of input in string", "");

            char ch = str[i++];

            if (ch == '"') {
                // Flush any pending codepoint before closing.
                encode_utf8(last_escaped_codepoint, out);
                return out;
            }

            if (in_range(ch, 0, 0x1f))
                return fail("unescaped " + esc(ch) + " in string", "");

            // The usual case: non-escaped characters
            if (ch != '\\') {
                encode_utf8(last_escaped_codepoint, out);
                last_escaped_codepoint = -1;
                out += ch;
                continue;
            }

            // Handle escapes
            if (i == str.size())
                return fail("unexpected end of input in string", "");

            ch = str[i++];

            if (ch == 'u') {
                // Extract 4-byte escape sequence
                string esc = str.substr(i, 4);
                // Explicitly check length of the substring. The following loop
                // relies on std::string returning the terminating NUL when
                // accessing str[length]. Checking here reduces brittleness.
                if (esc.length() < 4) {
                    return fail("bad \\u escape: " + esc, "");
                }
                for (size_t j = 0; j < 4; j++) {
                    if (!in_range(esc[j], 'a', 'f') && !in_range(esc[j], 'A', 'F')
                            && !in_range(esc[j], '0', '9'))
                        return fail("bad \\u escape: " + esc, "");
                }

                long codepoint = strtol(esc.data(), nullptr, 16);

                // JSON specifies that characters outside the BMP shall be encoded as a pair
                // of 4-hex-digit \u escapes encoding their surrogate pair components. Check
                // whether we're in the middle of such a beast: the previous codepoint was an
                // escaped lead (high) surrogate, and this is a trail (low) surrogate.
                if (in_range(last_escaped_codepoint, 0xD800, 0xDBFF)
                        && in_range(codepoint, 0xDC00, 0xDFFF)) {
                    // Reassemble the two surrogate pairs into one astral-plane character, per
                    // the UTF-16 algorithm.
                    encode_utf8((((last_escaped_codepoint - 0xD800) << 10)
                                 | (codepoint - 0xDC00)) + 0x10000, out);
                    last_escaped_codepoint = -1;
                } else {
                    encode_utf8(last_escaped_codepoint, out);
                    last_escaped_codepoint = codepoint;
                }

                i += 4;
                continue;
            }

            // Any non-\u escape terminates a pending surrogate.
            encode_utf8(last_escaped_codepoint, out);
            last_escaped_codepoint = -1;

            if (ch == 'b') {
                out += '\b';
            } else if (ch == 'f') {
                out += '\f';
            } else if (ch == 'n') {
                out += '\n';
            } else if (ch == 'r') {
                out += '\r';
            } else if (ch == 't') {
                out += '\t';
            } else if (ch == '"' || ch == '\\' || ch == '/') {
                out += ch;
            } else {
                return fail("invalid escape character " + esc(ch), "");
            }
        }
    }
    /* parse_number()
     *
     * Parse a number, starting at the current position. Integer literals
     * short enough to be exactly representable are returned as an int Json;
     * everything else is parsed as a double.
     */
    Json parse_number() {
        size_t start_pos = i;

        if (str[i] == '-')
            i++;

        // Integer part
        if (str[i] == '0') {
            i++;
            if (in_range(str[i], '0', '9'))
                return fail("leading 0s not permitted in numbers");
        } else if (in_range(str[i], '1', '9')) {
            i++;
            while (in_range(str[i], '0', '9'))
                i++;
        } else {
            return fail("invalid " + esc(str[i]) + " in number");
        }

        // Fast path: a plain integer whose digit count fits an int safely.
        if (str[i] != '.' && str[i] != 'e' && str[i] != 'E'
                && (i - start_pos) <= static_cast<size_t>(std::numeric_limits<int>::digits10)) {
            return std::atoi(str.c_str() + start_pos);
        }

        // Decimal part
        if (str[i] == '.') {
            i++;
            if (!in_range(str[i], '0', '9'))
                return fail("at least one digit required in fractional part");

            while (in_range(str[i], '0', '9'))
                i++;
        }

        // Exponent part
        if (str[i] == 'e' || str[i] == 'E') {
            i++;

            if (str[i] == '+' || str[i] == '-')
                i++;

            if (!in_range(str[i], '0', '9'))
                return fail("at least one digit required in exponent");

            while (in_range(str[i], '0', '9'))
                i++;
        }

        return std::strtod(str.c_str() + start_pos, nullptr);
    }
    /* expect(str, res)
     *
     * Expect that 'str' starts at the character that was just read. If it does, advance
     * the input and return res. If not, flag an error.
     */
    Json expect(const string &expected, Json res) {
        assert(i != 0);
        // Step back over the character the caller already consumed.
        i--;
        if (str.compare(i, expected.length(), expected) == 0) {
            i += expected.length();
            return res;
        } else {
            return fail("parse error: expected " + expected + ", got " + str.substr(i, expected.length()));
        }
    }
    /* parse_json()
     *
     * Parse a JSON object. `depth` guards against stack exhaustion on deeply
     * nested input (limit: max_depth).
     */
    Json parse_json(int depth) {
        if (depth > max_depth) {
            return fail("exceeded maximum nesting depth");
        }

        char ch = get_next_token();
        if (failed)
            return Json();

        if (ch == '-' || (ch >= '0' && ch <= '9')) {
            // Put the first digit/sign back for parse_number.
            i--;
            return parse_number();
        }

        if (ch == 't')
            return expect("true", true);

        if (ch == 'f')
            return expect("false", false);

        if (ch == 'n')
            return expect("null", Json());

        if (ch == '"')
            return parse_string();

        if (ch == '{') {
            // NOTE: this fork stores object members in a multimap, so
            // duplicate keys are all retained.
            multimap<string, Json> data;
            ch = get_next_token();
            if (ch == '}')
                return data;

            while (1) {
                if (ch != '"')
                    return fail("expected '\"' in object, got " + esc(ch));

                string key = parse_string();
                if (failed)
                    return Json();

                ch = get_next_token();
                if (ch != ':')
                    return fail("expected ':' in object, got " + esc(ch));

                data.insert(std::make_pair(std::move(key), parse_json(depth + 1)));
                if (failed)
                    return Json();

                ch = get_next_token();
                if (ch == '}')
                    break;
                if (ch != ',')
                    return fail("expected ',' in object, got " + esc(ch));

                ch = get_next_token();
            }
            return data;
        }

        if (ch == '[') {
            vector<Json> data;
            ch = get_next_token();
            if (ch == ']')
                return data;

            while (1) {
                // Put the element's first character back for parse_json.
                i--;
                data.push_back(parse_json(depth + 1));
                if (failed)
                    return Json();

                ch = get_next_token();
                if (ch == ']')
                    break;
                if (ch != ',')
                    return fail("expected ',' in list, got " + esc(ch));

                ch = get_next_token();
                (void)ch;
            }
            return data;
        }

        return fail("expected value, got " + esc(ch));
    }
};
}//namespace {
// Parse a single JSON document; anything but trailing whitespace/comments
// after the document is an error.
Json Json::parse(const string &in, string &err, JsonParse strategy) {
    JsonParser parser { in, 0, err, false, strategy };
    Json result = parser.parse_json(0);

    // Check for any trailing garbage
    parser.consume_garbage();
    if (parser.failed)
        return Json();
    if (parser.i != in.size())
        return parser.fail("unexpected trailing " + esc(in[parser.i]));

    return result;
}
// Documented in json11.hpp
// Documented in json11.hpp
// Parses a sequence of concatenated JSON documents; parser_stop_pos is
// updated to the end of the last successfully parsed document.
vector<Json> Json::parse_multi(const string &in,
                               std::string::size_type &parser_stop_pos,
                               string &err,
                               JsonParse strategy) {
    JsonParser parser { in, 0, err, false, strategy };
    parser_stop_pos = 0;
    vector<Json> json_vec;
    while (parser.i != in.size() && !parser.failed) {
        json_vec.push_back(parser.parse_json(0));
        if (parser.failed)
            break;

        // Check for another object
        parser.consume_garbage();
        if (parser.failed)
            break;
        parser_stop_pos = parser.i;
    }
    return json_vec;
}
/* * * * * * * * * * * * * * * * * * * *
* Shape-checking
*/
// Check that this value is an object whose members match the expected
// (key, type) pairs. A missing key reads as NUL via operator[], so it only
// passes if NUL is the expected type.
bool Json::has_shape(const shape & types, string & err) const {
    if (!is_object()) {
        err = "expected JSON object, got " + dump();
        return false;
    }

    for (auto & item : types) {
        if ((*this)[item.first].type() != item.second) {
            err = "bad type for " + item.first + " in " + dump();
            return false;
        }
    }

    return true;

}
} // namespace json11
|
<|file_name|>packer.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
import sys, os, os.path, signal
import jsshellhelper
from optparse import OptionParser<|fim▁hole|>
class Packer(object):
toolsdir = os.path.dirname(os.path.abspath(__file__))
def run(self, jsshell, filename):
tmpFile = jsshellhelper.createEscapedFile(filename)
cmd = [jsshell,
'-f', os.path.join(self.toolsdir, 'packer.js'),
'-f', os.path.join(self.toolsdir, 'cleaner.js'),
'-f', tmpFile,
'-e', "var input = __unescape_string(); print(pack(input, 62, 1, 0));"]
proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = proc.communicate()
if stdout:
print stdout
else:
print stderr
tmpFile = jsshellhelper.cleanUp(tmpFile)
def main():
    """Command-line entry point: packer.py <path to jsshell> <js file>."""
    parser = OptionParser()
    options, args = parser.parse_args()
    if len(args) < 2:
        print >>sys.stderr, """Usage: %s <path to jsshell> <js file>""" % sys.argv[0]
        sys.exit(1)

    packer = Packer()
    packer.run(args[0], args[1])

if __name__ == '__main__':
    main()
|
from subprocess import Popen, PIPE, STDOUT
# Uses jsshell https://developer.mozilla.org/en/Introduction_to_the_JavaScript_shell
|
<|file_name|>handlers.go<|end_file_name|><|fim▁begin|>// -*- Mode: Go; indent-tabs-mode: t -*-
/*
* Copyright (C) 2016-2021 Canonical Ltd
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 3 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package snapstate
import (
"context"
"encoding/json"
"errors"
"fmt"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"time"
"gopkg.in/tomb.v2"
"github.com/snapcore/snapd/asserts/snapasserts"
"github.com/snapcore/snapd/boot"
"github.com/snapcore/snapd/cmd/snaplock/runinhibit"
"github.com/snapcore/snapd/dirs"
"github.com/snapcore/snapd/features"
"github.com/snapcore/snapd/i18n"
"github.com/snapcore/snapd/logger"
"github.com/snapcore/snapd/osutil"
"github.com/snapcore/snapd/overlord/auth"
"github.com/snapcore/snapd/overlord/configstate/config"
"github.com/snapcore/snapd/overlord/configstate/settings"
"github.com/snapcore/snapd/overlord/ifacestate/ifacerepo"
"github.com/snapcore/snapd/overlord/restart"
"github.com/snapcore/snapd/overlord/snapstate/backend"
"github.com/snapcore/snapd/overlord/state"
"github.com/snapcore/snapd/progress"
"github.com/snapcore/snapd/release"
"github.com/snapcore/snapd/snap"
"github.com/snapcore/snapd/snap/quota"
"github.com/snapcore/snapd/store"
"github.com/snapcore/snapd/strutil"
"github.com/snapcore/snapd/timings"
"github.com/snapcore/snapd/wrappers"
)
// SnapServiceOptions is a hook set by servicestate.
// It panics until replaced; callers must only use it once servicestate has
// installed the real implementation.
var SnapServiceOptions = func(st *state.State, instanceName string, grps map[string]*quota.Group) (opts *wrappers.SnapServiceOptions, err error) {
	panic("internal error: snapstate.SnapServiceOptions is unset")
}

// EnsureSnapAbsentFromQuotaGroup is a hook set elsewhere (presumably by
// servicestate, like the hook above — confirm at the call site); it panics
// until replaced.
var EnsureSnapAbsentFromQuotaGroup = func(st *state.State, snap string) error {
	panic("internal error: snapstate.EnsureSnapAbsentFromQuotaGroup is unset")
}

// SecurityProfilesRemoveLate is a hook set elsewhere; it panics until
// replaced.
var SecurityProfilesRemoveLate = func(snapName string, rev snap.Revision, typ snap.Type) error {
	panic("internal error: snapstate.SecurityProfilesRemoveLate is unset")
}
// TaskSnapSetup returns the SnapSetup with task params hold by or referred to by the task.
func TaskSnapSetup(t *state.Task) (*SnapSetup, error) {
	var snapsup SnapSetup

	// Fast path: the task carries its own snap-setup.
	err := t.Get("snap-setup", &snapsup)
	if err != nil && err != state.ErrNoState {
		return nil, err
	}
	if err == nil {
		return &snapsup, nil
	}

	// Otherwise follow the "snap-setup-task" reference to the task that
	// holds the SnapSetup for this change.
	var id string
	err = t.Get("snap-setup-task", &id)
	if err != nil {
		return nil, err
	}

	ts := t.State().Task(id)
	if ts == nil {
		return nil, fmt.Errorf("internal error: tasks are being pruned")
	}
	if err := ts.Get("snap-setup", &snapsup); err != nil {
		return nil, err
	}
	return &snapsup, nil
}
// SetTaskSnapSetup writes the given SnapSetup to the provided task's
// snap-setup-task Task, or to the task itself if the task does not have a
// snap-setup-task (i.e. it _is_ the snap-setup-task)
func SetTaskSnapSetup(t *state.Task, snapsup *SnapSetup) error {
	if t.Has("snap-setup") {
		// this is the snap-setup-task so just write to the task directly
		t.Set("snap-setup", snapsup)
	} else {
		// this task isn't the snap-setup-task, so go get that and write to that
		// one
		var id string
		err := t.Get("snap-setup-task", &id)
		if err != nil {
			return err
		}

		ts := t.State().Task(id)
		if ts == nil {
			return fmt.Errorf("internal error: tasks are being pruned")
		}

		ts.Set("snap-setup", snapsup)
	}

	return nil
}
// snapSetupAndState returns the task's SnapSetup together with the current
// SnapState of that snap; the SnapState is zero-valued when the snap is not
// yet in the state.
func snapSetupAndState(t *state.Task) (*SnapSetup, *SnapState, error) {
	snapsup, err := TaskSnapSetup(t)
	if err != nil {
		return nil, nil, err
	}
	var snapst SnapState
	err = Get(t.State(), snapsup.InstanceName(), &snapst)
	if err != nil && err != state.ErrNoState {
		return nil, nil, err
	}
	return snapsup, &snapst, nil
}
/* State Locking
do* / undo* handlers should usually lock the state just once with:
st.Lock()
defer st.Unlock()
For tasks doing slow operations (long i/o, networking operations) it's OK
to unlock the state temporarily:
st.Unlock()
err := slowIOOp()
st.Lock()
if err != nil {
...
}
but if a task Get and then Set the SnapState of a snap it must avoid
releasing the state lock in between, other tasks might have
reasons to update the SnapState independently:
// DO NOT DO THIS!:
snapst := ...
snapst.Attr = ...
st.Unlock()
...
st.Lock()
Set(st, snapName, snapst)
if a task really needs to mix mutating a SnapState and releasing the state
lock it should be serialized at the task runner level, see
SnapManger.blockedTask and TaskRunner.SetBlocked
*/
const defaultCoreSnapName = "core"
func defaultBaseSnapsChannel() string {
channel := os.Getenv("SNAPD_BASES_CHANNEL")
if channel == "" {
return "stable"
}
return channel
}
func defaultSnapdSnapsChannel() string {
channel := os.Getenv("SNAPD_SNAPD_CHANNEL")
if channel == "" {
return "stable"
}
return channel
}
func defaultPrereqSnapsChannel() string {
channel := os.Getenv("SNAPD_PREREQS_CHANNEL")
if channel == "" {
return "stable"
}
return channel
}
// findLinkSnapTaskForSnap returns the pending (not yet ready) "link-snap"
// task for the given snap across all changes, or nil if there is none.
func findLinkSnapTaskForSnap(st *state.State, snapName string) (*state.Task, error) {
	for _, chg := range st.Changes() {
		if chg.Status().Ready() {
			continue
		}
		for _, tc := range chg.Tasks() {
			if tc.Status().Ready() {
				continue
			}
			if tc.Kind() == "link-snap" {
				snapsup, err := TaskSnapSetup(tc)
				if err != nil {
					return nil, err
				}
				if snapsup.InstanceName() == snapName {
					return tc, nil
				}
			}
		}
	}

	return nil, nil
}
// isInstalled reports whether the named snap is currently installed;
// a snap absent from the state counts as not installed.
func isInstalled(st *state.State, snapName string) (bool, error) {
	var snapState SnapState
	err := Get(st, snapName, &snapState)
	if err != nil && err != state.ErrNoState {
		return false, err
	}
	return snapState.IsInstalled(), nil
}
// timeout for tasks to check if the prerequisites are ready
var prerequisitesRetryTimeout = 30 * time.Second

// doPrerequisites ensures the snap's base (or core) and its content-provider
// prerequisites are installed before the snap itself is set up.
func (m *SnapManager) doPrerequisites(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	// check if we need to inject tasks to install core
	snapsup, _, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	// os/base/kernel/gadget cannot have prerequisites other
	// than the models default base (or core) which is installed anyway
	switch snapsup.Type {
	case snap.TypeOS, snap.TypeBase, snap.TypeKernel, snap.TypeGadget:
		return nil
	}
	// snapd is special and has no prereqs
	if snapsup.Type == snap.TypeSnapd {
		return nil
	}

	// we need to make sure we install all prereqs together in one
	// operation
	base := defaultCoreSnapName
	if snapsup.Base != "" {
		base = snapsup.Base
	}

	// if a previous version of snapd persisted Prereq only, fill the contentAttrs.
	// There will be no content attrs, so it will not update an outdated default provider
	if len(snapsup.PrereqContentAttrs) == 0 && len(snapsup.Prereq) != 0 {
		snapsup.PrereqContentAttrs = make(map[string][]string, len(snapsup.Prereq))

		for _, prereq := range snapsup.Prereq {
			snapsup.PrereqContentAttrs[prereq] = nil
		}
	}

	if err := m.installPrereqs(t, base, snapsup.PrereqContentAttrs, snapsup.UserID, perfTimings, snapsup.Flags); err != nil {
		return err
	}

	return nil
}
// installOneBaseOrRequired arranges installation of a single prerequisite
// snap (a base or a default content provider). It returns a nil TaskSet when
// nothing needs to be done, and onInFlight (or a Retry) when a conflicting
// operation is already in progress.
func (m *SnapManager) installOneBaseOrRequired(t *state.Task, snapName string, contentAttrs []string, requireTypeBase bool, channel string, onInFlight error, userID int, flags Flags) (*state.TaskSet, error) {
	st := t.State()

	// The core snap provides everything we need for core16.
	coreInstalled, err := isInstalled(st, "core")
	if err != nil {
		return nil, err
	}
	if snapName == "core16" && coreInstalled {
		return nil, nil
	}

	// installed already?
	isInstalled, err := isInstalled(st, snapName)
	if err != nil {
		return nil, err
	}
	if isInstalled {
		// Already present: at most refresh it if it lacks required
		// content attributes.
		return updatePrereqIfOutdated(t, snapName, contentAttrs, userID, flags)
	}

	// in progress?
	if linkTask, err := findLinkSnapTaskForSnap(st, snapName); err != nil {
		return nil, err
	} else if linkTask != nil {
		return nil, onInFlight
	}

	// not installed, nor queued for install -> install it
	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return nil, err
	}

	ts, err := InstallWithDeviceContext(context.TODO(), st, snapName, &RevisionOptions{Channel: channel}, userID, Flags{RequireTypeBase: requireTypeBase}, deviceCtx, "")

	// something might have triggered an explicit install while
	// the state was unlocked -> deal with that here by simply
	// retrying the operation.
	if conflErr, ok := err.(*ChangeConflictError); ok {
		// conflicted with an install in the same change, just skip
		if conflErr.ChangeID == t.Change().ID() {
			return nil, nil
		}
		return nil, &state.Retry{After: prerequisitesRetryTimeout}
	}
	return ts, err
}
// updates a prerequisite, if it's not providing a content interface that a plug expects it to
// Returns a nil TaskSet when no update is needed or when the update must be
// skipped; update failures are logged rather than propagated because the
// content provider is only a soft dependency.
func updatePrereqIfOutdated(t *state.Task, snapName string, contentAttrs []string, userID int, flags Flags) (*state.TaskSet, error) {
	if len(contentAttrs) == 0 {
		return nil, nil
	}

	st := t.State()

	// check if the default provider has all expected content tags
	if ok, err := hasAllContentAttrs(st, snapName, contentAttrs); err != nil {
		return nil, err
	} else if ok {
		return nil, nil
	}

	// this is an optimization since the Update would also detect a conflict
	// but only after accessing the store
	if ok, err := shouldSkipToAvoidConflict(t, snapName); err != nil {
		return nil, err
	} else if ok {
		return nil, nil
	}

	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return nil, err
	}

	// default provider is missing some content tags (likely outdated) so update it
	ts, err := UpdateWithDeviceContext(st, snapName, nil, userID, flags, deviceCtx, "")
	if err != nil {
		if conflErr, ok := err.(*ChangeConflictError); ok {
			// there's already an update for the same snap in this change,
			// just skip this one
			if conflErr.ChangeID == t.Change().ID() {
				return nil, nil
			}

			return nil, &state.Retry{After: prerequisitesRetryTimeout}
		}

		// don't propagate error to avoid failing the main install since the
		// content provider is (for now) a soft dependency
		t.Logf("failed to update %q, will not have required content %q: %s", snapName, strings.Join(contentAttrs, ", "), err)
		return nil, nil
	}

	return ts, nil
}
// Checks for conflicting tasks. Returns true if the operation should be skipped. The error
// can be a state.Retry if the operation should be retried later.
func shouldSkipToAvoidConflict(task *state.Task, snapName string) (bool, error) {
	otherTask, err := findLinkSnapTaskForSnap(task.State(), snapName)
	if err != nil {
		return false, err
	}

	if otherTask == nil {
		return false, nil
	}

	// it's in the same change, so the snap is already going to be installed
	if otherTask.Change().ID() == task.Change().ID() {
		return true, nil
	}

	// it's not in the same change, so retry to avoid conflicting changes to the snap
	return true, &state.Retry{
		After:  prerequisitesRetryTimeout,
		Reason: fmt.Sprintf("conflicting changes on snap %q by task %q", snapName, otherTask.Kind()),
	}
}
// hasAllContentAttrs reports whether the installed snap snapName exposes
// "content" slots whose content tags cover every entry in
// requiredContentAttrs. Slots without a "content" attribute are skipped; a
// non-string "content" attribute is an error.
func hasAllContentAttrs(st *state.State, snapName string, requiredContentAttrs []string) (bool, error) {
	provided := make(map[string]bool)
	for _, slot := range ifacerepo.Get(st).Slots(snapName) {
		if slot.Interface != "content" {
			continue
		}
		raw, ok := slot.Lookup("content")
		if !ok {
			continue
		}
		tag, isString := raw.(string)
		if !isString {
			return false, fmt.Errorf("expected 'content' attribute of slot '%s' (snap: '%s') to be string but was %s", slot.Name, snapName, reflect.TypeOf(raw))
		}
		provided[tag] = true
	}
	for _, required := range requiredContentAttrs {
		if !provided[required] {
			return false, nil
		}
	}
	return true, nil
}
// installPrereqs installs the prerequisites of a snap: its content-interface
// default providers (prereq), its base snap (unless "none"), and the snapd
// snap on systems that have neither core nor snapd. All candidate task sets
// are computed first; only if every prerequisite can be handled are the
// tasks added to the change, each in its own lane. Returns a state.Retry
// error (via installOneBaseOrRequired) when another change must finish first.
func (m *SnapManager) installPrereqs(t *state.Task, base string, prereq map[string][]string, userID int, tm timings.Measurer, flags Flags) error {
	st := t.State()
	// We try to install all wanted snaps. If one snap cannot be installed
	// because of change conflicts or similar we retry. Only if all snaps
	// can be installed together we add the tasks to the change.
	var tss []*state.TaskSet
	for prereqName, contentAttrs := range prereq {
		// nil: for plain prereqs an in-flight install is not an error
		var onInFlightErr error = nil
		var err error
		var ts *state.TaskSet
		timings.Run(tm, "install-prereq", fmt.Sprintf("install %q", prereqName), func(timings.Measurer) {
			noTypeBaseCheck := false
			ts, err = m.installOneBaseOrRequired(t, prereqName, contentAttrs, noTypeBaseCheck, defaultPrereqSnapsChannel(), onInFlightErr, userID, flags)
		})
		if err != nil {
			return prereqError("prerequisite", prereqName, err)
		}
		if ts == nil {
			// nothing to do for this prerequisite
			continue
		}
		tss = append(tss, ts)
	}
	// for base snaps we need to wait until the change is done
	// (either finished or failed)
	onInFlightErr := &state.Retry{After: prerequisitesRetryTimeout}
	var tsBase *state.TaskSet
	var err error
	if base != "none" {
		timings.Run(tm, "install-prereq", fmt.Sprintf("install base %q", base), func(timings.Measurer) {
			requireTypeBase := true
			tsBase, err = m.installOneBaseOrRequired(t, base, nil, requireTypeBase, defaultBaseSnapsChannel(), onInFlightErr, userID, Flags{})
		})
		if err != nil {
			return prereqError("snap base", base, err)
		}
	}
	// on systems without core or snapd need to install snapd to
	// make interfaces work - LP: 1819318
	var tsSnapd *state.TaskSet
	snapdSnapInstalled, err := isInstalled(st, "snapd")
	if err != nil {
		return err
	}
	coreSnapInstalled, err := isInstalled(st, "core")
	if err != nil {
		return err
	}
	if base != "core" && !snapdSnapInstalled && !coreSnapInstalled {
		timings.Run(tm, "install-prereq", "install snapd", func(timings.Measurer) {
			noTypeBaseCheck := false
			tsSnapd, err = m.installOneBaseOrRequired(t, "snapd", nil, noTypeBaseCheck, defaultSnapdSnapsChannel(), onInFlightErr, userID, Flags{})
		})
		if err != nil {
			return prereqError("system snap", "snapd", err)
		}
	}
	chg := t.Change()
	// add all required snaps, no ordering, this will be done in the
	// auto-connect task handler
	for _, ts := range tss {
		ts.JoinLane(st.NewLane())
		chg.AddAll(ts)
	}
	// add the base if needed, prereqs else must wait on this
	if tsBase != nil {
		tsBase.JoinLane(st.NewLane())
		for _, t := range chg.Tasks() {
			t.WaitAll(tsBase)
		}
		chg.AddAll(tsBase)
	}
	// add snapd if needed, everything must wait on this
	if tsSnapd != nil {
		tsSnapd.JoinLane(st.NewLane())
		for _, t := range chg.Tasks() {
			t.WaitAll(tsSnapd)
		}
		chg.AddAll(tsSnapd)
	}
	// make sure that the new change is committed to the state
	// together with marking this task done
	t.SetStatus(state.DoneStatus)
	return nil
}
// prereqError wraps an installation error for a prerequisite snap in a
// descriptive message; state.Retry errors are passed through untouched so
// the task is retried as requested.
func prereqError(what, snapName string, err error) error {
	switch err.(type) {
	case *state.Retry:
		return err
	default:
		return fmt.Errorf("cannot install %s %q: %v", what, snapName, err)
	}
}
// doPrepareSnap assigns a local (negative) revision to the snap being
// installed when no revision was provided, then stores the updated
// snap-setup back on the task for the follow-up handlers.
func (m *SnapManager) doPrepareSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	if snapsup.Revision().Unset() {
		// local revisions start at -1 and count down
		rev := snapst.LocalRevision()
		switch {
		case rev.Unset(), rev.N > 0:
			rev = snap.R(-1)
		default:
			rev.N--
		}
		if !rev.Local() {
			panic("internal error: invalid local revision built: " + rev.String())
		}
		snapsup.SideInfo.Revision = rev
	}
	t.Set("snap-setup", snapsup)
	return nil
}
// undoPrepareSnap reports a failed snap installation to the error tracker,
// unless problem reporting is disabled or the change has no task in error
// state (e.g. the user canceled it). It assembles a human-readable log
// summary and a duplicate-detection signature from all tasks of the change.
// The undo itself never fails: reporting problems are only logged.
func (m *SnapManager) undoPrepareSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, err := TaskSnapSetup(t)
	if err != nil {
		return err
	}
	// nothing to report without a resolved snap name
	if snapsup.SideInfo == nil || snapsup.SideInfo.RealName == "" {
		return nil
	}
	var logMsg []string
	var snapSetup string
	dupSig := []string{"snap-install:"}
	chg := t.Change()
	logMsg = append(logMsg, fmt.Sprintf("change %q: %q", chg.Kind(), chg.Summary()))
	for _, t := range chg.Tasks() {
		// TODO: report only tasks in intersecting lanes?
		tintro := fmt.Sprintf("%s: %s", t.Kind(), t.Status())
		logMsg = append(logMsg, tintro)
		dupSig = append(dupSig, tintro)
		if snapsup, err := TaskSnapSetup(t); err == nil && snapsup.SideInfo != nil {
			snapSetup1 := fmt.Sprintf(" snap-setup: %q (%v) %q", snapsup.SideInfo.RealName, snapsup.SideInfo.Revision, snapsup.SideInfo.Channel)
			// only record each distinct snap-setup once in a row
			if snapSetup1 != snapSetup {
				snapSetup = snapSetup1
				logMsg = append(logMsg, snapSetup)
				dupSig = append(dupSig, fmt.Sprintf(" snap-setup: %q", snapsup.SideInfo.RealName))
			}
		}
		for _, l := range t.Log() {
			// cut off the rfc3339 timestamp to ensure duplicate
			// detection works in daisy
			tStampLen := strings.Index(l, " ")
			if tStampLen < 0 {
				continue
			}
			// not tStampLen+1 because the indent is nice
			entry := l[tStampLen:]
			logMsg = append(logMsg, entry)
			dupSig = append(dupSig, entry)
		}
	}
	var ubuntuCoreTransitionCount int
	err = st.Get("ubuntu-core-transition-retry", &ubuntuCoreTransitionCount)
	if err != nil && err != state.ErrNoState {
		return err
	}
	extra := map[string]string{
		"Channel":  snapsup.Channel,
		"Revision": snapsup.SideInfo.Revision.String(),
	}
	if ubuntuCoreTransitionCount > 0 {
		extra["UbuntuCoreTransitionCount"] = strconv.Itoa(ubuntuCoreTransitionCount)
	}
	// Only report and error if there is an actual error in the change,
	// we could undo things because the user canceled the change.
	var isErr bool
	for _, tt := range t.Change().Tasks() {
		if tt.Status() == state.ErrorStatus {
			isErr = true
			break
		}
	}
	if isErr && !settings.ProblemReportsDisabled(st) {
		// drop the state lock while talking to the error tracker
		st.Unlock()
		oopsid, err := errtrackerReport(snapsup.SideInfo.RealName, strings.Join(logMsg, "\n"), strings.Join(dupSig, "\n"), extra)
		st.Lock()
		if err == nil {
			logger.Noticef("Reported install problem for %q as %s", snapsup.SideInfo.RealName, oopsid)
		} else {
			logger.Debugf("Cannot report problem: %s", err)
		}
	}
	return nil
}
// installInfoUnlocked queries the store for the install info of the snap
// described by snapsup, taking the state lock for the duration of the query.
// Callers must NOT hold the state lock.
func installInfoUnlocked(st *state.State, snapsup *SnapSetup, deviceCtx DeviceContext) (store.SnapActionResult, error) {
	st.Lock()
	defer st.Unlock()
	opts := &RevisionOptions{Channel: snapsup.Channel, CohortKey: snapsup.CohortKey, Revision: snapsup.Revision()}
	return installInfo(context.TODO(), st, snapsup.InstanceName(), opts, snapsup.UserID, Flags{}, deviceCtx)
}
// autoRefreshRateLimited returns the configured rate limit for auto-refresh
// downloads, or 0 when no limit is set or the configured value is invalid.
func autoRefreshRateLimited(st *state.State) (rate int64) {
	var limit string
	if err := config.NewTransaction(st).Get("core", "refresh.rate-limit", &limit); err != nil {
		return 0
	}
	// NOTE ParseByteSize errors on negative rates, so the result is
	// never negative
	parsed, err := strutil.ParseByteSize(limit)
	if err != nil {
		return 0
	}
	return parsed
}
// downloadSnapParams gathers what doDownloadSnap needs: the task's SnapSetup,
// a store client for the current device context and the user (if any) on
// whose behalf the download happens. Must be called with the state lock held.
func downloadSnapParams(st *state.State, t *state.Task) (*SnapSetup, StoreService, *auth.UserState, error) {
	snapsup, err := TaskSnapSetup(t)
	if err != nil {
		return nil, nil, nil, err
	}

	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return nil, nil, nil, err
	}
	theStore := Store(st, deviceCtx)

	user, err := userFromUserID(st, snapsup.UserID)
	if err != nil {
		return nil, nil, nil, err
	}

	return snapsup, theStore, user, nil
}
// doDownloadSnap downloads the snap file described by the task's snap-setup
// into its mount file location, honoring the auto-refresh rate limit, and
// records the resulting SnapPath (and, on the compatibility path, SideInfo)
// back into the task's snap-setup. The download itself runs without the
// state lock held.
func (m *SnapManager) doDownloadSnap(t *state.Task, tomb *tomb.Tomb) error {
	st := t.State()
	var rate int64
	st.Lock()
	perfTimings := state.TimingsForTask(t)
	snapsup, theStore, user, err := downloadSnapParams(st, t)
	if snapsup != nil && snapsup.IsAutoRefresh {
		// NOTE rate is never negative
		rate = autoRefreshRateLimited(st)
	}
	st.Unlock()
	if err != nil {
		return err
	}
	meter := NewTaskProgressAdapterUnlocked(t)
	targetFn := snapsup.MountFile()
	dlOpts := &store.DownloadOptions{
		IsAutoRefresh: snapsup.IsAutoRefresh,
		RateLimit:     rate,
	}
	if snapsup.DownloadInfo == nil {
		var storeInfo store.SnapActionResult
		// COMPATIBILITY - this task was created from an older version
		// of snapd that did not store the DownloadInfo in the state
		// yet. Therefore do not worry about DeviceContext.
		storeInfo, err = installInfoUnlocked(st, snapsup, nil)
		if err != nil {
			return err
		}
		timings.Run(perfTimings, "download", fmt.Sprintf("download snap %q", snapsup.SnapName()), func(timings.Measurer) {
			err = theStore.Download(tomb.Context(nil), snapsup.SnapName(), targetFn, &storeInfo.DownloadInfo, meter, user, dlOpts)
		})
		snapsup.SideInfo = &storeInfo.SideInfo
	} else {
		timings.Run(perfTimings, "download", fmt.Sprintf("download snap %q", snapsup.SnapName()), func(timings.Measurer) {
			err = theStore.Download(tomb.Context(nil), snapsup.SnapName(), targetFn, snapsup.DownloadInfo, meter, user, dlOpts)
		})
	}
	if err != nil {
		return err
	}
	snapsup.SnapPath = targetFn
	// update the snap setup for the follow up tasks
	st.Lock()
	t.Set("snap-setup", snapsup)
	perfTimings.Save(st)
	st.Unlock()
	return nil
}
var (
	// mountPollInterval is how long doMountSnap waits between checks that
	// a freshly set up snap has actually become readable (mounted).
	mountPollInterval = 1 * time.Second
)
<|fim▁hole|>// instance keyed or not
func hasOtherInstances(st *state.State, instanceName string) (bool, error) {
snapName, _ := snap.SplitInstanceName(instanceName)
var all map[string]*json.RawMessage
if err := st.Get("snaps", &all); err != nil && err != state.ErrNoState {
return false, err
}
for otherName := range all {
if otherName == instanceName {
continue
}
if otherSnapName, _ := snap.SplitInstanceName(otherName); otherSnapName == snapName {
return true, nil
}
}
return false, nil
}
// ErrKernelGadgetUpdateTaskMissing is returned when a kernel refresh change
// created by an older snapd lacks the update-gadget-assets task.
var ErrKernelGadgetUpdateTaskMissing = errors.New("cannot refresh kernel with change created by old snapd that is missing gadget update task")

// checkKernelHasUpdateAssetsTask verifies that the change owning t contains
// an "update-gadget-assets" task for a kernel snap, returning
// ErrKernelGadgetUpdateTaskMissing otherwise.
func checkKernelHasUpdateAssetsTask(t *state.Task) error {
	for _, other := range t.Change().Tasks() {
		snapsup, err := TaskSnapSetup(other)
		switch {
		case err == state.ErrNoState:
			// XXX: hooks have no snapsup, is this detection okay?
			continue
		case err != nil:
			return err
		}
		if snapsup.Type == "kernel" && other.Kind() == "update-gadget-assets" {
			return nil
		}
	}
	return ErrKernelGadgetUpdateTaskMissing
}
// doMountSnap verifies the downloaded snap file and sets it up on the system
// via the backend, then polls until the snap is actually readable at its
// mount directory. On failure the partial setup is undone so no stray snap
// directories are left behind. It records snap-type and install-record on
// the task for the undo handler.
func (m *SnapManager) doMountSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	perfTimings := state.TimingsForTask(t)
	snapsup, snapst, err := snapSetupAndState(t)
	st.Unlock()
	if err != nil {
		return err
	}
	curInfo, err := snapst.CurrentInfo()
	if err != nil && err != ErrNoCurrent {
		return err
	}
	m.backend.CurrentInfo(curInfo)
	st.Lock()
	deviceCtx, err := DeviceCtx(t.State(), t, nil)
	st.Unlock()
	if err != nil {
		return err
	}
	// check that there is a "update-gadget-assets" task for kernels too,
	// see https://bugs.launchpad.net/snapd/+bug/1940553
	if snapsup.Type == snap.TypeKernel {
		st.Lock()
		err = checkKernelHasUpdateAssetsTask(t)
		st.Unlock()
		if err != nil {
			return err
		}
	}
	timings.Run(perfTimings, "check-snap", fmt.Sprintf("check snap %q", snapsup.InstanceName()), func(timings.Measurer) {
		err = checkSnap(st, snapsup.SnapPath, snapsup.InstanceName(), snapsup.SideInfo, curInfo, snapsup.Flags, deviceCtx)
	})
	if err != nil {
		return err
	}
	// cleanup removes the snap directory unless other instances still need it
	cleanup := func() {
		st.Lock()
		defer st.Unlock()
		otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
		if err != nil {
			t.Errorf("cannot cleanup partial setup snap %q: %v", snapsup.InstanceName(), err)
			return
		}
		// remove snap dir is idempotent so it's ok to always call it in the cleanup path
		if err := m.backend.RemoveSnapDir(snapsup.placeInfo(), otherInstances); err != nil {
			t.Errorf("cannot cleanup partial setup snap %q: %v", snapsup.InstanceName(), err)
		}
	}
	setupOpts := &backend.SetupSnapOptions{
		SkipKernelExtraction: snapsup.SkipKernelExtraction,
	}
	pb := NewTaskProgressAdapterUnlocked(t)
	// TODO Use snapsup.Revision() to obtain the right info to mount
	// instead of assuming the candidate is the right one.
	var snapType snap.Type
	var installRecord *backend.InstallRecord
	timings.Run(perfTimings, "setup-snap", fmt.Sprintf("setup snap %q", snapsup.InstanceName()), func(timings.Measurer) {
		snapType, installRecord, err = m.backend.SetupSnap(snapsup.SnapPath, snapsup.InstanceName(), snapsup.SideInfo, deviceCtx, setupOpts, pb)
	})
	if err != nil {
		cleanup()
		return err
	}
	// double check that the snap is mounted
	var readInfoErr error
	for i := 0; i < 10; i++ {
		_, readInfoErr = readInfo(snapsup.InstanceName(), snapsup.SideInfo, errorOnBroken)
		if readInfoErr == nil {
			logger.Debugf("snap %q (%v) available at %q", snapsup.InstanceName(), snapsup.Revision(), snapsup.placeInfo().MountDir())
			break
		}
		if _, ok := readInfoErr.(*snap.NotFoundError); !ok {
			// a different problem than "not mounted yet"; stop polling
			break
		}
		// snap not found, seems is not mounted yet
		msg := fmt.Sprintf("expected snap %q revision %v to be mounted but is not", snapsup.InstanceName(), snapsup.Revision())
		readInfoErr = fmt.Errorf("cannot proceed, %s", msg)
		if i == 0 {
			logger.Noticef(msg)
		}
		time.Sleep(mountPollInterval)
	}
	if readInfoErr != nil {
		// never became readable: undo the setup before failing the task
		timings.Run(perfTimings, "undo-setup-snap", fmt.Sprintf("Undo setup of snap %q", snapsup.InstanceName()), func(timings.Measurer) {
			err = m.backend.UndoSetupSnap(snapsup.placeInfo(), snapType, installRecord, deviceCtx, pb)
		})
		if err != nil {
			st.Lock()
			t.Errorf("cannot undo partial setup snap %q: %v", snapsup.InstanceName(), err)
			st.Unlock()
		}
		cleanup()
		return readInfoErr
	}
	st.Lock()
	// set snapst type for undoMountSnap
	t.Set("snap-type", snapType)
	if installRecord != nil {
		t.Set("install-record", installRecord)
	}
	st.Unlock()
	if snapsup.Flags.RemoveSnapPath {
		// the downloaded file is no longer needed; removal is best effort
		if err := os.Remove(snapsup.SnapPath); err != nil {
			logger.Noticef("Failed to cleanup %s: %s", snapsup.SnapPath, err)
		}
	}
	st.Lock()
	perfTimings.Save(st)
	st.Unlock()
	return nil
}
// undoMountSnap reverses doMountSnap: it undoes the backend snap setup using
// the snap-type and install-record values that doMountSnap stored on the
// task, then removes the snap directory unless other instances still use it.
func (m *SnapManager) undoMountSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	snapsup, err := TaskSnapSetup(t)
	st.Unlock()
	if err != nil {
		return err
	}
	st.Lock()
	deviceCtx, err := DeviceCtx(t.State(), t, nil)
	st.Unlock()
	if err != nil {
		return err
	}
	st.Lock()
	var typ snap.Type
	err = t.Get("snap-type", &typ)
	st.Unlock()
	// backward compatibility
	if err == state.ErrNoState {
		// older snapd did not store snap-type; assume a plain app
		typ = "app"
	} else if err != nil {
		return err
	}
	var installRecord backend.InstallRecord
	st.Lock()
	// install-record is optional (e.g. not present in tasks from older snapd)
	err = t.Get("install-record", &installRecord)
	st.Unlock()
	if err != nil && err != state.ErrNoState {
		return err
	}
	pb := NewTaskProgressAdapterUnlocked(t)
	if err := m.backend.UndoSetupSnap(snapsup.placeInfo(), typ, &installRecord, deviceCtx, pb); err != nil {
		return err
	}
	st.Lock()
	defer st.Unlock()
	otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
	if err != nil {
		return err
	}
	return m.backend.RemoveSnapDir(snapsup.placeInfo(), otherInstances)
}
// queryDisabledServices uses wrappers.QueryDisabledServices() to return the
// names of the currently disabled services of the given snap.
//
// Note this function takes a snap info rather than snapst because there are
// situations where we want to call this on non-current snap infos, i.e. in the
// undo handlers, see undoLinkSnap for an example.
func (m *SnapManager) queryDisabledServices(info *snap.Info, pb progress.Meter) ([]string, error) {
	return m.backend.QueryDisabledServices(info, pb)
}
// doUnlinkCurrentSnap deactivates the currently linked revision of a snap in
// preparation for linking a new one. With refresh-app-awareness enabled it
// first ensures no snap apps are running (holding the inhibition lock until
// the unlink completes). The snapd snap's current symlink is never removed
// here, only replaced later by the new revision's link.
func (m *SnapManager) doUnlinkCurrentSnap(t *state.Task, _ *tomb.Tomb) error {
	// called only during refresh when a new revision of a snap is being
	// installed
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	oldInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	tr := config.NewTransaction(st)
	experimentalRefreshAppAwareness, err := features.Flag(tr, features.RefreshAppAwareness)
	if err != nil && !config.IsNoOption(err) {
		return err
	}
	if experimentalRefreshAppAwareness && !snapsup.Flags.IgnoreRunning {
		// Invoke the hard refresh flow. Upon success the returned lock will be
		// held to prevent snap-run from advancing until UnlinkSnap, executed
		// below, completes.
		lock, err := hardEnsureNothingRunningDuringRefresh(m.backend, st, snapst, oldInfo)
		if err != nil {
			return err
		}
		defer lock.Close()
	}
	snapst.Active = false
	// snapd current symlink on the refresh path can only replaced by a
	// symlink to a new revision of the snapd snap, so only do the actual
	// unlink if we're not working on the snapd snap
	if oldInfo.Type() != snap.TypeSnapd {
		// do the final unlink
		linkCtx := backend.LinkContext{
			FirstInstall: false,
			// This task is only used for unlinking a snap during refreshes so we
			// can safely hard-code this condition here.
			RunInhibitHint: runinhibit.HintInhibitedForRefresh,
		}
		err = m.backend.UnlinkSnap(oldInfo, linkCtx, NewTaskProgressAdapterLocked(t))
		if err != nil {
			return err
		}
	}
	// mark as inactive
	Set(st, snapsup.InstanceName(), snapst)
	// Notify link snap participants about link changes.
	notifyLinkParticipants(t, snapsup.InstanceName())
	return nil
}
// undoUnlinkCurrentSnap reverses doUnlinkCurrentSnap by re-linking the old
// current revision and marking the snap active again. If the re-linked snap
// requests it (e.g. a core snap), a restart is triggered so the system runs
// its snapd again.
func (m *SnapManager) undoUnlinkCurrentSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	oldInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return err
	}
	snapst.Active = true
	opts, err := SnapServiceOptions(st, snapsup.InstanceName(), nil)
	if err != nil {
		return err
	}
	linkCtx := backend.LinkContext{
		FirstInstall:   false,
		ServiceOptions: opts,
	}
	reboot, err := m.backend.LinkSnap(oldInfo, deviceCtx, linkCtx, perfTimings)
	if err != nil {
		return err
	}
	// mark as active again
	Set(st, snapsup.InstanceName(), snapst)
	// Notify link snap participants about link changes.
	notifyLinkParticipants(t, snapsup.InstanceName())
	// if we just put back a previous a core snap, request a restart
	// so that we switch executing its snapd
	m.maybeRestart(t, oldInfo, reboot)
	return nil
}
// doCopySnapData copies the snap's data directories from the old revision to
// the new one and then performs (or reverts) the hidden snap data dir
// migration depending on the experimental feature flag vs the recorded
// migration state. Migration results are recorded in the task's snap-setup
// (MigratedHidden/MigratedExposed) so the undo handler can revert them.
func (m *SnapManager) doCopySnapData(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	snapsup, snapst, err := snapSetupAndState(t)
	st.Unlock()
	if err != nil {
		return err
	}
	newInfo, err := readInfo(snapsup.InstanceName(), snapsup.SideInfo, errorOnBroken)
	if err != nil {
		return err
	}
	oldInfo, err := snapst.CurrentInfo()
	if err != nil && err != ErrNoCurrent {
		return err
	}
	st.Lock()
	opts, err := getDirMigrationOpts(st, snapst, snapsup)
	st.Unlock()
	if err != nil {
		return err
	}
	dirOpts := opts.getSnapDirOpts()
	pb := NewTaskProgressAdapterUnlocked(t)
	if copyDataErr := m.backend.CopySnapData(newInfo, oldInfo, pb, dirOpts); copyDataErr != nil {
		if oldInfo != nil {
			// there is another revision of the snap, cannot remove
			// shared data directory
			return copyDataErr
		}
		// cleanup shared snap data directory
		st.Lock()
		defer st.Unlock()
		otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
		if err != nil {
			t.Errorf("cannot undo partial snap %q data copy: %v", snapsup.InstanceName(), err)
			return copyDataErr
		}
		// no other instances of this snap, shared data directory can be
		// removed now too
		if err := m.backend.RemoveSnapDataDir(newInfo, otherInstances); err != nil {
			t.Errorf("cannot undo partial snap %q data copy, failed removing shared directory: %v", snapsup.InstanceName(), err)
		}
		return copyDataErr
	}
	// the migration hasn't been done - do it now
	if opts.UseHidden && !opts.MigratedToHidden {
		if err = m.backend.HideSnapData(snapsup.InstanceName()); err != nil {
			// undo the migration. In contrast to copy data for which the new revision
			// directory can just be discarded, the snap data migration introduces
			// a change that affects all revisions of the snap and thus needs to be reverted
			if err := m.backend.UndoHideSnapData(snapsup.InstanceName()); err != nil {
				src := filepath.Join("~", dirs.HiddenSnapDataHomeDir, snapsup.InstanceName())
				dst := filepath.Join("~", dirs.UserHomeSnapDir, snapsup.InstanceName())
				st.Lock()
				st.Warnf("cannot undo snap dir hiding (move all user's %s to %s): %v",
					src, dst, err)
				st.Unlock()
			}
			return err
		}
		// record success so undoCopySnapData knows to revert the hiding
		snapsup.MigratedHidden = true
		st.Lock()
		err := SetTaskSnapSetup(t, snapsup)
		st.Unlock()
		if err != nil {
			return fmt.Errorf("cannot set migration status to done: %w", err)
		}
	} else if !opts.UseHidden && opts.MigratedToHidden {
		// migration was done but user turned the feature off, so undo migration
		if err := m.backend.UndoHideSnapData(snapsup.InstanceName()); err != nil {
			if err := m.backend.HideSnapData(snapsup.InstanceName()); err != nil {
				src := filepath.Join("~", dirs.UserHomeSnapDir, snapsup.InstanceName())
				dst := filepath.Join("~", dirs.HiddenSnapDataHomeDir, snapsup.InstanceName())
				st.Lock()
				st.Warnf("cannot undo snap dir exposing (move all user's %s to %s): %v",
					src, dst, err)
				st.Unlock()
			}
			return err
		}
		// record success so undoCopySnapData knows to redo the hiding
		snapsup.MigratedExposed = true
		st.Lock()
		err := SetTaskSnapSetup(t, snapsup)
		st.Unlock()
		if err != nil {
			return fmt.Errorf("cannot set migration status to undone: %w", err)
		}
	}
	return nil
}
// undoCopySnapData reverses doCopySnapData: it reverts any hidden-dir
// migration (or un-migration) performed in this run, persisting the final
// migration state, then undoes the data copy itself and removes the shared
// data directory when this was the only revision/instance of the snap.
func (m *SnapManager) undoCopySnapData(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	snapsup, snapst, err := snapSetupAndState(t)
	st.Unlock()
	if err != nil {
		return err
	}
	newInfo, err := readInfo(snapsup.InstanceName(), snapsup.SideInfo, 0)
	if err != nil {
		return err
	}
	oldInfo, err := snapst.CurrentInfo()
	if err != nil && err != ErrNoCurrent {
		return err
	}
	// we migrated the data in this run - undo that
	if snapsup.MigratedHidden {
		if err := m.backend.UndoHideSnapData(snapsup.InstanceName()); err != nil {
			return err
		}
		snapsup.MigratedHidden = false
		snapst.MigratedHidden = false
		if err := writeMigrationStatus(t, snapst, snapsup); err != nil {
			return err
		}
	} else if snapsup.MigratedExposed {
		// we reverted the migration in this run - undo that
		if err := m.backend.HideSnapData(snapsup.InstanceName()); err != nil {
			return err
		}
		snapsup.MigratedExposed = false
		snapst.MigratedHidden = true
		if err := writeMigrationStatus(t, snapst, snapsup); err != nil {
			return err
		}
	}
	st.Lock()
	opts, err := getDirMigrationOpts(st, snapst, snapsup)
	st.Unlock()
	if err != nil {
		return fmt.Errorf("failed to get snap dir options: %w", err)
	}
	dirOpts := opts.getSnapDirOpts()
	pb := NewTaskProgressAdapterUnlocked(t)
	if err := m.backend.UndoCopySnapData(newInfo, oldInfo, pb, dirOpts); err != nil {
		return err
	}
	if oldInfo != nil {
		// there is other revision of this snap, cannot remove shared
		// directory anyway
		return nil
	}
	st.Lock()
	defer st.Unlock()
	otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
	if err != nil {
		return err
	}
	// no other instances of this snap and no other revisions, shared data
	// directory can be removed
	if err := m.backend.RemoveSnapDataDir(newInfo, otherInstances); err != nil {
		return err
	}
	return nil
}
// writeMigrationStatus writes the SnapSetup, state and sequence file (if they
// exist). This must be called after the migration undo procedure is done since
// only then do we know the actual final state of the migration.
func writeMigrationStatus(t *state.Task, snapst *SnapState, snapsup *SnapSetup) error {
	st := t.State()
	snapName := snapsup.InstanceName()
	st.Lock()
	defer st.Unlock()
	if err := SetTaskSnapSetup(t, snapsup); err != nil {
		return err
	}
	// probe whether the snap already has state recorded
	err := Get(st, snapName, &SnapState{})
	if err != nil && err != state.ErrNoState {
		return err
	}
	if err == nil {
		// migration state might've been written in the change; update it after undo
		Set(st, snapName, snapst)
	}
	seqFile := filepath.Join(dirs.SnapSeqDir, snapName+".json")
	if osutil.FileExists(seqFile) {
		// might've written migration status to seq file in the change; update it
		// after undo
		return writeSeqFile(snapName, snapst)
	}
	// never got to write seq file; don't need to re-write migration status in it
	return nil
}
// cleanupCopySnapData removes the trashed backups of snap data once the task
// has completed successfully; on failure the trashed data is kept for the
// undo path. Removal is best effort.
func (m *SnapManager) cleanupCopySnapData(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	if t.Status() != state.DoneStatus {
		// the task failed; keep the trashed data around for undo
		return nil
	}

	_, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	curInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}

	// try to remove trashed any data in ~/snap and ~/.snap/data
	m.backend.ClearTrashedData(curInfo)

	return nil
}
// writeSeqFile writes the sequence file for failover handling: the snap's
// revision sequence, current revision and hidden-dir migration flag,
// serialized as JSON under dirs.SnapSeqDir.
func writeSeqFile(name string, snapst *SnapState) error {
	path := filepath.Join(dirs.SnapSeqDir, name+".json")
	if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
		return err
	}

	seq := struct {
		Sequence       []*snap.SideInfo `json:"sequence"`
		Current        string           `json:"current"`
		MigratedHidden bool             `json:"migrated-hidden"`
	}{
		Sequence:       snapst.Sequence,
		Current:        snapst.Current.String(),
		MigratedHidden: snapst.MigratedHidden,
	}
	data, err := json.Marshal(&seq)
	if err != nil {
		return err
	}

	return osutil.AtomicWriteFile(path, data, 0644, 0)
}
// missingDisabledServices partitions svcs — the names of services that were
// disabled when the snap was last active — into those still present as
// services in info (and so should be disabled again) and those no longer
// present (e.g. renamed or removed). Both returned lists are sorted; the
// error is always nil and kept for interface stability.
func missingDisabledServices(svcs []string, info *snap.Info) ([]string, []string, error) {
	found := []string{}
	missing := []string{}
	// classify each previously disabled service by whether the current
	// snap revision still ships it as a service
	for _, name := range svcs {
		app := info.Apps[name]
		if app != nil && app.IsService() {
			found = append(found, name)
		} else {
			missing = append(missing, name)
		}
	}
	// sorted output makes testing deterministic
	sort.Strings(found)
	sort.Strings(missing)
	return found, missing, nil
}
// LinkSnapParticipant is an interface for interacting with snap link/unlink
// operations.
//
// Unlike the interface for a task handler, only one notification method is
// used. The method notifies a participant that linkage of a snap has changed.
// This method is invoked in link-snap, unlink-snap, the undo path of those
// methods and the undo handler for link-snap.
//
// In all cases it is invoked after all other operations are completed but
// before the task completes.
//
// Participants are registered with AddLinkSnapParticipant.
type LinkSnapParticipant interface {
	// SnapLinkageChanged is called when a snap is linked or unlinked.
	// The error is only logged and does not stop the task it is used from.
	SnapLinkageChanged(st *state.State, instanceName string) error
}
// linkSnapParticipants holds the registered link/unlink participants; it is
// extended via AddLinkSnapParticipant and swapped out in tests via
// MockLinkSnapParticipants.
var linkSnapParticipants []LinkSnapParticipant

// AddLinkSnapParticipant adds a participant in the link/unlink operations.
func AddLinkSnapParticipant(p LinkSnapParticipant) {
	linkSnapParticipants = append(linkSnapParticipants, p)
}
// MockLinkSnapParticipants replaces the list of link snap participants for
// testing. The returned function restores the previous list.
func MockLinkSnapParticipants(ps []LinkSnapParticipant) (restore func()) {
	saved := linkSnapParticipants
	linkSnapParticipants = ps
	return func() {
		linkSnapParticipants = saved
	}
}
// notifyLinkParticipants tells every registered LinkSnapParticipant that the
// linkage of instanceName changed; errors are logged on the task and never
// abort it.
func notifyLinkParticipants(t *state.Task, instanceName string) {
	st := t.State()
	for i := range linkSnapParticipants {
		if err := linkSnapParticipants[i].SnapLinkageChanged(st, instanceName); err != nil {
			t.Errorf("%v", err)
		}
	}
}
func (m *SnapManager) doLinkSnap(t *state.Task, _ *tomb.Tomb) (err error) {
st := t.State()
st.Lock()
defer st.Unlock()
perfTimings := state.TimingsForTask(t)
defer perfTimings.Save(st)
snapsup, snapst, err := snapSetupAndState(t)
if err != nil {
return err
}
deviceCtx, err := DeviceCtx(st, t, nil)
if err != nil {
return err
}
oldInfo, err := snapst.CurrentInfo()
if err != nil && err != ErrNoCurrent {
return err
}
// find if the snap is already installed before we modify snapst below
isInstalled := snapst.IsInstalled()
cand := snapsup.SideInfo
m.backend.Candidate(cand)
oldCandidateIndex := snapst.LastIndex(cand.Revision)
var oldRevsBeforeCand []snap.Revision
if oldCandidateIndex < 0 {
snapst.Sequence = append(snapst.Sequence, cand)
} else if !snapsup.Revert {
// save the revs before the candidate, so undoLink can account for discarded revs when putting it back
for _, si := range snapst.Sequence[:oldCandidateIndex] {
oldRevsBeforeCand = append(oldRevsBeforeCand, si.Revision)
}
// remove the old candidate from the sequence, add it at the end
copy(snapst.Sequence[oldCandidateIndex:len(snapst.Sequence)-1], snapst.Sequence[oldCandidateIndex+1:])
snapst.Sequence[len(snapst.Sequence)-1] = cand
}
oldCurrent := snapst.Current
snapst.Current = cand.Revision
snapst.Active = true
oldChannel := snapst.TrackingChannel
if snapsup.Channel != "" {
err := snapst.SetTrackingChannel(snapsup.Channel)
if err != nil {
return err
}
}
oldIgnoreValidation := snapst.IgnoreValidation
snapst.IgnoreValidation = snapsup.IgnoreValidation
oldTryMode := snapst.TryMode
snapst.TryMode = snapsup.TryMode
oldDevMode := snapst.DevMode
snapst.DevMode = snapsup.DevMode
oldJailMode := snapst.JailMode
snapst.JailMode = snapsup.JailMode
oldClassic := snapst.Classic
snapst.Classic = snapsup.Classic
oldCohortKey := snapst.CohortKey
snapst.CohortKey = snapsup.CohortKey
if snapsup.Required { // set only on install and left alone on refresh
snapst.Required = true
}
oldRefreshInhibitedTime := snapst.RefreshInhibitedTime
oldLastRefreshTime := snapst.LastRefreshTime
// only set userID if unset or logged out in snapst and if we
// actually have an associated user
if snapsup.UserID > 0 {
var user *auth.UserState
if snapst.UserID != 0 {
user, err = auth.User(st, snapst.UserID)
if err != nil && err != auth.ErrInvalidUser {
return err
}
}
if user == nil {
// if the original user installing the snap is
// no longer available transfer to user who
// triggered this change
snapst.UserID = snapsup.UserID
}
}
// keep instance key
snapst.InstanceKey = snapsup.InstanceKey
// don't keep the old state because, if we fail, we may or may not be able to
// revert the migration. We set the migration status after undoing any
// migration related ops
if snapsup.MigratedHidden {
snapst.MigratedHidden = true
} else if snapsup.MigratedExposed {
snapst.MigratedHidden = false
}
newInfo, err := readInfo(snapsup.InstanceName(), cand, 0)
if err != nil {
return err
}
// record type
snapst.SetType(newInfo.Type())
pb := NewTaskProgressAdapterLocked(t)
// Check for D-Bus service conflicts a second time to detect
// conflicts within a transaction.
if err := checkDBusServiceConflicts(st, newInfo); err != nil {
return err
}
opts, err := SnapServiceOptions(st, snapsup.InstanceName(), nil)
if err != nil {
return err
}
firstInstall := oldCurrent.Unset()
linkCtx := backend.LinkContext{
FirstInstall: firstInstall,
ServiceOptions: opts,
}
// on UC18+, snap tooling comes from the snapd snap so we need generated
// mount units to depend on the snapd snap mount units
if !deviceCtx.Classic() && deviceCtx.Model().Base() != "" {
linkCtx.RequireMountedSnapdSnap = true
}
// write sequence file for failover helpers
if err := writeSeqFile(snapsup.InstanceName(), snapst); err != nil {
return err
}
needsReboot, err := m.backend.LinkSnap(newInfo, deviceCtx, linkCtx, perfTimings)
// defer a cleanup helper which will unlink the snap if anything fails after
// this point
defer func() {
if err == nil {
return
}
// err is not nil, we need to try and unlink the snap to cleanup after
// ourselves
var backendErr error
if newInfo.Type() == snap.TypeSnapd && !firstInstall {
// snapd snap is special in the sense that we always
// need the current symlink, so we restore the link to
// the old revision
_, backendErr = m.backend.LinkSnap(oldInfo, deviceCtx, linkCtx, perfTimings)
} else {
// snapd during first install and all other snaps
backendErr = m.backend.UnlinkSnap(newInfo, linkCtx, pb)
}
if backendErr != nil {
t.Errorf("cannot cleanup failed attempt at making snap %q available to the system: %v", snapsup.InstanceName(), backendErr)
}
notifyLinkParticipants(t, snapsup.InstanceName())
}()
if err != nil {
return err
}
// Restore configuration of the target revision (if available) on revert
if isInstalled {
// Make a copy of configuration of current snap revision
if err = config.SaveRevisionConfig(st, snapsup.InstanceName(), oldCurrent); err != nil {
return err
}
}
// Restore configuration of the target revision (if available; nothing happens if it's not).
// We only do this on reverts (and not on refreshes).
if snapsup.Revert {
if err = config.RestoreRevisionConfig(st, snapsup.InstanceName(), snapsup.Revision()); err != nil {
return err
}
}
if len(snapst.Sequence) == 1 {
if err := m.createSnapCookie(st, snapsup.InstanceName()); err != nil {
return fmt.Errorf("cannot create snap cookie: %v", err)
}
}
// save for undoLinkSnap
t.Set("old-trymode", oldTryMode)
t.Set("old-devmode", oldDevMode)
t.Set("old-jailmode", oldJailMode)
t.Set("old-classic", oldClassic)
t.Set("old-ignore-validation", oldIgnoreValidation)
t.Set("old-channel", oldChannel)
t.Set("old-current", oldCurrent)
t.Set("old-candidate-index", oldCandidateIndex)
t.Set("old-refresh-inhibited-time", oldRefreshInhibitedTime)
t.Set("old-cohort-key", oldCohortKey)
t.Set("old-last-refresh-time", oldLastRefreshTime)
t.Set("old-revs-before-cand", oldRevsBeforeCand)
if snapsup.Revert {
t.Set("old-revert-status", snapst.RevertStatus)
switch snapsup.RevertStatus {
case NotBlocked:
if snapst.RevertStatus == nil {
snapst.RevertStatus = make(map[int]RevertStatus)
}
snapst.RevertStatus[oldCurrent.N] = NotBlocked
default:
delete(snapst.RevertStatus, oldCurrent.N)
}
} else {
delete(snapst.RevertStatus, cand.Revision.N)
}
// Record the fact that the snap was refreshed successfully.
snapst.RefreshInhibitedTime = nil
if !snapsup.Revert {
now := timeNow()
snapst.LastRefreshTime = &now
}
if cand.SnapID != "" {
// write the auxiliary store info
aux := &auxStoreInfo{
Media: snapsup.Media,
Website: snapsup.Website,
}
if err := keepAuxStoreInfo(cand.SnapID, aux); err != nil {
return err
}
if len(snapst.Sequence) == 1 {
defer func() {
if err != nil {
// the install is getting undone, and there are no more of this snap
// try to remove the aux info we just created
discardAuxStoreInfo(cand.SnapID)
}
}()
}
}
// Compatibility with old snapd: check if we have auto-connect task and
// if not, inject it after self (link-snap) for snaps that are not core
if newInfo.Type() != snap.TypeOS {
var hasAutoConnect, hasSetupProfiles bool
for _, other := range t.Change().Tasks() {
// Check if this is auto-connect task for same snap and we it's part of the change with setup-profiles task
if other.Kind() == "auto-connect" || other.Kind() == "setup-profiles" {
otherSnapsup, err := TaskSnapSetup(other)
if err != nil {
return err
}
if snapsup.InstanceName() == otherSnapsup.InstanceName() {
if other.Kind() == "auto-connect" {
hasAutoConnect = true
} else {
hasSetupProfiles = true
}
}
}
}
if !hasAutoConnect && hasSetupProfiles {
InjectAutoConnect(t, snapsup)
}
}
// Do at the end so we only preserve the new state if it worked.
Set(st, snapsup.InstanceName(), snapst)
// Notify link snap participants about link changes.
notifyLinkParticipants(t, snapsup.InstanceName())
// Make sure if state commits and snapst is mutated we won't be rerun
t.SetStatus(state.DoneStatus)
// if we just installed a core snap, request a restart
// so that we switch executing its snapd.
var canReboot bool
if needsReboot {
var cannotReboot bool
// system reboot is required, but can this task request that?
if err := t.Get("cannot-reboot", &cannotReboot); err != nil && err != state.ErrNoState {
return err
}
if !cannotReboot {
// either the task was created before that variable was
// introduced or the task can request a reboot
canReboot = true
} else {
t.Logf("reboot postponed to later tasks")
}
}
if !needsReboot || canReboot {
m.maybeRestart(t, newInfo, needsReboot)
}
return nil
}
// maybeRestart will schedule a reboot or restart as needed for the
// just linked snap with info if it's a core or snapd or kernel snap.
//
// If rebootRequired is true a full system restart is requested via
// RestartSystem; otherwise a daemon restart is requested only when
// daemonRestartReason returns a non-empty reason for the snap type.
func (m *SnapManager) maybeRestart(t *state.Task, info *snap.Info, rebootRequired bool) {
	// Don't restart when preseeding - we will switch to new snapd on
	// first boot.
	if m.preseed {
		return
	}

	st := t.State()

	if rebootRequired {
		t.Logf("Requested system restart.")
		RestartSystem(t)
		return
	}

	typ := info.Type()

	// If the type of the snap requesting this start is non-trivial that either
	// means we are on Ubuntu Core and the type is a base/kernel/gadget which
	// requires a reboot of the system, or that the type is snapd in which case
	// we just do a restart of snapd itself. In these cases restartReason will
	// be non-empty and thus we will perform a restart.
	// If restartReason is empty, then the snap requesting the restart was not
	// a boot participant and thus we don't need to do any sort of restarts as
	// a result of updating this snap.
	restartReason := daemonRestartReason(st, typ)
	if restartReason == "" {
		// no message -> no restart
		return
	}

	// use an explicit format string: restartReason is data, not a format,
	// and must not be interpreted for % verbs (go vet printf check)
	t.Logf("%s", restartReason)
	restart.Request(st, restart.RestartDaemon)
}
// daemonRestartReason returns the log message to use when a daemon
// restart should be requested after linking a snap of the given type,
// or "" when no daemon restart is needed.
func daemonRestartReason(st *state.State, typ snap.Type) string {
	switch typ {
	case snap.TypeSnapd:
		// updating the snapd snap always restarts the daemon
		return "Requested daemon restart (snapd snap)."
	case snap.TypeOS:
		if !release.OnClassic {
			// core on Ubuntu Core does not trigger a daemon restart here
			return ""
		}
		// ignore error here as we have no way to return to caller
		if snapdSnapInstalled, _ := isInstalled(st, "snapd"); snapdSnapInstalled {
			// this snap is the base, but snapd is running from the snapd snap
			return ""
		}
		return "Requested daemon restart."
	}
	// any other snap type is not interesting
	return ""
}
// maybeUndoRemodelBootChanges will check if an undo needs to update the
// bootloader. This can happen if e.g. a new kernel gets installed. This
// will switch the bootloader to the new kernel but if the change is later
// undone we need to switch back to the kernel of the old model.
func (m *SnapManager) maybeUndoRemodelBootChanges(t *state.Task) error {
	// get the new and the old model
	deviceCtx, err := DeviceCtx(t.State(), t, nil)
	if err != nil {
		return err
	}
	// we only have an old model if we are in a remodel situation
	if !deviceCtx.ForRemodeling() {
		return nil
	}
	groundDeviceCtx := deviceCtx.GroundContext()
	oldModel := groundDeviceCtx.Model()
	newModel := deviceCtx.Model()

	// check type of the snap we are undoing, only kernel/base/core are
	// relevant
	snapsup, _, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	var newSnapName, snapName string
	switch snapsup.Type {
	case snap.TypeKernel:
		snapName = oldModel.Kernel()
		newSnapName = newModel.Kernel()
	case snap.TypeOS, snap.TypeBase:
		// XXX: add support for "core"
		snapName = oldModel.Base()
		newSnapName = newModel.Base()
	default:
		return nil
	}
	// we can stop if the kernel/base has not changed
	if snapName == newSnapName {
		return nil
	}
	// we can stop if the snap we are looking at is not a kernel/base
	// of the new model
	if snapsup.InstanceName() != newSnapName {
		return nil
	}
	// get info for *old* kernel/base/core and see if we need to reboot
	// TODO: we may need something like infoForDeviceSnap here
	var snapst SnapState
	if err = Get(t.State(), snapName, &snapst); err != nil {
		return err
	}
	info, err := snapst.CurrentInfo()
	if err != nil && err != ErrNoCurrent {
		return err
	}
	if err == ErrNoCurrent {
		// no current revision of the old kernel/base to boot into;
		// without this guard info would be nil and info.Type() below
		// would panic
		return nil
	}
	bp := boot.Participant(info, info.Type(), groundDeviceCtx)
	reboot, err := bp.SetNextBoot()
	if err != nil {
		return err
	}

	// we may just have switch back to the old kernel/base/core so
	// we may need to restart
	m.maybeRestart(t, info, reboot)
	return nil
}
// undoLinkSnap reverts the effects of the link-snap handler: it restores
// the snap-state fields saved by the do handler (channel, flags/modes,
// sequence position, refresh bookkeeping, revert status), unlinks the
// just-linked revision (or, for a non-first-install snapd snap, relinks
// the previous revision), restores or clears configuration, undoes any
// remodel boot changes and requests daemon restarts where needed.
func (m *SnapManager) undoLinkSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return err
	}

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	// read back all the "old-*" values stashed by doLinkSnap; some may be
	// absent (state.ErrNoState tolerated) when the task was created by an
	// older snapd that did not set them
	var oldChannel string
	err = t.Get("old-channel", &oldChannel)
	if err != nil {
		return err
	}
	var oldIgnoreValidation bool
	err = t.Get("old-ignore-validation", &oldIgnoreValidation)
	if err != nil && err != state.ErrNoState {
		return err
	}
	var oldTryMode bool
	err = t.Get("old-trymode", &oldTryMode)
	if err != nil {
		return err
	}
	var oldDevMode bool
	err = t.Get("old-devmode", &oldDevMode)
	if err != nil {
		return err
	}
	var oldJailMode bool
	err = t.Get("old-jailmode", &oldJailMode)
	if err != nil {
		return err
	}
	var oldClassic bool
	err = t.Get("old-classic", &oldClassic)
	if err != nil {
		return err
	}
	var oldCurrent snap.Revision
	err = t.Get("old-current", &oldCurrent)
	if err != nil {
		return err
	}
	var oldCandidateIndex int
	if err := t.Get("old-candidate-index", &oldCandidateIndex); err != nil {
		return err
	}
	var oldRefreshInhibitedTime *time.Time
	if err := t.Get("old-refresh-inhibited-time", &oldRefreshInhibitedTime); err != nil && err != state.ErrNoState {
		return err
	}
	var oldLastRefreshTime *time.Time
	if err := t.Get("old-last-refresh-time", &oldLastRefreshTime); err != nil && err != state.ErrNoState {
		return err
	}
	var oldCohortKey string
	if err := t.Get("old-cohort-key", &oldCohortKey); err != nil && err != state.ErrNoState {
		return err
	}
	var oldRevsBeforeCand []snap.Revision
	if err := t.Get("old-revs-before-cand", &oldRevsBeforeCand); err != nil && err != state.ErrNoState {
		return err
	}

	// this revision is the only one in the sequence: the undo amounts to
	// removing the install's per-snap artifacts (namespace, cookie, aux info)
	if len(snapst.Sequence) == 1 {
		// XXX: shouldn't these two just log and carry on? this is an undo handler...
		timings.Run(perfTimings, "discard-snap-namespace", fmt.Sprintf("discard the namespace of snap %q", snapsup.InstanceName()), func(tm timings.Measurer) {
			err = m.backend.DiscardSnapNamespace(snapsup.InstanceName())
		})
		if err != nil {
			t.Errorf("cannot discard snap namespace %q, will retry in 3 mins: %s", snapsup.InstanceName(), err)
			return &state.Retry{After: 3 * time.Minute}
		}
		if err := m.removeSnapCookie(st, snapsup.InstanceName()); err != nil {
			return fmt.Errorf("cannot remove snap cookie: %v", err)
		}
		// try to remove the auxiliary store info
		if err := discardAuxStoreInfo(snapsup.SideInfo.SnapID); err != nil {
			return fmt.Errorf("cannot remove auxiliary store info: %v", err)
		}
	}

	isRevert := snapsup.Revert

	// relinking of the old snap is done in the undo of unlink-current-snap
	currentIndex := snapst.LastIndex(snapst.Current)
	if currentIndex < 0 {
		return fmt.Errorf("internal error: cannot find revision %d in %v for undoing the added revision", snapsup.SideInfo.Revision, snapst.Sequence)
	}

	if oldCandidateIndex < 0 {
		// the candidate revision was appended by doLinkSnap: drop it
		snapst.Sequence = append(snapst.Sequence[:currentIndex], snapst.Sequence[currentIndex+1:]...)
	} else if !isRevert {
		// account for revisions discarded before the install failed
		discarded := countMissingRevs(oldRevsBeforeCand, snapst.Sequence)
		oldCandidateIndex -= discarded

		// move the candidate back to its original position in the sequence
		oldCand := snapst.Sequence[currentIndex]
		copy(snapst.Sequence[oldCandidateIndex+1:], snapst.Sequence[oldCandidateIndex:])
		snapst.Sequence[oldCandidateIndex] = oldCand
	}
	// restore all pre-link snap-state fields
	snapst.Current = oldCurrent
	snapst.Active = false
	snapst.TrackingChannel = oldChannel
	snapst.IgnoreValidation = oldIgnoreValidation
	snapst.TryMode = oldTryMode
	snapst.DevMode = oldDevMode
	snapst.JailMode = oldJailMode
	snapst.Classic = oldClassic
	snapst.RefreshInhibitedTime = oldRefreshInhibitedTime
	snapst.LastRefreshTime = oldLastRefreshTime
	snapst.CohortKey = oldCohortKey

	if isRevert {
		var oldRevertStatus map[int]RevertStatus
		err := t.Get("old-revert-status", &oldRevertStatus)
		if err != nil && err != state.ErrNoState {
			return err
		}
		// may be nil if not set (e.g. created by old snapd)
		snapst.RevertStatus = oldRevertStatus
	}

	newInfo, err := readInfo(snapsup.InstanceName(), snapsup.SideInfo, 0)
	if err != nil {
		return err
	}

	// we need to undo potential changes to current snap configuration (e.g. if
	// modified by post-refresh/install/configure hooks as part of failed
	// refresh/install) by restoring the configuration of "old current".
	// similarly, we need to re-save the disabled services if there is a
	// revision for us to go back to, see comment below for full explanation
	if len(snapst.Sequence) > 0 {
		if err = config.RestoreRevisionConfig(st, snapsup.InstanceName(), oldCurrent); err != nil {
			return err
		}
	} else {
		// in the case of an install we need to clear any config
		err = config.DeleteSnapConfig(st, snapsup.InstanceName())
		if err != nil {
			return err
		}
	}

	pb := NewTaskProgressAdapterLocked(t)
	firstInstall := oldCurrent.Unset()
	linkCtx := backend.LinkContext{
		FirstInstall: firstInstall,
	}
	var backendErr error
	if newInfo.Type() == snap.TypeSnapd && !firstInstall {
		// snapst has been updated and now is the old revision, since
		// this is not the first install of snapd, it should exist
		var oldInfo *snap.Info
		oldInfo, err := snapst.CurrentInfo()
		if err != nil {
			return err
		}
		// the snapd snap is special in the sense that we need to make
		// sure that a sensible version is always linked as current,
		// also we never reboot when updating snapd snap
		_, backendErr = m.backend.LinkSnap(oldInfo, deviceCtx, linkCtx, perfTimings)
	} else {
		// snapd during first install and all other snaps
		backendErr = m.backend.UnlinkSnap(newInfo, linkCtx, pb)
	}
	if backendErr != nil {
		return backendErr
	}
	if err := m.maybeUndoRemodelBootChanges(t); err != nil {
		return err
	}

	// restart only when snapd was installed for the first time and the rest of
	// the cleanup is performed by snapd from core;
	// when reverting a subsequent snapd revision, the restart happens in
	// undoLinkCurrentSnap() instead
	if firstInstall && newInfo.Type() == snap.TypeSnapd {
		const rebootRequired = false
		m.maybeRestart(t, newInfo, rebootRequired)
	}

	// write sequence file for failover helpers
	if err := writeSeqFile(snapsup.InstanceName(), snapst); err != nil {
		return err
	}
	// mark as inactive
	Set(st, snapsup.InstanceName(), snapst)
	// Notify link snap participants about link changes.
	notifyLinkParticipants(t, snapsup.InstanceName())

	// Make sure if state commits and snapst is mutated we won't be rerun
	t.SetStatus(state.UndoneStatus)

	// If we are on classic and have no previous version of core
	// we may have restarted from a distro package into the core
	// snap. We need to undo that restart here. Instead of in
	// doUnlinkCurrentSnap() like we usually do when going from
	// core snap -> next core snap
	if release.OnClassic && newInfo.Type() == snap.TypeOS && oldCurrent.Unset() {
		t.Logf("Requested daemon restart (undo classic initial core install)")
		restart.Request(st, restart.RestartDaemon)
	}
	return nil
}
// countMissingRevs counts how many of the given revisions aren't present
// in the sequence of sideInfos.
func countMissingRevs(revisions []snap.Revision, sideInfos []*snap.SideInfo) int {
	var found int
	for _, rev := range revisions {
		for _, si := range sideInfos {
			if si.Revision == rev {
				found++
				// count each revision at most once even if it appears
				// more than once in the sequence, otherwise the result
				// could be wrong (even negative)
				break
			}
		}
	}
	return len(revisions) - found
}
// doSwitchFlags carries the options for genericDoSwitchSnap.
type doSwitchFlags struct {
	// switchCurrentChannel also updates the channel recorded in the
	// current side info (used by the 'Update' flavor of switch)
	switchCurrentChannel bool
}
// doSwitchSnapChannel switches the snap's tracking channel and/or cohort. It
// also switches the current channel if appropriate. For use from 'Update'.
//
// Thin wrapper over genericDoSwitchSnap with switchCurrentChannel enabled.
func (m *SnapManager) doSwitchSnapChannel(t *state.Task, _ *tomb.Tomb) error {
	return m.genericDoSwitchSnap(t, doSwitchFlags{switchCurrentChannel: true})
}
// doSwitchSnap switches the snap's tracking channel and/or cohort, *without*
// switching the current snap channel. For use from 'Switch'.
//
// Thin wrapper over genericDoSwitchSnap with default (all-off) flags.
func (m *SnapManager) doSwitchSnap(t *state.Task, _ *tomb.Tomb) error {
	return m.genericDoSwitchSnap(t, doSwitchFlags{})
}
// genericDoSwitchSnap implements the common part of the switch handlers:
// it updates the tracking channel and cohort key in the snap state and,
// when flags.switchCurrentChannel is set, also the channel recorded in
// the current side info.
func (m *SnapManager) genericDoSwitchSnap(t *state.Task, flags doSwitchFlags) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	// switch the tracked channel
	if err := snapst.SetTrackingChannel(snapsup.Channel); err != nil {
		return err
	}
	snapst.CohortKey = snapsup.CohortKey
	if flags.switchCurrentChannel {
		// optionally support switching the current snap channel too, e.g.
		// if a snap is in both stable and candidate with the same revision
		// we can update it here and it will be displayed correctly in the UI
		// NOTE(review): the guard tests snapsup.SideInfo.Channel but the
		// assignment uses snapsup.Channel — confirm this asymmetry is
		// intentional
		if snapsup.SideInfo.Channel != "" {
			snapst.CurrentSideInfo().Channel = snapsup.Channel
		}
	}

	Set(st, snapsup.InstanceName(), snapst)
	return nil
}
// doToggleSnapFlags updates the toggleable per-snap flags in the snap
// state from the values carried by the task's snap setup.
func (m *SnapManager) doToggleSnapFlags(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	setup, snapState, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	// ignore-validation is the only flag that can be toggled for now
	snapState.IgnoreValidation = setup.IgnoreValidation

	Set(st, setup.InstanceName(), snapState)
	return nil
}
// installModeDisabledServices returns what services with
// "install-mode: disabled" should be disabled. Only services
// seen for the first time are considered.
func installModeDisabledServices(st *state.State, snapst *SnapState, currentInfo *snap.Info) (svcsToDisable []string, err error) {
	// services explicitly enabled by hooks win over install-mode: disable
	enabledByHookSvcs := map[string]bool{}
	for _, svcName := range snapst.ServicesEnabledByHooks {
		enabledByHookSvcs[svcName] = true
	}

	// find what services the previous snap had
	prevCurrentSvcs := map[string]bool{}
	if psi := snapst.previousSideInfo(); psi != nil {
		var prevCurrentInfo *snap.Info
		// NOTE(review): the error from Info is assigned to the named
		// return but effectively discarded (the function returns nil
		// below); a missing previous info just means no services are
		// treated as pre-existing — confirm this is intended
		if prevCurrentInfo, err = Info(st, snapst.InstanceName(), psi.Revision); prevCurrentInfo != nil {
			for _, prevSvc := range prevCurrentInfo.Services() {
				prevCurrentSvcs[prevSvc.Name] = true
			}
		}
	}
	// and deal with "install-mode: disable" for all new services
	// (i.e. not present in previous snap).
	//
	// Services that are not new but have "install-mode: disable"
	// do not need special handling. They are either still disabled
	// or something has enabled them and then they should stay enabled.
	for _, svc := range currentInfo.Services() {
		if svc.InstallMode == "disable" && !enabledByHookSvcs[svc.Name] {
			if !prevCurrentSvcs[svc.Name] {
				svcsToDisable = append(svcsToDisable, svc.Name)
			}
		}
	}
	return svcsToDisable, nil
}
// startSnapServices starts the services of the snap's current revision
// in dependency order, skipping services that must stay disabled
// (previously disabled services, "install-mode: disable" services seen
// for the first time, and services disabled by hooks). It also updates
// the snap-state bookkeeping used to carry disabled-service information
// across revisions, saving the previous value for undo.
func (m *SnapManager) startSnapServices(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	currentInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}

	// check if any previously disabled services are now no longer services and
	// log messages about that
	for _, svc := range snapst.LastActiveDisabledServices {
		app, ok := currentInfo.Apps[svc]
		if !ok {
			logger.Noticef("previously disabled service %s no longer exists", svc)
		} else if !app.IsService() {
			logger.Noticef("previously disabled service %s is now an app and not a service", svc)
		}
	}

	// get the services which should be disabled (not started),
	// as well as the services which are not present in this revision, but were
	// present and disabled in a previous one and as such should be kept inside
	// snapst for persistent storage
	svcsToDisable, svcsToSave, err := missingDisabledServices(snapst.LastActiveDisabledServices, currentInfo)
	if err != nil {
		return err
	}

	// check what services with "InstallMode: disable" need to be disabled
	svcsToDisableFromInstallMode, err := installModeDisabledServices(st, snapst, currentInfo)
	if err != nil {
		return err
	}
	svcsToDisable = append(svcsToDisable, svcsToDisableFromInstallMode...)

	// append services that were disabled by hooks (they should not get re-enabled)
	svcsToDisable = append(svcsToDisable, snapst.ServicesDisabledByHooks...)

	// save the current last-active-disabled-services before we re-write it in case we
	// need to undo this
	t.Set("old-last-active-disabled-services", snapst.LastActiveDisabledServices)

	// commit the missing services to state so when we unlink this revision and
	// go to a different revision with potentially different service names, the
	// currently missing service names will be re-disabled if they exist later
	snapst.LastActiveDisabledServices = svcsToSave

	// reset services tracked by operations from hooks
	snapst.ServicesDisabledByHooks = nil
	snapst.ServicesEnabledByHooks = nil
	Set(st, snapsup.InstanceName(), snapst)

	svcs := currentInfo.Services()
	if len(svcs) == 0 {
		// nothing to start
		return nil
	}

	// order services by their before/after dependencies
	startupOrdered, err := snap.SortServices(svcs)
	if err != nil {
		return err
	}

	pb := NewTaskProgressAdapterUnlocked(t)

	// starting services can be slow; release the state lock meanwhile
	st.Unlock()
	err = m.backend.StartServices(startupOrdered, svcsToDisable, pb, perfTimings)
	st.Lock()

	return err
}
// undoStartSnapServices reverts start-snap-services: it restores the
// saved last-active-disabled-services list and stops the services of
// the snap's current revision.
func (m *SnapManager) undoStartSnapServices(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	info, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}

	// roll back the disabled-services bookkeeping saved by the do handler
	// (absent on tasks created by older snapd)
	var prevDisabled []string
	if err := t.Get("old-last-active-disabled-services", &prevDisabled); err != nil && err != state.ErrNoState {
		return err
	}
	snapst.LastActiveDisabledServices = prevDisabled
	Set(st, snapsup.InstanceName(), snapst)

	services := info.Services()
	if len(services) == 0 {
		return nil
	}

	// XXX: stop reason not set on start task, should we have a new reason for undo?
	var reason snap.ServiceStopReason

	// stop the services without holding the state lock
	st.Unlock()
	err = m.backend.StopServices(services, reason, progress.Null, perfTimings)
	st.Lock()
	return err
}
// stopSnapServices stops the services of the snap's current revision
// (using the task's "stop-reason" if set), records which services were
// disabled at that point for use by later start-snap-services runs, and
// saves the previous bookkeeping for undo.
//
// Note the lock choreography: the state lock is dropped around the slow
// backend calls and re-acquired via deferred Lock/Unlock pairs so that
// every return path leaves the lock balanced.
func (m *SnapManager) stopSnapServices(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	currentInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	svcs := currentInfo.Services()
	if len(svcs) == 0 {
		// nothing to stop
		return nil
	}

	// stop-reason is optional; zero value means no specific reason
	var stopReason snap.ServiceStopReason
	if err := t.Get("stop-reason", &stopReason); err != nil && err != state.ErrNoState {
		return err
	}

	pb := NewTaskProgressAdapterUnlocked(t)
	st.Unlock()
	// the deferred Lock below re-acquires the state lock on any return
	// path from here on, balancing the deferred Unlock at the top
	defer st.Lock()

	// stop the services
	err = m.backend.StopServices(svcs, stopReason, pb, perfTimings)
	if err != nil {
		return err
	}

	// get the disabled services after we stopped all the services.
	// this list is not meant to save what services are disabled at any given
	// time, specifically just what services are disabled while systemd loses
	// track of the services. this list is also used to determine what services are enabled
	// when we start services of a new revision of the snap in
	// start-snap-services handler.
	disabledServices, err := m.queryDisabledServices(currentInfo, pb)
	if err != nil {
		return err
	}

	// re-acquire the lock to update task and snap state
	st.Lock()
	defer st.Unlock()

	// for undo
	t.Set("old-last-active-disabled-services", snapst.LastActiveDisabledServices)
	// undo could queryDisabledServices, but this avoids it
	t.Set("disabled-services", disabledServices)

	// add to the disabled services list in snapst services which were disabled
	// for usage across changes like in reverting and enabling after being
	// disabled.
	// we keep what's already in the list in snapst because that list is
	// services which were previously present in the snap and disabled, but are
	// no longer present.
	snapst.LastActiveDisabledServices = append(
		snapst.LastActiveDisabledServices,
		disabledServices...,
	)

	// reset services tracked by operations from hooks
	snapst.ServicesDisabledByHooks = nil
	snapst.ServicesEnabledByHooks = nil
	Set(st, snapsup.InstanceName(), snapst)

	return nil
}
// undoStopSnapServices reverts stop-snap-services: it restores the saved
// last-active-disabled-services bookkeeping and restarts the services of
// the current revision, skipping the services that were recorded as
// disabled by the do handler.
func (m *SnapManager) undoStopSnapServices(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	currentInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	svcs := currentInfo.Services()
	if len(svcs) == 0 {
		// nothing was stopped, nothing to restart
		return nil
	}

	// order services by their before/after dependencies
	startupOrdered, err := snap.SortServices(svcs)
	if err != nil {
		return err
	}

	// restore the bookkeeping saved by the do handler (may be absent)
	var lastActiveDisabled []string
	if err := t.Get("old-last-active-disabled-services", &lastActiveDisabled); err != nil && err != state.ErrNoState {
		return err
	}
	snapst.LastActiveDisabledServices = lastActiveDisabled
	Set(st, snapsup.InstanceName(), snapst)

	// services recorded as disabled when the do handler ran stay disabled
	var disabledServices []string
	if err := t.Get("disabled-services", &disabledServices); err != nil && err != state.ErrNoState {
		return err
	}

	// starting services can be slow; release the state lock meanwhile
	st.Unlock()
	err = m.backend.StartServices(startupOrdered, disabledServices, progress.Null, perfTimings)
	st.Lock()
	if err != nil {
		return err
	}

	return nil
}
// doUnlinkSnap unlinks the snap revision given by the task's snap setup
// and marks the snap inactive in the state.
func (m *SnapManager) doUnlinkSnap(t *state.Task, _ *tomb.Tomb) error {
	// invoked only if snap has a current active revision, during remove or
	// disable; in case of the snapd snap, we only reach here if disabling or
	// removal was deemed ok by earlier checks
	st := t.State()
	st.Lock()
	defer st.Unlock()

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	info, err := Info(t.State(), snapsup.InstanceName(), snapsup.Revision())
	if err != nil {
		return err
	}

	// perform the final unlink; this is never a first install here
	linkCtx := backend.LinkContext{FirstInstall: false}
	if err := m.backend.UnlinkSnap(info, linkCtx, NewTaskProgressAdapterLocked(t)); err != nil {
		return err
	}

	// record the snap as inactive
	snapst.Active = false
	Set(st, snapsup.InstanceName(), snapst)

	// let link participants know the links for this snap changed
	notifyLinkParticipants(t, snapsup.InstanceName())

	return nil
}
// undoUnlinkSnap reverts unlink-snap by relinking the snap's current
// revision and marking it active again — unless the snap's data
// directories are already gone (a later clear-snap task may have run as
// part of a failed remove), in which case the snap is left unlinked.
func (m *SnapManager) undoUnlinkSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	perfTimings := state.TimingsForTask(t)
	defer perfTimings.Save(st)

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	isInstalled := snapst.IsInstalled()
	if !isInstalled {
		return fmt.Errorf("internal error: snap %q not installed anymore", snapsup.InstanceName())
	}

	info, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}

	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return err
	}

	// undo here may be part of failed snap remove change, in which case a later
	// "clear-snap" task could have been executed and some or all of the
	// data of this snap could be lost. If that's the case, then we should not
	// enable the snap back.
	// XXX: should make an exception for snapd/core?
	place := snapsup.placeInfo()
	for _, dir := range []string{place.DataDir(), place.CommonDataDir()} {
		if exists, _, _ := osutil.DirExists(dir); !exists {
			t.Logf("cannot link snap %q back, some of its data has already been removed", snapsup.InstanceName())
			// TODO: mark the snap broken at the SnapState level when we have
			// such concept.
			return nil
		}
	}

	snapst.Active = true
	Set(st, snapsup.InstanceName(), snapst)

	opts, err := SnapServiceOptions(st, snapsup.InstanceName(), nil)
	if err != nil {
		return err
	}
	linkCtx := backend.LinkContext{
		FirstInstall: false,
		ServiceOptions: opts,
	}
	reboot, err := m.backend.LinkSnap(info, deviceCtx, linkCtx, perfTimings)
	if err != nil {
		return err
	}

	// Notify link snap participants about link changes.
	notifyLinkParticipants(t, snapsup.InstanceName())

	// if we just linked back a core snap, request a restart
	// so that we switch executing its snapd.
	m.maybeRestart(t, info, reboot)

	return nil
}
// doClearSnapData removes the per-revision data of the snap revision
// given by the task's snap setup; if it is the last revision in the
// sequence, the common data and the snap data directory are removed too.
//
// The state lock is taken only around state reads — the actual data
// removal runs unlocked since it can be slow.
func (m *SnapManager) doClearSnapData(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	snapsup, snapst, err := snapSetupAndState(t)
	st.Unlock()
	if err != nil {
		return err
	}

	st.Lock()
	info, err := Info(t.State(), snapsup.InstanceName(), snapsup.Revision())
	st.Unlock()
	if err != nil {
		return err
	}

	// determine the snap dir options (e.g. hidden-dir migration state)
	st.Lock()
	opts, err := getDirMigrationOpts(st, snapst, snapsup)
	st.Unlock()
	if err != nil {
		return err
	}

	dirOpts := opts.getSnapDirOpts()
	if err = m.backend.RemoveSnapData(info, dirOpts); err != nil {
		return err
	}

	if len(snapst.Sequence) == 1 {
		// Only remove data common between versions if this is the last version
		if err = m.backend.RemoveSnapCommonData(info, dirOpts); err != nil {
			return err
		}

		st.Lock()
		defer st.Unlock()

		// keep the shared directory if parallel instances of this snap exist
		otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
		if err != nil {
			return err
		}
		// Snap data directory can be removed now too
		if err := m.backend.RemoveSnapDataDir(info, otherInstances); err != nil {
			return err
		}
	}

	return nil
}
// doDiscardSnap drops the snap revision given by the task's snap setup
// from the sequence and removes its files; when it is the last revision,
// it also cleans up everything associated with the snap (mount units,
// config, namespace, inhibit lock, cookie, snap dir, aux store info and
// quota group membership).
func (m *SnapManager) doDiscardSnap(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}

	deviceCtx, err := DeviceCtx(st, t, nil)
	if err != nil {
		return err
	}

	if snapst.Current == snapsup.Revision() && snapst.Active {
		return fmt.Errorf("internal error: cannot discard snap %q: still active", snapsup.InstanceName())
	}

	// drop any potential revert status for this revision
	delete(snapst.RevertStatus, snapsup.Revision().N)

	if len(snapst.Sequence) == 1 {
		snapst.Sequence = nil
		snapst.Current = snap.Revision{}
	} else {
		// rebuild the sequence without the discarded revision
		newSeq := make([]*snap.SideInfo, 0, len(snapst.Sequence))
		for _, si := range snapst.Sequence {
			if si.Revision == snapsup.Revision() {
				// leave out
				continue
			}
			newSeq = append(newSeq, si)
		}
		snapst.Sequence = newSeq
		if snapst.Current == snapsup.Revision() {
			// fall back to the newest remaining revision
			snapst.Current = newSeq[len(newSeq)-1].Revision
		}
	}

	pb := NewTaskProgressAdapterLocked(t)
	typ, err := snapst.Type()
	if err != nil {
		return err
	}
	err = m.backend.RemoveSnapFiles(snapsup.placeInfo(), typ, nil, deviceCtx, pb)
	if err != nil {
		t.Errorf("cannot remove snap file %q, will retry in 3 mins: %s", snapsup.InstanceName(), err)
		return &state.Retry{After: 3 * time.Minute}
	}

	// last revision gone: tear down everything else belonging to the snap
	if len(snapst.Sequence) == 0 {
		if err = m.backend.RemoveSnapMountUnits(snapsup.placeInfo(), nil); err != nil {
			return err
		}

		if err := pruneRefreshCandidates(st, snapsup.InstanceName()); err != nil {
			return err
		}
		if err := pruneSnapsHold(st, snapsup.InstanceName()); err != nil {
			return err
		}

		// Remove configuration associated with this snap.
		err = config.DeleteSnapConfig(st, snapsup.InstanceName())
		if err != nil {
			return err
		}
		err = m.backend.DiscardSnapNamespace(snapsup.InstanceName())
		if err != nil {
			t.Errorf("cannot discard snap namespace %q, will retry in 3 mins: %s", snapsup.InstanceName(), err)
			return &state.Retry{After: 3 * time.Minute}
		}
		err = m.backend.RemoveSnapInhibitLock(snapsup.InstanceName())
		if err != nil {
			return err
		}
		if err := m.removeSnapCookie(st, snapsup.InstanceName()); err != nil {
			return fmt.Errorf("cannot remove snap cookie: %v", err)
		}

		otherInstances, err := hasOtherInstances(st, snapsup.InstanceName())
		if err != nil {
			return err
		}

		if err := m.backend.RemoveSnapDir(snapsup.placeInfo(), otherInstances); err != nil {
			return fmt.Errorf("cannot remove snap directory: %v", err)
		}

		// try to remove the auxiliary store info
		if err := discardAuxStoreInfo(snapsup.SideInfo.SnapID); err != nil {
			logger.Noticef("Cannot remove auxiliary store info for %q: %v", snapsup.InstanceName(), err)
		}

		// XXX: also remove sequence files?

		// remove the snap from any quota groups it may have been in, otherwise
		// that quota group may get into an inconsistent state
		if err := EnsureSnapAbsentFromQuotaGroup(st, snapsup.InstanceName()); err != nil {
			return err
		}
	}
	if err = config.DiscardRevisionConfig(st, snapsup.InstanceName(), snapsup.Revision()); err != nil {
		return err
	}
	if err = SecurityProfilesRemoveLate(snapsup.InstanceName(), snapsup.Revision(), snapsup.Type); err != nil {
		return err
	}
	Set(st, snapsup.InstanceName(), snapst)
	return nil
}
/* aliases v2
aliases v2 implementation uses the following tasks:
* for install/refresh/remove/enable/disable etc
- remove-aliases: remove aliases of a snap from disk and mark them pending
- setup-aliases: (re)creates aliases from snap state, mark them as
not pending
- set-auto-aliases: updates aliases snap state based on the
snap-declaration and current revision info of the snap
* for refresh & when the snap-declaration aliases change without a
new revision
- refresh-aliases: updates aliases snap state and updates them on disk too;
its undo is used generically by other tasks as well
- prune-auto-aliases: used for the special case of automatic
aliases transferred from one snap to another to prune them from
the source snaps to avoid conflicts in later operations
* for alias/unalias/prefer:
- alias: creates a manual alias
- unalias: removes a manual alias
- disable-aliases: disable the automatic aliases of a snap and
removes all manual ones as well
 - prefer-aliases: enables the automatic aliases of a snap after
   disabling any conflicting aliases of other snaps
*/
// doSetAutoAliases computes the automatic aliases for the snap being
// installed/refreshed from its current info, honors the --unaliased flag,
// checks for conflicts and stores the result in the snap state (marked
// pending; setup-aliases applies them on disk later). The previous alias
// state is saved on the task for undo.
func (m *SnapManager) doSetAutoAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	curInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	// --unaliased: start with automatic aliases disabled, remembering
	// the previous flag for undo
	if snapsup.Unaliased {
		t.Set("old-auto-aliases-disabled", snapst.AutoAliasesDisabled)
		snapst.AutoAliasesDisabled = true
	}
	curAliases := snapst.Aliases
	// TODO: implement --prefer logic
	newAliases, err := refreshAliases(st, curInfo, curAliases)
	if err != nil {
		return err
	}
	// fail early if the new alias set clashes with other snaps
	_, err = checkAliasesConflicts(st, snapName, snapst.AutoAliasesDisabled, newAliases, nil)
	if err != nil {
		return err
	}
	t.Set("old-aliases-v2", curAliases)
	// noop, except on first install where we need to set this here
	snapst.AliasesPending = true
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// doRemoveAliases removes the snap's aliases from disk and flags them as
// pending in the snap state so that setup-aliases can recreate them later.
func (m *SnapManager) doRemoveAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	instanceName := snapsup.InstanceName()

	if err := m.backend.RemoveSnapAliases(instanceName); err != nil {
		return err
	}

	// aliases are gone from disk until a later setup-aliases task
	snapst.AliasesPending = true
	Set(st, instanceName, snapst)
	return nil
}
// doSetupAliases (re)creates on disk the aliases recorded in the snap
// state and clears the pending flag.
func (m *SnapManager) doSetupAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()

	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	instanceName := snapsup.InstanceName()

	// apply from scratch (no previous aliases) to the recorded state
	if _, _, err := applyAliasesChange(instanceName, autoDis, nil, snapst.AutoAliasesDisabled, snapst.Aliases, m.backend, doApply); err != nil {
		return err
	}

	snapst.AliasesPending = false
	Set(st, instanceName, snapst)
	return nil
}
// doRefreshAliases recomputes the snap's aliases from its current info
// (e.g. after a snap-declaration change), checks conflicts, applies the
// change on disk unless aliases are pending, and saves the previous
// aliases on the task for undo.
func (m *SnapManager) doRefreshAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	curInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	autoDisabled := snapst.AutoAliasesDisabled
	curAliases := snapst.Aliases
	newAliases, err := refreshAliases(st, curInfo, curAliases)
	if err != nil {
		return err
	}
	_, err = checkAliasesConflicts(st, snapName, autoDisabled, newAliases, nil)
	if err != nil {
		return err
	}
	// only touch the filesystem when aliases are not pending
	if !snapst.AliasesPending {
		if _, _, err := applyAliasesChange(snapName, autoDisabled, curAliases, autoDisabled, newAliases, m.backend, doApply); err != nil {
			return err
		}
	}
	// keep the old aliases for undoRefreshAliases
	t.Set("old-aliases-v2", curAliases)
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// undoRefreshAliases restores the alias state saved as "old-aliases-v2"
// (plus the old auto-aliases-disabled flag when present), reapplies it on
// disk where not pending, and re-enables aliases of other snaps that the
// do-handler had disabled ("other-disabled-aliases"). If restoring would
// now conflict, the affected aliases are reinstated disabled rather than
// failing the undo. It is used generically as the undo of several alias
// tasks (see the aliases v2 comment above).
func (m *SnapManager) undoRefreshAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	var oldAliases map[string]*AliasTarget
	err := t.Get("old-aliases-v2", &oldAliases)
	if err == state.ErrNoState {
		// nothing to do
		return nil
	}
	if err != nil {
		return err
	}
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	curAutoDisabled := snapst.AutoAliasesDisabled
	autoDisabled := curAutoDisabled
	// "old-auto-aliases-disabled" is only set by tasks that changed the flag
	if err = t.Get("old-auto-aliases-disabled", &autoDisabled); err != nil && err != state.ErrNoState {
		return err
	}
	var otherSnapDisabled map[string]*otherDisabledAliases
	if err = t.Get("other-disabled-aliases", &otherSnapDisabled); err != nil && err != state.ErrNoState {
		return err
	}
	// check if the old states creates conflicts now
	_, err = checkAliasesConflicts(st, snapName, autoDisabled, oldAliases, nil)
	if _, ok := err.(*AliasConflictError); ok {
		// best we can do is reinstate with all aliases disabled
		t.Errorf("cannot reinstate alias state because of conflicts, disabling: %v", err)
		oldAliases, _ = disableAliases(oldAliases)
		autoDisabled = true
	} else if err != nil {
		return err
	}
	if !snapst.AliasesPending {
		curAliases := snapst.Aliases
		if _, _, err := applyAliasesChange(snapName, curAutoDisabled, curAliases, autoDisabled, oldAliases, m.backend, doApply); err != nil {
			return err
		}
	}
	snapst.AutoAliasesDisabled = autoDisabled
	snapst.Aliases = oldAliases
	// collect the new states first; they are only written at the end so
	// that conflicting snaps can be left untouched
	newSnapStates := make(map[string]*SnapState, 1+len(otherSnapDisabled))
	newSnapStates[snapName] = snapst
	// if we disabled other snap aliases try to undo that
	conflicting := make(map[string]bool, len(otherSnapDisabled))
	otherCurSnapStates := make(map[string]*SnapState, len(otherSnapDisabled))
	for otherSnap, otherDisabled := range otherSnapDisabled {
		var otherSnapState SnapState
		err := Get(st, otherSnap, &otherSnapState)
		if err != nil {
			return err
		}
		otherCurInfo, err := otherSnapState.CurrentInfo()
		if err != nil {
			return err
		}
		otherCurSnapStates[otherSnap] = &otherSnapState
		autoDisabled := otherSnapState.AutoAliasesDisabled
		if otherDisabled.Auto {
			// automatic aliases of other were disabled, undo that
			autoDisabled = false
		}
		otherAliases := reenableAliases(otherCurInfo, otherSnapState.Aliases, otherDisabled.Manual)
		// check for conflicts taking into account
		// re-enabled aliases
		conflicts, err := checkAliasesConflicts(st, otherSnap, autoDisabled, otherAliases, newSnapStates)
		if _, ok := err.(*AliasConflictError); ok {
			// mark both sides of the conflict to be kept as-is
			conflicting[otherSnap] = true
			for conflictSnap := range conflicts {
				conflicting[conflictSnap] = true
			}
		} else if err != nil {
			return err
		}
		newSnapState := otherSnapState
		newSnapState.Aliases = otherAliases
		newSnapState.AutoAliasesDisabled = autoDisabled
		newSnapStates[otherSnap] = &newSnapState
	}
	// apply non-conflicting other
	for otherSnap, otherSnapState := range otherCurSnapStates {
		if conflicting[otherSnap] {
			// keep as it was
			continue
		}
		newSnapSt := newSnapStates[otherSnap]
		if !otherSnapState.AliasesPending {
			if _, _, err := applyAliasesChange(otherSnap, otherSnapState.AutoAliasesDisabled, otherSnapState.Aliases, newSnapSt.AutoAliasesDisabled, newSnapSt.Aliases, m.backend, doApply); err != nil {
				return err
			}
		}
	}
	for instanceName, snapst := range newSnapStates {
		if conflicting[instanceName] {
			// keep as it was
			continue
		}
		Set(st, instanceName, snapst)
	}
	return nil
}
// doPruneAutoAliases removes the automatic aliases listed in the task's
// "aliases" data from the snap state (and from disk when not pending);
// this is used to prune aliases transferred to another snap and avoid
// conflicts in later operations. The old aliases are saved for undo.
func (m *SnapManager) doPruneAutoAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	// the alias names to prune
	var which []string
	err = t.Get("aliases", &which)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	autoDisabled := snapst.AutoAliasesDisabled
	curAliases := snapst.Aliases
	newAliases := pruneAutoAliases(curAliases, which)
	if !snapst.AliasesPending {
		if _, _, err := applyAliasesChange(snapName, autoDisabled, curAliases, autoDisabled, newAliases, m.backend, doApply); err != nil {
			return err
		}
	}
	t.Set("old-aliases-v2", curAliases)
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// changedAlias is the JSON shape of a single added/removed alias recorded
// in the change's "api-data" for API clients (see aliasesTrace).
type changedAlias struct {
	Snap  string `json:"snap"`
	App   string `json:"app"`
	Alias string `json:"alias"`
}
// aliasesTrace appends the added and removed aliases to the
// "aliases-added"/"aliases-removed" lists in the change's "api-data" so
// that API clients can report what changed.
func aliasesTrace(t *state.Task, added, removed []*backend.Alias) error {
	chg := t.Change()
	var data map[string]interface{}
	err := chg.Get("api-data", &data)
	if err != nil && err != state.ErrNoState {
		return err
	}
	if len(data) == 0 {
		data = make(map[string]interface{})
	}
	curAdded, _ := data["aliases-added"].([]interface{})
	for _, a := range added {
		// a.Target is "snap.app" (or just "snap")
		snap, app := snap.SplitSnapApp(a.Target)
		curAdded = append(curAdded, &changedAlias{
			Snap:  snap,
			App:   app,
			Alias: a.Name,
		})
	}
	data["aliases-added"] = curAdded
	curRemoved, _ := data["aliases-removed"].([]interface{})
	for _, a := range removed {
		snap, app := snap.SplitSnapApp(a.Target)
		curRemoved = append(curRemoved, &changedAlias{
			Snap:  snap,
			App:   app,
			Alias: a.Name,
		})
	}
	data["aliases-removed"] = curRemoved
	chg.Set("api-data", data)
	return nil
}
// doAlias creates a manual alias (task data "alias") for the given app
// target (task data "target"), checks conflicts, applies the change on
// disk (unless aliases are pending) and records the added/removed aliases
// in the change's api-data. The old aliases are saved for undo.
func (m *SnapManager) doAlias(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	var target, alias string
	err = t.Get("target", &target)
	if err != nil {
		return err
	}
	err = t.Get("alias", &alias)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	curInfo, err := snapst.CurrentInfo()
	if err != nil {
		return err
	}
	autoDisabled := snapst.AutoAliasesDisabled
	curAliases := snapst.Aliases
	newAliases, err := manualAlias(curInfo, curAliases, target, alias)
	if err != nil {
		return err
	}
	_, err = checkAliasesConflicts(st, snapName, autoDisabled, newAliases, nil)
	if err != nil {
		return err
	}
	added, removed, err := applyAliasesChange(snapName, autoDisabled, curAliases, autoDisabled, newAliases, m.backend, snapst.AliasesPending)
	if err != nil {
		return err
	}
	if err := aliasesTrace(t, added, removed); err != nil {
		return err
	}
	t.Set("old-aliases-v2", curAliases)
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// doDisableAliases disables the snap's automatic aliases and removes its
// manual ones (via disableAliases), applying the change on disk unless
// pending; the previous flag and aliases are saved on the task for undo.
func (m *SnapManager) doDisableAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	oldAutoDisabled := snapst.AutoAliasesDisabled
	oldAliases := snapst.Aliases
	newAliases, _ := disableAliases(oldAliases)
	added, removed, err := applyAliasesChange(snapName, oldAutoDisabled, oldAliases, autoDis, newAliases, m.backend, snapst.AliasesPending)
	if err != nil {
		return err
	}
	if err := aliasesTrace(t, added, removed); err != nil {
		return err
	}
	t.Set("old-auto-aliases-disabled", oldAutoDisabled)
	snapst.AutoAliasesDisabled = true
	t.Set("old-aliases-v2", oldAliases)
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// doUnalias removes the manual alias named in the task's "alias" data,
// applies the change on disk unless pending and traces it in the
// change's api-data. The previous aliases are saved for undo.
func (m *SnapManager) doUnalias(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	var alias string
	err = t.Get("alias", &alias)
	if err != nil {
		return err
	}
	snapName := snapsup.InstanceName()
	autoDisabled := snapst.AutoAliasesDisabled
	oldAliases := snapst.Aliases
	newAliases, err := manualUnalias(oldAliases, alias)
	if err != nil {
		return err
	}
	added, removed, err := applyAliasesChange(snapName, autoDisabled, oldAliases, autoDisabled, newAliases, m.backend, snapst.AliasesPending)
	if err != nil {
		return err
	}
	if err := aliasesTrace(t, added, removed); err != nil {
		return err
	}
	t.Set("old-aliases-v2", oldAliases)
	snapst.Aliases = newAliases
	Set(st, snapName, snapst)
	return nil
}
// otherDisabledAliases tracks, for the benefit of undo, which aliases of
// another conflicting snap were disabled by the prefer logic.
type otherDisabledAliases struct {
	// Auto records whether prefer had to disable automatic aliases
	Auto bool `json:"auto,omitempty"`
	// Manual records which manual aliases were removed by prefer
	// (alias name -> original target)
	Manual map[string]string `json:"manual,omitempty"`
}
// doPreferAliases re-enables the automatic aliases of the snap, first
// disabling the conflicting aliases of any other snaps; what was disabled
// on other snaps is recorded as "other-disabled-aliases" so the generic
// undo can restore it. A conflict with a snap command namespace cannot be
// remedied and fails the task.
func (m *SnapManager) doPreferAliases(t *state.Task, _ *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snapsup, snapst, err := snapSetupAndState(t)
	if err != nil {
		return err
	}
	instanceName := snapsup.InstanceName()
	if !snapst.AutoAliasesDisabled {
		// already enabled, nothing to do
		return nil
	}
	curAliases := snapst.Aliases
	aliasConflicts, err := checkAliasesConflicts(st, instanceName, autoEn, curAliases, nil)
	conflErr, isConflErr := err.(*AliasConflictError)
	if err != nil && !isConflErr {
		return err
	}
	if isConflErr && conflErr.Conflicts == nil {
		// it's a snap command namespace conflict, we cannot remedy it
		return conflErr
	}
	// proceed to disable conflicting aliases as needed
	// before re-enabling instanceName aliases
	otherSnapStates := make(map[string]*SnapState, len(aliasConflicts))
	otherSnapDisabled := make(map[string]*otherDisabledAliases, len(aliasConflicts))
	for otherSnap := range aliasConflicts {
		var otherSnapState SnapState
		err := Get(st, otherSnap, &otherSnapState)
		if err != nil {
			return err
		}
		otherAliases, disabledManual := disableAliases(otherSnapState.Aliases)
		added, removed, err := applyAliasesChange(otherSnap, otherSnapState.AutoAliasesDisabled, otherSnapState.Aliases, autoDis, otherAliases, m.backend, otherSnapState.AliasesPending)
		if err != nil {
			return err
		}
		if err := aliasesTrace(t, added, removed); err != nil {
			return err
		}
		var otherDisabled otherDisabledAliases
		otherDisabled.Manual = disabledManual
		otherSnapState.Aliases = otherAliases
		// disable automatic aliases as needed
		if !otherSnapState.AutoAliasesDisabled && len(otherAliases) != 0 {
			// record that we did disable automatic aliases
			otherDisabled.Auto = true
			otherSnapState.AutoAliasesDisabled = true
		}
		otherSnapDisabled[otherSnap] = &otherDisabled
		otherSnapStates[otherSnap] = &otherSnapState
	}
	// now re-enable this snap's aliases on disk
	added, removed, err := applyAliasesChange(instanceName, autoDis, curAliases, autoEn, curAliases, m.backend, snapst.AliasesPending)
	if err != nil {
		return err
	}
	if err := aliasesTrace(t, added, removed); err != nil {
		return err
	}
	for otherSnap, otherSnapState := range otherSnapStates {
		Set(st, otherSnap, otherSnapState)
	}
	if len(otherSnapDisabled) != 0 {
		t.Set("other-disabled-aliases", otherSnapDisabled)
	}
	t.Set("old-auto-aliases-disabled", true)
	t.Set("old-aliases-v2", curAliases)
	snapst.AutoAliasesDisabled = false
	Set(st, instanceName, snapst)
	return nil
}
// changeReadyUpToTask returns whether all of the other tasks in the
// task's change are Ready (the task itself is ignored).
func changeReadyUpToTask(task *state.Task) bool {
	selfID := task.ID()
	for _, other := range task.Change().Tasks() {
		// skip the task itself
		if other.ID() == selfID {
			continue
		}
		if !other.Status().Ready() {
			return false
		}
	}
	return true
}
// refreshedSnaps returns the instance names of the snaps successfully refreshed
// in the last batch of refreshes before the given (re-refresh) task; failed is
// true if any of the snaps failed to refresh.
//
// It does this by advancing through the given task's change's tasks, keeping
// track of the instance names from the first SnapSetup in every lane, stopping
// when finding the given task, and resetting things when finding a different
// re-refresh task (that indicates the end of a batch that isn't the given one).
func refreshedSnaps(reTask *state.Task) (snapNames []string, failed bool) {
	// NOTE nothing requires reTask to be a check-rerefresh task, nor even to be in
	// a refresh-ish change, but it doesn't make much sense to call this otherwise.
	tid := reTask.ID()
	// lane number -> instance name; "" marks a lane that failed
	laneSnaps := map[int]string{}
	// change.Tasks() preserves the order tasks were added, otherwise it all falls apart
	for _, task := range reTask.Change().Tasks() {
		if task.ID() == tid {
			// we've reached ourselves; we don't care about anything beyond this
			break
		}
		if task.Kind() == "check-rerefresh" {
			// we've reached a previous check-rerefresh (but not ourselves).
			// Only snaps in tasks after this point are of interest.
			laneSnaps = map[int]string{}
		}
		lanes := task.Lanes()
		if len(lanes) != 1 {
			// can't happen, really
			continue
		}
		lane := lanes[0]
		if lane == 0 {
			// not really a lane
			continue
		}
		if task.Status() != state.DoneStatus {
			// ignore non-successful lane (1)
			laneSnaps[lane] = ""
			continue
		}
		if _, ok := laneSnaps[lane]; ok {
			// ignore lanes we've already seen (including ones explicitly ignored in (1))
			continue
		}
		var snapsup SnapSetup
		if err := task.Get("snap-setup", &snapsup); err != nil {
			// task carries no snap-setup; nothing to record for this lane
			continue
		}
		laneSnaps[lane] = snapsup.InstanceName()
	}
	snapNames = make([]string, 0, len(laneSnaps))
	for _, name := range laneSnaps {
		if name == "" {
			// the lane was unsuccessful
			failed = true
			continue
		}
		snapNames = append(snapNames, name)
	}
	return snapNames, failed
}
// reRefreshSetup holds the necessary details to re-refresh snaps that need it
// (stored as "rerefresh-setup" on the check-rerefresh task).
type reRefreshSetup struct {
	// UserID is the user on whose behalf the re-refresh is performed
	UserID int `json:"user-id,omitempty"`
	*Flags
}
// reRefreshUpdateMany exists just to make testing simpler
var reRefreshUpdateMany = updateManyFiltered

// reRefreshFilter is an updateFilter that returns whether the given update
// needs a re-refresh because of further epoch transitions available.
func reRefreshFilter(update *snap.Info, snapst *SnapState) bool {
	cur, err := snapst.CurrentInfo()
	if err != nil {
		return false
	}
	// a differing epoch means another refresh step is still needed
	return !update.Epoch.Equal(&cur.Epoch)
}

// reRefreshRetryTimeout is how long doCheckReRefresh waits before retrying
// while other tasks of its change are still running.
var reRefreshRetryTimeout = time.Second / 2
// doCheckReRefresh runs once all other tasks of its change are ready: it
// prunes the refresh candidates of the just-refreshed snaps, reconsiders
// validation-set tracking (possibly scheduling reverts) when any snap
// failed, and schedules follow-up refreshes for snaps that stopped at an
// intermediate epoch (see reRefreshFilter).
func (m *SnapManager) doCheckReRefresh(t *state.Task, tomb *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	// this task must be the last one of the change
	if numHaltTasks := t.NumHaltTasks(); numHaltTasks > 0 {
		logger.Panicf("Re-refresh task has %d tasks waiting for it.", numHaltTasks)
	}
	// wait (via retry) until every other task of the change is ready
	if !changeReadyUpToTask(t) {
		return &state.Retry{After: reRefreshRetryTimeout, Reason: "pending refreshes"}
	}
	snaps, failed := refreshedSnaps(t)
	if len(snaps) > 0 {
		if err := pruneRefreshCandidates(st, snaps...); err != nil {
			return err
		}
	}
	// if any snap failed to refresh, reconsider validation set tracking
	if failed {
		tasksets, err := maybeRestoreValidationSetsAndRevertSnaps(st, snaps)
		if err != nil {
			return err
		}
		if len(tasksets) > 0 {
			// reverts are needed; schedule them and finish this task
			chg := t.Change()
			for _, taskset := range tasksets {
				chg.AddAll(taskset)
			}
			st.EnsureBefore(0)
			t.SetStatus(state.DoneStatus)
			return nil
		}
		// else - validation sets tracking got restored or wasn't affected, carry on
	}
	if len(snaps) == 0 {
		// nothing to do (maybe everything failed)
		return nil
	}
	// update validation sets stack: there are two possibilities
	// - if maybeRestoreValidationSetsAndRevertSnaps restored previous tracking
	// or refresh succeeded and it hasn't changed then this is a noop
	// (AddCurrentTrackingToValidationSetsStack ignores tracking if identical
	// to the topmost stack entry);
	// - if maybeRestoreValidationSetsAndRevertSnaps kept new tracking
	// because its constraints were met even after partial failure or
	// refresh succeeded and tracking got updated, then
	// this creates a new copy of validation-sets tracking data.
	if AddCurrentTrackingToValidationSetsStack != nil {
		if err := AddCurrentTrackingToValidationSetsStack(st); err != nil {
			return err
		}
	}
	var re reRefreshSetup
	if err := t.Get("rerefresh-setup", &re); err != nil {
		return err
	}
	chg := t.Change()
	// only snaps with pending epoch transitions get picked up (reRefreshFilter)
	updated, tasksets, err := reRefreshUpdateMany(tomb.Context(nil), st, snaps, re.UserID, reRefreshFilter, re.Flags, chg.ID())
	if err != nil {
		return err
	}
	if len(updated) == 0 {
		t.Logf("No re-refreshes found.")
	} else {
		t.Logf("Found re-refresh for %s.", strutil.Quoted(updated))
		for _, taskset := range tasksets {
			chg.AddAll(taskset)
		}
		st.EnsureBefore(0)
	}
	t.SetStatus(state.DoneStatus)
	return nil
}
// doConditionalAutoRefresh runs phase 2 of a gated auto-refresh for the
// snaps that remain to be refreshed (per snapsToRefresh), attaching the
// resulting tasksets to the original auto-refresh change after this task.
func (m *SnapManager) doConditionalAutoRefresh(t *state.Task, tomb *tomb.Tomb) error {
	st := t.State()
	st.Lock()
	defer st.Unlock()
	snaps, err := snapsToRefresh(t)
	if err != nil {
		return err
	}
	if len(snaps) == 0 {
		logger.Debugf("refresh gating: no snaps to refresh")
		return nil
	}
	tss, err := autoRefreshPhase2(context.TODO(), st, snaps, t.Change().ID())
	if err != nil {
		return err
	}
	// update the map of refreshed snaps on the task, this affects
	// conflict checks (we don't want to conflict on snaps that were held and
	// won't be refreshed) - see conditionalAutoRefreshAffectedSnaps().
	newToUpdate := make(map[string]*refreshCandidate, len(snaps))
	for _, candidate := range snaps {
		newToUpdate[candidate.InstanceName()] = candidate
	}
	t.Set("snaps", newToUpdate)
	// update original auto-refresh change
	chg := t.Change()
	for _, ts := range tss {
		ts.WaitFor(t)
		chg.AddAll(ts)
	}
	t.SetStatus(state.DoneStatus)
	st.EnsureBefore(0)
	return nil
}
// maybeRestoreValidationSetsAndRevertSnaps restores validation-sets to their
// previous state using validation sets stack if there are any enforced
// validation sets and - if necessary - creates tasksets to revert some or all
// of the refreshed snaps to their previous revisions to satisfy the restored
// validation sets tracking. It returns nil tasksets when nothing needs to be
// reverted.
var maybeRestoreValidationSetsAndRevertSnaps = func(st *state.State, refreshedSnaps []string) ([]*state.TaskSet, error) {
	enforcedSets, err := EnforcedValidationSets(st)
	if err != nil {
		return nil, err
	}
	if enforcedSets == nil {
		// no enforced validation sets, nothing to do
		return nil, nil
	}
	installedSnaps, ignoreValidation, err := InstalledSnaps(st)
	if err != nil {
		return nil, err
	}
	if err := enforcedSets.CheckInstalledSnaps(installedSnaps, ignoreValidation); err == nil {
		// validation sets are still correct, nothing to do
		return nil, nil
	}
	// restore previous validation sets tracking state
	if err := RestoreValidationSetsTracking(st); err != nil {
		return nil, fmt.Errorf("cannot restore validation sets: %v", err)
	}
	// no snaps were refreshed, after restoring validation sets tracking
	// there is nothing else to do
	if len(refreshedSnaps) == 0 {
		return nil, nil
	}
	// check installed snaps again against restored validation-sets.
	// this may fail which is fine, but it tells us which snaps are
	// at invalid revisions and need reverting.
	// note: we need to fetch enforced sets again because of RestoreValidationSetsTracking.
	enforcedSets, err = EnforcedValidationSets(st)
	if err != nil {
		return nil, err
	}
	if enforcedSets == nil {
		return nil, fmt.Errorf("internal error: no enforced validation sets after restoring from the stack")
	}
	err = enforcedSets.CheckInstalledSnaps(installedSnaps, ignoreValidation)
	if err == nil {
		// all fine after restoring validation sets: this can happen if previous
		// validation sets only required a snap (regardless of its revision), then
		// after update they require a specific snap revision, so after restoring
		// we are back with the good state.
		return nil, nil
	}
	verr, ok := err.(*snapasserts.ValidationSetsValidationError)
	if !ok {
		return nil, err
	}
	if len(verr.WrongRevisionSnaps) == 0 {
		// if we hit ValidationSetsValidationError but it's not about wrong revisions,
		// then something is really broken (we shouldn't have invalid or missing required
		// snaps at this point).
		// (typo fix: "unsuccesfull" -> "unsuccessful")
		return nil, fmt.Errorf("internal error: unexpected validation error of installed snaps after unsuccessful refresh: %v", verr)
	}
	// revert some or all snaps
	var tss []*state.TaskSet
	for _, snapName := range refreshedSnaps {
		if verr.WrongRevisionSnaps[snapName] != nil {
			// XXX: should we be extra paranoid and use RevertToRevision with
			// the specific revision from verr.WrongRevisionSnaps?
			ts, err := Revert(st, snapName, Flags{RevertStatus: NotBlocked})
			if err != nil {
				return nil, err
			}
			tss = append(tss, ts)
		}
	}
	return tss, nil
}
// InjectTasks makes all the halt tasks of the mainTask wait for extraTasks;
// extraTasks join the same lane and change as the mainTask.
func InjectTasks(mainTask *state.Task, extraTasks *state.TaskSet) {
	// join the main task's lanes, treating the implicit single lane 0 as none
	if lanes := mainTask.Lanes(); len(lanes) != 1 || lanes[0] != 0 {
		for _, l := range lanes {
			extraTasks.JoinLane(l)
		}
	}

	// Change shouldn't normally be nil, except for cases where
	// this helper is used before tasks are added to a change.
	if chg := mainTask.Change(); chg != nil {
		chg.AddAll(extraTasks)
	}

	// make all halt tasks of the mainTask wait on extraTasks
	for _, halt := range mainTask.HaltTasks() {
		halt.WaitAll(extraTasks)
	}

	// make the extra tasks wait for main task
	extraTasks.WaitFor(mainTask)
}
// InjectAutoConnect injects an auto-connect task for the given snap right
// after mainTask (see InjectTasks) and logs that it did so.
func InjectAutoConnect(mainTask *state.Task, snapsup *SnapSetup) {
	summary := fmt.Sprintf(i18n.G("Automatically connect eligible plugs and slots of snap %q"), snapsup.InstanceName())
	autoConnect := mainTask.State().NewTask("auto-connect", summary)
	autoConnect.Set("snap-setup", snapsup)
	InjectTasks(mainTask, state.NewTaskSet(autoConnect))
	mainTask.Logf("added auto-connect task")
}
// dirMigrationOptions carries the hidden snap data dir migration status
// used to derive dirs.SnapDirOptions (see getSnapDirOpts).
type dirMigrationOptions struct {
	// UseHidden states whether the user has requested that the hidden data dir be used
	UseHidden bool

	// MigratedToHidden states whether the data has been migrated to the hidden dir
	MigratedToHidden bool
}
// getSnapDirOpts returns the snap dir options based on the current migration status.
func (o *dirMigrationOptions) getSnapDirOpts() *dirs.SnapDirOptions {
	return &dirs.SnapDirOptions{HiddenSnapDataDir: o.MigratedToHidden}
}
// GetSnapDirOpts returns the options required to get the correct snap dir.
// The state must be locked by the caller (see getDirMigrationOpts).
var GetSnapDirOpts = func(st *state.State, name string) (*dirs.SnapDirOptions, error) {
	var snapst SnapState
	// a missing snap state is fine: migration flags then default to false
	if err := Get(st, name, &snapst); err != nil && !errors.Is(err, state.ErrNoState) {
		return nil, err
	}
	hiddenOpts, err := getDirMigrationOpts(st, &snapst, nil)
	if err != nil {
		return nil, err
	}
	return hiddenOpts.getSnapDirOpts(), nil
}
// getDirMigrationOpts checks if the feature flag is set and if the snap data
// has been migrated, first checking the SnapSetup (if not nil) and then
// the SnapState. The state must be locked by the caller.
var getDirMigrationOpts = func(st *state.State, snapst *SnapState, snapsup *SnapSetup) (*dirMigrationOptions, error) {
	tr := config.NewTransaction(st)
	hiddenDir, err := features.Flag(tr, features.HiddenSnapDataHomeDir)
	if err != nil {
		return nil, fmt.Errorf("cannot read feature flag %q: %w", features.HiddenSnapDataHomeDir, err)
	}
	opts := &dirMigrationOptions{UseHidden: hiddenDir}
	// the persisted state is the baseline...
	if snapst != nil {
		opts.MigratedToHidden = snapst.MigratedHidden
	}
	// it was migrated during this change (might not be in the state yet)
	if snapsup != nil {
		switch {
		case snapsup.MigratedHidden && snapsup.MigratedExposed:
			// should never happen except for programmer error
			return nil, fmt.Errorf("internal error: migration was done and reversed in same change without updating migration flags")
		case snapsup.MigratedHidden:
			opts.MigratedToHidden = true
		case snapsup.MigratedExposed:
			opts.MigratedToHidden = false
		}
	}
	return opts, nil
}
|
// hasOtherInstances checks whether there are other instances of the snap, be it
|
<|file_name|>main.rs<|end_file_name|><|fim▁begin|>#![allow(unused_must_use)]
extern crate pad;
#[macro_use]
extern crate quicli;
extern crate reqwest;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
extern crate term;
use pad::PadStr;
use reqwest::Url;
use std::process;
use std::str;
use quicli::prelude::*;
#[macro_use]
mod macros;
// Command-line interface parsed by structopt. NOTE: the `///` doc comments
// below are the user-visible --help text, so they are part of the CLI
// behavior and must not be edited casually.
#[derive(Debug, StructOpt)]
#[structopt(name = "cargo")]
enum Cli {
    #[structopt(name = "ssearch", about = "cargo search on steroids")]
    Ssearch {
        /// how many packages to display
        #[structopt(long = "limit", short = "l", default_value = "10")]
        limit: usize,
        /// the crates.io search result page to display
        #[structopt(long = "page", default_value = "1")]
        page: usize,
        /// quiet output, display only crate, version and downloads
        #[structopt(long = "quiet", short = "q")]
        quiet: bool,
        /// sort by recent downloads instead of overall downloads
        #[structopt(long = "recent", short = "r")]
        recent: bool,
        /// query string for crates.io
        query: String,
    },
}
// NOTE(review): Args looks like a leftover of a docopt-style CLI; it is not
// referenced anywhere in the visible code — confirm before removing.
#[derive(Debug, Deserialize)]
struct Args {
    flag_info: bool,
    arg_query: String,
}
// Wire format of the crates.io search API response (see query_crates_io).
#[derive(Debug, Serialize, Deserialize)]
struct Meta {
    // total number of crates matching the query, across all pages
    total: i32,
}

#[derive(Debug, Serialize, Deserialize)]
struct Response {
    crates: Vec<EncodableCrate>,
    meta: Meta,
}

// structs from crates.io backend
#[derive(Debug, Serialize, Deserialize)]
struct EncodableCrate {
    id: String,
    name: String,
    updated_at: String,
    versions: Option<Vec<i32>>,
    created_at: String,
    downloads: i32,
    max_version: String,
    description: Option<String>,
    homepage: Option<String>,
    documentation: Option<String>,
    keywords: Option<Vec<String>>,
    license: Option<String>,
    repository: Option<String>,
    links: CrateLinks,
}

#[derive(Debug, Serialize, Deserialize)]
struct CrateLinks {
    version_downloads: String,
    versions: Option<String>,
    owners: Option<String>,
    reverse_dependencies: String,
}
/// Queries the crates.io search API.
///
/// `query` is the search string, `page`/`per_page` control paging, and
/// `recent` switches the sort from overall to recent downloads. Returns the
/// total number of matches together with the crates of the requested page.
fn query_crates_io(
    query: &str,
    page: usize,
    per_page: usize,
    recent: bool,
) -> Result<(i32, Vec<EncodableCrate>)> {
    let sort = if recent {
        "recent-downloads"
    } else {
        "downloads"
    };
    let url = Url::parse_with_params(
        "https://crates.io/api/v1/crates",
        &[
            ("q", query),
            ("page", &page.to_string()),
            ("per_page", &per_page.to_string()),
            // `sort` is already a &str; no extra borrow needed (clippy: needless_borrow)
            ("sort", sort),
        ],
    )?;
    let body = reqwest::get(url)?.text()?;
    let data: Response = serde_json::from_str(&body)?;
    Ok((data.meta.total, data.crates))
}
fn show_crate(t: &mut Box<term::StdoutTerminal>, cr: &EncodableCrate, quiet: bool, max_len: usize) {
p_green!(t, "{}", cr.name.pad_to_width(max_len));
p_white!(
t,
" = \"{}\" \t(downloads: {})\n",
cr.max_version,
cr.downloads
);
if !quiet {
cr.description
.as_ref()
.map(|description| p_yellow!(t, " -> {}\n", description.clone().trim()));
cr.documentation
.as_ref()
.map(|documentation| p_white!(t, " docs: {}\n", documentation));
cr.homepage
.as_ref()
.map(|homepage| p_white!(t, " home: {}\n", homepage));
p_white!(t, "\n");
}
}<|fim▁hole|>main!(|args: Cli| {
let Cli::Ssearch {
query,
page,
limit,
quiet,
recent,
} = args;
let mut t = term::stdout().unwrap();
// TODO: Add decoding of updated_at and allow to use it for sorting
let (total, crates) = query_crates_io(&query, page, limit, recent).unwrap_or_else(|e| {
p_red!(t, "[error]: {}.\n", e);
t.reset().unwrap();
process::exit(1)
});
if total == 0 {
p_white!(t, "No crate matching \"{}\" has been found.\n", query);
t.reset().unwrap();
process::exit(0);
}
p_white!(
t,
"Displaying {} crates from page {} out of the {} found.\n\n",
crates.len(),
page,
total,
);
let max_len = (&crates).iter().map(|ref cr| cr.name.len()).max().unwrap();
for cr in &crates {
show_crate(&mut t, &cr, quiet, max_len);
}
t.reset().unwrap();
});<|fim▁end|>
| |
<|file_name|>ViewQueueHelp.js<|end_file_name|><|fim▁begin|>"use strict";
// Base help/tutorial entry shown in the help queue.
// (Removed a stray fill-in-the-middle marker that made the file invalid JS,
// and an unused local `_getText` helper.)
function HelpTutorial()
{
	this.getName = function(){ return "HelpTutorial_Base"; };
	this.getImageId = function(){ return "button_help"; };
}
// Help entry for a discovered resource or researched building: `name` keys
// the text repository, `image` keys the image library.
function HelpTutorialBuilding(name, image)
{
	var entryName = name;
	var entryImage = image;
	this.getName = function(){ return entryName; };
	this.getImageId = function(){ return entryImage; };
}
function HelpQueueData(colonyState)
{
let queue = [new HelpTutorial()];
let available = [];
let discovery = colonyState.getDiscovery();
for(let i = 0; i < discovery.length; i++)
{
if(discovery[i].startsWith("Resource_"))
{
queue.push(new HelpTutorialBuilding(discovery[i], discovery[i]));
}
}
let technology = colonyState.getTechnology();
for(let i = 0; i < technology.length; i++)
{
let item = PrototypeLib.get(technology[i]);
available.push(new HelpTutorialBuilding(technology[i], item.getBuildingImageId()));
}
QueueData.call(this, queue, available);
this.getInfo = function(item)
{
let text = "";
if(item != null)
{
let imgSize = GetImageSize(ImagesLib.getImage(item.getImageId()));
text += '<div class="queueInfo">';
if(item.getText != undefined)
{
text += '<div class="queueInfoTitle" style="height: ' + (imgSize.height + 5) + 'px;">';
text += '<img class="queueInfoTitleImage" style="height: ' + imgSize.height + 'px;" src="' + ImagesLib.getFileName(item.getImageId()) + '">';
text += '<div class="queueInfoTitleData queueInfoTitleName">' + TextRepository.get(item.getName()) + '</div>';
text += '</div>';
text += '<div class="queueInfoDetails">' + TextRepository.get(item.getName() + "Description") + '</div>';
}
else
{
let baseImgSize = GetImageSize(ImagesLib.getImage("baseTile"));
text += '<div class="queueInfoTitle" style="height: ' + (imgSize.height + 5) + 'px;">';
text += '<div style="float: left; height: ' + imgSize.height + 'px; background: url(\'' + ImagesLib.getFileName("baseTile") + '\') no-repeat; background-position: 0 ' + (imgSize.height - baseImgSize.height) + 'px;">';
text += '<img style="height: ' + imgSize.height + 'px; border-size: 0;" src="' + ImagesLib.getFileName(item.getImageId()) + '">';
text += '</div>';
text += '<div class="queueInfoTitleData">';
text += '<div class="queueInfoTitleName">' + TextRepository.get(item.getName()) + '</div>';
text += '<div class="queueInfoTitleDescription">' + TextRepository.get(item.getName() + "Description") + '</div>';
text += '</div>';
text += '</div>';
text += '<div class="queueInfoDetails">';
let proto = PrototypeLib.get(item.getName());
text += '<table>';
text += '<tr><td class="tableMainColumn">' + TextRepository.get("TerrainLayer") + ':</td><td></td><td>' + TextRepository.get(proto.getTerrainLayer()) + '</td></tr>';
let list;
let listItem;
if(proto.getBuildingTime() > 0 || Object.keys(proto.getBuildingCost()).length > 0)
{
//text += '<tr><td>' + TextRepository.get("BuildingTitle") + '</td></tr>';
text += '<tr><td>' + TextRepository.get("BuildingTime") + ':</td><td class="tableDataRight">' + proto.getBuildingTime() + '</td><td>' + TextRepository.get("TimeUnit") + '</td></tr>';
list = proto.getBuildingCost();
if(Object.keys(list).length > 0)
{
text += '<tr><td>' + TextRepository.get("BuildingCost") + ':</td></tr>';
for (listItem in list)
{
if(list.hasOwnProperty(listItem))
{
text += '<tr><td class="tableIndentation">' + TextRepository.get(listItem) + '</td><td class="tableDataRight">' + list[listItem] + '</td>';
if(list[listItem] > colonyState.getProduced(listItem))
{
text += '<td class="colorError">' + TextRepository.get("unavailable") + '</td>';
}
text += '</tr>';
}
}
}
}
if(proto.getRequiredResource() != null)
{
text += '<tr><td>' + TextRepository.get("Requirements") + ':</td><td>' + TextRepository.get(proto.getRequiredResource()) + '</td></tr>';
}
list = proto.getCapacity();
if(Object.keys(list).length > 0)
{
text += '<tr><td>' + TextRepository.get("BuildingCapacity") + ':</td></tr>';
for (listItem in list)
{
if(list.hasOwnProperty(listItem))
{
text += '<tr><td class="tableIndentation">' + TextRepository.get(listItem) + '</td><td class="tableDataRight">' + list[listItem] + '</td></tr>';
}
}
}
if((Object.keys(proto.getConsumption()).length +
Object.keys(proto.getProduction()).length +
Object.keys(proto.getProductionWaste()).length) > 0)
{
//text += '<tr><td>' + TextRepository.get("ProductionTitle") + '</td></tr>';
list = proto.getConsumption();
if(Object.keys(list).length > 0)
{
text += '<tr><td>' + TextRepository.get("BuildingConsumption") + ':</td></tr>';
for (listItem in list)
{
if(list.hasOwnProperty(listItem))
{
text += '<tr><td class="tableIndentation">' + TextRepository.get(listItem) + '</td><td class="tableDataRight">' + list[listItem] + '</td></tr>';
}
}
}
list = proto.getProduction();
if(Object.keys(list).length > 0)
{
text += '<tr><td>' + TextRepository.get("BuildingProduction") + ':</td></tr>';
for (listItem in list)
{
if(list.hasOwnProperty(listItem))
{
text += '<tr><td class="tableIndentation">' + TextRepository.get(listItem) + '</td><td class="tableDataRight">' + list[listItem] + '</td></tr>';
}
}
}
list = proto.getProductionWaste();
if(Object.keys(list).length > 0)
{
text += '<tr><td>' + TextRepository.get("BuildingWaste") + ':</td></tr>';
for (listItem in list)
{
if(list.hasOwnProperty(listItem))
{
text += '<tr><td class="tableIndentation">' + TextRepository.get(listItem) + '</td><td class="tableDataRight">' + list[listItem] + '</td></tr>';
}
}
}
}
text += '</table>';
text += '</div>';
}
text += '</div>';
}
return text;
};
this.isSortable = function() { return false; };
this.getTitle = function() { return "HelpTitle"; };
this.getQueueTitle = function() { return "HelpBase"; };
this.getAvailableTitle = function() { return "Buildings"; };
}
HelpQueueData.inherits(QueueData);<|fim▁end|>
|
this.getText = _getText;
|
<|file_name|>watcher_mac.py<|end_file_name|><|fim▁begin|>import subprocess
import sys
import os
import time
from collections import namedtuple
sys.path.append(os.path.join(os.getcwd(), "src"))
from utils import settings
from utils import logger
settings.initialize('watcher')
original_plist = '/opt/TopPatch/agent/daemon/com.toppatch.agent.plist'
osx_plist = '/System/Library/LaunchDaemons/com.toppatch.agent.plist'
daemon_label = 'com.toppatch.agent'
cp_command = ['/bin/cp', original_plist, osx_plist]
list_command = ['/bin/launchctl', 'list']
load_command = ['/bin/launchctl', 'load', '-w', osx_plist]
unload_command = ['/bin/launchctl', 'unload', '-w', osx_plist]
start_command = ['/bin/launchctl', 'start', daemon_label]
stop_command = ['/bin/launchctl', 'stop', daemon_label]
check_in_seconds = 60
def start_agent():
result = False
try:
process = subprocess.Popen(start_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent started.')
result = True
elif 'No such process' in error_output:
logger.log('Agent not found.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
return result
def restart_agent():
try:
process = subprocess.Popen(stop_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent has restarted.')
elif 'No such process' in error_output:
logger.log('Agent not found. Nothing to restart.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not start agent.", logger.LogLevel.Error)
logger.log_exception(e)
def load_agent():
try:
process = subprocess.Popen(load_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent loaded.')
elif 'Already loaded' in error_output:
logger.log('Agent is already loaded.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
def unload_agent():
try:
process = subprocess.Popen(unload_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
if raw_output == '' and error_output == '':
logger.log('Agent unloaded.')
elif 'Error unloading' in error_output:
logger.log('Agent is not loaded/installed.')
else:
logger.log('Unknown output: "%s"' % error_output)
except Exception as e:<|fim▁hole|>
logger.log("Could not load agent.", logger.LogLevel.Error)
logger.log_exception(e)
AgentStatus = namedtuple('AgentStats', ['loaded', 'running'])
def agent_running_stats():
ps_info = []
running = False
loaded = False
process = subprocess.Popen(list_command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
raw_output, error_output = process.communicate()
for line in raw_output.splitlines():
pid, run, pname = line.split('\t')
ps_info.append((pname, run, pid))
for p in ps_info:
if daemon_label == p[0]:
# p[1] can either be:
# : '0' meaning not running.
# : '-' meaning its running.
loaded = True
if p[1] == '-':
running = True
break
elif p[1] == '0':
running = False
status = AgentStatus(loaded, running)
logger.log(str(status), logger.LogLevel.Debug)
return status
if __name__ == '__main__':
logger.log("Starting watcher daemon.")
while True:
time.sleep(check_in_seconds)
agent_status = agent_running_stats()
if agent_status.loaded:
if agent_status.running:
logger.log("Agent is running.", logger.LogLevel.Debug)
continue
else:
if not start_agent():
load_agent()
else:
load_agent()<|fim▁end|>
| |
<|file_name|>classExclusion.d.ts<|end_file_name|><|fim▁begin|>/**
* @license
* Copyright 2013 Palantir Technologies, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import * as ts from "typescript";
import { Location, Privacy } from "../completedDocsRule";
import { Exclusion } from "./exclusion";
export interface IClassExclusionDescriptor {
locations?: Location[];
privacies?: Privacy[];
}
export declare class ClassExclusion extends Exclusion<IClassExclusionDescriptor> {
readonly locations: Set<Location>;
readonly privacies: Set<Privacy>;
excludes(node: ts.Node): boolean;
private shouldLocationBeDocumented(node);<|fim▁hole|><|fim▁end|>
|
private shouldPrivacyBeDocumented(node);
}
|
<|file_name|>findNoteCounts.py<|end_file_name|><|fim▁begin|># Import the Evernote client
from evernote.api.client import EvernoteClient
# Import the Evernote note storetypes to get note datatypes
# to properly get note/tag counts (note filter)
import evernote.edam.notestore.ttypes as NoteStoreTypes
# Define access token either:
# Developer Tokens (https://dev.evernote.com/doc/articles/dev_tokens.php)
# or OAuth (https://dev.evernote.com/doc/articles/authentication.php)
access_token = "insert dev or oauth token here"
# Setup the client
client = EvernoteClient(token = access_token, sandbox = True)
# Get note store object
note_store = client.get_note_store()
# Create note filter object
note_filter = NoteStoreTypes.NoteFilter()
# Set note filter search grammer to get notes created in the last 2 days
note_filter.words = "created:day-2"
# Uncommend the following line to set note filter tag GUIDs
#note_filter.tagGuids = ["GUID of tag1", "GUID of tag 2", "...."]
# Set note filter order to descending
note_filter.ascending = False
# Set note filter inative attribute to False (will search only active notes)
# setting this value to True will only return search results that are in the trash
note_filter.inactive = False
# Uncomment the following line to set note time zone of the search to 'America/Los_Angeles'
#note_filter.timeZone = "America/Los_Angeles"
# Uncomment the following line to set note filter emphasized attribute to additional
# 'wish list' search grammer to be used in conjunction with the orinigal search query to
# highlight search results
#note_filter.emphasized = "any: tag:cool -tag:uncool"
# Uncomment the following line to set note filter includeAllReadableNotebooks attribute
# to include all readable business notebooks in a search
# search must be performed on a business note store with a business auth token<|fim▁hole|># (Boolean) Include note/tags that are in the trash in your note counts
include_trash = True
# Returns an object which maps the number of notes captured by the filter to the corresponding
# notebook GUID
note_counts = note_store.findNoteCounts( note_filter, include_trash )
if note_counts.notebookCounts != None:
print "Found results from %s notebooks" % len(note_counts.notebookCounts)
for notebook in note_counts.notebookCounts:
print " Notebook with GUID %s has %s note(s) that match the filter" % (notebook, note_counts.notebookCounts[notebook])
if note_counts.tagCounts != None:
print "Found results from %s tags" % len(note_counts.notebookCounts)
for tag in note_counts.tagCounts:
print " Tag with GUID %s has %s note(s) that match the filter" % (tag, note_counts.tagCounts[tag])
if not note_counts.tagCounts and not note_counts.notebookCounts:
print "No results"<|fim▁end|>
|
#note_filter.includeAllReadableNotebooks=True
|
<|file_name|>setup_template.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
from setuptools import setup, find_packages
import sys
extra_install = []
if sys.version_info <= (3,1):
extra_install.append('futures')
<|fim▁hole|> extra_install.append('pysha3')
setup(
name="moneywagon",
version='{{ version }}',
description='Next Generation Cryptocurrency Platform',
long_description=open('README.md').read(),
author='Chris Priest',
author_email='[email protected]',
url='https://github.com/priestc/moneywagon',
packages=find_packages(),
scripts=['bin/moneywagon'],
include_package_data=True,
license='LICENSE',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
],
install_requires=[
'requests',
'tabulate',
'base58',
'pytz',
'arrow',
'bitcoin',
'beautifulsoup4'
] + extra_install
)<|fim▁end|>
|
if sys.version_info <= (3,6):
|
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>r"""
Main classes of OpenPNM
=======================
This module contains the main classes from which all other major objects
(Network, Geometry, Physics, Phase, and Algorithm) derive.
The Base class
--------------
The ``Base`` class is a ``dict`` that has added methods for indexing the pores
and throats, applying labels, and managing the stored data. All OpenPNM
object inherit from ``Base`` so possess these methods.
----
``Base`` objects, Networks, Phase, Algorithms, are assigned to all locations
in the domain. The ``Subdomain`` class is a direct descendent of ``Base``
which has the added ability to be assigned to a subset of the domain. Objects<|fim▁hole|>
Boss objects refer to the Full Domain object it is associated with. For
Geomery objects this is the Network, and for Physics objects this is the
Phase that was specified during instantiation.
The associations between an object and it's boss are tracked using labels in
the boss. So a Geometry object named ``geom1`` will put labels 'pore.geom1'
and 'throat.geom1' into the Network dictionary, with ``True`` values indicating
where ``geom1`` applies.
The ModelsMixin class
---------------------
`Mixins <https://en.wikipedia.org/wiki/Mixin>`_ are a useful feature of Python
that allow a few methods to be added to a class that needs them. In OpenPNM,
the ability to store and run 'pore-scale' models is not needed by some objects
(Network, Algorithms), but is essential to Geometry, Physics, and Phase
objects.
In addition to these methods, the ``ModelsMixin`` also adds a ``models``
attribute to each object. This is a dictionary that stores the pore-scale
models and their associated parameters. When ``regenerate_models`` is called
the function and all the given parameters are retrieved from this dictionary
and run.
"""
from ._models import *
from ._base import *
from ._mixins import *
from ._subdomain import *<|fim▁end|>
|
that inherit from ``Subdomain`` are Geomery and Physics.
|
<|file_name|>StockImbalanceReportParam.java<|end_file_name|><|fim▁begin|>/*
* This program was produced for the U.S. Agency for International Development. It was prepared by the USAID | DELIVER PROJECT, Task Order 4. It is part of a project which utilizes code originally licensed under the terms of the Mozilla Public License (MPL) v2 and therefore is licensed under MPL v2 or later.
*
* This program is free software: you can redistribute it and/or modify it under the terms of the Mozilla Public License as published by the Mozilla Foundation, either version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Mozilla Public License for more details.
*
* You should have received a copy of the Mozilla Public License along with this program. If not, see http://www.mozilla.org/MPL/
*/
package org.openlmis.report.model.params;
import lombok.AllArgsConstructor;
import lombok.Data;
import lombok.EqualsAndHashCode;
import lombok.NoArgsConstructor;
import org.openlmis.report.model.ReportData;
import org.openlmis.report.model.ReportParameter;
@Data
@EqualsAndHashCode(callSuper=false)
@NoArgsConstructor
@AllArgsConstructor
public class StockImbalanceReportParam
extends BaseParam implements ReportParameter {
private int facilityTypeId;
private String facilityType;
// private int productId;
private String productId;
private String product;
// private int productCategoryId;
private String productCategoryId;
<|fim▁hole|> private String program;
private int scheduleId;
private String schedule;
private int periodId;
private Long zoneId;
private String period;
private Integer year;
@Override
public String toString() {
StringBuilder filtersValue = new StringBuilder("");
filtersValue.append("Period : ").append(this.period).append("\n").
append("Schedule : ").append(this.schedule).append("\n").
append("Program : ").append(this.program).append("\n").
append("Product Category : ").append(this.productCategory).append("\n").
append("Product : ").append(this.product).append("\n").
append("Facility Types : ").append(this.getFacilityType()).append("\n");
return filtersValue.toString();
}
}<|fim▁end|>
|
private String productCategory;
private String facility;
private int programId;
|
<|file_name|>modules.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
######################################
#
# Détection des modules
#
######################################
#
# WxGeometrie
# Dynamic geometry, graph plotter, and more for french mathematic teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
######################################
import os
from runpy import run_path
from .parametres import modules_par_defaut
# Modules a importer
# ----------------
_skip_dir = ('OLD', '__pycache__')
_modules_dir = os.path.normpath(os.path.join(__file__, '..', '..', 'modules'))
def _detecter_modules():
modules = []
descriptions = {}
def err(nom, msg):<|fim▁hole|> description_file = os.path.join(_modules_dir, nom, 'description.py')
if os.path.isfile(description_file):
try:
compile(nom + '=0', '', 'single') # On teste si le nom est valide
try:
d = {}
d = run_path(description_file, d)
if d['description']['groupe'] != "Modules":
# Sert à désactiver les modules en construction.
continue
descriptions[nom] = d['description']
modules.append(nom)
except:
err(nom, "fichier '%s' incorrect" %description_file)
except Exception:
err(nom, "nom de module invalide")
else:
err(nom, "fichier 'description.py' introuvable")
return modules, descriptions
try:
modules, descriptions_modules = _detecter_modules()
except OSError:
print("Warning: impossible de détecter les modules (répertoire '%s') !" % _modules_dir)
modules = []
descriptions_modules = {}
modules_actifs = dict.fromkeys(modules, False)
for nom in modules_par_defaut:
modules_actifs[nom] = True
def _key(nom):
# les modules activés par défaut apparaissent en premier,
# les autres sont classés par ordre alphabétique.
key = [1000000, nom]
if nom in modules_par_defaut:
key[0] = modules_par_defaut.index(nom)
return key
modules.sort(key = _key)<|fim▁end|>
|
print("Warning: %s n'est pas un module valide (%s)." %(nom, msg))
for nom in os.listdir(_modules_dir):
if nom not in _skip_dir and os.path.isdir(os.path.join(_modules_dir, nom)):
|
<|file_name|>iife.js<|end_file_name|><|fim▁begin|>(function () {
'use strict';<|fim▁hole|>
function foo () {
return embiggen( 6, 7 );
}
/**
* Embiggens a number
* @param {number} num - the number to embiggen
* @param {number} factor - the factor to embiggen it by
* @returns {number}
*/
function embiggen ( num, factor ) {
return num * factor;
}
alert( foo() );
}());<|fim▁end|>
| |
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>pub mod postgres;<|fim▁end|>
|
#[cfg(feature = "postgres")]
|
<|file_name|>commands.rs<|end_file_name|><|fim▁begin|><|fim▁hole|>//! Defines the redis commands exposed by the redis client.
//!
#[cfg(test)]
#[path = "./commands_test.rs"]
mod commands_test;
use crate::client::Client;
use crate::types::{RedisArg, RedisBoolResult, RedisEmptyResult, RedisResult, RedisStringResult};
use std::collections::HashMap;
use std::str::FromStr;
/// Defines the redis commands exposed by the redis client.
impl Client {
/// See redis [AUTH](https://redis.io/commands/auth) command.
///
/// # Example
///
/// ```
/// # match simple_redis::create("redis://127.0.0.1:6379/") {
/// # Ok(mut client) => {
/// match client.auth("my_password") {
/// Err(error) => println!("Auth error: {}", error),
/// _ => println!("Authenticated")
/// }
/// # },
/// # Err(error) => println!("Unable to create Redis client: {}", error)
/// # }
/// ```
///
pub fn auth(&mut self, password: &str) -> RedisEmptyResult {
self.run_command_empty_response("AUTH", vec![password])
}
/// See redis [ECHO](https://redis.io/commands/echo) command.
pub fn echo(&mut self, value: &str) -> RedisStringResult {
self.run_command_string_response("ECHO", vec![value])
}
/// See redis [PUBLISH](https://redis.io/commands/publish) command.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.publish("important_notifications", "message text") {
/// Err(error) => println!("Publish error: {}", error),
/// _ => println!("Message published")
/// }
/// ```
///
pub fn publish(&mut self, channel: &str, message: &str) -> RedisEmptyResult {
self.run_command_empty_response("PUBLISH", vec![channel, message])
}
/// See redis [GET](https://redis.io/commands/get) command.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.get::<i64>("my_key") {
/// Ok(value) => println!("Read value from Redis: {}", value),
/// Err(error) => println!("Unable to get value from Redis: {}", error)
/// }
/// ```
///
pub fn get<T: FromStr>(self: &mut Client, key: &str) -> RedisResult<T> {
self.run_command_from_string_response("GET", vec![key])
}
/// See redis [GET](https://redis.io/commands/get) command.<br>
/// This function will always return a String response.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.get_string("my_key") {
/// Ok(value) => println!("Read value from Redis: {}", value),
/// Err(error) => println!("Unable to get value from Redis: {}", error)
/// }
/// ```
///
pub fn get_string(self: &mut Client, key: &str) -> RedisStringResult {
self.run_command_string_response("GET", vec![key])
}
/// See redis [SET](https://redis.io/commands/set) command.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.set("my_key", "my_value") {
/// Err(error) => println!("Unable to set value in Redis: {}", error),
/// _ => println!("Value set in Redis")
/// }
/// ```
///
pub fn set<T: RedisArg>(self: &mut Client, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("SET", vec![key, &value.to_string()])
}
/// See redis [SETEX](https://redis.io/commands/setex) command.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.setex("my_key", "my_value", 10) {
/// Err(error) => println!("Unable to set value in Redis: {}", error),
/// _ => println!("Value set in Redis and will expire in 10 seconds")
/// }
/// ```
///
pub fn setex<T: RedisArg>(&mut self, key: &str, value: T, seconds: usize) -> RedisEmptyResult {
self.run_command_empty_response(
"SETEX",
vec![key, &*seconds.to_string(), &value.to_string()],
)
}
/// See redis [SETNX](https://redis.io/commands/setnx) command.
pub fn setnx<T: RedisArg>(&mut self, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("SETNX", vec![key, &value.to_string()])
}
/// See redis [GETSET](https://redis.io/commands/getset) command.
pub fn getset<T: RedisArg, V: FromStr>(&mut self, key: &str, value: T) -> RedisResult<V> {
self.run_command_from_string_response::<V>("GETSET", vec![key, &value.to_string()])
}
/// See redis [GETSET](https://redis.io/commands/getset) command.
pub fn getset_string<T: RedisArg>(&mut self, key: &str, value: T) -> RedisStringResult {
self.run_command_string_response("GETSET", vec![key, &value.to_string()])
}
/// See redis [DEL](https://redis.io/commands/del) command.
pub fn del(&mut self, key: &str) -> RedisEmptyResult {
self.run_command_empty_response("DEL", vec![key])
}
/// See redis [EXISTS](https://redis.io/commands/exists) command.
pub fn exists(&mut self, key: &str) -> RedisBoolResult {
self.run_command_bool_response("EXISTS", vec![key])
}
/// See redis [EXPIRE](https://redis.io/commands/expire) command.
pub fn expire(&mut self, key: &str, seconds: usize) -> RedisEmptyResult {
self.run_command_empty_response("EXPIRE", vec![key, &*seconds.to_string()])
}
/// See redis [PEXPIRE](https://redis.io/commands/pexpire) command.
pub fn pexpire(&mut self, key: &str, millies: usize) -> RedisEmptyResult {
self.run_command_empty_response("PEXPIRE", vec![key, &*millies.to_string()])
}
/// See redis [PERSIST](https://redis.io/commands/persist) command.
pub fn persist(&mut self, key: &str) -> RedisEmptyResult {
self.run_command_empty_response("PERSIST", vec![key])
}
/// See redis [RENAME](https://redis.io/commands/rename) command.
pub fn rename(&mut self, key: &str, new_key: &str) -> RedisEmptyResult {
self.run_command_empty_response("RENAME", vec![key, new_key])
}
/// See redis [RENAMENX](https://redis.io/commands/renamenx) command.
pub fn renamenx(&mut self, key: &str, new_key: &str) -> RedisEmptyResult {
self.run_command_empty_response("RENAMENX", vec![key, new_key])
}
/// See redis [APPEND](https://redis.io/commands/append) command.
pub fn append(&mut self, key: &str, value: &str) -> RedisEmptyResult {
self.run_command_empty_response("APPEND", vec![key, value])
}
/// See redis [INCR](https://redis.io/commands/incr) command.
pub fn incr(&mut self, key: &str) -> RedisResult<i64> {
self.run_command::<i64>("INCR", vec![key])
}
/// See redis [INCRBY](https://redis.io/commands/incrby) command.
pub fn incrby<T: RedisArg>(&mut self, key: &str, value: T) -> RedisResult<i64> {
self.run_command::<i64>("INCRBY", vec![key, &*value.to_string()])
}
/// See redis [INCRBYFLOAT](https://redis.io/commands/incrbyfloat) command.
pub fn incrbyfloat<T: RedisArg>(&mut self, key: &str, value: T) -> RedisResult<f64> {
self.run_command::<f64>("INCRBYFLOAT", vec![key, &*value.to_string()])
}
/// See redis [STRLEN](https://redis.io/commands/strlen) command.
pub fn strlen(&mut self, key: &str) -> RedisResult<i32> {
self.run_command::<i32>("STRLEN", vec![key])
}
/// See redis [KEYS](https://redis.io/commands/keys) command.
pub fn keys(&mut self, pattern: &str) -> RedisResult<Vec<String>> {
self.run_command::<Vec<String>>("KEYS", vec![pattern])
}
/// See redis [HGET](https://redis.io/commands/hget) command.
pub fn hget<T: FromStr>(self: &mut Client, key: &str, field: &str) -> RedisResult<T> {
self.run_command_from_string_response("HGET", vec![key, field])
}
/// See redis [HGET](https://redis.io/commands/hget) command.
pub fn hget_string(self: &mut Client, key: &str, field: &str) -> RedisStringResult {
self.run_command_string_response("HGET", vec![key, field])
}
/// See redis [HGETALL](https://redis.io/commands/hgetall) command.
///
/// # Example
///
/// ```
/// # let mut client = simple_redis::create("redis://127.0.0.1:6379/").unwrap();
/// match client.hgetall("my_map") {
/// Ok(map) => {
/// match map.get("my_field") {
/// Some(value) => println!("Got field value from map: {}", value),
/// None => println!("Map field is emtpy"),
/// }
/// },
/// Err(error) => println!("Unable to read map from Redis: {}", error),
/// }
/// ```
///
pub fn hgetall(self: &mut Client, key: &str) -> RedisResult<HashMap<String, String>> {
self.run_command::<HashMap<String, String>>("HGETALL", vec![key])
}
/// See redis [HSET](https://redis.io/commands/hset) command.
pub fn hset<T: RedisArg>(
self: &mut Client,
key: &str,
field: &str,
value: T,
) -> RedisEmptyResult {
self.run_command_empty_response("HSET", vec![key, field, &value.to_string()])
}
/// See redis [HSETNX](https://redis.io/commands/hsetnx) command.
pub fn hsetnx<T: RedisArg>(
self: &mut Client,
key: &str,
field: &str,
value: T,
) -> RedisEmptyResult {
self.run_command_empty_response("HSETNX", vec![key, field, &value.to_string()])
}
/// See redis [HDEL](https://redis.io/commands/hdel) command.
pub fn hdel(self: &mut Client, key: &str, field: &str) -> RedisEmptyResult {
self.run_command_empty_response("HDEL", vec![key, field])
}
/// See redis [HEXISTS](https://redis.io/commands/hexists) command.
pub fn hexists(self: &mut Client, key: &str, field: &str) -> RedisBoolResult {
self.run_command_bool_response("HEXISTS", vec![key, field])
}
/// See redis [HKEYS](https://redis.io/commands/hkeys) command.
pub fn hkeys(&mut self, key: &str) -> RedisResult<Vec<String>> {
self.run_command::<Vec<String>>("HKEYS", vec![key])
}
/// See redis [HVALS](https://redis.io/commands/hvals) command.
pub fn hvals(&mut self, key: &str) -> RedisResult<Vec<String>> {
self.run_command::<Vec<String>>("HVALS", vec![key])
}
/// See redis [LSET](https://redis.io/commands/lset) command.
pub fn lset<T: RedisArg>(
self: &mut Client,
key: &str,
index: isize,
value: T,
) -> RedisEmptyResult {
self.run_command_empty_response("LSET", vec![key, &index.to_string(), &value.to_string()])
}
/// See redis [HGET](https://redis.io/commands/lindex) command.
pub fn lindex<T: FromStr>(self: &mut Client, key: &str, index: isize) -> RedisResult<T> {
self.run_command_from_string_response("LINDEX", vec![key, &index.to_string()])
}
/// See redis [HGET](https://redis.io/commands/lindex) command.
pub fn lindex_string(self: &mut Client, key: &str, index: isize) -> RedisStringResult {
self.run_command_string_response("LINDEX", vec![key, &index.to_string()])
}
/// See redis [LLEN](https://redis.io/commands/llen) command.
pub fn llen(self: &mut Client, key: &str) -> RedisResult<i32> {
self.run_command::<i32>("LLEN", vec![key])
}
/// See redis [LPOP](https://redis.io/commands/lpop) command.
pub fn lpop<T: FromStr>(self: &mut Client, key: &str) -> RedisResult<T> {
self.run_command_from_string_response("LPOP", vec![key])
}
/// See redis [LPUSH](https://redis.io/commands/lpush) command.
pub fn lpush<T: RedisArg>(self: &mut Client, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("LPUSH", vec![key, &value.to_string()])
}
/// See redis [LPUSHX](https://redis.io/commands/lpushx) command.
pub fn lpushx<T: RedisArg>(self: &mut Client, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("LPUSHX", vec![key, &value.to_string()])
}
/// See redis [LRANGE](https://redis.io/commands/lrange) command.
pub fn lrange(
self: &mut Client,
key: &str,
start: isize,
stop: isize,
) -> RedisResult<Vec<String>> {
self.run_command::<Vec<String>>("LRANGE", vec![key, &start.to_string(), &stop.to_string()])
}
/// See redis [LREM](https://redis.io/commands/lrem) command.
pub fn lrem<T: RedisArg>(
self: &mut Client,
key: &str,
count: isize,
value: T,
) -> RedisEmptyResult {
self.run_command_empty_response("LREM", vec![key, &count.to_string(), &value.to_string()])
}
/// See redis [LTRIM](https://redis.io/commands/ltrim) command.
pub fn ltrim(self: &mut Client, key: &str, start: isize, stop: isize) -> RedisEmptyResult {
self.run_command_empty_response("LTRIM", vec![key, &start.to_string(), &stop.to_string()])
}
/// See redis [RPOP](https://redis.io/commands/rpop) command.
pub fn rpop<T: FromStr>(self: &mut Client, key: &str) -> RedisResult<T> {
self.run_command_from_string_response("RPOP", vec![key])
}
/// See redis [RPUSH](https://redis.io/commands/rpush) command.
pub fn rpush<T: RedisArg>(self: &mut Client, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("RPUSH", vec![key, &value.to_string()])
}
/// See redis [RPUSHX](https://redis.io/commands/rpushx) command.
pub fn rpushx<T: RedisArg>(self: &mut Client, key: &str, value: T) -> RedisEmptyResult {
self.run_command_empty_response("RPUSHX", vec![key, &value.to_string()])
}
/// See redis [SADD](https://redis.io/commands/sadd) command.
pub fn sadd(self: &mut Client, key: &str, member: &str) -> RedisResult<i32> {
self.run_command::<i32>("SADD", vec![key, member])
}
/// See redis [SCARD](https://redis.io/commands/scard) command.
pub fn scard(self: &mut Client, key: &str) -> RedisResult<i32> {
self.run_command::<i32>("SCARD", vec![key])
}
    /// See redis [SDIFF](https://redis.io/commands/sdiff) command.
    ///
    /// Returns the members of the first set in `keys` not present in any of the others.
    pub fn sdiff(self: &mut Client, keys: Vec<&str>) -> RedisResult<Vec<String>> {
        self.run_command::<Vec<String>>("SDIFF", keys)
    }
    /// See redis [SISMEMBER](https://redis.io/commands/sismember) command.
    ///
    /// Returns whether `member` belongs to the set at `key`.
    pub fn sismember(self: &mut Client, key: &str, member: &str) -> RedisBoolResult {
        self.run_command("SISMEMBER", vec![key, member])
    }
    /// See redis [SMEMBERS](https://redis.io/commands/smembers) command.
    ///
    /// Returns every member of the set at `key`.
    pub fn smembers(self: &mut Client, key: &str) -> RedisResult<Vec<String>> {
        self.run_command::<Vec<String>>("SMEMBERS", vec![key])
    }
    /// See redis [SMOVE](https://redis.io/commands/smove) command.
    ///
    /// Atomically moves `member` from the set at `source_key` to the set at `destination_key`.
    pub fn smove(
        self: &mut Client,
        source_key: &str,
        destination_key: &str,
        member: &str,
    ) -> RedisEmptyResult {
        self.run_command("SMOVE", vec![source_key, destination_key, member])
    }
    /// See redis [SREM](https://redis.io/commands/srem) command.
    ///
    /// Removes `member` from the set at `key`.
    pub fn srem(self: &mut Client, key: &str, member: &str) -> RedisEmptyResult {
        self.run_command("SREM", vec![key, member])
    }
    /// See redis [ZADD](https://redis.io/commands/zadd) command.
    ///
    /// Adds `member` with the given `score` to the sorted set at `key`; returns the count added.
    pub fn zadd(self: &mut Client, key: &str, score: isize, member: &str) -> RedisResult<i32> {
        self.run_command("ZADD", vec![key, &score.to_string(), member])
    }
/// See redis [ZRANGE](https://redis.io/commands/zrange) command.
pub fn zrange(
self: &mut Client,
key: &str,
start: isize,
stop: isize,
) -> RedisResult<Vec<String>> {
self.run_command::<Vec<String>>("ZRANGE", vec![key, &start.to_string(), &stop.to_string()])
}
}<|fim▁end|>
|
//! # commands
//!
|
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//! A safe wrapper around the kernel's KVM interface.
mod cap;
use std::cell::RefCell;
use std::cmp::{min, Ordering};
use std::collections::{BTreeMap, BinaryHeap};
use std::ffi::CString;
use std::fs::File;
use std::mem::size_of;
use std::ops::{Deref, DerefMut};
use std::os::raw::*;
use std::os::unix::prelude::OsStrExt;
use std::path::{Path, PathBuf};
use std::ptr::copy_nonoverlapping;
use std::sync::Arc;
use sync::Mutex;
use base::{AsRawDescriptor, FromRawDescriptor, RawDescriptor};
use data_model::vec_with_array_field;
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
use data_model::FlexibleArrayWrapper;
use libc::sigset_t;
use libc::{open, EBUSY, EINVAL, ENOENT, ENOSPC, EOVERFLOW, O_CLOEXEC, O_RDWR};
use kvm_sys::*;
#[allow(unused_imports)]
use base::{
block_signal, ioctl, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref,
ioctl_with_val, pagesize, signal, unblock_signal, warn, Error, Event, IoctlNr, MappedRegion,
MemoryMapping, MemoryMappingBuilder, MmapError, Result, SIGRTMIN,
};
use vm_memory::{GuestAddress, GuestMemory};
pub use crate::cap::*;
/// Converts the current `errno` (from the most recent failed libc/ioctl call) into an `Err`.
fn errno_result<T>() -> Result<T> {
    Err(Error::last())
}
/// Issues KVM_SET_USER_MEMORY_REGION to install, reconfigure, or (with `memory_size == 0`)
/// remove a guest physical memory slot.
///
/// # Safety
///
/// `userspace_addr` must point to at least `memory_size` bytes that remain valid and mapped for
/// as long as the slot is installed, and the `guest_addr` range must not overlap another slot.
unsafe fn set_user_memory_region<F: AsRawDescriptor>(
    fd: &F,
    slot: u32,
    read_only: bool,
    log_dirty_pages: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
    if log_dirty_pages {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    let region = kvm_userspace_memory_region {
        slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };
    let ret = ioctl_with_ref(fd, KVM_SET_USER_MEMORY_REGION(), &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}
/// Helper function to determine the size in bytes of a dirty log bitmap for the given memory region
/// size.
///
/// # Arguments
///
/// * `size` - Number of bytes in the memory region being queried.
pub fn dirty_log_bitmap_size(size: usize) -> usize {
    let page_size = pagesize();
    // One bit per page: round up to whole pages, then pack eight page-bits per byte.
    let pages = (size + page_size - 1) / page_size;
    (pages + 7) / 8
}
/// A wrapper around opening and using `/dev/kvm`.
///
/// Useful for querying extensions and basic values from the KVM backend. A `Kvm` is required to
/// create a `Vm` object.
pub struct Kvm {
    // Owned handle to the KVM device node; closed when this struct is dropped.
    kvm: File,
}
impl Kvm {
    /// Opens `/dev/kvm/` and returns a Kvm object on success.
    ///
    /// Fails with the underlying `errno` if the device cannot be opened.
    pub fn new() -> Result<Kvm> {
        Kvm::new_with_path(&PathBuf::from("/dev/kvm"))
    }
    /// Opens a KVM device at `device_path` and returns a Kvm object on success.
    ///
    /// The descriptor is opened `O_CLOEXEC` so it is not inherited across exec.
    pub fn new_with_path(device_path: &Path) -> Result<Kvm> {
        // Open calls are safe because we give a nul-terminated string and verify the result.
        let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
        let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        // Safe because we verify that ret is valid and we own the fd.
        Ok(Kvm {
            kvm: unsafe { File::from_raw_descriptor(ret) },
        })
    }
    // Raw KVM_CHECK_EXTENSION result; some capabilities report a value rather than a 0/1 flag.
    fn check_extension_int(&self, c: Cap) -> i32 {
        // Safe because we know that our file is a KVM fd and that the extension is one of the ones
        // defined by kernel.
        unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) }
    }
    /// Checks if a particular `Cap` is available.
    ///
    /// Note this treats only an exact result of 1 as "available".
    pub fn check_extension(&self, c: Cap) -> bool {
        self.check_extension_int(c) == 1
    }
    /// Gets the size of the mmap required to use vcpu's `kvm_run` structure.
    ///
    /// Returns the last OS error if the KVM_GET_VCPU_MMAP_SIZE ioctl fails.
    pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
        // Safe because we know that our file is a KVM fd and we verify the return result.
        let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE()) };
        if res > 0 {
            Ok(res as usize)
        } else {
            errno_result()
        }
    }
    // Shared implementation for the KVM_GET_*_CPUID ioctls; `kind` selects which one.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
        const MAX_KVM_CPUID_ENTRIES: usize = 256;
        let mut cpuid = CpuId::new(MAX_KVM_CPUID_ENTRIES);
        let ret = unsafe {
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nent, which is set to the allocated
            // size(MAX_KVM_CPUID_ENTRIES) above.
            ioctl_with_mut_ptr(self, kind, cpuid.as_mut_ptr())
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(cpuid)
    }
    /// X86 specific call to get the system supported CPUID values.
    ///
    /// See the documentation for KVM_GET_SUPPORTED_CPUID.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID())
    }
    /// X86 specific call to get the system emulated CPUID values.
    ///
    /// See the documentation for KVM_GET_EMULATED_CPUID.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_emulated_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_EMULATED_CPUID())
    }
/// X86 specific call to get list of supported MSRS
///
/// See the documentation for KVM_GET_MSR_INDEX_LIST.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_msr_index_list(&self) -> Result<Vec<u32>> {
const MAX_KVM_MSR_ENTRIES: usize = 256;
let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;
let ret = unsafe {
// ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
// allocated for the struct. The limit is read from nmsrs, which is set to the allocated
// size (MAX_KVM_MSR_ENTRIES) above.
ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0])
};
if ret < 0 {
return errno_result();
}
let mut nmsrs = msr_list[0].nmsrs;
// Mapping the unsized array to a slice is unsafe because the length isn't known. Using
// the length we originally allocated with eliminates the possibility of overflow.
let indices: &[u32] = unsafe {
if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
nmsrs = MAX_KVM_MSR_ENTRIES as u32;
}
msr_list[0].indices.as_slice(nmsrs as usize)
};
Ok(indices.to_vec())
}
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    // The x86 machine type is always 0
    /// Returns the machine type to pass to KVM_CREATE_VM (always 0 on x86).
    pub fn get_vm_type(&self) -> c_ulong {
        0
    }
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
// Compute the machine type, which should be the IPA range for the VM
// Ideally, this would take a description of the memory map and return
// the closest machine type for this VM. Here, we just return the maximum
// the kernel support.
pub fn get_vm_type(&self) -> c_ulong {
// Safe because we know self is a real kvm fd
match unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), KVM_CAP_ARM_VM_IPA_SIZE.into()) }
{
// Not supported? Use 0 as the machine type, which implies 40bit IPA
ret if ret < 0 => 0,
// Use the lower 8 bits representing the IPA space as the machine type
ipa => (ipa & 0xff) as c_ulong,
}
}
}
// Expose the raw /dev/kvm fd so it can be passed to the ioctl helpers.
impl AsRawDescriptor for Kvm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.kvm.as_raw_descriptor()
    }
}
/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug)]
pub enum IoeventAddress {
    /// Port I/O address.
    Pio(u64),
    /// Memory-mapped I/O address.
    Mmio(u64),
}
/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
///
/// A `None` inner value fixes the access width without requiring a specific written value.
pub enum Datamatch {
    AnyLength,
    U8(Option<u8>),
    U16(Option<u16>),
    U32(Option<u32>),
    U64(Option<u64>),
}
/// A source of IRQs in an `IrqRoute`.
pub enum IrqSource {
    /// An in-kernel irqchip pin (chip index plus pin number).
    Irqchip { chip: u32, pin: u32 },
    /// A message-signaled interrupt (address plus data payload).
    Msi { address: u64, data: u32 },
}
/// A single route for an IRQ.
pub struct IrqRoute {
    /// Global system interrupt number this route applies to.
    pub gsi: u32,
    /// Where the interrupt originates.
    pub source: IrqSource,
}
/// Interrupt controller IDs, matching the chip_id values used by KVM_GET/SET_IRQCHIP.
pub enum PicId {
    Primary = 0,
    Secondary = 1,
}
/// Number of pins on the IOAPIC.
pub const NUM_IOAPIC_PINS: usize = 24;
// Used to invert the order when stored in a max-heap.
// Wraps a KVM memory slot index; see the Ord impl below for the inverted comparison.
#[derive(Copy, Clone, Eq, PartialEq)]
struct MemSlot(u32);
impl Ord for MemSlot {
    fn cmp(&self, other: &MemSlot) -> Ordering {
        // Notice the order is inverted so the lowest magnitude slot has the highest priority in a
        // max-heap.
        other.0.cmp(&self.0)
    }
}
impl PartialOrd for MemSlot {
    // Delegates to the (inverted) total order defined by Ord above.
    fn partial_cmp(&self, other: &MemSlot) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
/// A wrapper around creating and using a VM.
pub struct Vm {
    // Owned VM fd returned by KVM_CREATE_VM.
    vm: File,
    // Guest RAM registered at construction; does not include later mmio additions.
    guest_mem: GuestMemory,
    // Extra regions added via add_memory_region, keyed by slot number.
    mem_regions: Arc<Mutex<BTreeMap<u32, Box<dyn MappedRegion>>>>,
    // Min-heap (via inverted MemSlot ordering) of freed slot numbers available for reuse.
    mem_slot_gaps: Arc<Mutex<BinaryHeap<MemSlot>>>,
}
impl Vm {
    /// Constructs a new `Vm` using the given `Kvm` instance.
    ///
    /// Registers every region of `guest_mem` with the kernel before returning.
    pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result<Vm> {
        // Safe because we know kvm is a real kvm fd as this module is the only one that can make
        // Kvm objects.
        let ret = unsafe { ioctl_with_val(kvm, KVM_CREATE_VM(), kvm.get_vm_type()) };
        if ret >= 0 {
            // Safe because we verify the value of ret and we are the owners of the fd.
            let vm_file = unsafe { File::from_raw_descriptor(ret) };
            guest_mem.with_regions(|index, guest_addr, size, host_addr, _, _| {
                unsafe {
                    // Safe because the guest regions are guaranteed not to overlap.
                    set_user_memory_region(
                        &vm_file,
                        index as u32,
                        false,
                        false,
                        guest_addr.offset() as u64,
                        size as u64,
                        host_addr as *mut u8,
                    )
                }
            })?;
            Ok(Vm {
                vm: vm_file,
                guest_mem,
                mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
                mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            })
        } else {
            errno_result()
        }
    }
    /// Checks if a particular `Cap` is available.
    ///
    /// This is distinct from the `Kvm` version of this method because the some extensions depend on
    /// the particular `Vm` existence. This method is encouraged by the kernel because it more
    /// accurately reflects the usable capabilities.
    pub fn check_extension(&self, c: Cap) -> bool {
        // Safe because we know that our file is a KVM fd and that the extension is one of the ones
        // defined by kernel.
        unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) == 1 }
    }
    /// Inserts the given `mem` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the kvm memory mapping is returned on success. The slot can be
    /// given to `Vm::remove_memory_region` to remove the memory from the VM's address space and
    /// take back ownership of `mem`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    pub fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
    ) -> Result<u32> {
        let size = mem.size() as u64;
        // Reject placements whose end wraps past the top of the address space.
        let end_addr = guest_addr
            .checked_add(size)
            .ok_or_else(|| Error::new(EOVERFLOW))?;
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
        // Reuse the lowest freed slot if one exists, otherwise allocate past all live slots.
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as u32,
        };
        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let res = unsafe {
            set_user_memory_region(
                &self.vm,
                slot,
                read_only,
                log_dirty_pages,
                guest_addr.offset() as u64,
                size,
                mem.as_ptr(),
            )
        };
        // On failure, return the slot number to the free pool before propagating the error.
        if let Err(e) = res {
            gaps.push(MemSlot(slot));
            return Err(e);
        }
        regions.insert(slot, mem);
        Ok(slot)
    }
    /// Removes memory that was previously added at the given slot.
    ///
    /// Ownership of the host memory mapping associated with the given slot is returned on success.
    pub fn remove_memory_region(&mut self, slot: u32) -> Result<Box<dyn MappedRegion>> {
        let mut regions = self.mem_regions.lock();
        if !regions.contains_key(&slot) {
            return Err(Error::new(ENOENT));
        }
        // Safe because the slot is checked against the list of memory slots.
        // A zero memory_size tells the kernel to delete the slot.
        unsafe {
            set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?;
        }
        self.mem_slot_gaps.lock().push(MemSlot(slot));
        // This remove will always succeed because of the contains_key check above.
        Ok(regions.remove(&slot).unwrap())
    }
/// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
/// `slot`.
///
/// The size of `dirty_log` must be at least as many bits as there are pages in the memory
/// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
/// be 2 bytes or greater.
pub fn get_dirty_log(&self, slot: u32, dirty_log: &mut [u8]) -> Result<()> {
match self.mem_regions.lock().get(&slot) {
Some(mem) => {
// Ensures that there are as many bytes in dirty_log as there are pages in the mmap.
if dirty_log_bitmap_size(mem.size()) > dirty_log.len() {
return Err(Error::new(EINVAL));
}
let mut dirty_log_kvm = kvm_dirty_log {
slot,
..Default::default()
};
dirty_log_kvm.__bindgen_anon_1.dirty_bitmap = dirty_log.as_ptr() as *mut c_void;
// Safe because the `dirty_bitmap` pointer assigned above is guaranteed to be valid
// (because it's from a slice) and we checked that it will be large enough to hold<|fim▁hole|> let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG(), &dirty_log_kvm) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
_ => Err(Error::new(ENOENT)),
}
}
    /// Gets a reference to the guest memory owned by this VM.
    ///
    /// Note that `GuestMemory` does not include any mmio memory that may have been added after
    /// this VM was constructed.
    pub fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }
    /// Sets the address of a one-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret =
            unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &(addr.offset() as u64)) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Retrieves the current timestamp of kvmclock as seen by the current guest.
    ///
    /// See the documentation on the KVM_GET_CLOCK ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_clock(&self) -> Result<kvm_clock_data> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // correct amount of memory to our pointer, and we verify the return result.
        let mut clock_data = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) };
        if ret == 0 {
            Ok(clock_data)
        } else {
            errno_result()
        }
    }
    /// Sets the current timestamp of kvmclock to the specified value.
    ///
    /// See the documentation on the KVM_SET_CLOCK ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_clock(&self, clock_data: &kvm_clock_data) -> Result<()> {
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Creates an in kernel interrupt controller.
    ///
    /// See the documentation on the KVM_CREATE_IRQCHIP ioctl.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn create_irq_chip(&self) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Retrieves the state of given interrupt controller by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_pic_state(&self, id: PicId) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not IOAPIC.
                irqchip_state.chip.pic
            })
        } else {
            errno_result()
        }
    }
    /// Sets the state of given interrupt controller by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_pic_state(&self, id: PicId, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Retrieves the state of IOAPIC by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        // chip_id 2 selects the IOAPIC (0 and 1 are the primary/secondary PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not PIC.
                irqchip_state.chip.ioapic
            })
        } else {
            errno_result()
        }
    }
    /// Sets the state of IOAPIC by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        // chip_id 2 selects the IOAPIC (0 and 1 are the primary/secondary PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
/// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
#[cfg(any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64"
))]
pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
let mut irq_level = kvm_irq_level::default();
irq_level.__bindgen_anon_1.irq = irq;
irq_level.level = if active { 1 } else { 0 };
// Safe because we know that our file is a VM fd, we know the kernel will only read the
// correct amount of memory from our pointer, and we verify the return result.
let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Retrieves the state of PIT by issuing KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // correct amount of memory to our pointer, and we verify the return result.
        let mut pit_state = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }
    /// Sets the state of PIT by issuing KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    pub fn register_ioevent(
        &self,
        evt: &Event,
        addr: IoeventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        self.ioeventfd(evt, addr, datamatch, false)
    }
    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    pub fn unregister_ioevent(
        &self,
        evt: &Event,
        addr: IoeventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        self.ioeventfd(evt, addr, datamatch, true)
    }
fn ioeventfd(
&self,
evt: &Event,
addr: IoeventAddress,
datamatch: Datamatch,
deassign: bool,
) -> Result<()> {
let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
Datamatch::AnyLength => (false, 0, 0),
Datamatch::U8(v) => match v {
Some(u) => (true, u as u64, 1),
None => (false, 0, 1),
},
Datamatch::U16(v) => match v {
Some(u) => (true, u as u64, 2),
None => (false, 0, 2),
},
Datamatch::U32(v) => match v {
Some(u) => (true, u as u64, 4),
None => (false, 0, 4),
},
Datamatch::U64(v) => match v {
Some(u) => (true, u as u64, 8),
None => (false, 0, 8),
},
};
let mut flags = 0;
if deassign {
flags |= 1 << kvm_ioeventfd_flag_nr_deassign;
}
if do_datamatch {
flags |= 1 << kvm_ioeventfd_flag_nr_datamatch
}
if let IoeventAddress::Pio(_) = addr {
flags |= 1 << kvm_ioeventfd_flag_nr_pio;
}
let ioeventfd = kvm_ioeventfd {
datamatch: datamatch_value,
len: datamatch_len,
addr: match addr {
IoeventAddress::Pio(p) => p as u64,
IoeventAddress::Mmio(m) => m,
},
fd: evt.as_raw_descriptor(),
flags,
..Default::default()
};
// Safe because we know that our file is a VM fd, we know the kernel will only read the
// correct amount of memory from our pointer, and we verify the return result.
let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) };
if ret == 0 {
Ok(())
} else {
errno_result()
}
}
    /// Registers an event that will, when signalled, trigger the `gsi` irq, and `resample_evt` will
    /// get triggered when the irqchip is resampled.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn register_irqfd_resample(
        &self,
        evt: &Event,
        resample_evt: &Event,
        gsi: u32,
    ) -> Result<()> {
        let irqfd = kvm_irqfd {
            flags: KVM_IRQFD_FLAG_RESAMPLE,
            fd: evt.as_raw_descriptor() as u32,
            resamplefd: resample_evt.as_raw_descriptor() as u32,
            gsi,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Unregisters an event that was previously registered with
    /// `register_irqfd`/`register_irqfd_resample`.
    ///
    /// The `evt` and `gsi` pair must be the same as the ones passed into
    /// `register_irqfd`/`register_irqfd_resample`.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn unregister_irqfd(&self, evt: &Event, gsi: u32) -> Result<()> {
        let irqfd = kvm_irqfd {
            fd: evt.as_raw_descriptor() as u32,
            gsi,
            flags: KVM_IRQFD_FLAG_DEASSIGN,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Sets the GSI routing table, replacing any table set with previous calls to
    /// `set_gsi_routing`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_gsi_routing(&self, routes: &[IrqRoute]) -> Result<()> {
        let mut irq_routing =
            vec_with_array_field::<kvm_irq_routing, kvm_irq_routing_entry>(routes.len());
        irq_routing[0].nr = routes.len() as u32;
        // Safe because we ensured there is enough space in irq_routing to hold the number of
        // route entries.
        let irq_routes = unsafe { irq_routing[0].entries.as_mut_slice(routes.len()) };
        for (route, irq_route) in routes.iter().zip(irq_routes.iter_mut()) {
            irq_route.gsi = route.gsi;
            match route.source {
                IrqSource::Irqchip { chip, pin } => {
                    irq_route.type_ = KVM_IRQ_ROUTING_IRQCHIP;
                    irq_route.u.irqchip = kvm_irq_routing_irqchip { irqchip: chip, pin }
                }
                IrqSource::Msi { address, data } => {
                    irq_route.type_ = KVM_IRQ_ROUTING_MSI;
                    irq_route.u.msi = kvm_irq_routing_msi {
                        address_lo: address as u32,
                        address_hi: (address >> 32) as u32,
                        data,
                        ..Default::default()
                    }
                }
            }
        }
        // Safe because we know that our file is a VM fd, the routing table we built above is
        // fully initialized, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING(), &irq_routing[0]) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
    /// Enable the specified capability.
    /// See documentation for KVM_ENABLE_CAP.
    /// This function is marked as unsafe because `cap` may contain values which are interpreted as
    /// pointers by the kernel.
    ///
    /// # Safety
    ///
    /// Any pointer-valued fields inside `cap.args` must reference memory valid for the ioctl.
    pub unsafe fn kvm_enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> {
        // Safe because we allocated the struct and we know the kernel will read exactly the size of
        // the struct.
        let ret = ioctl_with_ref(self, KVM_ENABLE_CAP(), cap);
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }
}
// Expose the raw VM fd so it can be passed to the ioctl helpers.
impl AsRawDescriptor for Vm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vm.as_raw_descriptor()
    }
}
/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
#[derive(Debug)]
pub enum VcpuExit {
    /// An out port instruction was run on the given port with the given data.
    IoOut {
        port: u16,
        size: usize,
        data: [u8; 8],
    },
    /// An in port instruction was run on the given port.
    ///
    /// The data that the instruction receives should be set with `set_data` before `Vcpu::run` is
    /// called again.
    IoIn {
        port: u16,
        size: usize,
    },
    /// A read instruction was run against the given MMIO address.
    ///
    /// The data that the instruction receives should be set with `set_data` before `Vcpu::run` is
    /// called again.
    MmioRead {
        address: u64,
        size: usize,
    },
    /// A write instruction was run against the given MMIO address with the given data.
    MmioWrite {
        address: u64,
        size: usize,
        data: [u8; 8],
    },
    /// An in-kernel IOAPIC EOI was delivered for the given vector.
    IoapicEoi {
        vector: u8,
    },
    /// A Hyper-V synthetic interrupt controller state change.
    HypervSynic {
        msr: u32,
        control: u64,
        evt_page: u64,
        msg_page: u64,
    },
    /// A Hyper-V hypercall with the given input value and parameters.
    HypervHcall {
        input: u64,
        params: [u64; 2],
    },
    Unknown,
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    Shutdown,
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    Intr,
    SetTpr,
    TprAccess,
    S390Sieic,
    S390Reset,
    Dcr,
    Nmi,
    InternalError,
    Osi,
    PaprHcall,
    S390Ucontrol,
    Watchdog,
    S390Tsch,
    Epr,
    /// The cpu triggered a system level event which is specified by the type field.
    /// The first field is the event type and the second field is flags.
    /// The possible event types are shutdown, reset, or crash. So far there
    /// are not any flags defined.
    SystemEvent(u32 /* event_type */, u64 /* flags */),
}
/// A wrapper around creating and using a VCPU.
/// `Vcpu` provides all functionality except for running. To run, `to_runnable` must be called to
/// lock the vcpu to a thread. Then the returned `RunnableVcpu` can be used for running.
pub struct Vcpu {
    // Owned vcpu fd returned by KVM_CREATE_VCPU.
    vcpu: File,
    // Shared `kvm_run` structure mmap'd from the vcpu fd.
    run_mmap: MemoryMapping,
}
/// Per-thread record of the vcpu bound to the current thread; per `to_runnable`'s docs it lets
/// signal handlers request an immediate exit of that vcpu.
pub struct VcpuThread {
    // Raw pointer into the bound vcpu's mmap'd kvm_run structure.
    run: *mut kvm_run,
    // Signal number blocked while the vcpu is bound/unbound, if any.
    signal_num: Option<c_int>,
}
thread_local!(static VCPU_THREAD: RefCell<Option<VcpuThread>> = RefCell::new(None));
impl Vcpu {
    /// Constructs a new VCPU for `vm`.
    ///
    /// The `id` argument is the CPU number between [0, max vcpus).
    pub fn new(id: c_ulong, kvm: &Kvm, vm: &Vm) -> Result<Vcpu> {
        let run_mmap_size = kvm.get_vcpu_mmap_size()?;
        // Safe because we know that vm is a VM fd and we verify the return result.
        let vcpu_fd = unsafe { ioctl_with_val(vm, KVM_CREATE_VCPU(), id) };
        if vcpu_fd < 0 {
            return errno_result();
        }
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { File::from_raw_descriptor(vcpu_fd) };
        // Map the shared kvm_run structure; mmap failures are reported as ENOSPC.
        let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
            .from_file(&vcpu)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;
        Ok(Vcpu { vcpu, run_mmap })
    }
    /// Consumes `self` and returns a `RunnableVcpu`. A `RunnableVcpu` is required to run the
    /// guest.
    /// Assigns a vcpu to the current thread and stores it in a hash map that can be used by signal
    /// handlers to call set_local_immediate_exit(). An optional signal number will be temporarily
    /// blocked while assigning the vcpu to the thread and later blocked when `RunnableVcpu` is
    /// destroyed.
    ///
    /// Returns an error, `EBUSY`, if the current thread already contains a Vcpu.
    #[allow(clippy::cast_ptr_alignment)]
    pub fn to_runnable(self, signal_num: Option<c_int>) -> Result<RunnableVcpu> {
        // Block signal while we add -- if a signal fires (very unlikely,
        // as this means something is trying to pause the vcpu before it has
        // even started) it'll try to grab the read lock while this write
        // lock is grabbed and cause a deadlock.
        // Assuming that a failure to block means it's already blocked.
        let _blocked_signal = signal_num.map(BlockedSignal::new);
        // Install the thread-local record; fail with EBUSY if one is already present.
        VCPU_THREAD.with(|v| {
            if v.borrow().is_none() {
                *v.borrow_mut() = Some(VcpuThread {
                    run: self.run_mmap.as_ptr() as *mut kvm_run,
                    signal_num,
                });
                Ok(())
            } else {
                Err(Error::new(EBUSY))
            }
        })?;
        Ok(RunnableVcpu {
            vcpu: self,
            phantom: Default::default(),
        })
    }
/// Sets the data received by a mmio read, ioport in, or hypercall instruction.
///
/// This function should be called after `Vcpu::run` returns an `VcpuExit::IoIn`,
/// `VcpuExit::MmioRead`, or 'VcpuExit::HypervHcall`.
#[allow(clippy::cast_ptr_alignment)]
pub fn set_data(&self, data: &[u8]) -> Result<()> {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was. The pointer is page aligned so casting to a different
// type is well defined, hence the clippy allow attribute.
let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
match run.exit_reason {
KVM_EXIT_IO => {
let run_start = run as *mut kvm_run as *mut u8;
// Safe because the exit_reason (which comes from the kernel) told us which
// union field to use.
let io = unsafe { run.__bindgen_anon_1.io };
if io.direction as u32 != KVM_EXIT_IO_IN {
return Err(Error::new(EINVAL));
}
let data_size = (io.count as usize) * (io.size as usize);
if data_size != data.len() {
return Err(Error::new(EINVAL));
}
// The data_offset is defined by the kernel to be some number of bytes into the
// kvm_run structure, which we have fully mmap'd.
unsafe {
let data_ptr = run_start.offset(io.data_offset as isize);
copy_nonoverlapping(data.as_ptr(), data_ptr, data_size);
}
Ok(())
}
KVM_EXIT_MMIO => {
// Safe because the exit_reason (which comes from the kernel) told us which
// union field to use.
let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
if mmio.is_write != 0 {
return Err(Error::new(EINVAL));
}
let len = mmio.len as usize;
if len != data.len() {
return Err(Error::new(EINVAL));
}
mmio.data[..len].copy_from_slice(data);
Ok(())
}
KVM_EXIT_HYPERV => {
// Safe because the exit_reason (which comes from the kernel) told us which
// union field to use.
let hyperv = unsafe { &mut run.__bindgen_anon_1.hyperv };
if hyperv.type_ != KVM_EXIT_HYPERV_HCALL {
return Err(Error::new(EINVAL));
}
let hcall = unsafe { &mut hyperv.u.hcall };
if data.len() != std::mem::size_of::<u64>() {
return Err(Error::new(EINVAL));
}
hcall.result.to_ne_bytes().copy_from_slice(data);
Ok(())
}
_ => Err(Error::new(EINVAL)),
}
}
/// Sets the bit that requests an immediate exit.
#[allow(clippy::cast_ptr_alignment)]
pub fn set_immediate_exit(&self, exit: bool) {
// Safe because we know we mapped enough memory to hold the kvm_run struct because the
// kernel told us how large it was. The pointer is page aligned so casting to a different
// type is well defined, hence the clippy allow attribute.
let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
run.immediate_exit = if exit { 1 } else { 0 };
}
/// Sets/clears the bit for immediate exit for the vcpu on the current thread.
pub fn set_local_immediate_exit(exit: bool) {
VCPU_THREAD.with(|v| {
if let Some(state) = &(*v.borrow()) {
unsafe {
(*state.run).immediate_exit = if exit { 1 } else { 0 };
};
}
});
}
/// Gets the VCPU registers.
#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
pub fn get_regs(&self) -> Result<kvm_regs> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only read the
// correct amount of memory from our pointer, and we verify the return result.
let mut regs = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) };
if ret != 0 {
return errno_result();
}
Ok(regs)
}
/// Sets the VCPU registers.
#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
pub fn set_regs(&self, regs: &kvm_regs) -> Result<()> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only read the
// correct amount of memory from our pointer, and we verify the return result.
let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), regs) };
if ret != 0 {
return errno_result();
}
Ok(())
}
/// Gets the VCPU special registers.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_sregs(&self) -> Result<kvm_sregs> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
// correct amount of memory to our pointer, and we verify the return result.
let mut regs = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) };
if ret != 0 {
return errno_result();
}
Ok(regs)
}
/// Sets the VCPU special registers.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_sregs(&self, sregs: &kvm_sregs) -> Result<()> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only read the
// correct amount of memory from our pointer, and we verify the return result.
let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), sregs) };
if ret != 0 {
return errno_result();
}
Ok(())
}
/// Gets the VCPU FPU registers.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_fpu(&self) -> Result<kvm_fpu> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
// correct amount of memory to our pointer, and we verify the return result.
let mut regs = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut regs) };
if ret != 0 {
return errno_result();
}
Ok(regs)
}
/// X86 specific call to setup the FPU
///
/// See the documentation for KVM_SET_FPU.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_fpu(&self, fpu: &kvm_fpu) -> Result<()> {
let ret = unsafe {
// Here we trust the kernel not to read past the end of the kvm_fpu struct.
ioctl_with_ref(self, KVM_SET_FPU(), fpu)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Gets the VCPU debug registers.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_debugregs(&self) -> Result<kvm_debugregs> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
// correct amount of memory to our pointer, and we verify the return result.
let mut regs = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) };
if ret != 0 {
return errno_result();
}
Ok(regs)
}
/// Sets the VCPU debug registers
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_debugregs(&self, dregs: &kvm_debugregs) -> Result<()> {
let ret = unsafe {
// Here we trust the kernel not to read past the end of the kvm_fpu struct.
ioctl_with_ref(self, KVM_SET_DEBUGREGS(), dregs)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Gets the VCPU extended control registers
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_xcrs(&self) -> Result<kvm_xcrs> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only write the
// correct amount of memory to our pointer, and we verify the return result.
let mut regs = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) };
if ret != 0 {
return errno_result();
}
Ok(regs)
}
/// Sets the VCPU extended control registers
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_xcrs(&self, xcrs: &kvm_xcrs) -> Result<()> {
let ret = unsafe {
// Here we trust the kernel not to read past the end of the kvm_xcrs struct.
ioctl_with_ref(self, KVM_SET_XCRS(), xcrs)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// X86 specific call to get the MSRS
///
/// See the documentation for KVM_SET_MSRS.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_msrs(&self, msr_entries: &mut Vec<kvm_msr_entry>) -> Result<()> {
let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_entries.len());
unsafe {
// Mapping the unsized array to a slice is unsafe because the length isn't known.
// Providing the length used to create the struct guarantees the entire slice is valid.
let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(msr_entries.len());
entries.copy_from_slice(msr_entries);
}
msrs[0].nmsrs = msr_entries.len() as u32;
let ret = unsafe {
// Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0])
};
if ret < 0 {
// KVM_SET_MSRS actually returns the number of msr entries written.
return errno_result();
}
unsafe {
let count = ret as usize;
assert!(count <= msr_entries.len());
let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(count);
msr_entries.truncate(count);
msr_entries.copy_from_slice(entries);
}
Ok(())
}
/// X86 specific call to setup the MSRS
///
/// See the documentation for KVM_SET_MSRS.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_msrs(&self, msrs: &kvm_msrs) -> Result<()> {
let ret = unsafe {
// Here we trust the kernel not to read past the end of the kvm_msrs struct.
ioctl_with_ref(self, KVM_SET_MSRS(), msrs)
};
if ret < 0 {
// KVM_SET_MSRS actually returns the number of msr entries written.
return errno_result();
}
Ok(())
}
/// X86 specific call to setup the CPUID registers
///
/// See the documentation for KVM_SET_CPUID2.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_cpuid2(&self, cpuid: &CpuId) -> Result<()> {
let ret = unsafe {
// Here we trust the kernel not to read past the end of the kvm_msrs struct.
ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr())
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// X86 specific call to get the system emulated hyper-v CPUID values
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_hyperv_cpuid(&self) -> Result<CpuId> {
const MAX_KVM_CPUID_ENTRIES: usize = 256;
let mut cpuid = CpuId::new(MAX_KVM_CPUID_ENTRIES);
let ret = unsafe {
// ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
// allocated for the struct. The limit is read from nent, which is set to the allocated
// size(MAX_KVM_CPUID_ENTRIES) above.
ioctl_with_mut_ptr(self, KVM_GET_SUPPORTED_HV_CPUID(), cpuid.as_mut_ptr())
};
if ret < 0 {
return errno_result();
}
Ok(cpuid)
}
/// X86 specific call to get the state of the "Local Advanced Programmable Interrupt Controller".
///
/// See the documentation for KVM_GET_LAPIC.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
let mut klapic: kvm_lapic_state = Default::default();
let ret = unsafe {
// The ioctl is unsafe unless you trust the kernel not to write past the end of the
// local_apic struct.
ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic)
};
if ret < 0 {
return errno_result();
}
Ok(klapic)
}
/// X86 specific call to set the state of the "Local Advanced Programmable Interrupt Controller".
///
/// See the documentation for KVM_SET_LAPIC.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
let ret = unsafe {
// The ioctl is safe because the kernel will only read from the klapic struct.
ioctl_with_ref(self, KVM_SET_LAPIC(), klapic)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Gets the vcpu's current "multiprocessing state".
///
/// See the documentation for KVM_GET_MP_STATE. This call can only succeed after
/// a call to `Vm::create_irq_chip`.
///
/// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
/// to run crosvm on s390.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_mp_state(&self) -> Result<kvm_mp_state> {
// Safe because we know that our file is a VCPU fd, we know the kernel will only
// write correct amount of memory to our pointer, and we verify the return result.
let mut state: kvm_mp_state = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE(), &mut state) };
if ret < 0 {
return errno_result();
}
Ok(state)
}
/// Sets the vcpu's current "multiprocessing state".
///
/// See the documentation for KVM_SET_MP_STATE. This call can only succeed after
/// a call to `Vm::create_irq_chip`.
///
/// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
/// to run crosvm on s390.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_mp_state(&self, state: &kvm_mp_state) -> Result<()> {
let ret = unsafe {
// The ioctl is safe because the kernel will only read from the kvm_mp_state struct.
ioctl_with_ref(self, KVM_SET_MP_STATE(), state)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Gets the vcpu's currently pending exceptions, interrupts, NMIs, etc
///
/// See the documentation for KVM_GET_VCPU_EVENTS.
///
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn get_vcpu_events(&self) -> Result<kvm_vcpu_events> {
// Safe because we know that our file is a VCPU fd, we know the kernel
// will only write correct amount of memory to our pointer, and we
// verify the return result.
let mut events: kvm_vcpu_events = unsafe { std::mem::zeroed() };
let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut events) };
if ret < 0 {
return errno_result();
}
Ok(events)
}
/// Sets the vcpu's currently pending exceptions, interrupts, NMIs, etc
///
/// See the documentation for KVM_SET_VCPU_EVENTS.
///
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub fn set_vcpu_events(&self, events: &kvm_vcpu_events) -> Result<()> {
let ret = unsafe {
// The ioctl is safe because the kernel will only read from the
// kvm_vcpu_events.
ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), events)
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Enable the specified capability.
/// See documentation for KVM_ENABLE_CAP.
/// This function is marked as unsafe because `cap` may contain values which are interpreted as
/// pointers by the kernel.
pub unsafe fn kvm_enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> {
// Safe because we allocated the struct and we know the kernel will read exactly the size of
// the struct.
let ret = ioctl_with_ref(self, KVM_ENABLE_CAP(), cap);
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Specifies set of signals that are blocked during execution of KVM_RUN.
/// Signals that are not blocked will cause KVM_RUN to return with -EINTR.
///
/// See the documentation for KVM_SET_SIGNAL_MASK
pub fn set_signal_mask(&self, signals: &[c_int]) -> Result<()> {
let sigset = signal::create_sigset(signals)?;
let mut kvm_sigmask = vec_with_array_field::<kvm_signal_mask, sigset_t>(1);
// Rust definition of sigset_t takes 128 bytes, but the kernel only
// expects 8-bytes structure, so we can't write
// kvm_sigmask.len = size_of::<sigset_t>() as u32;
kvm_sigmask[0].len = 8;
// Ensure the length is not too big.
const _ASSERT: usize = size_of::<sigset_t>() - 8usize;
// Safe as we allocated exactly the needed space
unsafe {
copy_nonoverlapping(
&sigset as *const sigset_t as *const u8,
kvm_sigmask[0].sigset.as_mut_ptr(),
8,
);
}
let ret = unsafe {
// The ioctl is safe because the kernel will only read from the
// kvm_signal_mask structure.
ioctl_with_ref(self, KVM_SET_SIGNAL_MASK(), &kvm_sigmask[0])
};
if ret < 0 {
return errno_result();
}
Ok(())
}
/// Sets the value of one register on this VCPU. The id of the register is
/// encoded as specified in the kernel documentation for KVM_SET_ONE_REG.
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
pub fn set_one_reg(&self, reg_id: u64, data: u64) -> Result<()> {
let data_ref = &data as *const u64;
let onereg = kvm_one_reg {
id: reg_id,
addr: data_ref as u64,
};
// safe because we allocated the struct and we know the kernel will read
// exactly the size of the struct
let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG(), &onereg) };
if ret < 0 {
return errno_result();
}
Ok(())
}
}
impl AsRawDescriptor for Vcpu {
    /// Exposes the raw VCPU fd so callers can pass it to ioctls and polling APIs.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vcpu.as_raw_descriptor()
    }
}
/// A Vcpu that has a thread and can be run. Created by calling `to_runnable` on a `Vcpu`.
/// Implements `Deref` to a `Vcpu` so all `Vcpu` methods are usable, with the addition of the `run`
/// function to execute the guest.
pub struct RunnableVcpu {
    // The underlying vcpu; owns the fd and the kvm_run mapping.
    vcpu: Vcpu,
    // vcpus must stay on the same thread once they start.
    // Add the PhantomData pointer to ensure RunnableVcpu is not `Send`.
    phantom: std::marker::PhantomData<*mut u8>,
}
impl RunnableVcpu {
    /// Runs the VCPU until it exits, returning the reason for the exit.
    ///
    /// Note that the state of the VCPU and associated VM must be setup first for this to do
    /// anything useful.
    ///
    /// On success, decodes the exit reason written by the kernel into the shared `kvm_run`
    /// page into a `VcpuExit` value; any unrecognized exit reason is treated as a programming
    /// error and panics.
    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    pub fn run(&self) -> Result<VcpuExit> {
        // Safe because we know that our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl(self, KVM_RUN()) };
        if ret == 0 {
            // Safe because we know we mapped enough memory to hold the kvm_run struct because the
            // kernel told us how large it was.
            let run = unsafe { &*(self.run_mmap.as_ptr() as *const kvm_run) };
            match run.exit_reason {
                KVM_EXIT_IO => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let io = unsafe { run.__bindgen_anon_1.io };
                    let port = io.port;
                    let size = (io.count as usize) * (io.size as usize);
                    match io.direction as u32 {
                        KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn { port, size }),
                        KVM_EXIT_IO_OUT => {
                            let mut data = [0; 8];
                            let run_start = run as *const kvm_run as *const u8;
                            // The data_offset is defined by the kernel to be some number of bytes
                            // into the kvm_run structure, which we have fully mmap'd.
                            // Only up to 8 bytes are captured here; larger PIO payloads are
                            // truncated to the size of the `data` buffer.
                            unsafe {
                                let data_ptr = run_start.offset(io.data_offset as isize);
                                copy_nonoverlapping(
                                    data_ptr,
                                    data.as_mut_ptr(),
                                    min(size, data.len()),
                                );
                            }
                            Ok(VcpuExit::IoOut { port, size, data })
                        }
                        _ => Err(Error::new(EINVAL)),
                    }
                }
                KVM_EXIT_MMIO => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let mmio = unsafe { &run.__bindgen_anon_1.mmio };
                    let address = mmio.phys_addr;
                    // Clamp to the fixed-size data buffer in case the kernel reports a larger len.
                    let size = min(mmio.len as usize, mmio.data.len());
                    if mmio.is_write != 0 {
                        Ok(VcpuExit::MmioWrite {
                            address,
                            size,
                            data: mmio.data,
                        })
                    } else {
                        Ok(VcpuExit::MmioRead { address, size })
                    }
                }
                KVM_EXIT_IOAPIC_EOI => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                    Ok(VcpuExit::IoapicEoi { vector })
                }
                KVM_EXIT_HYPERV => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let hyperv = unsafe { &run.__bindgen_anon_1.hyperv };
                    match hyperv.type_ as u32 {
                        KVM_EXIT_HYPERV_SYNIC => {
                            let synic = unsafe { &hyperv.u.synic };
                            Ok(VcpuExit::HypervSynic {
                                msr: synic.msr,
                                control: synic.control,
                                evt_page: synic.evt_page,
                                msg_page: synic.msg_page,
                            })
                        }
                        KVM_EXIT_HYPERV_HCALL => {
                            let hcall = unsafe { &hyperv.u.hcall };
                            Ok(VcpuExit::HypervHcall {
                                input: hcall.input,
                                params: hcall.params,
                            })
                        }
                        _ => Err(Error::new(EINVAL)),
                    }
                }
                // The remaining exit reasons carry no payload; map them 1:1 to enum variants.
                KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
                KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
                KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),
                KVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
                KVM_EXIT_HLT => Ok(VcpuExit::Hlt),
                KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen),
                KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown),
                KVM_EXIT_FAIL_ENTRY => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let hardware_entry_failure_reason = unsafe {
                        run.__bindgen_anon_1
                            .fail_entry
                            .hardware_entry_failure_reason
                    };
                    Ok(VcpuExit::FailEntry {
                        hardware_entry_failure_reason,
                    })
                }
                KVM_EXIT_INTR => Ok(VcpuExit::Intr),
                KVM_EXIT_SET_TPR => Ok(VcpuExit::SetTpr),
                KVM_EXIT_TPR_ACCESS => Ok(VcpuExit::TprAccess),
                KVM_EXIT_S390_SIEIC => Ok(VcpuExit::S390Sieic),
                KVM_EXIT_S390_RESET => Ok(VcpuExit::S390Reset),
                KVM_EXIT_DCR => Ok(VcpuExit::Dcr),
                KVM_EXIT_NMI => Ok(VcpuExit::Nmi),
                KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
                KVM_EXIT_OSI => Ok(VcpuExit::Osi),
                KVM_EXIT_PAPR_HCALL => Ok(VcpuExit::PaprHcall),
                KVM_EXIT_S390_UCONTROL => Ok(VcpuExit::S390Ucontrol),
                KVM_EXIT_WATCHDOG => Ok(VcpuExit::Watchdog),
                KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch),
                KVM_EXIT_EPR => Ok(VcpuExit::Epr),
                KVM_EXIT_SYSTEM_EVENT => {
                    // Safe because we know the exit reason told us this union
                    // field is valid
                    let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
                    let event_flags = unsafe { run.__bindgen_anon_1.system_event.flags };
                    Ok(VcpuExit::SystemEvent(event_type, event_flags))
                }
                r => panic!("unknown kvm exit reason: {}", r),
            }
        } else {
            errno_result()
        }
    }
}
impl Deref for RunnableVcpu {
    type Target = Vcpu;
    /// Borrows the wrapped `Vcpu`, making every `Vcpu` method callable on a `RunnableVcpu`.
    fn deref(&self) -> &Vcpu {
        &self.vcpu
    }
}
impl DerefMut for RunnableVcpu {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.vcpu
}
}
impl AsRawDescriptor for RunnableVcpu {
    /// Exposes the raw fd of the underlying `Vcpu`.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vcpu.as_raw_descriptor()
    }
}
impl Drop for RunnableVcpu {
    /// Detaches the vcpu from the current thread. The pause signal (if any) is blocked for the
    /// duration of the removal so a handler cannot observe the thread-local mid-update; the
    /// `BlockedSignal` guard then unblocks it when this scope ends.
    fn drop(&mut self) {
        VCPU_THREAD.with(|v| {
            // This assumes that a failure in `BlockedSignal::new` means the signal is already
            // blocked and therefore it should not be unblocked on exit.
            let _blocked_signal = &(*v.borrow())
                .as_ref()
                .and_then(|state| state.signal_num)
                .map(BlockedSignal::new);
            *v.borrow_mut() = None;
        });
    }
}
/// Wrapper for kvm_cpuid2 which has a zero length array at the end.
/// Hides the zero length array behind a bounds check.
///
/// Used by `Vcpu::set_cpuid2` and `Vcpu::get_hyperv_cpuid` to pass variable-length CPUID tables.
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub type CpuId = FlexibleArrayWrapper<kvm_cpuid2, kvm_cpuid_entry2>;
// Represents a temporarily blocked signal. It will unblock the signal when dropped.
struct BlockedSignal {
    // The signal number that was blocked and must be restored on drop.
    signal_num: c_int,
}
impl BlockedSignal {
    /// Attempts to block `signal_num` for the current thread.
    ///
    /// Returns a guard that restores the signal mask when dropped, or `None` when the
    /// signal could not be blocked.
    fn new(signal_num: c_int) -> Option<BlockedSignal> {
        match block_signal(signal_num) {
            Ok(_) => Some(BlockedSignal { signal_num }),
            Err(_) => None,
        }
    }
}
impl Drop for BlockedSignal {
    /// Restores the signal mask for the signal blocked in `new`.
    ///
    /// Panics if the signal cannot be unblocked, since silently leaving it blocked would
    /// break later attempts to interrupt the vcpu thread.
    fn drop(&mut self) {
        // `expect` already consumes the Result (panicking on Err), so the former
        // `let _ =` binding was redundant and has been removed.
        unblock_signal(self.signal_num).expect("failed to restore signal mask");
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn dirty_log_size() {
let page_size = pagesize();
assert_eq!(dirty_log_bitmap_size(0), 0);
assert_eq!(dirty_log_bitmap_size(page_size), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8), 1);
assert_eq!(dirty_log_bitmap_size(page_size * 8 + 1), 2);
assert_eq!(dirty_log_bitmap_size(page_size * 100), 13);
}
#[test]
fn new() {
Kvm::new().unwrap();
}
#[test]
fn create_vm() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
Vm::new(&kvm, gm).unwrap();
}
#[test]
fn check_extension() {
let kvm = Kvm::new().unwrap();
assert!(kvm.check_extension(Cap::UserMemory));
// I assume nobody is testing this on s390
assert!(!kvm.check_extension(Cap::S390UserSigp));
}
#[test]
fn check_vm_extension() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
assert!(vm.check_extension(Cap::UserMemory));
// I assume nobody is testing this on s390
assert!(!vm.check_extension(Cap::S390UserSigp));
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_supported_cpuid() {
let kvm = Kvm::new().unwrap();
let mut cpuid = kvm.get_supported_cpuid().unwrap();
let cpuid_entries = cpuid.mut_entries_slice();
assert!(cpuid_entries.len() > 0);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_emulated_cpuid() {
let kvm = Kvm::new().unwrap();
kvm.get_emulated_cpuid().unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_msr_index_list() {
let kvm = Kvm::new().unwrap();
let msr_list = kvm.get_msr_index_list().unwrap();
assert!(msr_list.len() >= 2);
}
#[test]
fn add_memory() {
let kvm = Kvm::new().unwrap();
let gm =
GuestMemory::new(&[(GuestAddress(0), 0x1000), (GuestAddress(0x5000), 0x5000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x10000), Box::new(mem), false, false)
.unwrap();
}
#[test]
fn add_memory_ro() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
vm.add_memory_region(GuestAddress(0x1000), Box::new(mem), true, false)
.unwrap();
}
#[test]
fn remove_memory_region() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x1000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
let mem_ptr = mem.as_ptr();
let slot = vm
.add_memory_region(GuestAddress(0x1000), Box::new(mem), false, false)
.unwrap();
let removed_mem = vm.remove_memory_region(slot).unwrap();
assert_eq!(removed_mem.size(), mem_size);
assert_eq!(removed_mem.as_ptr(), mem_ptr);
}
#[test]
fn remove_invalid_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
assert!(vm.remove_memory_region(0).is_err());
}
#[test]
fn overlap_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let mut vm = Vm::new(&kvm, gm).unwrap();
let mem_size = 0x2000;
let mem = MemoryMappingBuilder::new(mem_size).build().unwrap();
assert!(vm
.add_memory_region(GuestAddress(0x2000), Box::new(mem), false, false)
.is_err());
}
#[test]
fn get_memory() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let obj_addr = GuestAddress(0xf0);
vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
assert_eq!(read_val, 67u8);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn clock_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let mut clock_data = vm.get_clock().unwrap();
clock_data.clock += 1000;
vm.set_clock(&clock_data).unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn pic_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
let pic_state = vm.get_pic_state(PicId::Secondary).unwrap();
vm.set_pic_state(PicId::Secondary, &pic_state).unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn ioapic_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
let ioapic_state = vm.get_ioapic_state().unwrap();
vm.set_ioapic_state(&ioapic_state).unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn pit_handling() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
vm.create_pit().unwrap();
let pit_state = vm.get_pit_state().unwrap();
vm.set_pit_state(&pit_state).unwrap();
}
#[test]
fn register_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoeventAddress::Pio(0xc1),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoeventAddress::Pio(0xc2),
Datamatch::U16(Some(0x1337u16)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoeventAddress::Pio(0xc4),
Datamatch::U32(Some(0xdeadbeefu32)),
)
.unwrap();
vm.register_ioevent(
&evtfd,
IoeventAddress::Pio(0xc8),
Datamatch::U64(Some(0xdeadbeefdeadbeefu64)),
)
.unwrap();
}
#[test]
fn unregister_ioevent() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let evtfd = Event::new().unwrap();
vm.register_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.register_ioevent(
&evtfd,
IoeventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
vm.unregister_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
.unwrap();
vm.unregister_ioevent(
&evtfd,
IoeventAddress::Mmio(0x1004),
Datamatch::U8(Some(0x7fu8)),
)
.unwrap();
}
#[test]
fn irqfd_resample() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let evtfd1 = Event::new().unwrap();
let evtfd2 = Event::new().unwrap();
vm.create_irq_chip().unwrap();
vm.register_irqfd_resample(&evtfd1, &evtfd2, 4).unwrap();
vm.unregister_irqfd(&evtfd1, 4).unwrap();
// Ensures the ioctl is actually reading the resamplefd.
vm.register_irqfd_resample(&evtfd1, unsafe { &Event::from_raw_descriptor(-1) }, 4)
.unwrap_err();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn set_gsi_routing() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
vm.set_gsi_routing(&[]).unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: KVM_IRQCHIP_IOAPIC,
pin: 3,
},
}])
.unwrap();
vm.set_gsi_routing(&[IrqRoute {
gsi: 1,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
}])
.unwrap();
vm.set_gsi_routing(&[
IrqRoute {
gsi: 1,
source: IrqSource::Irqchip {
chip: KVM_IRQCHIP_IOAPIC,
pin: 3,
},
},
IrqRoute {
gsi: 2,
source: IrqSource::Msi {
address: 0xf000000,
data: 0xa0,
},
},
])
.unwrap();
}
#[test]
fn create_vcpu() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
Vcpu::new(0, &kvm, &vm).unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn debugregs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let mut dregs = vcpu.get_debugregs().unwrap();
dregs.dr7 = 13;
vcpu.set_debugregs(&dregs).unwrap();
let dregs2 = vcpu.get_debugregs().unwrap();
assert_eq!(dregs.dr7, dregs2.dr7);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn xcrs() {
let kvm = Kvm::new().unwrap();
if !kvm.check_extension(Cap::Xcrs) {
return;
}
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let mut xcrs = vcpu.get_xcrs().unwrap();
xcrs.xcrs[0].value = 1;
vcpu.set_xcrs(&xcrs).unwrap();
let xcrs2 = vcpu.get_xcrs().unwrap();
assert_eq!(xcrs.xcrs[0].value, xcrs2.xcrs[0].value);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_msrs() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let mut msrs = vec![
// This one should succeed
kvm_msr_entry {
index: 0x0000011e,
..Default::default()
},
// This one will fail to fetch
kvm_msr_entry {
index: 0x000003f1,
..Default::default()
},
];
vcpu.get_msrs(&mut msrs).unwrap();
assert_eq!(msrs.len(), 1);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn get_hyperv_cpuid() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let cpuid = vcpu.get_hyperv_cpuid();
// Older kernels don't support so tolerate this kind of failure.
match cpuid {
Ok(_) => {}
Err(e) => {
assert_eq!(e.errno(), EINVAL);
}
}
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn enable_feature() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let cap: kvm_enable_cap = kvm_sys::kvm_enable_cap {
cap: kvm_sys::KVM_CAP_HYPERV_SYNIC,
..Default::default()
};
unsafe { vcpu.kvm_enable_cap(&cap) }.unwrap();
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn mp_state() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.create_irq_chip().unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
let state = vcpu.get_mp_state().unwrap();
vcpu.set_mp_state(&state).unwrap();
}
#[test]
fn set_signal_mask() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
vcpu.set_signal_mask(&[base::SIGRTMIN() + 0]).unwrap();
}
#[test]
fn vcpu_mmap_size() {
let kvm = Kvm::new().unwrap();
let mmap_size = kvm.get_vcpu_mmap_size().unwrap();
let page_size = pagesize();
assert!(mmap_size >= page_size);
assert!(mmap_size % page_size == 0);
}
#[test]
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
fn set_identity_map_addr() {
let kvm = Kvm::new().unwrap();
let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
let vm = Vm::new(&kvm, gm).unwrap();
vm.set_identity_map_addr(GuestAddress(0x20000)).unwrap();
}
}<|fim▁end|>
|
// the entire log.
|
<|file_name|>account_connectivity.py<|end_file_name|><|fim▁begin|># ~*~ coding: utf-8 ~*~
from celery import shared_task
from django.utils.translation import ugettext as _, gettext_noop
from common.utils import get_logger
from orgs.utils import org_aware_func
from ..models import Connectivity
from . import const
from .utils import check_asset_can_run_ansible
logger = get_logger(__file__)
__all__ = [
'test_account_connectivity_util', 'test_accounts_connectivity_manual',
'get_test_account_connectivity_tasks', 'test_user_connectivity',
'run_adhoc',
]
def get_test_account_connectivity_tasks(asset):
    """Return the Ansible ping tasks matching the asset's platform.

    Unix-like assets and Windows assets map to their respective ping task
    lists; any other platform is logged and yields an empty task list.
    """
    if asset.is_unixlike():
        return const.PING_UNIXLIKE_TASKS
    if asset.is_windows():
        return const.PING_WINDOWS_TASKS
    # Unsupported platform: log it and hand back nothing to run.
    logger.info(_(
        "The asset {} system platform {} does not "
        "support run Ansible tasks".format(asset.hostname, asset.platform)
    ))
    return []
def run_adhoc(task_name, tasks, inventory):
    """Run a list of Ansible ad hoc tasks against every host in inventory.

    :param task_name: display name for this ad hoc run
    :param tasks: list of Ansible task dicts to execute
    :param inventory: Ansible inventory object holding the target hosts
    :return: tuple of (raw results, results summary)
    """
    # Imported here rather than at module level, presumably to avoid a
    # circular import with the ops app -- confirm before moving it.
    from ops.ansible.runner import AdHocRunner
    runner = AdHocRunner(inventory, options=const.TASK_OPTIONS)
    result = runner.run(tasks, 'all', task_name)
    return result.results_raw, result.results_summary
def test_user_connectivity(task_name, asset, username, password=None, private_key=None):
    """Check whether the given credentials can reach the asset via Ansible.

    :param task_name: display name for the Ansible run
    :param asset: asset (host) to test against
    :param username: login account name
    :param password: optional login password
    :param private_key: optional private key file path
    :return: tuple of (raw results, results summary); both are empty dicts
        when the asset platform has no ping tasks defined
    """
    # Imported here rather than at module level, presumably to avoid a
    # circular import with the ops app -- confirm before moving it.
    from ops.inventory import JMSCustomInventory
    tasks = get_test_account_connectivity_tasks(asset)
    if not tasks:
        logger.debug("No tasks ")
        return {}, {}
    inventory = JMSCustomInventory(
        assets=[asset], username=username, password=password,
        private_key=private_key
    )
    raw, summary = run_adhoc(
        task_name=task_name, tasks=tasks, inventory=inventory
    )
    return raw, summary
@org_aware_func("account")
def test_account_connectivity_util(account, task_name):
    """Test one account's connectivity and persist the result on it.

    :param account: <AuthBook> object (the account to test)
    :param task_name: display name for the Ansible run
    :return: None
    """
    if not check_asset_can_run_ansible(account.asset):
        return
    # Load the account's auth info (password / private key) before use.
    account.load_auth()
    try:
        raw, summary = test_user_connectivity(
            task_name=task_name, asset=account.asset,
            username=account.username, password=account.password,
            private_key=account.private_key_file
        )
    except Exception as e:
        logger.warn("Failed run adhoc {}, {}".format(task_name, e))
        return
    # Record the outcome on the account so callers/UI can read it back.
    if summary.get('success'):
        account.set_connectivity(Connectivity.ok)
    else:
        account.set_connectivity(Connectivity.failed)
<|fim▁hole|>def test_accounts_connectivity_manual(accounts):
"""
:param accounts: <AuthBook>对象
"""
for account in accounts:
task_name = gettext_noop("Test account connectivity: ") + str(account)
test_account_connectivity_util(account, task_name)
print(".\n")<|fim▁end|>
|
@shared_task(queue="ansible")
|
<|file_name|>DocumentController.js<|end_file_name|><|fim▁begin|>/**
* DocumentController
*
* @description :: Server-side logic for managing documents
* @help :: See http://links.sailsjs.org/docs/controllers
*/
var exec = require('child_process').exec;
var path = require('path');
var fs = require('fs');
var UPLOADFOLDER = __dirname+'/../../.tmp/uploads';
module.exports = {
  /**
   * `DocumentController.upload()`
   *
   * Receives one or more files from the `documents` upload field, stores
   * them in the default upload dir, and responds with the stored file
   * descriptors (each annotated with its on-disk basename).
   */
  upload: function (req, res) {
    req.file("documents").upload(function (err, files) {
      if (err) {
        sails.log.error(err);
        return res.serverError(err);
      }
      // Expose the server-side filename so clients can reference it later
      // (e.g. when asking for a conversion).
      for (var i = 0; i < files.length; i++) {
        files[i].uploadedAs = path.basename(files[i].fd);
      };
      // EmailService.send(from, subject, text, html);
      return res.json({
        message: files.length + ' file(s) uploaded successfully!',
        files: files
      });
    });
  },
// filters source: http://listarchives.libreoffice.org/global/users/msg15151.html
// org.openoffice.da.writer2xhtml.epub
// org.openoffice.da.calc2xhtml11
// Text - txt - csv (StarCalc)
// impress_svg_Export
// math8
// EPS - Encapsulated PostScript
// StarOffice XML (Base) Report Chart
// org.openoffice.da.writer2xhtml.mathml.xsl
// impress_svm_Export
// MS Excel 95 (StarWriter)
// impress_pdf_addstream_import
// JPG - JPEG
// placeware_Export
// StarOffice XML (Math)
// T602Document
// impress_jpg_Export
// writer_globaldocument_StarOffice_XML_Writer
// draw_emf_Export
// MS Word 2003 XML
// WMF - MS Windows Metafile
// GIF - Graphics Interchange
// writer_pdf_import
// calc8
// writer_globaldocument_StarOffice_XML_Writer_GlobalDocument
// MS Word 97 Vorlage
// impress_tif_Export
// draw_xpm_Export
// Calc MS Excel 2007 XML
// Text (encoded)
// MathML XML (Math)
// MET - OS/2 Metafile
// MS PowerPoint 97 AutoPlay
// impress8
// StarOffice XML (Calc)
// calc_HTML_WebQuery
// RAS - Sun Rasterfile
// MS Excel 5.0 (StarWriter)
// impress_png_Export
// DXF - AutoCAD Interchange
// impress_pct_Export
// impress_met_Export
// SGF - StarOffice Writer SGF
// draw_eps_Export
// Calc MS Excel 2007 Binary
// calc8_template
// Calc MS Excel 2007 XML Template
// impress_pbm_Export
// draw_pdf_import
// Calc Office Open XML
// math_pdf_Export
// Rich Text Format (StarCalc)
// MS PowerPoint 97 Vorlage
// StarOffice XML (Base)
// DIF
// Impress MS PowerPoint 2007 XML Template
// MS Excel 2003 XML
// impress_ras_Export
// draw_PCD_Photo_CD_Base16
// draw_bmp_Export
// WordPerfect Graphics
// StarOffice XML (Writer)
// PGM - Portable Graymap
// Office Open XML Text Template
// MS Excel 5.0/95
// draw_svg_Export
// draw_PCD_Photo_CD_Base4
// TGA - Truevision TARGA
// Quattro Pro 6.0
// writer_globaldocument_pdf_Export
// calc_pdf_addstream_import
// writerglobal8_HTML
// draw_svm_Export
// HTML
// EMF - MS Windows Metafile
// PPM - Portable Pixelmap
// Lotus
// impress_ppm_Export
// draw_jpg_Export
// Text
// TIF - Tag Image File
// Impress Office Open XML AutoPlay
// StarOffice XML (Base) Report
// PNG - Portable Network Graphic
// draw8
// Rich Text Format
// writer_web_StarOffice_XML_Writer_Web_Template
// org.openoffice.da.writer2xhtml
// MS_Works
// Office Open XML Text
// SVG - Scalable Vector Graphics
// org.openoffice.da.writer2xhtml11
// draw_tif_Export
// impress_gif_Export
// StarOffice XML (Draw)
// StarOffice XML (Impress)
// Text (encoded) (StarWriter/Web)
// writer_web_pdf_Export
// MediaWiki_Web
// impress_pdf_Export
// draw_pdf_addstream_import<|fim▁hole|> // draw_png_Export
// HTML (StarCalc)
// HTML (StarWriter)
// impress_StarOffice_XML_Impress_Template
// draw_pct_Export
// calc_StarOffice_XML_Calc_Template
// MS Excel 95 Vorlage/Template
// writerglobal8_writer
// MS Excel 95
// draw_met_Export
// dBase
// MS Excel 97
// MS Excel 4.0
// draw_pbm_Export
// impress_StarOffice_XML_Draw
// Impress Office Open XML
// writerweb8_writer
// chart8
// MediaWiki
// MS Excel 4.0 Vorlage/Template
// impress_wmf_Export
// draw_ras_Export
// writer_StarOffice_XML_Writer_Template
// BMP - MS Windows
// impress8_template
// LotusWordPro
// impress_pgm_Export
// SGV - StarDraw 2.0
// draw_PCD_Photo_CD_Base
// draw_html_Export
// writer8_template
// Calc Office Open XML Template
// writerglobal8
// draw_flash_Export
// MS Word 2007 XML Template
// impress8_draw
// CGM - Computer Graphics Metafile
// MS PowerPoint 97
// WordPerfect
// impress_emf_Export
// writer_pdf_Export
// PSD - Adobe Photoshop
// PBM - Portable Bitmap
// draw_ppm_Export
// writer_pdf_addstream_import
// PCX - Zsoft Paintbrush
// writer_web_HTML_help
// MS Excel 4.0 (StarWriter)
// Impress Office Open XML Template
// org.openoffice.da.writer2xhtml.mathml
// MathType 3.x
// impress_xpm_Export
// writer_web_StarOffice_XML_Writer
// writerweb8_writer_template
// MS Word 95
// impress_html_Export
// MS Word 97
// draw_gif_Export
// writer8
// MS Excel 5.0/95 Vorlage/Template
// draw8_template
// StarOffice XML (Chart)
// XPM
// draw_pdf_Export
// calc_pdf_Export
// impress_eps_Export
// XBM - X-Consortium
// Text (encoded) (StarWriter/GlobalDocument)
// writer_MIZI_Hwp_97
// MS WinWord 6.0
// Lotus 1-2-3 1.0 (WIN) (StarWriter)
// SYLK
// MS Word 2007 XML
// Text (StarWriter/Web)
// impress_pdf_import
// MS Excel 97 Vorlage/Template
// Impress MS PowerPoint 2007 XML AutoPlay
// Impress MS PowerPoint 2007 XML
// draw_wmf_Export
// Unifa Adressbuch
// org.openoffice.da.calc2xhtml
// impress_bmp_Export
// Lotus 1-2-3 1.0 (DOS) (StarWriter)
// MS Word 95 Vorlage
// MS WinWord 5
// PCT - Mac Pict
// SVM - StarView Metafile
// draw_StarOffice_XML_Draw_Template
// impress_flash_Export
// draw_pgm_Export
convert: function (req, res) {
var stdout = '';
var stderr = '';
sails.log.info('convert');
if(!req.param('filename')) res.badRequest('filename is required');
var source = req.param('filename');
var inputDir = UPLOADFOLDER +'/'+source;
var outputFileExtension = req.param('extension') ? req.param('extension') : 'pdf'; // example 'pdf';
var outputFilterName = req.param('filter') ? ':'+req.param('filter') : ''; //(optinal) example ':'+'MS Excel 95';
var outputDir = UPLOADFOLDER;
if(req.param('dir')) {
outputDir += '/'+req.param('dir');
}
outputDir = path.normalize(outputDir);
inputDir = path.normalize(inputDir);
var target = outputDir+"/"+path.basename(source, '.odt')+"."+outputFileExtension;
var command = 'soffice --headless --invisible --convert-to '+outputFileExtension+outputFilterName+' --outdir '+outputDir+' '+inputDir;
sails.log.info(command);
var child = exec(command, function (code, stdout, stderr) {
if(code) {
sails.log.error(code);
}
if(stderr) {
sails.log.error(stderr);
}
if(stdout) {
sails.log.info(stdout);
}
res.json({target:target, code: code, stdout: stdout, stderr: stderr});
// res.download(target); // not working over socket.io
});
}
};<|fim▁end|>
| |
<|file_name|>_env.py<|end_file_name|><|fim▁begin|># -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2017 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from snapcraft import formatting_utils, project
from snapcraft.internal import common, elf, pluginhandler
from typing import Dict, List
def env_for_classic(base: str, arch_triplet: str) -> List[str]:
    """Return the environment variables needed for a classic confined build.

    The result is a single-element list holding an LD_LIBRARY_PATH
    assignment that points at the base snap's core library directories.
    """
    core_library_paths = common.get_library_paths(
        common.get_core_path(base), arch_triplet, existing_only=False
    )
    ld_library_path = formatting_utils.format_path_variable(
        "LD_LIBRARY_PATH", core_library_paths, prepend="", separator=":"
    )
    return [ld_library_path]
def runtime_env(root: str, arch_triplet: str) -> List[str]:
    """Return the environment variables required for running binaries.

    Always exports a PATH preferring the bin dirs under ``root``; also
    exports LD_LIBRARY_PATH when the tree under ``root`` contributes any
    library directories.
    """
    bin_dir_templates = ["{0}/usr/sbin", "{0}/usr/bin", "{0}/sbin", "{0}/bin", "$PATH"]
    env = ['PATH="{}"'.format(":".join(t.format(root) for t in bin_dir_templates))]
    # Default library paths first, then more specific ones contributed by
    # staged packages, if any.
    library_paths = common.get_library_paths(root, arch_triplet)
    library_paths += elf.determine_ld_library_path(root)
    if library_paths:
        env.append(
            formatting_utils.format_path_variable(
                "LD_LIBRARY_PATH", library_paths, prepend="", separator=":"
            )
        )
    return env
def build_env(root: str, snap_name: str, arch_triplet: str) -> List[str]:
"""Set the environment variables required for building.
This is required for the current parts installdir due to stage-packages
and also to setup the stagedir.
"""
env = []
paths = common.get_include_paths(root, arch_triplet)
if paths:
for envvar in ["CPPFLAGS", "CFLAGS", "CXXFLAGS"]:
env.append(
formatting_utils.format_path_variable(
envvar, paths, prepend="-I", separator=" "
)
)
paths = common.get_library_paths(root, arch_triplet)
if paths:
env.append(
formatting_utils.format_path_variable(
"LDFLAGS", paths, prepend="-L", separator=" "
)
)
paths = common.get_pkg_config_paths(root, arch_triplet)
if paths:<|fim▁hole|> )
)
return env
def build_env_for_stage(stagedir: str, snap_name: str, arch_triplet: str) -> List[str]:
env = build_env(stagedir, snap_name, arch_triplet)
env.append('PERL5LIB="{0}/usr/share/perl5/"'.format(stagedir))
return env
def snapcraft_global_environment(project: project.Project) -> Dict[str, str]:
if project.info.name:
name = project.info.name
else:
name = ""
if project.info.version:
version = project.info.version
else:
version = ""
if project.info.grade:
grade = project.info.grade
else:
grade = ""
return {
"SNAPCRAFT_ARCH_TRIPLET": project.arch_triplet,
"SNAPCRAFT_PARALLEL_BUILD_COUNT": project.parallel_build_count,
"SNAPCRAFT_PROJECT_NAME": name,
"SNAPCRAFT_PROJECT_VERSION": version,
"SNAPCRAFT_PROJECT_GRADE": grade,
"SNAPCRAFT_STAGE": project.stage_dir,
"SNAPCRAFT_PRIME": project.prime_dir,
}
def snapcraft_part_environment(part: pluginhandler.PluginHandler) -> Dict[str, str]:
return {
"SNAPCRAFT_PART_SRC": part.plugin.sourcedir,
"SNAPCRAFT_PART_BUILD": part.plugin.builddir,
"SNAPCRAFT_PART_INSTALL": part.plugin.installdir,
}
def environment_to_replacements(environment: Dict[str, str]) -> Dict[str, str]:
replacements = {} # type: Dict[str, str]
for variable, value in environment.items():
# Support both $VAR and ${VAR} syntax
replacements["${}".format(variable)] = value
replacements["${{{}}}".format(variable)] = value
return replacements<|fim▁end|>
|
env.append(
formatting_utils.format_path_variable(
"PKG_CONFIG_PATH", paths, prepend="", separator=":"
|
<|file_name|>users.js<|end_file_name|><|fim▁begin|>// @flow
import type { Action } from "../actions/types";
import type { UserState } from "../types";
const initialState: UserState = {
fetching: false,
fetched: false,
users: [],
error: null
};
export default function users(state: UserState = initialState, action: Action) {
switch (action.type) {
case "FETCH_USERS_PENDING":
return {
...state,
fetching: true
};
case "FETCH_USERS_FULFILLED":
return {
...state,
fetching: false,
fetched: true,
users: action.payload.data,
error: null
};
case "FETCH_USERS_REJECTED":
return {
...state,
fetching: false,
error: action.payload
};
default:
return state;
}<|fim▁hole|><|fim▁end|>
|
}
|
<|file_name|>navigator-view.js<|end_file_name|><|fim▁begin|>// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
(async function() {
TestRunner.addResult(`Tests scripts panel file selectors.\n`);
await TestRunner.loadLegacyModule('sources'); await TestRunner.loadTestModule('sources_test_runner');
await TestRunner.loadTestModule('sdk_test_runner');
await TestRunner.showPanel('sources');
await TestRunner.addIframe(
'resources/post-message-listener.html', {name: 'childframe'});
Bindings.debuggerWorkspaceBinding.resetForTest(TestRunner.mainTarget);
Bindings.resourceMapping.resetForTest(TestRunner.mainTarget);
var subframe = TestRunner.mainFrame().childFrames[0];
var sourcesNavigatorView = new Sources.NetworkNavigatorView();
sourcesNavigatorView.show(UI.inspectorView.element);
var contentScriptsNavigatorView = new Sources.ContentScriptsNavigatorView();
contentScriptsNavigatorView.show(UI.inspectorView.element);
var uiSourceCodes = [];
async function addUISourceCode(url, isContentScript, frame) {
if (isContentScript) {
var uiSourceCode =
await SourcesTestRunner.addScriptUISourceCode(url, '', true, 42);
uiSourceCodes.push(uiSourceCode);
return;
}
TestRunner.addScriptForFrame(url, '', frame || TestRunner.mainFrame());
var uiSourceCode = await waitForUISourceCodeAdded(url);
uiSourceCodes.push(uiSourceCode);
}
async function addUISourceCode2(url) {
TestRunner.evaluateInPageAnonymously(`
window.workers = window.workers || [];
window.workers.push(new Worker('${url}'));
`);
var uiSourceCode = await waitForUISourceCodeAdded(url);
uiSourceCodes.push(uiSourceCode);
}
function waitForUISourceCodeAdded(url) {
var fulfill;
var promise = new Promise(x => fulfill = x);
Workspace.workspace.addEventListener(
Workspace.Workspace.Events.UISourceCodeAdded, uiSourceCodeAdded);
return promise;
function uiSourceCodeAdded(event) {
if (event.data.url() !== url)
return;
Workspace.workspace.removeEventListener(
Workspace.Workspace.Events.UISourceCodeAdded, uiSourceCodeAdded);
fulfill(event.data);
}
}
function revealUISourceCode(uiSourceCode) {
sourcesNavigatorView.revealUISourceCode(uiSourceCode);
contentScriptsNavigatorView.revealUISourceCode(uiSourceCode);
}
var rootURL = 'http://localhost:8080/LayoutTests/inspector/debugger/';
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Adding first resource:');
await addUISourceCode(rootURL + 'foo/bar/script.js', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Adding second resource:');
await addUISourceCode(rootURL + 'foo/bar/script.js?a=2', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Adding resources into another frame:');
await addUISourceCode(rootURL + 'foo/bar/script.js?a=1', false, subframe);
await addUISourceCode(rootURL + 'foo/baz/script.js', false, subframe);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Adding resources into another target:');
await addUISourceCode2(TestRunner.url('resources/script1.js?a=3'));
await addUISourceCode2(TestRunner.url('resources/script2.js'));
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Adding content scripts and some random resources:');
await addUISourceCode(rootURL + 'foo/bar/contentScript2.js?a=1', true);
await addUISourceCode(rootURL + 'foo/bar/contentScript.js?a=2', true);
await addUISourceCode(rootURL + 'foo/bar/contentScript.js?a=1', true);
await addUISourceCode('http://example.com/', false);
await addUISourceCode('http://example.com/?a=b', false);
await addUISourceCode(
'http://example.com/the%2fdir/foo?bar=100&baz=a%20%2fb', false);
// Verify that adding invalid URL does not throw exception.
await addUISourceCode(
'http://example.com/the%2fdir/foo?bar=100%&baz=a%20%2fb', false);
await addUISourceCode(
'http://example.com/path%20with%20spaces/white%20space.html', false);
await addUISourceCode('?a=b', false);
await addUISourceCode(<|fim▁hole|> SourcesTestRunner.dumpNavigatorViewInAllModes(contentScriptsNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Revealing first resource:');
revealUISourceCode(uiSourceCodes[0]);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
// Here we keep http://localhost:8080/LayoutTests/inspector/debugger2/ folder
// collapsed while adding resources into it.
TestRunner.addResult('\n\n================================================');
TestRunner.addResult(
'Adding some resources to change the way debugger folder looks like, first:');
var rootURL2 = 'http://localhost:8080/LayoutTests/inspector/debugger2/';
await addUISourceCode(rootURL2 + 'foo/bar/script.js', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Second:');
await addUISourceCode(rootURL2 + 'foo/bar/script.js?a=2', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Others:');
await addUISourceCode(rootURL2 + 'foo/bar/script.js?a=1', false);
await addUISourceCode(rootURL2 + 'foo/baz/script.js', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
var rootURL3 = 'http://localhost:8080/LayoutTests/inspector/debugger3/';
await addUISourceCode(
rootURL3 + 'hasOwnProperty/__proto__/constructor/foo.js', false);
await addUISourceCode(rootURL3 + 'hasOwnProperty/__proto__/foo.js', false);
await addUISourceCode(rootURL3 + 'hasOwnProperty/foo.js', false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Revealing all resources:');
for (var i = 0; i < uiSourceCodes.length; ++i)
revealUISourceCode(uiSourceCodes[i]);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
SourcesTestRunner.dumpNavigatorViewInAllModes(contentScriptsNavigatorView);
TestRunner.addResult('\n\n================================================');
TestRunner.addResult('Removing all resources:');
for (const target of SDK.targetManager.targets()) {
if (target !== TestRunner.mainTarget)
Bindings.debuggerWorkspaceBinding.resetForTest(target);
}
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
SourcesTestRunner.dumpNavigatorViewInAllModes(contentScriptsNavigatorView);
TestRunner.completeTest();
})();<|fim▁end|>
|
'very_looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong_url',
false);
SourcesTestRunner.dumpNavigatorViewInAllModes(sourcesNavigatorView);
|
<|file_name|>todo-input.component.spec.ts<|end_file_name|><|fim▁begin|>/* tslint:disable:no-unused-variable */
import { async, ComponentFixture, TestBed } from '@angular/core/testing';
import { By } from '@angular/platform-browser';
import { DebugElement } from '@angular/core';
import { TodoInputComponent } from './todo-input.component';
describe('TodoInputComponent', () => {
let component: TodoInputComponent;
let fixture: ComponentFixture<TodoInputComponent>;<|fim▁hole|> })
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(TodoInputComponent);
component = fixture.componentInstance;
fixture.detectChanges();
});
it('should create', () => {
expect(component).toBeTruthy();
});
});<|fim▁end|>
|
beforeEach(async(() => {
TestBed.configureTestingModule({
declarations: [ TodoInputComponent ]
|
<|file_name|>measure.py<|end_file_name|><|fim▁begin|># Copyright (c) 2007, Robert Coup <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Distance nor the names of its contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
"""
Distance and Area objects to allow for sensible and convenient calculation
and conversions.
Authors: Robert Coup, Justin Bronn, Riccardo Di Virgilio
Inspired by GeoPy (http://exogen.case.edu/projects/geopy/)
and Geoff Biggs' PhD work on dimensioned units for robotics.
"""
__all__ = ['A', 'Area', 'D', 'Distance']
from decimal import Decimal
from functools import total_ordering
from django.utils import six
NUMERIC_TYPES = six.integer_types + (float, Decimal)
AREA_PREFIX = "sq_"
def pretty_name(obj):
    """Return ``obj``'s name if it is a class, otherwise its class's name."""
    if obj.__class__ == type:
        return obj.__name__
    return obj.__class__.__name__
@total_ordering
class MeasureBase(object):
STANDARD_UNIT = None
ALIAS = {}
UNITS = {}
LALIAS = {}
def __init__(self, default_unit=None, **kwargs):
value, self._default_unit = self.default_units(kwargs)
setattr(self, self.STANDARD_UNIT, value)<|fim▁hole|> if default_unit and isinstance(default_unit, six.string_types):
self._default_unit = default_unit
def _get_standard(self):
return getattr(self, self.STANDARD_UNIT)
def _set_standard(self, value):
setattr(self, self.STANDARD_UNIT, value)
standard = property(_get_standard, _set_standard)
def __getattr__(self, name):
if name in self.UNITS:
return self.standard / self.UNITS[name]
else:
raise AttributeError('Unknown unit type: %s' % name)
def __repr__(self):
return '%s(%s=%s)' % (pretty_name(self), self._default_unit,
getattr(self, self._default_unit))
def __str__(self):
return '%s %s' % (getattr(self, self._default_unit), self._default_unit)
# **** Comparison methods ****
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.standard == other.standard
else:
return NotImplemented
def __lt__(self, other):
if isinstance(other, self.__class__):
return self.standard < other.standard
else:
return NotImplemented
# **** Operators methods ****
def __add__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard + other.standard)})
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __iadd__(self, other):
if isinstance(other, self.__class__):
self.standard += other.standard
return self
else:
raise TypeError('%(class)s must be added with %(class)s' % {"class": pretty_name(self)})
def __sub__(self, other):
if isinstance(other, self.__class__):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard - other.standard)})
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __isub__(self, other):
if isinstance(other, self.__class__):
self.standard -= other.standard
return self
else:
raise TypeError('%(class)s must be subtracted from %(class)s' % {"class": pretty_name(self)})
def __mul__(self, other):
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard * other)})
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __imul__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard *= float(other)
return self
else:
raise TypeError('%(class)s must be multiplied with number' % {"class": pretty_name(self)})
def __rmul__(self, other):
return self * other
def __truediv__(self, other):
if isinstance(other, self.__class__):
return self.standard / other.standard
if isinstance(other, NUMERIC_TYPES):
return self.__class__(default_unit=self._default_unit,
**{self.STANDARD_UNIT: (self.standard / other)})
else:
raise TypeError('%(class)s must be divided with number or %(class)s' % {"class": pretty_name(self)})
def __div__(self, other): # Python 2 compatibility
return type(self).__truediv__(self, other)
def __itruediv__(self, other):
if isinstance(other, NUMERIC_TYPES):
self.standard /= float(other)
return self
else:
raise TypeError('%(class)s must be divided with number' % {"class": pretty_name(self)})
def __idiv__(self, other): # Python 2 compatibility
return type(self).__itruediv__(self, other)
def __bool__(self):
return bool(self.standard)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def default_units(self, kwargs):
"""
Return the unit value and the default units specified
from the given keyword arguments dictionary.
"""
val = 0.0
default_unit = self.STANDARD_UNIT
for unit, value in six.iteritems(kwargs):
if not isinstance(value, float):
value = float(value)
if unit in self.UNITS:
val += self.UNITS[unit] * value
default_unit = unit
elif unit in self.ALIAS:
u = self.ALIAS[unit]
val += self.UNITS[u] * value
default_unit = u
else:
lower = unit.lower()
if lower in self.UNITS:
val += self.UNITS[lower] * value
default_unit = lower
elif lower in self.LALIAS:
u = self.LALIAS[lower]
val += self.UNITS[u] * value
default_unit = u
else:
raise AttributeError('Unknown unit type: %s' % unit)
return val, default_unit
@classmethod
def unit_attname(cls, unit_str):
"""
Retrieves the unit attribute name for the given unit string.
For example, if the given unit string is 'metre', 'm' would be returned.
An exception is raised if an attribute cannot be found.
"""
lower = unit_str.lower()
if unit_str in cls.UNITS:
return unit_str
elif lower in cls.UNITS:
return lower
elif lower in cls.LALIAS:
return cls.LALIAS[lower]
else:
raise Exception('Could not find a unit keyword associated with "%s"' % unit_str)
class Distance(MeasureBase):
    """A distance (length) measurement convertible between many units.

    Values are stored internally in meters (STANDARD_UNIT) and exposed
    per-unit via attribute access, e.g. ``Distance(km=1).mi``.
    """
    STANDARD_UNIT = "m"
    # Conversion factors: 1 <unit> == <factor> meters.
    UNITS = {
        'chain': 20.1168,
        'chain_benoit': 20.116782,
        'chain_sears': 20.1167645,
        'british_chain_benoit': 20.1167824944,
        'british_chain_sears': 20.1167651216,
        'british_chain_sears_truncated': 20.116756,
        'cm': 0.01,
        'british_ft': 0.304799471539,
        'british_yd': 0.914398414616,
        'clarke_ft': 0.3047972654,
        'clarke_link': 0.201166195164,
        'fathom': 1.8288,
        'ft': 0.3048,
        'german_m': 1.0000135965,
        'gold_coast_ft': 0.304799710181508,
        'indian_yd': 0.914398530744,
        'inch': 0.0254,
        'km': 1000.0,
        'link': 0.201168,
        'link_benoit': 0.20116782,
        'link_sears': 0.20116765,
        'm': 1.0,
        'mi': 1609.344,
        'mm': 0.001,
        'nm': 1852.0,
        'nm_uk': 1853.184,
        'rod': 5.0292,
        'sears_yd': 0.91439841,
        'survey_ft': 0.304800609601,
        'um': 0.000001,
        'yd': 0.9144,
    }
    # Unit aliases for `UNIT` terms encountered in Spatial Reference WKT.
    ALIAS = {
        'centimeter': 'cm',
        'foot': 'ft',
        'inches': 'inch',
        'kilometer': 'km',
        'kilometre': 'km',
        'meter': 'm',
        'metre': 'm',
        'micrometer': 'um',
        'micrometre': 'um',
        'millimeter': 'mm',
        'millimetre': 'mm',
        'mile': 'mi',
        'yard': 'yd',
        'British chain (Benoit 1895 B)': 'british_chain_benoit',
        'British chain (Sears 1922)': 'british_chain_sears',
        'British chain (Sears 1922 truncated)': 'british_chain_sears_truncated',
        'British foot (Sears 1922)': 'british_ft',
        'British foot': 'british_ft',
        'British yard (Sears 1922)': 'british_yd',
        'British yard': 'british_yd',
        "Clarke's Foot": 'clarke_ft',
        "Clarke's link": 'clarke_link',
        'Chain (Benoit)': 'chain_benoit',
        'Chain (Sears)': 'chain_sears',
        'Foot (International)': 'ft',
        'German legal metre': 'german_m',
        'Gold Coast foot': 'gold_coast_ft',
        'Indian yard': 'indian_yd',
        'Link (Benoit)': 'link_benoit',
        'Link (Sears)': 'link_sears',
        'Nautical Mile': 'nm',
        'Nautical Mile (UK)': 'nm_uk',
        'US survey foot': 'survey_ft',
        'U.S. Foot': 'survey_ft',
        'Yard (Indian)': 'indian_yd',
        'Yard (Sears)': 'sears_yd'
    }
    # Lowercased alias lookup used by MeasureBase for case-insensitive units.
    LALIAS = {k.lower(): v for k, v in ALIAS.items()}
    def __mul__(self, other):
        # Distance * Distance yields an Area; Distance * number scales the
        # Distance; anything else raises TypeError.
        if isinstance(other, self.__class__):
            return Area(default_unit=AREA_PREFIX + self._default_unit,
                **{AREA_PREFIX + self.STANDARD_UNIT: (self.standard * other.standard)})
        elif isinstance(other, NUMERIC_TYPES):
            return self.__class__(default_unit=self._default_unit,
                **{self.STANDARD_UNIT: (self.standard * other)})
        else:
            raise TypeError('%(distance)s must be multiplied with number or %(distance)s' % {
                "distance": pretty_name(self.__class__),
            })
class Area(MeasureBase):
    """Area measure derived from Distance by squaring each linear unit."""

    STANDARD_UNIT = AREA_PREFIX + Distance.STANDARD_UNIT
    # Square every linear conversion factor and prefix the unit name
    # (e.g. 'm' -> 'sq_m'); aliases map onto the prefixed names as well.
    UNITS = {AREA_PREFIX + unit: factor ** 2 for unit, factor in Distance.UNITS.items()}
    ALIAS = {alias: AREA_PREFIX + unit for alias, unit in Distance.ALIAS.items()}
    LALIAS = {alias.lower(): unit for alias, unit in ALIAS.items()}

    def __truediv__(self, other):
        """Area / number -> scaled Area; anything else raises TypeError."""
        if not isinstance(other, NUMERIC_TYPES):
            raise TypeError('%(class)s must be divided by a number' % {"class": pretty_name(self)})
        return self.__class__(default_unit=self._default_unit,
                              **{self.STANDARD_UNIT: (self.standard / other)})

    def __div__(self, other):  # Python 2 compatibility
        return type(self).__truediv__(self, other)
# Shortcuts: conventional short aliases, so callers can write D(m=1) or A(sq_m=1).
D = Distance
A = Area
| |
<|file_name|>hops_editor.py<|end_file_name|><|fim▁begin|># ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from __future__ import absolute_import
from __future__ import print_function
import os
import yaml
from pyface.confirmation_dialog import ConfirmationDialog
from pyface.constant import OK, CANCEL, YES
from pyface.file_dialog import FileDialog
from traits.api import (
HasTraits,
Str,
List,
Int,
Any,
Button,
Bool,
on_trait_change,
Instance,
)
from traitsui.api import (
View,
Item,
UItem,
HGroup,
InstanceEditor,
HSplit,
VGroup,
EnumEditor,
)
from traitsui.handler import Controller
from traitsui.menu import Action
from traitsui.table_column import ObjectColumn
from pychron.core.helpers.filetools import fileiter, add_extension
from pychron.core.ui.table_editor import myTableEditor
from pychron.core.ui.text_editor import myTextEditor
from pychron.core.yaml import yload
from pychron.envisage.icon_button_editor import icon_button_editor
from pychron.envisage.resources import icon
from pychron.experiment.automated_run.hop_util import split_hopstr
from pychron.loggable import Loggable
from pychron.paths import paths
# class NullInt(Int):
# default_value = None
class Position(HasTraits):
    """A single isotope/detector assignment within a peak hop."""

    detector = Str
    isotope = Str
    deflection = Int
    name = Str

    def to_yaml(self):
        """Serialize this position as a plain dict for the YAML hop format."""
        return dict(
            detector=self.detector,
            isotope=self.isotope,
            active=True,
            deflection=self.deflection,
            protect=False,
            is_baseline=False,
        )

    def to_string(self):
        """Render as 'iso:det', or 'iso:det:deflection' when a deflection is set."""
        parts = [self.isotope, self.detector]
        if self.deflection:
            parts.append(self.deflection)
        return ":".join(str(part) for part in parts)
class Hop(HasTraits):
    """One step in a peak-hop sequence.

    A hop is a set of isotope/detector positions measured together for
    ``counts`` cycles after waiting ``settle`` seconds following the magnet
    move.
    """

    positions = List
    counts = Int
    settle = Int
    # FIX: previously declared as ``isotope_label``, but both the change
    # handler below and the table column in HopEditorView bind to
    # ``isotopes_label``.  The declaration is renamed so the trait actually
    # exists and change notification fires for the view.
    isotopes_label = Str
    name = Str

    detectors = List(["A", "B"])

    add_position_button = Button
    remove_position_button = Button
    selected = Any

    error_message = Str

    def to_string(self):
        """Render in the legacy text format: ('iso:det[:defl], ...', counts, settle)."""
        vs = [str(self.counts), str(self.settle)]
        hs = "'{}'".format(
            ", ".join(
                [p.to_string() for p in self.positions if p.isotope and p.detector]
            )
        )
        return "({}, {})".format(hs, ", ".join(vs))

    def to_yaml(self):
        """Serialize to a dict for the YAML hop format."""
        obj = {"counts": self.counts, "settle": self.settle}
        poss = [p for p in self.positions if p.isotope and p.detector]
        if poss:
            obj["cup_configuration"] = [p.to_yaml() for p in poss]
            # The first fully-specified position defines the magnet positioning.
            pp = poss[0]
            obj["positioning"] = {"detector": pp.detector, "isotope": pp.isotope}
        return obj

    def parse_hopstr(self, hs):
        """Populate ``positions`` from a legacy hop string, e.g. 'Ar40:H1, Ar36:CDD:110'."""
        for is_baseline, iso, det, defl in split_hopstr(hs):
            p = Position(isotope=iso, detector=det, deflection=int(defl) if defl else 0)
            self.positions.append(p)
        self._handle_position_change()

    def validate_hop(self):
        """Return True when no isotope or detector is used twice.

        On failure, ``error_message`` describes the duplicate(s).
        """
        self.error_message = ""
        n = len(self.positions)

        ps = {p.isotope for p in self.positions}
        dup_iso = len(set(ps)) < n
        if dup_iso:
            self.error_message = self._make_error_message("isotope")

        ds = {p.detector for p in self.positions}
        dup_det = len(ds) < n
        if dup_det:
            em = self._make_error_message("detector")
            if self.error_message:
                self.error_message = "{}; {}".format(self.error_message, em)
            else:
                self.error_message = em

        return not (dup_iso or dup_det)

    def _make_error_message(self, attr):
        # Collect the values of ``attr`` that appear more than once.
        dets = []
        ps = []
        for p in self.positions:
            det = getattr(p, attr)
            if det in dets:
                ps.append(det)
            dets.append(det)
        return "Multiple {}s: {}".format(attr.capitalize(), ", ".join(ps))

    def _add_position_button_fired(self):
        self.positions.append(Position())

    def _remove_position_button_fired(self):
        idx = self.positions.index(self.selected)
        self.positions.remove(self.selected)
        if len(self.positions) > 0:
            # idx - 1 is -1 when the first row was removed, i.e. select the
            # last remaining row.
            self.selected = self.positions[idx - 1]
        else:
            self.selected = None

    @on_trait_change("positions:isotope, positions[]")
    def _handle_position_change(self):
        # Keep the comma-separated summary shown in the hop table up to date.
        self.isotopes_label = ",".join([i.isotope for i in self.positions])

    def traits_view(self):
        from pychron.pychron_constants import ISOTOPES

        cols = [
            ObjectColumn(name="name", label="", width=20, editable=False),
            ObjectColumn(name="isotope", editor=EnumEditor(values=ISOTOPES)),
            ObjectColumn(name="detector", editor=EnumEditor(values=self.detectors)),
            ObjectColumn(
                name="deflection",
            ),
        ]
        v = View(
            VGroup(
                HGroup(
                    Item("counts", tooltip="Number of measurements at this position"),
                    Item(
                        "settle",
                        label="Settle (s)",
                        tooltip="Delay in seconds after magnet move and before measurement",
                    ),
                ),
                UItem(
                    "positions",
                    editor=myTableEditor(
                        columns=cols,
                        sortable=False,
                        clear_selection_on_dclicked=True,
                        selected="selected",
                    ),
                ),
                HGroup(
                    icon_button_editor(
                        "add_position_button",
                        "add",
                        tooltip="Add isotope/detector to measure",
                    ),
                    icon_button_editor(
                        "remove_position_button",
                        "delete",
                        tooltip="Remove selected isotope/detector",
                        enabled_when="selected",
                    ),
                ),
            )
        )
        return v
class HopSequence(HasTraits):
    """An ordered collection of Hop steps with 1-based display labels."""

    hops = List

    def to_string(self):
        """Render every hop in the legacy text format, one per line."""
        return "\n".join([hi.to_string() for hi in self.hops])

    def to_yaml(self):
        """Serialize every hop for the YAML hop format."""
        return [hi.to_yaml() for hi in self.hops]

    def add_hop(self, idx):
        """Insert a copy of the hop at ``idx``, or append a blank hop when idx is None."""
        if idx is not None:
            h = self.hops[idx]
            hh = h.clone_traits()
            self.hops.insert(idx, hh)
        else:
            h = Hop()
            self.hops.append(h)

        self._label_hops()

    def _label_hops(self):
        # Renumber hops and their positions (1-based) after any mutation.
        # FIX: removed a leftover debug ``print(hi.name)`` that wrote to
        # stdout on every relabel.
        for i, hi in enumerate(self.hops):
            hi.name = str(i + 1)
            for j, pi in enumerate(hi.positions):
                pi.name = str(j + 1)

    def remove_hop(self, idx):
        """Delete the hop at ``idx`` and renumber the remainder."""
        self.hops.pop(idx)
        self._label_hops()

    def label_hops(self):
        """Public wrapper around the renumbering helper."""
        self._label_hops()
class HopEditorModel(Loggable):
    """Model backing the peak-hop editor.

    Holds a HopSequence plus the file it was loaded from, and knows how to
    (de)serialize it in two formats: the legacy text format (one
    ``('iso:det[:defl], ...', counts, settle)`` tuple per line) and YAML.
    """

    hop_sequence = Instance(HopSequence)
    selected = Any
    path = Str
    detectors = List
    add_hop_button = Button
    remove_hop_button = Button
    # saveable = Bool
    # saveasable = Bool
    text = Str
    dirty = Bool
    use_yaml = True

    def new(self):
        """Start a fresh, empty hop sequence."""
        self.hop_sequence = HopSequence()
        return True

    def open(self, p=None):
        """Load a hop file, prompting with a file dialog when ``p`` is not a file.

        Returns True on a successful load, otherwise None.
        """
        if p is None:
            p = "/Users/ross/Pychrondata_dev/scripts/measurement/hops/hop.txt"

        if not os.path.isfile(p):
            p = ""
            dialog = FileDialog(action="open", default_directory=paths.hops_dir)
            if dialog.open() == OK:
                p = dialog.path

        if os.path.isfile(p):
            self.path = p
            # self.saveable = True
            # self.saveasable = True
            return self._load(p)

    def save(self):
        """Save to the current path, or fall back to save-as when there is none."""
        if self.path:
            if self._validate_sequence():
                self._save_file(self.path)
        else:
            self.save_as()

    def save_as(self):
        """Validate, prompt for a destination, then save there.

        FIX: restored the ``dialog.open() == OK`` guard so nothing is written
        when the user cancels the dialog (this block was mangled in the
        original source).
        """
        if self._validate_sequence():
            dialog = FileDialog(action="save as", default_directory=paths.hops_dir)
            if dialog.open() == OK:
                p = dialog.path
                p = add_extension(p, ".yaml" if self.use_yaml else ".txt")
                self._save_file(p)
                self.path = p

    def _load(self, p):
        # Rebuild the sequence from either format, keyed off the extension.
        self.hop_sequence = hs = HopSequence()
        if p.endswith(".txt"):
            self.use_yaml = False
            with open(p, "r") as rfile:
                # NOTE(review): eval() of each line assumes a trusted, locally
                # authored hop file.
                hops = [eval(l) for l in fileiter(rfile)]

            for i, (hopstr, cnt, settle) in enumerate(hops):
                h = Hop(
                    name=str(i + 1),
                    counts=cnt,
                    settle=settle,
                    detectors=self.detectors,
                )
                h.parse_hopstr(hopstr)
                hs.hops.append(h)

            hs.label_hops()
            self.selected = hs.hops[0]
            with open(p, "r") as rfile:
                self.text = rfile.read()
            # FIX: the text branch previously returned None, making a
            # successful load indistinguishable from a failure to callers.
            return True
        else:
            self.use_yaml = True
            with open(p, "r") as rfile:
                self.text = rfile.read()
            try:
                for i, hop in enumerate(yload(self.text)):
                    h = Hop(
                        name=str(i + 1),
                        counts=hop.get("counts", 0),
                        settle=hop.get("settle", 0),
                        detectors=self.detectors,
                    )
                    # FIX: to_yaml() writes the key "cup_configuration"; the
                    # loader previously read only the plural spelling.  Accept
                    # both for backward compatibility, and tolerate a missing
                    # key instead of iterating None.
                    cups = hop.get("cup_configuration") or hop.get("cup_configurations") or []
                    for pd in cups:
                        pos = Position(
                            detector=pd.get("detector", ""),
                            isotope=pd.get("isotope", ""),
                            active=pd.get("active", True),
                            is_baseline=pd.get("is_baseline", False),
                            protect=pd.get("protect", False),
                            # FIX: deflection is an Int trait; the previous
                            # default of "" was not a valid value.
                            deflection=pd.get("deflection", 0),
                        )
                        h.positions.append(pos)
                    hs.hops.append(h)
                hs.label_hops()
                return True
            except yaml.YAMLError:
                # Malformed YAML: leave the raw text visible and report failure.
                pass

    def _validate_sequence(self):
        """Warn about invalid hops; return True only when every hop is valid."""
        hs = []
        for h in self.hop_sequence.hops:
            if not h.validate_hop():
                hs.append("Invalid Hop {}. {}".format(h.name, h.error_message))

        if hs:
            self.warning_dialog("\n".join(hs))
        else:
            return True

    def _save_file(self, p):
        self.info("saving hop to {}".format(p))
        with open(p, "w") as wfile:
            if self.use_yaml:
                yaml.dump(self.to_yaml(), wfile, default_flow_style=False)
            else:
                txt = self.to_string()
                wfile.write(txt)
                self.text = txt
        self.dirty = False

    def to_yaml(self):
        """Serializable representation of the whole sequence (YAML format)."""
        return self.hop_sequence.to_yaml()

    def to_string(self):
        """Legacy text representation, with an explanatory header."""
        header1 = "#hopstr ('iso:det[:defl][,iso:det....]', count, settle)"
        header2 = "#e.g ('Ar40:H1, Ar41:H2, Ar38:L1, Ar37:L2, Ar36:CDD:110', 15, 3)"
        return "\n".join((header1, header2, self.hop_sequence.to_string()))

    def to_text(self):
        """Textual preview in whichever format is currently active."""
        if self.use_yaml:
            return yaml.dump(self.to_yaml(), default_flow_style=False)
        else:
            return self.to_string()

    def _add_hop_button_fired(self):
        # Duplicate the selected hop in place, or append a blank one.
        idx = None
        if self.selected:
            idx = self.hop_sequence.hops.index(self.selected)

        self.hop_sequence.add_hop(idx)
        self.dirty = True

    def _remove_hop_button_fired(self):
        hops = self.hop_sequence.hops
        idx = hops.index(self.selected)
        if len(hops) > 1:
            self.selected = hops[0]
        else:
            self.selected = None

        self.hop_sequence.remove_hop(idx)
        self.dirty = True
class HopEditorView(Controller):
    """TraitsUI controller/window for a HopEditorModel."""

    model = HopEditorModel

    title = Str("Peak Hops Editor")

    def close(self, info, is_ok):
        """Intercept window close: offer to save when there are unsaved changes."""
        if self.model.dirty:
            dlg = ConfirmationDialog(
                message="Save changes to Hops file",
                cancel=True,
                default=CANCEL,
                title="Save Changes?",
            )
            ret = dlg.open()
            if ret == CANCEL:
                return False
            elif ret == YES:
                self.model.save()
        return True

    @on_trait_change(
        "model:hop_sequence:hops:[counts,settle, positions:[isotope,detector,deflection]]"
    )
    def _handle_edit(self):
        # Any edit marks the model dirty and refreshes the raw-text preview.
        self.model.dirty = True
        self.model.text = self.model.to_text()

    @on_trait_change("model.[path,dirty]")
    def _handle_path_change(self):
        # Mirror path/dirty state into the window title, e.g. "*hop.txt - ...".
        p = self.model.path
        n = os.path.basename(p)
        if self.model.dirty:
            n = "*{}".format(n)
        d = os.path.dirname(p)
        d = d.replace(os.path.expanduser("~"), "")

        t = "{} - PeakHop Editor - {}".format(n, d)
        if not self.info:
            self.title = t
        else:
            self.info.ui.title = t

    def save(self, info):
        self.model.save()

    def save_as(self, info):
        self.model.save_as()

    def traits_view(self):
        cols = [
            ObjectColumn(name="name", label="", editable=False),
            ObjectColumn(name="counts"),
            ObjectColumn(name="settle", label="Settle (s)"),
            ObjectColumn(
                name="isotopes_label", editable=False, width=175, label="Isotopes"
            ),
        ]
        hgrp = VGroup(
            UItem(
                "object.hop_sequence.hops",
                editor=myTableEditor(
                    columns=cols,
                    clear_selection_on_dclicked=True,
                    sortable=False,
                    selected="selected",
                ),
            ),
            HGroup(
                icon_button_editor("add_hop_button", "add", tooltip="Add peak hop"),
                icon_button_editor(
                    "remove_hop_button",
                    "delete",
                    tooltip="Delete selected peak hop",
                    enabled_when="selected",
                ),
            ),
        )

        sgrp = UItem("selected", style="custom", editor=InstanceEditor())
        grp = HSplit(hgrp, sgrp)
        save_action = Action(
            name="Save",
            image=icon("document-save"),
            # FIX: the model's ``saveable``/``saveasable`` traits were removed
            # (commented out), so the old enabled_when expressions could never
            # evaluate true and the buttons were permanently disabled.  Enable
            # Save whenever there are unsaved changes; Save As is always
            # available.
            enabled_when="object.dirty",
            action="save",
        )
        save_as_action = Action(
            name="Save As",
            image=icon("document-save-as"),
            action="save_as",
        )

        teditor = myTextEditor(
            bgcolor="#F7F6D0",
            fontsize=12,
            fontsize_name="fontsize",
            wrap=False,
            tab_width=15,
        )
        v = View(
            VGroup(
                VGroup(grp, label="Editor"),
                VGroup(
                    UItem("object.text", editor=teditor, style="custom"), label="Text"
                ),
            ),
            # toolbar=ToolBar(),
            width=690,
            title=self.title,
            buttons=["OK", save_action, save_as_action],
            resizable=True,
        )
        return v
if __name__ == "__main__":
    # Standalone demo: point pychron's paths at ~/PychronDev, create an empty
    # hop sequence for three detectors, and open the editor UI.
    root = os.path.join(os.path.expanduser("~"), "PychronDev")
    paths.build(root)
    m = HopEditorModel()
    m.detectors = ["H2", "H1", "CDD"]
    # m.open()
    m.new()
    h = HopEditorView(model=m)
    # m.new()
    h.configure_traits()
# ============= EOF =============================================<|fim▁end|>
|
if dialog.open() == OK:
p = dialog.path
|
<|file_name|>pe054-poker-hands.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python
# coding=utf-8
"""54. Poker hands
https://projecteuler.net/problem=54
In the card game poker, a hand consists of five cards and are ranked, from
lowest to highest, in the following way:
* **High Card** : Highest value card.
* **One Pair** : Two cards of the same value.
* **Two Pairs** : Two different pairs.
* **Three of a Kind** : Three cards of the same value.
* **Straight** : All cards are consecutive values.
* **Flush** : All cards of the same suit.
* **Full House** : Three of a kind and a pair.
* **Four of a Kind** : Four cards of the same value.
* **Straight Flush** : All cards are consecutive values of same suit.
* **Royal Flush** : Ten, Jack, Queen, King, Ace, in same suit.
The cards are valued in the order:
2, 3, 4, 5, 6, 7, 8, 9, 10, Jack, Queen, King, Ace.
If two players have the same ranked hands then the rank made up of the highest
value wins; for example, a pair of eights beats a pair of fives (see example 1
below). But if two ranks tie, for example, both players have a pair of queens,
then highest cards in each hand are compared (see example 4 below); if the<|fim▁hole|>
Consider the following five hands dealt to two players:
**Hand**| | **Player 1**| | **Player 2**| | **Winner**
---|---|---|---|---|---|---
**1**| | 5H 5C 6S 7S KD
Pair of Fives
| | 2C 3S 8S 8D TD
Pair of Eights
| | Player 2
**2**| | 5D 8C 9S JS AC
Highest card Ace
| | 2C 5C 7D 8S QH
Highest card Queen
| | Player 1
**3**| | 2D 9C AS AH AC
Three Aces
| | 3D 6D 7D TD QD
Flush with Diamonds
| | Player 2
**4**| | 4D 6S 9H QH QC
Pair of Queens
Highest card Nine
| | 3D 6D 7H QD QS
Pair of Queens
Highest card Seven
| | Player 1
**5**| | 2H 2D 4C 4D 4S
Full House
With Three Fours
| | 3C 3D 3S 9S 9D
Full House
with Three Threes
| | Player 1
The file, [poker.txt](project/resources/p054_poker.txt), contains one-thousand
random hands dealt to two players. Each line of the file contains ten cards
(separated by a single space): the first five are Player 1's cards and the
last five are Player 2's cards. You can assume that all hands are valid (no
invalid characters or repeated cards), each player's hand is in no specific
order, and in each hand there is a clear winner.
How many hands does Player 1 win?
"""<|fim▁end|>
|
highest cards tie then the next highest cards are compared, and so on.
|
<|file_name|>constellation.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The `Constellation`, Servo's Grand Central Station
//!
//! The constellation tracks all information kept globally by the
//! browser engine, which includes:
//!
//! * The set of all `EventLoop` objects. Each event loop is
//! the constellation's view of a script thread. The constellation
//! interacts with a script thread by message-passing.
//!
//! * The set of all `Pipeline` objects. Each pipeline gives the
//! constellation's view of a `Window`, with its script thread and
//! layout threads. Pipelines may share script threads, but not
//! layout threads.
//!
//! * The set of all `Frame` objects. Each frame gives the constellation's
//! view of a browsing context. Each browsing context stores an independent
//! session history, created by navigation of that frame. The session
//! history can be traversed, for example by the back and forwards UI,
//! so each session history maintains a list of past and future pipelines,
//! as well as the current active pipeline.
//!
//! There are two kinds of frames: top-level frames (for example tabs
//! in a browser UI), and nested frames (typically caused by `iframe`
//! elements). Frames have a hierarchy (typically caused by `iframe`s
//! containing `iframe`s), giving rise to a frame tree with a root frame.
//! The logical relationship between these types is:
//!
//! ```
//! +---------+ +------------+ +-------------+
//! | Frame | --parent?--> | Pipeline | --event_loop--> | EventLoop |
//! | | --current--> | | | |
//! | | --prev*----> | | <---pipeline*-- | |
//! | | --next*----> | | +-------------+
//! | | | |
//! | | <----frame-- | |
//! +---------+ +------------+
//! ```
//
//! Complicating matters, there are also mozbrowser iframes, which are top-level
//! frames with a parent.
//!
//! The constellation also maintains channels to threads, including:
//!
//! * The script and layout threads.
//! * The graphics compositor.
//! * The font cache, image cache, and resource manager, which load
//! and cache shared fonts, images, or other resources.
//! * The service worker manager.
//! * The devtools, debugger and webdriver servers.
//!
//! The constellation passes messages between the threads, and updates its state
//! to track the evolving state of the frame tree.
//!
//! The constellation acts as a logger, tracking any `warn!` messages from threads,
//! and converting any `error!` or `panic!` into a crash report, which is filed
//! using an appropriate `mozbrowsererror` event.
//!
//! Since there is only one constellation, and its responsibilities include crash reporting,
//! it is very important that it does not panic.
use backtrace::Backtrace;
use bluetooth_traits::BluetoothRequest;
use canvas::canvas_paint_thread::CanvasPaintThread;
use canvas::webgl_paint_thread::WebGLPaintThread;
use canvas_traits::CanvasMsg;
use compositing::SendableFrameTree;
use compositing::compositor_thread::CompositorProxy;
use compositing::compositor_thread::Msg as ToCompositorMsg;
use debugger;
use devtools_traits::{ChromeToDevtoolsControlMsg, DevtoolsControlMsg};
use euclid::scale_factor::ScaleFactor;
use euclid::size::{Size2D, TypedSize2D};
use event_loop::EventLoop;
use frame::{Frame, FrameChange, FrameState, FrameTreeIterator, FullFrameTreeIterator};
use gfx::font_cache_thread::FontCacheThread;
use gfx_traits::Epoch;
use ipc_channel::ipc::{self, IpcSender};
use ipc_channel::router::ROUTER;
use layout_traits::LayoutThreadFactory;
use log::{Log, LogLevel, LogLevelFilter, LogMetadata, LogRecord};
use msg::constellation_msg::{FrameId, FrameType, PipelineId};
use msg::constellation_msg::{Key, KeyModifiers, KeyState};
use msg::constellation_msg::{PipelineNamespace, PipelineNamespaceId, TraversalDirection};
use net_traits::{self, IpcSend, ResourceThreads};
use net_traits::image_cache_thread::ImageCacheThread;
use net_traits::pub_domains::reg_host;
use net_traits::storage_thread::{StorageThreadMsg, StorageType};
use offscreen_gl_context::{GLContextAttributes, GLLimits};
use pipeline::{InitialPipelineState, Pipeline};
use profile_traits::mem;
use profile_traits::time;
use script_traits::{AnimationState, AnimationTickType, CompositorEvent};
use script_traits::{ConstellationControlMsg, ConstellationMsg as FromCompositorMsg, DiscardBrowsingContext};
use script_traits::{DocumentState, LayoutControlMsg, LoadData};
use script_traits::{IFrameLoadInfo, IFrameLoadInfoWithData, IFrameSandboxState, TimerEventRequest};
use script_traits::{LayoutMsg as FromLayoutMsg, ScriptMsg as FromScriptMsg, ScriptThreadFactory};
use script_traits::{LogEntry, ServiceWorkerMsg, webdriver_msg};
use script_traits::{MozBrowserErrorType, MozBrowserEvent, WebDriverCommandMsg, WindowSizeData};
use script_traits::{SWManagerMsg, ScopeThings, WindowSizeType};
use script_traits::WebVREventMsg;
use servo_config::opts;
use servo_config::prefs::PREFS;
use servo_rand::{Rng, SeedableRng, ServoRng, random};
use servo_remutex::ReentrantMutex;
use servo_url::ServoUrl;
use std::borrow::ToOwned;
use std::collections::{HashMap, VecDeque};
use std::io::Error as IOError;
use std::iter::once;
use std::marker::PhantomData;
use std::process;
use std::rc::{Rc, Weak};
use std::sync::Arc;
use std::sync::mpsc::{Receiver, Sender, channel};
use std::thread;
use std::time::Instant;
use style_traits::PagePx;
use style_traits::cursor::Cursor;
use style_traits::viewport::ViewportConstraints;
use timer_scheduler::TimerScheduler;
use webrender_traits;
use webvr_traits::WebVRMsg;
/// The `Constellation` itself. In the servo browser, there is one
/// constellation, which maintains all of the browser global data.
/// In embedded applications, there may be more than one constellation,
/// which are independent of each other.
///
/// The constellation may be in a different process from the pipelines,
/// and communicates using IPC.
///
/// It is parameterized over a `LayoutThreadFactory` and a
/// `ScriptThreadFactory` (which in practice are implemented by
/// `LayoutThread` in the `layout` crate, and `ScriptThread` in
/// the `script` crate). Script and layout communicate using a `Message`
/// type.
pub struct Constellation<Message, LTF, STF> {
/// An IPC channel for script threads to send messages to the constellation.
/// This is the script threads' view of `script_receiver`.
script_sender: IpcSender<FromScriptMsg>,
/// A channel for the constellation to receive messages from script threads.
/// This is the constellation's view of `script_sender`.
script_receiver: Receiver<FromScriptMsg>,
/// An IPC channel for layout threads to send messages to the constellation.
/// This is the layout threads' view of `layout_receiver`.
layout_sender: IpcSender<FromLayoutMsg>,
/// A channel for the constellation to receive messages from layout threads.
/// This is the constellation's view of `layout_sender`.
layout_receiver: Receiver<FromLayoutMsg>,
/// A channel for the constellation to receive messages from the compositor thread.
compositor_receiver: Receiver<FromCompositorMsg>,
/// A channel (the implementation of which is port-specific) for the
/// constellation to send messages to the compositor thread.
compositor_proxy: Box<CompositorProxy>,
/// Channels for the constellation to send messages to the public
/// resource-related threads. There are two groups of resource
/// threads: one for public browsing, and one for private
/// browsing.
public_resource_threads: ResourceThreads,
/// Channels for the constellation to send messages to the private
/// resource-related threads. There are two groups of resource
/// threads: one for public browsing, and one for private
/// browsing.
private_resource_threads: ResourceThreads,
/// A channel for the constellation to send messages to the image
/// cache thread.
image_cache_thread: ImageCacheThread,
/// A channel for the constellation to send messages to the font
/// cache thread.
font_cache_thread: FontCacheThread,
/// A channel for the constellation to send messages to the
/// debugger thread.
debugger_chan: Option<debugger::Sender>,
/// A channel for the constellation to send messages to the
/// devtools thread.
devtools_chan: Option<Sender<DevtoolsControlMsg>>,
/// An IPC channel for the constellation to send messages to the
/// bluetooth thread.
bluetooth_thread: IpcSender<BluetoothRequest>,
/// An IPC channel for the constellation to send messages to the
/// Service Worker Manager thread.
swmanager_chan: Option<IpcSender<ServiceWorkerMsg>>,
/// An IPC channel for Service Worker Manager threads to send
/// messages to the constellation. This is the SW Manager thread's
/// view of `swmanager_receiver`.
swmanager_sender: IpcSender<SWManagerMsg>,
/// A channel for the constellation to receive messages from the
/// Service Worker Manager thread. This is the constellation's view of
/// `swmanager_sender`.
swmanager_receiver: Receiver<SWManagerMsg>,
/// A channel for the constellation to send messages to the
/// time profiler thread.
time_profiler_chan: time::ProfilerChan,
/// A channel for the constellation to send messages to the
/// memory profiler thread.
mem_profiler_chan: mem::ProfilerChan,
/// A channel for the constellation to send messages to the
/// timer thread.
scheduler_chan: IpcSender<TimerEventRequest>,
/// A channel for the constellation to send messages to the
/// Webrender thread.
webrender_api_sender: webrender_traits::RenderApiSender,
/// The set of all event loops in the browser. We generate a new
/// event loop for each registered domain name (aka eTLD+1) in
/// each top-level frame. We store the event loops in a map
/// indexed by top-level frame id (as a `FrameId`) and registered
/// domain name (as a `String`) to event loops. This double
/// indirection ensures that separate tabs do not share event
/// loops, even if the same domain is loaded in each.
/// It is important that scripts with the same eTLD+1
/// share an event loop, since they can use `document.domain`
/// to become same-origin, at which point they can share DOM objects.
event_loops: HashMap<FrameId, HashMap<String, Weak<EventLoop>>>,
/// The set of all the pipelines in the browser.
/// (See the `pipeline` module for more details.)
pipelines: HashMap<PipelineId, Pipeline>,
/// The set of all the frames in the browser.
frames: HashMap<FrameId, Frame>,
/// When a navigation is performed, we do not immediately update
/// the frame tree, instead we ask the event loop to begin loading
/// the new document, and do not update the frame tree until the
/// document is active. Between starting the load and it activating,
/// we store a `FrameChange` object for the navigation in progress.
pending_frames: Vec<FrameChange>,
/// The root frame.
root_frame_id: FrameId,
/// The currently focused pipeline for key events.
focus_pipeline_id: Option<PipelineId>,
/// Pipeline IDs are namespaced in order to avoid name collisions,
/// and the namespaces are allocated by the constellation.
next_pipeline_namespace_id: PipelineNamespaceId,
/// The size of the top-level window.
window_size: WindowSizeData,
/// Bits of state used to interact with the webdriver implementation
webdriver: WebDriverData,
/// Document states for loaded pipelines (used only when writing screenshots).
document_states: HashMap<PipelineId, DocumentState>,
/// Are we shutting down?
shutting_down: bool,
/// Have we seen any warnings? Hopefully always empty!
/// The buffer contains `(thread_name, reason)` entries.
handled_warnings: VecDeque<(Option<String>, String)>,
/// The random number generator and probability for closing pipelines.
/// This is for testing the hardening of the constellation.
random_pipeline_closure: Option<(ServoRng, f32)>,
/// Phantom data that keeps the Rust type system happy.
phantom: PhantomData<(Message, LTF, STF)>,
/// A channel through which messages can be sent to the webvr thread.
webvr_thread: Option<IpcSender<WebVRMsg>>,
}
/// State needed to construct a constellation.
pub struct InitialConstellationState {
/// A channel through which messages can be sent to the compositor.
pub compositor_proxy: Box<CompositorProxy + Send>,
/// A channel to the debugger, if applicable.
pub debugger_chan: Option<debugger::Sender>,
/// A channel to the developer tools, if applicable.
pub devtools_chan: Option<Sender<DevtoolsControlMsg>>,
/// A channel to the bluetooth thread.
pub bluetooth_thread: IpcSender<BluetoothRequest>,
/// A channel to the image cache thread.
pub image_cache_thread: ImageCacheThread,
/// A channel to the font cache thread.
pub font_cache_thread: FontCacheThread,
/// A channel to the resource thread.
pub public_resource_threads: ResourceThreads,
/// A channel to the resource thread.
pub private_resource_threads: ResourceThreads,
/// A channel to the time profiler thread.
pub time_profiler_chan: time::ProfilerChan,
/// A channel to the memory profiler thread.
pub mem_profiler_chan: mem::ProfilerChan,
/// Webrender API.
pub webrender_api_sender: webrender_traits::RenderApiSender,
/// Whether the constellation supports the clipboard.
/// TODO: this field is not used, remove it?
pub supports_clipboard: bool,
}
/// Data needed for webdriver
struct WebDriverData {
    /// An in-flight webdriver load: the pipeline being loaded and the IPC
    /// sender used to report its `LoadStatus`.
    load_channel: Option<(PipelineId, IpcSender<webdriver_msg::LoadStatus>)>,
    /// An in-flight webdriver resize: the IPC sender used to report the
    /// resulting `WindowSizeData`.
    resize_channel: Option<IpcSender<WindowSizeData>>,
}

impl WebDriverData {
    /// Create an empty `WebDriverData` with no pending load or resize.
    fn new() -> WebDriverData {
        WebDriverData {
            load_channel: None,
            resize_channel: None,
        }
    }
}
/// When we are running reftests, we save an image to compare against a reference.
/// This enum gives the possible states of preparing such an image.
#[derive(Debug, PartialEq)]
enum ReadyToSave {
    /// There is no root frame yet, so there is nothing to capture.
    NoRootFrame,
    /// Navigations are still in flight, so the frame tree is not stable yet.
    PendingFrames,
    /// A web font has not finished loading.
    WebFontNotLoaded,
    /// A document is still loading.
    DocumentLoading,
    /// Epochs are out of sync (NOTE(review): presumably between layout and
    /// the compositor — confirm against the readiness check that returns this).
    EpochMismatch,
    /// A pipeline in the frame tree is not known to the constellation.
    PipelineUnknown,
    /// The image is ready to be saved.
    Ready,
}
/// When we are exiting a pipeline, we can either force exiting or not.
/// A normal exit waits for the compositor to update its state before
/// exiting, and delegates layout exit to script. A forced exit does
/// not notify the compositor, and exits layout without involving script.
#[derive(Clone, Copy)]
enum ExitPipelineMode {
    /// Normal exit: wait for the compositor to update its state, and let
    /// script shut down layout.
    Normal,
    /// Forced exit: do not notify the compositor, and exit layout without
    /// involving script.
    Force,
}
/// The constellation uses logging to perform crash reporting.
/// The constellation receives all `warn!`, `error!` and `panic!` messages,
/// and generates a crash report when it receives a panic.
/// A logger directed at the constellation from content processes
#[derive(Clone)]
pub struct FromScriptLogger {
/// A channel to the constellation
pub constellation_chan: Arc<ReentrantMutex<IpcSender<FromScriptMsg>>>,
}
impl FromScriptLogger {
/// Create a new constellation logger.
pub fn new(constellation_chan: IpcSender<FromScriptMsg>) -> FromScriptLogger {
FromScriptLogger {
constellation_chan: Arc::new(ReentrantMutex::new(constellation_chan))
}
}
/// The maximum log level the constellation logger is interested in.
pub fn filter(&self) -> LogLevelFilter {
LogLevelFilter::Warn
}
}
impl Log for FromScriptLogger {
fn enabled(&self, metadata: &LogMetadata) -> bool {
metadata.level() <= LogLevel::Warn
}
fn log(&self, record: &LogRecord) {
if let Some(entry) = log_entry(record) {
debug!("Sending log entry {:?}.", entry);
let top_level_frame_id = FrameId::installed();
let thread_name = thread::current().name().map(ToOwned::to_owned);
let msg = FromScriptMsg::LogEntry(top_level_frame_id, thread_name, entry);
let chan = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
let _ = chan.send(msg);
}
}
}
/// A logger directed at the constellation from the compositor
#[derive(Clone)]
pub struct FromCompositorLogger {
/// A channel to the constellation
pub constellation_chan: Arc<ReentrantMutex<Sender<FromCompositorMsg>>>,
}
impl FromCompositorLogger {
/// Create a new constellation logger.
pub fn new(constellation_chan: Sender<FromCompositorMsg>) -> FromCompositorLogger {
FromCompositorLogger {
constellation_chan: Arc::new(ReentrantMutex::new(constellation_chan))
}
}
/// The maximum log level the constellation logger is interested in.
pub fn filter(&self) -> LogLevelFilter {
LogLevelFilter::Warn
}
}
impl Log for FromCompositorLogger {
    fn enabled(&self, metadata: &LogMetadata) -> bool {
        // Only warnings and errors are of interest to the constellation.
        metadata.level() <= LogLevel::Warn
    }
    fn log(&self, record: &LogRecord) {
        // `log_entry` returns `None` for levels below warn; those are dropped.
        if let Some(entry) = log_entry(record) {
            debug!("Sending log entry {:?}.", entry);
            let top_level_frame_id = FrameId::installed();
            let thread_name = thread::current().name().map(ToOwned::to_owned);
            let msg = FromCompositorMsg::LogEntry(top_level_frame_id, thread_name, entry);
            // Recover the sender even if the mutex was poisoned: this logger
            // runs on panicking threads.
            let chan = self.constellation_chan.lock().unwrap_or_else(|err| err.into_inner());
            // Best effort: there is no way to report a send failure from a logger.
            let _ = chan.send(msg);
        }
    }
}
/// Rust uses `LogRecord` for storing logging, but servo converts that to
/// a `LogEntry`. We do this so that we can record panics as well as log
/// messages, and because `LogRecord` does not implement serde (de)serialization,
/// so cannot be used over an IPC channel.
fn log_entry(record: &LogRecord) -> Option<LogEntry> {
match record.level() {
LogLevel::Error if thread::panicking() => Some(LogEntry::Panic(
format!("{}", record.args()),
format!("{:?}", Backtrace::new())
)),
LogLevel::Error => Some(LogEntry::Error(
format!("{}", record.args())
)),
LogLevel::Warn => Some(LogEntry::Warn(
format!("{}", record.args())
)),
_ => None,
}
}
/// The number of warnings to include in each crash report:
/// once this many are buffered, the oldest warning is evicted
/// (see `handle_log_entry`).
const WARNINGS_BUFFER_SIZE: usize = 32;
impl<Message, LTF, STF> Constellation<Message, LTF, STF>
where LTF: LayoutThreadFactory<Message=Message>,
STF: ScriptThreadFactory<Message=Message>
{
/// Create a new constellation thread.
///
/// Spawns a dedicated "Constellation" thread running the event loop, and
/// returns the pair of channels the embedder needs: a sender for compositor
/// messages and an IPC sender for the service worker manager.
pub fn start(state: InitialConstellationState) -> (Sender<FromCompositorMsg>, IpcSender<SWManagerMsg>) {
    let (compositor_sender, compositor_receiver) = channel();
    // service worker manager to communicate with constellation
    let (swmanager_sender, swmanager_receiver) = ipc::channel().expect("ipc channel failure");
    let sw_mgr_clone = swmanager_sender.clone();
    thread::Builder::new().name("Constellation".to_owned()).spawn(move || {
        // IPC receivers are routed onto in-process mpsc receivers so the
        // event loop can `select!` over all of them (see `handle_request`).
        let (ipc_script_sender, ipc_script_receiver) = ipc::channel().expect("ipc channel failure");
        let script_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_script_receiver);
        let (ipc_layout_sender, ipc_layout_receiver) = ipc::channel().expect("ipc channel failure");
        let layout_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(ipc_layout_receiver);
        let swmanager_receiver = ROUTER.route_ipc_receiver_to_new_mpsc_receiver(swmanager_receiver);
        PipelineNamespace::install(PipelineNamespaceId(0));
        let mut constellation: Constellation<Message, LTF, STF> = Constellation {
            script_sender: ipc_script_sender,
            layout_sender: ipc_layout_sender,
            script_receiver: script_receiver,
            compositor_receiver: compositor_receiver,
            layout_receiver: layout_receiver,
            compositor_proxy: state.compositor_proxy,
            debugger_chan: state.debugger_chan,
            devtools_chan: state.devtools_chan,
            bluetooth_thread: state.bluetooth_thread,
            public_resource_threads: state.public_resource_threads,
            private_resource_threads: state.private_resource_threads,
            image_cache_thread: state.image_cache_thread,
            font_cache_thread: state.font_cache_thread,
            swmanager_chan: None,
            swmanager_receiver: swmanager_receiver,
            swmanager_sender: sw_mgr_clone,
            event_loops: HashMap::new(),
            pipelines: HashMap::new(),
            frames: HashMap::new(),
            pending_frames: vec!(),
            // We initialize the namespace at 1, since we reserved namespace 0 for the constellation
            next_pipeline_namespace_id: PipelineNamespaceId(1),
            root_frame_id: FrameId::new(),
            focus_pipeline_id: None,
            time_profiler_chan: state.time_profiler_chan,
            mem_profiler_chan: state.mem_profiler_chan,
            window_size: WindowSizeData {
                visible_viewport: opts::get().initial_window_size.to_f32() *
                                      ScaleFactor::new(1.0),
                initial_viewport: opts::get().initial_window_size.to_f32() *
                    ScaleFactor::new(1.0),
                device_pixel_ratio:
                    ScaleFactor::new(opts::get().device_pixels_per_px.unwrap_or(1.0)),
            },
            phantom: PhantomData,
            webdriver: WebDriverData::new(),
            scheduler_chan: TimerScheduler::start(),
            document_states: HashMap::new(),
            webrender_api_sender: state.webrender_api_sender,
            shutting_down: false,
            handled_warnings: VecDeque::new(),
            // Optional chaos testing: close random pipelines with the given
            // probability, using a fixed seed if one was supplied.
            random_pipeline_closure: opts::get().random_pipeline_closure_probability.map(|prob| {
                let seed = opts::get().random_pipeline_closure_seed.unwrap_or_else(random);
                let rng = ServoRng::from_seed(&[seed]);
                warn!("Randomly closing pipelines.");
                info!("Using seed {} for random pipeline closure.", seed);
                (rng, prob)
            }),
            webvr_thread: None
        };
        constellation.run();
    }).expect("Thread spawning failed");
    (compositor_sender, swmanager_sender)
}
/// The main event loop for the constellation.
///
/// Keeps processing requests until a shutdown has been requested *and*
/// every pipeline has gone away, then performs final teardown.
fn run(&mut self) {
    loop {
        if self.shutting_down && self.pipelines.is_empty() {
            break;
        }
        // Randomly close a pipeline if --random-pipeline-closure-probability is set.
        // This is for testing the hardening of the constellation.
        self.maybe_close_random_pipeline();
        self.handle_request();
    }
    self.handle_shutdown();
}
/// Generate a new pipeline id namespace.
fn next_pipeline_namespace_id(&mut self) -> PipelineNamespaceId {
let namespace_id = self.next_pipeline_namespace_id;
let PipelineNamespaceId(ref mut i) = self.next_pipeline_namespace_id;
*i += 1;
namespace_id
}
/// Helper function for creating a pipeline.
///
/// Decides whether the new pipeline can reuse an existing event loop
/// (same top-level frame and registrable host, unsandboxed), spawns the
/// pipeline, registers its event loop for future reuse, and records it in
/// `self.pipelines`. Does nothing once the constellation is shutting down.
///
/// Fix: `top_level_frame_id` was previously computed twice with identical
/// match expressions; the redundant second computation has been removed and
/// the first binding is reused.
fn new_pipeline(&mut self,
                pipeline_id: PipelineId,
                frame_id: FrameId,
                parent_info: Option<(PipelineId, FrameType)>,
                initial_window_size: Option<TypedSize2D<f32, PagePx>>,
                load_data: LoadData,
                sandbox: IFrameSandboxState,
                is_private: bool) {
    if self.shutting_down { return; }
    // TODO: can we get a case where the child pipeline is created
    // before the parent is part of the frame tree?
    let top_level_frame_id = match parent_info {
        Some((_, FrameType::MozBrowserIFrame)) => frame_id,
        Some((parent_id, _)) => self.get_top_level_frame_for_pipeline(parent_id),
        None => self.root_frame_id,
    };
    // Sandboxed iframes never share an event loop; unsandboxed ones reuse
    // the live event loop registered for (top-level frame, host), if any.
    // When no loop is reusable, `host` carries the key under which the new
    // pipeline's event loop will be registered below.
    let (event_loop, host) = match sandbox {
        IFrameSandboxState::IFrameSandboxed => (None, None),
        IFrameSandboxState::IFrameUnsandboxed => match reg_host(&load_data.url) {
            None => (None, None),
            Some(host) => {
                let event_loop = self.event_loops.get(&top_level_frame_id)
                    .and_then(|map| map.get(host))
                    .and_then(|weak| weak.upgrade());
                match event_loop {
                    None => (None, Some(String::from(host))),
                    Some(event_loop) => (Some(event_loop.clone()), None),
                }
            },
        },
    };
    // Private browsing pipelines get the private resource threads.
    let resource_threads = if is_private {
        self.private_resource_threads.clone()
    } else {
        self.public_resource_threads.clone()
    };
    // Inherit visibility from the frame's current pipeline, falling back to
    // the parent pipeline's visibility.
    let parent_visibility = parent_info
        .and_then(|(parent_pipeline_id, _)| self.pipelines.get(&parent_pipeline_id))
        .map(|pipeline| pipeline.visible);
    let prev_visibility = self.frames.get(&frame_id)
        .and_then(|frame| self.pipelines.get(&frame.pipeline_id))
        .map(|pipeline| pipeline.visible)
        .or(parent_visibility);
    // `top_level_frame_id` from above is still valid here; no need to
    // recompute it.
    let result = Pipeline::spawn::<Message, LTF, STF>(InitialPipelineState {
        id: pipeline_id,
        frame_id: frame_id,
        top_level_frame_id: top_level_frame_id,
        parent_info: parent_info,
        constellation_chan: self.script_sender.clone(),
        layout_to_constellation_chan: self.layout_sender.clone(),
        scheduler_chan: self.scheduler_chan.clone(),
        compositor_proxy: self.compositor_proxy.clone_compositor_proxy(),
        devtools_chan: self.devtools_chan.clone(),
        bluetooth_thread: self.bluetooth_thread.clone(),
        swmanager_thread: self.swmanager_sender.clone(),
        image_cache_thread: self.image_cache_thread.clone(),
        font_cache_thread: self.font_cache_thread.clone(),
        resource_threads: resource_threads,
        time_profiler_chan: self.time_profiler_chan.clone(),
        mem_profiler_chan: self.mem_profiler_chan.clone(),
        window_size: initial_window_size,
        event_loop: event_loop,
        load_data: load_data,
        device_pixel_ratio: self.window_size.device_pixel_ratio,
        pipeline_namespace_id: self.next_pipeline_namespace_id(),
        prev_visibility: prev_visibility,
        webrender_api_sender: self.webrender_api_sender.clone(),
        is_private: is_private,
        webvr_thread: self.webvr_thread.clone()
    });
    let pipeline = match result {
        Ok(result) => result,
        // A failed spawn is handled like a send error (panic recovery path).
        Err(e) => return self.handle_send_error(pipeline_id, e),
    };
    // Register the new event loop so later same-host pipelines can reuse it.
    if let Some(host) = host {
        self.event_loops.entry(top_level_frame_id)
            .or_insert_with(HashMap::new)
            .insert(host, Rc::downgrade(&pipeline.event_loop));
    }
    assert!(!self.pipelines.contains_key(&pipeline_id));
    self.pipelines.insert(pipeline_id, pipeline);
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over the fully active frames in the tree.
fn current_frame_tree_iter(&self, frame_id_root: FrameId) -> FrameTreeIterator {
    let initial_stack = vec![frame_id_root];
    FrameTreeIterator {
        stack: initial_stack,
        pipelines: &self.pipelines,
        frames: &self.frames,
    }
}
/// Get an iterator for the current frame tree. Specify self.root_frame_id to
/// iterate the entire tree, or a specific frame id to iterate only that sub-tree.
/// Iterates over all frames in the tree.
fn full_frame_tree_iter(&self, frame_id_root: FrameId) -> FullFrameTreeIterator {
    let initial_stack = vec![frame_id_root];
    FullFrameTreeIterator {
        stack: initial_stack,
        pipelines: &self.pipelines,
        frames: &self.frames,
    }
}
/// The joint session future is the merge of the session future of every
/// frame in the frame tree, sorted chronologically.
fn joint_session_future<'a>(&'a self, frame_id_root: FrameId) -> impl Iterator<Item=FrameState> {
    // Gather the future entries of every frame in the (sub)tree.
    let mut future: Vec<FrameState> = Vec::new();
    for frame in self.full_frame_tree_iter(frame_id_root) {
        future.extend(frame.next.iter().cloned());
    }
    // Sort by the timestamp each pipeline was navigated to, oldest first
    // (stable sort, same ordering as comparing on `instant`).
    future.sort_by_key(|entry| entry.instant);
    future.into_iter()
}
/// Is the joint session future empty?
fn joint_session_future_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Equivalent to `all(|frame| frame.next.is_empty())`.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.next.is_empty())
}
/// The joint session past is the merge of the session past of every
/// frame in the frame tree, sorted reverse chronologically.
fn joint_session_past<'a>(&self, frame_id_root: FrameId) -> impl Iterator<Item=FrameState> {
    let mut past: Vec<(Instant, FrameState)> = self.full_frame_tree_iter(frame_id_root)
        // Walk each frame's history newest-to-oldest. Each entry is paired
        // with the instant it was navigated *away from*: the scan starts at
        // the frame's current instant and then shifts to each entry's own
        // instant for the next (older) entry.
        .flat_map(|frame| frame.prev.iter().rev().scan(frame.instant, |prev_instant, entry| {
            let instant = *prev_instant;
            *prev_instant = entry.instant;
            Some((instant, entry.clone()))
        }))
        .collect();
    // Sort the joint session past by the timestamp that the pipeline was navigated from
    // in reverse chronological order
    past.sort_by(|a, b| b.0.cmp(&a.0));
    past.into_iter().map(|(_, entry)| entry)
}
/// Is the joint session past empty?
fn joint_session_past_is_empty(&self, frame_id_root: FrameId) -> bool {
    // Equivalent to `all(|frame| frame.prev.is_empty())`.
    !self.full_frame_tree_iter(frame_id_root)
        .any(|frame| !frame.prev.is_empty())
}
/// Create a new frame and update the internal bookkeeping.
///
/// Records the frame in `self.frames` and, when the frame belongs to an
/// iframe, registers it as a child of its parent pipeline.
fn new_frame(&mut self, frame_id: FrameId, pipeline_id: PipelineId, url: ServoUrl) {
    self.frames.insert(frame_id, Frame::new(frame_id, pipeline_id, url));
    let parent_info = self.pipelines.get(&pipeline_id)
        .and_then(|pipeline| pipeline.parent_info);
    if let Some((parent_id, _)) = parent_info {
        if let Some(parent) = self.pipelines.get_mut(&parent_id) {
            parent.add_child(frame_id);
        }
    }
}
/// Handles loading pages, navigation, and granting access to the compositor
#[allow(unsafe_code)]
fn handle_request(&mut self) {
    // Tags a message with the channel it arrived on, so dispatch below can
    // route it to the right handler.
    enum Request {
        Script(FromScriptMsg),
        Compositor(FromCompositorMsg),
        Layout(FromLayoutMsg),
        FromSWManager(SWManagerMsg),
    }
    // Get one incoming request.
    // This is one of the few places where the compositor is
    // allowed to panic. If one of the receiver.recv() calls
    // fails, it is because the matching sender has been
    // reclaimed, but this can't happen in normal execution
    // because the constellation keeps a pointer to the sender,
    // so it should never be reclaimed. A possible scenario in
    // which receiver.recv() fails is if some unsafe code
    // produces undefined behaviour, resulting in the destructor
    // being called. If this happens, there's not much we can do
    // other than panic.
    let request = {
        let receiver_from_script = &self.script_receiver;
        let receiver_from_compositor = &self.compositor_receiver;
        let receiver_from_layout = &self.layout_receiver;
        let receiver_from_swmanager = &self.swmanager_receiver;
        // Block until any one of the four channels has a message.
        select! {
            msg = receiver_from_script.recv() =>
                Request::Script(msg.expect("Unexpected script channel panic in constellation")),
            msg = receiver_from_compositor.recv() =>
                Request::Compositor(msg.expect("Unexpected compositor channel panic in constellation")),
            msg = receiver_from_layout.recv() =>
                Request::Layout(msg.expect("Unexpected layout channel panic in constellation")),
            msg = receiver_from_swmanager.recv() =>
                Request::FromSWManager(msg.expect("Unexpected panic channel panic in constellation"))
        }
    };
    match request {
        Request::Compositor(message) => {
            self.handle_request_from_compositor(message)
        },
        Request::Script(message) => {
            self.handle_request_from_script(message);
        },
        Request::Layout(message) => {
            self.handle_request_from_layout(message);
        },
        Request::FromSWManager(message) => {
            self.handle_request_from_swmanager(message);
        }
    }
}
/// Dispatch a message received from the service worker manager.
fn handle_request_from_swmanager(&mut self, message: SWManagerMsg) {
    match message {
        SWManagerMsg::OwnSender(sw_sender) => {
            // store service worker manager for communicating with it.
            self.swmanager_chan = Some(sw_sender);
        }
    }
}
/// Dispatch a message received from the compositor to its handler.
fn handle_request_from_compositor(&mut self, message: FromCompositorMsg) {
    match message {
        FromCompositorMsg::Exit => {
            debug!("constellation exiting");
            self.handle_exit();
        }
        // The compositor discovered the size of a subframe. This needs to be reflected by all
        // frame trees in the navigation context containing the subframe.
        FromCompositorMsg::FrameSize(pipeline_id, size) => {
            debug!("constellation got frame size message");
            self.handle_frame_size_msg(pipeline_id, &TypedSize2D::from_untyped(&size));
        }
        FromCompositorMsg::GetFrame(pipeline_id, resp_chan) => {
            debug!("constellation got get root pipeline message");
            self.handle_get_frame(pipeline_id, resp_chan);
        }
        FromCompositorMsg::GetPipeline(frame_id, resp_chan) => {
            debug!("constellation got get root pipeline message");
            self.handle_get_pipeline(frame_id, resp_chan);
        }
        FromCompositorMsg::GetPipelineTitle(pipeline_id) => {
            debug!("constellation got get-pipeline-title message");
            self.handle_get_pipeline_title_msg(pipeline_id);
        }
        FromCompositorMsg::KeyEvent(ch, key, state, modifiers) => {
            debug!("constellation got key event message");
            self.handle_key_msg(ch, key, state, modifiers);
        }
        // Load a new page from a typed url
        // If there is already a pending page (self.pending_frames), it will not be overridden;
        // However, if the id is not encompassed by another change, it will be.
        FromCompositorMsg::LoadUrl(source_id, load_data) => {
            debug!("constellation got URL load message from compositor");
            self.handle_load_url_msg(source_id, load_data, false);
        }
        FromCompositorMsg::IsReadyToSaveImage(pipeline_states) => {
            let is_ready = self.handle_is_ready_to_save_image(pipeline_states);
            debug!("Ready to save image {:?}.", is_ready);
            // The extra println! output below is expected by some test runners.
            if opts::get().is_running_problem_test {
                println!("got ready to save image query, result is {:?}", is_ready);
            }
            let is_ready = is_ready == ReadyToSave::Ready;
            self.compositor_proxy.send(ToCompositorMsg::IsReadyToSaveImageReply(is_ready));
            if opts::get().is_running_problem_test {
                println!("sent response");
            }
        }
        // This should only be called once per constellation, and only by the browser
        FromCompositorMsg::InitLoadUrl(url) => {
            debug!("constellation got init load URL message");
            self.handle_init_load(url);
        }
        // Handle a forward or back request
        FromCompositorMsg::TraverseHistory(pipeline_id, direction) => {
            debug!("constellation got traverse history message from compositor");
            self.handle_traverse_history_msg(pipeline_id, direction);
        }
        FromCompositorMsg::WindowSize(new_size, size_type) => {
            debug!("constellation got window resize message");
            self.handle_window_size_msg(new_size, size_type);
        }
        FromCompositorMsg::TickAnimation(pipeline_id, tick_type) => {
            self.handle_tick_animation(pipeline_id, tick_type)
        }
        FromCompositorMsg::WebDriverCommand(command) => {
            debug!("constellation got webdriver command message");
            self.handle_webdriver_msg(command);
        }
        FromCompositorMsg::Reload => {
            debug!("constellation got reload message");
            self.handle_reload_msg();
        }
        FromCompositorMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
            self.handle_log_entry(top_level_frame_id, thread_name, entry);
        }
        FromCompositorMsg::SetWebVRThread(webvr_thread) => {
            // The WebVR thread may only be installed once.
            assert!(self.webvr_thread.is_none());
            self.webvr_thread = Some(webvr_thread)
        }
        FromCompositorMsg::WebVREvent(pipeline_ids, event) => {
            debug!("constellation got WebVR event");
            self.handle_webvr_event(pipeline_ids, event);
        }
    }
}
/// Dispatch a message received from a script thread to its handler.
fn handle_request_from_script(&mut self, message: FromScriptMsg) {
    match message {
        FromScriptMsg::PipelineExited(pipeline_id) => {
            self.handle_pipeline_exited(pipeline_id);
        }
        FromScriptMsg::ScriptLoadedURLInIFrame(load_info) => {
            debug!("constellation got iframe URL load message {:?} {:?} {:?}",
                   load_info.info.parent_pipeline_id,
                   load_info.old_pipeline_id,
                   load_info.info.new_pipeline_id);
            self.handle_script_loaded_url_in_iframe_msg(load_info);
        }
        FromScriptMsg::ScriptLoadedAboutBlankInIFrame(load_info, lc) => {
            debug!("constellation got loaded `about:blank` in iframe message {:?} {:?}",
                   load_info.parent_pipeline_id,
                   load_info.new_pipeline_id);
            self.handle_script_loaded_about_blank_in_iframe_msg(load_info, lc);
        }
        FromScriptMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
            self.handle_change_running_animations_state(pipeline_id, animation_state)
        }
        // Load a new page from a mouse click
        // If there is already a pending page (self.pending_frames), it will not be overridden;
        // However, if the id is not encompassed by another change, it will be.
        FromScriptMsg::LoadUrl(source_id, load_data, replace) => {
            debug!("constellation got URL load message from script");
            self.handle_load_url_msg(source_id, load_data, replace);
        }
        // A page loaded has completed all parsing, script, and reflow messages have been sent.
        FromScriptMsg::LoadComplete(pipeline_id) => {
            debug!("constellation got load complete message");
            self.handle_load_complete_msg(pipeline_id)
        }
        // Handle a forward or back request
        FromScriptMsg::TraverseHistory(pipeline_id, direction) => {
            debug!("constellation got traverse history message from script");
            self.handle_traverse_history_msg(pipeline_id, direction);
        }
        // Handle a joint session history length request.
        FromScriptMsg::JointSessionHistoryLength(pipeline_id, sender) => {
            debug!("constellation got joint session history length message from script");
            self.handle_joint_session_history_length(pipeline_id, sender);
        }
        // Notification that the new document is ready to become active
        FromScriptMsg::ActivateDocument(pipeline_id) => {
            debug!("constellation got activate document message");
            self.handle_activate_document_msg(pipeline_id);
        }
        // Update pipeline url after redirections
        FromScriptMsg::SetFinalUrl(pipeline_id, final_url) => {
            // The script may have finished loading after we already started shutting down.
            if let Some(ref mut pipeline) = self.pipelines.get_mut(&pipeline_id) {
                debug!("constellation got set final url message");
                pipeline.url = final_url;
            } else {
                warn!("constellation got set final url message for dead pipeline");
            }
        }
        FromScriptMsg::MozBrowserEvent(parent_pipeline_id, pipeline_id, event) => {
            debug!("constellation got mozbrowser event message");
            self.handle_mozbrowser_event_msg(parent_pipeline_id,
                                             pipeline_id,
                                             event);
        }
        FromScriptMsg::Focus(pipeline_id) => {
            debug!("constellation got focus message");
            self.handle_focus_msg(pipeline_id);
        }
        FromScriptMsg::ForwardEvent(pipeline_id, event) => {
            // Forward an input event to the pipeline's event loop, dropping
            // it if the pipeline has already been closed.
            let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
            let result = match self.pipelines.get(&pipeline_id) {
                None => { debug!("Pipeline {:?} got event after closure.", pipeline_id); return; }
                Some(pipeline) => pipeline.event_loop.send(msg),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        }
        FromScriptMsg::GetClipboardContents(sender) => {
            // Clipboard access is not implemented here: reply with an empty string.
            if let Err(e) = sender.send("".to_owned()) {
                warn!("Failed to send clipboard ({})", e);
            }
        }
        FromScriptMsg::SetClipboardContents(_) => {
        }
        FromScriptMsg::SetVisible(pipeline_id, visible) => {
            debug!("constellation got set visible messsage");
            self.handle_set_visible_msg(pipeline_id, visible);
        }
        FromScriptMsg::VisibilityChangeComplete(pipeline_id, visible) => {
            debug!("constellation got set visibility change complete message");
            self.handle_visibility_change_complete(pipeline_id, visible);
        }
        FromScriptMsg::RemoveIFrame(frame_id, sender) => {
            debug!("constellation got remove iframe message");
            let removed_pipeline_ids = self.handle_remove_iframe_msg(frame_id);
            if let Err(e) = sender.send(removed_pipeline_ids) {
                warn!("Error replying to remove iframe ({})", e);
            }
        }
        FromScriptMsg::NewFavicon(url) => {
            debug!("constellation got new favicon message");
            self.compositor_proxy.send(ToCompositorMsg::NewFavicon(url));
        }
        FromScriptMsg::HeadParsed => {
            debug!("constellation got head parsed message");
            self.compositor_proxy.send(ToCompositorMsg::HeadParsed);
        }
        FromScriptMsg::CreateCanvasPaintThread(size, sender) => {
            debug!("constellation got create-canvas-paint-thread message");
            self.handle_create_canvas_paint_thread_msg(&size, sender)
        }
        FromScriptMsg::CreateWebGLPaintThread(size, attributes, sender) => {
            debug!("constellation got create-WebGL-paint-thread message");
            self.handle_create_webgl_paint_thread_msg(&size, attributes, sender)
        }
        FromScriptMsg::NodeStatus(message) => {
            debug!("constellation got NodeStatus message");
            self.compositor_proxy.send(ToCompositorMsg::Status(message));
        }
        FromScriptMsg::SetDocumentState(pipeline_id, state) => {
            debug!("constellation got SetDocumentState message");
            self.document_states.insert(pipeline_id, state);
        }
        FromScriptMsg::Alert(pipeline_id, message, sender) => {
            debug!("constellation got Alert message");
            self.handle_alert(pipeline_id, message, sender);
        }
        FromScriptMsg::ScrollFragmentPoint(pipeline_id, scroll_root_id, point, smooth) => {
            self.compositor_proxy.send(ToCompositorMsg::ScrollFragmentPoint(pipeline_id,
                                                                            scroll_root_id,
                                                                            point,
                                                                            smooth));
        }
        FromScriptMsg::GetClientWindow(send) => {
            self.compositor_proxy.send(ToCompositorMsg::GetClientWindow(send));
        }
        FromScriptMsg::MoveTo(point) => {
            self.compositor_proxy.send(ToCompositorMsg::MoveTo(point));
        }
        FromScriptMsg::ResizeTo(size) => {
            self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
        }
        FromScriptMsg::Exit => {
            self.compositor_proxy.send(ToCompositorMsg::Exit);
        }
        FromScriptMsg::LogEntry(top_level_frame_id, thread_name, entry) => {
            self.handle_log_entry(top_level_frame_id, thread_name, entry);
        }
        FromScriptMsg::SetTitle(pipeline_id, title) => {
            self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, title))
        }
        FromScriptMsg::SendKeyEvent(ch, key, key_state, key_modifiers) => {
            self.compositor_proxy.send(ToCompositorMsg::KeyEvent(ch, key, key_state, key_modifiers))
        }
        FromScriptMsg::TouchEventProcessed(result) => {
            self.compositor_proxy.send(ToCompositorMsg::TouchEventProcessed(result))
        }
        FromScriptMsg::RegisterServiceWorker(scope_things, scope) => {
            debug!("constellation got store registration scope message");
            self.handle_register_serviceworker(scope_things, scope);
        }
        FromScriptMsg::ForwardDOMMessage(msg_vec, scope_url) => {
            // Relay a postMessage payload to the service worker manager, if one exists.
            if let Some(ref mgr) = self.swmanager_chan {
                let _ = mgr.send(ServiceWorkerMsg::ForwardDOMMessage(msg_vec, scope_url));
            } else {
                warn!("Unable to forward DOMMessage for postMessage call");
            }
        }
        FromScriptMsg::BroadcastStorageEvent(pipeline_id, storage, url, key, old_value, new_value) => {
            self.handle_broadcast_storage_event(pipeline_id, storage, url, key, old_value, new_value);
        }
        FromScriptMsg::SetFullscreenState(state) => {
            self.compositor_proxy.send(ToCompositorMsg::SetFullscreenState(state));
        }
    }
}
/// Dispatch a message received from a layout thread to its handler.
fn handle_request_from_layout(&mut self, message: FromLayoutMsg) {
    match message {
        FromLayoutMsg::ChangeRunningAnimationsState(pipeline_id, animation_state) => {
            self.handle_change_running_animations_state(pipeline_id, animation_state)
        }
        FromLayoutMsg::SetCursor(cursor) => {
            self.handle_set_cursor_msg(cursor)
        }
        FromLayoutMsg::ViewportConstrained(pipeline_id, constraints) => {
            debug!("constellation got viewport-constrained event message");
            self.handle_viewport_constrained_msg(pipeline_id, constraints);
        }
    }
}
/// Forward a service worker registration to the service worker manager,
/// warning if no manager has registered itself yet.
fn handle_register_serviceworker(&self, scope_things: ScopeThings, scope: ServoUrl) {
    match self.swmanager_chan {
        Some(ref mgr) => {
            // Best effort: a failed send is silently dropped.
            let _ = mgr.send(ServiceWorkerMsg::RegisterServiceWorker(scope_things, scope));
        },
        None => warn!("sending scope info to service worker manager failed"),
    }
}
/// Broadcast a storage event to every same-origin pipeline except the one
/// that originated the change.
fn handle_broadcast_storage_event(&self, pipeline_id: PipelineId, storage: StorageType, url: ServoUrl,
                                  key: Option<String>, old_value: Option<String>, new_value: Option<String>) {
    let origin = url.origin();
    for pipeline in self.pipelines.values() {
        let is_source = pipeline.id == pipeline_id;
        let same_origin = pipeline.url.origin() == origin;
        if is_source || !same_origin {
            continue;
        }
        let msg = ConstellationControlMsg::DispatchStorageEvent(
            pipeline.id, storage, url.clone(), key.clone(), old_value.clone(), new_value.clone()
        );
        if let Err(err) = pipeline.event_loop.send(msg) {
            warn!("Failed to broadcast storage event to pipeline {} ({:?}).", pipeline.id, err);
        }
    }
}
/// Begin an orderly shutdown: mark the constellation as shutting down and
/// close every frame and pipeline (the event loop in `run` keeps draining
/// messages until `self.pipelines` is empty, then calls `handle_shutdown`).
fn handle_exit(&mut self) {
    // TODO: add a timer, which forces shutdown if threads aren't responsive.
    if self.shutting_down { return; }
    self.shutting_down = true;
    self.mem_profiler_chan.send(mem::ProfilerMsg::Exit);
    // TODO: exit before the root frame is initialized?
    debug!("Removing root frame.");
    let root_frame_id = self.root_frame_id;
    self.close_frame(root_frame_id, ExitPipelineMode::Normal);
    // Close any pending frames and pipelines
    while let Some(pending) = self.pending_frames.pop() {
        debug!("Removing pending frame {}.", pending.frame_id);
        self.close_frame(pending.frame_id, ExitPipelineMode::Normal);
        debug!("Removing pending pipeline {}.", pending.new_pipeline_id);
        self.close_pipeline(pending.new_pipeline_id, DiscardBrowsingContext::Yes, ExitPipelineMode::Normal);
    }
    // In case there are frames which weren't attached to the frame tree, we close them.
    let frame_ids: Vec<FrameId> = self.frames.keys().cloned().collect();
    for frame_id in frame_ids {
        debug!("Removing detached frame {}.", frame_id);
        self.close_frame(frame_id, ExitPipelineMode::Normal);
    }
    // In case there are pipelines which weren't attached to the pipeline tree, we close them.
    let pipeline_ids: Vec<PipelineId> = self.pipelines.keys().cloned().collect();
    for pipeline_id in pipeline_ids {
        debug!("Removing detached pipeline {}.", pipeline_id);
        self.close_pipeline(pipeline_id, DiscardBrowsingContext::Yes, ExitPipelineMode::Normal);
    }
}
/// Final teardown after all pipelines have exited: ask every auxiliary
/// thread to exit, wait for the resource/storage threads to acknowledge,
/// then tell the compositor shutdown is complete.
fn handle_shutdown(&mut self) {
    // At this point, there are no active pipelines,
    // so we can safely block on other threads, without worrying about deadlock.
    // Channels to receive signals when threads are done exiting.
    let (core_sender, core_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    let (storage_sender, storage_receiver) = ipc::channel().expect("Failed to create IPC channel!");
    debug!("Exiting image cache.");
    self.image_cache_thread.exit();
    debug!("Exiting core resource threads.");
    if let Err(e) = self.public_resource_threads.send(net_traits::CoreResourceMsg::Exit(core_sender)) {
        warn!("Exit resource thread failed ({})", e);
    }
    if let Some(ref chan) = self.debugger_chan {
        debugger::shutdown_server(chan);
    }
    if let Some(ref chan) = self.devtools_chan {
        debug!("Exiting devtools.");
        let msg = DevtoolsControlMsg::FromChrome(ChromeToDevtoolsControlMsg::ServerExitMsg);
        if let Err(e) = chan.send(msg) {
            warn!("Exit devtools failed ({})", e);
        }
    }
    debug!("Exiting storage resource threads.");
    if let Err(e) = self.public_resource_threads.send(StorageThreadMsg::Exit(storage_sender)) {
        warn!("Exit storage thread failed ({})", e);
    }
    debug!("Exiting bluetooth thread.");
    if let Err(e) = self.bluetooth_thread.send(BluetoothRequest::Exit) {
        warn!("Exit bluetooth thread failed ({})", e);
    }
    debug!("Exiting service worker manager thread.");
    if let Some(mgr) = self.swmanager_chan.as_ref() {
        if let Err(e) = mgr.send(ServiceWorkerMsg::Exit) {
            warn!("Exit service worker manager failed ({})", e);
        }
    }
    if let Some(chan) = self.webvr_thread.as_ref() {
        debug!("Exiting WebVR thread.");
        if let Err(e) = chan.send(WebVRMsg::Exit) {
            warn!("Exit WebVR thread failed ({})", e);
        }
    }
    debug!("Exiting font cache thread.");
    self.font_cache_thread.exit();
    // Receive exit signals from threads.
    // Blocking here is safe because no pipelines remain (see above).
    if let Err(e) = core_receiver.recv() {
        warn!("Exit resource thread failed ({})", e);
    }
    if let Err(e) = storage_receiver.recv() {
        warn!("Exit storage thread failed ({})", e);
    }
    debug!("Asking compositor to complete shutdown.");
    self.compositor_proxy.send(ToCompositorMsg::ShutdownComplete);
}
/// Handle notification that a pipeline has exited, dropping its state.
fn handle_pipeline_exited(&mut self, pipeline_id: PipelineId) {
    debug!("Pipeline {:?} exited.", pipeline_id);
    // Removing a pipeline that was already gone is harmless.
    let _ = self.pipelines.remove(&pipeline_id);
}
/// A failed send to a pipeline is handled like a panic from that
/// pipeline's top-level frame.
fn handle_send_error(&mut self, pipeline_id: PipelineId, err: IOError) {
    debug!("Pipeline {:?} send error ({}).", pipeline_id, err);
    let reason = format!("Send failed ({})", err);
    let top_level_frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    self.handle_panic(top_level_frame_id, reason, None);
}
/// Recover from a panic in a top-level frame: notify browser chrome,
/// force-close the frame's pipelines, and replace the content with an
/// `about:failure` page (unless that page itself panicked).
fn handle_panic(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
    if opts::get().hard_fail {
        // It's quite difficult to make Servo exit cleanly if some threads have failed.
        // Hard fail exists for test runners so we crash and that's good enough.
        println!("Pipeline failed in hard-fail mode. Crashing!");
        process::exit(1);
    }
    debug!("Panic handler for top-level frame {}: {}.", top_level_frame_id, reason);
    // Notify the browser chrome that the pipeline has failed
    self.trigger_mozbrowsererror(top_level_frame_id, reason, backtrace);
    // Capture what we need from the failed pipeline before closing it.
    let pipeline_id = self.frames.get(&top_level_frame_id).map(|frame| frame.pipeline_id);
    let pipeline_url = pipeline_id.and_then(|id| self.pipelines.get(&id).map(|pipeline| pipeline.url.clone()));
    let parent_info = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.parent_info));
    let window_size = pipeline_id.and_then(|id| self.pipelines.get(&id).and_then(|pipeline| pipeline.size));
    self.close_frame_children(top_level_frame_id, DiscardBrowsingContext::No, ExitPipelineMode::Force);
    let failure_url = ServoUrl::parse("about:failure").expect("infallible");
    // If the panic came from about:failure itself, give up rather than loop.
    if let Some(pipeline_url) = pipeline_url {
        if pipeline_url == failure_url {
            return error!("about:failure failed");
        }
    }
    warn!("creating replacement pipeline for about:failure");
    let new_pipeline_id = PipelineId::new();
    let load_data = LoadData::new(failure_url.clone(), None, None);
    let sandbox = IFrameSandboxState::IFrameSandboxed;
    self.new_pipeline(new_pipeline_id, top_level_frame_id, parent_info, window_size, load_data, sandbox, false);
    self.pending_frames.push(FrameChange {
        frame_id: top_level_frame_id,
        old_pipeline_id: pipeline_id,
        new_pipeline_id: new_pipeline_id,
        url: failure_url,
        replace: None,
    });
}
/// Record an incoming log entry: panics trigger crash recovery, while
/// warnings and errors are buffered for inclusion in crash reports.
fn handle_log_entry(&mut self, top_level_frame_id: Option<FrameId>, thread_name: Option<String>, entry: LogEntry) {
    debug!("Received log entry {:?}.", entry);
    match entry {
        LogEntry::Panic(reason, backtrace) => {
            // Without a frame id, attribute the panic to the root frame.
            let frame_id = top_level_frame_id.unwrap_or(self.root_frame_id);
            self.handle_panic(frame_id, reason, Some(backtrace));
        },
        LogEntry::Error(reason) | LogEntry::Warn(reason) => {
            // Bound the buffer by evicting the oldest warning
            // (VecDeque::truncate is unstable).
            if self.handled_warnings.len() >= WARNINGS_BUFFER_SIZE {
                self.handled_warnings.pop_front();
            }
            self.handled_warnings.push_back((thread_name, reason));
        },
    }
}
/// Forward a WebVR event to the script thread of each listed pipeline.
fn handle_webvr_event(&mut self, ids: Vec<PipelineId>, event: WebVREventMsg) {
    for id in ids {
        if let Some(pipeline) = self.pipelines.get_mut(&id) {
            // Notify the pipeline's script thread; send failures are ignored.
            let _ = pipeline.event_loop.send(ConstellationControlMsg::WebVREvent(id, event.clone()));
        } else {
            warn!("constellation got webvr event for dead pipeline");
        }
    }
}
/// Creates the very first (root) pipeline for the initial page load and
/// registers it as a pending frame until its document activates.
fn handle_init_load(&mut self, url: ServoUrl) {
    let window_size = self.window_size.visible_viewport;
    let root_pipeline_id = PipelineId::new();
    let root_frame_id = self.root_frame_id;
    // No referrer info is available for the initial load (None, None).
    let load_data = LoadData::new(url.clone(), None, None);
    let sandbox = IFrameSandboxState::IFrameUnsandboxed;
    self.new_pipeline(root_pipeline_id, root_frame_id, None, Some(window_size), load_data, sandbox, false);
    self.handle_load_start_msg(root_pipeline_id);
    // Record the pending change; the frame becomes active when the document activates.
    self.pending_frames.push(FrameChange {
        frame_id: self.root_frame_id,
        old_pipeline_id: None,
        new_pipeline_id: root_pipeline_id,
        url: url.clone(),
        replace: None,
    });
    // Let the compositor reflect the URL being loaded in its UI.
    self.compositor_proxy.send(ToCompositorMsg::ChangePageUrl(root_pipeline_id, url));
}
/// Records the initial size of a (sub)frame's pipeline and tells its script
/// thread to perform the corresponding initial resize.
fn handle_frame_size_msg(&mut self,
                         pipeline_id: PipelineId,
                         size: &TypedSize2D<f32, PagePx>) {
    let msg = ConstellationControlMsg::Resize(pipeline_id, WindowSizeData {
        visible_viewport: *size,
        initial_viewport: *size * ScaleFactor::new(1.0),
        device_pixel_ratio: self.window_size.device_pixel_ratio,
    }, WindowSizeType::Initial);
    // Store the new rect inside the pipeline
    let result = {
        // Find the pipeline that corresponds to this rectangle. It's possible that this
        // pipeline may have already exited before we process this message, so just
        // early exit if that occurs.
        match self.pipelines.get_mut(&pipeline_id) {
            Some(pipeline) => {
                pipeline.size = Some(*size);
                pipeline.event_loop.send(msg)
            }
            None => return,
        }
    };
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Notifies the parent pipeline that one of its child iframes has finished
/// loading, so the parent can dispatch a frame load event. No-ops (with a
/// warning) if either pipeline has already been closed.
fn handle_subframe_loaded(&mut self, pipeline_id: PipelineId) {
    let (frame_id, parent_id) = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => match pipeline.parent_info {
            Some((parent_id, _)) => (pipeline.frame_id, parent_id),
            // Top-level frames have no parent to notify.
            None => return warn!("Pipeline {} has no parent.", pipeline_id),
        },
        None => return warn!("Pipeline {} loaded after closure.", pipeline_id),
    };
    let msg = ConstellationControlMsg::DispatchFrameLoadEvent {
        target: frame_id,
        parent: parent_id,
        child: pipeline_id,
    };
    let result = match self.pipelines.get(&parent_id) {
        Some(parent) => parent.event_loop.send(msg),
        None => return warn!("Parent {} frame loaded after closure.", parent_id),
    };
    if let Err(e) = result {
        self.handle_send_error(parent_id, e);
    }
}
// The script thread associated with pipeline_id has loaded a URL in an iframe via script. This
// will result in a new pipeline being spawned and a frame tree being added to
// parent_pipeline_id's frame tree's children. This message is never the result of a
// page navigation.
fn handle_script_loaded_url_in_iframe_msg(&mut self, load_info: IFrameLoadInfoWithData) {
    let (load_data, window_size, is_private) = {
        let old_pipeline = load_info.old_pipeline_id
            .and_then(|old_pipeline_id| self.pipelines.get(&old_pipeline_id));
        let source_pipeline = match self.pipelines.get(&load_info.info.parent_pipeline_id) {
            Some(source_pipeline) => source_pipeline,
            None => return warn!("Script loaded url in closed iframe {}.", load_info.info.parent_pipeline_id),
        };
        // If no url is specified, reload.
        let load_data = load_info.load_data.unwrap_or_else(|| {
            let url = match old_pipeline {
                Some(old_pipeline) => old_pipeline.url.clone(),
                None => ServoUrl::parse("about:blank").expect("infallible"),
            };
            // TODO - loaddata here should have referrer info (not None, None)
            LoadData::new(url, None, None)
        });
        // Privacy is inherited: private if either the iframe or its parent is private.
        let is_private = load_info.info.is_private || source_pipeline.is_private;
        let window_size = old_pipeline.and_then(|old_pipeline| old_pipeline.size);
        // Suspend the pipeline being replaced while the new one loads.
        if let Some(old_pipeline) = old_pipeline {
            old_pipeline.freeze();
        }
        (load_data, window_size, is_private)
    };
    // When replacing, remember the current session-history entry so it can
    // be overwritten rather than pushing a new one.
    let replace = if load_info.info.replace {
        self.frames.get(&load_info.info.frame_id).map(|frame| frame.current())
    } else {
        None
    };
    // Create the new pipeline, attached to the parent and push to pending frames
    self.pending_frames.push(FrameChange {
        frame_id: load_info.info.frame_id,
        old_pipeline_id: load_info.old_pipeline_id,
        new_pipeline_id: load_info.info.new_pipeline_id,
        url: load_data.url.clone(),
        replace: replace,
    });
    self.new_pipeline(load_info.info.new_pipeline_id,
                      load_info.info.frame_id,
                      Some((load_info.info.parent_pipeline_id, load_info.info.frame_type)),
                      window_size,
                      load_data,
                      load_info.sandbox,
                      is_private);
}
/// Handles an about:blank document being loaded into an iframe via script.
/// Unlike a normal iframe load, the new pipeline shares the parent's event
/// loop, so it is constructed here directly rather than via `new_pipeline`.
fn handle_script_loaded_about_blank_in_iframe_msg(&mut self,
                                                  load_info: IFrameLoadInfo,
                                                  layout_sender: IpcSender<LayoutControlMsg>) {
    let IFrameLoadInfo {
        parent_pipeline_id,
        new_pipeline_id,
        frame_type,
        replace,
        frame_id,
        is_private,
    } = load_info;
    let url = ServoUrl::parse("about:blank").expect("infallible");
    let pipeline = {
        let parent_pipeline = match self.pipelines.get(&parent_pipeline_id) {
            Some(parent_pipeline) => parent_pipeline,
            None => return warn!("Script loaded url in closed iframe {}.", parent_pipeline_id),
        };
        // The about:blank document runs in the same event loop as its parent.
        let script_sender = parent_pipeline.event_loop.clone();
        Pipeline::new(new_pipeline_id,
                      frame_id,
                      Some((parent_pipeline_id, frame_type)),
                      script_sender,
                      layout_sender,
                      self.compositor_proxy.clone_compositor_proxy(),
                      // Privacy is inherited from the parent pipeline.
                      is_private || parent_pipeline.is_private,
                      url.clone(),
                      None,
                      parent_pipeline.visible)
    };
    // When replacing, remember the current session-history entry so it is
    // overwritten rather than a new entry being pushed.
    let replace = if replace {
        self.frames.get(&frame_id).map(|frame| frame.current())
    } else {
        None
    };
    assert!(!self.pipelines.contains_key(&new_pipeline_id));
    self.pipelines.insert(new_pipeline_id, pipeline);
    self.pending_frames.push(FrameChange {
        // BUG FIX: this struct literal was missing the required `frame_id`
        // and `old_pipeline_id` fields, which fails to compile. A brand-new
        // about:blank iframe document has no old pipeline to replace.
        frame_id: frame_id,
        old_pipeline_id: None,
        new_pipeline_id: new_pipeline_id,
        url: url,
        replace: replace,
    });
}
/// Relays a cursor change request to the compositor.
fn handle_set_cursor_msg(&mut self, cursor: Cursor) {
    let msg = ToCompositorMsg::SetCursor(cursor);
    self.compositor_proxy.send(msg)
}
/// Informs the compositor that a pipeline's animation state changed
/// (animations started or stopped running).
fn handle_change_running_animations_state(&mut self,
                                          pipeline_id: PipelineId,
                                          animation_state: AnimationState) {
    let msg = ToCompositorMsg::ChangeRunningAnimationsState(pipeline_id, animation_state);
    self.compositor_proxy.send(msg)
}
/// Dispatches an animation tick to either the script thread or the layout
/// thread of the given pipeline, depending on the tick type.
fn handle_tick_animation(&mut self, pipeline_id: PipelineId, tick_type: AnimationTickType) {
    let result = match tick_type {
        AnimationTickType::Script => {
            let msg = ConstellationControlMsg::TickAllAnimations(pipeline_id);
            match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(msg),
                None => return warn!("Pipeline {:?} got script tick after closure.", pipeline_id),
            }
        }
        AnimationTickType::Layout => {
            let msg = LayoutControlMsg::TickAnimations;
            match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.layout_chan.send(msg),
                None => return warn!("Pipeline {:?} got layout tick after closure.", pipeline_id),
            }
        }
    };
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Handles a script-initiated alert(). With mozbrowser enabled and a
/// non-root pipeline, the alert is surfaced to the embedder as a
/// mozbrowsershowmodalprompt event; the boolean sent back tells the caller
/// whether to show the native alert dialog itself.
fn handle_alert(&mut self,
                pipeline_id: PipelineId,
                message: String,
                sender: IpcSender<bool>) {
    let pipeline_isnt_root = self.pipelines.get(&pipeline_id).and_then(|pipeline| pipeline.parent_info).is_some();
    let mozbrowser_modal_prompt = pipeline_isnt_root && PREFS.is_mozbrowser_enabled();
    if mozbrowser_modal_prompt {
        // https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsershowmodalprompt
        let prompt_type = String::from("alert");
        let title = String::from("Alert");
        let return_value = String::from("");
        let event = MozBrowserEvent::ShowModalPrompt(prompt_type, title, message, return_value);
        let top_level_frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
        // The event is dispatched via the root pipeline, which owns the embedder-facing browser element.
        match self.frames.get(&self.root_frame_id) {
            None => warn!("Alert sent after root frame closure."),
            Some(root_frame) => match self.pipelines.get(&root_frame.pipeline_id) {
                None => warn!("Alert sent after root pipeline closure."),
                Some(root_pipeline) => root_pipeline.trigger_mozbrowser_event(Some(top_level_frame_id), event),
            }
        }
    }
    // true => caller shows the native dialog; false => mozbrowser handled it.
    let result = sender.send(!mozbrowser_modal_prompt);
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Message-handler shim for LoadUrl; delegates to `load_url` and discards
/// the returned new pipeline id, which is not needed here.
fn handle_load_url_msg(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) {
    let _ = self.load_url(source_id, load_data, replace);
}
/// Initiates a navigation from `source_id`. For an iframe, the load is
/// routed through the parent's script thread (which owns the framing
/// element); for a top-level pipeline, a new pipeline is created and queued
/// as a pending frame. Returns the id of the pipeline that will perform the
/// load, or None if the navigation was dropped.
fn load_url(&mut self, source_id: PipelineId, load_data: LoadData, replace: bool) -> Option<PipelineId> {
    debug!("Loading {} in pipeline {}.", load_data.url, source_id);
    // If this load targets an iframe, its framing element may exist
    // in a separate script thread than the framed document that initiated
    // the new load. The framing element must be notified about the
    // requested change so it can update its internal state.
    //
    // If replace is true, the current entry is replaced instead of a new entry being added.
    let (frame_id, parent_info) = match self.pipelines.get(&source_id) {
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
        None => {
            warn!("Pipeline {:?} loaded after closure.", source_id);
            return None;
        }
    };
    match parent_info {
        Some((parent_pipeline_id, _)) => {
            self.handle_load_start_msg(source_id);
            // Message the constellation to find the script thread for this iframe
            // and issue an iframe load through there.
            let msg = ConstellationControlMsg::Navigate(parent_pipeline_id, frame_id, load_data, replace);
            let result = match self.pipelines.get(&parent_pipeline_id) {
                Some(parent_pipeline) => parent_pipeline.event_loop.send(msg),
                None => {
                    warn!("Pipeline {:?} child loaded after closure", parent_pipeline_id);
                    return None;
                },
            };
            if let Err(e) = result {
                self.handle_send_error(parent_pipeline_id, e);
            }
            // The iframe load reuses the source pipeline's id.
            Some(source_id)
        }
        None => {
            // Make sure no pending page would be overridden.
            for frame_change in &self.pending_frames {
                if frame_change.old_pipeline_id == Some(source_id) {
                    // id that sent load msg is being changed already; abort
                    return None;
                }
            }
            if !self.pipeline_is_in_current_frame(source_id) {
                // Disregard this load if the navigating pipeline is not actually
                // active. This could be caused by a delayed navigation (eg. from
                // a timer) or a race between multiple navigations (such as an
                // onclick handler on an anchor element).
                return None;
            }
            self.handle_load_start_msg(source_id);
            // Being here means either there are no pending frames, or none of the pending
            // changes would be overridden by changing the subframe associated with source_id.
            // Create the new pipeline
            let window_size = self.pipelines.get(&source_id).and_then(|source| source.size);
            let new_pipeline_id = PipelineId::new();
            let root_frame_id = self.root_frame_id;
            let sandbox = IFrameSandboxState::IFrameUnsandboxed;
            // When replacing, overwrite the current session-history entry
            // instead of pushing a new one.
            let replace = if replace {
                self.frames.get(&frame_id).map(|frame| frame.current())
            } else {
                None
            };
            self.pending_frames.push(FrameChange {
                frame_id: root_frame_id,
                old_pipeline_id: Some(source_id),
                new_pipeline_id: new_pipeline_id,
                url: load_data.url.clone(),
                replace: replace,
            });
            self.new_pipeline(new_pipeline_id, root_frame_id, None, window_size, load_data, sandbox, false);
            // Send message to ScriptThread that will suspend all timers
            match self.pipelines.get(&source_id) {
                Some(source) => source.freeze(),
                None => warn!("Pipeline {:?} loaded after closure", source_id),
            };
            Some(new_pipeline_id)
        }
    }
}
/// Tells the compositor a load has started, along with whether the joint
/// session history currently allows going back and forward.
fn handle_load_start_msg(&mut self, pipeline_id: PipelineId) {
    let top_level_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let can_go_forward = !self.joint_session_future_is_empty(top_level_id);
    let can_go_back = !self.joint_session_past_is_empty(top_level_id);
    self.compositor_proxy.send(ToCompositorMsg::LoadStart(can_go_back, can_go_forward));
}
/// Handles load completion for a pipeline: resolves a waiting WebDriver
/// load-status channel (if it was waiting on this pipeline), notifies the
/// compositor, and propagates a frame-load event to the parent.
fn handle_load_complete_msg(&mut self, pipeline_id: PipelineId) {
    let mut webdriver_reset = false;
    if let Some((expected_pipeline_id, ref reply_chan)) = self.webdriver.load_channel {
        debug!("Sending load to WebDriver");
        if expected_pipeline_id == pipeline_id {
            let _ = reply_chan.send(webdriver_msg::LoadStatus::LoadComplete);
            webdriver_reset = true;
        }
    }
    // Cleared outside the `if let` above because `load_channel` is borrowed there.
    if webdriver_reset {
        self.webdriver.load_channel = None;
    }
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    let forward = !self.joint_session_future_is_empty(frame_id);
    let back = !self.joint_session_past_is_empty(frame_id);
    let root = self.root_frame_id == frame_id;
    self.compositor_proxy.send(ToCompositorMsg::LoadComplete(back, forward, root));
    self.handle_subframe_loaded(pipeline_id);
}
/// Traverses the joint session history by `delta` entries forward or back.
/// Collects the target entry per frame (later entries in the iteration
/// overwrite earlier ones for the same frame) and traverses each one; if
/// fewer than `delta` entries exist, the whole traversal is abandoned.
fn handle_traverse_history_msg(&mut self,
                               pipeline_id: Option<PipelineId>,
                               direction: TraversalDirection) {
    let top_level_frame_id = pipeline_id
        .map(|pipeline_id| self.get_top_level_frame_for_pipeline(pipeline_id))
        .unwrap_or(self.root_frame_id);
    let mut size = 0;
    let mut table = HashMap::new();
    match direction {
        TraversalDirection::Forward(delta) => {
            for entry in self.joint_session_future(top_level_frame_id).take(delta) {
                size = size + 1;
                table.insert(entry.frame_id, entry);
            }
            if size < delta {
                return debug!("Traversing forward too much.");
            }
        },
        TraversalDirection::Back(delta) => {
            for entry in self.joint_session_past(top_level_frame_id).take(delta) {
                size = size + 1;
                table.insert(entry.frame_id, entry);
            }
            if size < delta {
                return debug!("Traversing back too much.");
            }
        },
    }
    // Apply at most one traversal per frame.
    for (_, entry) in table {
        self.traverse_to_entry(entry);
    }
}
/// Replies with the total joint session history length for the top-level
/// frame containing `pipeline_id`: one for the current active entry, plus
/// every past and future entry of every frame in the tree.
fn handle_joint_session_history_length(&self, pipeline_id: PipelineId, sender: IpcSender<u32>) {
    let frame_id = self.get_top_level_frame_for_pipeline(pipeline_id);
    // Seed with 1 to count the current active entry.
    let length = self.full_frame_tree_iter(frame_id)
        .fold(1, |total, frame| total + frame.next.len() + frame.prev.len());
    let _ = sender.send(length as u32);
}
/// Routes a key event to the focused pipeline (or the root frame's current
/// pipeline as a fallback); if neither exists, hands the event back to the
/// compositor.
fn handle_key_msg(&mut self, ch: Option<char>, key: Key, state: KeyState, mods: KeyModifiers) {
    // Send to the explicitly focused pipeline (if it exists), or the root
    // frame's current pipeline. If neither exist, fall back to sending to
    // the compositor below.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.pipeline_id);
    let pipeline_id = self.focus_pipeline_id.or(root_pipeline_id);
    match pipeline_id {
        Some(pipeline_id) => {
            let event = CompositorEvent::KeyEvent(ch, key, state, mods);
            let msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(msg),
                None => return debug!("Pipeline {:?} got key event after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        None => {
            // No page to receive the key; let the compositor (embedder) handle it.
            let event = ToCompositorMsg::KeyEvent(ch, key, state, mods);
            self.compositor_proxy.clone_compositor_proxy().send(event);
        }
    }
}
/// Asks the root frame's current pipeline to reload itself; no-ops if there
/// is no root frame or the pipeline has been closed.
fn handle_reload_msg(&mut self) {
    // Send Reload constellation msg to root script channel.
    let root_pipeline_id = self.frames.get(&self.root_frame_id)
        .map(|root_frame| root_frame.pipeline_id);
    if let Some(pipeline_id) = root_pipeline_id {
        let msg = ConstellationControlMsg::Reload(pipeline_id);
        let result = match self.pipelines.get(&pipeline_id) {
            Some(pipeline) => pipeline.event_loop.send(msg),
            None => return debug!("Pipeline {:?} got reload event after closure.", pipeline_id),
        };
        if let Err(e) = result {
            self.handle_send_error(pipeline_id, e);
        }
    }
}
/// Requests the page title from a pipeline's script thread; if the pipeline
/// is already closed, immediately reports a `None` title to the compositor.
fn handle_get_pipeline_title_msg(&mut self, pipeline_id: PipelineId) {
    let result = match self.pipelines.get(&pipeline_id) {
        None => return self.compositor_proxy.send(ToCompositorMsg::ChangePageTitle(pipeline_id, None)),
        Some(pipeline) => pipeline.event_loop.send(ConstellationControlMsg::GetTitle(pipeline_id)),
    };
    if let Err(e) = result {
        self.handle_send_error(pipeline_id, e);
    }
}
/// Forwards a mozbrowser event originating in `pipeline_id` to the script
/// thread of its parent pipeline, which owns the mozbrowser iframe element.
/// Requires the mozbrowser preference to be enabled.
fn handle_mozbrowser_event_msg(&mut self,
                               parent_pipeline_id: PipelineId,
                               pipeline_id: PipelineId,
                               event: MozBrowserEvent) {
    assert!(PREFS.is_mozbrowser_enabled());
    // Find the script channel for the given parent pipeline,
    // and pass the event to that script thread.
    // If the pipeline lookup fails, it is because we have torn down the pipeline,
    // so it is reasonable to silently ignore the event.
    let frame_id = self.pipelines.get(&pipeline_id).map(|pipeline| pipeline.frame_id);
    match self.pipelines.get(&parent_pipeline_id) {
        Some(pipeline) => pipeline.trigger_mozbrowser_event(frame_id, event),
        None => warn!("Pipeline {:?} handling mozbrowser event after closure.", parent_pipeline_id),
    }
}
/// Replies with the pipeline id for a frame (defaulting to the root frame),
/// preferring a pending replacement pipeline over the currently active one.
fn handle_get_pipeline(&mut self, frame_id: Option<FrameId>,
                       resp_chan: IpcSender<Option<PipelineId>>) {
    let frame_id = frame_id.unwrap_or(self.root_frame_id);
    let current_pipeline_id = self.frames.get(&frame_id)
        .map(|frame| frame.pipeline_id);
    // Scan pending frames newest-first for one replacing the current pipeline.
    let pipeline_id_loaded = self.pending_frames.iter().rev()
        .find(|x| x.old_pipeline_id == current_pipeline_id)
        .map(|x| x.new_pipeline_id)
        .or(current_pipeline_id);
    if let Err(e) = resp_chan.send(pipeline_id_loaded) {
        warn!("Failed get_pipeline response ({}).", e);
    }
}
/// Replies with the frame id owning the given pipeline, or `None` if the
/// pipeline no longer exists.
fn handle_get_frame(&mut self,
                    pipeline_id: PipelineId,
                    resp_chan: IpcSender<Option<FrameId>>) {
    let frame_id = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => Some(pipeline.frame_id),
        None => None,
    };
    match resp_chan.send(frame_id) {
        Ok(()) => {},
        Err(e) => warn!("Failed get_frame response ({}).", e),
    }
}
/// Walks up the frame hierarchy from `pipeline_id`, telling each parent's
/// script thread to mark the containing iframe element as focused.
/// Recurses until a pipeline with no parent (the root) is reached.
fn focus_parent_pipeline(&mut self, pipeline_id: PipelineId) {
    let (frame_id, parent_info) = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
        None => return warn!("Pipeline {:?} focus parent after closure.", pipeline_id),
    };
    let (parent_pipeline_id, _) = match parent_info {
        Some(info) => info,
        // Top of the hierarchy: nothing further to focus.
        None => return debug!("Pipeline {:?} focus has no parent.", pipeline_id),
    };
    // Send a message to the parent of the provided pipeline (if it exists)
    // telling it to mark the iframe element as focused.
    let msg = ConstellationControlMsg::FocusIFrame(parent_pipeline_id, frame_id);
    let result = match self.pipelines.get(&parent_pipeline_id) {
        Some(pipeline) => pipeline.event_loop.send(msg),
        None => return warn!("Pipeline {:?} focus after closure.", parent_pipeline_id),
    };
    if let Err(e) = result {
        self.handle_send_error(parent_pipeline_id, e);
    }
    // Continue upward so every ancestor iframe is focused as well.
    self.focus_parent_pipeline(parent_pipeline_id);
}
/// Records the newly focused pipeline and focuses all its ancestor iframes.
fn handle_focus_msg(&mut self, pipeline_id: PipelineId) {
    self.focus_pipeline_id = Some(pipeline_id);
    // Focus parent iframes recursively
    self.focus_parent_pipeline(pipeline_id);
}
/// Closes the given frame (and its subtree), returning every pipeline id —
/// past, present, and future session-history entries — that belonged to it.
fn handle_remove_iframe_msg(&mut self, frame_id: FrameId) -> Vec<PipelineId> {
    // Collect ids before closing, while the frame tree still exists.
    let result = self.full_frame_tree_iter(frame_id)
        .flat_map(|frame| frame.next.iter().chain(frame.prev.iter())
                  .filter_map(|entry| entry.pipeline_id)
                  .chain(once(frame.pipeline_id)))
        .collect();
    self.close_frame(frame_id, ExitPipelineMode::Normal);
    result
}
/// Applies a visibility change to every pipeline (current and in session
/// history) in the frame tree rooted at the frame owning `pipeline_id`.
fn handle_set_visible_msg(&mut self, pipeline_id: PipelineId, visible: bool) {
    let frame_id = match self.pipelines.get(&pipeline_id) {
        Some(pipeline) => pipeline.frame_id,
        None => return warn!("No frame associated with pipeline {:?}", pipeline_id),
    };
    // Ids are collected first because mutating pipelines below needs `&mut self`.
    let child_pipeline_ids: Vec<PipelineId> = self.full_frame_tree_iter(frame_id)
        .flat_map(|frame| frame.prev.iter().chain(frame.next.iter())
                  .filter_map(|entry| entry.pipeline_id)
                  .chain(once(frame.pipeline_id)))
        .collect();
    for id in child_pipeline_ids {
        if let Some(pipeline) = self.pipelines.get_mut(&id) {
            pipeline.change_visibility(visible);
        }
    }
}
/// Notifies the parent pipeline (if any) that a child frame finished a
/// visibility change, so the parent can update the iframe element's state.
fn handle_visibility_change_complete(&mut self, pipeline_id: PipelineId, visibility: bool) {
    let (frame_id, parent_pipeline_info) = match self.pipelines.get(&pipeline_id) {
        None => return warn!("Visibity change for closed pipeline {:?}.", pipeline_id),
        Some(pipeline) => (pipeline.frame_id, pipeline.parent_info),
    };
    if let Some((parent_pipeline_id, _)) = parent_pipeline_info {
        let visibility_msg = ConstellationControlMsg::NotifyVisibilityChange(parent_pipeline_id,
                                                                            frame_id,
                                                                            visibility);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Parent pipeline {:?} closed", parent_pipeline_id),
            Some(parent_pipeline) => parent_pipeline.event_loop.send(visibility_msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
    }
}
/// Spins up a 2D canvas paint thread of the given size and replies with the
/// channel used to send it canvas commands.
fn handle_create_canvas_paint_thread_msg(
        &mut self,
        size: &Size2D<i32>,
        response_sender: IpcSender<IpcSender<CanvasMsg>>) {
    let webrender_api = self.webrender_api_sender.clone();
    let sender = CanvasPaintThread::start(*size, webrender_api,
                                          opts::get().enable_canvas_antialiasing);
    if let Err(e) = response_sender.send(sender) {
        warn!("Create canvas paint thread response failed ({})", e);
    }
}
/// Spins up a WebGL paint thread with the requested context attributes and
/// replies with either its command channel plus GL limits, or an error string.
fn handle_create_webgl_paint_thread_msg(
        &mut self,
        size: &Size2D<i32>,
        attributes: GLContextAttributes,
        response_sender: IpcSender<Result<(IpcSender<CanvasMsg>, GLLimits), String>>) {
    let webrender_api = self.webrender_api_sender.clone();
    let response = WebGLPaintThread::start(*size, attributes, webrender_api);
    if let Err(e) = response_sender.send(response) {
        warn!("Create WebGL paint thread response failed ({})", e);
    }
}
/// Dispatches a WebDriver command: window-size queries/changes, loads,
/// refreshes, script commands, synthesized key events, and screenshots.
fn handle_webdriver_msg(&mut self, msg: WebDriverCommandMsg) {
    // Find the script channel for the given parent pipeline,
    // and pass the event to that script thread.
    match msg {
        WebDriverCommandMsg::GetWindowSize(_, reply) => {
            let _ = reply.send(self.window_size);
        },
        WebDriverCommandMsg::SetWindowSize(_, size, reply) => {
            // The reply is deferred until the compositor reports the resize.
            self.webdriver.resize_channel = Some(reply);
            self.compositor_proxy.send(ToCompositorMsg::ResizeTo(size));
        },
        WebDriverCommandMsg::LoadUrl(pipeline_id, load_data, reply) => {
            self.load_url_for_webdriver(pipeline_id, load_data, reply, false);
        },
        WebDriverCommandMsg::Refresh(pipeline_id, reply) => {
            // Refresh is a load of the current URL that replaces the history entry.
            let load_data = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => LoadData::new(pipeline.url.clone(), None, None),
                None => return warn!("Pipeline {:?} Refresh after closure.", pipeline_id),
            };
            self.load_url_for_webdriver(pipeline_id, load_data, reply, true);
        }
        WebDriverCommandMsg::ScriptCommand(pipeline_id, cmd) => {
            let control_msg = ConstellationControlMsg::WebDriverScriptCommand(pipeline_id, cmd);
            let result = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.send(control_msg),
                None => return warn!("Pipeline {:?} ScriptCommand after closure.", pipeline_id),
            };
            if let Err(e) = result {
                self.handle_send_error(pipeline_id, e);
            }
        },
        WebDriverCommandMsg::SendKeys(pipeline_id, cmd) => {
            let event_loop = match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => pipeline.event_loop.clone(),
                None => return warn!("Pipeline {:?} SendKeys after closure.", pipeline_id),
            };
            // Deliver each key in order; abort on the first send failure.
            for (key, mods, state) in cmd {
                let event = CompositorEvent::KeyEvent(None, key, state, mods);
                let control_msg = ConstellationControlMsg::SendEvent(pipeline_id, event);
                if let Err(e) = event_loop.send(control_msg) {
                    return self.handle_send_error(pipeline_id, e);
                }
            }
        },
        WebDriverCommandMsg::TakeScreenshot(pipeline_id, reply) => {
            // Screenshots are only taken of the root frame's current pipeline.
            let current_pipeline_id = self.frames.get(&self.root_frame_id)
                .map(|root_frame| root_frame.pipeline_id);
            if Some(pipeline_id) == current_pipeline_id {
                self.compositor_proxy.send(ToCompositorMsg::CreatePng(reply));
            } else {
                if let Err(e) = reply.send(None) {
                    warn!("Screenshot reply failed ({})", e);
                }
            }
        },
    }
}
// https://html.spec.whatwg.org/multipage/#traverse-the-history
/// Traverses a frame's session history to the given entry. If the entry's
/// document was discarded, a fresh pipeline is created to reload its URL and
/// the traversal is re-queued via the pending frame. Otherwise the frame's
/// past/future stacks are rotated to make the entry current, the old
/// pipeline is frozen, the new one thawed, and the parent (if any) is told
/// about the pipeline swap.
fn traverse_to_entry(&mut self, entry: FrameState) {
    // Step 1.
    let frame_id = entry.frame_id;
    let pipeline_id = match entry.pipeline_id {
        Some(pipeline_id) => pipeline_id,
        None => {
            // If there is no pipeline, then the document for this
            // entry has been discarded, so we navigate to the entry
            // URL instead. When the document has activated, it will
            // traverse to the entry, but with the new pipeline id.
            debug!("Reloading document {} for frame {}.", entry.url, frame_id);
            // TODO: referrer?
            let load_data = LoadData::new(entry.url.clone(), None, None);
            // TODO: save the sandbox state so it can be restored here.
            let sandbox = IFrameSandboxState::IFrameUnsandboxed;
            let new_pipeline_id = PipelineId::new();
            // Inherit parent, size, and privacy from the frame's current pipeline if it still exists.
            let (old_pipeline_id, parent_info, window_size, is_private) = match self.frames.get(&frame_id) {
                Some(frame) => match self.pipelines.get(&frame.pipeline_id) {
                    Some(pipeline) => (frame.pipeline_id, pipeline.parent_info, pipeline.size, pipeline.is_private),
                    None => (frame.pipeline_id, None, None, false),
                },
                None => return warn!("no frame to traverse"),
            };
            self.new_pipeline(new_pipeline_id, frame_id, parent_info, window_size, load_data, sandbox, is_private);
            self.pending_frames.push(FrameChange {
                frame_id: frame_id,
                old_pipeline_id: Some(old_pipeline_id),
                new_pipeline_id: new_pipeline_id,
                url: entry.url.clone(),
                // Replace this very entry once the reload activates.
                replace: Some(entry),
            });
            return;
        }
    };
    // Check if the currently focused pipeline is the pipeline being replaced
    // (or a child of it). This has to be done here, before the current
    // frame tree is modified below.
    let update_focus_pipeline = self.focused_pipeline_in_tree(entry.frame_id);
    let old_pipeline_id = match self.frames.get_mut(&frame_id) {
        Some(frame) => {
            let old_pipeline_id = frame.pipeline_id;
            let mut curr_entry = frame.current();
            if entry.instant > frame.instant {
                // We are traversing to the future.
                while let Some(next) = frame.next.pop() {
                    frame.prev.push(curr_entry);
                    curr_entry = next;
                    if entry.instant <= curr_entry.instant { break; }
                }
            } else if entry.instant < frame.instant {
                // We are traversing to the past.
                while let Some(prev) = frame.prev.pop() {
                    frame.next.push(curr_entry);
                    curr_entry = prev;
                    if entry.instant >= curr_entry.instant { break; }
                }
            }
            // The rotation above must land exactly on the requested entry.
            debug_assert_eq!(entry.instant, curr_entry.instant);
            frame.update_current(pipeline_id, &entry);
            old_pipeline_id
        },
        None => return warn!("no frame to traverse"),
    };
    let parent_info = self.pipelines.get(&old_pipeline_id)
        .and_then(|pipeline| pipeline.parent_info);
    // If the currently focused pipeline is the one being changed (or a child
    // of the pipeline being changed) then update the focus pipeline to be
    // the replacement.
    if update_focus_pipeline {
        self.focus_pipeline_id = Some(pipeline_id);
    }
    // Suspend the old pipeline, and resume the new one.
    if let Some(pipeline) = self.pipelines.get(&old_pipeline_id) {
        pipeline.freeze();
    }
    if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
        pipeline.thaw();
    }
    // Set paint permissions correctly for the compositor layers.
    self.send_frame_tree();
    // Update the owning iframe to point to the new pipeline id.
    // This makes things like contentDocument work correctly.
    if let Some((parent_pipeline_id, _)) = parent_info {
        let msg = ConstellationControlMsg::UpdatePipelineId(parent_pipeline_id, frame_id, pipeline_id);
        let result = match self.pipelines.get(&parent_pipeline_id) {
            None => return warn!("Pipeline {:?} child traversed after closure.", parent_pipeline_id),
            Some(pipeline) => pipeline.event_loop.send(msg),
        };
        if let Err(e) = result {
            self.handle_send_error(parent_pipeline_id, e);
        }
        // If this is an iframe, send a mozbrowser location change event.
        // This is the result of a back/forward traversal.
        self.trigger_mozbrowserlocationchange(pipeline_id);
    }
}
/// Finds the top-level frame containing `pipeline_id`. With mozbrowser
/// enabled, mozbrowser iframes count as top-level, so this walks parents
/// until one is found (or the root is reached); otherwise the root frame is
/// always the answer.
fn get_top_level_frame_for_pipeline(&self, mut pipeline_id: PipelineId) -> FrameId {
    if PREFS.is_mozbrowser_enabled() {
        loop {
            match self.pipelines.get(&pipeline_id) {
                Some(pipeline) => match pipeline.parent_info {
                    Some((_, FrameType::MozBrowserIFrame)) => return pipeline.frame_id,
                    Some((parent_id, _)) => pipeline_id = parent_id,
                    None => return self.root_frame_id,
                },
                None => {
                    // Fall back to the root when the chain is broken by a closed pipeline.
                    warn!("Finding top-level ancestor for pipeline {} after closure.", pipeline_id);
                    return self.root_frame_id;
                },
            }
        }
    } else {
        // If mozbrowser is not enabled, the root frame is the only top-level frame
        self.root_frame_id
    }
}
/// Starts a WebDriver-initiated navigation; when a new pipeline is created,
/// the reply channel is stored so load completion can be reported back.
fn load_url_for_webdriver(&mut self,
                          pipeline_id: PipelineId,
                          load_data: LoadData,
                          reply: IpcSender<webdriver_msg::LoadStatus>,
                          replace: bool) {
    match self.load_url(pipeline_id, load_data, replace) {
        Some(new_pipeline_id) => self.webdriver.load_channel = Some((new_pipeline_id, reply)),
        None => {},
    }
}
/// Installs a now-activated pipeline into the frame tree: either replacing
/// an existing session-history entry, appending to an existing frame's
/// history (evicting the oldest entry past the configured limit), or
/// creating a brand-new frame. Updates focus, clears stale forward history,
/// fires location-change events as needed, then rebuilds the frame tree.
fn add_or_replace_pipeline_in_frame_tree(&mut self, frame_change: FrameChange) {
    debug!("Setting frame {} to be pipeline {}.", frame_change.frame_id, frame_change.new_pipeline_id);
    // If the currently focused pipeline is the one being changed (or a child
    // of the pipeline being changed) then update the focus pipeline to be
    // the replacement.
    if let Some(old_pipeline_id) = frame_change.old_pipeline_id {
        if let Some(old_frame_id) = self.pipelines.get(&old_pipeline_id).map(|pipeline| pipeline.frame_id) {
            if self.focused_pipeline_in_tree(old_frame_id) {
                self.focus_pipeline_id = Some(frame_change.new_pipeline_id);
            }
        }
    }
    // Decide which of the three cases applies and collect the follow-up work.
    let (evicted_id, new_frame, clear_future, location_changed) = if let Some(mut entry) = frame_change.replace {
        debug!("Replacing pipeline in existing frame.");
        let evicted_id = entry.pipeline_id;
        entry.replace_pipeline(frame_change.new_pipeline_id, frame_change.url.clone());
        self.traverse_to_entry(entry);
        (evicted_id, false, false, false)
    } else if let Some(frame) = self.frames.get_mut(&frame_change.frame_id) {
        debug!("Adding pipeline to existing frame.");
        frame.load(frame_change.new_pipeline_id, frame_change.url.clone());
        // Evict (discard the pipeline of) the oldest history entry once the
        // frame's past exceeds the session-history length limit.
        let evicted_id = frame.prev.len()
            .checked_sub(PREFS.get("session-history.max-length").as_u64().unwrap_or(20) as usize)
            .and_then(|index| frame.prev.get_mut(index))
            .and_then(|entry| entry.pipeline_id.take());
        (evicted_id, false, true, true)
    } else {
        (None, true, false, true)
    };
    if let Some(evicted_id) = evicted_id {
        self.close_pipeline(evicted_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal);
    }
    if new_frame {
        self.new_frame(frame_change.frame_id, frame_change.new_pipeline_id, frame_change.url);
    };
    if clear_future {
        // A fresh load invalidates the forward history of the whole tree.
        let top_level_frame_id = self.get_top_level_frame_for_pipeline(frame_change.new_pipeline_id);
        self.clear_joint_session_future(top_level_frame_id);
    }
    if location_changed {
        self.trigger_mozbrowserlocationchange(frame_change.new_pipeline_id);
    }
    // Build frame tree
    self.send_frame_tree();
}
/// Handles a document becoming ready to activate: notifies the parent's
/// script thread that the framed content changed, then promotes the
/// matching pending frame change to the active document of its frame.
fn handle_activate_document_msg(&mut self, pipeline_id: PipelineId) {
    debug!("Document ready to activate {:?}", pipeline_id);
    // Notify the parent (if there is one).
    if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
        if let Some((parent_pipeline_id, _)) = pipeline.parent_info {
            if let Some(parent_pipeline) = self.pipelines.get(&parent_pipeline_id) {
                let msg = ConstellationControlMsg::FramedContentChanged(parent_pipeline_id, pipeline.frame_id);
                let _ = parent_pipeline.event_loop.send(msg);
            }
        }
    }
    // Find the pending frame change whose new pipeline id is pipeline_id.
    let pending_index = self.pending_frames.iter().rposition(|frame_change| {
        frame_change.new_pipeline_id == pipeline_id
    });
    // If it is found, remove it from the pending frames, and make it
    // the active document of its frame.
    if let Some(pending_index) = pending_index {
        let frame_change = self.pending_frames.swap_remove(pending_index);
        self.add_or_replace_pipeline_in_frame_tree(frame_change);
    }
}
/// Called when the window is resized. Sends Resize to the root frame's
/// active pipeline, ResizeInactive to its session-history pipelines, Resize
/// to pending top-level pipelines, resolves any waiting WebDriver resize
/// request, and records the new size.
fn handle_window_size_msg(&mut self, new_size: WindowSizeData, size_type: WindowSizeType) {
    debug!("handle_window_size_msg: {:?} {:?}", new_size.initial_viewport.to_untyped(),
           new_size.visible_viewport.to_untyped());
    if let Some(frame) = self.frames.get(&self.root_frame_id) {
        // Send Resize (or ResizeInactive) messages to each
        // pipeline in the frame tree.
        let pipeline_id = frame.pipeline_id;
        let pipeline = match self.pipelines.get(&pipeline_id) {
            None => return warn!("Pipeline {:?} resized after closing.", pipeline_id),
            Some(pipeline) => pipeline,
        };
        let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
            pipeline.id,
            new_size,
            size_type
        ));
        // Inactive (history) pipelines only need their stored size refreshed.
        let pipelines = frame.prev.iter().chain(frame.next.iter())
            .filter_map(|entry| entry.pipeline_id)
            .filter_map(|pipeline_id| self.pipelines.get(&pipeline_id));
        for pipeline in pipelines {
            let _ = pipeline.event_loop.send(ConstellationControlMsg::ResizeInactive(
                pipeline.id,
                new_size
            ));
        }
    }
    // Send resize message to any pending pipelines that aren't loaded yet.
    for pending_frame in &self.pending_frames {
        let pipeline_id = pending_frame.new_pipeline_id;
        let pipeline = match self.pipelines.get(&pipeline_id) {
            None => { warn!("Pending pipeline {:?} is closed", pipeline_id); continue; }
            Some(pipeline) => pipeline,
        };
        // Only top-level pending pipelines track the window size directly.
        if pipeline.parent_info.is_none() {
            let _ = pipeline.event_loop.send(ConstellationControlMsg::Resize(
                pipeline.id,
                new_size,
                size_type
            ));
        }
    }
    // Acknowledge a WebDriver SetWindowSize request, if one is outstanding.
    if let Some(resize_channel) = self.webdriver.resize_channel.take() {
        let _ = resize_channel.send(new_size);
    }
    self.window_size = new_size;
}
/// Handle updating actual viewport / zoom due to @viewport rules
/// by forwarding the constraints to the compositor.
fn handle_viewport_constrained_msg(&mut self,
                                   pipeline_id: PipelineId,
                                   constraints: ViewportConstraints) {
    self.compositor_proxy.send(ToCompositorMsg::ViewportConstrained(pipeline_id, constraints));
}
/// Checks the state of all script and layout pipelines to see if they are idle
/// and compares the current layout state to what the compositor has. This is used
/// to check if the output image is "stable" and can be written as a screenshot
/// for reftests.
/// Since this function is only used in reftests, we do not harden it against panic.
fn handle_is_ready_to_save_image(&mut self,
pipeline_states: HashMap<PipelineId, Epoch>) -> ReadyToSave {
// Note that this function can panic, due to ipc-channel creation failure.
// avoiding this panic would require a mechanism for dealing
// with low-resource scenarios.
//
// If there is no root frame yet, the initial page has
// not loaded, so there is nothing to save yet.
if !self.frames.contains_key(&self.root_frame_id) {
return ReadyToSave::NoRootFrame;
}
// If there are pending loads, wait for those to complete.
if !self.pending_frames.is_empty() {
return ReadyToSave::PendingFrames;
}
let (state_sender, state_receiver) = ipc::channel().expect("Failed to create IPC channel!");
let (epoch_sender, epoch_receiver) = ipc::channel().expect("Failed to create IPC channel!");
// Step through the current frame tree, checking that the script
// thread is idle, and that the current epoch of the layout thread
// matches what the compositor has painted. If all these conditions
// are met, then the output image should not change and a reftest
// screenshot can safely be written.
for frame in self.current_frame_tree_iter(self.root_frame_id) {
let pipeline_id = frame.pipeline_id;
debug!("Checking readiness of frame {}, pipeline {}.", frame.id, pipeline_id);
let pipeline = match self.pipelines.get(&pipeline_id) {
None => {
warn!("Pipeline {:?} screenshot while closing.", pipeline_id);
continue;
},
Some(pipeline) => pipeline,
};
// Check to see if there are any webfonts still loading.
//
// If GetWebFontLoadState returns false, either there are no
// webfonts loading, or there's a WebFontLoaded message waiting in
// script_chan's message queue. Therefore, we need to check this
// before we check whether the document is ready; otherwise,
// there's a race condition where a webfont has finished loading,
// but hasn't yet notified the document.
let msg = LayoutControlMsg::GetWebFontLoadState(state_sender.clone());
if let Err(e) = pipeline.layout_chan.send(msg) {
warn!("Get web font failed ({})", e);
}
if state_receiver.recv().unwrap_or(true) {
return ReadyToSave::WebFontNotLoaded;
}
// See if this pipeline has reached idle script state yet.
match self.document_states.get(&frame.pipeline_id) {
Some(&DocumentState::Idle) => {}
Some(&DocumentState::Pending) | None => {
return ReadyToSave::DocumentLoading;
}
}
// Check the visible rectangle for this pipeline. If the constellation has received a
// size for the pipeline, then its painting should be up to date. If the constellation
// *hasn't* received a size, it could be that the layer was hidden by script before the
// compositor discovered it, so we just don't check the layer.
if let Some(size) = pipeline.size {
// If the rectangle for this pipeline is zero sized, it will
// never be painted. In this case, don't query the layout
// thread as it won't contribute to the final output image.
if size == TypedSize2D::zero() {
continue;
}
// Get the epoch that the compositor has drawn for this pipeline.
let compositor_epoch = pipeline_states.get(&frame.pipeline_id);
match compositor_epoch {
Some(compositor_epoch) => {
// Synchronously query the layout thread to see if the current
// epoch matches what the compositor has drawn. If they match
// (and script is idle) then this pipeline won't change again
// and can be considered stable.
let message = LayoutControlMsg::GetCurrentEpoch(epoch_sender.clone());
if let Err(e) = pipeline.layout_chan.send(message) {
warn!("Failed to send GetCurrentEpoch ({}).", e);
}
match epoch_receiver.recv() {
Err(e) => warn!("Failed to receive current epoch ({}).", e),
Ok(layout_thread_epoch) => if layout_thread_epoch != *compositor_epoch {
return ReadyToSave::EpochMismatch;
},
}
}
None => {
// The compositor doesn't know about this pipeline yet.
// Assume it hasn't rendered yet.
return ReadyToSave::PipelineUnknown;
}
}
}
}
// All script threads are idle and layout epochs match compositor, so output image!
ReadyToSave::Ready
}
fn clear_joint_session_future(&mut self, frame_id: FrameId) {
let frame_ids: Vec<FrameId> = self.full_frame_tree_iter(frame_id)
.map(|frame| frame.id)
.collect();
for frame_id in frame_ids {
let evicted = match self.frames.get_mut(&frame_id) {
Some(frame) => frame.remove_forward_entries(),
None => continue,
};
for entry in evicted {
if let Some(pipeline_id) = entry.pipeline_id {
self.close_pipeline(pipeline_id, DiscardBrowsingContext::No, ExitPipelineMode::Normal);
}
}
}
}
// Close a frame (and all children)
fn close_frame(&mut self, frame_id: FrameId, exit_mode: ExitPipelineMode) {
debug!("Closing frame {}.", frame_id);
let parent_info = self.frames.get(&frame_id)
.and_then(|frame| self.pipelines.get(&frame.pipeline_id))
.and_then(|pipeline| pipeline.parent_info);
self.close_frame_children(frame_id, DiscardBrowsingContext::Yes, exit_mode);
self.event_loops.remove(&frame_id);
if self.frames.remove(&frame_id).is_none() {
warn!("Closing frame {:?} twice.", frame_id);
}
if let Some((parent_pipeline_id, _)) = parent_info {
let parent_pipeline = match self.pipelines.get_mut(&parent_pipeline_id) {
None => return warn!("Pipeline {:?} child closed after parent.", parent_pipeline_id),
Some(parent_pipeline) => parent_pipeline,
};
parent_pipeline.remove_child(frame_id);
}
debug!("Closed frame {:?}.", frame_id);
}
// Close the children of a frame
fn close_frame_children(&mut self, frame_id: FrameId, dbc: DiscardBrowsingContext, exit_mode: ExitPipelineMode) {
debug!("Closing frame children {}.", frame_id);
// Store information about the pipelines to be closed. Then close the
// pipelines, before removing ourself from the frames hash map. This
// ordering is vital - so that if close_pipeline() ends up closing
// any child frames, they can be removed from the parent frame correctly.
let mut pipelines_to_close: Vec<PipelineId> = self.pending_frames.iter()
.filter(|frame_change| frame_change.frame_id == frame_id)
.map(|frame_change| frame_change.new_pipeline_id)
.collect();
if let Some(frame) = self.frames.get(&frame_id) {
pipelines_to_close.extend(frame.next.iter().filter_map(|state| state.pipeline_id));
pipelines_to_close.push(frame.pipeline_id);
pipelines_to_close.extend(frame.prev.iter().filter_map(|state| state.pipeline_id));
}
for pipeline_id in pipelines_to_close {
self.close_pipeline(pipeline_id, dbc, exit_mode);
}
debug!("Closed frame children {}.", frame_id);
}
// Close all pipelines at and beneath a given frame
fn close_pipeline(&mut self, pipeline_id: PipelineId, dbc: DiscardBrowsingContext, exit_mode: ExitPipelineMode) {
debug!("Closing pipeline {:?}.", pipeline_id);
// Store information about the frames to be closed. Then close the
// frames, before removing ourself from the pipelines hash map. This
// ordering is vital - so that if close_frame() ends up closing
// any child pipelines, they can be removed from the parent pipeline correctly.
let frames_to_close = {
let mut frames_to_close = vec!();
if let Some(pipeline) = self.pipelines.get(&pipeline_id) {
frames_to_close.extend_from_slice(&pipeline.children);
}
frames_to_close
};
// Remove any child frames
for child_frame in &frames_to_close {
self.close_frame(*child_frame, exit_mode);
}
// Note, we don't remove the pipeline now, we wait for the message to come back from
// the pipeline.
let pipeline = match self.pipelines.get(&pipeline_id) {
Some(pipeline) => pipeline,
None => return warn!("Closing pipeline {:?} twice.", pipeline_id),
};
// Remove this pipeline from pending frames if it hasn't loaded yet.
let pending_index = self.pending_frames.iter().position(|frame_change| {
frame_change.new_pipeline_id == pipeline_id
});
if let Some(pending_index) = pending_index {
self.pending_frames.remove(pending_index);
}
// Inform script, compositor that this pipeline has exited.
match exit_mode {
ExitPipelineMode::Normal => pipeline.exit(dbc),
ExitPipelineMode::Force => pipeline.force_exit(dbc),
}
debug!("Closed pipeline {:?}.", pipeline_id);
}
// Randomly close a pipeline -if --random-pipeline-closure-probability is set
fn maybe_close_random_pipeline(&mut self) {
match self.random_pipeline_closure {
Some((ref mut rng, probability)) => if probability <= rng.gen::<f32>() { return },
_ => return,
};
// In order to get repeatability, we sort the pipeline ids.
let mut pipeline_ids: Vec<&PipelineId> = self.pipelines.keys().collect();
pipeline_ids.sort();
if let Some((ref mut rng, _)) = self.random_pipeline_closure {
if let Some(pipeline_id) = rng.choose(&*pipeline_ids) {
if let Some(pipeline) = self.pipelines.get(pipeline_id) {
// Don't kill the mozbrowser pipeline
if PREFS.is_mozbrowser_enabled() && pipeline.parent_info.is_none() {
info!("Not closing mozbrowser pipeline {}.", pipeline_id);
} else {
// Note that we deliberately do not do any of the tidying up
// associated with closing a pipeline. The constellation should cope!
warn!("Randomly closing pipeline {}.", pipeline_id);
pipeline.force_exit(DiscardBrowsingContext::No);
}
}
}
}
}
// Convert a frame to a sendable form to pass to the compositor
fn frame_to_sendable(&self, frame_id: FrameId) -> Option<SendableFrameTree> {
self.frames.get(&frame_id).and_then(|frame: &Frame| {
self.pipelines.get(&frame.pipeline_id).map(|pipeline: &Pipeline| {
let mut frame_tree = SendableFrameTree {
pipeline: pipeline.to_sendable(),
size: pipeline.size,
children: vec!(),
};
for child_frame_id in &pipeline.children {
if let Some(frame) = self.frame_to_sendable(*child_frame_id) {
frame_tree.children.push(frame);
}
}
frame_tree
})
})
}
// Send the current frame tree to compositor
fn send_frame_tree(&mut self) {
// Note that this function can panic, due to ipc-channel creation failure.
// avoiding this panic would require a mechanism for dealing
// with low-resource scenarios.
debug!("Sending frame tree for frame {}.", self.root_frame_id);
if let Some(frame_tree) = self.frame_to_sendable(self.root_frame_id) {
let (chan, port) = ipc::channel().expect("Failed to create IPC channel!");
self.compositor_proxy.send(ToCompositorMsg::SetFrameTree(frame_tree,
chan));
if port.recv().is_err() {
warn!("Compositor has discarded SetFrameTree");
return; // Our message has been discarded, probably shutting down.
}
}
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowserlocationchange
// Note that this is a no-op if the pipeline is not a mozbrowser iframe
fn trigger_mozbrowserlocationchange(&self, pipeline_id: PipelineId) {
match self.pipelines.get(&pipeline_id) {
Some(pipeline) => if let Some((parent_id, FrameType::MozBrowserIFrame)) = pipeline.parent_info {
match self.pipelines.get(&parent_id) {
Some(parent) => {
let can_go_forward = !self.joint_session_future_is_empty(pipeline.frame_id);
let can_go_back = !self.joint_session_past_is_empty(pipeline.frame_id);
let url = pipeline.url.to_string();
let event = MozBrowserEvent::LocationChange(url, can_go_back, can_go_forward);
parent.trigger_mozbrowser_event(Some(pipeline.frame_id), event);
},
None => warn!("triggered mozbrowser location change on closed parent {}", parent_id),
}
},
None => warn!("triggered mozbrowser location change on closed pipeline {}", pipeline_id),
}
}
// https://developer.mozilla.org/en-US/docs/Web/Events/mozbrowsererror
// Note that this does not require the pipeline to be an immediate child of the root
fn trigger_mozbrowsererror(&mut self, top_level_frame_id: FrameId, reason: String, backtrace: Option<String>) {
if !PREFS.is_mozbrowser_enabled() { return; }
let mut report = String::new();
for (thread_name, warning) in self.handled_warnings.drain(..) {
report.push_str("\nWARNING: ");
if let Some(thread_name) = thread_name {
report.push_str("<");
report.push_str(&*thread_name);
report.push_str(">: ");
}
report.push_str(&*warning);
}
report.push_str("\nERROR: ");
report.push_str(&*reason);
if let Some(backtrace) = backtrace {
report.push_str("\n\n");
report.push_str(&*backtrace);
}
let event = MozBrowserEvent::Error(MozBrowserErrorType::Fatal, reason, report);
match self.frames.get(&top_level_frame_id) {
None => warn!("Mozbrowser error after top-level frame closed."),
Some(frame) => match self.pipelines.get(&frame.pipeline_id) {
None => warn!("Mozbrowser error after top-level pipeline closed."),
Some(pipeline) => match pipeline.parent_info {
None => pipeline.trigger_mozbrowser_event(None, event),
Some((parent_id, _)) => match self.pipelines.get(&parent_id) {
None => warn!("Mozbrowser error after root pipeline closed."),
Some(parent) => parent.trigger_mozbrowser_event(Some(top_level_frame_id), event),
},
},
},
};
}
fn focused_pipeline_in_tree(&self, frame_id: FrameId) -> bool {
self.focus_pipeline_id.map_or(false, |pipeline_id| {
self.pipeline_exists_in_tree(pipeline_id, frame_id)
})
}
fn pipeline_is_in_current_frame(&self, pipeline_id: PipelineId) -> bool {
self.pipeline_exists_in_tree(pipeline_id, self.root_frame_id)
}
fn pipeline_exists_in_tree(&self,
pipeline_id: PipelineId,
root_frame_id: FrameId) -> bool {
self.current_frame_tree_iter(root_frame_id)
.any(|current_frame| current_frame.pipeline_id == pipeline_id)
}
}<|fim▁end|>
|
frame_id: frame_id,
old_pipeline_id: None,
|
<|file_name|>dlatrd.go<|end_file_name|><|fim▁begin|>// Copyright ©2016 The gonum Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package testlapack
import (
"fmt"
"math"
"math/rand"
"testing"
"gonum.org/v1/gonum/blas"
"gonum.org/v1/gonum/blas/blas64"
)
type Dlatrder interface {
Dlatrd(uplo blas.Uplo, n, nb int, a []float64, lda int, e, tau, w []float64, ldw int)
}
func DlatrdTest(t *testing.T, impl Dlatrder) {
rnd := rand.New(rand.NewSource(1))
for _, uplo := range []blas.Uplo{blas.Upper, blas.Lower} {
for _, test := range []struct {
n, nb, lda, ldw int
}{
{5, 2, 0, 0},
{5, 5, 0, 0},
{5, 3, 10, 11},
{5, 5, 10, 11},
} {
n := test.n
nb := test.nb
lda := test.lda
if lda == 0 {
lda = n
}
ldw := test.ldw
if ldw == 0 {
ldw = nb
}
a := make([]float64, n*lda)
for i := range a {
a[i] = rnd.NormFloat64()
}
e := make([]float64, n-1)
for i := range e {
e[i] = math.NaN()
}
tau := make([]float64, n-1)
for i := range tau {
tau[i] = math.NaN()
}
w := make([]float64, n*ldw)
for i := range w {
w[i] = math.NaN()
}
aCopy := make([]float64, len(a))
copy(aCopy, a)
impl.Dlatrd(uplo, n, nb, a, lda, e, tau, w, ldw)
// Construct Q.
ldq := n
q := blas64.General{
Rows: n,
Cols: n,
Stride: ldq,
Data: make([]float64, n*ldq),
}
for i := 0; i < n; i++ {
q.Data[i*ldq+i] = 1
}
if uplo == blas.Upper {
for i := n - 1; i >= n-nb; i-- {
if i == 0 {
continue
}
h := blas64.General{
Rows: n, Cols: n, Stride: n, Data: make([]float64, n*n),
}
for j := 0; j < n; j++ {
h.Data[j*n+j] = 1
}
v := blas64.Vector{
Inc: 1,
Data: make([]float64, n),
}
for j := 0; j < i-1; j++ {
v.Data[j] = a[j*lda+i]
}
v.Data[i-1] = 1
blas64.Ger(-tau[i-1], v, v, h)
qTmp := blas64.General{
Rows: n, Cols: n, Stride: n, Data: make([]float64, n*n),
}
copy(qTmp.Data, q.Data)
blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, qTmp, h, 0, q)
}
} else {
for i := 0; i < nb; i++ {
if i == n-1 {
continue
}
h := blas64.General{
Rows: n, Cols: n, Stride: n, Data: make([]float64, n*n),
}
for j := 0; j < n; j++ {
h.Data[j*n+j] = 1
}
v := blas64.Vector{
Inc: 1,
Data: make([]float64, n),
}
v.Data[i+1] = 1
for j := i + 2; j < n; j++ {
v.Data[j] = a[j*lda+i]
}
blas64.Ger(-tau[i], v, v, h)
qTmp := blas64.General{
Rows: n, Cols: n, Stride: n, Data: make([]float64, n*n),
}
copy(qTmp.Data, q.Data)
blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, qTmp, h, 0, q)
}
}
errStr := fmt.Sprintf("isUpper = %v, n = %v, nb = %v", uplo == blas.Upper, n, nb)
if !isOrthonormal(q) {
t.Errorf("Q not orthonormal. %s", errStr)
}
aGen := genFromSym(blas64.Symmetric{N: n, Stride: lda, Uplo: uplo, Data: aCopy})
if !dlatrdCheckDecomposition(t, uplo, n, nb, e, tau, a, lda, aGen, q) {
t.Errorf("Decomposition mismatch. %s", errStr)
}<|fim▁hole|>
// dlatrdCheckDecomposition checks that the first nb rows have been successfully
// reduced.
func dlatrdCheckDecomposition(t *testing.T, uplo blas.Uplo, n, nb int, e, tau, a []float64, lda int, aGen, q blas64.General) bool {
// Compute Q^T * A * Q.
tmp := blas64.General{
Rows: n,
Cols: n,
Stride: n,
Data: make([]float64, n*n),
}
ans := blas64.General{
Rows: n,
Cols: n,
Stride: n,
Data: make([]float64, n*n),
}
blas64.Gemm(blas.Trans, blas.NoTrans, 1, q, aGen, 0, tmp)
blas64.Gemm(blas.NoTrans, blas.NoTrans, 1, tmp, q, 0, ans)
// Compare with T.
if uplo == blas.Upper {
for i := n - 1; i >= n-nb; i-- {
for j := 0; j < n; j++ {
v := ans.Data[i*ans.Stride+j]
switch {
case i == j:
if math.Abs(v-a[i*lda+j]) > 1e-10 {
return false
}
case i == j-1:
if math.Abs(a[i*lda+j]-1) > 1e-10 {
return false
}
if math.Abs(v-e[i]) > 1e-10 {
return false
}
case i == j+1:
default:
if math.Abs(v) > 1e-10 {
return false
}
}
}
}
} else {
for i := 0; i < nb; i++ {
for j := 0; j < n; j++ {
v := ans.Data[i*ans.Stride+j]
switch {
case i == j:
if math.Abs(v-a[i*lda+j]) > 1e-10 {
return false
}
case i == j-1:
case i == j+1:
if math.Abs(a[i*lda+j]-1) > 1e-10 {
return false
}
if math.Abs(v-e[i-1]) > 1e-10 {
return false
}
default:
if math.Abs(v) > 1e-10 {
return false
}
}
}
}
}
return true
}
// genFromSym constructs a (symmetric) general matrix from the data in the
// symmetric.
// TODO(btracey): Replace other constructions of this with a call to this function.
func genFromSym(a blas64.Symmetric) blas64.General {
n := a.N
lda := a.Stride
uplo := a.Uplo
b := blas64.General{
Rows: n,
Cols: n,
Stride: n,
Data: make([]float64, n*n),
}
for i := 0; i < n; i++ {
for j := i; j < n; j++ {
v := a.Data[i*lda+j]
if uplo == blas.Lower {
v = a.Data[j*lda+i]
}
b.Data[i*n+j] = v
b.Data[j*n+i] = v
}
}
return b
}<|fim▁end|>
|
}
}
}
|
<|file_name|>token_parser.py<|end_file_name|><|fim▁begin|>from tokens.andd import And
from tokens.expression import Expression
from tokens.iff import Iff
from tokens.kfalse import ConstantFalse
from tokens.ktrue import ConstantTrue
from tokens.nop import Not
from tokens.orr import Or<|fim▁hole|>
class TokenParser:
"""This parser only works with atomic expressions,
so parenthesis are needed everywhere to group items"""
@staticmethod
def parse_expression(string):
# Separate parenthesis so they're new tokens
# Also convert [ or { to the same parenthesis (
for s in '([{':
string = string.replace(s, ' ( ')
for s in ')]}':
string = string.replace(s, ' ) ')
# Get all operators so we can iterate over them
#
# Note that the order here is important. We first need to replace long
# expressions, such as '<->' with their single character representations.
#
# If we didn't do this, after we tried to separate the tokens from other
# expressions by adding spaces on both sides of the operator, '->' would
# break '<->' turning it into '< ->', which would not be recognised.
#
# We add spaces between the tokens so it's easy to split them and identify them.
# Another way would be to iterate over the string and finding the tokens. Once
# identified, they'd be put, in order, on a different list. However, this is
# not as simple as the currently used approach.
operators = [Iff, Then, Not, Or, And, ConstantTrue, ConstantFalse]
# Find all the representations on the string and add surrounding spaces,
# this will allow us to call 'string.split()' to separate variable names
# from the operators so the user doesn't need to enter them separated
for operator in operators:
for representation in operator.representations:
string = string.replace(representation, ' '+operator.single_char_representation+' ')
# Get all the tokens
words = string.split()
# Store the found nested expressions on the stack
expressions_stack = [Expression()]
for w in words:
done = False
for operator in operators:
# We replaced all the operator with their single character representations. We
# don't need to check whether the current word (representation) is any of the
# available representations for this operator, since it's the single-character one.
if w == operator.single_char_representation:
expressions_stack[-1].add_token(operator())
done = True
break
if done:
pass
elif w == '(':
expressions_stack.append(Expression())
elif w == ')':
e = expressions_stack.pop()
expressions_stack[-1].add_token(e)
else:
expressions_stack[-1].add_token(Variable(w))
# Tokenize the top expression (this will also tokenize its children)
expressions_stack[0].tokenize()
# Return the top expression once it's completely valid
return expressions_stack[0]<|fim▁end|>
|
from tokens.then import Then
from tokens.variable import Variable
|
<|file_name|>menuShortcuts.py<|end_file_name|><|fim▁begin|># coding=utf-8
from qtpy import QtWidgets
class MenuShortcuts(QtWidgets.QWidget):
"""
Window displaying the application shortcuts
"""
def __init__(self):
QtWidgets.QWidget.__init__(self)
self.setWindowTitle('Shortcuts')
l = QtWidgets.QGridLayout()
self.setLayout(l)
lk = QtWidgets.QLabel('<b>Key</b>')
ld = QtWidgets.QLabel('<b>Description</b>')
l.addWidget(lk, 0, 0)
l.addWidget(ld, 0, 1)
line = QtWidgets.QFrame(self)
line.setLineWidth(2)
line.setMidLineWidth(1)
line.setFrameShape(QtWidgets.QFrame.HLine)
line.setFrameShadow(QtWidgets.QFrame.Raised)
l.addWidget(line, 1, 0, 1, 2)
self._r = 2
def addShortcut(self, key, description):
<|fim▁hole|> ld = QtWidgets.QLabel(description)
l = self.layout()
l.addWidget(lk, self._r, 0)
l.addWidget(ld, self._r, 1)
self._r += 1<|fim▁end|>
|
lk = QtWidgets.QLabel(key)
|
<|file_name|>L-dynamic.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-
import pairing_heap as pheap
from copy import deepcopy,copy
import threading
import Queue
import requests
from requests.auth import HTTPDigestAuth
import json
import sys
import communication
import config
import time
import L_sprit
# グローバル変数の宣言
LIMIT_SELECTION = 0
SELECTON_RATE = 0
EXCHANGE_RATE = 0
MODE_CHANGE_THRESHOLD = 0.50
ALL_COST = 0
columns = 0
rows = 0
mode_flag = "N"
fwd_ahead = []
back_ahead = []
thresh = MODE_CHANGE_THRESHOLD
class Node :
def __init__ (self, board, selection,exchange,distance):
self.board = board
self.selection = selection
self.exchange = exchange
self.mydistance = distance
def get_next_nodes(self): #渡したノードに隣接するノードを返す
nodes_dic = {}
board = self.board
for i in range(len(board)): #選択するマスを変えたノードをキューに追加する。
for j in range(len(board[0])):
x,y = (i,j)
#右と交換
nodes_dic[((i,j),"R")] = Node(exchange(board,(x, y), (x + 1, y)) , (x + 1, y),(x,y),0)
#左と交換
if x == 0:
# 左への移動は存在しない
nodes_dic[((i,j),"L")] = Node(None, (x - 1, y), (x,y),0)
else:
# 一つ左の選択のRを流用する
#nodes_dic[((i,j),"L")] = Node(exchange(board,(x, y), (x - 1, y)) , (x - 1, y))
nodes_dic[((i,j),"L")] = Node(nodes_dic[((i - 1, j), "R")].board, (x - 1, y), (x, y),0)
#上と交換
if y == 0:
# 上への移動は存在しない
nodes_dic[((i,j),"U")] = Node(None, (x, y - 1), (x,y), 0)
else:<|fim▁hole|> #nodes_dic[((i,j),"U")] = Node(exchange(board,(x, y), (x, y - 1)) , (x, y - 1))
nodes_dic[((i,j),"U")] = Node(nodes_dic[((i, j - 1), "D")].board, (x, y - 1), (x,y), 0)
#下と交換
nodes_dic[((i,j),"D")] = Node(exchange(board,(x, y), (x, y + 1)) , (x, y + 1),(x,y),0)
return nodes_dic
def make_problem(w, h):
arr = []
for i in range(w):
column = []
for j in range(h):
column.append((i, j))
arr.append(column)
return arr
def transpose(arr2d): #転置した2次元配列を返す
result = []
for i in range(len(arr2d[0])):
arr = []
for j in range(len(arr2d)):
arr.append(arr2d[j][i])
result.append(arr)
return result
def operations_to_list(operations): #operationsの型を普通のリストに戻した物を返す
pair = operations
lst = []
while pair != ():
lst.append(pair[0])
pair = pair[1]
return lst
def exchange (then_board, start, destination): # then_boadのstartとdestinationを交換したboardを返す
# 変更されたcolumnだけをdeep copyする
x, y = start
new_x, new_y = destination
if not(0 <= new_x < len(then_board) and 0 <= new_y < len(then_board[0])):
return None
startImg = then_board[x][y]
destImg = then_board[new_x][new_y]
return [
then_board[x] if x != start[0] and x != destination[0]
else [destImg if (x, y) == start
else (startImg if (x, y) == destination else then_board[x][y])
for y in range(len(then_board[0]))]
for x in range(len(then_board))]
board = copy(then_board)
board[x] = deepcopy(then_board[x])
if x != new_x:
board[new_x] = deepcopy(then_board[new_x])
destination_element = board[new_x][new_y]
board[new_x][new_y] = board[x][y]
board[x][y] = destination_element
return board
def create_distance_table(goal): #距離計算用のテーブルを返す
table = []
for i in range(len(goal)):
col = []
for j in range(len(goal[0])):
col.append(None)
table.append(col)
for i in range(len(goal)):
for j in range(len(goal[0])):
(goal_x, goal_y) = goal[i][j]
table[goal_x][goal_y] = (i, j)
return table
def distance_to_goal(table, board): #ノードとゴールノードまでの予測距離を返す。引数は(距離計算用テーブル,ゴールのボード)
ans = 0
for i in range(len(board)):
for j in range(len(board[0])):
(board_x, board_y) = board[i][j]
a = table[board_x][board_y]
b = (i, j)
x = abs(a[0] - b[0])
y = abs(a[1] - b[1])
ans += x + y
return ans * EXCHANGE_RATE
def point_md(point,board, table):
table_x, table_y = board[point[0]][point[1]]
a = table[table_x][table_y]
x = abs(a[0] - point[0])
y = abs(a[1] - point[1])
ans = x + y
return ans
def fast_distance_to_goal(looking_node,node, table):
parent_distance = looking_node.mydistance
parent_board = looking_node.board
selection = node.selection
exchange = node.exchange
child_board = node.board
exchange_distance = point_md(selection,parent_board, table) - point_md(exchange ,child_board, table)
selection_distance = point_md(exchange ,parent_board, table) - point_md(selection,child_board, table)
child_distance = parent_distance - (exchange_distance + selection_distance)
node.mydistance = child_distance
return child_distance * EXCHANGE_RATE
def tuplenode (node) : #ノードをtupleの形にした物を返す
return (tuple([tuple(a) for a in node.board]) , node.selection)
def caliculate_cost (operations): #現在のoperationsのコストを返す
pair = operations
cost = 0
lst = []
while pair != ():
if pair[0][0] == "S":
cost += SELECTON_RATE
else:
cost += EXCHANGE_RATE
pair = pair[1]
return cost
def count_missmatch_image(board1, board2):#board1とboard2間の不一致画像の数を返す
counts = 0
for i in range(len(board1)):
for j in range(len(board1[0])):
try:
if board1[i][j] != board2[i][j]:
counts += 1
except:
print "----"
print board1
print board2
sys.exit()
return counts
def count_selection(operations): #選択を数える
count = 0
for op in operations:
if op[0] == "S":
count += 1
return count
def encode_answer_format(operations_list,L_answer_text):
selectcount = 1
changecount = 0
ans = ""
word = ""
for i in range(len(operations_list)):
if((operations_list[i] == "L")or(operations_list[i] == "R")or(operations_list[i] == "U")or(operations_list[i] == "D")):
word += operations_list[i]
changecount +=1
else:
ans = "\r\n" + word[::-1] + ans
ans = "\r\n" + str(changecount) +ans
ans = "\r\n" + operations_list[i][1:] + ans
word = ""
changecount = 0
selectcount += 1
ans = str(selectcount) + "\r\n" +L_answer_text+ ans
return ans
# リストの先頭から順番に実行する
def move_position(move_list, pos):
pos = list(pos)
for move in move_list:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] -= 1
elif move == "D":
pos[1] += 1
return tuple(pos)
def reverse_operations(operations):
reverse_table = {
"L": "R",
"R": "L",
"U": "D",
"D": "U"
}
result = []
moves = []
for op in operations:
if op[0] == "S":
pos = (int(op[1], 16), int(op[2], 16))
rev_moves = [reverse_table[a] for a in moves]
new_pos = move_position(reversed(moves), pos)
new_op = "S%X%X" % new_pos
result.append(new_op)
result += rev_moves
moves = []
else:
moves.append(op)
rev_moves = [reverse_table[a] for a in moves]
result += rev_moves
return result
def astar_step(queue, checked_nodes, table, min_distance, tag, fwd_ahead, back_ahead):
dummy, looking_node, operations, selection_count = queue.pop() #キューの先頭を取り出
g_star = caliculate_cost(operations)
checked_nodes[(tuplenode(looking_node),tag)] = operations #chacked_nodes集合にチェック済みとして追加
next_nodes = looking_node.get_next_nodes() #looking_nodeに隣接するノードたち(上下左右)を辞書型でnext_nodesに追加
for key, node in next_nodes.items() : #中身全部取り出すぜー
cost = 0
select = False
if key[0] != looking_node.selection :
select = True
cost += SELECTON_RATE
added_operation = (key[1],("S%X%X"%key[0],operations))
else:
added_operation = (key[1],operations)
if node.board != None and not((tuplenode(node),tag) in checked_nodes): #各隣接ノードがcheckd_nodesに無ければキューに追加。
h_star = fast_distance_to_goal(looking_node,node, table)
f_star = g_star + h_star
if select:
new_selection_count = selection_count + 1
else:
new_selection_count = selection_count
if new_selection_count <= LIMIT_SELECTION:
queue.push((f_star + cost + EXCHANGE_RATE, node, added_operation, new_selection_count))
if h_star <= min_distance:
min_distance = h_star
print "%s distance=%d tag=%s" % (operations_to_list(added_operation), h_star, tag)
#if int(h_star) == 0:
#cost = -1000000000
#print "stop!"
return min_distance
def forward(problem, answer, checked_nodes,L_answer_text, result_queue):
global mode_flag ,fwd_ahead, back_ahead, thresh
ans_status = 0
distance_table = create_distance_table(answer)
static_h_star = distance_to_goal(distance_table,problem)/EXCHANGE_RATE
print static_h_star
queue = pheap.Empty(key=lambda a: a[0]) #空のキューを作成
forward_min = 999999999999
my_tag = "f"
back_tag = "b"
true_ans = answer
next_nodes = Node(problem,(0,0),(0,0),static_h_star).get_next_nodes() #problemに隣接するノードたち(上下左右)を辞書型でnext_nodesに追加
for key, node in next_nodes.items(): #中身全部取り出すぜー
added_operation = (key[1],("S%X%X"%key[0],()))
if node.board != None :
h_star = distance_to_goal(distance_table,node.board)
h_star = fast_distance_to_goal(Node(problem,(0,0),(0,0),static_h_star),node, distance_table)
queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
while not queue.is_empty:
operations = queue.element[2]
if queue.element[1].board == true_ans: #仮に取り出したキューが正答と一致したら終了
print "forward goal"
print operations_to_list(operations)
print "cost=%d" % caliculate_cost(operations)
ALL_COST = caliculate_cost(operations)
result_queue.put(encode_answer_format(operations_to_list(operations)))
return
if (tuplenode(queue.element[1]),back_tag) in checked_nodes:
print "ぶつかったforward"
fwd_op = list(reversed(operations_to_list(operations)))
fwd_cost = caliculate_cost(operations)
back_op = checked_nodes[(tuplenode(queue.element[1]),back_tag)]
back_cost = caliculate_cost(back_op) - SELECTON_RATE
back_op = reverse_operations(operations_to_list(back_op))[1:]
full_op = fwd_op + back_op
full_cost = fwd_cost + back_cost
ALL_COST = full_cost
result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
return
fwd_ahead = queue.element[1].board
if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
print "mode change!"
mode_flag = "A"
thresh *= MODE_CHANGE_THRESHOLD
ans_status = 0
if mode_flag == "A" and ans_status == 0:
print "change answer!"
answer = back_ahead
distance_table = create_distance_table(answer)
print distance_table
ans_status = 1
forward_min = min(forward_min, astar_step(queue, checked_nodes, distance_table, forward_min, my_tag, fwd_ahead, back_ahead))
def back(problem, answer, checked_nodes, L_answer_text, result_queue):
    """Backward half of the bidirectional A* search (runs on its own thread).

    Expands nodes from the solved board (`answer`) toward the scrambled
    board (`problem`).  Finishes by putting an encoded answer string on
    `result_queue` when either the popped board equals `problem`, or the
    search collides with a node already expanded by the forward search
    (tag "f") in the shared `checked_nodes` map.
    """
    global mode_flag, fwd_ahead, back_ahead, thresh
    ans_status = 0
    distance_table = create_distance_table(problem)
    static_h_star = distance_to_goal(distance_table,answer)/EXCHANGE_RATE
    print static_h_star
    queue = pheap.Empty(key=lambda a: a[0]) # create an empty priority queue keyed on the f-score
    back_min = 999999999999
    my_tag = "b"
    fwd_tag = "f"
    true_prob = problem
    next_nodes = Node(answer,(0,0), (0,0),static_h_star).get_next_nodes() # dict of nodes adjacent to `answer` (up/down/left/right)
    for key, node in next_nodes.items() : # seed the queue with every neighbor
        added_operation = (key[1],("S%X%X"%key[0],()))
        if node.board != None :
            h_star = fast_distance_to_goal(Node(answer,(0,0),(0,0),static_h_star),node, distance_table)
            queue.push((h_star+SELECTON_RATE+EXCHANGE_RATE, node, added_operation, 1))
    while not queue.is_empty:
        operations = queue.element[2]
        if queue.element[1].board == true_prob: # finished if the popped board matches the scrambled start
            print "back goal"
            print operations_to_list(operations)
            print "cost=%d" % caliculate_cost(operations)
            # NOTE(review): ALL_COST is a local here (no `global` declaration),
            # so this assignment is invisible outside this function -- confirm intent.
            ALL_COST = caliculate_cost(operations)
            result_queue.put(encode_answer_format(list(reversed(reverse_operations(operations_to_list(operations))))))
            return
        if (tuplenode(queue.element[1]),fwd_tag) in checked_nodes:
            # The two searches met: splice the forward ops with the reversed backward ops.
            print "ぶつかったback"
            fwd_op = checked_nodes[(tuplenode(queue.element[1]),fwd_tag)]
            fwd_op = list(reversed(operations_to_list(fwd_op)))
            # NOTE(review): fwd_cost is computed from this thread's own
            # `operations` rather than the forward entry fetched above --
            # looks like a copy/paste slip from forward(); verify.
            fwd_cost = caliculate_cost(operations)
            back_op = operations
            back_cost = caliculate_cost(back_op) - SELECTON_RATE
            back_op = reverse_operations(operations_to_list(back_op))[1:]
            full_op = fwd_op + back_op
            full_cost = fwd_cost + back_cost
            ALL_COST = full_cost
            result_queue.put(encode_answer_format(list(reversed(full_op)), L_answer_text))
            return
        back_ahead = queue.element[1].board
        if count_missmatch_image(fwd_ahead, back_ahead) <= int(rows * columns * thresh):# and mode_flag == "N":
            # Frontiers are close enough: switch to approach mode and tighten
            # the threshold for the next switch.
            print "mode change!"
            mode_flag = "A"
            thresh *= MODE_CHANGE_THRESHOLD
            ans_status = 0
        if mode_flag == "A" and ans_status == 0:
            # Retarget this search at the forward search's current frontier.
            print "change answer!"
            problem = fwd_ahead
            distance_table = create_distance_table(problem)
            print distance_table
            ans_status = 1
        back_min = min(back_min, astar_step(queue, checked_nodes, distance_table, back_min, my_tag, fwd_ahead, back_ahead))
def solve(sortedImages, splitColumns, splitRows, limit, sel_rate, exc_rate, target_columns, target_rows):
    """Solve the puzzle with a bidirectional A* search on two daemon threads.

    Args:
        sortedImages: the goal board (pieces in solved order).
        splitColumns, splitRows: board dimensions.
        limit: maximum number of selection operations allowed.
        sel_rate, exc_rate: cost of a selection / an exchange operation.
        target_columns, target_rows: size of the corner region pre-solved
            by L_sprit before the search starts.

    Returns:
        The encoded answer string produced by whichever search thread
        finishes first.
    """
    global LIMIT_SELECTION, SELECTON_RATE, EXCHANGE_RATE, rows, columns, fwd_ahead, back_ahead
    LIMIT_SELECTION = limit
    SELECTON_RATE = sel_rate
    EXCHANGE_RATE = exc_rate
    problem = make_problem(splitColumns, splitRows)
    answer = sortedImages
    columns = splitColumns
    rows = splitRows
    checked_nodes = {} #set() # nodes already expanded, shared by both search threads
    problem,L_answer_text = L_sprit.corner_L_sprit(target_columns, target_rows, problem,answer)
    LIMIT_SELECTION -= 1
    fwd_ahead = problem
    back_ahead = answer
    result_queue = Queue.Queue()
    fwd_thr = threading.Thread(target=forward, name="fwd", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
    back_thr = threading.Thread(target=back, name="back", args=(problem, answer, checked_nodes, L_answer_text, result_queue))
    fwd_thr.daemon = True
    back_thr.daemon = True
    fwd_thr.start()
    back_thr.start()
    while True:
        try:
            # Poll the result queue with a 1-second timeout; Queue.Empty is
            # raised when no answer has arrived yet.
            return result_queue.get(True, 1)
        except Queue.Empty:
            # No result yet -- keep waiting.
            pass
        except KeyboardInterrupt:
            print "aborting"
            # Set the kill flag so the worker threads stop.
            # NOTE(review): this assigns a *local* kill_flag (no `global`),
            # so the threads can never observe it -- likely needs `global`.
            kill_flag = True
            sys.exit(0)
#main -- entry point: parse CLI args, fetch the problem, solve it, post the answer.
master = ""
target_columns = 4
target_rows = 4
if len(sys.argv) == 3:
    # usage: prog <master-host> <cols>-<rows>
    master = sys.argv[1]
    target_columns,target_rows = sys.argv[2].split("-")
elif len(sys.argv) == 2:
    if '.' in sys.argv[1]:
        # a single arg containing '.' is taken as the master host
        master = sys.argv[1]
    elif '-' in sys.argv[1]:
        # a single arg "C-R" sets the pre-solved corner size
        target_columns,target_rows = sys.argv[1].split("-")
        master = config.master
    else:
        master = config.master
# NOTE(review): with no CLI args, `master` stays "" (config.master is never
# applied in that case) -- confirm whether that is intended.
para = communication.get_problem(master)
ans_str = solve(para['answer'], para['columns'], para['rows'], para['lim_select'], para['selection_rate'], para['exchange_rate'],int(target_columns),int(target_rows))
communication.post(master, ans_str)
|
# 一つ上の選択のDを流用する
|
<|file_name|>hello.rs<|end_file_name|><|fim▁begin|>// -*- rust -*-
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at<|fim▁hole|>//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
println("hello, world");
}<|fim▁end|>
|
// http://rust-lang.org/COPYRIGHT.
|
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>import setuptools
setuptools.setup(
name="sirius",
version="0.5",
author="",<|fim▁hole|> description="pySIRIUS",
url="https://github.com/electronic_structure/SIRIUS",
packages=['sirius'],
install_requires=['mpi4py', 'voluptuous', 'numpy', 'h5py', 'scipy', 'PyYAML'],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)<|fim▁end|>
|
author_email="[email protected]",
|
<|file_name|>player-name-check.js<|end_file_name|><|fim▁begin|>'use strict';
/**
* Confirm new player name
*/
module.exports = (srcPath) => {
const EventUtil = require(srcPath + 'EventUtil');<|fim▁hole|> event: state => (socket, args) => {
const say = EventUtil.genSay(socket);
const write = EventUtil.genWrite(socket);
write(`<bold>${args.name} doesn't exist, would you like to create it?</bold> <cyan>[y/n]</cyan> `);
socket.once('data', confirmation => {
say('');
confirmation = confirmation.toString().trim().toLowerCase();
if (!/[yn]/.test(confirmation)) {
return socket.emit('player-name-check', socket, args);
}
if (confirmation === 'n') {
say(`Let's try again...`);
return socket.emit('create-player', socket, args);
}
return socket.emit('finish-player', socket, args);
});
}
};
};<|fim▁end|>
|
return {
|
<|file_name|>attempt_test.go<|end_file_name|><|fim▁begin|>package aws_test
import (
"github.com/lizdeika/goamz/aws"
. "github.com/motain/gocheck"
"time"
)
// TestAttemptTiming verifies that AttemptStrategy spaces attempts at the
// configured Delay and stops once Total has elapsed.
func (S) TestAttemptTiming(c *C) {
	testAttempt := aws.AttemptStrategy{
		Total: 0.25e9,
		Delay: 0.1e9,
	}
	want := []time.Duration{0, 0.1e9, 0.2e9, 0.2e9}
	got := make([]time.Duration, 0, len(want)) // avoid allocation when testing timing
	t0 := time.Now()
	for a := testAttempt.Start(); a.Next(); {
		got = append(got, time.Now().Sub(t0))
	}
	got = append(got, time.Now().Sub(t0))
	c.Assert(got, HasLen, len(want))
	const margin = 0.01e9
	// BUG FIX: range over the *measured* durations ("got"), not "want".
	// The previous code compared each expected value against its own
	// tolerance band, so the check could never fail.
	for i, got := range got {
		lo := want[i] - margin
		hi := want[i] + margin
		if got < lo || got > hi {
			c.Errorf("attempt %d want %g got %g", i, want[i].Seconds(), got.Seconds())
		}
	}
}
func (S) TestAttemptNextHasNext(c *C) {
a := aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 2e8}.Start()
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
time.Sleep(2e8)<|fim▁hole|> c.Assert(a.Next(), Equals, true)
c.Assert(a.Next(), Equals, false)
a = aws.AttemptStrategy{Total: 1e8, Min: 2}.Start()
time.Sleep(1e8)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, true)
c.Assert(a.Next(), Equals, true)
c.Assert(a.HasNext(), Equals, false)
c.Assert(a.Next(), Equals, false)
}<|fim▁end|>
|
c.Assert(a.HasNext(), Equals, true)
|
<|file_name|>gulpfile.js<|end_file_name|><|fim▁begin|>var gulp = require('gulp');
var del = require('del');
var plumber = require('gulp-plumber');
var replace = require('gulp-replace');
// lib
// Delete the previous ./lib build output.
gulp.task('lib-clean', function () {
    return del('./lib/*');
});
// Copy non-JS assets into ./lib unchanged.
gulp.task('lib-assets', ['lib-clean'], function () {
    return gulp.src(['./src/**/*.*', '!./src/**/*.js'])
        .pipe(gulp.dest('./lib'));
});
// Copy JS sources into ./lib, rewriting zrender requires to zrenderjs.
gulp.task('lib-compile', ['lib-clean'], function () {
    return gulp.src(['./src/**/*.js'])
        .pipe(plumber())
        .pipe(replace('require(\'zrender\')', 'require(\'zrenderjs\')'))
        .pipe(replace('require(\'zrender/', 'require(\'zrenderjs/'))
        .pipe(gulp.dest('./lib'));
});
// Aggregate task for the ./lib build.
gulp.task('lib', ['lib-clean', 'lib-assets', 'lib-compile']);
// dist
// Delete the previous ./dist build output.
gulp.task('dist-clean', function () {
    return del('./dist/*');
});
// Copy non-JS assets into ./dist unchanged.
gulp.task('dist-assets', ['dist-clean'], function () {
    return gulp.src(['./src/**/*.*', '!./src/**/*.js'])
        .pipe(gulp.dest('./dist'));
});
gulp.task('dist-compile', ['dist-clean'], function () {
return gulp.src(['./src/**/*.js'])
.pipe(plumber())
.pipe(replace('require(\'zrender\')', 'require(\'zrenderjs\')'))
.pipe(replace('require(\'zrender/', 'require(\'zrenderjs/'))
// .pipe(replace('require(\'text!', 'require(\''))
.pipe(gulp.dest('./dist'));<|fim▁hole|> .pipe(gulp.dest('./dist'));
});
gulp.task('dist', ['dist-clean', 'dist-assets', 'dist-compile', 'dist-x']);
gulp.task('default', ['dist']);<|fim▁end|>
|
});
gulp.task('dist-x', ['dist-clean'], function () {
return gulp.src(['./package.json', './README.md', './LICENSE'])
|
<|file_name|>control_flow_ops.py<|end_file_name|><|fim▁begin|># Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""## Control Flow Operations
TensorFlow provides several operations and classes that you can use to control
the execution of operations and add conditional dependencies to your graph.
@@identity
@@tuple
@@group
@@no_op
@@count_up_to
@@cond
## Logical Operators
TensorFlow provides several operations that you can use to add logical operators
to your graph.
@@logical_and
@@logical_not
@@logical_or
@@logical_xor
## Comparison Operators
TensorFlow provides several operations that you can use to add comparison
operators to your graph.
@@equal
@@not_equal
@@less
@@less_equal
@@greater
@@greater_equal
@@select
@@where
## Debugging Operations
TensorFlow provides several operations that you can use to validate values and
debug your graph.
@@is_finite
@@is_inf
@@is_nan
@@verify_tensor_all_finite
@@check_numerics
@@add_check_numerics_ops
@@Assert
@@Print
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
from tensorflow.python.platform import logging
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
# pylint: disable=protected-access
def _Identity(data, name=None):
  """Return a tensor with the same shape and contents as `data`.

  Reference-typed tensors are routed through the ref-preserving identity op;
  everything else uses the ordinary identity.

  Args:
    data: A Tensor.
    name: A name for this operation (optional).

  Returns:
    A Tensor with the same type and value as the input Tensor.
  """
  if data.dtype.is_ref_dtype:
    return gen_array_ops._ref_identity(data, name=name)
  return array_ops.identity(data, name=name)
def _NextIteration(data, name=None):
  """Make `data` available to the next loop iteration, preserving ref-ness."""
  if data.dtype.is_ref_dtype:
    return ref_next_iteration(data, name=name)
  return next_iteration(data, name=name)
def _Merge(values, name=None):
  """Merge `values`, using the ref variant only when every input is a ref."""
  if any(not v.dtype.is_ref_dtype for v in values):
    return gen_control_flow_ops._merge(values, name)
  return gen_control_flow_ops._ref_merge(values, name)
def _Enter(data, frame_name, is_constant=False, parallel_iterations=10,
           use_ref=True, name=None):
  """Creates or finds a child frame, and makes `data` available to it.

  The unique `frame_name` is used by the `Executor` to identify frames. If
  `is_constant` is true, `output` is a constant in the child frame; otherwise
  it may be changed in the child frame. At most `parallel_iterations`
  iterations are run in parallel in the child frame.

  Args:
    data: The tensor to be made available to the child frame.
    frame_name: The name of the child frame.
    is_constant: If true, the output is constant within the child frame.
    parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `data`.
  """
  if use_ref and data.dtype.is_ref_dtype:
    return ref_enter(data, frame_name, is_constant, parallel_iterations,
                     name=name)
  return enter(data, frame_name, is_constant, parallel_iterations, name=name)
def exit(data, name=None):
  """Exits the current frame to its parent frame.

  Exit makes its input `data` available to the parent frame.

  Args:
    data: The tensor to be made available to the parent frame.
    name: A name for this operation (optional).

  Returns:
    The same tensor as `data`.
  """
  if not data.dtype.is_ref_dtype:
    return gen_control_flow_ops._exit(data, name)
  return gen_control_flow_ops._ref_exit(data, name)
def switch(data, pred, dtype=None, name=None):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is true, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    dtype: Optional element type for the returned tensor. If missing,
           the type is inferred from the type of `value`.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded to
    `output_true`, otherwise it goes to `output_false`.
  """
  with ops.op_scope([data, pred], name, "Switch") as name:
    data = ops.convert_to_tensor_or_indexed_slices(data, dtype=dtype,
                                                   name="data")
    pred = ops.convert_to_tensor(pred, name="pred")
    if isinstance(data, ops.Tensor):
      return gen_control_flow_ops._switch(data, pred, name=name)
    else:
      # `data` is an IndexedSlices: switch each component (values, indices
      # and, if present, dense_shape) separately so both outputs are
      # themselves IndexedSlices.
      val, ind, dense_shape = data.values, data.indices, data.dense_shape
      val_f, val_t = gen_control_flow_ops._switch(val, pred, name=name)
      ind_f, ind_t = gen_control_flow_ops._switch(ind, pred, name="indices")
      if dense_shape:
        dense_shape_f, dense_shape_t = gen_control_flow_ops._switch(
            dense_shape, pred, name="dense_shape")
      else:
        dense_shape_f, dense_shape_t = None, None
      return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
              ops.IndexedSlices(val_t, ind_t, dense_shape_t))
def merge(inputs, name=None):
  """Returns the value of an available element of `inputs`.

  This op tests each of the tensors in `inputs` in turn to determine if any of
  them is available. If it finds an available tensor, it returns it and its
  index in `inputs`.

  It is an error if more than one tensor in `inputs` is available. If no tensor
  in `inputs` is available, the returned tensor and index are not set.

  This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
  `Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
  before merging.

  Args:
    inputs: The input tensors, at most one of which is available.
    name: A name for this operation (optional).

  Returns:
    A tuple containing the chosen input tensor and its index in `inputs`.

  Raises:
    ValueError: If inputs are IndexedSlices and some but not all have a
      dense_shape property.
  """
  with ops.op_scope(inputs, name, "Merge") as name:
    inputs = [ops.convert_to_tensor_or_indexed_slices(inp)
              for inp in inputs]
    if all([isinstance(inp, ops.Tensor) for inp in inputs]):
      return _Merge(inputs, name=name)
    else:
      # At least one input is IndexedSlices: normalize everything to
      # IndexedSlices and merge each component separately.
      inputs = math_ops._as_indexed_slices_list(inputs)
      values, _ = _Merge([inp.values for inp in inputs], name=name)
      indices, chosen_index = _Merge(
          [inp.indices for inp in inputs], name="indices")
      if any(inp.dense_shape for inp in inputs):
        # dense_shape must be all-or-nothing across the merged inputs.
        if not all(inp.dense_shape for inp in inputs):
          raise ValueError("Either all merged IndexedSlices must have a "
                           "dense_shape, or none must have a dense_shape.")
        dense_shape, _ = _Merge(
            [inp.dense_shape for inp in inputs], name="dense_shape")
      else:
        dense_shape = None
      return ops.IndexedSlices(values, indices, dense_shape), chosen_index
def _SwitchRefOrTensor(data, pred, name="Switch"):
  """Forwards `data` to an output determined by `pred`.

  If `pred` is true, the `data` input is forwarded to the first output.
  Otherwise, the data goes to the second output.

  This op handles `Tensor`s and `IndexedSlices`; reference-typed tensors go
  through `ref_switch` so ref-ness is preserved.

  Args:
    data: The tensor to be forwarded to the appropriate output.
    pred: A scalar that specifies which output port will receive data.
    name: A name for this operation (optional).

  Returns:
    `(output_false, output_true)`: If `pred` is true, data will be forwarded
    to `output_true`, otherwise it goes to `output_false`.

  Raises:
    TypeError: if data is not a Tensor or IndexedSlices
  """
  data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
  with ops.device(data.device):
    if isinstance(data, ops.Tensor) and data.dtype.is_ref_dtype:
      return ref_switch(data, pred, name=name)
    return switch(data, pred, name=name)
def _convert_tensorarrays_to_flows(tensors_or_tensor_arrays):
  """Replace each TensorArray in the list with its flow tensor."""
  flows = []
  for ta in tensors_or_tensor_arrays:
    if isinstance(ta, tensor_array_ops.TensorArray):
      flows.append(ta.flow)
    else:
      flows.append(ta)
  return flows
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d"
% (len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
tensor_array_ops.TensorArray(
dtype=ta.dtype, handle=ta.handle, flow=t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray)
else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)]
class ControlFlowOpWrapper(object):
  """A wrapper class for Operation.

  A wrapped op allows us to capture the uses of its inputs and outputs. In
  gradients(), right before calling the gradient function of an op, we wrap
  the op by calling MakeWrapper. So during the execution of the gradient
  function of an op, any time when one of its inputs/outputs is used, we
  generate code to remember its values for all iterations.
  """

  class _ControlFlowOpInputs(object):
    """An indirection to capture the input tensors needed in backprop."""

    def __init__(self, op, grad_state):
      self._op = op
      self._grad_state = grad_state
      self._inputs = None  # lazily-built per-index cache of captured values

    def __len__(self):
      return len(self._op._inputs)

    def __getitem__(self, index):
      # Resolve inputs lazily: each index is captured (via GetRealValue)
      # the first time it is touched and cached thereafter.
      if self._inputs is None:
        self._inputs = [None for _ in self._op.inputs]
      if isinstance(index, int):
        val = self._inputs[index]
        if val is None:
          f_val = self._op.inputs[index]
          val = self._grad_state.GetRealValue(f_val)
          self._inputs[index] = val
        return val
      elif isinstance(index, slice):
        # Slices are expanded element-by-element through the int path above.
        start, stop, step = index.indices(len(self))
        vals = [self[i] for i in xrange(start, stop, step)]
        return vals
      else:
        raise TypeError("index must be an integer or slice")

  class _ControlFlowOpOutputs(object):
    """An indirection to capture the output tensors needed in backprop."""

    def __init__(self, op, grad_state):
      self._op = op
      self._grad_state = grad_state
      self._outputs = None  # lazily-built per-index cache of captured values

    def __len__(self):
      return len(self._op._outputs)

    def __getitem__(self, index):
      # Same lazy capture-and-cache scheme as _ControlFlowOpInputs.
      if self._outputs is None:
        self._outputs = [None for _ in self._op.outputs]
      if isinstance(index, int):
        val = self._outputs[index]
        if val is None:
          f_val = self._op.outputs[index]
          val = self._grad_state.GetRealValue(f_val)
          self._outputs[index] = val
        return val
      elif isinstance(index, slice):
        start, stop, step = index.indices(len(self))
        vals = [self[i] for i in xrange(start, stop, step)]
        return vals
      else:
        raise TypeError("index must be an integer or slice")

  def __init__(self, op, grad_state):
    self._grad_state = grad_state  # The GradLoopState this op belongs to.
    self._op = op
    self._inputs = None
    self._outputs = None

  @property
  def grad_state(self):
    return self._grad_state

  @property
  def inputs(self):
    # Built on first access; wraps the op's inputs with capture logic.
    if self._inputs is None:
      self._inputs = self._ControlFlowOpInputs(self._op, self._grad_state)
    return self._inputs

  @property
  def outputs(self):
    # Built on first access; wraps the op's outputs with capture logic.
    if self._outputs is None:
      self._outputs = self._ControlFlowOpOutputs(self._op, self._grad_state)
    return self._outputs

  @property
  def op(self):
    return self._op

  @property
  def name(self):
    """Returns the name of this instance of op."""
    return self._op.name

  @property
  def _id(self):
    """Returns the unique id of this operation."""
    return self._op._id

  @property
  def device(self):
    """Returns the device of this operation.

    Returns:
      a string or None if the device was not set.
    """
    return self._op.device

  @property
  def type(self):
    """Returns the type of the op."""
    return self._op.type

  @property
  def graph(self):
    """The `Graph` that contains this operation."""
    return self._op.graph

  def get_attr(self, name):
    """Returns the value of the attr of this op with the given `name`."""
    return self._op.get_attr(name)

  def _get_control_flow_context(self):
    """Returns the control flow context of this op."""
    return self._op._get_control_flow_context()
def _IsLoopConstantEnter(op):
"""Returns true iff op is a loop invariant."""
is_enter = (op.type == "Enter" or op.type == "RefEnter")
return is_enter and op.get_attr("is_constant")
def _IsLoopExit(op):
return op.type == "Exit" or op.type == "RefExit"
class GradLoopState(object):
  """The state used for constructing the gradient graph for a while loop.

  We create a GradLoopState for each while loop in forward and its
  corresponding while loop in backprop. This gives us access to both
  the forward and the backprop WhileContexts.

  During the construction of gradient graph, any time when we detect
  a forward value that is needed for backprop, we create a history
  accumulator and add it to `history_map`. Any time when we backprop
  a loop switch op (in _SwitchGrad), we add the grad merge op in
  `switch_map`.
  """

  def __init__(self, forward_ctxt, outer_grad_state):
    # The grad loop state for the outer while loop.
    self._outer_grad_state = None

    # The while loop context for forward.
    self._forward_context = None

    # The loop counter added by AddForwardCounter. It is the value
    # of the loop counter for the next iteration.
    self._forward_index = None

    # A sync op for forward.
    self._forward_sync = None

    # The while loop context for backprop.
    self._grad_context = None

    # The loop counter added by AddBackPropCounter. It is the value
    # of the loop counter for the current iteration.
    self._grad_index = None

    # A sync op for backprop.
    self._grad_sync = None

    # Information needed by backprop.
    self._history_map = {}
    self._switch_map = {}

    self._outer_grad_state = outer_grad_state
    if outer_grad_state:
      outer_forward_ctxt = outer_grad_state.forward_context
    else:
      outer_forward_ctxt = forward_ctxt.outer_context

    # Add the forward loop counter.
    # (Built inside the *outer* context so it sits outside this loop's body.)
    if outer_forward_ctxt: outer_forward_ctxt.Enter()
    cnt, forward_index = forward_ctxt.AddForwardCounter()
    if outer_forward_ctxt: outer_forward_ctxt.Exit()
    self._forward_context = forward_ctxt
    self._forward_index = forward_index

    # Add the backprop WhileContext, and the backprop loop counter.
    if outer_grad_state:
      # This is a nested loop. Remember the iteration counts for each
      # execution of this inner loop.
      outer_forward_ctxt.AddName(cnt.name)
      history_cnt = outer_grad_state.AddForwardAccumulator(cnt)

      outer_grad_ctxt = outer_grad_state.grad_context
      outer_grad_ctxt.Enter()
      self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
                                        forward_ctxt.back_prop,
                                        forward_ctxt.name)
      real_cnt = outer_grad_state.AddBackPropAccumulatedValue(history_cnt, cnt)
      self._grad_index = self._grad_context.AddBackPropCounter(real_cnt)
      outer_grad_ctxt.Exit()
    else:
      if outer_forward_ctxt: outer_forward_ctxt.Enter()
      self._grad_context = WhileContext(forward_ctxt.parallel_iterations,
                                        forward_ctxt.back_prop,
                                        forward_ctxt.name)
      self._grad_index = self._grad_context.AddBackPropCounter(cnt)
      if outer_forward_ctxt: outer_forward_ctxt.Exit()

  @property
  def outer_grad_state(self):
    """The grad loop state for outer loop."""
    return self._outer_grad_state

  @property
  def forward_context(self):
    """The while loop context for forward."""
    return self._forward_context

  @property
  def forward_index(self):
    """The loop index of forward loop."""
    return self._forward_index

  @property
  def forward_sync(self):
    """A control trigger node for synchronization in the forward loop.

    One main use is to keep the push ops of a stack executed in the
    iteration order.
    """
    if self._forward_sync is None:
      with ops.control_dependencies(None):
        self._forward_sync = control_trigger(name="f_sync")
      self._forward_sync._set_control_flow_context(self._forward_context)
      self._forward_index.op._add_control_input(self._forward_sync)
    return self._forward_sync

  @property
  def grad_context(self):
    """The corresponding WhileContext for gradient."""
    return self._grad_context

  @property
  def grad_index(self):
    """The loop index of backprop loop."""
    return self._grad_index

  @property
  def grad_sync(self):
    """A control trigger node for synchronization in the grad loop.

    One main use is to keep the pop ops of a stack executed in the
    iteration order.
    """
    if self._grad_sync is None:
      with ops.control_dependencies(None):
        self._grad_sync = control_trigger(name="b_sync")
      self._grad_sync._set_control_flow_context(self._grad_context)
      self._grad_index.op._add_control_input(self._grad_sync)
    return self._grad_sync

  @property
  def history_map(self):
    """The map that records all the tensors needed for backprop."""
    return self._history_map

  @property
  def switch_map(self):
    """The map that records all the Switch ops for the While loop."""
    return self._switch_map

  def AddForwardAccumulator(self, value, dead_branch=False):
    """Add an accumulator for each forward tensor that is needed in backprop.

    This is added to the forward loop at the first time when a tensor
    in the forward loop is used by backprop gradient computation loop.
    We create an accumulator that accumulates the value of tensor at each
    iteration. Called in the control flow context where gradients() is called.

    The pseudocode is:
    ```
      acc = stack();
      while (_pivot) {
        acc = stack_push(acc, value);
      }
    ```

    We make sure that the stack push op in one iteration is executed before
    next iteration. This is achieved by adding a control edge from
    `forward_index.op.inputs[0].op` to the push op, and another control
    edge from the push op to either `forward_index.op` or `forward_sync`.

    Args:
      value: The tensor that is to be accumulated.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The stack that contains the accumulated history of the tensor.
    """
    # TODO(yuanbyu): Make sure the colocation of stack ops and value.
    # pylint: disable=protected-access
    acc = gen_data_flow_ops._stack(value.dtype.base_dtype, name="f_acc")
    # pylint: enable=protected-access
    # Make acc available in the forward context.
    enter_acc = self.forward_context.AddValue(acc)
    # Add the stack_push op in the context of value.op.
    value_ctxt = value.op._get_control_flow_context()
    if _IsLoopExit(value.op):
      value_ctxt = value_ctxt.outer_context
    if value_ctxt == self.forward_context:
      # value is not nested in the forward context.
      self.forward_context.Enter()
      push = gen_data_flow_ops._stack_push(enter_acc, value)
      # Protect stack push and order it before forward_index.
      self.forward_index.op._add_control_input(push.op)
      self.forward_context.Exit()
    else:
      # value is in a cond context within the forward context.
      assert isinstance(value_ctxt, CondContext)
      if dead_branch:
        # The special case for creating a zero tensor for a dead
        # branch of a switch. See ControlFlowState.ZerosLike().
        value_ctxt.outer_context.Enter()
        push = gen_data_flow_ops._stack_push(enter_acc, value)
        value_ctxt.outer_context.Exit()
        # Guard with a switch but take the other branch.
        pred = self.history_map.get(value_ctxt.pred.name)
        branch = value_ctxt.branch
        value_ctxt.AddName(push.name)
        value_ctxt.Enter()
        push = _SwitchRefOrTensor(push, pred)[1 - branch]
        value_ctxt.Exit()
      else:
        value_ctxt.Enter()
        push = gen_data_flow_ops._stack_push(enter_acc, value)
        value_ctxt.Exit()
      # Protect stack push and order it before forward_sync.
      self.forward_sync._add_control_input(push.op)
      # Order stack push after the successor of forward_index
      add_op = self.forward_index.op.inputs[0].op
      push.op._add_control_input(add_op)
    return acc

  def AddBackPropAccumulatedValue(self, history_value, value,
                                  dead_branch=False):
    """Add the getter for an accumulated value in the grad context.

    This is added to the backprop loop. Called in the grad context to
    get the value of an accumulated value. The stack pop op must be guarded
    by the pred of the controlling cond.

    Args:
      history_value: The history (a stack) of a value.
      value: The value that is pushed onto the stack.
      dead_branch: True iff the tensor is on a dead branch of a cond.

    Returns:
      The current value (the top of the stack).
    """
    history_ctxt = history_value.op._get_control_flow_context()
    # Find the cond context that controls history_value.
    cond_ctxt = None
    value_ctxt = value.op._get_control_flow_context()
    while value_ctxt and value_ctxt != history_ctxt:
      if isinstance(value_ctxt, CondContext):
        cond_ctxt = value_ctxt
        break
      value_ctxt = value_ctxt.outer_context
    if cond_ctxt:
      # Guard stack pop with a switch if it is controlled by a cond
      grad_state = self
      pred = None
      # Walk outward through the grad states until the pred's history is found.
      while not pred and grad_state:
        pred = grad_state.history_map.get(cond_ctxt.pred.name)
        grad_state = grad_state.outer_grad_state
      branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
      history_value = _SwitchRefOrTensor(history_value, pred)[branch]
    pop = gen_data_flow_ops._stack_pop(history_value, value.dtype.base_dtype)
    if self.grad_context.parallel_iterations > 1:
      # All pops are ordered after pivot_for_body and before grad_sync.
      self.grad_sync._add_control_input(pop.op)
    return pop

  def GetRealValue(self, value):
    """Get the real value.

    If backprop "uses" a value produced by forward inference, an
    accumulator is added in the forward loop to accumulate its values.
    We use the accumulated value.

    Args:
      value: A tensor to be captured.

    Returns:
      The same tensor value from the saved history.
    """
    assert value.op.type != "Variable"
    real_value = self._history_map.get(value.name)
    if real_value is None:
      if _IsLoopConstantEnter(value.op):
        # Special case for loop invariant.
        if self._outer_grad_state:
          # This is a nested loop so we record the history of this
          # value in outer_forward_ctxt.
          self._grad_context.Exit()
          outer_value = value.op.inputs[0]
          history_value = self._outer_grad_state.AddForwardAccumulator(
              outer_value)
          self._grad_context.Enter()
        else:
          # Just use the input value of this Enter node.
          real_value = GetRealOp(value.op).inputs[0]
      else:
        # Record the history of this value in forward_ctxt.
        # NOTE(yuanbyu): Don't record for constants.
        self._grad_context.Exit()
        history_value = self.AddForwardAccumulator(value)
        self._grad_context.Enter()
      if real_value is None:
        # Add the stack pop op in the grad context.
        real_value = self.AddBackPropAccumulatedValue(history_value, value)
      self._history_map[value.name] = real_value
    return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
  """Maintain the mapping from the loops to their grad states.

  Each while loop (keyed by its forward WhileContext) maps to the
  GradLoopState that holds the state needed to build its backprop loop.
  """

  def __init__(self):
    self._map = {}   # maps forward loop context to GradLoopState

  def _GetGradState(self, op):
    """Return the GradLoopState of the while loop containing `op`, or None."""
    forward_ctxt = _GetWhileContext(op)
    if forward_ctxt is None:
      return None
    return self._map.get(forward_ctxt)

  def MakeWrapper(self, op):
    """Make a wrapper for `op` if it is in a WhileContext."""
    grad_state = self._GetGradState(op)
    if grad_state:
      return ControlFlowOpWrapper(op, grad_state)
    return op

  def GetAllLoopExits(self):
    """Return a list containing the exits of all the loops."""
    loop_exits = []
    for forward_ctxt in self._map:
      for loop_exit in forward_ctxt.loop_exits:
        loop_exits.append(loop_exit)
    return loop_exits

  def EnterGradWhileContext(self, op):
    """Enter the WhileContext for gradient computation."""
    grad_state = self._GetGradState(op)
    if grad_state:
      grad_state.grad_context.Enter()

  def ExitGradWhileContext(self, op):
    """Exit the WhileContext for gradient computation."""
    grad_state = self._GetGradState(op)
    if grad_state:
      grad_state.grad_context.Exit()

  def AddWhileContext(self, op, between_op_list, between_ops):
    """Add the grad state for the while loop that `op` belongs to.

    Note that `op` is an Exit, and this method must be called in
    the control flow context where gradients() is called.

    Note that this method modifies `between_op_list` and `between_ops`.

    Args:
      op: An Exit op of the while loop.
      between_op_list: List of ops reached between the gradient ys and xs.
      between_ops: Boolean list indexed by op id; True iff the op is reached.
    """
    forward_ctxt = _GetWhileContext(op)
    grad_state = self._map.get(forward_ctxt)
    if grad_state is None:
      # This is a new while loop so create a grad state for it.
      outer_forward_ctxt = forward_ctxt.outer_context
      if outer_forward_ctxt:
        outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
      outer_grad_state = None
      if outer_forward_ctxt:
        outer_grad_state = self._map.get(outer_forward_ctxt)
      grad_state = GradLoopState(forward_ctxt, outer_grad_state)
      self._map[forward_ctxt] = grad_state

      # We need to include all exits of a loop for backprop.
      for loop_exit in forward_ctxt.loop_exits:
        if not between_ops[loop_exit.op._id]:
          between_ops[loop_exit.op._id] = True
          between_op_list.append(loop_exit.op)

  def ZerosLikeForExit(self, val):
    """Create zeros_like gradient for a loop exit.

    If the result of a loop variable is not used but is involved in
    computing the result of some needed loop variable, we create a
    zero-valued tensor that is fed as gradient for the Exit node of that
    loop variable. Note that val.op is an Exit, and this method must be
    called in the control flow context where gradients() is called.

    Args:
      val: The output tensor of an Exit op.

    Returns:
      A zero tensor of the same shape of val.
    """
    val_shape = val.get_shape()
    forward_ctxt = val.op._get_control_flow_context()
    outer_forward_ctxt = forward_ctxt.outer_context
    if outer_forward_ctxt:
      outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
    outer_grad_state = None
    if outer_forward_ctxt:
      outer_grad_state = self._map.get(outer_forward_ctxt)
    if outer_grad_state:
      # This is a nested loop.
      if val_shape.is_fully_defined():
        # If the shape is known statically, just create a zero tensor
        # with the right shape in the right context.
        outer_grad_state.grad_context.Enter()
        result = array_ops.zeros(val_shape.dims, val.dtype)
        outer_grad_state.grad_context.Exit()
      else:
        # The shape is unknown statically: accumulate `val` in the outer
        # forward loop and replay it in the outer grad loop to get a
        # runtime value with the right shape.
        history_val = outer_grad_state.AddForwardAccumulator(val)
        outer_grad_ctxt = outer_grad_state.grad_context
        outer_grad_ctxt.Enter()
        real_val = outer_grad_state.AddBackPropAccumulatedValue(
            history_val, val)
        result = array_ops.zeros_like(real_val)
        outer_grad_ctxt.Exit()
    else:
      # This is not a nested loop.
      if val_shape.is_fully_defined():
        # If the shape is known statically, just create a zero tensor
        # with the right shape.
        result = array_ops.zeros(val_shape.dims, val.dtype)
      else:
        result = array_ops.zeros_like(val)
    return result

  def ZerosLike(self, op, index):
    """Create zeros_like for the specified output of an op.

    This method must be called in the grad loop context.

    Args:
      op: A tensorflow operation.
      index: the index for a specific output of the op.

    Returns:
      A zero tensor of the same shape of op.outputs[index].
    """
    if IsLoopSwitch(op): return None
    dead_branch = op.type in {"Switch", "RefSwitch"}
    forward_ctxt = _GetWhileContext(op)
    if forward_ctxt is None:
      return array_ops.zeros_like(op.outputs[index])
    op_ctxt = op._get_control_flow_context()
    grad_state = self._map.get(forward_ctxt)
    val = ops.convert_to_tensor(op.outputs[index], name="tensor")
    shape = val.get_shape()
    if shape.is_fully_defined():
      # If the shape is known statically, just create a zero tensor with
      # the right shape in the grad loop context.
      result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
      if dead_branch:
        # op is a cond switch. Guard the zero tensor with a switch.
        pred = grad_state.history_map.get(op_ctxt.pred.name)
        branch = op_ctxt.branch
        result = _SwitchRefOrTensor(result, pred)[1 - branch]
    else:
      # Unknown shape so keep a history of the shape at runtime.
      # BUG FIX: the original called the local TensorShape object
      # (`shape(val)`) and later referenced an undefined `zero_shape`;
      # compute the runtime shape with array_ops.shape instead.
      op_ctxt.Enter()
      zeros_shape = array_ops.shape(val)
      op_ctxt.Exit()
      # Add forward accumulator for shape.
      grad_state.grad_context.Exit()
      history_shape = grad_state.AddForwardAccumulator(zeros_shape, dead_branch)
      grad_state.grad_context.Enter()
      # Create a zero tensor with the right shape.
      real_shape = grad_state.AddBackPropAccumulatedValue(
          history_shape, zeros_shape, dead_branch)
      result = array_ops.zeros(real_shape, val.dtype)
    return result
def GetRealOp(op):
  """Return `op` with any ControlFlowOpWrapper layers peeled off."""
  real_op = op
  while isinstance(real_op, ControlFlowOpWrapper):
    real_op = real_op.op
  return real_op
def MaybeCreateControlFlowState(between_op_list, between_ops):
  """Create the state for all the while loops involved in one gradients().

  A ControlFlowState is created only when while loops are involved in
  gradients(). In gradients(), control flow logic is invoked only when
  the returned state is not None.

  Note that this method modifies `between_op_list` and `between_ops`.
  """
  loop_state = None
  for op in between_op_list:
    if not _IsLoopExit(op):
      continue
    # Lazily create the state the first time a loop exit is seen.
    if loop_state is None:
      loop_state = ControlFlowState()
    loop_state.AddWhileContext(op, between_op_list, between_ops)
  return loop_state
def IsLoopSwitch(op):
  """Return true if `op` is the Switch of a While loop."""
  if op.type not in ("Switch", "RefSwitch"):
    return False
  # pylint: disable=protected-access
  ctxt = op._get_control_flow_context()
  # pylint: enable=protected-access
  return ctxt and isinstance(ctxt, WhileContext)
class ControlFlowContext(object):
  """The base class for control flow context.

  The usage pattern is a sequence of (Enter, Exit) followed by a final
  ExitResult.

  We maintain the following state for control flow contexts during graph
  construction:
   1. graph has _control_flow_context: the current context used to
      construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
   2. op has _control_flow_context: the context to which the op belongs.
      Set at the time the op is created. Immutable.
   3. A ControlFlowContext has _outer_context: the context in which this
      context is created. Set at the time a context is created. Immutable.
   4. A ControlFlowContext has _context_stack.
      Pushed and popped by ctxt.Enter() and ctxt.Exit()
  """

  def __init__(self):
    # The enclosing context at creation time; never changes afterwards.
    self._outer_context = ops.get_default_graph()._get_control_flow_context()
    # Contexts saved by Enter() and restored by Exit().
    self._context_stack = []
    # Names of values that have been already seen in this context.
    self._values = set()
    # Values referenced by but external to this context, keyed by name.
    self._external_values = {}

  @property
  def outer_context(self):
    """Return the context containing this context."""
    return self._outer_context

  def AddName(self, name):
    """Record `name` as a value seen in this context."""
    self._values.add(name)

  # pylint: disable=protected-access
  def Enter(self):
    """Enter this control flow context."""
    default_graph = ops.get_default_graph()
    self._context_stack.append(default_graph._get_control_flow_context())
    default_graph._set_control_flow_context(self)

  def Exit(self):
    """Exit this control flow context."""
    default_graph = ops.get_default_graph()
    default_graph._set_control_flow_context(self._context_stack.pop())
  # pylint: enable=protected-access

  def ExitResult(self, result):
    """Make a list of tensors available in the outer context."""
    if self._outer_context:
      for tensor in result:
        self._outer_context.AddName(tensor.name)

  def GetWhileContext(self):
    """Return the while context containing this context."""
    if self._outer_context:
      return self._outer_context.GetWhileContext()
    return None

  def MaybeAddToWhileContext(self, op):
    """Add a control dependency to the containing WhileContext.

    The added control dependency ensures that the outputs of this op
    belong to the WhileContext. Do nothing if the op is not contained
    in a WhileContext.

    Args:
      op: An operation.
    """
    while_ctxt = self.GetWhileContext()
    if while_ctxt is not None:
      # pylint: disable=protected-access
      op._add_control_input(while_ctxt.GetControlPivot().op)
      # pylint: enable=protected-access
class CondContext(ControlFlowContext):
  """The context for the conditional construct."""

  def __init__(self, pred, pivot, branch):
    """Creates a conditional context.

    Args:
      pred: The boolean tensor for the cond predicate.
      pivot: The predicate tensor in this branch.
      branch: 0 or 1 representing this branch.
    """
    ControlFlowContext.__init__(self)
    self._pred = pred  # The boolean tensor for the cond predicate
    self._pivot = pivot  # The predicate tensor in this branch
    self._branch = branch  # 0 or 1 representing this branch
    # Values considered to have been already seen in this context.
    self._values.add(pred.name)
    self._values.add(pivot.name)

  @property
  def pred(self):
    """The boolean tensor for the cond predicate."""
    return self._pred

  @property
  def pivot(self):
    """The predicate tensor in this branch."""
    return self._pivot

  @property
  def branch(self):
    """0 or 1: which branch of the cond this context builds (see cond())."""
    return self._branch

  def AddValue(self, val):
    """Add `val` to the current context and its outer context recursively.

    The external value is guarded by a Switch on the predicate so it is
    only consumed in this branch. The result is cached in
    `_external_values` so repeated requests return the same tensor.
    """
    result = val
    if val.name not in self._values:
      self._values.add(val.name)
      if self._outer_context:
        result = self._outer_context.AddValue(val)
        self._values.add(result.name)
      # Build the Switch outside any existing control dependencies.
      with ops.control_dependencies(None):
        result = _SwitchRefOrTensor(result, self._pred)[self._branch]
      # pylint: disable=protected-access
      result.op._set_control_flow_context(self)
      # pylint: enable=protected-access
      self._values.add(result.name)
      self._external_values[val.name] = result
    return result

  def AddOp(self, op):
    """Add `op` to the current context."""
    if not op.inputs:
      # Add this op to the enclosing while context
      self.MaybeAddToWhileContext(op)
      # A source-less op is sequenced after the branch pivot so it only
      # runs when this branch is taken.
      # pylint: disable=protected-access
      op._add_control_input(self._pivot.op)
      # pylint: enable=protected-access
      for x in op.outputs:
        self._values.add(x.name)
    else:
      for index in range(len(op.inputs)):
        x = op.inputs[index]
        if x.name not in self._values:
          self._values.add(x.name)
          # Add this value to the parent contexts up to the context that
          # creates this value.
          real_x = x
          if self._outer_context:
            real_x = self._outer_context.AddValue(x)
            self._values.add(real_x.name)
          real_x = _SwitchRefOrTensor(real_x, self._pred)[self._branch]
          self._external_values[x.name] = real_x
        # Rewire the op input to the guarded (switched) value, if any.
        x = self._external_values.get(x.name)
        if x is not None:
          op._update_input(index, x)
      for x in op.outputs:
        self._values.add(x.name)

  def BuildCondBranch(self, fn):
    """Add the subgraph defined by fn() to the graph.

    Returns the list of output tensors of the branch, with each
    Operation result replaced by an identity of the pivot (so it has a
    tensor stand-in) and each external tensor guarded by a Switch.
    """
    r = fn()
    result = []
    if r is not None:
      if not isinstance(r, list) and not isinstance(r, _basetuple):
        r = [r]
      for v in r:
        real_v = v
        if isinstance(v, ops.Operation):
          # Use pivot as the proxy for this op.
          real_v = with_dependencies([v], self._pivot)
        elif v.name not in self._values:
          # Handle the special case of lambda: x
          self._values.add(v.name)
          if self._outer_context:
            real_v = self._outer_context.AddValue(v)
            self._values.add(real_v.name)
          real_v = _SwitchRefOrTensor(real_v, self._pred)[self._branch]
          self._external_values[v.name] = real_v
        else:
          # The value was produced (or already guarded) in this context.
          external_v = self._external_values.get(v.name)
          if external_v is not None:
            real_v = external_v
        result.append(real_v)
    return result
def cond(pred, fn1, fn2, name=None):
  """Return either fn1() or fn2() based on the boolean predicate `pred`.

  `fn1` and `fn2` both return lists of output tensors. `fn1` and `fn2` must have
  the same non-zero number and type of outputs.

  Args:
    pred: A scalar determining whether to return the result of `fn1` or `fn2`.
    fn1: The function to be performed if pred is true.
    fn2: The function to be performed if pred is false.
    name: Optional name prefix for the returned tensors.

  Returns:
    Tensors returned by the call to either `fn1` or `fn2`. If the functions
    return a singleton list, the element is extracted from the list.

  Raises:
    TypeError: if `fn1` or `fn2` is not callable.
    ValueError: if `fn1` and `fn2` do not return the same number of tensors, or
      return tensors of different types.

  Example:

  ```python
    x = constant(2)
    y = constant(5)
    def f1(): return constant(17)
    def f2(): return constant(23)
    r = cond(math_ops.less(x, y), f1, f2)
    # r is set to f1()
  ```

  """
  with ops.op_scope([pred], name, "cond") as name:
    if not callable(fn1):
      raise TypeError("fn1 must be callable.")
    if not callable(fn2):
      raise TypeError("fn2 must be callable.")

    # Add the Switch to the graph.
    if isinstance(pred, bool):
      raise TypeError("pred must not be a Python bool")
    # switch() returns (output_false, output_true): pivot_1 flows only when
    # pred is true, pivot_2 only when it is false.
    p_2, p_1 = switch(pred, pred)
    pivot_1 = array_ops.identity(p_1, name="switch_t")
    pivot_2 = array_ops.identity(p_2, name="switch_f")
    pred = array_ops.identity(pred, name="pred_id")

    # Build the graph for the true branch in a new context.
    context_t = CondContext(pred, pivot_1, 1)
    context_t.Enter()
    res_t = context_t.BuildCondBranch(fn1)
    context_t.ExitResult(res_t)
    context_t.Exit()

    # Build the graph for the false branch in a new context.
    context_f = CondContext(pred, pivot_2, 0)
    context_f.Enter()
    res_f = context_f.BuildCondBranch(fn2)
    context_f.ExitResult(res_f)
    context_f.Exit()

    # Add the final merge to the graph.
    if len(res_t) != len(res_f):
      raise ValueError("fn1 and fn2 must return the same number of results.")
    if not res_t:
      raise ValueError("fn1 and fn2 must return at least one result.")
    # Pairwise type check: both branches must yield matching Tensor /
    # IndexedSlices kinds with identical base dtypes.
    for x, y in zip(res_f, res_t):
      assert ((isinstance(x, ops.IndexedSlices) and
               isinstance(y, ops.IndexedSlices)) or
              (isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
      val_x = x if isinstance(x, ops.Tensor) else x.values
      val_y = y if isinstance(y, ops.Tensor) else y.values
      if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
        raise ValueError("Outputs of fn1 and fn2 must have the same type: "
                         "%s, %s" % (val_x.dtype.name, val_y.dtype.name))
    # Only one input of each merge is produced at runtime, so the merge
    # forwards whichever branch actually executed.
    merges = [merge([x[0], x[1]])[0] for x in zip(res_f, res_t)]
    return merges[0] if len(merges) == 1 else merges
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
  """The context for the loop construct."""

  def __init__(self, parallel_iterations, back_prop, name):
    """Creates a while loop context.

    Args:
      parallel_iterations: The number of iterations allowed to run
        in parallel.
      back_prop: Whether backprop is enabled for this while loop.
      name: Name prefix for ops created in this context.
    """
    ControlFlowContext.__init__(self)
    self._name = ops.get_default_graph().unique_name(name)
    self._parallel_iterations = parallel_iterations
    self._back_prop = back_prop
    # We use this node to control constants created by the pred lambda.
    self._pivot_for_pred = None
    # We use this node to control constants created by the body lambda.
    self._pivot_for_body = None
    # The boolean tensor for loop termination condition. Used in code
    # generation for gradient computation
    self._pivot = None
    # The list of exit tensors for loop variables.
    self._loop_exits = None

  @property
  def name(self):
    """The unique name of this loop context."""
    return self._name

  @property
  def parallel_iterations(self):
    """The number of iterations allowed to run in parallel."""
    return self._parallel_iterations

  @property
  def back_prop(self):
    """True iff backprop is enabled for this While loop."""
    return self._back_prop

  @property
  def pivot(self):
    """The boolean tensor representing the loop termination condition."""
    return self._pivot

  @property
  def loop_exits(self):
    """The list of exit tensors for loop variables."""
    return self._loop_exits

  def GetWhileContext(self):
    # A WhileContext is its own while context.
    return self

  def GetControlPivot(self):
    """The pivot used to anchor constants: body pivot once available."""
    if self._pivot_for_body:
      return self._pivot_for_body
    return self._pivot_for_pred

  def AddValue(self, val):
    """Add `val` to the current context and its outer context recursively."""
    result = val
    if val.name not in self._values:
      self._values.add(val.name)
      if self._outer_context is not None:
        result = self._outer_context.AddValue(val)
      # Create an Enter to make `result` known to this loop context.
      with ops.control_dependencies(None):
        enter = _Enter(result, self._name, is_constant=True,
                       parallel_iterations=self._parallel_iterations)
      # pylint: disable=protected-access
      enter.op._set_control_flow_context(self)
      # pylint: enable=protected-access
      # Add `enter` in this context.
      self._values.add(enter.name)
      self._external_values[val.name] = enter
      result = enter
    else:
      # Already seen; reuse the Enter created previously, if any.
      actual_val = self._external_values.get(val.name)
      if actual_val is not None:
        result = actual_val
    return result

  def AddOp(self, op):
    """Adds `op` to the current context."""
    if not op.inputs:
      if not op.control_inputs:
        # Add a control edge from the control pivot to this op.
        # pylint: disable=protected-access
        op._add_control_input(self.GetControlPivot().op)
        # pylint: enable=protected-access
      else:
        # Control edges must be in the same context.
        # NOTE(review): reconstructed check — each control input must
        # already live in this while context.
        for r in op.control_inputs:
          assert r._get_control_flow_context() == self, (
              "Control inputs must come from Operations in the same while "
              "loop context (not an outer context).")
      for x in op.outputs:
        self._values.add(x.name)
    else:
      for index in range(len(op.inputs)):
        x = op.inputs[index]
        self.AddValue(x)
        real_x = self._external_values.get(x.name)
        if real_x is not None:
          op._update_input(index, real_x)
          # Add a control dependency to prevent loop invariants from
          # enabling ops that should not be executed.
          if real_x.op.type == "RefEnter" and real_x.op.get_attr("is_constant"):
            # pylint: disable=protected-access
            op._add_control_input(self.GetControlPivot().op)
            # pylint: enable=protected-access
      for x in op.outputs:
        self._values.add(x.name)

  def AddForwardCounter(self):
    """Adds a loop that counts the number of iterations.

    This is added to the forward loop at the time when we start to
    create the loop for backprop gradient computation. Called in
    the outer context of this forward context.

    The pseudocode is:
      `n = 0; while (_pivot) { n++; }`

    Returns:
      The number of iterations taken by the forward loop and the loop index.
    """
    n = constant_op.constant(0, name="f_count")
    assert n.op._get_control_flow_context() == self.outer_context
    self.Enter()
    self.AddName(n.name)
    enter_n = _Enter(n, self._name, is_constant=False,
                     parallel_iterations=self._parallel_iterations,
                     name="f_count")
    # Merge/switch/next-iteration form the canonical loop skeleton; the
    # merge's second input is patched to the back edge below.
    merge_n = merge([enter_n, enter_n])[0]
    switch_n = switch(merge_n, self._pivot)
    index = math_ops.add(switch_n[1], 1)
    next_n = _NextIteration(index)
    merge_n.op._update_input(1, next_n)
    total_iterations = exit(switch_n[0], name="f_count")
    self.ExitResult([total_iterations])
    self.Exit()
    return total_iterations, next_n

  def AddBackPropCounter(self, count):
    """Add the backprop loop that controls the iterations.

    This is added to the backprop loop. It is used to control the loop
    termination of the backprop loop. Called in the outer context of
    this grad context.

    The pseudocode is:
      `n = count; while (n >= 1) { n--; }`

    Args:
      count: The number of iterations for backprop.

    Returns:
      The loop index.
    """
    one = constant_op.constant(1, name="b_count")
    self.Enter()
    self.AddName(count.name)
    enter_count = _Enter(count, self._name, is_constant=False,
                         parallel_iterations=self._parallel_iterations,
                         name="b_count")
    merge_count = merge([enter_count, enter_count])[0]
    self._pivot_for_pred = merge_count
    # Loop while the counter is still >= 1.
    cond = math_ops.greater_equal(merge_count, one)
    self._pivot = loop_cond(cond, name="b_count")
    switch_count = switch(merge_count, self._pivot)
    index = math_ops.sub(switch_count[1], one)
    self._pivot_for_body = index
    next_count = _NextIteration(index)
    merge_count.op._update_input(1, next_count)
    self.Exit()
    return next_count

  def AddBackPropAccumulator(self, value):
    """Add an accumulation loop for every loop invariant.

    This is added to the backprop loop. It is used to accumulate
    partial gradients within each loop iteration. Called when in the
    gradient while context.

    The pseudocode is:
      ```
      acc = 0.0;
      while (_pivot) {
        acc += value;
      }
      ```

    Args:
      value: The partial gradient of an iteration for a loop invariant.

    Returns:
      The gradient for a loop invariant.
    """
    # The zero initializer must live outside this grad loop.
    self.Exit()
    if self.outer_context: self.outer_context.Enter()
    acc = constant_op.constant(0, value.dtype, name="b_acc")
    if self.outer_context: self.outer_context.Exit()
    self.Enter()
    self.AddName(acc.name)
    enter_acc = _Enter(acc, self._name, is_constant=False,
                       parallel_iterations=self._parallel_iterations,
                       name="b_acc")
    merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
    switch_acc = switch(merge_acc, self._pivot)
    add_acc = math_ops.add(switch_acc[1], value)
    next_acc = _NextIteration(add_acc)
    merge_acc.op._update_input(1, next_acc)
    acc_result = exit(switch_acc[0], name="b_acc")
    self.ExitResult([acc_result])
    return acc_result

  def BuildLoop(self, pred, body, loop_vars):
    """Add the loop termination condition and body to the graph."""
    # Keep original_loop_vars to identify which are TensorArrays
    original_loop_vars = loop_vars
    # Convert TensorArrays to their flow variables
    loop_vars = _convert_tensorarrays_to_flows(loop_vars)
    loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
    # Let the context know the loop variables so the loop variables
    # would be added in the outer contexts properly.
    self._values = set([x.name for x in loop_vars])
    real_vars = loop_vars
    if self._outer_context:
      real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
    with ops.control_dependencies(None):
      enter_vars = [_Enter(x, self._name, is_constant=False,
                           parallel_iterations=self._parallel_iterations)
                    for x in real_vars]
    for x in enter_vars:
      x.op._set_control_flow_context(self)  # pylint: disable=protected-access
    self._values = set([x.name for x in enter_vars])

    # Each loop variable gets a merge whose back edge is patched below.
    merge_vars = [merge([x, x])[0] for x in enter_vars]
    self._pivot_for_pred = merge_vars[0]

    # Build the graph for pred.
    merge_vars_with_tensor_arrays = (
        _convert_flows_to_tensorarrays(original_loop_vars, merge_vars))
    c = ops.convert_to_tensor(pred(*merge_vars_with_tensor_arrays))
    self._pivot = loop_cond(c, name="LoopCond")
    switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]

    # Build the graph for body.
    vars_for_body = [_Identity(x[1]) for x in switch_vars]
    self._pivot_for_body = vars_for_body[0]
    # Convert TensorArray flow variables inside the context back into
    # their associated TensorArrays for calling the body.
    vars_for_body_with_tensor_arrays = (
        _convert_flows_to_tensorarrays(original_loop_vars, vars_for_body))
    body_result = body(*vars_for_body_with_tensor_arrays)
    if not isinstance(body_result, collections.Sequence):
      body_result = [body_result]
    # Store body_result to keep track of TensorArrays returned by body
    original_body_result = body_result
    # Convert TensorArrays returned by body into their flow variables
    result = _convert_tensorarrays_to_flows(body_result)
    result = ops.convert_n_to_tensor_or_indexed_slices(result)
    next_vars = [_NextIteration(x) for x in result]

    # Add the back edges to complete the loop.
    assert len(merge_vars) == len(next_vars)
    for x in zip(merge_vars, next_vars):
      x[0].op._update_input(1, x[1])

    # Add the exit ops.
    exit_vars = [exit(x[0]) for x in switch_vars]
    self._loop_exits = exit_vars

    # Propagate static shape info to the exits when enter/next agree.
    for m_var, n_var, e_var in zip(merge_vars, next_vars, exit_vars):
      if m_var.get_shape().is_compatible_with(n_var.get_shape()):
        e_var.set_shape(m_var.get_shape().merge_with(n_var.get_shape()))

    # Exit the loop.
    self.ExitResult(exit_vars)
    # Convert TensorArray flow variables outside the context back into
    # their associated TensorArrays for returning to caller.
    exit_vars_with_tensor_arrays = (
        _convert_flows_to_tensorarrays(original_body_result, exit_vars))
    return (exit_vars_with_tensor_arrays[0]
            if len(exit_vars) == 1
            else exit_vars_with_tensor_arrays)
def While(cond, body, loop_vars, parallel_iterations=10, back_prop=True,
          name=None):
  """Repeat `body` while the condition `cond` is true.

  `cond` is a function taking a list of tensors and returning a boolean scalar
  tensor. `body` is a function taking a list of tensors and returning a list of
  tensors of the same length and with the same types as the input. `loop_vars`
  is a list of tensors that is passed to both `cond` and `body`.

  In addition to regular Tensors or IndexedSlices, the body may accept and
  return TensorArray objects.  The flows of the TensorArray objects will
  be appropriately forwarded between loops and during gradient calculations.

  While `cond` evaluates to true, `body` is executed.

  Args:
    cond: The termination condition of the loop.
    body: A function that represents the loop body.
    loop_vars: The list of variable input tensors.
    parallel_iterations: The number of iterations allowed to run in parallel.
    back_prop: Whether backprop is enabled for this while loop.
    name: Optional name prefix for the returned tensors.

  Returns:
    The output tensors for the loop variables after the loop.

  Raises:
    TypeError: if `cond` or `body` is not callable.
    ValueError: if `loop_var` is empty.

  Example:
    ```python
    i = Constant(0)
    c = lambda i: math_ops.less(i, 10)
    b = lambda i: math_ops.add(i, 1)
    r = While(c, b, [i])
    ```
  """
  with ops.op_scope(loop_vars, name, "While") as name:
    # Validate arguments before touching the graph.
    if not loop_vars:
      raise ValueError("No loop variables provided")
    if not callable(cond):
      raise TypeError("cond must be callable.")
    if not callable(body):
      raise TypeError("body must be callable.")

    loop_context = WhileContext(parallel_iterations, back_prop, name)
    loop_context.Enter()
    loop_result = loop_context.BuildLoop(cond, body, loop_vars)
    loop_context.Exit()
    return loop_result
def _AsTensorList(x, p):
  """Return x as a list of Tensors or IndexedSlices.

  For entries of `x` that are Operations, this returns an Identity of `p`
  with a dependency on the operation.

  Args:
    x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
    p: A Tensor to return for entries in `x` that are Operations.

  Returns:
    A list of Tensors or IndexedSlices.
  """
  if not isinstance(x, (list, _basetuple)):
    x = [x]

  def _ToTensorOrSlices(v):
    # Operations produce no value; stand in an identity of `p` gated on them.
    if isinstance(v, ops.Operation):
      v = with_dependencies([v], p)
    v = ops.convert_to_tensor_or_indexed_slices(v)
    if isinstance(v, ops.Tensor):
      return array_ops.identity(v)
    return ops.IndexedSlices(array_ops.identity(v.values),
                             array_ops.identity(v.indices))

  return [_ToTensorOrSlices(v) for v in x]
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." %
(x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
  """Produces the content of `output_tensor` only after `dependencies`.

  In some cases, a user may want the output of an operation to be
  consumed externally only after some other dependencies have run
  first. This function ensures returns `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this means that there is
  no guarantee that `output_tensor` will be evaluated after any `dependencies`
  have run.

  See also `tuple` and `group`.

  Args:
    dependencies: A list of operations to run before this op finishes.
    output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
    name: (Optional) A name for this operation.

  Returns:
    Same as `output_tensor`.

  Raises:
    TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
  """
  with ops.op_scope(dependencies + [output_tensor], name,
                    "control_dependency") as name:
    # Place the identity on the tensor's device (or the graph default).
    device = (output_tensor.device
              or ops.get_default_graph().get_default_device())
    with ops.device(device):
      with ops.control_dependencies(dependencies):
        result = ops.convert_to_tensor_or_indexed_slices(output_tensor)
        if isinstance(result, ops.Tensor):
          return _Identity(result, name=name)
        # IndexedSlices: gate the values through the identity, keep the
        # indices and dense_shape as-is.
        return ops.IndexedSlices(_Identity(result.values, name=name),
                                 result.indices,
                                 result.dense_shape)
def _GroupControlDeps(dev, deps, name=None):
  """Create a NoOp on device `dev` that is control-dependent on `deps`."""
  with ops.control_dependencies(deps):
    if dev is None:
      return no_op(name=name)
    with ops.device(dev):
      return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
def group(*inputs, **kwargs):
  """Create an op that groups multiple operations.

  When this op finishes, all ops in `input` have finished. This op has no
  output.

  See also `tuple` and `with_dependencies`.

  Args:
    *inputs: One or more tensors to group.
    **kwargs: Optional parameters to pass when constructing the NodeDef.
      name: A name for this operation (optional).

  Returns:
    An Operation that executes all its inputs.

  Raises:
    ValueError: If an unknown keyword argument is provided, or if there are
      no inputs.
  """
  name = kwargs.pop("name", None)
  if kwargs:
    raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
  if not inputs:
    # TODO(touts): Would make sense to return a NoOp.
    raise ValueError("No inputs provided")
  with ops.op_scope(inputs, name, "group_deps") as name:
    # Bucket the inputs by the device they were placed on.
    ops_on_device = {}
    for inp in inputs:
      ops_on_device.setdefault(inp.device, []).append(inp)
    if len(ops_on_device) == 1:
      # 1-level tree. The root node is the returned NoOp node.
      (dev, deps), = ops_on_device.items()
      return _GroupControlDeps(dev, deps, name=name)

    # 2-level tree. The root node is the returned NoOp node.
    # deps contains 1 NoOp node for each device.
    def _DeviceKey(dev):
      # A sort key that allows None to be compared to strings.
      return "" if dev is None else dev

    deps = [_GroupControlDeps(dev, ops_on_device[dev])
            for dev in sorted(ops_on_device, key=_DeviceKey)]
    return _GroupControlDeps(None, deps, name=name)
def tuple(tensors, name=None, control_inputs=None):
  """Group tensors together.

  This creates a tuple of tensors with the same values as the `tensors`
  argument, except that the value of each tensor is only returned after the
  values of all tensors have been computed.

  `control_inputs` contains additional ops that have to finish before this op
  finishes, but whose outputs are not returned.

  This can be used as a "join" mechanism for parallel computations: all the
  argument tensors can be computed in parallel, but the values of any tensor
  returned by `tuple` are only available after all the parallel computations
  are done.

  See also `group` and `with_dependencies`.

  Args:
    tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
    name: (optional) A name to use as a `name_scope` for the operation.
    control_inputs: List of additional ops to finish before returning.

  Returns:
    Same as `tensors`; `None` entries are passed through unchanged.

  Raises:
    ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
    TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
      objects.
  """
  with ops.op_scope(tensors, name, "tuple") as name:
    # BUG FIX: use an explicit `is not None` test to detect the documented
    # `None` entries. The original used tensor truthiness (`if t:`), which
    # is not a reliable presence test for Tensor/IndexedSlices objects.
    gating_ops = [t.op for t in tensors if t is not None]
    if control_inputs:
      for c in control_inputs:
        if isinstance(c, ops.Tensor):
          c = c.op
        elif not isinstance(c, ops.Operation):
          raise TypeError("Control input must be Operation or Tensor: %s" % c)
        gating_ops.append(c)
    # Note that in order to ensure ordering in the pbtxt, we must take care to
    # ensure the order here.
    gating_ops = sorted(set(gating_ops), key=lambda op: op._id)  # Uniquify ops.
    if not gating_ops:
      raise ValueError("Must have at least one Tensor: %s" % tensors)
    gate = group(*gating_ops)
    tpl = []
    for t in tensors:
      if t is not None:
        # Gate each real tensor on the completion of all inputs.
        tpl.append(with_dependencies([gate], t))
      else:
        tpl.append(None)
    return tpl
# TODO(yuanbyu): It would be nicer if we could have the distributed list
# support that Derek has been proposing.
# TODO(yuanbyu, mrry): Handle stride to support sliding windows.
def fold(fn, elems, elem_shape, name=None):
  """The fold operator on slices of a tensor.

  This fold operator applies the function `fn` to slices of `elems` on
  dimension 0. The shape of the slices is specified by `elem_shape`. `elems`
  must contain at least one slice (`shape(elems)[0] / elem_shape[0] > 0`).

  Args:
    fn: The function to be performed on each slice of the tensor.
    elems: The tensor to whose slices we want to apply `fn`.
    elem_shape: The shape definition for the slices.
    name: Optional name prefix for the returned tensors.

  Returns:
    A tensor resulting from applying `fn` consecutively on each slice of
    `elems`.

  Raises:
    TypeError: if `fn` is not callable.
  """
  with ops.op_scope([elems], name, "fold") as name:
    if not callable(fn):
      raise TypeError("fn must be callable.")

    # Number of slices along dimension 0.
    s0 = array_ops.shape(elems)[0]
    d0 = elem_shape[0]
    n = math_ops.div(s0, d0)
    # b1 is a vector of zeros for the trailing dimensions of the slice
    # begin index (only the dim-0 offset varies per iteration).
    b1 = array_ops.zeros(array_ops.expand_dims(array_ops.rank(elems) - 1, 0),
                         dtype=dtypes.int32)
    # Initialize the output with slice 0
    b = array_ops.concat(0, [[0], b1])
    o = array_ops.slice(elems, b, elem_shape)
    i = ops.convert_to_tensor(d0)

    def Compute(i, o):
      # Fold the slice starting at offset `i` into the accumulator `o`.
      b = array_ops.concat(0, [array_ops.expand_dims(i, 0), b1])
      x = array_ops.slice(elems, b, elem_shape)
      o = fn(o, x)
      i = math_ops.add(i, d0)
      return [i, o]

    r = While(lambda i, o: math_ops.less(i, n), Compute, [i, o])
    # r = [final index, accumulated result]; return the accumulator.
    return r[1]
def case(pred_fn_pairs, default, exclusive=False, name="case"):
  """Create a case operation.

  The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
  Each pair contains a boolean scalar tensor and a python callable that
  creates the tensors to be returned if the boolean evaluates to True.
  `default` is a callable generating a list of tensors. All the callables
  in `pred_fn_pairs` as well as `default` should return the same number
  and types of tensors.

  If `exclusive==True`, all predicates are evaluated, and a logging operation
  with an error is returned if more than one of the predicates evaluates to
  True. If `exclusive==False`, execution stops are the first predicate which
  evaluates to True, and the tensors generated by the corresponding function
  are returned immediately. If none of the predicates evaluate to True, this
  operation returns the tensors generated by `default`.

  Example 1:
    Pseudocode:
    ```
    if (x < y) return 17;
    else return 23;
    ```
    Expressions:
    ```
    f1 = lambda: tf.constant(17)
    f2 = lambda: tf.constant(23)
    r = case([(tf.less(x, y), f1)], default=f2)
    ```

  Example 2:
    Pseudocode:
    ```
    if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
    if (x < y) return 17;
    else if (x > z) return 23;
    else return -1;
    ```
    Expressions:
    ```
    def f1(): return tf.constant(17)
    def f2(): return tf.constant(23)
    def f3(): return tf.constant(-1)
    r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
             default=f3, exclusive=True)
    ```

  Args:
    pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
                   callable which returns a list of tensors.
    default: A callable that returns a list of tensors.
    exclusive: True iff more than one predicate is allowed to evaluate to
               True.
    name: A name for this operation (optional).

  Returns:
    The tensors returned by the first pair whose predicate evaluated to True,
    or those returned by `default` if none does.

  Raises:
    TypeError: If `pred_fn_pairs` is not a list/dictionary.
    TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
    TypeError: If `fns[i]` is not callable for any i, or `default` is not
               callable.
  """
  pfp = pred_fn_pairs  # For readability
  if not (isinstance(pfp, list) or isinstance(pfp, _basetuple)
          or isinstance(pfp, dict)):
    raise TypeError("fns must be a list, tuple, or dict")
  if isinstance(pfp, dict):
    pfp = pfp.items()
    if not exclusive:
      logging.warn("%s: Provided dictionary of predicate/fn pairs, but "
                   "exclusive=False. Order of conditional tests is "
                   "not guaranteed.", name)
  for tup in pfp:
    if not isinstance(tup, _basetuple) or len(tup) != 2:
      raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
    pred, fn = tup
    if pred.dtype != dtypes.bool:
      # Bug fix: the message was previously passed as a second TypeError
      # argument (comma) instead of being %-formatted into the string.
      raise TypeError("pred must be of type bool: %s" % pred.name)
    if not callable(fn):
      raise TypeError("fn for pred %s must be callable." % pred.name)
  if not callable(default):
    raise TypeError("default must be callable.")
  preds, fns = map(list, zip(*pfp))
  with ops.op_scope([preds], name, "case"):
    if not preds:
      return default()
    not_preds = []
    for i, p in enumerate(preds):
      with ops.name_scope("not_%d" % i):
        not_preds.append(math_ops.logical_not(p))
    and_not_preds = [constant_op.constant(True, name="and_not_true")]
    for i, notp in enumerate(not_preds[:-1]):
      with ops.name_scope("and_not_%d" % i):
        and_not_preds.append(math_ops.logical_and(and_not_preds[-1], notp))
    # preds = [p1, p2, p3]
    # fns = [f1, f2, f3]
    # not_preds = [~p1, ~p2, ~p3]
    # case_preds = [p1 & True,
    #               p2 & ~p1,
    #               p3 & ~p1 & ~ p2]
    case_preds = []
    for i, (p, and_not_p_prev) in enumerate(zip(preds, and_not_preds)):
      with ops.name_scope("case_%d" % i):
        case_preds.append(math_ops.logical_and(p, and_not_p_prev))
    # case_sequence = [cond(p3 & ..., f3, default),
    #                  cond(p2 & ..., f2, lambda: case_sequence[0]),
    #                  ...
    #                  cond(p1 & True, f1, lambda: case_sequence[i-1])]
    # and prev_case_seq will loop from case_sequence[0] to case_sequence[-1]
    if exclusive:
      # TODO(ebrevdo): Add Where() for DT_BOOL, replace with Size(Where(preds))
      preds_c = array_ops.concat(0, preds, name="preds_c")
      num_true_conditions = math_ops.reduce_sum(
          math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
      at_most_one_true_condition = math_ops.less(
          num_true_conditions, constant_op.constant(2, name="two_true_conds"))
      error_msg = [
          ("More than one condition evaluated as True but "
           "exclusive=True. Conditions: (%s), Values:"
           % ", ".join([p.name for p in preds])),
          preds_c]
      with ops.control_dependencies([
          logging_ops.Assert(condition=at_most_one_true_condition,
                             data=error_msg, summarize=len(preds))]):
        prev_case_seq = None
        # Bug fix: zip() returns an iterator on Python 3, which is not
        # subscriptable; materialize it before taking the reversed slice.
        for i, (cp, fn) in enumerate(list(zip(case_preds, fns))[::-1]):
          prev_case_seq = cond(
              cp, fn,
              default if i == 0 else lambda: prev_case_seq,
              name="If_%d" % i)
    else:
      # Same chain as above; duplicated (rather than factored into a helper)
      # so the exclusive variant's Assert control dependency wraps its conds.
      prev_case_seq = None
      for i, (cp, fn) in enumerate(list(zip(case_preds, fns))[::-1]):
        prev_case_seq = cond(
            cp, fn,
            default if i == 0 else lambda: prev_case_seq,
            name="If_%d" % i)
    return prev_case_seq
# Shape functions for plumbing control-flow ops: Enter/NextIteration (and
# their Ref variants) pass their input through unchanged, Exit's output shape
# cannot be inferred, and ControlTrigger/NoOp produce no outputs.
ops.RegisterShape("Enter")(common_shapes.unchanged_shape)
ops.RegisterShape("Exit")(common_shapes.unknown_shape)
ops.RegisterShape("NextIteration")(common_shapes.unchanged_shape)
ops.RegisterShape("RefEnter")(common_shapes.unchanged_shape)
ops.RegisterShape("RefExit")(common_shapes.unknown_shape)
ops.RegisterShape("RefNextIteration")(common_shapes.unchanged_shape)
ops.RegisterShape("ControlTrigger")(common_shapes.no_outputs)
ops.RegisterShape("NoOp")(common_shapes.no_outputs)
@ops.RegisterShape("LoopCond")
def _LoopCondShape(op):
  """Shape function for the LoopCond op: the predicate must be a scalar."""
  input_shape = op.inputs[0].get_shape()
  return [input_shape.merge_with(tensor_shape.scalar())]
@ops.RegisterShape("Merge")
def _MergeShape(op):
  """Shape function for the Merge op.

  The Merge op takes many inputs of arbitrary shapes, and produces a
  first output that is one of those inputs, and a second scalar
  output.

  If all input shapes are known and have the same rank, the output
  shape must have that rank, otherwise the output shape is unknown.
  Each output dimension is specified only if that dimension in all
  inputs are the same.

  Args:
    op: A Merge Operation.

  Returns:
    A single-element list containing the Shape of the Merge op.
  """
  output_shape = op.inputs[0].get_shape()
  if output_shape.dims is None:
    # First input has unknown rank -> nothing can be inferred.
    return [tensor_shape.unknown_shape(), tensor_shape.scalar()]
  else:
    for input_ in op.inputs[1:]:
      input_shape = input_.get_shape()
      if input_shape.dims is None or input_shape.ndims != output_shape.ndims:
        # Unknown rank or rank mismatch makes the merged shape unknown.
        return [tensor_shape.unknown_shape(), tensor_shape.scalar()]
      else:
        # Keep a dimension's value only if every input agrees on it.
        output_shape = tensor_shape.TensorShape(
            [input_dim.value if input_dim.value == output_dim.value else None
             for input_dim, output_dim in zip(input_shape.dims,
                                              output_shape.dims)])
    return [output_shape, tensor_shape.scalar()]
# RefMerge has identical shape semantics to Merge.
ops.RegisterShape("RefMerge")(_MergeShape)
@ops.RegisterShape("RefSelect")
def _RefSelectShape(op):
  """Shape function for the RefSelect op.

  The RefSelect takes one scalar input and N inputs of arbitrary
  shapes, and produces one output, which is one of those N inputs.

  This function conservatively assumes that if any of the N inputs is
  not fully defined, the output shape is unknown. If all of the N
  inputs have the exact same known shape, the output must have that
  shape.

  Args:
    op: A RefSelect Operation.

  Returns:
    A single-element list containing the Shape of the RefSelect op.
  """
  # The selector index must be a scalar.
  op.inputs[0].get_shape().merge_with(tensor_shape.scalar())
  first_shape = op.inputs[1].get_shape()
  if not first_shape.is_fully_defined():
    return [tensor_shape.unknown_shape()]
  for candidate in op.inputs[2:]:
    candidate_shape = candidate.get_shape()
    if (not candidate_shape.is_fully_defined()
        or not candidate_shape.is_compatible_with(first_shape)):
      return [tensor_shape.unknown_shape()]
  return [first_shape]
@ops.RegisterShape("RefSwitch")
@ops.RegisterShape("Switch")
def _SwitchShape(op):
  """Shape function for Switch/RefSwitch: both outputs mirror the data input.

  The second input is the boolean predicate and must be a scalar.
  """
  data_shape = op.inputs[0].get_shape()
  op.inputs[1].get_shape().merge_with(tensor_shape.scalar())
  return [data_shape, data_shape]
|
for x in op.control_inputs:
assert x._get_control_flow_context() == self, (
|
def spiral_diagonal_sum(width):
    """Sum of the numbers on both diagonals of a `width` x `width` number
    spiral that starts with 1 at the centre (Project Euler problem 28).

    Args:
        width: Odd side length of the spiral (1 gives just the centre cell).

    Returns:
        The sum of all diagonal entries.
    """
    # Fixes the original script: Python 2-only `print sum`/`xrange`,
    # shadowing of the builtin `sum`, and a hard-coded width of 1001.
    total = 1  # the centre cell holds 1
    corner = 1
    for ring in range(3, width + 1, 2):
        step = ring - 1
        # Each ring contributes four corner values, `step` apart.
        for _ in range(4):
            corner += step
            total += corner
    return total


if __name__ == "__main__":
    print(spiral_diagonal_sum(1001))
<|file_name|>cygwinccompiler.py<|end_file_name|><|fim▁begin|>"""distutils.cygwinccompiler
Provides the CygwinCCompiler class, a subclass of UnixCCompiler that
handles the Cygwin port of the GNU C compiler to Windows. It also contains
the Mingw32CCompiler class which handles the mingw32 port of GCC (same as
cygwin in no-cygwin mode).
"""
# problems:
#
# * if you use a msvc compiled python version (1.5.2)
# 1. you have to insert a __GNUC__ section in its config.h
# 2. you have to generate an import library for its dll
# - create a def-file for python??.dll
# - create an import library using
# dlltool --dllname python15.dll --def python15.def \
# --output-lib libpython15.a
#
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
#
# * We put export_symbols in a def-file, and don't use
# --export-all-symbols because it doesn't worked reliable in some
# tested configurations. And because other windows compilers also
# need their symbols specified this no serious problem.
#
# tested configurations:
#
# * cygwin gcc 2.91.57/ld 2.9.4/dllwrap 0.2.4 works
# (after patching python's config.h and for C++ some other include files)
# see also http://starship.python.net/crew/kernr/mingw32/Notes.html
# * mingw32 gcc 2.95.2/ld 2.9.4/dllwrap 0.2.4 works
# (ld doesn't support -shared, so we use dllwrap)
# * cygwin gcc 2.95.2/ld 2.10.90/dllwrap 2.10.90 works now
# - its dllwrap doesn't work, there is a bug in binutils 2.10.90
# see also http://sources.redhat.com/ml/cygwin/2000-06/msg01274.html
# - using gcc -mdll instead dllwrap doesn't work without -static because
# it tries to link against dlls instead their import libraries. (If
# it finds the dll first.)
# By specifying -static we force ld to link against the import libraries,
# this is windows standard and there are normally not the necessary symbols
# in the dlls.
# *** only the version of June 2000 shows these problems
# * cygwin gcc 3.2/ld 2.13.90 works
# (ld supports -shared)
# * mingw gcc 3.2/ld 2.13 works
# (ld supports -shared)
import os
import sys
import copy
from subprocess import Popen, PIPE, check_output
import re
from distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.file_util import write_file
from distutils.errors import (DistutilsExecError, CCompilerError,
CompileError, UnknownFileError)
from distutils import log
from distutils.version import LooseVersion
from distutils.spawn import find_executable
def get_msvcr():
    """Include the appropriate MSVC runtime library if Python was built
    with MSVC 7.0 or later.

    Returns a one-element list with the runtime library name, or None when
    the interpreter was not built with MSVC.
    """
    msc_pos = sys.version.find('MSC v.')
    if msc_pos == -1:
        return None
    msc_ver = sys.version[msc_pos+6:msc_pos+10]
    runtimes = {
        '1300': ['msvcr70'],   # MSVC 7.0
        '1310': ['msvcr71'],   # MSVC 7.1
        '1400': ['msvcr80'],   # VS2005 / MSVC 8.0
        '1500': ['msvcr90'],   # VS2008 / MSVC 9.0
        '1600': ['msvcr100'],  # VS2010 / MSVC 10.0
    }
    try:
        return runtimes[msc_ver]
    except KeyError:
        raise ValueError("Unknown MS Compiler version %s " % msc_ver)
class CygwinCCompiler(UnixCCompiler):
    """ Handles the Cygwin port of the GNU C compiler to Windows.
    """
    # Restores the attribute lines that were displaced from this class body
    # (the shared-library settings below belong here, per distutils
    # convention: .dll extension, "libNAME.a" static naming).
    compiler_type = 'cygwin'
    obj_extension = ".o"
    static_lib_extension = ".a"
    shared_lib_extension = ".dll"
    static_lib_format = "lib%s%s"
    shared_lib_format = "%s%s"
    exe_extension = ".exe"
    def __init__(self, verbose=0, dry_run=0, force=0):
        """Probe gcc/ld/dllwrap versions and configure the tool command
        lines accordingly; warn if pyconfig.h looks incompatible with GCC."""
        UnixCCompiler.__init__(self, verbose, dry_run, force)
        status, details = check_config_h()
        self.debug_print("Python's GCC status: %s (details: %s)" %
                         (status, details))
        if status is not CONFIG_H_OK:
            self.warn(
                "Python's pyconfig.h doesn't seem to support your compiler. "
                "Reason: %s. "
                "Compiling may fail because of undefined preprocessor macros."
                % details)
        self.gcc_version, self.ld_version, self.dllwrap_version = \
            get_versions()
        self.debug_print(self.compiler_type + ": gcc %s, ld %s, dllwrap %s\n" %
                         (self.gcc_version,
                          self.ld_version,
                          self.dllwrap_version) )
        # ld_version >= "2.10.90" and < "2.13" should also be able to use
        # gcc -mdll instead of dllwrap
        # Older dllwraps had own version numbers, newer ones use the
        # same as the rest of binutils ( also ld )
        # dllwrap 2.10.90 is buggy
        if self.ld_version >= "2.10.90":
            self.linker_dll = "gcc"
        else:
            self.linker_dll = "dllwrap"
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # Hard-code GCC because that's what this is all about.
        # XXX optimization, warnings etc. should be customizable.
        self.set_executables(compiler='gcc -mcygwin -O -Wall',
                             compiler_so='gcc -mcygwin -mdll -O -Wall',
                             compiler_cxx='g++ -mcygwin -O -Wall',
                             linker_exe='gcc -mcygwin',
                             linker_so=('%s -mcygwin %s' %
                                        (self.linker_dll, shared_option)))
        # cygwin and mingw32 need different sets of libraries
        if self.gcc_version == "2.91.57":
            # cygwin shouldn't need msvcrt, but without the dlls will crash
            # (gcc version 2.91.57) -- perhaps something about initialization
            self.dll_libraries=["msvcrt"]
            self.warn(
                "Consider upgrading to a newer version of gcc")
        else:
            # Include the appropriate MSVC runtime library if Python was built
            # with MSVC 7.0 or later.
            self.dll_libraries = get_msvcr()
def _compile(self, obj, src, ext, cc_args, extra_postargs, pp_opts):
"""Compiles the source by spawning GCC and windres if needed."""
if ext == '.rc' or ext == '.res':
# gcc needs '.res' and '.rc' compiled to object files !!!
try:
self.spawn(["windres", "-i", src, "-o", obj])
except DistutilsExecError as msg:
raise CompileError(msg)
else: # for other files use the C-compiler
try:
self.spawn(self.compiler_so + cc_args + [src, '-o', obj] +
extra_postargs)
except DistutilsExecError as msg:
raise CompileError(msg)
    def link(self, target_desc, objects, output_filename, output_dir=None,
             libraries=None, library_dirs=None, runtime_library_dirs=None,
             export_symbols=None, debug=0, extra_preargs=None,
             extra_postargs=None, build_temp=None, target_lang=None):
        """Link the objects.

        Extends UnixCCompiler.link() by (a) appending the MSVC runtime
        import libraries discovered in __init__, and (b) when symbols are
        exported, writing them into a .def file next to the object files
        and handing that to gcc/ld or dllwrap instead of passing
        export_symbols through.
        """
        # use separate copies, so we can modify the lists
        extra_preargs = copy.copy(extra_preargs or [])
        libraries = copy.copy(libraries or [])
        objects = copy.copy(objects or [])
        # Additional libraries
        libraries.extend(self.dll_libraries)
        # handle export symbols by creating a def-file
        # with executables this only works with gcc/ld as linker
        if ((export_symbols is not None) and
            (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
            # (The linker doesn't do anything if output is up-to-date.
            # So it would probably better to check if we really need this,
            # but for this we had to insert some unchanged parts of
            # UnixCCompiler, and this is not what we want.)
            # we want to put some files in the same directory as the
            # object files are, build_temp doesn't help much
            # where are the object files
            temp_dir = os.path.dirname(objects[0])
            # name of dll to give the helper files the same base name
            (dll_name, dll_extension) = os.path.splitext(
                os.path.basename(output_filename))
            # generate the filenames for these files
            def_file = os.path.join(temp_dir, dll_name + ".def")
            lib_file = os.path.join(temp_dir, 'lib' + dll_name + ".a")
            # Generate .def file
            contents = [
                "LIBRARY %s" % os.path.basename(output_filename),
                "EXPORTS"]
            for sym in export_symbols:
                contents.append(sym)
            self.execute(write_file, (def_file, contents),
                         "writing %s" % def_file)
            # next add options for def-file and to creating import libraries
            # dllwrap uses different options than gcc/ld
            if self.linker_dll == "dllwrap":
                extra_preargs.extend(["--output-lib", lib_file])
                # for dllwrap we have to use a special option
                extra_preargs.extend(["--def", def_file])
            # we use gcc/ld here and can be sure ld is >= 2.9.10
            else:
                # doesn't work: bfd_close build\...\libfoo.a: Invalid operation
                #extra_preargs.extend(["-Wl,--out-implib,%s" % lib_file])
                # for gcc/ld the def-file is specified as any object files
                objects.append(def_file)
        #end: if ((export_symbols is not None) and
        #        (target_desc != self.EXECUTABLE or self.linker_dll == "gcc")):
        # who wants symbols and a many times larger output file
        # should explicitly switch the debug mode on
        # otherwise we let dllwrap/ld strip the output file
        # (On my machine: 10KiB < stripped_file < ??100KiB
        #   unstripped_file = stripped_file + XXX KiB
        #  ( XXX=254 for a typical python extension))
        if not debug:
            extra_preargs.append("-s")
        UnixCCompiler.link(self, target_desc, objects, output_filename,
                           output_dir, libraries, library_dirs,
                           runtime_library_dirs,
                           None, # export_symbols, we do this in our def-file
                           debug, extra_preargs, extra_postargs, build_temp,
                           target_lang)
# -- Miscellaneous methods -----------------------------------------
def object_filenames(self, source_filenames, strip_dir=0, output_dir=''):
"""Adds supports for rc and res files."""
if output_dir is None:
output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
base, ext = os.path.splitext(os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError("unknown file type '%s' (from '%s')" % \
(ext, src_name))
if strip_dir:
base = os.path.basename (base)
if ext in ('.res', '.rc'):
# these need to be compiled to object files
obj_names.append (os.path.join(output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join(output_dir,
base + self.obj_extension))
return obj_names
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(CygwinCCompiler):
    """ Handles the Mingw32 port of the GNU C compiler to Windows.
    """
    compiler_type = 'mingw32'

    def __init__(self, verbose=0, dry_run=0, force=0):
        """Configure gcc without the -mcygwin flag (no-cygwin mode)."""
        CygwinCCompiler.__init__(self, verbose, dry_run, force)
        # ld_version >= "2.13" support -shared so use it instead of
        # -mdll -static
        if self.ld_version >= "2.13":
            shared_option = "-shared"
        else:
            shared_option = "-mdll -static"
        # A real mingw32 doesn't need to specify a different entry point,
        # but cygwin 2.91.57 in no-cygwin-mode needs it.
        if self.gcc_version <= "2.91.57":
            entry_point = '--entry _DllMain@12'
        else:
            entry_point = ''
        if is_cygwingcc():
            raise CCompilerError(
                'Cygwin gcc cannot be used with --compiler=mingw32')
        self.set_executables(compiler='gcc -O -Wall',
                             compiler_so='gcc -mdll -O -Wall',
                             compiler_cxx='g++ -O -Wall',
                             linker_exe='gcc',
                             linker_so='%s %s %s'
                                       % (self.linker_dll, shared_option,
                                          entry_point))
        # Maybe we should also append -mthreads, but then the finished
        # dlls need another dll (mingwm10.dll see Mingw32 docs)
        # (-mthreads: Support thread-safe exception handling on `Mingw32')
        # Include the appropriate MSVC runtime library if Python was built
        # with MSVC 7.0 or later. (The former `self.dll_libraries = []`
        # dead store was removed: it was immediately overwritten here.)
        self.dll_libraries = get_msvcr()
# Because these compilers aren't configured in Python's pyconfig.h file by
# default, we should at least warn the user if he is using an unmodified
# version.

# Tri-state result codes returned by check_config_h() below.
CONFIG_H_OK = "ok"
CONFIG_H_NOTOK = "not ok"
CONFIG_H_UNCERTAIN = "uncertain"
def check_config_h():
    """Check if the current Python installation appears amenable to building
    extensions with GCC.

    Returns a tuple (status, details), where 'status' is one of the following
    constants:

    - CONFIG_H_OK: all is well, go ahead and compile
    - CONFIG_H_NOTOK: doesn't look good
    - CONFIG_H_UNCERTAIN: not sure -- unable to read pyconfig.h

    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    # XXX since this function also checks sys.version, it's not strictly a
    # "pyconfig.h" check -- should probably be renamed...
    from distutils import sysconfig

    # if sys.version contains GCC then python was compiled with GCC, and the
    # pyconfig.h file should be OK
    if "GCC" in sys.version:
        return CONFIG_H_OK, "sys.version mentions 'GCC'"

    # otherwise, look for __GNUC__ in the installed pyconfig.h
    fn = sysconfig.get_config_h_filename()
    try:
        with open(fn) as config_h:
            contents = config_h.read()
    except OSError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    if "__GNUC__" in contents:
        return CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn
    return CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn
RE_VERSION = re.compile(br'(\d+\.\d+(\.\d+)*)')
def _find_exe_version(cmd):
"""Find the version of an executable by running `cmd` in the shell.
If the command is not found, or the output does not match
`RE_VERSION`, returns None.
"""
executable = cmd.split()[0]
if find_executable(executable) is None:
return None
out = Popen(cmd, shell=True, stdout=PIPE).stdout
try:
out_string = out.read()
finally:
out.close()
result = RE_VERSION.search(out_string)
if result is None:
return None
# LooseVersion works with strings
# so we need to decode our bytes
return LooseVersion(result.group(1).decode())
def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.

    If not possible it returns None for it.
    """
    return tuple(_find_exe_version(cmd)
                 for cmd in ('gcc -dumpversion', 'ld -v', 'dllwrap --version'))
def is_cygwingcc():
    '''Try to determine if the gcc that would be used is from cygwin.'''
    # gcc -dumpmachine prints the target triple, e.g. b'x86_64-pc-cygwin'.
    return check_output(['gcc', '-dumpmachine']).strip().endswith(b'cygwin')
|
shared_lib_extension = ".dll"
static_lib_format = "lib%s%s"
shared_lib_format = "%s%s"
|
<|file_name|>test_lazy_canteen.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*-
from datetime import date
import re
import pytest
from pyopenmensa.feed import LazyBuilder
@pytest.fixture
def canteen():
    """Fresh LazyBuilder instance for each test."""
    return LazyBuilder()
def test_date_converting(canteen):
    """ISO strings, date objects and German-style date strings must all be
    normalized to the same day, so repeated setDayClosed calls for the same
    date create only one day entry."""
    day = date(2013, 3, 7)
    assert canteen.dayCount() == 0
    canteen.setDayClosed('2013-03-07')
    assert canteen.dayCount() == 1
    canteen.setDayClosed(day)
    assert canteen.dayCount() == 1
    canteen.setDayClosed('07.03.2013')
    assert canteen.dayCount() == 1
def test_has_meals_for(canteen):
    """hasMealsFor is True only when a day has meals and is not closed."""
    day = date(2013, 3, 7)
    assert canteen.hasMealsFor(day) is False
    # Inject a meal directly into the internal per-day store.
    canteen._days[day] = {'Hausgericht': ('Gulash', [], {})}
    assert canteen.hasMealsFor(day) is True
    # Closing the day discards/overrides its meals.
    canteen.setDayClosed(day)
    assert canteen.hasMealsFor(day) is False
def test_add_meal(canteen):
    """addMeal must register the meal so the day reports available meals."""
    day = date(2013, 3, 7)
    canteen.addMeal(day, 'Hauptgericht', 'Gulasch')
    assert canteen.hasMealsFor(day)
def test_to_long_meal_name(canteen):
    """A 251-character meal name must be accepted without raising.

    NOTE(review): there is no assertion here -- the test only checks that
    addMeal/hasMealsFor do not raise; presumably the builder truncates
    over-long names. Confirm whether `assert canteen.hasMealsFor(day)`
    was intended.
    """
    day = date(2013, 3, 7)
    canteen.addMeal(day, 'Hauptgericht', 'Y'*251)
    canteen.hasMealsFor(day)
def test_caseinsensitive_notes(canteen):
    """Legend keys are matched through legendKeyFunc, so an upper-case
    marker '(F)' resolves against the lower-case legend key 'f'."""
    day = date(2013, 3, 7)
    canteen.legendKeyFunc = lambda v: v.lower()
    canteen.setLegendData(legend={'f': 'Note'})
    canteen.addMeal(day, 'Test', 'Essen(F)')
    # The marker is stripped from the name and resolved to its note text.
    assert canteen._days[day]['Test'][0] == ('Essen', ['Note'], {})
def test_notes_regex(canteen):
    """A custom extra_regex extracts only exactly-matching markers as notes;
    non-matching look-alikes (_a_, _2,2_, (2)) stay in the meal name."""
    day = date(2013, 3, 7)
    canteen.extra_regex = re.compile('_([0-9]{1,3})_(?:: +)?', re.UNICODE)
    canteen.setLegendData(legend={'2': 'Found Note'})
    canteen.addMeal(day, 'Test', '_2_: Essen _a_, _2,2_, (2)')
    assert canteen._days[day]['Test'][0] == ('Essen _a_, _2,2_, (2)', ['Found Note'], {})
|
day = date(2013, 3, 7)
canteen.addMeal(day, 'Hauptgericht', 'Gulasch')
assert canteen.hasMealsFor(day)
|
class Information:
    """Plain value object describing an information record attached to an
    object/CV pair. Reconstructed from a scrambled source: the `cvid`
    assignment had been displaced out of __init__.
    """

    def __init__(self, objectid, cvid, information_type_id, description):
        # Identifier of the object this information belongs to.
        self.objectid = objectid
        # Identifier of the associated CV.
        self.cvid = cvid
        # Category/type discriminator for the information record.
        self.information_type_id = information_type_id
        # Free-text description.
        self.description = description
        # Soft-delete flag; new records start as not deleted.
        self.deleted = 0
<|file_name|>issue-61936.rs<|end_file_name|><|fim▁begin|>// run-pass
/// Extension trait adding `array_windows_example` to slices: produces an
/// iterator yielding `[T; N]` arrays of cloned elements.
trait SliceExt<T: Clone> {
    fn array_windows_example<'a, const N: usize>(&'a self) -> ArrayWindowsExample<'a, T, N>;
}
impl <T: Clone> SliceExt<T> for [T] {
    fn array_windows_example<'a, const N: usize>(&'a self) -> ArrayWindowsExample<'a, T, N> {
        // Start at the beginning of the slice; `idx` advances as items are
        // yielded by the Iterator impl below.
        ArrayWindowsExample{ idx: 0, slice: &self }
    }
}
/// Iterator state: the borrowed slice plus the index of the next element to
/// read. The const parameter `N` (window length) is only used by the
/// `Iterator` impl's item type, not by the fields.
struct ArrayWindowsExample<'a, T, const N: usize> {
    slice: &'a [T],
    idx: usize,
}
impl <'a, T: Clone, const N: usize> Iterator for ArrayWindowsExample<'a, T, N> {
    type Item = [T; N];
    fn next(&mut self) -> Option<Self::Item> {
        // Note: this is unsound for some `T` and not meant as an example
        // on how to implement `ArrayWindows`.
        // (`mem::zeroed` conjures a `[T; N]` that is only valid for types
        // where all-zero bytes are a valid value.)
        // NOTE(review): `idx` is incremented N times per call, so this
        // yields non-overlapping chunks rather than overlapping windows.
        let mut res = unsafe{ std::mem::zeroed() };
        let mut ptr = &mut res as *mut [T; N] as *mut T;
        for i in 0..N {
            // Stop (returning None) if fewer than N elements remain.
            match self.slice[self.idx..].get(i) {
                None => return None,
                Some(elem) => unsafe { std::ptr::write_volatile(ptr, elem.clone())},
            };
            ptr = ptr.wrapping_add(1);
            self.idx += 1;
        }
        Some(res)
    }
}
const FOUR: usize = 4;
fn main() {<|fim▁hole|> let v: Vec<usize> = vec![0; 100];
for array in v.as_slice().array_windows_example::<FOUR>() {
assert_eq!(array, [0, 0, 0, 0])
}
}<|fim▁end|>
| |
<|file_name|>settings.py<|end_file_name|><|fim▁begin|>"""
Django settings for kore project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Repository root: two directory levels above this settings module.
BASE_DIR = os.path.dirname(os.path.dirname(__file__))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repository; production must
# override it (local_settings.py is imported at the bottom of this file).
SECRET_KEY = '9j++(0=dc&6w&113d4bofcjy1xy-pe$frla&=s*8w94=0ym0@&'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []
# Application definition

INSTALLED_APPS = [
    'grappelli',
    'nested_admin',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.gis',
    'raven.contrib.django.raven_compat',
    'django_extensions',
    'rest_framework',
    'corsheaders',
    'modeltranslation',
    'leaflet',
    'munigeo',
    'schools',
    'django_filters'
]

if DEBUG:
    # Optional development-only tooling; both entries are currently disabled.
    # INSTALLED_APPS.insert(0, 'devserver')
    # INSTALLED_APPS.insert(0, 'debug_toolbar')
    pass
MIDDLEWARE_CLASSES = (
    'django.middleware.locale.LocaleMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    # CorsMiddleware must run before CommonMiddleware.
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'kore.urls'

WSGI_APPLICATION = 'kore.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

DATABASES = {
    'default': {
        # PostGIS backend: required for the GeoDjango/munigeo features.
        'ENGINE': 'django.contrib.gis.db.backends.postgis',
        'NAME': 'kore',
    }
}

# Munigeo
# https://github.com/City-of-Helsinki/munigeo

# ETRS89 / TM35FIN projection used for Finnish geodata.
PROJECTION_SRID = 3067

# If no country specified (for example through a REST API call), use this
# as default.
DEFAULT_COUNTRY = 'fi'
# The word used for municipality in the OCD identifiers in the default country.
DEFAULT_OCD_MUNICIPALITY = 'kunta'

BOUNDING_BOX = [-548576, 6291456, 1548576, 8388608]
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

# Restored from a scrambled source: LANGUAGE_CODE had been displaced out of
# this section, and a junk marker token was embedded in the TIME_ZONE line.
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True
def gettext(s):
    """Identity marker so language names are picked up by makemessages
    without importing Django's translation machinery at settings load time.

    (Replaces `gettext = lambda s: s` -- assigning a lambda to a name is
    discouraged per PEP 8 / flake8 E731.)
    """
    return s


LANGUAGES = (
    ('fi', gettext('Finnish')),
    ('sv', gettext('Swedish')),
    ('en', gettext('English')),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "var", "static")

# Translation catalogs for the schools app.
LOCALE_PATH = os.path.join(BASE_DIR, "schools", "locale")

REST_FRAMEWORK = {
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination',
    'PAGE_SIZE': 20,
    'MAX_PAGINATE_BY': 1000,  # Maximum limit allowed when using `?page_size=xxx`.
    'DEFAULT_FILTER_BACKENDS':
        ('rest_framework.filters.DjangoFilterBackend',),
    'DEFAULT_RENDERER_CLASSES': (
        'rest_framework.renderers.JSONRenderer',
        'rest_framework.renderers.BrowsableAPIRenderer',
    )
}
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

# NOTE(review): allows requests from ANY origin -- confirm this is intended
# for production and not just local development.
CORS_ORIGIN_ALLOW_ALL = True
# local_settings.py can be used to override environment-specific settings
# like database and email that differ between development and production.
try:
    from local_settings import *
except ImportError:
    # Intentional best-effort import: if local_settings.py is absent the
    # defaults defined above are used unchanged.
    pass
|
LANGUAGE_CODE = 'en-us'
|
<|file_name|>automlapi.ts<|end_file_name|><|fim▁begin|>// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
import automl from '@google-cloud/automl';
import * as dayjs from 'dayjs';
import * as express from 'express';
import { auth } from 'google-auth-library';
import * as morgan from 'morgan';
import {
AUTOML_API_SCOPE,
AUTOML_API_URL,
AUTOML_BUCKET_URL,
LOCATION,
PROJECT_ID,
} from './constants';
import { OperationMetadata } from './types';
// Express application exported for the serverless/HTTP entry point.
export const app = express();
app.use(express.json());
app.use(morgan('combined'));

const client = new automl.v1beta1.AutoMlClient();

// Controls model type. For more options, see:
// https://cloud.google.com/vision/automl/alpha/docs/reference/rest/v1beta1/projects.locations.models#imageclassificationmodelmetadata
const DEFAULT_MODEL_TYPE = 'mobile-high-accuracy-1';
const DEFAULT_TRAIN_BUDGET = 1;
// Dataset display names may only contain ASCII letters, digits and '_'.
const DATASET_NAME_REGEX = new RegExp('^[a-zA-Z_0-9]+$');
const MODEL_VERSION_FORMAT = 'vYYYYMMDDHHmmss';

// "projects/<project>/locations/<location>" parent path used by all calls.
const parent = client.locationPath(PROJECT_ID, LOCATION);

// A model as returned by AutoML /models response
interface Model {
  name: string;
  datasetId: string;
  displayName: string;
  createTime: string;
  updateTime: string;
  imageClassificationModelMetadata: {
    trainBudget: string;
    trainCost: string;
    stopReason: string;
    modelType: string;
  };
}

// Response shape of the AutoML model-listing endpoint.
interface ModelResp {
  model: Model[];
}
/// create a new dataset (MULTICLASS image classification) with the given
/// display name; resolves with the client's [dataset, operation] response.
/// Fix: parameter typed as primitive `string` instead of the `String`
/// wrapper object type (TypeScript best practice; accepts the same values).
function createDataset(displayName: string): Promise<any> {
  const dataset = {
    // NOTE(review): AutoML normally generates the resource `name` itself;
    // confirm whether sending it here has any effect.
    name: displayName,
    displayName,
    imageClassificationDatasetMetadata: {
      classificationType: 'MULTICLASS',
    },
  };
  return client.createDataset({ parent, dataset });
}
/// Returns the trailing path segment of a fully-qualified resource name,
/// e.g. "projects/p/locations/l/datasets/ICN123" -> "ICN123".
const extractIdFromName = (datasetName: string): string => {
  // split() always yields at least one element, so pop() cannot be undefined.
  return datasetName.split('/').pop()!;
};
/// returns the ID of a dataset of the format ICN** or null if not found
/// Lists every dataset under `parent` and linearly scans for one whose
/// trailing ID matches; resolves with the full resource name or null.
function getDatasetName(automlId: string): Promise<string | null> {
  return client.listDatasets({ parent }).then((responses: any[]) => {
    // responses[0] is the array of dataset objects.
    const datasets = responses[0];
    for (const dataset of datasets) {
      if (extractIdFromName(dataset['name']) === automlId) {
        return dataset['name'];
      }
    }
    return null;
  });
}
/// initiates an operation on automl to start importing data for a dataset
/// `name` is the dataset's full resource name; `displayName` selects the
/// GCS folder and `labels` the CSV file inside the AutoML bucket.
async function importDataset(
  name: string,
  displayName: string,
  labels: string
): Promise<OperationMetadata> {
  const inputConfig = {
    gcsSource: {
      inputUris: [`${AUTOML_BUCKET_URL}/${displayName}/${labels}`],
    },
  };
  return client
    .importData({ name, inputConfig })
    .then((responses: any[]) => responses[1]); // initial api response with operation metadata
}
/**
 * GET /datasets - lists all datasets via the AutoML REST API and relays the
 * raw JSON response. Errors are passed to the Express error middleware.
 */
app.get('/datasets', async (req, res, next) => {
  try {
    const authClient = await auth.getClient({ scopes: [AUTOML_API_SCOPE] });
    const url = `${AUTOML_API_URL}/datasets`;
    const resp = await authClient.request({ url });
    res.json(resp.data);
  } catch (err) {
    console.error(err);
    next(err);
  }
});
/**
 * POST /datasets - creates a new dataset in AutoML.
 * Body: { displayName } restricted to [A-Za-z0-9_].
 * Responds with the raw create response; 400 on validation failure,
 * 500 with { message } on API errors.
 */
app.post('/datasets', async (req, res, next) => {
  try {
    const { displayName } = req.body;
    if (displayName === undefined) {
      res.status(400).send('Expected a dataset `displayName`');
      return;
    }
    if (!displayName.match(DATASET_NAME_REGEX)) {
      res
        .status(400)
        .send(
          'The displayName contains a not allowed character, the' +
            ' only allowed ones are ASCII Latin letters A-Z and a-z, an underscore (_),' +
            ' and ASCII digits 0-9'
        );
      return;
    }
    console.info(`Attempting to create dataset: ${displayName}`);
    // createDataset resolves to [dataset, ...]; only the dataset is returned.
    const [response] = await createDataset(displayName);
    res.json(response);
  } catch (err) {
    res.status(500);
    res.json({message: err.message});
    console.error(err);
  }
});
/**
 * DELETE /datasets/:datasetId - deletes the dataset whose trailing AutoML
 * ID matches :datasetId. 404 if no such dataset, 500 on API errors.
 */
app.delete('/datasets/:datasetId', async (req, res, next) => {
  try {
    const { datasetId } = req.params;
    if (!datasetId) {
      res.status(400).send(`Expected datasetId: ${datasetId}`);
      return;
    }
    // Resolve the short ID to the full resource name required by the API.
    const name = await getDatasetName(datasetId);
    if (name === null) {
      res.status(404).send(`No dataset found for id: ${datasetId}`);
      return;
    }
    const resp = await client.deleteDataset({ name });
    console.log(resp);
    res.json();
  } catch (err) {
    console.error(err);
    res.status(500);
    res.json({message: err.message});
  }
});
/**
 * POST /import - initiates importing labeled data for a dataset in automl.
 *
 * Inputs:
 * - datasetId: string - automl ID of the dataset
 * - name: string - display name of the dataset (GCS folder name)
 * - labels: string - file name containing the labels information. e.g
 *   labels.csv
 * Responds with the long-running operation metadata.
 */
app.post('/import', async (req, res, next) => {
  const { name, labels, datasetId } = req.body;
  if (!name) {
    res.status(400).json({ error: 'Need a dataset name' });
    return;
  }
  if (!datasetId) {
    res.status(400).json({ error: 'Need a dataset Id' });
    return;
  }
  if (!labels) {
    res.status(400).json({ error: 'Need a path for labels file' });
    return;
  }
  try {
    const datasetName = await getDatasetName(datasetId);
    if (datasetName === null) {
      res.status(400).json({ error: 'Dataset not found' });
      return;
    }
    const operationMetadata = await importDataset(datasetName, name, labels);
    res.json(operationMetadata);
  } catch (err) {
    console.error(err);
    res.status(500);
    res.json({message: err.message});
  }
});
/**
 * POST /train - initiates creation (training) of a new model for the
 * provided dataset.
 *
 * Inputs
 * - datasetId: string - automl ID of the dataset
 * - trainBudget (optional, defaults to DEFAULT_TRAIN_BUDGET)
 * - modelType (optional, defaults to DEFAULT_MODEL_TYPE)
 * Calls the create model api on AutoML
 * https://cloud.google.com/vision/automl/alpha/docs/reference/rest/v1beta1/projects.locations.models/create
 *
 * Uses the REST API; responds with the long-running operation metadata.
 */
app.post('/train', async (req, res, next) => {
  const { datasetId } = req.body;
  if (!datasetId) {
    res.status(400).json({ error: 'Need a dataset Id' });
    return;
  }
  let { trainBudget, modelType } = req.body;
  trainBudget = trainBudget === undefined ? DEFAULT_TRAIN_BUDGET : trainBudget;
  modelType = modelType === undefined ? DEFAULT_MODEL_TYPE : modelType;
  console.log(
    `Using train budget: ${trainBudget}, and model type: ${modelType}`
  );
  try {
    // Validate the dataset exists before spending quota on training.
    const datasetName = await getDatasetName(datasetId);
    if (datasetName === null) {
      res.status(400).json({ error: 'Dataset not found' });
      return;
    }
    const authClient = await auth.getClient({ scopes: [AUTOML_API_SCOPE] });
    const url = `${AUTOML_API_URL}/models`;
    const resp = await authClient.request({
      method: 'POST',
      data: {
        // Timestamp-derived display name keeps model versions sortable.
        displayName: `${dayjs().format(MODEL_VERSION_FORMAT)}`,
        dataset_id: datasetId,
        imageClassificationModelMetadata: { trainBudget, modelType },
      },
      url,
    });
    const operationMetadata = resp.data as OperationMetadata;
    res.json(operationMetadata);
  } catch (err) {
    console.error(err);
    res.status(500);
    res.json({message: err.message});
  }
});
/**
 * POST /export - exports a model in tflite format to a GCS path.
 *
 * modelId - AutoML model ID: "ICN1119584480450950787",
 * gcsPath - Path to which model is exported
 *   "gs://${AUTOML_BUCKET}/models/on-device/<folder_name>"
 *
 * Note the model will be generated in a folder with timestamp as name. For
 * more, refer to
 * https://cloud.google.com/vision/automl/alpha/docs/deploy#deployment_on_mobile_models_not_core_ml
 */
app.post('/export', async (req, res, next) => {
  const { modelId, gcsPath } = req.body;
  if (!modelId) {
    res.status(400).send('need a model id: modelId');
    return;
  }
  if (!gcsPath) {
    res.status(400).send('need a gcs path: gcsPath');
    return;
  }
  const authClient = await auth.getClient({ scopes: [AUTOML_API_SCOPE] });
  const url = `${AUTOML_API_URL}/models/${modelId}:export`;
  try {
    const operationMetadata = await authClient
      .request({
        method: 'POST',
        url,
        data: {
          output_config: {
            model_format: 'tflite',
            gcs_destination: {
              output_uri_prefix: gcsPath,
            },
          },
        },
      })
      .then(resp => resp.data as OperationMetadata);
    res.json(operationMetadata);
  } catch (err) {
    console.error(err);
    res.status(500);
    res.json({message: err.message});
  }
});
/**
* Exports the latest generated model for the dataset
*/
app.post('/exportlatestmodel', async (req, res, next) => {
const { datasetId, gcsPath } = req.body;
if (!datasetId) {
res.status(400).send('need a dataset id: datasetId');
return;
}
if (!gcsPath) {
res.status(400).send('need a gcs path: gcsPath');
return;
}
try {
// 1. Get all the models
const modelsResp = (await getAllModels()).data as ModelResp;
// 2. Filter the models for the provided dataset and get the latest model
const datasetModels = modelsResp.model.filter(
m =>
m.datasetId === datasetId &&
m.imageClassificationModelMetadata.modelType.startsWith('mobile-')
);
if (datasetModels === undefined) {
throw new Error('No models found for this dataset');
}
// 3. Find the latest (based on createTime) model
const latestModel = datasetModels.sort(<|fim▁hole|> )[0];
// 3. Initiate its export
console.log('Initiating export for the latest model', latestModel);
const modelId = extractIdFromName(latestModel.name);
const authClient = await auth.getClient({ scopes: [AUTOML_API_SCOPE] });
const url = `${AUTOML_API_URL}/models/${modelId}:export`;
const operationMetadata = await authClient
.request({
method: 'POST',
url,
data: {
output_config: {
model_format: 'tflite',
gcs_destination: {
output_uri_prefix: gcsPath,
},
},
},
})
.then(resp => resp.data as OperationMetadata);
res.json(operationMetadata);
} catch (err) {
console.error(err);
res.status(500);
res.json({message: err.message});
}
});
/**
 * GET /models - lists all models via the AutoML REST API and relays the
 * raw listing; 500 with { message } on failure.
 */
app.get('/models', async (req, res, next) => {
  try {
    const resp = await getAllModels();
    res.json(resp.data);
  } catch (err) {
    console.error(err);
    res.status(500);
    res.json({message: err.message});
  }
});
/** Queries all models from the AutoML REST API; resolves with the raw
 *  authenticated request response (listing under resp.data). */
async function getAllModels() {
  const authClient = await auth.getClient({ scopes: [AUTOML_API_SCOPE] });
  const url = `${AUTOML_API_URL}/models`;
  return authClient.request({ url });
}<|fim▁end|>
|
(m1, m2) =>
new Date(m2.createTime).getTime() - new Date(m1.createTime).getTime()
|
<|file_name|>index.spec.js<|end_file_name|><|fim▁begin|>'use strict';
<|fim▁hole|> });
});<|fim▁end|>
|
describe('nothing', () => {
it('should do nothing', () => {
//
|
<|file_name|>revalidate-tests.ts<|end_file_name|><|fim▁begin|>import r = require("revalidate");
// Validator: empty values pass (required-ness is composed separately);
// non-empty values failing the simple user@domain.tld regex get `message`.
const isValidEmail = r.createValidator(
  message => value => {
    if (value && !/^[A-Z0-9._%+-]+@[A-Z0-9.-]+\.[A-Z]{2,4}$/i.test(value)) {
      return message;
    }
  },
  'Invalid email address'
);
// Validator factory: fails non-empty values whose numeric interpretation is
// not strictly greater than n; the error message embeds the field name.
const isGreaterThan = (n: any) => r.createValidator(
  message => value => {
    if (value && Number(value) <= n) {
      return message;
    }
  },
  field => `${field} must be greater than ${n}`
);
const customIsRequired = r.isRequired({ message: 'Required' });
const lessThan = r.hasLengthLessThan(16)({ message: 'Must be 15 characters or less' });
const username = r.composeValidators(
customIsRequired,
lessThan
)();
const email = r.composeValidators(
customIsRequired,
isValidEmail
)();<|fim▁hole|>
const isGreater = isGreaterThan(17)({
message: 'Sorry, you must be at least 18 years old'
});
const age = r.composeValidators(
customIsRequired,
isNumber,
isGreater
)();
const validate = r.combineValidators({
username,
email,
age
});<|fim▁end|>
|
const isNumber = r.isNumeric({
message: 'Must be a number'
});
|
<|file_name|>blend_subtract.py<|end_file_name|><|fim▁begin|>'''OpenGL extension OES.blend_subtract
This module customises the behaviour of the
OpenGL.raw.GLES1.OES.blend_subtract to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/OES/blend_subtract.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLES1 import _types, _glgets
from OpenGL.raw.GLES1.OES.blend_subtract import *<|fim▁hole|>def glInitBlendSubtractOES():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION<|fim▁end|>
|
from OpenGL.raw.GLES1.OES.blend_subtract import _EXTENSION_NAME
|
<|file_name|>hr.min.js<|end_file_name|><|fim▁begin|><|fim▁hole|>Bitrix 16.5 Business Demo = bf14f0c5bad016e66d0ed2d224b15630<|fim▁end|>
| |
<|file_name|>securityappcontainer.rs<|end_file_name|><|fim▁begin|>// Copyright © 2017 winapi-rs developers
// Licensed under the Apache License, Version 2.0
// <LICENSE-APACHE or http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your option.
// All files in the project carrying such notice may not be copied, modified, or distributed
// except according to those terms.
use shared::minwindef::{BOOL, PULONG, ULONG};<|fim▁hole|> Token: HANDLE,
AppContainerSid: PSID,
ObjectPathLength: ULONG,
ObjectPath: LPWSTR,
ReturnLength: PULONG,
) -> BOOL;
}<|fim▁end|>
|
use um::winnt::{HANDLE, LPWSTR, PSID};
extern "system" {
pub fn GetAppContainerNamedObjectPath(
|
<|file_name|>makefastqdumpScript.py<|end_file_name|><|fim▁begin|>def makefastqdumpScript(sraFileNameListFileName, outputDirectory, scriptFileName):
# Make a script that will run fastq-dump on each of a list of SRA files
sraFileNameListFile = open(sraFileNameListFileName)
scriptFile = open(scriptFileName, 'w+')
for line in sraFileNameListFile:
# Iterate through the SRA files and make a line in the script for each
scriptFile.write("fastq-dump --split-3 --outdir " + outputDirectory + " --gzip " + line.strip() + "\n")
sraFileNameListFile.close()
scriptFile.close()
if __name__=="__main__":
import sys
sraFileNameListFileName = sys.argv[1] # Contents should end with .sra
outputDirectory = sys.argv[2]
<|fim▁hole|> makefastqdumpScript(sraFileNameListFileName, outputDirectory, scriptFileName)<|fim▁end|>
|
scriptFileName = sys.argv[3]
|
<|file_name|>SectorsResource.java<|end_file_name|><|fim▁begin|><|fim▁hole|>package ch.heigvd.amt.mvcdemo.rest.resources;
import ch.heigvd.amt.mvcdemo.model.entities.Sector;
import ch.heigvd.amt.mvcdemo.rest.dto.SectorDTO;
import ch.heigvd.amt.mvcdemo.services.dao.BusinessDomainEntityNotFoundException;
import ch.heigvd.amt.mvcdemo.services.dao.SectorsDAOLocal;
import java.net.URI;
import java.util.ArrayList;
import java.util.List;
import javax.ejb.EJB;
import javax.ejb.Stateless;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.PUT;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.Response;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.UriInfo;
/**
*
* @author Olivier Liechti ([email protected])
*/
@Stateless
@Path("/sectors")
public class SectorsResource {
@Context
UriInfo uriInfo;
@EJB
private SectorsDAOLocal sectorsDAO;
    /**
     * GET /sectors - lists every sector as a DTO whose href links to the
     * corresponding single-resource endpoint.
     */
    @GET
    @Produces("application/json")
    public List<SectorDTO> getSectors() {
        List<SectorDTO> result = new ArrayList<>();
        List<Sector> sectors = sectorsDAO.findAll();
        for (Sector sector : sectors) {
            long sectorId = sector.getId();
            // Absolute link to getSector(id) for HATEOAS-style navigation.
            URI sectorHref = uriInfo
                    .getAbsolutePathBuilder()
                    .path(SectorsResource.class, "getSector")
                    .build(sectorId);
            SectorDTO dto = new SectorDTO();
            dto.setHref(sectorHref);
            dto.setName(sector.getName());
            result.add(dto);
        }
        return result;
    }
    /**
     * POST /sectors - idempotent create by name: an existing sector yields
     * 200 OK with a Location header; otherwise the sector is created and
     * 201 Created is returned, both pointing at the new resource URI.
     */
    @POST
    @Consumes("application/json")
    public Response createSector(SectorDTO sectorDTO) {
        boolean created;
        long sectorId;
        try {
            // findByName throws when absent - used here as an existence check.
            sectorId = sectorsDAO.findByName(sectorDTO.getName()).getId();
            created = false;
        } catch (BusinessDomainEntityNotFoundException ex) {
            created = true;
            sectorId = sectorsDAO.create(new Sector(sectorDTO.getName()));
        }
        URI sectorUri = uriInfo
                .getBaseUriBuilder()
                .path(SectorsResource.class)
                .path(SectorsResource.class, "getSector")
                .build(sectorId);
        ResponseBuilder builder;
        if (created) {
            builder = Response.created(sectorUri);
        } else {
            builder = Response.ok().location(sectorUri);
        }
        return builder.build();
    }
    /**
     * GET /sectors/{id} - returns the raw Sector entity as JSON.
     *
     * @throws BusinessDomainEntityNotFoundException if the id is unknown
     */
    @GET
    @Path("/{id}")
    @Produces("application/json")
    public Sector getSector(@PathParam(value = "id") long id) throws BusinessDomainEntityNotFoundException {
        return sectorsDAO.findById(id);
    }
    /**
     * PUT /sectors/{id} - renames an existing sector.
     * NOTE(review): the change is only applied to the entity returned by the
     * DAO; this presumably relies on a JPA-managed entity being flushed by
     * the container transaction - confirm the DAO returns managed entities.
     */
    @PUT
    @Path("/{id}")
    @Consumes("application/json")
    public Response updateSector(SectorDTO sectorDTO, @PathParam(value = "id") long id) throws BusinessDomainEntityNotFoundException {
        Sector sector = sectorsDAO.findById(id);
        sector.setName(sectorDTO.getName());
        return Response.ok().build();
    }
    /**
     * DELETE /sectors/{id} - removes the sector; 200 on success.
     *
     * @throws BusinessDomainEntityNotFoundException if the id is unknown
     */
    @DELETE
    @Path("/{id}")
    public Response deleteSector(@PathParam(value = "id") long id) throws BusinessDomainEntityNotFoundException {
        Sector sector = sectorsDAO.findById(id);
        sectorsDAO.delete(sector);
        return Response.ok().build();
    }
}<|fim▁end|>
| |
<|file_name|>StarLexer.cpp<|end_file_name|><|fim▁begin|>/*
* Copyright 2000-2015 Rochus Keller <mailto:[email protected]>
*
* This file is part of the CARA (Computer Aided Resonance Assignment,
* see <http://cara.nmr.ch/>) NMR Application Framework (NAF) library.
*
* The following is the license that applies to this copy of the
* library. For a license to use the library under conditions
* other than those described here, please email to [email protected].
*
* GNU General Public License Usage
* This file may be used under the terms of the GNU General Public
* License (GPL) versions 2.0 or 3.0 as published by the Free Software
* Foundation and appearing in the file LICENSE.GPL included in
* the packaging of this file. Please review the following information
* to ensure GNU General Public Licensing requirements will be met:
* http://www.fsf.org/licensing/licenses/info/GPLv2.html and
* http://www.gnu.org/copyleft/gpl.html.
*/
#include "StarLexer.h"
#include <QtDebug>
#include <QTextStream>
using namespace Star;
// ASCII code points with syntactic meaning in the STAR grammar.
enum { BEL = 7, // Bell; doubles as the escape character in new-syntax quoting
	   USC = 0x5F, // Underscore
	   LCB = 0x7B, // Left Curly Bracket
	   RCB = 0x7D, // Right Curly Bracket
	   LSB = 0x5B, // Left Square Bracket
	   RSB = 0x5D, // Right Square Bracket
	   CMA = 0x2C, // Comma
	   APO = 0x27, // Apostrophe
	   QOT = 0x22, // Quote
	   SEM = 0x3B, // Semicolon
	   COL = 0x3A, // Colon
	   PND = 0x23, // Pound
	   DLR = 0x24 // Dollar
	 };
// Constructs a lexer with no input stream attached; newSyntax selects the
// extended grammar (lists, tables, references, BEL escapes).
StarLexer::StarLexer(bool newSyntax) :
	d_lineNr(0),d_colNr(0),d_newSyntax(newSyntax),d_in(0)
{
}
// Copy constructor: only the syntax mode is taken over (via operator=);
// the input stream and read position are deliberately not copied.
StarLexer::StarLexer(const StarLexer & rhs):
	d_lineNr(0),d_colNr(0),d_newSyntax(true),d_in(0)
{
	*this = rhs;
}
StarLexer::~StarLexer()
{
	// The lexer owns the QTextStream wrapper (not the underlying device).
	if( d_in )
		delete d_in;
}
bool StarLexer::setStream(QIODevice *in, const char *codec)
{
if( in == 0 )
{
if( d_in )
delete d_in;
// das muss so sein; wenn erst im Destruktor gelöscht oder Stream sogar ein Value Object, dann stürzt
// Prozess beim Löschen von Lexer in QObject bzw. dessen Mutex.
d_in = 0;
return false;
}
if( !in->isOpen() )
{
if( !in->open( QIODevice::ReadOnly ) )
return false;<|fim▁hole|> d_in->setDevice( in );
d_in->setCodec( codec );
d_in->setAutoDetectUnicode(true);
d_lineNr = 0;
d_colNr = 0;
d_line.clear();
return true;
}
// Rewinds the attached stream and clears all positional state so lexing
// can restart from the beginning of the input.
void StarLexer::reset()
{
	d_in->seek(0);
	d_in->reset();
	d_in->resetStatus();
	d_lineNr = 0;
	d_colNr = 0;
	d_line.clear();
}
// Central tokenizer: skips whitespace (pulling new lines as needed) and
// returns the next token. Dispatch order: tag/quote/semicolon/comment
// first, then (new syntax only) structural delimiters, then references
// and bare values, with keyword prefixes (loop_, data_, ...) recognized
// case-insensitively on bare values.
StarLexer::Token StarLexer::nextToken()
{
	skipWhiteSpace();
	while( d_colNr >= d_line.size() )
	{
		// Current line exhausted: advance until a non-empty line or EOF.
		if( d_in->atEnd() )
			return Token( Token::EndOfStream, d_lineNr, d_colNr );
		nextLine();
		if( !checkLineChars() )
			return Token( Token::InvalidChar, d_lineNr, d_colNr, d_line );
		skipWhiteSpace();
	}
	Q_ASSERT( d_colNr < d_line.size() );
	const int uc = d_line[d_colNr].unicode();
	switch( uc )
	{
	case USC:
		return readTag();
	case QOT:
		// Three quotes in a row open a multi-line string (new syntax only).
		if( d_newSyntax && matchTuple( d_colNr+1, QOT, 2 ) )
			return readTrippleQuotString(QOT);
		else
			return readSingleQuotString(QOT);
	case APO:
		if( d_newSyntax && matchTuple( d_colNr+1, APO, 2 ) )
			return readTrippleQuotString(APO);
		else
			return readSingleQuotString(APO);
	case SEM:
		// A semicolon string is only valid at the very start of a line.
		if( d_colNr == 0 )
			return readSemicolonString();
		else
			return Token( Token::SyntaxError, d_lineNr, d_colNr, QChar(';') );
	case PND:
		{
			// '#' comments run to the end of the line.
			const int col = d_colNr;
			const QString cmt = d_line.mid(d_colNr);
			d_colNr = d_line.size();
			return Token( Token::Comment, d_lineNr, col, cmt );
		}
	default:
		break;
	}
	if( d_newSyntax )
	{
		// Structural single-char tokens of the extended grammar.
		switch( uc )
		{
		case LCB:
			return Token( Token::TableStart, d_lineNr, d_colNr++ );
		case RCB:
			{
				// "}$" closes a reference; a lone '}' closes a table.
				if( (d_colNr + 1) < d_line.size() && d_line[d_colNr+1].unicode() == DLR )
				{
					d_colNr++;
					return Token( Token::RefEnd, d_lineNr, d_colNr++ );
				}else
					return Token( Token::TableEnd, d_lineNr, d_colNr++ );
			}
		case LSB:
			return Token( Token::ListStart, d_lineNr, d_colNr++ );
		case RSB:
			return Token( Token::ListEnd, d_lineNr, d_colNr++ );
		case CMA:
			return Token( Token::ElemSep, d_lineNr, d_colNr++ );
		case COL:
			return Token( Token::KeyValueSep, d_lineNr, d_colNr++ );
		default:
			break;
		}
	}
	switch( uc )
	{
	case DLR:
		// "${" opens a reference; otherwise '$' starts a bare value.
		if( d_newSyntax && (d_colNr + 1) < d_line.size() && d_line[d_colNr+1].unicode() == LCB )
		{
			d_colNr++;
			return Token( Token::RefStart, d_lineNr, d_colNr++ );
		}// else fall through
	default:
		{
			const int col = d_colNr;
			const QString val = readNonWhiteSpace();
			// Keyword prefixes carry their payload after the underscore.
			if( val.startsWith( QLatin1String("loop_"), Qt::CaseInsensitive ) )
				return Token( Token::Loop, d_lineNr, col, val.mid(5) );
			else if( val.startsWith( QLatin1String("global_"), Qt::CaseInsensitive ) )
				return Token( Token::Global, d_lineNr, col, val.mid(7) );
			else if( val.startsWith( QLatin1String("save_"), Qt::CaseInsensitive ) )
				return Token( Token::Save, d_lineNr, col, val.mid(5) );
			else if( val.startsWith( QLatin1String("stop_"), Qt::CaseInsensitive ) )
				return Token( Token::Stop, d_lineNr, col, val.mid(5) );
			else if( val.startsWith( QLatin1String("data_"), Qt::CaseInsensitive ) )
				return Token( Token::Data, d_lineNr, col, val.mid(5) );
			else
				return Token( Token::NonQuoted, d_lineNr, col, val );
		}
	}
	Q_ASSERT( false ); // every case above returns
	return Token( Token::SyntaxError, d_lineNr, d_colNr, QChar(';') );
}
StarLexer::Token StarLexer::nextTokenNoComments()
{
	// Convenience wrapper around nextToken() that silently discards
	// comment tokens and hands back the first non-comment token.
	for( Token tok = nextToken(); ; tok = nextToken() )
	{
		if( tok.d_type != Token::Comment )
			return tok;
	}
}
// Debug helper: lexes the whole stream and logs every token - including
// the terminating EndOfStream/error token - via qDebug.
void StarLexer::dump()
{
	Token t = nextToken();
	while( t.d_type < Token::EndOfStream )
	{
		qDebug() << "****" << t.typeName() << t.d_line << t.d_col << t.d_text;
		t = nextToken();
	}
	qDebug() << "****" << t.typeName() << t.d_line << t.d_col << t.d_text;
}
// Assignment copies only the grammar flag; stream ownership and the
// current read position intentionally stay with each instance.
StarLexer &StarLexer::operator =(const StarLexer &rhs)
{
	d_newSyntax = rhs.d_newSyntax;
	return *this;
}
// Advances to the next physical line and resets the column cursor.
void StarLexer::nextLine()
{
	d_colNr = 0;
	d_lineNr++;
	d_line = d_in->readLine();
}
// Moves the column cursor past any whitespace on the current line.
void StarLexer::skipWhiteSpace()
{
	while( d_colNr < d_line.size() && d_line[d_colNr].isSpace() )
		d_colNr++;
}
// Returns true if ch may appear in a STAR input. ASCII is restricted to
// space/punctuation/printable (plus BEL, the new-syntax escape char); the
// new syntax additionally admits the non-surrogate Unicode ranges.
bool StarLexer::isValid(const QChar &ch) const
{
	const ulong uc = ch.unicode();
	if( uc <= 127 )
		return ch.isSpace() || ch.isPunct() || ch.isPrint() || ( d_newSyntax && uc == BEL );
	else if( d_newSyntax && uc >= 128 && uc <= 0xD7FF )
		return true;
	else if( d_newSyntax && uc >= 0xE000 && uc <= 0xFFFD )
		return true;
	// NOTE(review): QChar::unicode() yields a 16-bit value, so this branch
	// is unreachable; 0x10FFF also looks like a typo for 0x10FFFF
	// (supplementary planes) - confirm against the STAR specification.
	else if( d_newSyntax && uc >= 0x10000 && uc <= 0x10FFF )
		return true;
	return false;
}
bool StarLexer::checkLineChars()
{
	// Validates every character of the current line; on failure positions
	// the cursor on the offending character and reports false.
	const int len = d_line.size();
	for( int pos = 0; pos < len; pos++ )
	{
		if( isValid( d_line[pos] ) )
			continue;
		d_colNr = pos;
		return false;
	}
	return true;
}
StarLexer::Token StarLexer::readTag()
{
	// Consumes a tag token; the caller has already verified the leading
	// underscore, which is excluded from the stored text.
	const int start = d_colNr;
	while( d_colNr < d_line.size() && !d_line[d_colNr].isSpace() )
		d_colNr++;
	const QString tag = d_line.mid( start + 1, d_colNr - start - 1 );
	return Token( Token::Tag, d_lineNr, start, tag );
}
// Reads a single-quoted ('...' or "...") value starting at the opening
// quote char `sym`. New syntax: BEL escapes an embedded quote. Old syntax:
// a quote followed by a non-space is kept as a literal character.
StarLexer::Token StarLexer::readSingleQuotString(int sym)
{
	QString res;
	res.reserve( d_line.size() - d_colNr );
	Q_ASSERT( d_colNr < d_line.size() && d_line[d_colNr].unicode() == sym );
	d_colNr++;
	const int col = d_colNr;
	while( d_colNr < d_line.size() )
	{
		if( d_newSyntax && d_line[d_colNr].unicode() == BEL )
		{
			// BEL must be followed by the quote char it escapes.
			if( ( d_colNr + 1 ) < d_line.size() && d_line[d_colNr+1].unicode() == sym )
			{
				res += QChar(sym);
				d_colNr++;
			}else
				return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
		}else if( d_line[d_colNr].unicode() == sym )
		{
			if( !d_newSyntax && ( d_colNr + 1 ) < d_line.size() && !d_line[d_colNr+1].isSpace() )
			{
				res += d_line[d_colNr];
			}else
			{
				// Closing quote: consume it and emit the token.
				d_colNr++;
				return Token( Token::Quoted, d_lineNr, col, res );
			}
		}else
			res += d_line[d_colNr];
		d_colNr++;
	}
	return Token( Token::SyntaxError, d_lineNr, d_colNr, res ); // line ended without the closing quote
}
// Reads a triple-quoted (new-syntax) multi-line string opened by three
// `sym` chars at the cursor; consumes lines until the closing triple,
// unescaping BEL sequences along the way.
StarLexer::Token StarLexer::readTrippleQuotString(int sym)
{
	Q_ASSERT( d_newSyntax );
	const QString pattern( 3, QChar(sym) );
	Q_ASSERT( ( d_colNr + 2 ) < d_line.size() && d_line.mid(d_colNr,3) == pattern );
	d_colNr += 3;
	const int col = d_colNr;
	const int line = d_lineNr;
	const int lhsPos = findTripple( pattern, d_colNr );
	bool ok;
	if( lhsPos >= 0 )
	{
		// Closing triple found on the same line.
		const QString res = escapeString( d_line.mid( d_colNr, lhsPos - d_colNr ), sym, &ok );
		if( ok )
		{
			d_colNr = lhsPos + 3;
			return Token( Token::Multiline, line, col, res );
		}else
			return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
	}else if( lhsPos == -1 )
	{
		// Closing triple is on a later line.
		QString res = escapeString( d_line.mid( d_colNr ), sym, &ok ) + QChar('\n');
		if( !ok )
			return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
		while( true )
		{
			if( d_in->atEnd() )
				return Token( Token::SyntaxError, d_lineNr, d_colNr, res ); // TrippleQuote without end
			nextLine();
			if( !checkLineChars() )
				return Token( Token::InvalidChar, d_lineNr, d_colNr, QString(d_line[d_colNr]) );
			const int rhsPos = findTripple( pattern, d_colNr );
			if( rhsPos >= 0 )
			{
				// Closing triple found on this line.
				res += escapeString( d_line.mid( d_colNr, rhsPos - d_colNr ), sym, &ok );
				if( ok )
				{
					d_colNr = rhsPos + 3;
					return Token( Token::Multiline, line, col, res );
				}else
					return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
			}else if( rhsPos == -1 )
			{
				// Not closed yet; the whole line belongs to the string.
				res += escapeString(d_line, sym, &ok ) + QChar('\n');
				if( !ok )
					return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
			}else
				return Token( Token::SyntaxError, d_lineNr, d_colNr, d_line.mid(d_colNr) ); // malformed escape (-2)
		}
		Q_ASSERT( false );
		return Token( Token::SyntaxError, d_lineNr, d_colNr, res );
	}else
		return Token( Token::SyntaxError, d_lineNr, d_colNr, d_line.mid(d_colNr) );
}
// Reads a classic STAR semicolon-delimited multi-line string: opened by a
// ';' in column 0 and terminated by the next line starting with ';'.
StarLexer::Token StarLexer::readSemicolonString()
{
	Q_ASSERT( d_colNr == 0 && !d_line.isEmpty() && d_line[0].unicode() == SEM );
	QString res = d_line.mid(1) + QChar('\n');
	const int col = d_colNr + 1;
	const int line = d_lineNr;
	while( true )
	{
		if( d_in->atEnd() )
			return Token( Token::SyntaxError, d_lineNr, d_colNr, res ); // Semicolon String without end
		nextLine();
		if( !checkLineChars() )
			return Token( Token::InvalidChar, d_lineNr, d_colNr, QString(d_line[d_colNr]) );
		if( !d_line.isEmpty() && d_line[0].unicode() == SEM )
		{
			// Terminator found: cursor ends up just after the ';'.
			d_colNr++;
			return Token( Token::Multiline, line, col, res );
		}else
		{
			// Not terminated yet; the whole line belongs to the string.
			res += d_line + QChar('\n');
		}
	}
	Q_ASSERT( false ); // unreachable: the loop always returns
	return Token( Token::SyntaxError, d_lineNr, d_colNr );
}
QString StarLexer::readNonWhiteSpace()
{
	// Collects characters until whitespace or (new syntax) a structural
	// delimiter is met; the cursor is left on the stopping character.
	const QString stopChars = (d_newSyntax) ? QLatin1String("{}[],") : QLatin1String("");
	QString token;
	token.reserve( d_line.size() - d_colNr );
	for( ; d_colNr < d_line.size(); d_colNr++ )
	{
		const QChar ch = d_line[d_colNr];
		if( ch.isSpace() || stopChars.contains(ch) )
			break;
		token += ch;
	}
	return token;
}
// Locates the closing triple-quote `pattern` in the current line at or
// after `from`. Returns its index, -1 if absent, or -2 for a malformed
// BEL escape in front of a triple that is not followed by a fourth quote.
int StarLexer::findTripple( const QString& pattern, int from) const
{
	Q_ASSERT( d_newSyntax );
	Q_ASSERT( !pattern.isEmpty() );
	int pos = d_line.indexOf( pattern, from );
	if( pos != -1 )
	{
		Q_ASSERT( pos > 0 );
		if( d_line[pos-1].unicode() == BEL )
		{
			// BEL escapes the first quote; a real triple must follow it.
			if( (pos + 3) < d_line.size() && d_line[pos+3] == pattern[0] )
				return pos + 1; // BEL, QOT, QOT, QOT, QOT
			else
				return -2; // error, BEL, QOT, QOT, QOT
		}
	}
	return pos; // -1..not found
}
// Resolves new-syntax BEL escapes inside `str`: each BEL must be followed
// by the quote char `sym` and is replaced by it. On a dangling BEL the
// partial result is returned and *ok (if given) is set to false.
QString StarLexer::escapeString(const QString &str, int sym, bool *ok) const
{
	Q_ASSERT( d_newSyntax );
	int col = 0;
	QString res;
	res.reserve( str.size() );
	while( col < str.size() )
	{
		if( str[col].unicode() == BEL )
		{
			if( ( col + 1 ) < str.size() && str[col+1].unicode() == sym )
			{
				res += QChar(sym);
				col++;
			}else
			{
				if( ok )
					*ok = false;
				return res;
			}
		}else
			res += str[col];
		col++;
	}
	if( ok )
		*ok = true;
	return res;
}
bool StarLexer::matchTuple(int cur, int sym, int count) const
{
	Q_ASSERT( d_newSyntax );
	// True iff `count` consecutive chars of the current line, starting at
	// `cur`, all equal `sym` (false when the line is too short).
	const int end = cur + count;
	if( end > d_line.size() )
		return false;
	for( int i = cur; i < end; i++ )
		if( d_line[i].unicode() != sym )
			return false;
	return true;
}
// Human-readable name of the token type (for logging/debugging).
// NOTE(review): the array order must stay in sync with the Token type
// enum declared in the header - confirm when the enum changes.
const char *StarLexer::Token::typeName() const
{
	static const char* names[] = { "Null",
		"Comment",
		"Global", "Data", "Save", // Cells
		"Loop", "Stop", // Loops
		"Tag",
		"Quoted", "NonQuoted", "Multiline", // Values
		"ListStart", "ListEnd",
		"TableStart", "TableEnd",
		"RefStart", "RefEnd",
		"ElemSep",
		"KeyValueSep",
		"EndOfStream",
		"InvalidChar", "SyntaxError"
	};
	return names[d_type];
}
// True for tokens that can stand in a value position: plain, quoted and
// multi-line strings, plus the openers of compound values.
bool StarLexer::Token::isDataValue() const
{
	switch( d_type )
	{
	case Quoted:
	case NonQuoted:
	case Multiline:
	case ListStart:
	case TableStart:
	case RefStart:
		return true;
	default:
		return false;
	}
}<|fim▁end|>
|
}
if( d_in == 0 )
d_in = new QTextStream();
d_in->setAutoDetectUnicode(false);
|
<|file_name|>param_tcp_rxbufsize_8k.py<|end_file_name|><|fim▁begin|>from trex_astf_lib.api import *
# IPV6 tunable example
#
# ipv6.src_msb
# ipv6.dst_msb
# ipv6.enable
# <|fim▁hole|>
    def get_profile(self, **kwargs):
        """Build an ASTF profile with client and server TCP rx/tx buffers
        capped at 8 KB, replaying an HTTP-browsing pcap at 1 cps."""
        # ip generator
        ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
        ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
        ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
                           dist_client=ip_gen_c,
                           dist_server=ip_gen_s)
        # 8 KB socket buffers on both sides - the tunable under test here
        c_glob_info = ASTFGlobalInfo()
        c_glob_info.tcp.rxbufsize = 8*1024
        c_glob_info.tcp.txbufsize = 8*1024
        s_glob_info = ASTFGlobalInfo()
        s_glob_info.tcp.rxbufsize = 8*1024
        s_glob_info.tcp.txbufsize = 8*1024
        return ASTFProfile(default_ip_gen=ip_gen,
                           # Defaults affects all files
                           default_c_glob_info=c_glob_info,
                           default_s_glob_info=s_glob_info,
                           cap_list=[
                               ASTFCapInfo(file="../avl/delay_10_http_browsing_0.pcap", cps=1)
                           ]
                           )
def register():
    # Entry point used by the TRex ASTF loader to obtain the profile object.
    return Prof1()
|
class Prof1():
def __init__(self):
pass
|
<|file_name|>BookmarkImporter.java<|end_file_name|><|fim▁begin|>package yuku.alkitab.base.util;
import android.app.Activity;
import android.app.Dialog;
import android.content.Intent;
import android.database.Cursor;
import android.database.DatabaseUtils;
import android.database.sqlite.SQLiteDatabase;
import android.os.AsyncTask;
import android.support.annotation.NonNull;
import android.support.annotation.Nullable;
import android.util.Xml;
import com.afollestad.materialdialogs.MaterialDialog;
import gnu.trove.list.TIntList;
import gnu.trove.list.array.TIntArrayList;
import gnu.trove.map.hash.TIntLongHashMap;
import gnu.trove.map.hash.TIntObjectHashMap;
import gnu.trove.map.hash.TObjectIntHashMap;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import org.xml.sax.ext.DefaultHandler2;
import yuku.alkitab.base.App;
import yuku.alkitab.base.IsiActivity;
import yuku.alkitab.base.S;
import yuku.alkitab.base.storage.Db;
import yuku.alkitab.base.storage.InternalDb;
import yuku.alkitab.debug.R;
import yuku.alkitab.model.Label;
import yuku.alkitab.model.Marker;
import yuku.alkitab.model.Marker_Label;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import static yuku.alkitab.base.util.Literals.ToStringArray;
// Imported from v3. Used for once-only migration from v3 to v4.
public class BookmarkImporter {
static final String TAG = BookmarkImporter.class.getSimpleName();
// constants
	// XML tag/attribute names linking a v3 bookmark to a label in backups.
	static class Bookmark2_Label { // DO NOT CHANGE CONSTANT VALUES!
		public static final String XMLTAG_Bookmark2_Label = "Bukmak2_Label";
		public static final String XMLATTR_bookmark2_relId = "bukmak2_relId";
		public static final String XMLATTR_label_relId = "label_relId";
	}
// constants
static class BackupManager {
public static final String XMLTAG_Bukmak2 = "Bukmak2";
private static final String XMLATTR_ari = "ari";
private static final String XMLATTR_kind = "jenis";
private static final String XMLATTR_caption = "tulisan";
private static final String XMLATTR_addTime = "waktuTambah";
private static final String XMLATTR_modifyTime = "waktuUbah";
private static final String XMLATTR_relId = "relId";
private static final String XMLVAL_bookmark = "bukmak";
private static final String XMLVAL_note = "catatan";
private static final String XMLVAL_highlight = "stabilo";
public static final String XMLTAG_Label = "Label";
private static final String XMLATTR_title = "judul";
private static final String XMLATTR_bgColor = "warnaLatar";
		/**
		 * Builds a Marker from a Bukmak2 XML element's attributes; returns
		 * null when the kind attribute is not one of the three known values.
		 */
		@Nullable
		public static Marker markerFromAttributes(Attributes attributes) {
			int ari = Integer.parseInt(attributes.getValue("", XMLATTR_ari));
			String kind_s = attributes.getValue("", XMLATTR_kind);
			Marker.Kind kind = kind_s.equals(XMLVAL_bookmark) ? Marker.Kind.bookmark : kind_s.equals(XMLVAL_note) ? Marker.Kind.note : kind_s.equals(XMLVAL_highlight) ? Marker.Kind.highlight : null;
			String caption = unescapeHighUnicode(attributes.getValue("", XMLATTR_caption));
			Date addTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_addTime)));
			Date modifyTime = Sqlitil.toDate(Integer.parseInt(attributes.getValue("", XMLATTR_modifyTime)));
			if (kind == null) { // invalid
				return null;
			}
			return Marker.createNewMarker(ari, kind, caption, 1, addTime, modifyTime);
		}
		/** Reads the backup-local relId attribute; 0 when absent. */
		public static int getRelId(Attributes attributes) {
			String s = attributes.getValue("", XMLATTR_relId);
			return s == null ? 0 : Integer.parseInt(s);
		}
		/** Builds a Label from a Label XML element (title is unescaped). */
		public static Label labelFromAttributes(Attributes attributes) {
			String title = unescapeHighUnicode(attributes.getValue("", XMLATTR_title));
			String bgColor = attributes.getValue("", XMLATTR_bgColor);
			return Label.createNewLabel(title, 0, bgColor);
		}
		// Per-thread reusable matcher for [[~Uxxxxxx~]] escapes (Matcher is
		// not thread-safe, hence the ThreadLocal).
		static ThreadLocal<Matcher> highUnicodeMatcher = new ThreadLocal<Matcher>() {
			@Override
			protected Matcher initialValue() {
				return Pattern.compile("\\[\\[~U([0-9A-Fa-f]{6})~\\]\\]").matcher("");
			}
		};
public static String unescapeHighUnicode(String input) {
if (input == null) return null;
final Matcher m = highUnicodeMatcher.get();
m.reset(input);
StringBuffer res = new StringBuffer();
while (m.find()) {
String s = m.group(1);
final int cp = Integer.parseInt(s, 16);
m.appendReplacement(res, new String(new int[]{cp}, 0, 1));
}
m.appendTail(res);
return res.toString();
}
}<|fim▁hole|>
/**
 * Parses a backup XML document from {@code fis} and imports the markers
 * (bookmarks/notes/highlights), labels, and marker-label associations found
 * in it, showing an indeterminate progress dialog while working.
 *
 * Parsing and database writes run on an AsyncTask background thread; result
 * dialogs are shown back on the UI thread.
 *
 * @param activity host activity used for the dialogs
 * @param fis XML input stream; closed by this method after a successful parse
 * @param finishActivityAfterwards when true, finish {@code activity} after the
 *        success dialog is dismissed
 * @param runWhenDone optional callback invoked after either success or failure
 */
public static void importBookmarks(final Activity activity, @NonNull final InputStream fis, final boolean finishActivityAfterwards, final Runnable runWhenDone) {
	final MaterialDialog pd = new MaterialDialog.Builder(activity)
		.content(R.string.mengimpor_titiktiga)
		.cancelable(false)
		.progress(true, 0)
		.show();
	new AsyncTask<Boolean, Integer, Object>() {
		// Counters reported to the user in the success dialog.
		int count_bookmark = 0;
		int count_label = 0;
		@Override
		protected Object doInBackground(Boolean... params) {
			// Parsed entities and the relId (backup-file-local id) mappings
			// needed to reconnect markers to labels after insertion.
			final List<Marker> markers = new ArrayList<>();
			final TObjectIntHashMap<Marker> markerToRelIdMap = new TObjectIntHashMap<>();
			final List<Label> labels = new ArrayList<>();
			final TObjectIntHashMap<Label> labelToRelIdMap = new TObjectIntHashMap<>();
			final TIntLongHashMap labelRelIdToAbsIdMap = new TIntLongHashMap();
			final TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap = new TIntObjectHashMap<>();
			try {
				Xml.parse(fis, Xml.Encoding.UTF_8, new DefaultHandler2() {
					@Override
					public void startElement(String uri, String localName, String qName, Attributes attributes) throws SAXException {
						switch (localName) {
							case BackupManager.XMLTAG_Bukmak2:
								// A marker element; null means the attributes were invalid.
								final Marker marker = BackupManager.markerFromAttributes(attributes);
								if (marker != null) {
									markers.add(marker);
									final int bookmark2_relId = BackupManager.getRelId(attributes);
									markerToRelIdMap.put(marker, bookmark2_relId);
									count_bookmark++;
								}
								break;
							case BackupManager.XMLTAG_Label: {
								final Label label = BackupManager.labelFromAttributes(attributes);
								int label_relId = BackupManager.getRelId(attributes);
								labels.add(label);
								labelToRelIdMap.put(label, label_relId);
								count_label++;
								break;
							}
							case Bookmark2_Label.XMLTAG_Bookmark2_Label: {
								// Association element: remember which label relIds belong
								// to which marker relId.
								final int bookmark2_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_bookmark2_relId));
								final int label_relId = Integer.parseInt(attributes.getValue("", Bookmark2_Label.XMLATTR_label_relId));
								TIntList labelRelIds = markerRelIdToLabelRelIdsMap.get(bookmark2_relId);
								if (labelRelIds == null) {
									labelRelIds = new TIntArrayList();
									markerRelIdToLabelRelIdsMap.put(bookmark2_relId, labelRelIds);
								}
								labelRelIds.add(label_relId);
								break;
							}
						}
					}
				});
				fis.close();
			} catch (Exception e) {
				// Returned to onPostExecute, which shows the error dialog.
				return e;
			}
			{ // create the needed labels, and map each relId to the label's absolute id.
				final HashMap<String, Label> judulMap = new HashMap<>();
				final List<Label> xlabelLama = S.getDb().listAllLabels();
				for (Label labelLama : xlabelLama) {
					judulMap.put(labelLama.title, labelLama);
				}
				for (Label label : labels) {
					// check whether a label with exactly the same title already exists
					Label labelLama = judulMap.get(label.title);
					final int labelRelId = labelToRelIdMap.get(label);
					if (labelLama != null) {
						// removed from v3: updating the old label's color
						labelRelIdToAbsIdMap.put(labelRelId, labelLama._id);
						AppLog.d(TAG, "label (lama) r->a : " + labelRelId + "->" + labelLama._id);
					} else { // not there yet, create a new one
						Label labelBaru = S.getDb().insertLabel(label.title, label.backgroundColor);
						labelRelIdToAbsIdMap.put(labelRelId, labelBaru._id);
						AppLog.d(TAG, "label (baru) r->a : " + labelRelId + "->" + labelBaru._id);
					}
				}
			}
			importBookmarks(markers, markerToRelIdMap, labelRelIdToAbsIdMap, markerRelIdToLabelRelIdsMap);
			return null;
		}
		@Override
		protected void onPostExecute(@NonNull Object result) {
			pd.dismiss();
			if (result instanceof Exception) {
				AppLog.e(TAG, "Error when importing markers", (Throwable) result);
				new MaterialDialog.Builder(activity)
					.content(activity.getString(R.string.terjadi_kesalahan_ketika_mengimpor_pesan, ((Exception) result).getMessage()))
					.positiveText(R.string.ok)
					.show();
			} else {
				final Dialog dialog = new MaterialDialog.Builder(activity)
					.content(activity.getString(R.string.impor_berhasil_angka_diproses, count_bookmark, count_label))
					.positiveText(R.string.ok)
					.show();
				if (finishActivityAfterwards) {
					dialog.setOnDismissListener(dialog1 -> activity.finish());
				}
			}
			if (runWhenDone != null) runWhenDone.run();
		}
	}.execute();
}
/**
 * Writes parsed markers and their label assignments to the database inside a
 * single transaction, then broadcasts that the attribute map changed.
 *
 * Markers that already exist (same ari, kind, and caption) are reused instead
 * of duplicated; label assignments are only inserted for markers that have no
 * existing assignments.
 *
 * @param markers parsed markers; entries may be replaced in-place by their
 *        existing database counterparts
 * @param markerToRelIdMap backup-file-local id for each parsed marker
 * @param labelRelIdToAbsIdMap maps label relIds to database label ids
 * @param markerRelIdToLabelRelIdsMap label relIds assigned to each marker relId
 */
public static void importBookmarks(List<Marker> markers, TObjectIntHashMap<Marker> markerToRelIdMap, TIntLongHashMap labelRelIdToAbsIdMap, TIntObjectHashMap<TIntList> markerRelIdToLabelRelIdsMap) {
	SQLiteDatabase db = S.getDb().getWritableDatabase();
	db.beginTransaction();
	try {
		final TIntObjectHashMap<Marker> markerRelIdToMarker = new TIntObjectHashMap<>();
		{ // write new markers (if not available yet)
			for (int i = 0; i < markers.size(); i++) {
				Marker marker = markers.get(i);
				final int marker_relId = markerToRelIdMap.get(marker);
				// migrate: look for existing marker with same kind, ari, and content
				try (Cursor cursor = db.query(
					Db.TABLE_Marker,
					null,
					Db.Marker.ari + "=? and " + Db.Marker.kind + "=? and " + Db.Marker.caption + "=?",
					ToStringArray(marker.ari, marker.kind.code, marker.caption),
					null, null, null
				)) {
					if (cursor.moveToNext()) {
						// Reuse the existing row and make the in-memory list
						// point at it so later steps use the stored gid.
						marker = InternalDb.markerFromCursor(cursor);
						markers.set(i, marker);
					} else {
						InternalDb.insertMarker(db, marker);
					}
					// map it
					markerRelIdToMarker.put(marker_relId, marker);
				}
			}
		}
		{ // now is marker-label assignments
			for (final int marker_relId : markerRelIdToLabelRelIdsMap.keys()) {
				final TIntList label_relIds = markerRelIdToLabelRelIdsMap.get(marker_relId);
				final Marker marker = markerRelIdToMarker.get(marker_relId);
				if (marker != null) {
					// existing labels > 0: ignore
					// existing labels == 0: insert
					final int existing_label_count = (int) DatabaseUtils.queryNumEntries(db, Db.TABLE_Marker_Label, Db.Marker_Label.marker_gid + "=?", ToStringArray(marker.gid));
					if (existing_label_count == 0) {
						for (int label_relId : label_relIds.toArray()) {
							final long label_id = labelRelIdToAbsIdMap.get(label_relId);
							if (label_id > 0) {
								final Label label = S.getDb().getLabelById(label_id);
								final Marker_Label marker_label = Marker_Label.createNewMarker_Label(marker.gid, label.gid);
								InternalDb.insertMarker_LabelIfNotExists(db, marker_label);
							} else {
								AppLog.w(TAG, "label_id is invalid!: " + label_id);
							}
						}
					}
				} else {
					AppLog.w(TAG, "wrong marker_relId: " + marker_relId);
				}
			}
		}
		db.setTransactionSuccessful();
	} finally {
		db.endTransaction();
	}
	App.getLbm().sendBroadcast(new Intent(IsiActivity.ACTION_ATTRIBUTE_MAP_CHANGED));
}
}<|fim▁end|>
| |
<|file_name|>mock_generator.py<|end_file_name|><|fim▁begin|>#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
import gevent
from gevent import monkey; monkey.patch_all()
import argparse
import os
import socket
import random
import math
import uuid
from netaddr import IPAddress
from pysandesh.sandesh_base import *
from pysandesh.util import UTCTimestampUsec
from sandesh_common.vns.ttypes import Module, NodeType
from sandesh_common.vns.constants import ModuleNames, Module2NodeType, \
NodeTypeNames
from vrouter.sandesh.virtual_network.ttypes import UveVirtualNetworkAgent, \
InterVnStats, UveInterVnStats, UveVirtualNetworkAgentTrace
from vrouter.sandesh.virtual_machine.ttypes import VmInterfaceAgent, \
UveVirtualMachineAgent, UveVirtualMachineAgentTrace
from vrouter.vrouter.ttypes import VrouterStatsAgent, VrouterStats
from vrouter.cpuinfo import CpuInfoData
from vrouter.sandesh.flow.ttypes import *
class MockGenerator(object):
    """Simulates one vRouter agent Sandesh generator.

    Spawns three greenlets that periodically send (a) virtual-network and
    virtual-machine UVEs, (b) vRouter CPU stats, and (c) flow samples to the
    configured collector.
    """
    _VN_PREFIX = 'default-domain:mock-gen-test:vn'
    _VM_PREFIX = 'vm'
    _BYTES_PER_PACKET = 1024
    _OTHER_VN_PKTS_PER_SEC = 1000
    _UVE_MSG_INTVL_IN_SEC = 10
    _GEVENT_SPAWN_DELAY_IN_SEC = 10
    _FLOW_GEVENT_SPAWN_DELAY_IN_SEC = 30
    _NUM_FLOWS_IN_ITERATION = 145 * 10
    _FLOW_MSG_INTVL_IN_SEC = 1
    _FLOW_PKTS_PER_SEC = 100

    def __init__(self, hostname, module_name, node_type_name, instance_id,
                 start_vn, end_vn, other_vn,
                 num_vns, vm_iterations, collectors, ip_vns, ip_start_index,
                 num_flows_per_vm):
        # [start_vn, end_vn) is the slice of VN indices this generator owns;
        # other_vn is the first "peer" VN used for inter-VN traffic stats.
        self._module_name = module_name
        self._hostname = hostname
        self._node_type_name = node_type_name
        self._instance_id = instance_id
        self._start_vn = start_vn
        self._end_vn = end_vn
        self._num_vns = num_vns
        self._other_vn = other_vn
        self._ip_vns = ip_vns
        self._ip_start_index = ip_start_index
        self._vm_iterations = vm_iterations
        self._num_flows_per_vm = num_flows_per_vm
        self._sandesh_instance = Sandesh()
        if not isinstance(collectors, list):
            collectors = [collectors]
        self._collectors = collectors
    #end __init__

    def run_generator(self):
        """Initializes Sandesh and spawns the three sender greenlets.

        Returns the list of greenlets so the caller can join on them.
        Spawn times are jittered so many generators don't send in lockstep.
        """
        self._sandesh_instance.init_generator(self._module_name, self._hostname,
            self._node_type_name, self._instance_id, self._collectors,
            '', -1, ['vrouter'])
        self._sandesh_instance.set_logging_params(enable_local_log = False,
            level = SandeshLevel.SYS_EMERG)
        send_uve_task = gevent.spawn_later(
            random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_uve_sandesh)
        cpu_info_task = gevent.spawn_later(
            random.randint(0, self._GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_cpu_info)
        send_flow_task = gevent.spawn_later(
            random.randint(5, self._FLOW_GEVENT_SPAWN_DELAY_IN_SEC),
            self._send_flow_sandesh)
        return [send_uve_task, cpu_info_task, send_flow_task]
    #end run_generator

    def _send_flow_sandesh(self):
        """Builds a fixed set of flows once, then resends them forever with
        randomly growing packet/byte counters, sleeping after every
        _NUM_FLOWS_IN_ITERATION sends."""
        flows = []
        while True:
            # Populate flows if not done
            if len(flows) == 0:
                other_vn = self._other_vn
                for vn in range(self._start_vn, self._end_vn):
                    for nvm in range(self._vm_iterations):
                        for nflow in range(self._num_flows_per_vm):
                            init_packets = random.randint(1, \
                                self._FLOW_PKTS_PER_SEC)
                            init_bytes = init_packets * \
                                random.randint(1, self._BYTES_PER_PACKET)
                            sourceip = int(self._ip_vns[vn] + \
                                self._ip_start_index + nvm)
                            destip = int(self._ip_vns[other_vn] + \
                                self._ip_start_index + nvm)
                            flows.append(FlowDataIpv4(
                                flowuuid = str(uuid.uuid1()),
                                direction_ing = random.randint(0, 1),
                                sourcevn = self._VN_PREFIX + str(vn),
                                destvn = self._VN_PREFIX + str(other_vn),
                                sourceip = sourceip,
                                destip = destip,
                                sport = random.randint(0, 65535),
                                dport = random.randint(0, 65535),
                                protocol = random.choice([6, 17, 1]),
                                setup_time = UTCTimestampUsec(),
                                packets = init_packets,
                                bytes = init_bytes,
                                diff_packets = init_packets,
                                diff_bytes = init_bytes))
                    other_vn = (other_vn + 1) % self._num_vns
            # Send the flows periodically
            flow_cnt = 0
            for flow_data in flows:
                new_packets = random.randint(1, self._FLOW_PKTS_PER_SEC)
                new_bytes = new_packets * \
                    random.randint(1, self._BYTES_PER_PACKET)
                flow_data.packets += new_packets
                flow_data.bytes += new_bytes
                flow_data.diff_packets = new_packets
                flow_data.diff_bytes = new_bytes
                flow_object = FlowDataIpv4Object(flowdata = flow_data,
                    sandesh = self._sandesh_instance)
                flow_object.send(sandesh = self._sandesh_instance)
                flow_cnt += 1
                if flow_cnt == self._NUM_FLOWS_IN_ITERATION:
                    flow_cnt = 0
                    gevent.sleep(self._FLOW_MSG_INTVL_IN_SEC)
                else:
                    # Yield to other greenlets without delaying.
                    gevent.sleep(0)
    #end _send_flow_sandesh

    def _send_cpu_info(self):
        """Sends real (local-process) CPU/memory stats as this mock vRouter's
        VrouterStats once a minute."""
        vrouter_cpu_info = CpuInfoData()
        vrouter_stats = VrouterStatsAgent()
        vrouter_stats.name = self._hostname
        while True:
            vrouter_stats.cpu_info = vrouter_cpu_info.get_cpu_info(system = False)
            vrouter_stats.cpu_share = vrouter_stats.cpu_info.cpu_share
            vrouter_stats.virt_mem = vrouter_stats.cpu_info.meminfo.virt
            stats = VrouterStats(sandesh = self._sandesh_instance,
                data = vrouter_stats)
            stats.send(sandesh = self._sandesh_instance)
            gevent.sleep(60)
    #end _send_cpu_info

    def _populate_other_vn_stats(self, other_vn, intervn_list, vn, vn_stats,
                                 in_uve_intervn_list, out_uve_intervn_list):
        """Generates one random inter-VN traffic sample between vn and
        other_vn, accumulates it into vn_stats (cumulative totals keyed by
        vn -> other_vn), and appends both the sample and the cumulative UVE
        entries to the given output lists."""
        other_vn_name = self._VN_PREFIX + str(other_vn)
        intervn = InterVnStats()
        intervn.other_vn = other_vn_name
        intervn.vrouter = self._hostname
        intervn.in_tpkts = random.randint(1, self._OTHER_VN_PKTS_PER_SEC * \
            self._num_vns * self._UVE_MSG_INTVL_IN_SEC)
        intervn.in_bytes = intervn.in_tpkts * random.randint(1, \
            self._BYTES_PER_PACKET)
        intervn.out_tpkts = random.randint(1, self._OTHER_VN_PKTS_PER_SEC * \
            self._num_vns * self._UVE_MSG_INTVL_IN_SEC)
        intervn.out_bytes = intervn.out_tpkts * random.randint(1, \
            self._BYTES_PER_PACKET)
        if vn in vn_stats:
            other_vn_stats = vn_stats[vn]
        else:
            other_vn_stats = None
        if other_vn_stats is None:
            other_vn_stats = {}
            other_vn_stats[other_vn] = (intervn.in_tpkts, intervn.in_bytes, \
                intervn.out_tpkts, intervn.out_bytes)
        else:
            if other_vn in other_vn_stats:
                # Accumulate on top of the previous totals.
                prev_in_tpkts, prev_in_bytes, prev_out_tpkts, prev_out_bytes = \
                    other_vn_stats[other_vn]
                new_in_tpkts = prev_in_tpkts + intervn.in_tpkts
                new_in_bytes = prev_in_bytes + intervn.in_bytes
                new_out_tpkts = prev_out_tpkts + intervn.out_tpkts
                new_out_bytes = prev_out_bytes + intervn.out_bytes
                other_vn_stats[other_vn] = (new_in_tpkts, new_in_bytes, \
                    new_out_tpkts, new_out_bytes)
            else:
                other_vn_stats[other_vn] = (intervn.in_tpkts, \
                    intervn.in_bytes, intervn.out_tpkts, intervn.out_bytes)
        vn_stats[vn] = other_vn_stats
        in_uve_intervn = UveInterVnStats()
        in_uve_intervn.other_vn = other_vn_name
        out_uve_intervn = UveInterVnStats()
        out_uve_intervn.other_vn = other_vn_name
        in_uve_intervn.tpkts, in_uve_intervn.bytes, out_uve_intervn.tpkts, \
            out_uve_intervn.bytes = other_vn_stats[other_vn]
        in_uve_intervn_list.append(in_uve_intervn)
        out_uve_intervn_list.append(out_uve_intervn)
        intervn_list.append(intervn)
    #end _populate_other_vn_stats

    def _send_uve_sandesh(self):
        """Main UVE loop: for each owned VN, sends VN UVEs with inter-VN
        stats and per-VM UVEs; after the first full pass, also (once) sends
        each VN's VM list."""
        vn_stats = {}
        vn_vm_list = {}
        vn_vm_list_populated = False
        vn_vm_list_sent = False
        while True:
            # Send VM list if populated and not already sent
            if vn_vm_list_populated and not vn_vm_list_sent:
                for vn in range(self._start_vn, self._end_vn):
                    vn_agent = UveVirtualNetworkAgent(virtualmachine_list = \
                        vn_vm_list[vn])
                    vn_agent.name = self._VN_PREFIX + str(vn)
                    uve_agent_vn = UveVirtualNetworkAgentTrace( \
                        data = vn_agent, sandesh = self._sandesh_instance)
                    uve_agent_vn.send(sandesh = self._sandesh_instance)
                    gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
                vn_vm_list_sent = True
            other_vn = self._other_vn
            for vn in range(self._start_vn, self._end_vn):
                intervn_list = []
                in_uve_intervn_list = []
                out_uve_intervn_list = []
                # Populate inter-VN and UVE inter-VN stats for other_vn
                self._populate_other_vn_stats(other_vn, intervn_list, vn, \
                    vn_stats, in_uve_intervn_list, out_uve_intervn_list)
                # Populate inter-VN and UVE inter-VN stats for self - vn
                self._populate_other_vn_stats(vn, intervn_list, vn, \
                    vn_stats, in_uve_intervn_list, out_uve_intervn_list)
                vn_agent = UveVirtualNetworkAgent(vn_stats = intervn_list,
                    in_stats = in_uve_intervn_list,
                    out_stats = out_uve_intervn_list)
                vn_agent.name = self._VN_PREFIX + str(vn)
                uve_agent_vn = UveVirtualNetworkAgentTrace(data = vn_agent,
                    sandesh = self._sandesh_instance)
                uve_agent_vn.send(sandesh = self._sandesh_instance)
                for nvm in range(self._vm_iterations):
                    vm_if = VmInterfaceAgent()
                    vm_if.name = 'p2p1'
                    vm_if.ip_address = str(self._ip_vns[vn] + \
                        self._ip_start_index + nvm)
                    vm_if.virtual_network = vn_agent.name
                    vm_agent = UveVirtualMachineAgent()
                    vm_name = vn_agent.name + ':' + self._hostname + ':' + \
                        self._VM_PREFIX + str(vn) + '-' + str(nvm)
                    vm_agent.name = vm_name
                    vm_agent.interface_list = []
                    vm_agent.interface_list.append(vm_if)
                    uve_agent_vm = UveVirtualMachineAgentTrace(data = vm_agent,
                        sandesh = self._sandesh_instance)
                    uve_agent_vm.send(sandesh = self._sandesh_instance)
                    # Populate VN VM list
                    if not vn in vn_vm_list:
                        vn_vm_list[vn] = [vm_name]
                    else:
                        vm_list = vn_vm_list[vn]
                        vm_list.append(vm_name)
                    gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
                other_vn += 1
                gevent.sleep(random.randint(0, self._UVE_MSG_INTVL_IN_SEC))
            vn_vm_list_populated = True
    #end _send_uve_sandesh

#end class MockGenerator
class MockGeneratorTest(object):
    """Parses command-line options and fans out work over a set of
    MockGenerator instances (one simulated vRouter each)."""

    def __init__(self):
        self._parse_args()
    #end __init__

    def _parse_args(self):
        '''
        Eg. python mock_generator.py
        --num_generators 10
        --collectors 127.0.0.1:8086
        --num_instances_per_generator 10
        --num_networks 100
        --num_flows_per_instance 10
        --start_ip_address 1.0.0.1
        '''
        parser = argparse.ArgumentParser(
            formatter_class=argparse.ArgumentDefaultsHelpFormatter)
        parser.add_argument("--num_generators", type=int,
            default=10,
            help="Number of mock generators")
        parser.add_argument("--num_instances_per_generator", type=int,
            default=10,
            help="Number of instances (virtual machines) per generator")
        parser.add_argument("--num_networks", type=int,
            default=100,
            help="Number of virtual networks")
        parser.add_argument("--collectors",
            default='127.0.0.1:8086',
            help="List of Collector IP addresses in ip:port format",
            nargs="+")
        parser.add_argument("--num_flows_per_instance", type=int,
            default=10,
            help="Number of flows per instance (virtual machine)")
        parser.add_argument("--start_ip_address",
            default="1.0.0.1",
            help="Start IP address to be used for instances")
        self._args = parser.parse_args()
        # When the default (a single string) is used, normalize to a list.
        # NOTE: basestring makes this file Python 2 only.
        if isinstance(self._args.collectors, basestring):
            self._args.collectors = self._args.collectors.split()
    #end _parse_args

    def setup(self):
        """Partitions VNs, IP ranges, and collectors across the generators.

        Returns False (after printing a message) when num_networks is not
        larger than num_instances_per_generator, True otherwise.
        """
        collectors = self._args.collectors
        ngens = self._args.num_generators
        pid = os.getpid()
        num_instances = self._args.num_instances_per_generator
        num_networks = self._args.num_networks
        module = Module.VROUTER_AGENT
        moduleid = ModuleNames[module]
        node_type = Module2NodeType[module]
        node_type_name = NodeTypeNames[node_type]
        # PID in the hostname keeps multiple test runs distinct.
        hostname = socket.gethostname() + '-' + str(pid)
        hostnames = [hostname + '-' + str(x) for x in range(ngens)]
        gen_factor = num_networks / num_instances
        if gen_factor == 0:
            print("Number of virtual networks(%d) should be "
                "greater than number of instances per generator(%d)" % \
                (num_networks, num_instances))
            return False
        # Each generator gets a contiguous [start_vn, end_vn) slice and a
        # "peer" VN offset half the network range away for inter-VN traffic.
        start_vns = [(x % gen_factor) * num_instances for x in range(ngens)]
        end_vns = [((x % gen_factor) + 1) * num_instances \
            for x in range(ngens)]
        other_vn_adj = num_networks / 2
        other_vns = [x - other_vn_adj if x >= other_vn_adj \
            else x + other_vn_adj for x in start_vns]
        instance_iterations = int(math.ceil(float(num_instances) / \
            num_networks))
        num_ips_per_vn = int(math.ceil(float(ngens * num_instances) / \
            num_networks))
        start_ip_address = IPAddress(self._args.start_ip_address)
        ip_vns = [start_ip_address + num_ips_per_vn * x for x in \
            range(num_networks)]
        start_ip_index = [x * num_instances / num_networks for x in \
            range(ngens)]
        # Collectors are assigned round-robin across generators.
        self._generators = [MockGenerator(hostnames[x], moduleid, \
            node_type_name, str(x), start_vns[x], end_vns[x], other_vns[x], \
            num_networks, instance_iterations, \
            collectors[x % len(collectors)], ip_vns, \
            start_ip_index[x], self._args.num_flows_per_instance) \
            for x in range(ngens)]
        return True
    #end setup

    def run(self):
        """Starts every generator's greenlets and blocks until all finish."""
        generator_run_tasks = [gen.run_generator() for gen in self._generators]
        generator_tasks = [gen_task for gen_task_sublist in \
            generator_run_tasks for gen_task in gen_task_sublist ]
        gevent.joinall(generator_tasks)
    #end run

#end class MockGeneratorTest
def main():
    """Entry point: build the test harness and run it if setup succeeds."""
    test = MockGeneratorTest()
    if test.setup():
        test.run()
#end main

if __name__ == '__main__':
    main()
| |
<|file_name|>Utils.java<|end_file_name|><|fim▁begin|>package cn.hicc.information.sensorsignin.utils;
import android.app.Activity;
import android.app.ActivityManager;
import android.app.AlertDialog;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.provider.Settings;
import android.widget.TextView;
import java.util.ArrayList;
import cn.hicc.information.sensorsignin.MyApplication;
/**
* 工具类
*/
public class Utils {
/**
* 判断网络情况
*
* @param context 上下文
* @return false 表示没有网络 true 表示有网络
*/
public static boolean isNetworkAvalible(Context context) {
// 获得网络状态管理器
ConnectivityManager connectivityManager = (ConnectivityManager) context
.getSystemService(Context.CONNECTIVITY_SERVICE);
if (connectivityManager == null) {<|fim▁hole|> NetworkInfo[] net_info = connectivityManager.getAllNetworkInfo();
if (net_info != null) {
for (int i = 0; i < net_info.length; i++) {
// 判断获得的网络状态是否是处于连接状态
if (net_info[i].getState() == NetworkInfo.State.CONNECTED) {
return true;
}
}
}
}
return false;
}
// 如果没有网络,则弹出网络设置对话框
public static void checkNetwork(final Activity activity) {
if (!Utils.isNetworkAvalible(activity)) {
TextView msg = new TextView(activity);
msg.setText(" 当前没有可以使用的网络,部分功能可能无法使用,请设置网络!");
new AlertDialog.Builder(activity)
//.setIcon(R.mipmap.ic_launcher)
.setTitle("网络状态提示")
.setView(msg)
.setNegativeButton("朕知道了",
new DialogInterface.OnClickListener() {
@Override
public void onClick(DialogInterface dialogInterface, int i) {
}
})
.setPositiveButton("开启网络",
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog,
int whichButton) {
// 跳转到设置界面
activity.startActivityForResult(new Intent(
Settings.ACTION_WIRELESS_SETTINGS),
0);
}
}).create().show();
}
return;
}
// 判断服务是否在运行
public static boolean ServiceIsWorked(String name) {
ActivityManager myManager = (ActivityManager) MyApplication.getContext().getSystemService(Context.ACTIVITY_SERVICE);
ArrayList<ActivityManager.RunningServiceInfo> runningService = (ArrayList <ActivityManager.RunningServiceInfo>)myManager.getRunningServices(300);
for (int i = 0; i < runningService.size(); i++) {
if (runningService.get(i).service.getClassName().toString().equals(name)) {
return true;
}
}
return false;
}
}<|fim▁end|>
|
return false;
} else {
// 建立网络数组
|
<|file_name|>app.e2e-spec.ts<|end_file_name|><|fim▁begin|>import { AppPage } from './app.po';
describe('material2 App', () => {
let page: AppPage;
beforeEach(() => {
page = new AppPage();
});
it('should display welcome message', () => {<|fim▁hole|>});<|fim▁end|>
|
page.navigateTo();
expect(page.getParagraphText()).toEqual('Welcome to app!');
});
|
<|file_name|>libvpx_vp9_encoder.cc<|end_file_name|><|fim▁begin|>/*
* Copyright (c) 2020 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*
*/
#ifdef RTC_ENABLE_VP9
#include "modules/video_coding/codecs/vp9/libvpx_vp9_encoder.h"
#include <algorithm>
#include <limits>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/memory/memory.h"
#include "absl/strings/match.h"
#include "api/video/color_space.h"
#include "api/video/i010_buffer.h"
#include "common_video/include/video_frame_buffer.h"
#include "common_video/libyuv/include/webrtc_libyuv.h"
#include "modules/rtp_rtcp/include/rtp_rtcp_defines.h"
#include "modules/video_coding/codecs/vp9/svc_rate_allocator.h"
#include "modules/video_coding/svc/create_scalability_structure.h"
#include "modules/video_coding/svc/scalable_video_controller.h"
#include "modules/video_coding/svc/scalable_video_controller_no_layering.h"
#include "modules/video_coding/utility/vp9_uncompressed_header_parser.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_list.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/rate_control_settings.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/time_utils.h"
#include "rtc_base/trace_event.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include <libvpx/vp8cx.h>
#include <libvpx/vpx_encoder.h>
namespace webrtc {
namespace {
// Maps from gof_idx to encoder internal reference frame buffer index. These
// maps work for 1,2 and 3 temporal layers with GOF length of 1,2 and 4 frames.
uint8_t kRefBufIdx[4] = {0, 0, 0, 1};
uint8_t kUpdBufIdx[4] = {0, 0, 1, 0};
// Maximum allowed PID difference for different per-layer frame-rate case.
const int kMaxAllowedPidDiff = 30;
// TODO(ilink): Tune these thresholds further.
// Selected using ConverenceMotion_1280_720_50.yuv clip.
// No toggling observed on any link capacity from 100-2000kbps.
// HD was reached consistently when link capacity was 1500kbps.
// Set resolutions are a bit more conservative than svc_config.cc sets, e.g.
// for 300kbps resolution converged to 270p instead of 360p.
constexpr int kLowVp9QpThreshold = 149;
constexpr int kHighVp9QpThreshold = 205;
// Returns the half-open range [first, last) of spatial layers that have a
// nonzero bitrate allocation; {0, 0} when no layer is active.
std::pair<size_t, size_t> GetActiveLayers(
    const VideoBitrateAllocation& allocation) {
  size_t first = 0;
  while (first < kMaxSpatialLayers &&
         allocation.GetSpatialLayerSum(first) == 0) {
    ++first;
  }
  if (first == kMaxSpatialLayers) {
    return {0, 0};
  }
  size_t last = first + 1;
  while (last < kMaxSpatialLayers &&
         allocation.GetSpatialLayerSum(last) > 0) {
    ++last;
  }
  return std::make_pair(first, last);
}
// Builds the scalability-structure controller matching the codec's SVC
// configuration (e.g. "L3T2", "S2T1", "L2T3_KEY"), validating the spatial
// resolution ratios. Returns nullptr for unsupported configurations
// (including screensharing, which is not yet composed here).
std::unique_ptr<ScalableVideoController> CreateVp9ScalabilityStructure(
    const VideoCodec& codec) {
  int num_spatial_layers = codec.VP9().numberOfSpatialLayers;
  int num_temporal_layers =
      std::max(1, int{codec.VP9().numberOfTemporalLayers});
  if (num_spatial_layers == 1 && num_temporal_layers == 1) {
    return std::make_unique<ScalableVideoControllerNoLayering>();
  }

  char name[20];
  rtc::SimpleStringBuilder ss(name);
  if (codec.mode == VideoCodecMode::kScreensharing) {
    // TODO(bugs.webrtc.org/11999): Compose names of the structures when they
    // are implemented.
    return nullptr;
  } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOn ||
             num_spatial_layers == 1) {
    ss << "L" << num_spatial_layers << "T" << num_temporal_layers;
  } else if (codec.VP9().interLayerPred == InterLayerPredMode::kOnKeyPic) {
    ss << "L" << num_spatial_layers << "T" << num_temporal_layers << "_KEY";
  } else {
    RTC_DCHECK_EQ(codec.VP9().interLayerPred, InterLayerPredMode::kOff);
    ss << "S" << num_spatial_layers << "T" << num_temporal_layers;
  }

  // Check spatial ratio.
  if (num_spatial_layers > 1 && codec.spatialLayers[0].targetBitrate > 0) {
    if (codec.width != codec.spatialLayers[num_spatial_layers - 1].width ||
        codec.height != codec.spatialLayers[num_spatial_layers - 1].height) {
      RTC_LOG(LS_WARNING)
          << "Top layer resolution expected to match overall resolution";
      return nullptr;
    }
    // Check if the ratio is one of the supported.
    int numerator;
    int denominator;
    if (codec.spatialLayers[1].width == 2 * codec.spatialLayers[0].width) {
      numerator = 1;
      denominator = 2;
      // no suffix for 1:2 ratio.
    } else if (2 * codec.spatialLayers[1].width ==
               3 * codec.spatialLayers[0].width) {
      numerator = 2;
      denominator = 3;
      ss << "h";  // "h" suffix marks the 2:3 ("1.5x") ratio structures.
    } else {
      RTC_LOG(LS_WARNING) << "Unsupported scalability ratio "
                          << codec.spatialLayers[0].width << ":"
                          << codec.spatialLayers[1].width;
      return nullptr;
    }
    // Validate ratio is consistent for all spatial layer transitions.
    for (int sid = 1; sid < num_spatial_layers; ++sid) {
      if (codec.spatialLayers[sid].width * numerator !=
              codec.spatialLayers[sid - 1].width * denominator ||
          codec.spatialLayers[sid].height * numerator !=
              codec.spatialLayers[sid - 1].height * denominator) {
        RTC_LOG(LS_WARNING) << "Inconsistent scalability ratio " << numerator
                            << ":" << denominator;
        return nullptr;
      }
    }
  }

  auto scalability_structure_controller = CreateScalabilityStructure(name);
  if (scalability_structure_controller == nullptr) {
    RTC_LOG(LS_WARNING) << "Unsupported scalability structure " << name;
  } else {
    RTC_LOG(LS_INFO) << "Created scalability structure " << name;
  }
  return scalability_structure_controller;
}
// Translates per-layer frame configs from the SVC controller into libvpx's
// external reference-frame control structure.
vpx_svc_ref_frame_config_t Vp9References(
    rtc::ArrayView<const ScalableVideoController::LayerFrameConfig> layers) {
  vpx_svc_ref_frame_config_t ref_config = {};
  for (const ScalableVideoController::LayerFrameConfig& layer_frame : layers) {
    const auto& buffers = layer_frame.Buffers();
    RTC_DCHECK_LE(buffers.size(), 3);
    const int sid = layer_frame.SpatialId();
    // Fills one of the three reference slots (last/golden/altref) of spatial
    // layer `sid` from the buffer description at `index`, if present.
    auto fill_slot = [&](size_t index, int* fb_idx, int* referenced) {
      if (index >= buffers.size()) {
        return;
      }
      fb_idx[sid] = buffers[index].id;
      referenced[sid] = buffers[index].referenced;
      if (buffers[index].updated) {
        ref_config.update_buffer_slot[sid] |= (1 << buffers[index].id);
      }
    };
    fill_slot(0, ref_config.lst_fb_idx, ref_config.reference_last);
    fill_slot(1, ref_config.gld_fb_idx, ref_config.reference_golden);
    fill_slot(2, ref_config.alt_fb_idx, ref_config.reference_alt_ref);
  }
  // TODO(bugs.webrtc.org/11999): Fill ref_config.duration
  return ref_config;
}
} // namespace
// Trampoline from the libvpx C callback into the owning encoder instance.
void LibvpxVp9Encoder::EncoderOutputCodedPacketCallback(vpx_codec_cx_pkt* pkt,
                                                        void* user_data) {
  static_cast<LibvpxVp9Encoder*>(user_data)->GetEncodedLayerFrame(pkt);
}
// Construction only records configuration and field-trial flags; the libvpx
// encoder itself is created later in InitEncode.
LibvpxVp9Encoder::LibvpxVp9Encoder(const cricket::VideoCodec& codec,
                                   std::unique_ptr<LibvpxInterface> interface,
                                   const WebRtcKeyValueConfig& trials)
    : libvpx_(std::move(interface)),
      encoded_image_(),
      encoded_complete_callback_(nullptr),
      profile_(
          ParseSdpForVP9Profile(codec.params).value_or(VP9Profile::kProfile0)),
      inited_(false),
      timestamp_(0),
      rc_max_intra_target_(0),
      encoder_(nullptr),
      config_(nullptr),
      raw_(nullptr),
      input_image_(nullptr),
      force_key_frame_(true),
      pics_since_key_(0),
      num_temporal_layers_(0),
      num_spatial_layers_(0),
      num_active_spatial_layers_(0),
      first_active_layer_(0),
      layer_deactivation_requires_key_frame_(absl::StartsWith(
          trials.Lookup("WebRTC-Vp9IssueKeyFrameOnLayerDeactivation"),
          "Enabled")),
      is_svc_(false),
      inter_layer_pred_(InterLayerPredMode::kOn),
      external_ref_control_(false),  // Set in InitEncode because of tests.
      trusted_rate_controller_(
          RateControlSettings::ParseFromKeyValueConfig(&trials)
              .LibvpxVp9TrustedRateController()),
      layer_buffering_(false),
      full_superframe_drop_(true),
      first_frame_in_picture_(true),
      ss_info_needed_(false),
      force_all_active_layers_(false),
      use_svc_controller_(
          absl::StartsWith(trials.Lookup("WebRTC-Vp9DependencyDescriptor"),
                           "Enabled")),
      is_flexible_mode_(false),
      variable_framerate_experiment_(ParseVariableFramerateConfig(trials)),
      variable_framerate_controller_(
          variable_framerate_experiment_.framerate_limit),
      quality_scaler_experiment_(ParseQualityScalerConfig(trials)),
      external_ref_ctrl_(
          !absl::StartsWith(trials.Lookup("WebRTC-Vp9ExternalRefCtrl"),
                            "Disabled")),
      performance_flags_(ParsePerformanceFlagsFromTrials(trials)),
      num_steady_state_frames_(0),
      config_changed_(true) {
  codec_ = {};
  memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
}
// Releases encoder/config/raw-image resources; Release() is idempotent.
LibvpxVp9Encoder::~LibvpxVp9Encoder() {
  Release();
}
// FEC control is not used by this encoder implementation.
void LibvpxVp9Encoder::SetFecControllerOverride(FecControllerOverride*) {
  // Ignored.
}
// Tears down the libvpx codec instance and frees owned buffers. Safe to call
// multiple times; afterwards the encoder must be re-initialized before use.
int LibvpxVp9Encoder::Release() {
  int ret_val = WEBRTC_VIDEO_CODEC_OK;

  if (encoder_ != nullptr) {
    // Only a successfully initialized codec needs codec_destroy.
    if (inited_ && libvpx_->codec_destroy(encoder_)) {
      ret_val = WEBRTC_VIDEO_CODEC_MEMORY;
    }
    delete encoder_;
    encoder_ = nullptr;
  }

  // delete on nullptr is a no-op.
  delete config_;
  config_ = nullptr;

  if (raw_ != nullptr) {
    libvpx_->img_free(raw_);
    raw_ = nullptr;
  }

  inited_ = false;
  return ret_val;
}
bool LibvpxVp9Encoder::ExplicitlyConfiguredSpatialLayers() const {
  // We check target_bitrate_bps of the 0th layer to see if the spatial layers
  // (i.e. bitrates) were explicitly configured.
  return codec_.spatialLayers[0].targetBitrate > 0;
}
// Applies |bitrate_allocation| to the encoder configuration: per-spatial and
// per-temporal layer target bitrates, frame-rate controller targets and the
// active-layer bookkeeping. Returns false when scaling factors are missing or
// the number of temporal layers is unsupported. Side effects: may set
// force_key_frame_ when the change in active layers requires a key frame,
// marks ss_info_needed_ when the active-layer set changes, updates
// num_active_spatial_layers_/first_active_layer_, notifies svc_controller_,
// and sets config_changed_ so the config is pushed to libvpx on next Encode().
bool LibvpxVp9Encoder::SetSvcRates(
    const VideoBitrateAllocation& bitrate_allocation) {
  std::pair<size_t, size_t> current_layers =
      GetActiveLayers(current_bitrate_allocation_);
  std::pair<size_t, size_t> new_layers = GetActiveLayers(bitrate_allocation);
  // Without full inter-layer prediction, a newly enabled layer has no valid
  // spatial reference, so enabling layers requires a key frame.
  const bool layer_activation_requires_key_frame =
      inter_layer_pred_ == InterLayerPredMode::kOff ||
      inter_layer_pred_ == InterLayerPredMode::kOnKeyPic;
  const bool lower_layers_enabled = new_layers.first < current_layers.first;
  const bool higher_layers_enabled = new_layers.second > current_layers.second;
  const bool disabled_layers = new_layers.first > current_layers.first ||
                               new_layers.second < current_layers.second;
  if (lower_layers_enabled ||
      (higher_layers_enabled && layer_activation_requires_key_frame) ||
      (disabled_layers && layer_deactivation_requires_key_frame_)) {
    force_key_frame_ = true;
  }
  // A changed set of active layers must be re-announced in the SS data.
  if (current_layers != new_layers) {
    ss_info_needed_ = true;
  }
  config_->rc_target_bitrate = bitrate_allocation.get_sum_kbps();
  if (ExplicitlyConfiguredSpatialLayers()) {
    // Per-layer bitrates were supplied by the application; copy them through.
    for (size_t sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
      const bool was_layer_active = (config_->ss_target_bitrate[sl_idx] > 0);
      config_->ss_target_bitrate[sl_idx] =
          bitrate_allocation.GetSpatialLayerSum(sl_idx) / 1000;
      for (size_t tl_idx = 0; tl_idx < num_temporal_layers_; ++tl_idx) {
        config_->layer_target_bitrate[sl_idx * num_temporal_layers_ + tl_idx] =
            bitrate_allocation.GetTemporalLayerSum(sl_idx, tl_idx) / 1000;
      }
      if (!was_layer_active) {
        // Reset frame rate controller if layer is resumed after pause.
        framerate_controller_[sl_idx].Reset();
      }
      framerate_controller_[sl_idx].SetTargetRate(
          codec_.spatialLayers[sl_idx].maxFramerate);
    }
  } else {
    // No explicit per-layer config: split the total rate across spatial
    // layers proportionally to their scaling factors, then apply fixed
    // temporal splits (100%; 2/3 + rest; or 50% / 25% / 25%).
    float rate_ratio[VPX_MAX_LAYERS] = {0};
    float total = 0;
    for (int i = 0; i < num_spatial_layers_; ++i) {
      if (svc_params_.scaling_factor_num[i] <= 0 ||
          svc_params_.scaling_factor_den[i] <= 0) {
        RTC_LOG(LS_ERROR) << "Scaling factors not specified!";
        return false;
      }
      rate_ratio[i] = static_cast<float>(svc_params_.scaling_factor_num[i]) /
                      svc_params_.scaling_factor_den[i];
      total += rate_ratio[i];
    }
    for (int i = 0; i < num_spatial_layers_; ++i) {
      RTC_CHECK_GT(total, 0);
      config_->ss_target_bitrate[i] = static_cast<unsigned int>(
          config_->rc_target_bitrate * rate_ratio[i] / total);
      if (num_temporal_layers_ == 1) {
        config_->layer_target_bitrate[i] = config_->ss_target_bitrate[i];
      } else if (num_temporal_layers_ == 2) {
        config_->layer_target_bitrate[i * num_temporal_layers_] =
            config_->ss_target_bitrate[i] * 2 / 3;
        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
            config_->ss_target_bitrate[i];
      } else if (num_temporal_layers_ == 3) {
        config_->layer_target_bitrate[i * num_temporal_layers_] =
            config_->ss_target_bitrate[i] / 2;
        config_->layer_target_bitrate[i * num_temporal_layers_ + 1] =
            config_->layer_target_bitrate[i * num_temporal_layers_] +
            (config_->ss_target_bitrate[i] / 4);
        config_->layer_target_bitrate[i * num_temporal_layers_ + 2] =
            config_->ss_target_bitrate[i];
      } else {
        RTC_LOG(LS_ERROR) << "Unsupported number of temporal layers: "
                          << num_temporal_layers_;
        return false;
      }
      framerate_controller_[i].SetTargetRate(codec_.maxFramerate);
    }
  }
  // Recompute the contiguous range of active spatial layers. Deactivating
  // only a middle layer is not supported (DCHECK below).
  num_active_spatial_layers_ = 0;
  first_active_layer_ = 0;
  bool seen_active_layer = false;
  bool expect_no_more_active_layers = false;
  for (int i = 0; i < num_spatial_layers_; ++i) {
    if (config_->ss_target_bitrate[i] > 0) {
      RTC_DCHECK(!expect_no_more_active_layers) << "Only middle layer is "
                                                   "deactivated.";
      if (!seen_active_layer) {
        first_active_layer_ = i;
      }
      num_active_spatial_layers_ = i + 1;
      seen_active_layer = true;
    } else {
      expect_no_more_active_layers = seen_active_layer;
    }
  }
  if (higher_layers_enabled && !force_key_frame_) {
    // Prohibit drop of all layers for the next frame, so newly enabled
    // layer would have a valid spatial reference.
    for (size_t i = 0; i < num_spatial_layers_; ++i) {
      svc_drop_frame_.framedrop_thresh[i] = 0;
    }
    force_all_active_layers_ = true;
  }
  if (svc_controller_) {
    // Mirror the per-layer targets into a VideoBitrateAllocation for the
    // scalability-structure controller.
    VideoBitrateAllocation allocation;
    for (int sid = 0; sid < num_spatial_layers_; ++sid) {
      for (int tid = 0; tid < num_temporal_layers_; ++tid) {
        allocation.SetBitrate(
            sid, tid,
            config_->layer_target_bitrate[sid * num_temporal_layers_ + tid] *
                1000);
      }
    }
    svc_controller_->OnRatesUpdated(allocation);
  }
  current_bitrate_allocation_ = bitrate_allocation;
  config_changed_ = true;
  return true;
}
// Applies an updated rate-control configuration (bitrate allocation and
// frame rate) to the running encoder. Invalid calls — uninitialized encoder,
// encoder in error state, or a frame rate below 1 fps — are ignored with a
// warning. The new settings take effect on the next Encode() via
// config_changed_.
void LibvpxVp9Encoder::SetRates(const RateControlParameters& parameters) {
  if (!inited_) {
    // Fixed typos in the log message ("calll", "uninitialzied").
    RTC_LOG(LS_WARNING) << "SetRates() call while uninitialized.";
    return;
  }
  if (encoder_->err) {
    RTC_LOG(LS_WARNING) << "Encoder in error state: " << encoder_->err;
    return;
  }
  if (parameters.framerate_fps < 1.0) {
    RTC_LOG(LS_WARNING) << "Unsupported framerate: "
                        << parameters.framerate_fps;
    return;
  }
  // Round to nearest integer; libvpx and the wrapper use integral fps.
  codec_.maxFramerate = static_cast<uint32_t>(parameters.framerate_fps + 0.5);
  bool res = SetSvcRates(parameters.bitrate);
  RTC_DCHECK(res) << "Failed to set new bitrate allocation";
  config_changed_ = true;
}
// TODO(eladalon): s/inst/codec_settings/g.
// Validates |inst|, releases any previous encoder state, (re)allocates the
// libvpx encoder/config objects and fills in the static encoder
// configuration: profile/bit depth, resolution, rate control, key-frame
// policy, threading and the temporal layering structure. Finishes by calling
// InitAndSetControlSettings(), which performs the actual libvpx init.
// Returns a WEBRTC_VIDEO_CODEC_* status code.
int LibvpxVp9Encoder::InitEncode(const VideoCodec* inst,
                                 const Settings& settings) {
  if (inst == nullptr) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->maxFramerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // Allow zero to represent an unspecified maxBitRate
  if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->width < 1 || inst->height < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (settings.number_of_cores < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->VP9().numberOfTemporalLayers > 3) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // libvpx probably does not support more than 3 spatial layers.
  if (inst->VP9().numberOfSpatialLayers > 3) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // Remember the previous input image format so a profile-0 re-init can keep
  // using it after Release() wipes |raw_|.
  absl::optional<vpx_img_fmt_t> previous_img_fmt =
      raw_ ? absl::make_optional<vpx_img_fmt_t>(raw_->fmt) : absl::nullopt;
  int ret_val = Release();
  if (ret_val < 0) {
    return ret_val;
  }
  if (encoder_ == nullptr) {
    encoder_ = new vpx_codec_ctx_t;
  }
  if (config_ == nullptr) {
    config_ = new vpx_codec_enc_cfg_t;
  }
  timestamp_ = 0;
  if (&codec_ != inst) {
    codec_ = *inst;
  }
  memset(&svc_params_, 0, sizeof(vpx_svc_extra_cfg_t));
  force_key_frame_ = true;
  pics_since_key_ = 0;
  num_spatial_layers_ = inst->VP9().numberOfSpatialLayers;
  RTC_DCHECK_GT(num_spatial_layers_, 0);
  num_temporal_layers_ = inst->VP9().numberOfTemporalLayers;
  if (num_temporal_layers_ == 0) {
    num_temporal_layers_ = 1;
  }
  if (use_svc_controller_) {
    svc_controller_ = CreateVp9ScalabilityStructure(*inst);
  }
  framerate_controller_ = std::vector<FramerateController>(
      num_spatial_layers_, FramerateController(codec_.maxFramerate));
  is_svc_ = (num_spatial_layers_ > 1 || num_temporal_layers_ > 1);
  // Populate encoder configuration with default values.
  if (libvpx_->codec_enc_config_default(vpx_codec_vp9_cx(), config_, 0)) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  vpx_img_fmt img_fmt = VPX_IMG_FMT_NONE;
  unsigned int bits_for_storage = 8;
  switch (profile_) {
    case VP9Profile::kProfile0:
      img_fmt = previous_img_fmt.value_or(VPX_IMG_FMT_I420);
      bits_for_storage = 8;
      config_->g_bit_depth = VPX_BITS_8;
      config_->g_profile = 0;
      config_->g_input_bit_depth = 8;
      break;
    case VP9Profile::kProfile1:
      // Encoding of profile 1 is not implemented. It would require extended
      // support for I444, I422, and I440 buffers.
      RTC_NOTREACHED();
      break;
    case VP9Profile::kProfile2:
      img_fmt = VPX_IMG_FMT_I42016;
      bits_for_storage = 16;
      config_->g_bit_depth = VPX_BITS_10;
      config_->g_profile = 2;
      config_->g_input_bit_depth = 10;
      break;
  }
  // Creating a wrapper to the image - setting image data to nullptr. Actual
  // pointer will be set in encode. Setting align to 1, as it is meaningless
  // (actual memory is not allocated).
  raw_ = libvpx_->img_wrap(nullptr, img_fmt, codec_.width, codec_.height, 1,
                           nullptr);
  raw_->bit_depth = bits_for_storage;
  config_->g_w = codec_.width;
  config_->g_h = codec_.height;
  config_->rc_target_bitrate = inst->startBitrate;  // in kbit/s
  config_->g_error_resilient = is_svc_ ? VPX_ERROR_RESILIENT_DEFAULT : 0;
  // Setting the time base of the codec.
  config_->g_timebase.num = 1;
  config_->g_timebase.den = 90000;
  config_->g_lag_in_frames = 0;  // 0- no frame lagging
  config_->g_threads = 1;
  // Rate control settings.
  config_->rc_dropframe_thresh = inst->VP9().frameDroppingOn ? 30 : 0;
  config_->rc_end_usage = VPX_CBR;
  config_->g_pass = VPX_RC_ONE_PASS;
  config_->rc_min_quantizer =
      codec_.mode == VideoCodecMode::kScreensharing ? 8 : 2;
  config_->rc_max_quantizer = 52;
  config_->rc_undershoot_pct = 50;
  config_->rc_overshoot_pct = 50;
  config_->rc_buf_initial_sz = 500;
  config_->rc_buf_optimal_sz = 600;
  config_->rc_buf_sz = 1000;
  // Set the maximum target size of any key-frame.
  rc_max_intra_target_ = MaxIntraTarget(config_->rc_buf_optimal_sz);
  // Key-frame interval is enforced manually by this wrapper.
  config_->kf_mode = VPX_KF_DISABLED;
  // TODO(webm:1592): work-around for libvpx issue, as it can still
  // put some key-frames at will even in VPX_KF_DISABLED kf_mode.
  config_->kf_max_dist = inst->VP9().keyFrameInterval;
  config_->kf_min_dist = config_->kf_max_dist;
  if (quality_scaler_experiment_.enabled) {
    // In that experiment webrtc wide quality scaler is used instead of libvpx
    // internal scaler.
    config_->rc_resize_allowed = 0;
  } else {
    config_->rc_resize_allowed = inst->VP9().automaticResizeOn ? 1 : 0;
  }
  // Determine number of threads based on the image size and #cores.
  config_->g_threads =
      NumberOfThreads(config_->g_w, config_->g_h, settings.number_of_cores);
  is_flexible_mode_ = inst->VP9().flexibleMode;
  inter_layer_pred_ = inst->VP9().interLayerPred;
  if (num_spatial_layers_ > 1 &&
      codec_.mode == VideoCodecMode::kScreensharing && !is_flexible_mode_) {
    RTC_LOG(LS_ERROR) << "Flexible mode is required for screenshare with "
                         "several spatial layers";
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // External reference control is required for different frame rate on spatial
  // layers because libvpx generates rtp incompatible references in this case.
  external_ref_control_ = external_ref_ctrl_ ||
                          (num_spatial_layers_ > 1 &&
                           codec_.mode == VideoCodecMode::kScreensharing) ||
                          inter_layer_pred_ == InterLayerPredMode::kOn;
  // Configure the temporal layering structure: GOF description plus libvpx
  // rate decimators and layer-id pattern for 1, 2 or 3 temporal layers.
  if (num_temporal_layers_ == 1) {
    gof_.SetGofInfoVP9(kTemporalStructureMode1);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_NOLAYERING;
    config_->ts_number_layers = 1;
    config_->ts_rate_decimator[0] = 1;
    config_->ts_periodicity = 1;
    config_->ts_layer_id[0] = 0;
  } else if (num_temporal_layers_ == 2) {
    gof_.SetGofInfoVP9(kTemporalStructureMode2);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0101;
    config_->ts_number_layers = 2;
    config_->ts_rate_decimator[0] = 2;
    config_->ts_rate_decimator[1] = 1;
    config_->ts_periodicity = 2;
    config_->ts_layer_id[0] = 0;
    config_->ts_layer_id[1] = 1;
  } else if (num_temporal_layers_ == 3) {
    gof_.SetGofInfoVP9(kTemporalStructureMode3);
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_0212;
    config_->ts_number_layers = 3;
    config_->ts_rate_decimator[0] = 4;
    config_->ts_rate_decimator[1] = 2;
    config_->ts_rate_decimator[2] = 1;
    config_->ts_periodicity = 4;
    config_->ts_layer_id[0] = 0;
    config_->ts_layer_id[1] = 2;
    config_->ts_layer_id[2] = 1;
    config_->ts_layer_id[3] = 2;
  } else {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (external_ref_control_) {
    // References are set explicitly per frame; bypass libvpx's own layering.
    config_->temporal_layering_mode = VP9E_TEMPORAL_LAYERING_MODE_BYPASS;
    if (num_temporal_layers_ > 1 && num_spatial_layers_ > 1 &&
        codec_.mode == VideoCodecMode::kScreensharing) {
      // External reference control for several temporal layers with different
      // frame rates on spatial layers is not implemented yet.
      return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
    }
  }
  ref_buf_.clear();
  return InitAndSetControlSettings(inst);
}
int LibvpxVp9Encoder::NumberOfThreads(int width,
                                      int height,
                                      int number_of_cores) {
  // Pick a thread count matching a valid number of VP9 tile columns
  // (1, 2, 4, 8); see the comments at VP9E_SET_TILE_COLUMNS.
  const int pixel_count = width * height;
  if (pixel_count >= 1280 * 720 && number_of_cores > 4) {
    return 4;
  }
  if (pixel_count >= 640 * 360 && number_of_cores > 2) {
    return 2;
  }
  // Use 2 threads for low res on ARM.
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || \
    defined(WEBRTC_ANDROID)
  if (pixel_count >= 320 * 180 && number_of_cores > 2) {
    return 2;
  }
#endif
  // Single thread below VGA.
  return 1;
}
// Finalizes the libvpx configuration: per-layer QP limits, spatial scaling
// factors (from the SVC controller, from explicit layer config, or derived
// 1:2 defaults), the initial bitrate allocation, and all codec control
// settings (speed, tiling, SVC parameters, frame dropping, denoising, ...).
// Returns a WEBRTC_VIDEO_CODEC_* status code.
int LibvpxVp9Encoder::InitAndSetControlSettings(const VideoCodec* inst) {
  // Set QP-min/max per spatial and temporal layer.
  int tot_num_layers = num_spatial_layers_ * num_temporal_layers_;
  for (int i = 0; i < tot_num_layers; ++i) {
    svc_params_.max_quantizers[i] = config_->rc_max_quantizer;
    svc_params_.min_quantizers[i] = config_->rc_min_quantizer;
  }
  config_->ss_number_layers = num_spatial_layers_;
  if (svc_controller_) {
    // Scaling factors come from the scalability structure.
    auto stream_config = svc_controller_->StreamConfig();
    for (int i = 0; i < stream_config.num_spatial_layers; ++i) {
      svc_params_.scaling_factor_num[i] = stream_config.scaling_factor_num[i];
      svc_params_.scaling_factor_den[i] = stream_config.scaling_factor_den[i];
    }
  } else if (ExplicitlyConfiguredSpatialLayers()) {
    // Derive 1/scale_factor from the explicitly configured layer sizes and
    // validate that the scaling is integer, uniform and a power of two.
    for (int i = 0; i < num_spatial_layers_; ++i) {
      const auto& layer = codec_.spatialLayers[i];
      RTC_CHECK_GT(layer.width, 0);
      const int scale_factor = codec_.width / layer.width;
      RTC_DCHECK_GT(scale_factor, 0);
      // Ensure scaler factor is integer.
      if (scale_factor * layer.width != codec_.width) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }
      // Ensure scale factor is the same in both dimensions.
      if (scale_factor * layer.height != codec_.height) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }
      // Ensure scale factor is power of two.
      const bool is_pow_of_two = (scale_factor & (scale_factor - 1)) == 0;
      if (!is_pow_of_two) {
        return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
      }
      svc_params_.scaling_factor_num[i] = 1;
      svc_params_.scaling_factor_den[i] = scale_factor;
      RTC_DCHECK_GT(codec_.spatialLayers[i].maxFramerate, 0);
      RTC_DCHECK_LE(codec_.spatialLayers[i].maxFramerate, codec_.maxFramerate);
      if (i > 0) {
        // Frame rate of high spatial layer is supposed to be equal or higher
        // than frame rate of low spatial layer.
        RTC_DCHECK_GE(codec_.spatialLayers[i].maxFramerate,
                      codec_.spatialLayers[i - 1].maxFramerate);
      }
    }
  } else {
    int scaling_factor_num = 256;
    for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
      // 1:2 scaling in each dimension.
      svc_params_.scaling_factor_num[i] = scaling_factor_num;
      svc_params_.scaling_factor_den[i] = 256;
      // Halve the numerator for the next lower spatial layer. Without this
      // line every layer would end up with an identical 256/256 factor,
      // contradicting the intended 1:2 downscaling per layer.
      scaling_factor_num /= 2;
    }
  }
  SvcRateAllocator init_allocator(codec_);
  current_bitrate_allocation_ =
      init_allocator.Allocate(VideoBitrateAllocationParameters(
          inst->startBitrate * 1000, inst->maxFramerate));
  if (!SetSvcRates(current_bitrate_allocation_)) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  const vpx_codec_err_t rv = libvpx_->codec_enc_init(
      encoder_, vpx_codec_vp9_cx(), config_,
      config_->g_bit_depth == VPX_BITS_8 ? 0 : VPX_CODEC_USE_HIGHBITDEPTH);
  if (rv != VPX_CODEC_OK) {
    RTC_LOG(LS_ERROR) << "Init error: " << libvpx_->codec_err_to_string(rv);
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  UpdatePerformanceFlags();
  RTC_DCHECK_EQ(performance_flags_by_spatial_index_.size(),
                static_cast<size_t>(num_spatial_layers_));
  if (performance_flags_.use_per_layer_speed) {
    for (int si = 0; si < num_spatial_layers_; ++si) {
      svc_params_.speed_per_layer[si] =
          performance_flags_by_spatial_index_[si].base_layer_speed;
      svc_params_.loopfilter_ctrl[si] =
          performance_flags_by_spatial_index_[si].deblock_mode;
    }
  }
  libvpx_->codec_control(encoder_, VP8E_SET_MAX_INTRA_BITRATE_PCT,
                         rc_max_intra_target_);
  libvpx_->codec_control(encoder_, VP9E_SET_AQ_MODE,
                         inst->VP9().adaptiveQpMode ? 3 : 0);
  libvpx_->codec_control(encoder_, VP9E_SET_FRAME_PARALLEL_DECODING, 0);
  libvpx_->codec_control(encoder_, VP9E_SET_SVC_GF_TEMPORAL_REF, 0);
  if (is_svc_) {
    libvpx_->codec_control(encoder_, VP9E_SET_SVC, 1);
    libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
  }
  if (!is_svc_ || !performance_flags_.use_per_layer_speed) {
    // Single speed setting: use the speed of the highest spatial layer.
    libvpx_->codec_control(
        encoder_, VP8E_SET_CPUUSED,
        performance_flags_by_spatial_index_.rbegin()->base_layer_speed);
  }
  if (num_spatial_layers_ > 1) {
    switch (inter_layer_pred_) {
      case InterLayerPredMode::kOn:
        libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 0);
        break;
      case InterLayerPredMode::kOff:
        libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 1);
        break;
      case InterLayerPredMode::kOnKeyPic:
        libvpx_->codec_control(encoder_, VP9E_SET_SVC_INTER_LAYER_PRED, 2);
        break;
      default:
        RTC_NOTREACHED();
    }
    memset(&svc_drop_frame_, 0, sizeof(svc_drop_frame_));
    const bool reverse_constrained_drop_mode =
        inter_layer_pred_ == InterLayerPredMode::kOn &&
        codec_.mode == VideoCodecMode::kScreensharing &&
        num_spatial_layers_ > 1;
    if (reverse_constrained_drop_mode) {
      // Screenshare dropping mode: drop a layer only together with all lower
      // layers. This ensures that drops on lower layers won't reduce frame-rate
      // for higher layers and reference structure is RTP-compatible.
      svc_drop_frame_.framedrop_mode = CONSTRAINED_FROM_ABOVE_DROP;
      svc_drop_frame_.max_consec_drop = 5;
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
      // No buffering is needed because the highest layer is always present in
      // all frames in CONSTRAINED_FROM_ABOVE drop mode.
      layer_buffering_ = false;
    } else {
      // Configure encoder to drop entire superframe whenever it needs to drop
      // a layer. This mode is preferred over per-layer dropping which causes
      // quality flickering and is not compatible with RTP non-flexible mode.
      svc_drop_frame_.framedrop_mode =
          full_superframe_drop_ ? FULL_SUPERFRAME_DROP : CONSTRAINED_LAYER_DROP;
      // Buffering is needed only for constrained layer drop, as it's not clear
      // which frame is the last.
      layer_buffering_ = !full_superframe_drop_;
      svc_drop_frame_.max_consec_drop = std::numeric_limits<int>::max();
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
    }
    libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
                           &svc_drop_frame_);
  }
  // Register callback for getting each spatial layer.
  vpx_codec_priv_output_cx_pkt_cb_pair_t cbp = {
      LibvpxVp9Encoder::EncoderOutputCodedPacketCallback,
      reinterpret_cast<void*>(this)};
  libvpx_->codec_control(encoder_, VP9E_REGISTER_CX_CALLBACK,
                         reinterpret_cast<void*>(&cbp));
  // Control function to set the number of column tiles in encoding a frame, in
  // log2 unit: e.g., 0 = 1 tile column, 1 = 2 tile columns, 2 = 4 tile columns.
  // The number tile columns will be capped by the encoder based on image size
  // (minimum width of tile column is 256 pixels, maximum is 4096).
  libvpx_->codec_control(encoder_, VP9E_SET_TILE_COLUMNS,
                         static_cast<int>((config_->g_threads >> 1)));
  // Turn on row-based multithreading.
  libvpx_->codec_control(encoder_, VP9E_SET_ROW_MT, 1);
#if !defined(WEBRTC_ARCH_ARM) && !defined(WEBRTC_ARCH_ARM64) && \
    !defined(ANDROID)
  // Do not enable the denoiser on ARM since optimization is pending.
  // Denoiser is on by default on other platforms.
  libvpx_->codec_control(encoder_, VP9E_SET_NOISE_SENSITIVITY,
                         inst->VP9().denoisingOn ? 1 : 0);
#endif
  if (codec_.mode == VideoCodecMode::kScreensharing) {
    // Adjust internal parameters to screen content.
    libvpx_->codec_control(encoder_, VP9E_SET_TUNE_CONTENT, 1);
  }
  // Enable encoder skip of static/low content blocks.
  libvpx_->codec_control(encoder_, VP8E_SET_STATIC_THRESHOLD, 1);
  inited_ = true;
  config_changed_ = true;
  return WEBRTC_VIDEO_CODEC_OK;
}
uint32_t LibvpxVp9Encoder::MaxIntraTarget(uint32_t optimal_buffer_size) {
  // Cap key-frame size at the optimal buffer level (normalized by target BR)
  // scaled by a constant:
  //   Max target size = scale_par * optimal_buffer_size * targetBR[Kbps].
  // The result is expressed as a percentage of the per-frame bandwidth:
  //   perFrameBw = targetBR[Kbps] * 1000 / framerate.
  const float scale_par = 0.5;
  const uint32_t target_pct =
      optimal_buffer_size * scale_par * codec_.maxFramerate / 10;
  // Never go below 3x the per-frame bandwidth (i.e. 300%).
  const uint32_t min_intra_size = 300;
  if (target_pct < min_intra_size) {
    return min_intra_size;
  }
  return target_pct;
}
// Encodes one input frame: decides key-frame forcing, computes per-frame
// layer ids (including screenshare frame-rate-limiter driven layer drops),
// pushes pending config changes to libvpx, wires the input buffer into
// |raw_| and calls vpx_codec_encode(). Encoded layers are delivered through
// the registered output callback (and DeliverBufferedFrame() when layer
// buffering is enabled). Returns a WEBRTC_VIDEO_CODEC_* status code.
int LibvpxVp9Encoder::Encode(const VideoFrame& input_image,
                             const std::vector<VideoFrameType>* frame_types) {
  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (encoded_complete_callback_ == nullptr) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (num_active_spatial_layers_ == 0) {
    // All spatial layers are disabled, return without encoding anything.
    return WEBRTC_VIDEO_CODEC_OK;
  }
  // We only support one stream at the moment.
  if (frame_types && !frame_types->empty()) {
    if ((*frame_types)[0] == VideoFrameType::kVideoFrameKey) {
      force_key_frame_ = true;
    }
  }
  // Manually enforced key-frame interval (kf_mode is VPX_KF_DISABLED).
  if (pics_since_key_ + 1 ==
      static_cast<size_t>(codec_.VP9()->keyFrameInterval)) {
    force_key_frame_ = true;
  }
  if (svc_controller_) {
    layer_frames_ = svc_controller_->NextFrameConfig(force_key_frame_);
    if (layer_frames_.empty()) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
  vpx_svc_layer_id_t layer_id = {0};
  if (!force_key_frame_) {
    const size_t gof_idx = (pics_since_key_ + 1) % gof_.num_frames_in_gof;
    layer_id.temporal_layer_id = gof_.temporal_idx[gof_idx];
    if (VideoCodecMode::kScreensharing == codec_.mode) {
      const uint32_t frame_timestamp_ms =
          1000 * input_image.timestamp() / kVideoPayloadTypeFrequency;
      // To ensure that several rate-limiters with different limits don't
      // interfere, they must be queried in order of increasing limit.
      bool use_steady_state_limiter =
          variable_framerate_experiment_.enabled &&
          input_image.update_rect().IsEmpty() &&
          num_steady_state_frames_ >=
              variable_framerate_experiment_.frames_before_steady_state;
      // Need to check all frame limiters, even if lower layers are disabled,
      // because variable frame-rate limiter should be checked after the first
      // layer. It's easier to overwrite active layers after, then check all
      // cases.
      for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
        const float layer_fps =
            framerate_controller_[layer_id.spatial_layer_id].GetTargetRate();
        // Use steady state rate-limiter at the correct place.
        if (use_steady_state_limiter &&
            layer_fps > variable_framerate_experiment_.framerate_limit - 1e-9) {
          if (variable_framerate_controller_.DropFrame(frame_timestamp_ms)) {
            // Setting id to the layer count marks the whole picture dropped.
            layer_id.spatial_layer_id = num_active_spatial_layers_;
          }
          // Break always: if rate limiter triggered frame drop, no need to
          // continue; otherwise, the rate is less than the next limiters.
          break;
        }
        if (framerate_controller_[sl_idx].DropFrame(frame_timestamp_ms)) {
          ++layer_id.spatial_layer_id;
        } else {
          break;
        }
      }
      if (use_steady_state_limiter &&
          layer_id.spatial_layer_id < num_active_spatial_layers_) {
        variable_framerate_controller_.AddFrame(frame_timestamp_ms);
      }
    }
    if (force_all_active_layers_) {
      // See SetSvcRates(): a newly enabled layer needs a spatial reference.
      layer_id.spatial_layer_id = first_active_layer_;
      force_all_active_layers_ = false;
    }
    RTC_DCHECK_LE(layer_id.spatial_layer_id, num_active_spatial_layers_);
    if (layer_id.spatial_layer_id >= num_active_spatial_layers_) {
      // Drop entire picture.
      return WEBRTC_VIDEO_CODEC_OK;
    }
  }
  // Need to set temporal layer id on ALL layers, even disabled ones.
  // Otherwise libvpx might produce frames on a disabled layer:
  // http://crbug.com/1051476
  for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
    layer_id.temporal_layer_id_per_spatial[sl_idx] = layer_id.temporal_layer_id;
  }
  if (layer_id.spatial_layer_id < first_active_layer_) {
    layer_id.spatial_layer_id = first_active_layer_;
  }
  if (svc_controller_) {
    // The SVC controller dictates layer ids; override the GOF-derived ones.
    layer_id.spatial_layer_id = layer_frames_.front().SpatialId();
    layer_id.temporal_layer_id = layer_frames_.front().TemporalId();
    for (const auto& layer : layer_frames_) {
      layer_id.temporal_layer_id_per_spatial[layer.SpatialId()] =
          layer.TemporalId();
    }
  }
  if (is_svc_ && performance_flags_.use_per_layer_speed) {
    // Update speed settings that might depend on temporal index.
    bool speed_updated = false;
    for (int sl_idx = 0; sl_idx < num_spatial_layers_; ++sl_idx) {
      const int target_speed =
          layer_id.temporal_layer_id_per_spatial[sl_idx] == 0
              ? performance_flags_by_spatial_index_[sl_idx].base_layer_speed
              : performance_flags_by_spatial_index_[sl_idx].high_layer_speed;
      if (svc_params_.speed_per_layer[sl_idx] != target_speed) {
        svc_params_.speed_per_layer[sl_idx] = target_speed;
        speed_updated = true;
      }
    }
    if (speed_updated) {
      libvpx_->codec_control(encoder_, VP9E_SET_SVC_PARAMETERS, &svc_params_);
    }
  }
  libvpx_->codec_control(encoder_, VP9E_SET_SVC_LAYER_ID, &layer_id);
  if (num_spatial_layers_ > 1) {
    // Update frame dropping settings as they may change on per-frame basis.
    libvpx_->codec_control(encoder_, VP9E_SET_SVC_FRAME_DROP_LAYER,
                           &svc_drop_frame_);
  }
  if (config_changed_) {
    if (libvpx_->codec_enc_config_set(encoder_, config_)) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (!performance_flags_.use_per_layer_speed) {
      // Not setting individual speeds per layer, find the highest active
      // resolution instead and base the speed on that.
      for (int i = num_spatial_layers_ - 1; i >= 0; --i) {
        if (config_->ss_target_bitrate[i] > 0) {
          int width = (svc_params_.scaling_factor_num[i] * config_->g_w) /
                      svc_params_.scaling_factor_den[i];
          int height = (svc_params_.scaling_factor_num[i] * config_->g_h) /
                       svc_params_.scaling_factor_den[i];
          int speed =
              std::prev(performance_flags_.settings_by_resolution.lower_bound(
                  width * height))
                  ->second.base_layer_speed;
          libvpx_->codec_control(encoder_, VP8E_SET_CPUUSED, speed);
          break;
        }
      }
    }
    config_changed_ = false;
  }
  RTC_DCHECK_EQ(input_image.width(), raw_->d_w);
  RTC_DCHECK_EQ(input_image.height(), raw_->d_h);
  // Set input image for use in the callback.
  // This was necessary since you need some information from input_image.
  // You can save only the necessary information (such as timestamp) instead of
  // doing this.
  input_image_ = &input_image;
  // In case we need to map the buffer, |mapped_buffer| is used to keep it alive
  // through reference counting until after encoding has finished.
  rtc::scoped_refptr<const VideoFrameBuffer> mapped_buffer;
  const I010BufferInterface* i010_buffer;
  rtc::scoped_refptr<const I010BufferInterface> i010_copy;
  switch (profile_) {
    case VP9Profile::kProfile0: {
      mapped_buffer =
          PrepareBufferForProfile0(input_image.video_frame_buffer());
      if (!mapped_buffer) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      break;
    }
    case VP9Profile::kProfile1: {
      RTC_NOTREACHED();
      break;
    }
    case VP9Profile::kProfile2: {
      // We can inject kI010 frames directly for encode. All other formats
      // should be converted to it.
      switch (input_image.video_frame_buffer()->type()) {
        case VideoFrameBuffer::Type::kI010: {
          i010_buffer = input_image.video_frame_buffer()->GetI010();
          break;
        }
        default: {
          i010_copy =
              I010Buffer::Copy(*input_image.video_frame_buffer()->ToI420());
          i010_buffer = i010_copy.get();
        }
      }
      raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataY()));
      raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataU()));
      raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(
          reinterpret_cast<const uint8_t*>(i010_buffer->DataV()));
      // I010 strides count 16-bit samples; libvpx expects bytes, hence * 2.
      raw_->stride[VPX_PLANE_Y] = i010_buffer->StrideY() * 2;
      raw_->stride[VPX_PLANE_U] = i010_buffer->StrideU() * 2;
      raw_->stride[VPX_PLANE_V] = i010_buffer->StrideV() * 2;
      break;
    }
  }
  vpx_enc_frame_flags_t flags = 0;
  if (force_key_frame_) {
    flags = VPX_EFLAG_FORCE_KF;
  }
  if (svc_controller_) {
    vpx_svc_ref_frame_config_t ref_config = Vp9References(layer_frames_);
    libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
                           &ref_config);
  } else if (external_ref_control_) {
    vpx_svc_ref_frame_config_t ref_config =
        SetReferences(force_key_frame_, layer_id.spatial_layer_id);
    if (VideoCodecMode::kScreensharing == codec_.mode) {
      // Per-layer durations in 90 kHz ticks, capped by per-layer target fps.
      for (uint8_t sl_idx = 0; sl_idx < num_active_spatial_layers_; ++sl_idx) {
        ref_config.duration[sl_idx] = static_cast<int64_t>(
            90000 / (std::min(static_cast<float>(codec_.maxFramerate),
                              framerate_controller_[sl_idx].GetTargetRate())));
      }
    }
    libvpx_->codec_control(encoder_, VP9E_SET_SVC_REF_FRAME_CONFIG,
                           &ref_config);
  }
  first_frame_in_picture_ = true;
  // TODO(ssilkin): Frame duration should be specified per spatial layer
  // since their frame rate can be different. For now calculate frame duration
  // based on target frame rate of the highest spatial layer, which frame rate
  // is supposed to be equal or higher than frame rate of low spatial layers.
  // Also, timestamp should represent actual time passed since previous frame
  // (not 'expected' time). Then rate controller can drain buffer more
  // accurately.
  RTC_DCHECK_GE(framerate_controller_.size(), num_active_spatial_layers_);
  float target_framerate_fps =
      (codec_.mode == VideoCodecMode::kScreensharing)
          ? std::min(static_cast<float>(codec_.maxFramerate),
                     framerate_controller_[num_active_spatial_layers_ - 1]
                         .GetTargetRate())
          : codec_.maxFramerate;
  // Frame duration in 90 kHz RTP clock ticks.
  uint32_t duration = static_cast<uint32_t>(90000 / target_framerate_fps);
  const vpx_codec_err_t rv = libvpx_->codec_encode(
      encoder_, raw_, timestamp_, duration, flags, VPX_DL_REALTIME);
  if (rv != VPX_CODEC_OK) {
    RTC_LOG(LS_ERROR) << "Encoding error: " << libvpx_->codec_err_to_string(rv)
                      << "\n"
                         "Details: "
                      << libvpx_->codec_error(encoder_) << "\n"
                      << libvpx_->codec_error_detail(encoder_);
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  timestamp_ += duration;
  if (layer_buffering_) {
    // With layer buffering, the end of the picture is only known here, after
    // the whole superframe has been encoded.
    const bool end_of_picture = true;
    DeliverBufferedFrame(end_of_picture);
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
// Fills VP9 codec-specific info (|codec_specific| and |spatial_idx|) for one
// encoded layer frame described by |pkt|, and maintains pics_since_key_ /
// ss_info_needed_ / first_frame_in_picture_ bookkeeping. Returns false only
// when an active SVC controller reports that the encoder produced a frame
// for a layer that was never requested.
bool LibvpxVp9Encoder::PopulateCodecSpecific(CodecSpecificInfo* codec_specific,
                                             absl::optional<int>* spatial_idx,
                                             const vpx_codec_cx_pkt& pkt,
                                             uint32_t timestamp) {
  RTC_CHECK(codec_specific != nullptr);
  codec_specific->codecType = kVideoCodecVP9;
  CodecSpecificInfoVP9* vp9_info = &(codec_specific->codecSpecific.VP9);
  vp9_info->first_frame_in_picture = first_frame_in_picture_;
  vp9_info->flexible_mode = is_flexible_mode_;
  // Count pictures since the last key picture, incrementing only once per
  // picture (on its first spatial layer frame).
  if (pkt.data.frame.flags & VPX_FRAME_IS_KEY) {
    pics_since_key_ = 0;
  } else if (first_frame_in_picture_) {
    ++pics_since_key_;
  }
  vpx_svc_layer_id_t layer_id = {0};
  libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
  // Can't have keyframe with non-zero temporal layer.
  RTC_DCHECK(pics_since_key_ != 0 || layer_id.temporal_layer_id == 0);
  RTC_CHECK_GT(num_temporal_layers_, 0);
  RTC_CHECK_GT(num_active_spatial_layers_, 0);
  if (num_temporal_layers_ == 1) {
    RTC_CHECK_EQ(layer_id.temporal_layer_id, 0);
    vp9_info->temporal_idx = kNoTemporalIdx;
  } else {
    vp9_info->temporal_idx = layer_id.temporal_layer_id;
  }
  if (num_active_spatial_layers_ == 1) {
    RTC_CHECK_EQ(layer_id.spatial_layer_id, 0);
    *spatial_idx = absl::nullopt;
  } else {
    *spatial_idx = layer_id.spatial_layer_id;
  }
  // TODO(asapersson): this info has to be obtained from the encoder.
  vp9_info->temporal_up_switch = false;
  const bool is_key_pic = (pics_since_key_ == 0);
  const bool is_inter_layer_pred_allowed =
      (inter_layer_pred_ == InterLayerPredMode::kOn ||
       (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic));
  // Always set inter_layer_predicted to true on high layer frame if inter-layer
  // prediction (ILP) is allowed even if encoder didn't actually use it.
  // Setting inter_layer_predicted to false would allow receiver to decode high
  // layer frame without decoding low layer frame. If that would happen (e.g.
  // if low layer frame is lost) then receiver won't be able to decode next high
  // layer frame which uses ILP.
  vp9_info->inter_layer_predicted =
      first_frame_in_picture_ ? false : is_inter_layer_pred_allowed;
  // Mark all low spatial layer frames as references (not just frames of
  // active low spatial layers) if inter-layer prediction is enabled since
  // these frames are indirect references of high spatial layer, which can
  // later be enabled without key frame.
  vp9_info->non_ref_for_inter_layer_pred =
      !is_inter_layer_pred_allowed ||
      layer_id.spatial_layer_id + 1 == num_spatial_layers_;
  // Always populate this, so that the packetizer can properly set the marker
  // bit.
  vp9_info->num_spatial_layers = num_active_spatial_layers_;
  vp9_info->first_active_layer = first_active_layer_;
  vp9_info->num_ref_pics = 0;
  FillReferenceIndices(pkt, pics_since_key_, vp9_info->inter_layer_predicted,
                       vp9_info);
  if (vp9_info->flexible_mode) {
    vp9_info->gof_idx = kNoGofIdx;
  } else {
    vp9_info->gof_idx =
        static_cast<uint8_t>(pics_since_key_ % gof_.num_frames_in_gof);
    vp9_info->temporal_up_switch = gof_.temporal_up_switch[vp9_info->gof_idx];
    RTC_DCHECK(vp9_info->num_ref_pics == gof_.num_ref_pics[vp9_info->gof_idx] ||
               vp9_info->num_ref_pics == 0);
  }
  vp9_info->inter_pic_predicted = (!is_key_pic && vp9_info->num_ref_pics > 0);
  // Write SS on key frame of independently coded spatial layers and on base
  // temporal/spatial layer frame if number of layers changed without issuing
  // of key picture (inter-layer prediction is enabled).
  const bool is_key_frame = is_key_pic && !vp9_info->inter_layer_predicted;
  if (is_key_frame || (ss_info_needed_ && layer_id.temporal_layer_id == 0 &&
                       layer_id.spatial_layer_id == first_active_layer_)) {
    vp9_info->ss_data_available = true;
    vp9_info->spatial_layer_resolution_present = true;
    // Signal disabled layers.
    for (size_t i = 0; i < first_active_layer_; ++i) {
      vp9_info->width[i] = 0;
      vp9_info->height[i] = 0;
    }
    for (size_t i = first_active_layer_; i < num_active_spatial_layers_; ++i) {
      vp9_info->width[i] = codec_.width * svc_params_.scaling_factor_num[i] /
                           svc_params_.scaling_factor_den[i];
      vp9_info->height[i] = codec_.height * svc_params_.scaling_factor_num[i] /
                            svc_params_.scaling_factor_den[i];
    }
    if (vp9_info->flexible_mode) {
      vp9_info->gof.num_frames_in_gof = 0;
    } else {
      vp9_info->gof.CopyGofInfoVP9(gof_);
    }
    ss_info_needed_ = false;
  } else {
    vp9_info->ss_data_available = false;
  }
  first_frame_in_picture_ = false;
  // Populate codec-agnostic section in the codec specific structure.
  if (svc_controller_) {
    auto it = absl::c_find_if(
        layer_frames_,
        [&](const ScalableVideoController::LayerFrameConfig& config) {
          return config.SpatialId() == layer_id.spatial_layer_id;
        });
    if (it == layer_frames_.end()) {
      RTC_LOG(LS_ERROR) << "Encoder produced a frame for layer S"
                        << layer_id.spatial_layer_id << "T"
                        << layer_id.temporal_layer_id
                        << " that wasn't requested.";
      return false;
    }
    codec_specific->generic_frame_info = svc_controller_->OnEncodeDone(*it);
    if (is_key_frame) {
      // Key frames carry the full dependency template structure, including
      // per-layer render resolutions.
      codec_specific->template_structure =
          svc_controller_->DependencyStructure();
      auto& resolutions = codec_specific->template_structure->resolutions;
      resolutions.resize(num_spatial_layers_);
      for (int sid = 0; sid < num_spatial_layers_; ++sid) {
        resolutions[sid] = RenderResolution(
            /*width=*/codec_.width * svc_params_.scaling_factor_num[sid] /
                svc_params_.scaling_factor_den[sid],
            /*height=*/codec_.height * svc_params_.scaling_factor_num[sid] /
                svc_params_.scaling_factor_den[sid]);
      }
    }
  }
  return true;
}
// Fills the temporal reference metadata (`num_ref_pics` and the `p_diff`
// picture-id deltas) of `vp9_info` for the frame just produced in `pkt`, and
// derives `temporal_up_switch`.
//
// `pic_num` is the picture id of the current frame. `inter_layer_predicted`
// signals that this spatial layer predicts from the lower spatial layer of
// the same picture; such intra-picture references carry no p_diff entry.
void LibvpxVp9Encoder::FillReferenceIndices(const vpx_codec_cx_pkt& pkt,
                                            const size_t pic_num,
                                            const bool inter_layer_predicted,
                                            CodecSpecificInfoVP9* vp9_info) {
  vpx_svc_layer_id_t layer_id = {0};
  libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);

  const bool is_key_frame =
      (pkt.data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;

  // Collect the distinct reference buffers the encoder consulted for this
  // frame. Each buffer maps back to the picture that last wrote it (tracked
  // by UpdateReferenceBuffers()).
  std::vector<RefFrameBuffer> ref_buf_list;
  if (is_svc_) {
    vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
    libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
                           &enc_layer_conf);
    // Bitmask of referenced buffer slots, used only for the log line below.
    int ref_buf_flags = 0;

    // LAST reference.
    if (enc_layer_conf.reference_last[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.lst_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    // ALTREF reference.
    if (enc_layer_conf.reference_alt_ref[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.alt_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    // GOLDEN reference.
    if (enc_layer_conf.reference_golden[layer_id.spatial_layer_id]) {
      const size_t fb_idx =
          enc_layer_conf.gld_fb_idx[layer_id.spatial_layer_id];
      RTC_DCHECK(ref_buf_.find(fb_idx) != ref_buf_.end());
      if (std::find(ref_buf_list.begin(), ref_buf_list.end(),
                    ref_buf_.at(fb_idx)) == ref_buf_list.end()) {
        ref_buf_list.push_back(ref_buf_.at(fb_idx));
        ref_buf_flags |= 1 << fb_idx;
      }
    }

    RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
                        << layer_id.spatial_layer_id << " tl "
                        << layer_id.temporal_layer_id << " refered buffers "
                        << (ref_buf_flags & (1 << 0) ? 1 : 0)
                        << (ref_buf_flags & (1 << 1) ? 1 : 0)
                        << (ref_buf_flags & (1 << 2) ? 1 : 0)
                        << (ref_buf_flags & (1 << 3) ? 1 : 0)
                        << (ref_buf_flags & (1 << 4) ? 1 : 0)
                        << (ref_buf_flags & (1 << 5) ? 1 : 0)
                        << (ref_buf_flags & (1 << 6) ? 1 : 0)
                        << (ref_buf_flags & (1 << 7) ? 1 : 0);
  } else if (!is_key_frame) {
    RTC_DCHECK_EQ(num_spatial_layers_, 1);
    RTC_DCHECK_EQ(num_temporal_layers_, 1);
    // In non-SVC mode encoder doesn't provide reference list. Assume each frame
    // refers previous one, which is stored in buffer 0.
    ref_buf_list.push_back(ref_buf_.at(0));
  }

  // Translate referenced buffers into picture-id deltas, deduplicating by
  // picture id, and track the highest temporal layer referenced.
  size_t max_ref_temporal_layer_id = 0;

  std::vector<size_t> ref_pid_list;

  vp9_info->num_ref_pics = 0;
  for (const RefFrameBuffer& ref_buf : ref_buf_list) {
    RTC_DCHECK_LE(ref_buf.pic_num, pic_num);
    if (ref_buf.pic_num < pic_num) {
      if (inter_layer_pred_ != InterLayerPredMode::kOn) {
        // RTP spec limits temporal prediction to the same spatial layer.
        // It is safe to ignore this requirement if inter-layer prediction is
        // enabled for all frames when all base frames are relayed to receiver.
        RTC_DCHECK_EQ(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
      } else {
        RTC_DCHECK_LE(ref_buf.spatial_layer_id, layer_id.spatial_layer_id);
      }
      RTC_DCHECK_LE(ref_buf.temporal_layer_id, layer_id.temporal_layer_id);

      // Encoder may reference several spatial layers on the same previous
      // frame in case if some spatial layers are skipped on the current frame.
      // We shouldn't put duplicate references as it may break some old
      // clients and isn't RTP compatible.
      if (std::find(ref_pid_list.begin(), ref_pid_list.end(),
                    ref_buf.pic_num) != ref_pid_list.end()) {
        continue;
      }
      ref_pid_list.push_back(ref_buf.pic_num);

      const size_t p_diff = pic_num - ref_buf.pic_num;
      RTC_DCHECK_LE(p_diff, 127UL);

      vp9_info->p_diff[vp9_info->num_ref_pics] = static_cast<uint8_t>(p_diff);
      ++vp9_info->num_ref_pics;

      max_ref_temporal_layer_id =
          std::max(max_ref_temporal_layer_id, ref_buf.temporal_layer_id);
    } else {
      RTC_DCHECK(inter_layer_predicted);
      // RTP spec only allows to use previous spatial layer for inter-layer
      // prediction.
      RTC_DCHECK_EQ(ref_buf.spatial_layer_id + 1, layer_id.spatial_layer_id);
    }
  }

  // The frame is a temporal up-switch point if nothing it references lives on
  // its own (or a higher) temporal layer.
  vp9_info->temporal_up_switch =
      (max_ref_temporal_layer_id <
       static_cast<size_t>(layer_id.temporal_layer_id));
}
void LibvpxVp9Encoder::UpdateReferenceBuffers(const vpx_codec_cx_pkt& pkt,
const size_t pic_num) {
vpx_svc_layer_id_t layer_id = {0};
libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);
RefFrameBuffer frame_buf(pic_num, layer_id.spatial_layer_id,
layer_id.temporal_layer_id);
if (is_svc_) {
vpx_svc_ref_frame_config_t enc_layer_conf = {{0}};
libvpx_->codec_control(encoder_, VP9E_GET_SVC_REF_FRAME_CONFIG,
&enc_layer_conf);
const int update_buffer_slot =
enc_layer_conf.update_buffer_slot[layer_id.spatial_layer_id];
for (size_t i = 0; i < kNumVp9Buffers; ++i) {
if (update_buffer_slot & (1 << i)) {
ref_buf_[i] = frame_buf;
}
}
RTC_LOG(LS_VERBOSE) << "Frame " << pic_num << " sl "
<< layer_id.spatial_layer_id << " tl "
<< layer_id.temporal_layer_id << " updated buffers "
<< (update_buffer_slot & (1 << 0) ? 1 : 0)
<< (update_buffer_slot & (1 << 1) ? 1 : 0)
<< (update_buffer_slot & (1 << 2) ? 1 : 0)
<< (update_buffer_slot & (1 << 3) ? 1 : 0)
<< (update_buffer_slot & (1 << 4) ? 1 : 0)
<< (update_buffer_slot & (1 << 5) ? 1 : 0)
<< (update_buffer_slot & (1 << 6) ? 1 : 0)
<< (update_buffer_slot & (1 << 7) ? 1 : 0);
} else {
RTC_DCHECK_EQ(num_spatial_layers_, 1);
RTC_DCHECK_EQ(num_temporal_layers_, 1);
// In non-svc mode encoder doesn't provide reference list. Assume each frame
// is reference and stored in buffer 0.
ref_buf_[0] = frame_buf;
}
}
// Computes the libvpx reference/update configuration for the upcoming frame.
// Temporal references use the LAST slot and the inter-layer (spatial)
// reference uses the GOLDEN slot.
//
// `is_key_pic` resets the picture counter to 0; `first_active_spatial_layer_id`
// is the lowest spatial layer currently being encoded.
//
// BUG FIX: the `else` branch of the spatial-reference setup was truncated —
// `last_updated_buf_idx.reset();` and its closing brace were missing, which
// both unbalanced the braces and let a stale spatial-reference buffer index
// leak into the next layer iteration. Restored below.
vpx_svc_ref_frame_config_t LibvpxVp9Encoder::SetReferences(
    bool is_key_pic,
    size_t first_active_spatial_layer_id) {
  // kRefBufIdx, kUpdBufIdx need to be updated to support longer GOFs.
  RTC_DCHECK_LE(gof_.num_frames_in_gof, 4);

  vpx_svc_ref_frame_config_t ref_config;
  memset(&ref_config, 0, sizeof(ref_config));

  const size_t num_temporal_refs = std::max(1, num_temporal_layers_ - 1);
  const bool is_inter_layer_pred_allowed =
      inter_layer_pred_ == InterLayerPredMode::kOn ||
      (inter_layer_pred_ == InterLayerPredMode::kOnKeyPic && is_key_pic);
  absl::optional<int> last_updated_buf_idx;

  // Put temporal reference to LAST and spatial reference to GOLDEN. Update
  // frame buffer (i.e. store encoded frame) if current frame is a temporal
  // reference (i.e. it belongs to a low temporal layer) or it is a spatial
  // reference. In later case, always store spatial reference in the last
  // reference frame buffer.
  // For the case of 3 temporal and 3 spatial layers we need 6 frame buffers
  // for temporal references plus 1 buffer for spatial reference. 7 buffers
  // in total.

  for (size_t sl_idx = first_active_spatial_layer_id;
       sl_idx < num_active_spatial_layers_; ++sl_idx) {
    const size_t curr_pic_num = is_key_pic ? 0 : pics_since_key_ + 1;
    const size_t gof_idx = curr_pic_num % gof_.num_frames_in_gof;

    if (!is_key_pic) {
      // Set up temporal reference.
      const int buf_idx = sl_idx * num_temporal_refs + kRefBufIdx[gof_idx];

      // Last reference frame buffer is reserved for spatial reference. It is
      // not supposed to be used for temporal prediction.
      RTC_DCHECK_LT(buf_idx, kNumVp9Buffers - 1);

      const int pid_diff = curr_pic_num - ref_buf_[buf_idx].pic_num;
      // Incorrect spatial layer may be in the buffer due to a key-frame.
      const bool same_spatial_layer =
          ref_buf_[buf_idx].spatial_layer_id == sl_idx;
      bool correct_pid = false;
      if (is_flexible_mode_) {
        correct_pid = pid_diff > 0 && pid_diff < kMaxAllowedPidDiff;
      } else {
        // Below code assumes single temporal reference.
        RTC_DCHECK_EQ(gof_.num_ref_pics[gof_idx], 1);
        correct_pid = pid_diff == gof_.pid_diff[gof_idx][0];
      }

      if (same_spatial_layer && correct_pid) {
        ref_config.lst_fb_idx[sl_idx] = buf_idx;
        ref_config.reference_last[sl_idx] = 1;
      } else {
        // This reference doesn't match with one specified by GOF. This can
        // only happen if spatial layer is enabled dynamically without key
        // frame. Spatial prediction is supposed to be enabled in this case.
        RTC_DCHECK(is_inter_layer_pred_allowed &&
                   sl_idx > first_active_spatial_layer_id);
      }
    }

    if (is_inter_layer_pred_allowed && sl_idx > first_active_spatial_layer_id) {
      // Set up spatial reference.
      RTC_DCHECK(last_updated_buf_idx);
      ref_config.gld_fb_idx[sl_idx] = *last_updated_buf_idx;
      ref_config.reference_golden[sl_idx] = 1;
    } else {
      RTC_DCHECK(ref_config.reference_last[sl_idx] != 0 ||
                 sl_idx == first_active_spatial_layer_id ||
                 inter_layer_pred_ == InterLayerPredMode::kOff);
      // No spatial reference consumed on this layer; forget the previously
      // updated buffer so a later layer cannot reference it by accident.
      last_updated_buf_idx.reset();
    }

    if (gof_.temporal_idx[gof_idx] < num_temporal_layers_ - 1 ||
        num_temporal_layers_ == 1) {
      last_updated_buf_idx = sl_idx * num_temporal_refs + kUpdBufIdx[gof_idx];

      // Ensure last frame buffer is not used for temporal prediction (it is
      // reserved for spatial reference).
      RTC_DCHECK_LT(*last_updated_buf_idx, kNumVp9Buffers - 1);
    } else if (is_inter_layer_pred_allowed) {
      last_updated_buf_idx = kNumVp9Buffers - 1;
    }

    if (last_updated_buf_idx) {
      ref_config.update_buffer_slot[sl_idx] = 1 << *last_updated_buf_idx;
    }
  }

  return ref_config;
}
// Callback invoked for each encoded frame packet produced by libvpx.
// Copies the bitstream into `encoded_image_`, fills codec-specific info,
// updates reference-buffer tracking, and either delivers the frame
// immediately or (in layer-buffering mode) flushes the previously buffered
// lower spatial layer first.
void LibvpxVp9Encoder::GetEncodedLayerFrame(const vpx_codec_cx_pkt* pkt) {
  RTC_DCHECK_EQ(pkt->kind, VPX_CODEC_CX_FRAME_PKT);

  if (pkt->data.frame.sz == 0) {
    // Ignore dropped frame.
    return;
  }

  vpx_svc_layer_id_t layer_id = {0};
  libvpx_->codec_control(encoder_, VP9E_GET_SVC_LAYER_ID, &layer_id);

  if (layer_buffering_) {
    // Deliver buffered low spatial layer frame.
    const bool end_of_picture = false;
    DeliverBufferedFrame(end_of_picture);
  }

  // TODO(nisse): Introduce some buffer cache or buffer pool, to reduce
  // allocations and/or copy operations.
  encoded_image_.SetEncodedData(EncodedImageBuffer::Create(
      static_cast<const uint8_t*>(pkt->data.frame.buf), pkt->data.frame.sz));

  const bool is_key_frame =
      (pkt->data.frame.flags & VPX_FRAME_IS_KEY) ? true : false;
  // Ensure encoder issued key frame on request.
  RTC_DCHECK(is_key_frame || !force_key_frame_);

  // Check if encoded frame is a key frame.
  encoded_image_._frameType = VideoFrameType::kVideoFrameDelta;
  if (is_key_frame) {
    encoded_image_._frameType = VideoFrameType::kVideoFrameKey;
    force_key_frame_ = false;
  }

  codec_specific_ = {};
  absl::optional<int> spatial_index;
  if (!PopulateCodecSpecific(&codec_specific_, &spatial_index, *pkt,
                             input_image_->timestamp())) {
    // Drop the frame.
    encoded_image_.set_size(0);
    return;
  }
  encoded_image_.SetSpatialIndex(spatial_index);

  UpdateReferenceBuffers(*pkt, pics_since_key_);

  TRACE_COUNTER1("webrtc", "EncodedFrameSize", encoded_image_.size());
  encoded_image_.SetTimestamp(input_image_->timestamp());
  encoded_image_._encodedHeight =
      pkt->data.frame.height[layer_id.spatial_layer_id];
  encoded_image_._encodedWidth =
      pkt->data.frame.width[layer_id.spatial_layer_id];
  int qp = -1;
  libvpx_->codec_control(encoder_, VP8E_GET_LAST_QUANTIZER, &qp);
  encoded_image_.qp_ = qp;

  if (!layer_buffering_) {
    // Without buffering, the frame can be delivered right away; it is the
    // last one of the picture when its spatial index is the top active layer.
    const bool end_of_picture = encoded_image_.SpatialIndex().value_or(0) + 1 ==
                                num_active_spatial_layers_;
    DeliverBufferedFrame(end_of_picture);
  }
}
// Hands the currently buffered encoded frame (if any) to the registered
// callback and, for screensharing, updates the steady-state frame counter
// used by the variable-framerate experiment. `end_of_picture` marks the last
// spatial layer of the picture.
void LibvpxVp9Encoder::DeliverBufferedFrame(bool end_of_picture) {
  if (encoded_image_.size() > 0) {
    if (num_spatial_layers_ > 1) {
      // Restore frame dropping settings, as dropping may be temporary forbidden
      // due to dynamically enabled layers.
      for (size_t i = 0; i < num_spatial_layers_; ++i) {
        svc_drop_frame_.framedrop_thresh[i] = config_->rc_dropframe_thresh;
      }
    }

    codec_specific_.end_of_picture = end_of_picture;

    encoded_complete_callback_->OnEncodedImage(encoded_image_,
                                               &codec_specific_);

    if (codec_.mode == VideoCodecMode::kScreensharing) {
      const uint8_t spatial_idx = encoded_image_.SpatialIndex().value_or(0);
      const uint32_t frame_timestamp_ms =
          1000 * encoded_image_.Timestamp() / kVideoPayloadTypeFrequency;
      framerate_controller_[spatial_idx].AddFrame(frame_timestamp_ms);

      const size_t steady_state_size = SteadyStateSize(
          spatial_idx, codec_specific_.codecSpecific.VP9.temporal_idx);

      // Only frames on spatial layers, which may be limited in a steady state
      // are considered for steady state detection.
      if (framerate_controller_[spatial_idx].GetTargetRate() >
          variable_framerate_experiment_.framerate_limit + 1e-9) {
        // A frame counts towards "steady state" when both its QP and its size
        // are small; any other frame resets the counter.
        if (encoded_image_.qp_ <=
                variable_framerate_experiment_.steady_state_qp &&
            encoded_image_.size() <= steady_state_size) {
          ++num_steady_state_frames_;
        } else {
          num_steady_state_frames_ = 0;
        }
      }
    }

    // Mark the buffer as consumed.
    encoded_image_.set_size(0);
  }
}
// Stores the sink that receives encoded frames from DeliverBufferedFrame().
// Always succeeds.
int LibvpxVp9Encoder::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}
// Reports the encoder's capabilities and current configuration: quality
// scaling thresholds, per-spatial-layer framerate fractions, preferred pixel
// formats and any bitrate-limit overrides.
VideoEncoder::EncoderInfo LibvpxVp9Encoder::GetEncoderInfo() const {
  EncoderInfo info;
  info.supports_native_handle = false;
  info.implementation_name = "libvpx";
  if (quality_scaler_experiment_.enabled && inited_ &&
      codec_.VP9().automaticResizeOn) {
    info.scaling_settings = VideoEncoder::ScalingSettings(
        quality_scaler_experiment_.low_qp, quality_scaler_experiment_.high_qp);
  } else {
    info.scaling_settings = VideoEncoder::ScalingSettings::kOff;
  }
  info.has_trusted_rate_controller = trusted_rate_controller_;
  info.is_hardware_accelerated = false;
  info.has_internal_source = false;
  if (inited_) {
    // Find the max configured fps of any active spatial layer.
    float max_fps = 0.0;
    for (size_t si = 0; si < num_spatial_layers_; ++si) {
      if (codec_.spatialLayers[si].active &&
          codec_.spatialLayers[si].maxFramerate > max_fps) {
        max_fps = codec_.spatialLayers[si].maxFramerate;
      }
    }

    for (size_t si = 0; si < num_spatial_layers_; ++si) {
      info.fps_allocation[si].clear();
      if (!codec_.spatialLayers[si].active) {
        continue;
      }

      // This spatial layer may already use a fraction of the total frame rate.
      const float sl_fps_fraction =
          codec_.spatialLayers[si].maxFramerate / max_fps;
      for (size_t ti = 0; ti < num_temporal_layers_; ++ti) {
        // Each temporal layer runs at max_fps divided by its rate decimator.
        const uint32_t decimator =
            num_temporal_layers_ <= 1 ? 1 : config_->ts_rate_decimator[ti];
        RTC_DCHECK_GT(decimator, 0);
        info.fps_allocation[si].push_back(
            rtc::saturated_cast<uint8_t>(EncoderInfo::kMaxFramerateFraction *
                                         (sl_fps_fraction / decimator)));
      }
    }
    if (profile_ == VP9Profile::kProfile0) {
      info.preferred_pixel_formats = {VideoFrameBuffer::Type::kI420,
                                      VideoFrameBuffer::Type::kNV12};
    }
  }
  if (!encoder_info_override_.resolution_bitrate_limits().empty()) {
    info.resolution_bitrate_limits =
        encoder_info_override_.resolution_bitrate_limits();
  }
  return info;
}
size_t LibvpxVp9Encoder::SteadyStateSize(int sid, int tid) {
const size_t bitrate_bps = current_bitrate_allocation_.GetBitrate(
sid, tid == kNoTemporalIdx ? 0 : tid);
const float fps = (codec_.mode == VideoCodecMode::kScreensharing)
? std::min(static_cast<float>(codec_.maxFramerate),
framerate_controller_[sid].GetTargetRate())
: codec_.maxFramerate;
return static_cast<size_t>(
bitrate_bps / (8 * fps) *
(100 -
variable_framerate_experiment_.steady_state_undershoot_percentage) /
100 +
0.5);
}
// static
// Parses the "WebRTC-VP9VariableFramerateScreenshare" field trial into a
// VariableFramerateExperiment. When the trial string is absent or a key is
// missing, the documented defaults below apply.
LibvpxVp9Encoder::VariableFramerateExperiment
LibvpxVp9Encoder::ParseVariableFramerateConfig(
    const WebRtcKeyValueConfig& trials) {
  FieldTrialFlag enabled_flag = FieldTrialFlag("Enabled");
  FieldTrialParameter<double> min_fps("min_fps", 5.0);
  FieldTrialParameter<int> min_qp("min_qp", 32);
  FieldTrialParameter<int> undershoot("undershoot", 30);
  FieldTrialParameter<int> steady_state_frames("frames_before_steady_state",
                                               5);
  ParseFieldTrial(
      {&enabled_flag, &min_fps, &min_qp, &undershoot, &steady_state_frames},
      trials.Lookup("WebRTC-VP9VariableFramerateScreenshare"));

  VariableFramerateExperiment config;
  config.enabled = enabled_flag.Get();
  config.framerate_limit = min_fps.Get();
  config.steady_state_qp = min_qp.Get();
  config.steady_state_undershoot_percentage = undershoot.Get();
  config.frames_before_steady_state = steady_state_frames.Get();
  return config;
}
// static
// Parses the "WebRTC-VP9QualityScaler" field trial. The scaler is enabled by
// default and can be turned off with "Disabled".
LibvpxVp9Encoder::QualityScalerExperiment
LibvpxVp9Encoder::ParseQualityScalerConfig(const WebRtcKeyValueConfig& trials) {
  FieldTrialFlag disabled = FieldTrialFlag("Disabled");
  FieldTrialParameter<int> low_qp("low_qp", kLowVp9QpThreshold);
  // NOTE: the trial key "hihg_qp" is misspelled, but it must stay as-is:
  // renaming it would silently ignore already-deployed field-trial strings.
  FieldTrialParameter<int> high_qp("hihg_qp", kHighVp9QpThreshold);
  ParseFieldTrial({&disabled, &low_qp, &high_qp},
                  trials.Lookup("WebRTC-VP9QualityScaler"));
  QualityScalerExperiment config;
  config.enabled = !disabled.Get();
  RTC_LOG(LS_INFO) << "Webrtc quality scaler for vp9 is "
                   << (config.enabled ? "enabled." : "disabled");
  config.low_qp = low_qp.Get();
  config.high_qp = high_qp.Get();

  return config;
}
// Rebuilds `performance_flags_by_spatial_index_`: one speed/deblock setting
// per spatial layer (or a single entry in non-SVC mode), selected by each
// layer's pixel count.
void LibvpxVp9Encoder::UpdatePerformanceFlags() {
  // Picks the configured ParameterSet whose pixel-count threshold is the
  // largest one not exceeding `pixel_count`.
  const auto settings_for = [&](int pixel_count) {
    RTC_DCHECK(!performance_flags_.settings_by_resolution.empty());
    // upper_bound() yields the first entry strictly above `pixel_count`; the
    // entry just before it is the applicable one.
    return std::prev(performance_flags_.settings_by_resolution.upper_bound(
                         pixel_count))
        ->second;
  };

  performance_flags_by_spatial_index_.clear();

  if (!is_svc_) {
    performance_flags_by_spatial_index_.push_back(
        settings_for(codec_.width * codec_.height));
    return;
  }

  for (int si = 0; si < num_spatial_layers_; ++si) {
    const auto& layer = codec_.spatialLayers[si];
    performance_flags_by_spatial_index_.push_back(
        settings_for(layer.width * layer.height));
  }
}
// static
// Parses the "WebRTC-VP9-PerformanceFlags" field trial into a PerformanceFlags
// map keyed by minimum pixel count. Entries with out-of-range speeds or
// deblock modes are dropped with a warning; if nothing valid remains, the
// built-in defaults are returned.
LibvpxVp9Encoder::PerformanceFlags
LibvpxVp9Encoder::ParsePerformanceFlagsFromTrials(
    const WebRtcKeyValueConfig& trials) {
  // Extends ParameterSet with the resolution threshold used as the map key.
  struct Params : public PerformanceFlags::ParameterSet {
    int min_pixel_count = 0;
  };

  FieldTrialStructList<Params> trials_list(
      {FieldTrialStructMember("min_pixel_count",
                              [](Params* p) { return &p->min_pixel_count; }),
       FieldTrialStructMember("high_layer_speed",
                              [](Params* p) { return &p->high_layer_speed; }),
       FieldTrialStructMember("base_layer_speed",
                              [](Params* p) { return &p->base_layer_speed; }),
       FieldTrialStructMember("deblock_mode",
                              [](Params* p) { return &p->deblock_mode; })},
      {});

  FieldTrialFlag per_layer_speed("use_per_layer_speed");

  ParseFieldTrial({&trials_list, &per_layer_speed},
                  trials.Lookup("WebRTC-VP9-PerformanceFlags"));

  PerformanceFlags flags;
  flags.use_per_layer_speed = per_layer_speed.Get();

  // Validate each parsed entry before inserting it into the map.
  constexpr int kMinSpeed = 1;
  constexpr int kMaxSpeed = 9;
  for (auto& f : trials_list.Get()) {
    if (f.base_layer_speed < kMinSpeed || f.base_layer_speed > kMaxSpeed ||
        f.high_layer_speed < kMinSpeed || f.high_layer_speed > kMaxSpeed ||
        f.deblock_mode < 0 || f.deblock_mode > 2) {
      RTC_LOG(LS_WARNING) << "Ignoring invalid performance flags: "
                          << "min_pixel_count = " << f.min_pixel_count
                          << ", high_layer_speed = " << f.high_layer_speed
                          << ", base_layer_speed = " << f.base_layer_speed
                          << ", deblock_mode = " << f.deblock_mode;
      continue;
    }
    flags.settings_by_resolution[f.min_pixel_count] = f;
  }

  if (flags.settings_by_resolution.empty()) {
    return GetDefaultPerformanceFlags();
  }

  return flags;
}
// static
// Returns the built-in speed/deblock settings used when no valid
// "WebRTC-VP9-PerformanceFlags" field trial is present. The aggregate
// initializers below are {high_layer_speed, base_layer_speed, deblock_mode};
// assumed from the FieldTrialStructMember order above — TODO confirm against
// the ParameterSet declaration.
LibvpxVp9Encoder::PerformanceFlags
LibvpxVp9Encoder::GetDefaultPerformanceFlags() {
  PerformanceFlags flags;
  flags.use_per_layer_speed = false;
#if defined(WEBRTC_ARCH_ARM) || defined(WEBRTC_ARCH_ARM64) || defined(ANDROID)
  // Speed 8 on all layers for all resolutions.
  flags.settings_by_resolution[0] = {8, 8, 0};
#else
  // For smaller resolutions, use lower speed setting (get some coding gain at
  // the cost of increased encoding complexity).
  flags.settings_by_resolution[0] = {5, 5, 0};

  // Use speed 7 for QCIF and above.
  flags.settings_by_resolution[352 * 288] = {7, 7, 0};
#endif
  return flags;
}
void LibvpxVp9Encoder::MaybeRewrapRawWithFormat(const vpx_img_fmt fmt) {
if (!raw_) {
raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
nullptr);
} else if (raw_->fmt != fmt) {
RTC_LOG(INFO) << "Switching VP9 encoder pixel format to "
<< (fmt == VPX_IMG_FMT_NV12 ? "NV12" : "I420");
libvpx_->img_free(raw_);
raw_ = libvpx_->img_wrap(nullptr, fmt, codec_.width, codec_.height, 1,
nullptr);
}
// else no-op since the image is already in the right format.
}
// Maps (or converts) `buffer` into an I420/I420A/NV12 frame buffer that
// profile-0 encoding can consume, and points the libvpx `raw_` image at its
// planes. Returns the mapped buffer on success, or nullptr on conversion
// failure. Note: `raw_` borrows the returned buffer's plane memory, so the
// caller must keep the returned buffer alive while encoding.
rtc::scoped_refptr<VideoFrameBuffer> LibvpxVp9Encoder::PrepareBufferForProfile0(
    rtc::scoped_refptr<VideoFrameBuffer> buffer) {
  absl::InlinedVector<VideoFrameBuffer::Type, kMaxPreferredPixelFormats>
      supported_formats = {VideoFrameBuffer::Type::kI420,
                           VideoFrameBuffer::Type::kNV12};

  rtc::scoped_refptr<VideoFrameBuffer> mapped_buffer;
  if (buffer->type() != VideoFrameBuffer::Type::kNative) {
    // |buffer| is already mapped.
    mapped_buffer = buffer;
  } else {
    // Attempt to map to one of the supported formats.
    mapped_buffer = buffer->GetMappedFrameBuffer(supported_formats);
  }
  if (!mapped_buffer ||
      (absl::c_find(supported_formats, mapped_buffer->type()) ==
           supported_formats.end() &&
       mapped_buffer->type() != VideoFrameBuffer::Type::kI420A)) {
    // Unknown pixel format or unable to map, convert to I420 and prepare that
    // buffer instead to ensure Scale() is safe to use.
    auto converted_buffer = buffer->ToI420();
    if (!converted_buffer) {
      RTC_LOG(LS_ERROR) << "Failed to convert "
                        << VideoFrameBufferTypeToString(buffer->type())
                        << " image to I420. Can't encode frame.";
      return {};
    }
    // The buffer should now be a mapped I420 or I420A format, but some buffer
    // implementations incorrectly return the wrong buffer format, such as
    // kNative. As a workaround to this, we perform ToI420() a second time.
    // TODO(https://crbug.com/webrtc/12602): When Android buffers have a correct
    // ToI420() implementation, remove this workaround.
    if (converted_buffer->type() != VideoFrameBuffer::Type::kI420 &&
        converted_buffer->type() != VideoFrameBuffer::Type::kI420A) {
      converted_buffer = converted_buffer->ToI420();
      RTC_CHECK(converted_buffer->type() == VideoFrameBuffer::Type::kI420 ||
                converted_buffer->type() == VideoFrameBuffer::Type::kI420A);
    }
    // Because |buffer| had to be converted, use |converted_buffer| instead.
    buffer = mapped_buffer = converted_buffer;
  }

  // Prepare |raw_| from |mapped_buffer|.
  switch (mapped_buffer->type()) {
    case VideoFrameBuffer::Type::kI420:
    case VideoFrameBuffer::Type::kI420A: {
      // For I420A the alpha plane is simply ignored (profile 0 has no alpha).
      MaybeRewrapRawWithFormat(VPX_IMG_FMT_I420);
      const I420BufferInterface* i420_buffer = mapped_buffer->GetI420();
      RTC_DCHECK(i420_buffer);
      raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(i420_buffer->DataY());
      raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(i420_buffer->DataU());
      raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(i420_buffer->DataV());
      raw_->stride[VPX_PLANE_Y] = i420_buffer->StrideY();
      raw_->stride[VPX_PLANE_U] = i420_buffer->StrideU();
      raw_->stride[VPX_PLANE_V] = i420_buffer->StrideV();
      break;
    }
    case VideoFrameBuffer::Type::kNV12: {
      MaybeRewrapRawWithFormat(VPX_IMG_FMT_NV12);
      const NV12BufferInterface* nv12_buffer = mapped_buffer->GetNV12();
      RTC_DCHECK(nv12_buffer);
      raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(nv12_buffer->DataY());
      raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(nv12_buffer->DataUV());
      // NV12 interleaves U and V; V starts one byte after U in the same plane.
      raw_->planes[VPX_PLANE_V] = raw_->planes[VPX_PLANE_U] + 1;
      raw_->stride[VPX_PLANE_Y] = nv12_buffer->StrideY();
      raw_->stride[VPX_PLANE_U] = nv12_buffer->StrideUV();
      raw_->stride[VPX_PLANE_V] = nv12_buffer->StrideUV();
      break;
    }
    default:
      RTC_NOTREACHED();
  }
  return mapped_buffer;
}
} // namespace webrtc
#endif // RTC_ENABLE_VP9<|fim▁end|>
|
}
last_updated_buf_idx.reset();
|