user_repository.go | package repository
import (
"context"
"database/sql"
"errors"
"github.com/reecerussell/open-social/cmd/users/dao"
"github.com/reecerussell/open-social/cmd/users/model"
// MSSQL driver
_ "github.com/denisenkom/go-mssqldb"
)
var (
ErrUserNotFound = errors.New("user not found")
)
type UserRepository interface {
Create(ctx context.Context, u *model.User) error
DoesUsernameExist(ctx context.Context, username string, excludeRefID *string) (bool, error)
GetUserByUsername(ctx context.Context, username string) (*model.User, error)
// GetUserByReference returns the user with the given referenceID,
// evaluated from the perspective of the user with userReferenceID
// (used to populate the IsFollowing flag).
GetUserByReference(ctx context.Context, referenceID, userReferenceID string) (*model.User, error)
GetIDByReference(ctx context.Context, referenceID string) (*int, error)
}
type userRepository struct {
url string
}
func NewUserRepository(url string) UserRepository {
return &userRepository{url: url}
}
func (r *userRepository) Create(ctx context.Context, u *model.User) error {
db, err := sql.Open("sqlserver", r.url)
if err != nil {
return err
}
defer db.Close()
const query = `INSERT INTO [Users] ([ReferenceId],[Username],[PasswordHash])
VALUES (NEWID(), @username, @passwordHash)
SELECT [Id], CAST([ReferenceId] AS CHAR(36)) FROM [Users] WHERE [Id] = SCOPE_IDENTITY()`
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return err
}
defer stmt.Close()
user := u.Dao()
row := stmt.QueryRowContext(ctx,
sql.Named("username", user.Username),
sql.Named("passwordHash", user.PasswordHash))
// Read the user's ids
err = row.Scan(&user.ID, &user.ReferenceID)
if err != nil {
return err
}
// Set the user's ids
u.SetID(user.ID)
u.SetReferenceID(user.ReferenceID)
return nil
}
func (r *userRepository) DoesUsernameExist(ctx context.Context, username string, excludeRefID *string) (bool, error) {
db, err := sql.Open("sqlserver", r.url)
if err != nil {
return false, err
}
defer db.Close()
query := "SELECT COUNT(*) FROM [Users] WHERE [Username] = @username"
args := []interface{}{sql.Named("username", username)}
if excludeRefID != nil {
query += " [ReferenceId] != @referenceId"
args = append(args, sql.Named("referenceId", *excludeRefID))
}
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return false, err
}
defer stmt.Close()
var count int64
err = stmt.QueryRowContext(ctx, args...).Scan(&count)
if err != nil {
return false, err
}
return count > 0, nil
}
func (r *userRepository) GetUserByUsername(ctx context.Context, username string) (*model.User, error) {
db, err := sql.Open("sqlserver", r.url)
if err != nil {
return nil, err
}
defer db.Close()
const query = `SELECT [Id], CAST([ReferenceId] AS CHAR(36)), [Username], [PasswordHash]
FROM [Users] WHERE [Username] = @username`
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
defer stmt.Close()
var user dao.User
err = stmt.QueryRowContext(ctx, sql.Named("username", username)).Scan(
&user.ID,
&user.ReferenceID,
&user.Username,
&user.PasswordHash,
)
if err != nil {
if err == sql.ErrNoRows {
return nil, ErrUserNotFound
}
return nil, err
}
return model.NewUserFromDao(&user), nil
}
func (r *userRepository) GetIDByReference(ctx context.Context, referenceID string) (*int, error) {
db, err := sql.Open("sqlserver", r.url)
if err != nil {
return nil, err
}
defer db.Close()
const query = `SELECT [Id] FROM [Users] WHERE [ReferenceId] = @referenceId;`
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
defer stmt.Close()
var id int
err = stmt.QueryRowContext(ctx, sql.Named("referenceId", referenceID)).Scan(
&id,
)
if err != nil {
if err == sql.ErrNoRows {
return nil, ErrUserNotFound
}
return nil, err
}
return &id, nil
}
func (r *userRepository) GetUserByReference(ctx context.Context, referenceID, userReferenceID string) (*model.User, error) {
db, err := sql.Open("sqlserver", r.url)
if err != nil {
return nil, err
}
defer db.Close()
const query = `SELECT
[U].[Id],
CAST([U].[ReferenceId] AS CHAR(36)),
[U].[Username],
[U].[PasswordHash],
CASE (SELECT COUNT([UserId]) FROM [UserFollowers] AS [UF]
INNER JOIN [Users] AS [F] ON [F].[Id] = [UF].[FollowerId]
WHERE [UF].[UserId] = [U].[Id] AND [F].[ReferenceId] = @userReferenceId)
WHEN 1 THEN CAST(1 AS BIT)
ELSE CAST(0 AS BIT)
END AS [IsFollowing]
FROM [Users] AS [U]
WHERE [U].[ReferenceId] = @referenceId;`
stmt, err := db.PrepareContext(ctx, query)
if err != nil {
return nil, err
}
defer stmt.Close()
var user dao.User
err = stmt.QueryRowContext(ctx,
sql.Named("referenceId", referenceID),
sql.Named("userReferenceId", userReferenceID)).Scan(
&user.ID,
&user.ReferenceID,
&user.Username,
&user.PasswordHash,
&user.IsFollowing,
)
if err != nil {
if err == sql.ErrNoRows {
return nil, ErrUserNotFound
}
return nil, err
}
return model.NewUserFromDao(&user), nil
}
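A minimal sketch of how this repository might be constructed and called. The import path and connection string are assumptions for illustration (only the `denisenkom/go-mssqldb` URL format is documented); the real wiring in open-social is not shown in this excerpt.

```go
package main

import (
	"context"
	"fmt"
	"log"

	// Assumed import path for the package above.
	"github.com/reecerussell/open-social/cmd/users/repository"
)

func main() {
	// go-mssqldb connection URL (placeholder credentials).
	repo := repository.NewUserRepository(
		"sqlserver://user:pass@localhost:1433?database=social")

	// Check for a username collision; nil means no reference ID is excluded.
	taken, err := repo.DoesUsernameExist(context.Background(), "alice", nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("username taken:", taken)
}
```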
A.go | package main
import (
"bufio"
"os"
"strconv"
"fmt"
)
var sc = bufio.NewScanner(os.Stdin)
var rdr = bufio.NewReaderSize(os.Stdin, 1000000)
func main() {
var a, d int
fmt.Scan(&a, &d)
if a > d {
fmt.Println(a * (d + 1))
} else {
fmt.Println(d * (a + 1))
}
}
func gcd(x, y int) int {
	if y == 0 {
		return x
	}
	return gcd(y, x%y)
}
func lcm(x, y int) int {
return x*y / gcd(x, y)
}
func nextInt() int {
sc.Scan()
nextI, err := strconv.Atoi(sc.Text())
if err != nil {
panic(err)
}
return nextI
}
func readLine() string {
buf := make([]byte, 0, 1000000)
for {
line, isPrefix, err := rdr.ReadLine()
if err != nil {
panic(err)
}
buf = append(buf, line...)
if !isPrefix {
break
}
}
return string(buf)
}
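The `gcd`/`lcm` helpers above are Euclid's algorithm. A self-contained sketch with expected outputs follows; note that it divides before multiplying in `lcm` (a small change from the `x*y / gcd(x, y)` form above) to reduce overflow risk.

```go
package main

import "fmt"

// gcd returns the greatest common divisor via Euclid's algorithm.
func gcd(x, y int) int {
	if y == 0 {
		return x
	}
	return gcd(y, x%y)
}

// lcm divides by the gcd before multiplying, which avoids
// intermediate overflow for moderately large inputs.
func lcm(x, y int) int {
	return x / gcd(x, y) * y
}

func main() {
	fmt.Println(gcd(12, 18)) // 6
	fmt.Println(lcm(12, 18)) // 36
}
```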
server.js | // test.js
// This is a harness for running the tethr.io tests
// tethr.io tests rely on a backend signaling server (webrtc.io) as well as more than one other peer in the same room.
// Thus we cannot simply use a packaged test harness, but wrap our own with phantomjs
var async = require('async')
var launcher = require('browser-launcher')
var testapp = require('./testapp')
module.exports = function (opts, cb) {
opts = opts || {}
opts.port = opts.port || 18101
async.series([
function (cb) {
var app = testapp.listen(opts.port)
setTimeout(cb, 50)
},
function (cb) {
launchChrome('http://localhost:' + opts.port + '/ping.html', cb)
},
function (cb) {
setTimeout(cb, 1000)
},
], function (err) {
if (err) {
console.error(err)
process.exit(1)
}
if (opts.killtime)
setTimeout(function () { process.exit(0) }, opts.killtime)
else if (cb)
cb()
})
}
function launchChrome(addr, cb) {
launcher(function (err, launch) {
if (err) return cb(err)
var opts = {
headless: true,
browser: null,
};
launch.browsers.local.forEach(function(browser) {
if (/^chrom/.test(browser.name)) {
opts.browser = browser.name
}
})
if (!opts.browser) {
return cb(new Error('No chrome or chromium browser present on system'))
}
console.log('Launching chrome to ' + addr)
launch(addr, opts, function (err, ps) {
cb(err)
});
});
}
Reverse.py | # Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.utils import int64_array
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class Reverse(Op):
    op = 'Reverse'

    def __init__(self, graph: Graph, attrs: dict):
        mandatory_props = {
            'type': None,
            'axis': None,
            'op': self.op,
            'in_ports_count': 2,
            'out_ports_count': 1,
            'infer': self.infer,
        }
        super().__init__(graph, mandatory_props, attrs)

    @staticmethod
    def infer(node):
        input_shape = node.in_port(0).data.get_shape()
        input_value = node.in_port(0).data.get_value()
        assert input_shape is not None

        if not node.has_valid('axis'):
            assert 1 in node.in_nodes()
            assert node.in_node(1).has_valid('value')
            assert node.in_node(1).value.size == 1
            node['axis'] = node.in_node(1).value.item()
            node.in_port(1).disconnect()

        assert node.has_valid('axis')
        assert len(node.out_nodes()) == 1

        if input_value is not None:
            node.out_port(0).data.set_value(np.flip(input_value, node.axis))
        else:
            node.out_port(0).data.set_shape(input_shape)
styleSheet.js | 'use strict';
/* jshint ignore:start */
/**
* This code was generated by
* \ / _ _ _| _ _
* | (_)\/(_)(_|\/| |(/_ v1.0.0
* / /
*/
/* jshint ignore:end */
var Q = require('q'); /* jshint ignore:line */
var _ = require('lodash'); /* jshint ignore:line */
var util = require('util'); /* jshint ignore:line */
var Page = require('../../../../base/Page'); /* jshint ignore:line */
var serialize = require('../../../../base/serialize'); /* jshint ignore:line */
var values = require('../../../../base/values'); /* jshint ignore:line */
var StyleSheetList;
var StyleSheetPage;
var StyleSheetInstance;
var StyleSheetContext;
/* jshint ignore:start */
/**
* Initialize the StyleSheetList
*
* PLEASE NOTE that this class contains preview products that are subject to
* change. Use them with caution. If you currently do not have developer preview
* access, please contact [email protected].
*
* @constructor Twilio.Preview.Understand.AssistantContext.StyleSheetList
*
* @param {Twilio.Preview.Understand} version - Version of the resource
* @param {string} assistantSid - The unique ID of the Assistant
*/
/* jshint ignore:end */
StyleSheetList = function StyleSheetList(version, assistantSid) {
/* jshint ignore:start */
/**
* @function styleSheet
* @memberof Twilio.Preview.Understand.AssistantContext#
*
* @param {string} sid - sid of instance
*
* @returns {Twilio.Preview.Understand.AssistantContext.StyleSheetContext}
*/
/* jshint ignore:end */
function StyleSheetListInstance(sid) {
return StyleSheetListInstance.get(sid);
}
StyleSheetListInstance._version = version;
// Path Solution
StyleSheetListInstance._solution = {assistantSid: assistantSid};
/* jshint ignore:start */
/**
* Constructs a style_sheet
*
* @function get
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetList#
*
* @returns {Twilio.Preview.Understand.AssistantContext.StyleSheetContext}
*/
/* jshint ignore:end */
StyleSheetListInstance.get = function get() {
return new StyleSheetContext(this._version, this._solution.assistantSid);
};
/* jshint ignore:start */
/**
* Provide a user-friendly representation
*
* @function toJSON
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetList#
*
* @returns Object
*/
/* jshint ignore:end */
StyleSheetListInstance.toJSON = function toJSON() {
return this._solution;
};
StyleSheetListInstance[util.inspect.custom] = function inspect(depth, options) {
return util.inspect(this.toJSON(), options);
};
return StyleSheetListInstance;
};
/* jshint ignore:start */
/**
* Initialize the StyleSheetPage
*
* PLEASE NOTE that this class contains preview products that are subject to
* change. Use them with caution. If you currently do not have developer preview
* access, please contact [email protected].
*
* @constructor Twilio.Preview.Understand.AssistantContext.StyleSheetPage
*
* @param {Understand} version - Version of the resource
* @param {Response<string>} response - Response from the API
* @param {StyleSheetSolution} solution - Path solution
*
* @returns StyleSheetPage
*/
/* jshint ignore:end */
StyleSheetPage = function StyleSheetPage(version, response, solution) {
// Path Solution
this._solution = solution;
Page.prototype.constructor.call(this, version, response, this._solution);
};
_.extend(StyleSheetPage.prototype, Page.prototype);
StyleSheetPage.prototype.constructor = StyleSheetPage;
/* jshint ignore:start */
/**
* Build an instance of StyleSheetInstance
*
* @function getInstance
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetPage#
*
* @param {StyleSheetPayload} payload - Payload response from the API
*
* @returns StyleSheetInstance
*/
/* jshint ignore:end */
StyleSheetPage.prototype.getInstance = function getInstance(payload) {
return new StyleSheetInstance(this._version, payload, this._solution.assistantSid);
};
/* jshint ignore:start */
/**
* Provide a user-friendly representation
*
* @function toJSON
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetPage#
*
* @returns Object
*/
/* jshint ignore:end */
StyleSheetPage.prototype.toJSON = function toJSON() {
let clone = {};
_.forOwn(this, function(value, key) {
if (!_.startsWith(key, '_') && ! _.isFunction(value)) {
clone[key] = value;
}
});
return clone;
};
StyleSheetPage.prototype[util.inspect.custom] = function inspect(depth, options)
{
return util.inspect(this.toJSON(), options);
};
/* jshint ignore:start */
/**
* Initialize the StyleSheetContext
*
* PLEASE NOTE that this class contains preview products that are subject to
* change. Use them with caution. If you currently do not have developer preview
* access, please contact [email protected].
*
* @constructor Twilio.Preview.Understand.AssistantContext.StyleSheetInstance
*
* @property {string} accountSid -
* The unique ID of the Account that created this Assistant
* @property {string} assistantSid - The unique ID of the Assistant
* @property {string} url - The url
* @property {object} data - The JSON style sheet object
*
* @param {Understand} version - Version of the resource
* @param {StyleSheetPayload} payload - The instance payload
* @param {sid} assistantSid - The unique ID of the Assistant
*/
/* jshint ignore:end */
StyleSheetInstance = function StyleSheetInstance(version, payload, assistantSid)
{
this._version = version;
// Marshaled Properties
this.accountSid = payload.account_sid; // jshint ignore:line
this.assistantSid = payload.assistant_sid; // jshint ignore:line
this.url = payload.url; // jshint ignore:line
this.data = payload.data; // jshint ignore:line
// Context
this._context = undefined;
this._solution = {assistantSid: assistantSid, };
};
Object.defineProperty(StyleSheetInstance.prototype,
'_proxy', {
get: function() {
if (!this._context) {
this._context = new StyleSheetContext(this._version, this._solution.assistantSid);
}
return this._context;
}
});
/* jshint ignore:start */
/**
* fetch a StyleSheetInstance
*
* @function fetch
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetInstance#
*
* @param {function} [callback] - Callback to handle processed record
*
* @returns {Promise} Resolves to processed StyleSheetInstance
*/
/* jshint ignore:end */
StyleSheetInstance.prototype.fetch = function fetch(callback) {
return this._proxy.fetch(callback);
};
/* jshint ignore:start */
/**
* update a StyleSheetInstance
*
* @function update
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetInstance#
*
* @param {object} [opts] - Options for request
* @param {object} [opts.styleSheet] - The JSON Style sheet string
* @param {function} [callback] - Callback to handle processed record
*
* @returns {Promise} Resolves to processed StyleSheetInstance
*/
/* jshint ignore:end */
StyleSheetInstance.prototype.update = function update(opts, callback) {
return this._proxy.update(opts, callback);
};
/* jshint ignore:start */
/**
* Provide a user-friendly representation
*
* @function toJSON
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetInstance#
*
* @returns Object
*/
/* jshint ignore:end */
StyleSheetInstance.prototype.toJSON = function toJSON() {
let clone = {};
_.forOwn(this, function(value, key) {
if (!_.startsWith(key, '_') && ! _.isFunction(value)) {
clone[key] = value;
}
});
return clone;
};
StyleSheetInstance.prototype[util.inspect.custom] = function inspect(depth,
options) {
return util.inspect(this.toJSON(), options);
};
/* jshint ignore:start */
/**
* Initialize the StyleSheetContext
*
* PLEASE NOTE that this class contains preview products that are subject to
* change. Use them with caution. If you currently do not have developer preview
* access, please contact [email protected].
*
* @constructor Twilio.Preview.Understand.AssistantContext.StyleSheetContext
*
* @param {Understand} version - Version of the resource
* @param {sid_like} assistantSid - The unique ID of the Assistant
*/
/* jshint ignore:end */
StyleSheetContext = function StyleSheetContext(version, assistantSid) {
this._version = version;
// Path Solution
this._solution = {assistantSid: assistantSid, };
this._uri = `/Assistants/${assistantSid}/StyleSheet`;
};
/* jshint ignore:start */
/**
* fetch a StyleSheetInstance
*
* @function fetch
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetContext#
*
* @param {function} [callback] - Callback to handle processed record
*
* @returns {Promise} Resolves to processed StyleSheetInstance
*/
/* jshint ignore:end */
StyleSheetContext.prototype.fetch = function fetch(callback) {
var deferred = Q.defer();
var promise = this._version.fetch({uri: this._uri, method: 'GET'});
promise = promise.then(function(payload) {
deferred.resolve(new StyleSheetInstance(this._version, payload, this._solution.assistantSid));
}.bind(this));
promise.catch(function(error) {
deferred.reject(error);
});
if (_.isFunction(callback)) {
deferred.promise.nodeify(callback);
}
return deferred.promise;
};
/* jshint ignore:start */
/**
* update a StyleSheetInstance
*
* @function update
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetContext#
*
* @param {object} [opts] - Options for request
* @param {object} [opts.styleSheet] - The JSON Style sheet string
* @param {function} [callback] - Callback to handle processed record
*
* @returns {Promise} Resolves to processed StyleSheetInstance
*/
/* jshint ignore:end */
StyleSheetContext.prototype.update = function update(opts, callback) {
if (_.isFunction(opts)) {
callback = opts;
opts = {};
}
opts = opts || {};
var deferred = Q.defer();
var data = values.of({'StyleSheet': serialize.object(_.get(opts, 'styleSheet'))});
var promise = this._version.update({uri: this._uri, method: 'POST', data: data});
promise = promise.then(function(payload) {
deferred.resolve(new StyleSheetInstance(this._version, payload, this._solution.assistantSid));
}.bind(this));
promise.catch(function(error) {
deferred.reject(error);
});
if (_.isFunction(callback)) {
deferred.promise.nodeify(callback);
}
return deferred.promise;
};
/* jshint ignore:start */
/**
* Provide a user-friendly representation
*
* @function toJSON
* @memberof Twilio.Preview.Understand.AssistantContext.StyleSheetContext#
*
* @returns Object
*/
/* jshint ignore:end */
StyleSheetContext.prototype.toJSON = function toJSON() {
return this._solution;
};
StyleSheetContext.prototype[util.inspect.custom] = function inspect(depth,
options) {
return util.inspect(this.toJSON(), options);
};
module.exports = {
StyleSheetList: StyleSheetList,
StyleSheetPage: StyleSheetPage,
StyleSheetInstance: StyleSheetInstance,
StyleSheetContext: StyleSheetContext
};
trampoline.rs | use std::convert::TryFrom;
use crate::extn::core::array::Array;
use crate::extn::core::matchdata::{Capture, CaptureAt, CaptureExtract, MatchData};
use crate::extn::core::regexp::Regexp;
use crate::extn::core::symbol::Symbol;
use crate::extn::prelude::*;
use crate::sys::protect;
pub fn begin(interp: &mut Artichoke, mut value: Value, mut at: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let capture = match interp.try_convert_mut(&mut at)? {
CaptureExtract::GroupIndex(idx) => Capture::GroupIndex(idx),
CaptureExtract::GroupName(name) => Capture::GroupName(name),
CaptureExtract::Symbol(symbol) => Capture::GroupName(symbol.bytes(interp)),
};
let begin = data.begin(capture)?;
match begin.map(Int::try_from) {
Some(Ok(begin)) => Ok(interp.convert(begin)),
Some(Err(_)) => Err(ArgumentError::from("input string too long").into()),
None => Ok(Value::nil()),
}
}
pub fn captures(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
if let Some(captures) = data.captures()? {
interp.try_convert_mut(captures)
} else {
Ok(Value::nil())
}
}
pub fn element_reference(
interp: &mut Artichoke,
mut value: Value,
mut elem: Value,
len: Option<Value>,
) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let at = if let Some(len) = len {
let start = elem.implicitly_convert_to_int(interp)?;
let len = len.implicitly_convert_to_int(interp)?;
CaptureAt::StartLen(start, len)
} else if let Ok(index) = elem.implicitly_convert_to_int(interp) {
CaptureAt::GroupIndex(index)
} else if let Ok(name) = elem.implicitly_convert_to_string(interp) {
CaptureAt::GroupName(name)
} else if let Ok(symbol) = unsafe { Symbol::unbox_from_value(&mut elem, interp) } {
CaptureAt::GroupName(symbol.bytes(interp))
} else {
// NOTE(lopopolo): Encapsulation is broken here by reaching into the
// inner regexp.
let captures_len = data.regexp.inner().captures_len(None)?;
let rangelen = Int::try_from(captures_len)
.map_err(|_| ArgumentError::from("input string too long"))?;
if let Some(protect::Range { start, len }) = elem.is_range(interp, rangelen)? {
CaptureAt::StartLen(start, len)
} else {
return Ok(Value::nil());
}
};
let matched = data.capture_at(at)?;
interp.try_convert_mut(matched)
}
pub fn end(interp: &mut Artichoke, mut value: Value, mut at: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let capture = match interp.try_convert_mut(&mut at)? {
CaptureExtract::GroupIndex(idx) => Capture::GroupIndex(idx),
CaptureExtract::GroupName(name) => Capture::GroupName(name),
CaptureExtract::Symbol(symbol) => Capture::GroupName(symbol.bytes(interp)),
};
let end = data.end(capture)?;
match end.map(Int::try_from) {
Some(Ok(end)) => Ok(interp.convert(end)),
Some(Err(_)) => Err(ArgumentError::from("input string too long").into()),
None => Ok(Value::nil()),
}
}
pub fn length(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let len = data.len()?;
if let Ok(len) = Int::try_from(len) {
Ok(interp.convert(len))
} else {
Err(ArgumentError::from("input string too long").into())
}
}
pub fn named_captures(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let named_captures = data.named_captures()?;
interp.try_convert_mut(named_captures)
}
pub fn names(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let names = data.names();
interp.try_convert_mut(names)
}
pub fn offset(interp: &mut Artichoke, mut value: Value, mut at: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let capture = match interp.try_convert_mut(&mut at)? {
CaptureExtract::GroupIndex(idx) => Capture::GroupIndex(idx),
CaptureExtract::GroupName(name) => Capture::GroupName(name),
CaptureExtract::Symbol(symbol) => Capture::GroupName(symbol.bytes(interp)),
};
if let Some([begin, end]) = data.offset(capture)? {
if let (Ok(begin), Ok(end)) = (Int::try_from(begin), Int::try_from(end)) {
let ary = Array::assoc(interp.convert(begin), interp.convert(end));
Array::alloc_value(ary, interp)
} else {
Err(ArgumentError::from("input string too long").into())
}
} else {
let ary = Array::assoc(Value::nil(), Value::nil());
Array::alloc_value(ary, interp)
}
}
pub fn post_match(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let post = data.post();
Ok(interp.convert_mut(post))
}
pub fn pre_match(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let pre = data.pre();
Ok(interp.convert_mut(pre))
}
pub fn regexp(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let regexp = data.regexp();
// TODO(GH-614): MatchData#regexp needs to return an identical Regexp to the
// one used to create the match (same object ID).
//
// The `Regexp::alloc_value` here should be replaced with
// `Regexp::box_into_value`.
//
// See: https://github.com/ruby/spec/pull/727
let regexp = Regexp::alloc_value(regexp.clone(), interp)?;
Ok(regexp)
}
pub fn string(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let mut string = interp.convert_mut(data.string());
string.freeze(interp)?;
Ok(string)
}
pub fn to_a(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
if let Some(ary) = data.to_a()? {
interp.try_convert_mut(ary)
} else {
Ok(Value::nil())
}
}
pub fn to_s(interp: &mut Artichoke, mut value: Value) -> Result<Value, Error> {
let data = unsafe { MatchData::unbox_from_value(&mut value, interp)? };
let display = data.to_s()?;
Ok(interp.convert_mut(display))
}
const.py | # vim: set sw=4 ts=4 expandtab :
oneamo = 1000000000000000000
moteperamo = 1000000000000000000
DELTA_AMO = 0.000000001 # 10^-9 AMO
DELTA_MOTE = 1000000000 # 10^9 mote
BLKSHOUR = 60*60
BLKSDAY = 60*60*24
BLKSWEEK = 60*60*24*7
BLKSMONTH = 60*60*24*30
BLKSQUARTER = 60*60*24*90
BLKSYEAR = 60*60*24*365
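The constants above encode the unit relationship 1 AMO = 10^18 mote (so DELTA_AMO and DELTA_MOTE describe the same quantum). A quick sketch of the conversion arithmetic, written in Go only for consistency with the other examples here:

```go
package main

import "fmt"

// Unit relationship from const.py: 1 AMO = 10^18 mote, so the smallest
// step (DELTA_AMO = 10^-9 AMO) corresponds to DELTA_MOTE = 10^9 mote.
const motePerAmo = 1_000_000_000_000_000_000

func main() {
	amount := int64(2_500_000_000_000_000_000) // 2.5 AMO, held in mote
	fmt.Printf("%d mote = %d.%018d AMO\n",
		amount, amount/motePerAmo, amount%motePerAmo)
}
```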
tokenless_auth.py | # Copyright 2015 Hewlett-Packard
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
from oslo_log import log
from keystone.auth import core
from keystone.common import provider_api
import keystone.conf
from keystone import exception
from keystone.federation import constants as federation_constants
from keystone.federation import utils
from keystone.i18n import _
CONF = keystone.conf.CONF
LOG = log.getLogger(__name__)
class TokenlessAuthHelper(provider_api.ProviderAPIMixin, object):
    def __init__(self, env):
        """Initialize a TokenlessAuthHelper.

        :param env: The HTTP request environment that should contain
            client certificate attributes. These attributes should match
            what the mapping defines. Otherwise the user cannot be mapped
            and the request is treated as unauthenticated. The following
            examples are for the attributes that reference the client
            certificate's Subject's Common Name and Organization:
            SSL_CLIENT_S_DN_CN, SSL_CLIENT_S_DN_O
        :type env: dict
        """
        self.env = env

    def _build_scope_info(self):
        """Build the token request scope based on the headers.

        :returns: scope data
        :rtype: dict
        """
        project_id = self.env.get('HTTP_X_PROJECT_ID')
        project_name = self.env.get('HTTP_X_PROJECT_NAME')
        project_domain_id = self.env.get('HTTP_X_PROJECT_DOMAIN_ID')
        project_domain_name = self.env.get('HTTP_X_PROJECT_DOMAIN_NAME')
        domain_id = self.env.get('HTTP_X_DOMAIN_ID')
        domain_name = self.env.get('HTTP_X_DOMAIN_NAME')

        scope = {}
        if project_id:
            scope['project'] = {'id': project_id}
        elif project_name:
            scope['project'] = {'name': project_name}
            if project_domain_id:
                scope['project']['domain'] = {'id': project_domain_id}
            elif project_domain_name:
                scope['project']['domain'] = {'name': project_domain_name}
            else:
                msg = _('Neither Project Domain ID nor Project Domain Name '
                        'was provided.')
                raise exception.ValidationError(msg)
        elif domain_id:
            scope['domain'] = {'id': domain_id}
        elif domain_name:
            scope['domain'] = {'name': domain_name}
        else:
            raise exception.ValidationError(
                attribute='project or domain',
                target='scope')
        return scope

    def get_scope(self):
        auth = {}
        # NOTE(chioleong): Auth methods here are insignificant because
        # we only care about using auth.controllers.AuthInfo
        # to validate the scope information. Therefore,
        # we don't provide any identity.
        auth['scope'] = self._build_scope_info()

        # NOTE(chioleong): We'll let AuthInfo validate the scope for us
        auth_info = core.AuthInfo.create(auth, scope_only=True)
        return auth_info.get_scope()

    def get_mapped_user(self, project_id=None, domain_id=None):
        """Map the client certificate to an existing user.

        If the user is ephemeral, there is no validation of the user
        itself; the user is mapped to a corresponding group (or groups),
        and the scope of this ephemeral user is whatever is assigned to
        the group.

        :param project_id: Project scope of the mapped user.
        :param domain_id: Domain scope of the mapped user.
        :returns: A dictionary that contains the keys, such as
            user_id, user_name, domain_id, domain_name
        :rtype: dict
        """
        idp_id = self._build_idp_id()
        LOG.debug('The IdP Id %s and protocol Id %s are used to look up '
                  'the mapping.', idp_id, CONF.tokenless_auth.protocol)

        mapped_properties, mapping_id = self.federation_api.evaluate(
            idp_id, CONF.tokenless_auth.protocol, self.env)

        user = mapped_properties.get('user', {})
        user_id = user.get('id')
        user_name = user.get('name')
        user_type = user.get('type')
        if user.get('domain') is not None:
            user_domain_id = user.get('domain').get('id')
            user_domain_name = user.get('domain').get('name')
        else:
            user_domain_id = None
            user_domain_name = None

        # If the user is of ephemeral type, we don't care whether the user
        # exists or not, only whether the mapped group(s) are valid.
        if user_type == utils.UserType.EPHEMERAL:
            user_ref = {'type': utils.UserType.EPHEMERAL}
            group_ids = mapped_properties['group_ids']
            utils.validate_mapped_group_ids(group_ids,
                                            mapping_id,
                                            self.identity_api)
            group_ids.extend(
                utils.transform_to_group_ids(
                    mapped_properties['group_names'], mapping_id,
                    self.identity_api, self.assignment_api))
            roles = self.assignment_api.get_roles_for_groups(group_ids,
                                                             project_id,
                                                             domain_id)
            if roles is not None:
                role_names = [role['name'] for role in roles]
                user_ref['roles'] = role_names
            user_ref['group_ids'] = list(group_ids)
            user_ref[federation_constants.IDENTITY_PROVIDER] = idp_id
            user_ref[federation_constants.PROTOCOL] = (
                CONF.tokenless_auth.protocol)
            return user_ref

        if user_id:
            user_ref = self.identity_api.get_user(user_id)
        elif user_name and (user_domain_name or user_domain_id):
            if user_domain_name:
                user_domain = self.resource_api.get_domain_by_name(
                    user_domain_name)
                self.resource_api.assert_domain_enabled(user_domain['id'],
                                                        user_domain)
                user_domain_id = user_domain['id']
            user_ref = self.identity_api.get_user_by_name(user_name,
                                                          user_domain_id)
        else:
            msg = _('User auth cannot be built due to missing either '
                    'user id, or user name with domain id, or user name '
                    'with domain name.')
            raise exception.ValidationError(msg)
        self.identity_api.assert_user_enabled(
            user_id=user_ref['id'],
            user=user_ref)
        user_ref['type'] = utils.UserType.LOCAL
        return user_ref

    def _build_idp_id(self):
        """Build the IdP name from the given config option issuer_attribute.

        The default issuer attribute SSL_CLIENT_I_DN in the environment is
        hashed with the following formula:

            hashed_idp = sha256(env['SSL_CLIENT_I_DN']).hexdigest()

        :returns: the hex digest of the hashed issuer value
        :rtype: str
        """
        idp = self.env.get(CONF.tokenless_auth.issuer_attribute)
        if idp is None:
            raise exception.TokenlessAuthConfigError(
                issuer_attribute=CONF.tokenless_auth.issuer_attribute)

        hashed_idp = hashlib.sha256(idp.encode('utf-8'))
        return hashed_idp.hexdigest()
test_help.py | from buildtest.cli.help import buildtest_help
def test_buildtest_help():
    buildtest_help(command="build")
    buildtest_help(command="buildspec")
    buildtest_help(command="config")
    buildtest_help(command="cdash")
    buildtest_help(command="history")
    buildtest_help(command="inspect")
    buildtest_help(command="report")
    buildtest_help(command="schema")
    buildtest_help(command="stylecheck")
    buildtest_help(command="unittests")
default.go | package main
import "fmt" | string ""
*tau nil
*/
type T struct { x int; y bool; z *int }
func main() {
var b bool
var i int
var s string
var t T
var p *T
var q = new(T)
fmt.Print(b, "\n")
fmt.Print(i, "\n")
fmt.Print(s, "\n")
fmt.Print(t.x, "\n")
fmt.Print(t.y, "\n")
fmt.Print(t.z, "\n")
fmt.Print(p, "\n")
fmt.Print(q.x, "\n")
fmt.Print(q.y, "\n")
fmt.Print(q.z, "\n")
}
httptest_util.go | // Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util
import (
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"strings"
"time"
)
type MockServer struct {
s *httptest.Server
sleepDuration time.Duration
resp string
}
func InitMockServer(response string) *MockServer {
m := &MockServer{
resp: response,
}
m.s = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
time.Sleep(m.sleepDuration)
w.Header().Set("Content-Type", "application/json")
w.Write([]byte(m.resp))
}))
return m
}
func (m *MockServer) SetResp(response string) {
m.resp = response
}
func (m *MockServer) GetURL() string {
return m.s.URL
}
func (m *MockServer) Close() {
m.s.Close()
}
func (m *MockServer) SetSleepTime(sleepDuration time.Duration) {
m.sleepDuration = sleepDuration
}
func InitMockServerFromPathResp(pathResp map[string]string) *httptest.Server {
	return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		// Root is used to tell if the server is healthy or not.
		if r.URL.Path == "" || r.URL.Path == "/" {
			w.WriteHeader(http.StatusOK)
			return
		}
		if resp, ok := pathResp[r.URL.Path]; ok {
			w.WriteHeader(http.StatusOK)
			w.Write([]byte(resp))
			return
		}
		w.WriteHeader(http.StatusNotFound)
	}))
}
// JsonEqual compares two JSON strings after normalizing them.
// It is intended for use in tests only.
func JsonEqual(want, got string) error {
var err error
if got, err = normalizeJson(got); err != nil {
return err
}
if want, err = normalizeJson(want); err != nil {
return err
}
if !strings.EqualFold(want, got) {
return fmt.Errorf("\n got: %s \n want: %s", got, want)
}
return nil
}
// normalizeJson returns normalized JSON string.
func normalizeJson(input string) (string, error) {
	var jsonObject map[string]interface{}
	if err := json.Unmarshal([]byte(input), &jsonObject); err != nil {
		return "", err
	}
	outputString, err := json.Marshal(jsonObject)
	return string(outputString), err
}
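A short test sketch showing how `MockServer` and `JsonEqual` compose; it assumes the file compiles as part of the same `util` package.

```go
package util

import (
	"io"
	"net/http"
	"testing"
)

func TestMockServerRoundTrip(t *testing.T) {
	m := InitMockServer(`{"status": "ok"}`)
	defer m.Close()

	resp, err := http.Get(m.GetURL())
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		t.Fatal(err)
	}

	// Key order doesn't matter: both strings are normalized first.
	if err := JsonEqual(`{"status":"ok"}`, string(body)); err != nil {
		t.Error(err)
	}
}
```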
nsclosure.go | // Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License"). You may
// not use this file except in compliance with the License. A copy of the
// License is located at
//
// http://aws.amazon.com/apache2.0/
//
// or in the "license" file accompanying this file. This file is distributed
// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
// express or implied. See the License for the specific language governing
// permissions and limitations under the License.
package engine
import (
"net"
"syscall"
"time"
"github.com/aws/amazon-ecs-cni-plugins/pkg/netlinkwrapper"
log "github.com/cihub/seelog"
"github.com/containernetworking/cni/pkg/ns"
"github.com/pkg/errors"
"github.com/vishvananda/netlink"
)
const (
instanceMetadataEndpoint = "169.254.169.254/32"
)
var linkWithMACNotFoundError = errors.New("engine: device with mac address not found")
// setupNamespaceClosureContext wraps the parameters and the method to configure the container's namespace
type setupNamespaceClosureContext struct {
netLink netlinkwrapper.NetLink
dhclient DHClient
ifName string
deviceName string
macAddress string
ipv4Addr *netlink.Addr
ipv6Addr *netlink.Addr
ipv4Gateway net.IP
ipv6Gateway net.IP
blockIMDS bool
}
// teardownNamespaceClosureContext wraps the parameters and the method to teardown the
// container's namespace
type teardownNamespaceClosureContext struct {
netLink netlinkwrapper.NetLink
dhclient DHClient
hardwareAddr net.HardwareAddr
stopDHClient6 bool
checkDHClientStateInteval time.Duration
maxDHClientStopWait time.Duration
}
// newSetupNamespaceClosureContext creates a new setupNamespaceClosure object
func newSetupNamespaceClosureContext(netLink netlinkwrapper.NetLink, dhclient DHClient,
ifName string, deviceName string, macAddress string, ipv4Address string, ipv6Address string,
ipv4Gateway string, ipv6Gateway string, blockIMDS bool) (*setupNamespaceClosureContext, error) {
nlIPV4Addr, err := netLink.ParseAddr(ipv4Address)
if err != nil {
return nil, errors.Wrap(err,
"setupNamespaceClosure engine: unable to parse ipv4 address for the interface")
}
ipv4GatewayIP := net.ParseIP(ipv4Gateway)
if ipv4GatewayIP == nil {
return nil, errors.New(
"setupNamespaceClosure engine: unable to parse address of the ipv4 gateway")
}
nsClosure := &setupNamespaceClosureContext{
netLink: netLink,
dhclient: dhclient,
ifName: ifName,
deviceName: deviceName,
macAddress: macAddress,
ipv4Addr: nlIPV4Addr,
ipv4Gateway: ipv4GatewayIP,
blockIMDS: blockIMDS,
}
if ipv6Address != "" {
nlIPV6Addr, err := netLink.ParseAddr(ipv6Address)
if err != nil {
return nil, errors.Wrap(err,
"setupNamespaceClosure engine: unable to parse ipv6 address for the interface")
}
ipv6GatewayIP := net.ParseIP(ipv6Gateway)
if ipv6GatewayIP == nil {
return nil, errors.New(
"setupNamespaceClosure engine: unable to parse address of the ipv6 gateway")
}
nsClosure.ipv6Addr = nlIPV6Addr
nsClosure.ipv6Gateway = ipv6GatewayIP
}
return nsClosure, nil
}
// newTeardownNamespaceClosureContext creates a new teardownNamespaceClosure object
func newTeardownNamespaceClosureContext(netLink netlinkwrapper.NetLink, dhclient DHClient,
mac string, stopDHClient6 bool,
checkDHClientStateInteval time.Duration, maxDHClientStopWait time.Duration) (*teardownNamespaceClosureContext, error) {
hardwareAddr, err := net.ParseMAC(mac)
if err != nil {
return nil, errors.Wrapf(err,
"newTeardownNamespaceClosure engine: malformatted mac address specified")
}
return &teardownNamespaceClosureContext{
netLink: netLink,
dhclient: dhclient,
hardwareAddr: hardwareAddr,
stopDHClient6: stopDHClient6,
checkDHClientStateInteval: checkDHClientStateInteval,
maxDHClientStopWait: maxDHClientStopWait,
}, nil
}
// run defines the closure to execute within the container's namespace to configure it
// appropriately
func (closureContext *setupNamespaceClosureContext) run(_ ns.NetNS) error {
// Get the link for the ENI device
eniLink, err := closureContext.netLink.LinkByName(closureContext.deviceName)
if err != nil {
return errors.Wrapf(err,
"setupNamespaceClosure engine: unable to get link for device '%s'",
closureContext.deviceName)
}
err = closureContext.netLink.LinkSetName(eniLink, closureContext.ifName)
if err != nil {
return errors.Wrap(err, "setupNamespaceClosure engine: unable to change interface name")
}
// Add the IPV4 Address to the link
err = closureContext.netLink.AddrAdd(eniLink, closureContext.ipv4Addr)
if err != nil {
return errors.Wrap(err,
"setupNamespaceClosure engine: unable to add ipv4 address to the interface")
}
if closureContext.ipv6Addr != nil {
// Add the IPV6 Address to the link
err = closureContext.netLink.AddrAdd(eniLink, closureContext.ipv6Addr)
if err != nil {
return errors.Wrap(err,
"setupNamespaceClosure engine: unable to add ipv6 address to the interface")
}
}
// Bring it up
err = closureContext.netLink.LinkSetUp(eniLink)
if err != nil {
return errors.Wrap(err,
"setupNamespaceClosure engine: unable to bring up the device")
}
// Add a blackhole route for IMDS endpoint if required
if closureContext.blockIMDS {
_, imdsNetwork, err := net.ParseCIDR(instanceMetadataEndpoint)
if err != nil {
// This should never happen because we always expect
// 169.254.169.254/32 to be parsed without any errors
return errors.Wrapf(err, "setupNamespaceClosure engine: unable to parse instance metadata endpoint")
}
if err = closureContext.netLink.RouteAdd(&netlink.Route{
Dst: imdsNetwork,
Type: syscall.RTN_BLACKHOLE,
}); err != nil {
return errors.Wrapf(err, "setupNamespaceClosure engine: unable to add route to block instance metadata")
}
}
// Setup ipv4 route for the gateway
err = closureContext.netLink.RouteAdd(&netlink.Route{
Gw: closureContext.ipv4Gateway,
})
if err != nil {
return errors.Wrap(err,
"setupNamespaceClosure engine: unable to add the route for the ipv4 gateway")
}
// Start dhclient for IPV4 address
err = closureContext.dhclient.Start(closureContext.ifName, closureContext.macAddress, ipRev4)
if err != nil {
return err
}
if closureContext.ipv6Addr != nil {
// Setup ipv6 route for the gateway
err = closureContext.netLink.RouteAdd(&netlink.Route{
LinkIndex: eniLink.Attrs().Index,
Gw: closureContext.ipv6Gateway,
})
if err != nil && !isRouteExistsError(err) {
return errors.Wrap(err,
"setupNamespaceClosure engine: unable to add the route for the ipv6 gateway")
}
// Start dhclient for IPV6 address
return closureContext.dhclient.Start(closureContext.ifName, closureContext.macAddress, ipRev6)
}
return nil
}
// isRouteExistsError returns true if the error type is syscall.EEXIST
// This helps us determine if we should ignore this error as the route
// that we want to add already exists in the routing table
func isRouteExistsError(err error) bool {
if errno, ok := err.(syscall.Errno); ok {
return errno == syscall.EEXIST
}
return false
}
// run defines the closure to execute within the container's namespace to tear it down
func (closureContext *teardownNamespaceClosureContext) run(_ ns.NetNS) error {
link, err := getLinkByHardwareAddress(closureContext.netLink, closureContext.hardwareAddr)
if err != nil {
return errors.Wrapf(err,
"teardownNamespaceClosure engine: unable to get device with hardware address '%s'",
closureContext.hardwareAddr.String())
}
deviceName := link.Attrs().Name
log.Debugf("Found link device as (hardware address=%s): %s", closureContext.hardwareAddr, deviceName)
// Stop the dhclient process for IPV4 address
err = closureContext.dhclient.Stop(closureContext.hardwareAddr.String(), ipRev4,
closureContext.checkDHClientStateInteval, closureContext.maxDHClientStopWait)
if err != nil {
return err
}
if closureContext.stopDHClient6 {
// Stop the dhclient process for IPV6 address
err = closureContext.dhclient.Stop(closureContext.hardwareAddr.String(), ipRev6,
closureContext.checkDHClientStateInteval, closureContext.maxDHClientStopWait)
if err != nil {
return err
}
}
log.Infof("Cleaned up dhclient for device(hardware address=%s): %s", closureContext.hardwareAddr, deviceName)
return nil
}
// getLinkByHardwareAddress gets the link device based on the mac address
func getLinkByHardwareAddress(netLink netlinkwrapper.NetLink, hardwareAddr net.HardwareAddr) (netlink.Link, error) {
links, err := netLink.LinkList()
if err != nil {
return nil, err
}
for _, link := range links {
// TODO: Evaluate if reflect.DeepEqual is a better alternative here
if link.Attrs().HardwareAddr.String() == hardwareAddr.String() {
return link, nil
}
}
return nil, linkWithMACNotFoundError
}
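To illustrate the `syscall.Errno` unwrapping in `isRouteExistsError`, a standalone sketch that is not tied to netlink:

```go
package main

import (
	"fmt"
	"syscall"
)

// isRouteExistsError mirrors the helper above: it reports whether an
// error is the raw EEXIST errno returned by the kernel.
func isRouteExistsError(err error) bool {
	if errno, ok := err.(syscall.Errno); ok {
		return errno == syscall.EEXIST
	}
	return false
}

func main() {
	// syscall.Errno implements error, so EEXIST can be passed directly.
	fmt.Println(isRouteExistsError(syscall.EEXIST))      // true
	fmt.Println(isRouteExistsError(fmt.Errorf("other"))) // false
}
```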
expr.rs | use super::pat::{RecoverColon, RecoverComma, PARAM_EXPECTED};
use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign};
use super::{
AttrWrapper, BlockMode, ClosureSpans, ForceCollect, Parser, PathStyle, Restrictions, TokenType,
};
use super::{SemiColonMode, SeqSep, TokenExpectType, TrailingToken};
use crate::maybe_recover_from_interpolated_ty_qpath;
use ast::token::DelimToken;
use rustc_ast::ptr::P;
use rustc_ast::token::{self, Token, TokenKind};
use rustc_ast::tokenstream::Spacing;
use rustc_ast::util::classify;
use rustc_ast::util::literal::LitError;
use rustc_ast::util::parser::{prec_let_scrutinee_needs_par, AssocOp, Fixity};
use rustc_ast::{self as ast, AttrStyle, AttrVec, CaptureBy, ExprField, Lit, UnOp, DUMMY_NODE_ID};
use rustc_ast::{AnonConst, BinOp, BinOpKind, FnDecl, FnRetTy, MacCall, Param, Ty, TyKind};
use rustc_ast::{Arm, Async, BlockCheckMode, Expr, ExprKind, Label, Movability, RangeLimits};
use rustc_ast_pretty::pprust;
use rustc_errors::{Applicability, DiagnosticBuilder, PResult};
use rustc_session::lint::builtin::BREAK_WITH_LABEL_AND_LOOP;
use rustc_session::lint::BuiltinLintDiagnostics;
use rustc_span::edition::LATEST_STABLE_EDITION;
use rustc_span::source_map::{self, Span, Spanned};
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{BytePos, Pos};
use std::mem;
/// Possibly accepts a `token::Interpolated` expression (a pre-parsed expression
/// dropped into the token stream, which happens while parsing the result of
/// macro expansion). Placement of these is not as complex as I feared it would
/// be. The important thing is to make sure that lookahead doesn't balk at
/// `token::Interpolated` tokens.
macro_rules! maybe_whole_expr {
($p:expr) => {
if let token::Interpolated(nt) = &$p.token.kind {
match &**nt {
token::NtExpr(e) | token::NtLiteral(e) => {
let e = e.clone();
$p.bump();
return Ok(e);
}
token::NtPath(path) => {
let path = path.clone();
$p.bump();
return Ok($p.mk_expr(
$p.prev_token.span,
ExprKind::Path(None, path),
AttrVec::new(),
));
}
token::NtBlock(block) => {
let block = block.clone();
$p.bump();
return Ok($p.mk_expr(
$p.prev_token.span,
ExprKind::Block(block, None),
AttrVec::new(),
));
}
_ => {}
};
}
};
}
#[derive(Debug)]
pub(super) enum LhsExpr {
NotYetParsed,
AttributesParsed(AttrWrapper),
AlreadyParsed(P<Expr>),
}
impl From<Option<AttrWrapper>> for LhsExpr {
/// Converts `Some(attrs)` into `LhsExpr::AttributesParsed(attrs)`
/// and `None` into `LhsExpr::NotYetParsed`.
///
/// This conversion does not allocate.
fn from(o: Option<AttrWrapper>) -> Self {
if let Some(attrs) = o { LhsExpr::AttributesParsed(attrs) } else { LhsExpr::NotYetParsed }
}
}
impl From<P<Expr>> for LhsExpr {
/// Converts the `expr: P<Expr>` into `LhsExpr::AlreadyParsed(expr)`.
///
/// This conversion does not allocate.
fn from(expr: P<Expr>) -> Self {
LhsExpr::AlreadyParsed(expr)
}
}
impl<'a> Parser<'a> {
/// Parses an expression.
#[inline]
pub fn parse_expr(&mut self) -> PResult<'a, P<Expr>> {
self.current_closure.take();
self.parse_expr_res(Restrictions::empty(), None)
}
/// Parses an expression, forcing tokens to be collected
pub fn parse_expr_force_collect(&mut self) -> PResult<'a, P<Expr>> {
self.collect_tokens_no_attrs(|this| this.parse_expr())
}
pub fn parse_anon_const_expr(&mut self) -> PResult<'a, AnonConst> {
self.parse_expr().map(|value| AnonConst { id: DUMMY_NODE_ID, value })
}
fn parse_expr_catch_underscore(&mut self) -> PResult<'a, P<Expr>> {
match self.parse_expr() {
Ok(expr) => Ok(expr),
Err(mut err) => match self.token.ident() {
Some((Ident { name: kw::Underscore, .. }, false))
if self.look_ahead(1, |t| t == &token::Comma) =>
{
// Special-case handling of `foo(_, _, _)`
err.emit();
self.bump();
Ok(self.mk_expr(self.prev_token.span, ExprKind::Err, AttrVec::new()))
}
_ => Err(err),
},
}
}
/// Parses a sequence of expressions delimited by parentheses.
fn parse_paren_expr_seq(&mut self) -> PResult<'a, Vec<P<Expr>>> {
self.parse_paren_comma_seq(|p| p.parse_expr_catch_underscore()).map(|(r, _)| r)
}
/// Parses an expression, subject to the given restrictions.
#[inline]
pub(super) fn parse_expr_res(
&mut self,
r: Restrictions,
already_parsed_attrs: Option<AttrWrapper>,
) -> PResult<'a, P<Expr>> {
self.with_res(r, |this| this.parse_assoc_expr(already_parsed_attrs))
}
/// Parses an associative expression.
///
/// This parses an expression accounting for associativity and precedence of the operators in
/// the expression.
#[inline]
fn parse_assoc_expr(
&mut self,
already_parsed_attrs: Option<AttrWrapper>,
) -> PResult<'a, P<Expr>> {
self.parse_assoc_expr_with(0, already_parsed_attrs.into())
}
/// Parses an associative expression with operators of at least `min_prec` precedence.
pub(super) fn parse_assoc_expr_with(
&mut self,
min_prec: usize,
lhs: LhsExpr,
) -> PResult<'a, P<Expr>> {
let mut lhs = if let LhsExpr::AlreadyParsed(expr) = lhs {
expr
} else {
let attrs = match lhs {
LhsExpr::AttributesParsed(attrs) => Some(attrs),
_ => None,
};
if [token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind) {
return self.parse_prefix_range_expr(attrs);
} else {
self.parse_prefix_expr(attrs)?
}
};
let last_type_ascription_set = self.last_type_ascription.is_some();
if !self.should_continue_as_assoc_expr(&lhs) {
self.last_type_ascription = None;
return Ok(lhs);
}
self.expected_tokens.push(TokenType::Operator);
while let Some(op) = self.check_assoc_op() {
// Adjust the span for interpolated LHS to point to the `$lhs` token
// and not to what it refers to.
let lhs_span = match self.prev_token.kind {
TokenKind::Interpolated(..) => self.prev_token.span,
_ => lhs.span,
};
let cur_op_span = self.token.span;
let restrictions = if op.node.is_assign_like() {
self.restrictions & Restrictions::NO_STRUCT_LITERAL
} else {
self.restrictions
};
let prec = op.node.precedence();
if prec < min_prec {
break;
}
// Check for deprecated `...` syntax
if self.token == token::DotDotDot && op.node == AssocOp::DotDotEq {
self.err_dotdotdot_syntax(self.token.span);
}
if self.token == token::LArrow {
self.err_larrow_operator(self.token.span);
}
self.bump();
if op.node.is_comparison() {
if let Some(expr) = self.check_no_chained_comparison(&lhs, &op)? {
return Ok(expr);
}
}
// Look for JS' `===` and `!==` and recover
if (op.node == AssocOp::Equal || op.node == AssocOp::NotEqual)
&& self.token.kind == token::Eq
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
let sugg = match op.node {
AssocOp::Equal => "==",
AssocOp::NotEqual => "!=",
_ => unreachable!(),
};
self.struct_span_err(sp, &format!("invalid comparison operator `{}=`", sugg))
.span_suggestion_short(
sp,
&format!("`{s}=` is not a valid comparison operator, use `{s}`", s = sugg),
sugg.to_string(),
Applicability::MachineApplicable,
)
.emit();
self.bump();
}
// Look for PHP's `<>` and recover
if op.node == AssocOp::Less
&& self.token.kind == token::Gt
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
self.struct_span_err(sp, "invalid comparison operator `<>`")
.span_suggestion_short(
sp,
"`<>` is not a valid comparison operator, use `!=`",
"!=".to_string(),
Applicability::MachineApplicable,
)
.emit();
self.bump();
}
// Look for C++'s `<=>` and recover
if op.node == AssocOp::LessEqual
&& self.token.kind == token::Gt
&& self.prev_token.span.hi() == self.token.span.lo()
{
let sp = op.span.to(self.token.span);
self.struct_span_err(sp, "invalid comparison operator `<=>`")
.span_label(
sp,
"`<=>` is not a valid comparison operator, use `std::cmp::Ordering`",
)
.emit();
self.bump();
}
let op = op.node;
// Special cases:
if op == AssocOp::As {
lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?;
continue;
} else if op == AssocOp::Colon {
lhs = self.parse_assoc_op_ascribe(lhs, lhs_span)?;
continue;
} else if op == AssocOp::DotDot || op == AssocOp::DotDotEq {
// If we didn’t have to handle `x..`/`x..=`, it would be pretty easy to
// generalise it to the Fixity::None code.
lhs = self.parse_range_expr(prec, lhs, op, cur_op_span)?;
break;
}
let fixity = op.fixity();
let prec_adjustment = match fixity {
Fixity::Right => 0,
Fixity::Left => 1,
// We currently have no non-associative operators that are not handled above by
// the special cases. The code is here only for future convenience.
Fixity::None => 1,
};
let rhs = self.with_res(restrictions - Restrictions::STMT_EXPR, |this| {
this.parse_assoc_expr_with(prec + prec_adjustment, LhsExpr::NotYetParsed)
})?;
let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span);
lhs = match op {
AssocOp::Add
| AssocOp::Subtract
| AssocOp::Multiply
| AssocOp::Divide
| AssocOp::Modulus
| AssocOp::LAnd
| AssocOp::LOr
| AssocOp::BitXor
| AssocOp::BitAnd
| AssocOp::BitOr
| AssocOp::ShiftLeft
| AssocOp::ShiftRight
| AssocOp::Equal
| AssocOp::Less
| AssocOp::LessEqual
| AssocOp::NotEqual
| AssocOp::Greater
| AssocOp::GreaterEqual => {
let ast_op = op.to_ast_binop().unwrap();
let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs);
self.mk_expr(span, binary, AttrVec::new())
}
AssocOp::Assign => {
self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span), AttrVec::new())
}
AssocOp::AssignOp(k) => {
let aop = match k {
token::Plus => BinOpKind::Add,
token::Minus => BinOpKind::Sub,
token::Star => BinOpKind::Mul,
token::Slash => BinOpKind::Div,
token::Percent => BinOpKind::Rem,
token::Caret => BinOpKind::BitXor,
token::And => BinOpKind::BitAnd,
token::Or => BinOpKind::BitOr,
token::Shl => BinOpKind::Shl,
token::Shr => BinOpKind::Shr,
};
let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs);
self.mk_expr(span, aopexpr, AttrVec::new())
}
AssocOp::As | AssocOp::Colon | AssocOp::DotDot | AssocOp::DotDotEq => {
self.span_bug(span, "AssocOp should have been handled by special case")
}
};
if let Fixity::None = fixity {
break;
}
}
if last_type_ascription_set {
self.last_type_ascription = None;
}
Ok(lhs)
}
fn should_continue_as_assoc_expr(&mut self, lhs: &Expr) -> bool {
match (self.expr_is_complete(lhs), AssocOp::from_token(&self.token)) {
// Semi-statement forms are odd:
// See https://github.com/rust-lang/rust/issues/29071
(true, None) => false,
(false, _) => true, // Continue parsing the expression.
// An exhaustive check is done in the following block, but these are checked first
// because they *are* ambiguous but also reasonable looking incorrect syntax, so we
// want to keep their span info to improve diagnostics in these cases in a later stage.
(true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3`
(true, Some(AssocOp::Subtract)) | // `{ 42 } -5`
(true, Some(AssocOp::Add)) // `{ 42 } + 42`
// If the next token is a keyword, then the tokens above *are* unambiguously incorrect:
// `if x { a } else { b } && if y { c } else { d }`
if !self.look_ahead(1, |t| t.is_used_keyword()) => {
// These cases are ambiguous and can't be identified in the parser alone.
let sp = self.sess.source_map().start_point(self.token.span);
self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
false
}
(true, Some(AssocOp::LAnd)) => {
// `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }`. Separated from the
// above due to #74233.
// These cases are ambiguous and can't be identified in the parser alone.
let sp = self.sess.source_map().start_point(self.token.span);
self.sess.ambiguous_block_expr_parse.borrow_mut().insert(sp, lhs.span);
false
}
(true, Some(ref op)) if !op.can_continue_expr_unambiguously() => false,
(true, Some(_)) => {
self.error_found_expr_would_be_stmt(lhs);
true
}
}
}
/// We've found an expression that would be parsed as a statement,
/// but the next token implies this should be parsed as an expression.
/// For example: `if let Some(x) = x { x } else { 0 } / 2`.
fn error_found_expr_would_be_stmt(&self, lhs: &Expr) {
let mut err = self.struct_span_err(
self.token.span,
&format!("expected expression, found `{}`", pprust::token_to_string(&self.token),),
);
err.span_label(self.token.span, "expected expression");
self.sess.expr_parentheses_needed(&mut err, lhs.span);
err.emit();
}
/// Possibly translate the current token to an associative operator.
/// The method does not advance the current token.
///
/// Also performs recovery for `and` / `or` which are mistaken for `&&` and `||` respectively.
fn check_assoc_op(&self) -> Option<Spanned<AssocOp>> {
let (op, span) = match (AssocOp::from_token(&self.token), self.token.ident()) {
// When parsing const expressions, stop parsing when encountering `>`.
(
Some(
AssocOp::ShiftRight
| AssocOp::Greater
| AssocOp::GreaterEqual
| AssocOp::AssignOp(token::BinOpToken::Shr),
),
_,
) if self.restrictions.contains(Restrictions::CONST_EXPR) => {
return None;
}
(Some(op), _) => (op, self.token.span),
(None, Some((Ident { name: sym::and, span }, false))) => {
self.error_bad_logical_op("and", "&&", "conjunction");
(AssocOp::LAnd, span)
}
(None, Some((Ident { name: sym::or, span }, false))) => {
self.error_bad_logical_op("or", "||", "disjunction");
(AssocOp::LOr, span)
}
_ => return None,
};
Some(source_map::respan(span, op))
}
/// Error on `and` and `or` suggesting `&&` and `||` respectively.
fn error_bad_logical_op(&self, bad: &str, good: &str, english: &str) {
self.struct_span_err(self.token.span, &format!("`{}` is not a logical operator", bad))
.span_suggestion_short(
self.token.span,
&format!("use `{}` to perform logical {}", good, english),
good.to_string(),
Applicability::MachineApplicable,
)
.note("unlike in e.g., python and PHP, `&&` and `||` are used for logical operators")
.emit();
}
/// Checks if this expression is a successfully parsed statement.
fn expr_is_complete(&self, e: &Expr) -> bool {
self.restrictions.contains(Restrictions::STMT_EXPR)
&& !classify::expr_requires_semi_to_be_stmt(e)
}
/// Parses `x..y`, `x..=y`, and `x..`/`x..=`.
/// The other two variants are handled in `parse_prefix_range_expr` below.
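/// For example, `0..3` yields `RangeLimits::HalfOpen` while `0..=3` yields
/// `RangeLimits::Closed`, matching the `DotDot` vs `DotDotEq` operator below.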
fn parse_range_expr(
&mut self,
prec: usize,
lhs: P<Expr>,
op: AssocOp,
cur_op_span: Span,
) -> PResult<'a, P<Expr>> {
let rhs = if self.is_at_start_of_range_notation_rhs() {
Some(self.parse_assoc_expr_with(prec + 1, LhsExpr::NotYetParsed)?)
} else {
None
};
let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span);
let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span);
let limits =
if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed };
let range = self.mk_range(Some(lhs), rhs, limits);
Ok(self.mk_expr(span, range, AttrVec::new()))
}
fn is_at_start_of_range_notation_rhs(&self) -> bool {
if self.token.can_begin_expr() {
// Parse `for i in 1.. { }` as infinite loop, not as `for i in (1..{})`.
if self.token == token::OpenDelim(token::Brace) {
return !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
}
true
} else {
false
}
}
/// Parses prefix-forms of range notation: `..expr`, `..`, `..=expr`.
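/// For example, `..=5` parses as a `Closed` range with no start, and a bare
/// `..` as a `HalfOpen` range with neither start nor end.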
fn parse_prefix_range_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
// Check for deprecated `...` syntax.
if self.token == token::DotDotDot {
self.err_dotdotdot_syntax(self.token.span);
}
debug_assert!(
[token::DotDot, token::DotDotDot, token::DotDotEq].contains(&self.token.kind),
"parse_prefix_range_expr: token {:?} is not DotDot/DotDotEq",
self.token
);
let limits = match self.token.kind {
token::DotDot => RangeLimits::HalfOpen,
_ => RangeLimits::Closed,
};
let op = AssocOp::from_token(&self.token);
// FIXME: `parse_prefix_range_expr` is called when the current
// token is `DotDot`, `DotDotDot`, or `DotDotEq`. If we haven't already
// parsed attributes, then trying to parse them here will always fail.
// We should figure out how we want attributes on range expressions to work.
let attrs = self.parse_or_use_outer_attributes(attrs)?;
self.collect_tokens_for_expr(attrs, |this, attrs| {
let lo = this.token.span;
this.bump();
let (span, opt_end) = if this.is_at_start_of_range_notation_rhs() {
// RHS must be parsed with more associativity than the dots.
this.parse_assoc_expr_with(op.unwrap().precedence() + 1, LhsExpr::NotYetParsed)
.map(|x| (lo.to(x.span), Some(x)))?
} else {
(lo, None)
};
let range = this.mk_range(None, opt_end, limits);
Ok(this.mk_expr(span, range, attrs.into()))
})
}
/// Parses a prefix-unary-operator expr.
fn parse_prefix_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
let lo = self.token.span;
macro_rules! make_it {
($this:ident, $attrs:expr, |this, _| $body:expr) => {
$this.collect_tokens_for_expr($attrs, |$this, attrs| {
let (hi, ex) = $body?;
Ok($this.mk_expr(lo.to(hi), ex, attrs.into()))
})
};
}
let this = self;
// Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr()
match this.token.uninterpolate().kind {
token::Not => make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Not)), // `!expr`
token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `~expr`
token::BinOp(token::Minus) => {
make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Neg))
} // `-expr`
token::BinOp(token::Star) => {
make_it!(this, attrs, |this, _| this.parse_unary_expr(lo, UnOp::Deref))
} // `*expr`
token::BinOp(token::And) | token::AndAnd => {
make_it!(this, attrs, |this, _| this.parse_borrow_expr(lo))
}
token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => {
let mut err = this.struct_span_err(lo, "leading `+` is not supported");
err.span_label(lo, "unexpected `+`");
// a block on the LHS might have been intended to be an expression instead
if let Some(sp) = this.sess.ambiguous_block_expr_parse.borrow().get(&lo) {
this.sess.expr_parentheses_needed(&mut err, *sp);
} else {
err.span_suggestion_verbose(
lo,
"try removing the `+`",
"".to_string(),
Applicability::MachineApplicable,
);
}
err.emit();
this.bump();
this.parse_prefix_expr(None)
} // `+expr`
token::Ident(..) if this.token.is_keyword(kw::Box) => {
make_it!(this, attrs, |this, _| this.parse_box_expr(lo))
}
token::Ident(..) if this.is_mistaken_not_ident_negation() => {
make_it!(this, attrs, |this, _| this.recover_not_expr(lo))
}
_ => return this.parse_dot_or_call_expr(Some(attrs)),
}
}
fn parse_prefix_expr_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> {
self.bump();
let expr = self.parse_prefix_expr(None);
let (span, expr) = self.interpolated_or_expr_span(expr)?;
Ok((lo.to(span), expr))
}
fn parse_unary_expr(&mut self, lo: Span, op: UnOp) -> PResult<'a, (Span, ExprKind)> {
let (span, expr) = self.parse_prefix_expr_common(lo)?;
Ok((span, self.mk_unary(op, expr)))
}
// Recover on `~`, suggesting `!` for bitwise negation instead.
fn recover_tilde_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
self.struct_span_err(lo, "`~` cannot be used as a unary operator")
.span_suggestion_short(
lo,
"use `!` to perform bitwise not",
"!".to_owned(),
Applicability::MachineApplicable,
)
.emit();
self.parse_unary_expr(lo, UnOp::Not)
}
/// Parse `box expr`.
fn parse_box_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
let (span, expr) = self.parse_prefix_expr_common(lo)?;
self.sess.gated_spans.gate(sym::box_syntax, span);
Ok((span, ExprKind::Box(expr)))
}
fn is_mistaken_not_ident_negation(&self) -> bool {
let token_cannot_continue_expr = |t: &Token| match t.uninterpolate().kind {
// These tokens can start an expression after `!`, but
// can't continue an expression after an ident
token::Ident(name, is_raw) => token::ident_can_begin_expr(name, t.span, is_raw),
token::Literal(..) | token::Pound => true,
_ => t.is_whole_expr(),
};
self.token.is_ident_named(sym::not) && self.look_ahead(1, token_cannot_continue_expr)
}
/// Recover on `not expr` in favor of `!expr`.
fn recover_not_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
// Emit the error...
let not_token = self.look_ahead(1, |t| t.clone());
self.struct_span_err(
not_token.span,
&format!("unexpected {} after identifier", super::token_descr(¬_token)),
)
.span_suggestion_short(
// Span the `not` plus trailing whitespace to avoid
// trailing whitespace after the `!` in our suggestion
self.sess.source_map().span_until_non_whitespace(lo.to(not_token.span)),
"use `!` to perform logical negation",
"!".to_owned(),
Applicability::MachineApplicable,
)
.emit();
// ...and recover!
self.parse_unary_expr(lo, UnOp::Not)
}
/// Returns the span of `expr` if it was not interpolated, or the span of the interpolated token.
fn interpolated_or_expr_span(
&self,
expr: PResult<'a, P<Expr>>,
) -> PResult<'a, (Span, P<Expr>)> {
expr.map(|e| {
(
match self.prev_token.kind {
TokenKind::Interpolated(..) => self.prev_token.span,
_ => e.span,
},
e,
)
})
}
fn parse_assoc_op_cast(
&mut self,
lhs: P<Expr>,
lhs_span: Span,
expr_kind: fn(P<Expr>, P<Ty>) -> ExprKind,
) -> PResult<'a, P<Expr>> {
let mk_expr = |this: &mut Self, lhs: P<Expr>, rhs: P<Ty>| {
this.mk_expr(
this.mk_expr_sp(&lhs, lhs_span, rhs.span),
expr_kind(lhs, rhs),
AttrVec::new(),
)
};
// Save the state of the parser before parsing type normally, in case there is a
// LessThan comparison after this cast.
let parser_snapshot_before_type = self.clone();
let cast_expr = match self.parse_ty_no_plus() {
Ok(rhs) => mk_expr(self, lhs, rhs),
Err(mut type_err) => {
// Rewind to before attempting to parse the type with generics, to recover
// from situations like `x as usize < y` in which we first tried to parse
// `usize < y` as a type with generic arguments.
let parser_snapshot_after_type = mem::replace(self, parser_snapshot_before_type);
// Check for typo of `'a: loop { break 'a }` with a missing `'`.
match (&lhs.kind, &self.token.kind) {
(
// `foo: `
ExprKind::Path(None, ast::Path { segments, .. }),
TokenKind::Ident(kw::For | kw::Loop | kw::While, false),
) if segments.len() == 1 => {
let snapshot = self.clone();
let label = Label {
ident: Ident::from_str_and_span(
&format!("'{}", segments[0].ident),
segments[0].ident.span,
),
};
match self.parse_labeled_expr(label, AttrVec::new(), false) {
Ok(expr) => {
type_err.cancel();
self.struct_span_err(label.ident.span, "malformed loop label")
.span_suggestion(
label.ident.span,
"use the correct loop label format",
label.ident.to_string(),
Applicability::MachineApplicable,
)
.emit();
return Ok(expr);
}
Err(mut err) => {
err.cancel();
*self = snapshot;
}
}
}
_ => {}
}
match self.parse_path(PathStyle::Expr) {
Ok(path) => {
let (op_noun, op_verb) = match self.token.kind {
token::Lt => ("comparison", "comparing"),
token::BinOp(token::Shl) => ("shift", "shifting"),
_ => {
// We can end up here even without `<` being the next token, for
// example because `parse_ty_no_plus` returns `Err` on keywords,
// but `parse_path` returns `Ok` on them due to error recovery.
// Return original error and parser state.
*self = parser_snapshot_after_type;
return Err(type_err);
}
};
// Successfully parsed the type path leaving a `<` yet to parse.
type_err.cancel();
// Report non-fatal diagnostics, keep `x as usize` as an expression
// in AST and continue parsing.
let msg = format!(
"`<` is interpreted as a start of generic arguments for `{}`, not a {}",
pprust::path_to_string(&path),
op_noun,
);
let span_after_type = parser_snapshot_after_type.token.span;
let expr =
mk_expr(self, lhs, self.mk_ty(path.span, TyKind::Path(None, path)));
self.struct_span_err(self.token.span, &msg)
.span_label(
self.look_ahead(1, |t| t.span).to(span_after_type),
"interpreted as generic arguments",
)
.span_label(self.token.span, format!("not interpreted as {}", op_noun))
.multipart_suggestion(
&format!("try {} the cast value", op_verb),
vec![
(expr.span.shrink_to_lo(), "(".to_string()),
(expr.span.shrink_to_hi(), ")".to_string()),
],
Applicability::MachineApplicable,
)
.emit();
expr
}
Err(mut path_err) => {
// Couldn't parse as a path, return original error and parser state.
path_err.cancel();
*self = parser_snapshot_after_type;
return Err(type_err);
}
}
}
};
self.parse_and_disallow_postfix_after_cast(cast_expr)
}
/// Parses postfix operators such as `.`, `?`, or index (`[]`) after a cast,
/// then emits an error and returns the newly parsed tree.
/// The resulting parse tree for `&x as T[0]` has a precedence of `((&x) as T)[0]`.
fn parse_and_disallow_postfix_after_cast(
&mut self,
cast_expr: P<Expr>,
) -> PResult<'a, P<Expr>> {
// Save the memory location of expr before parsing any following postfix operators.
// This will be compared with the memory location of the output expression.
// If they differ, we can assume another expression was parsed, since the existing expression is never reallocated.
let addr_before = &*cast_expr as *const _ as usize;
let span = cast_expr.span;
let with_postfix = self.parse_dot_or_call_expr_with_(cast_expr, span)?;
let changed = addr_before != &*with_postfix as *const _ as usize;
// Check if an illegal postfix operator has been added after the cast.
// If the resulting expression is not a cast, or has a different memory location, it is an illegal postfix operator.
if !matches!(with_postfix.kind, ExprKind::Cast(_, _) | ExprKind::Type(_, _)) || changed {
let msg = format!(
"casts cannot be followed by {}",
match with_postfix.kind {
ExprKind::Index(_, _) => "indexing",
ExprKind::Try(_) => "?",
ExprKind::Field(_, _) => "a field access",
ExprKind::MethodCall(_, _, _) => "a method call",
ExprKind::Call(_, _) => "a function call",
ExprKind::Await(_) => "`.await`",
ExprKind::Err => return Ok(with_postfix),
_ => unreachable!("parse_dot_or_call_expr_with_ shouldn't produce this"),
}
);
let mut err = self.struct_span_err(span, &msg);
// If type ascription is "likely an error", the user will already be getting a useful
// help message, and doesn't need a second.
if self.last_type_ascription.map_or(false, |last_ascription| last_ascription.1) {
self.maybe_annotate_with_ascription(&mut err, false);
} else {
let suggestions = vec![
(span.shrink_to_lo(), "(".to_string()),
(span.shrink_to_hi(), ")".to_string()),
];
err.multipart_suggestion(
"try surrounding the expression in parentheses",
suggestions,
Applicability::MachineApplicable,
);
}
err.emit();
};
Ok(with_postfix)
}
fn parse_assoc_op_ascribe(&mut self, lhs: P<Expr>, lhs_span: Span) -> PResult<'a, P<Expr>> {
let maybe_path = self.could_ascription_be_path(&lhs.kind);
self.last_type_ascription = Some((self.prev_token.span, maybe_path));
let lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Type)?;
self.sess.gated_spans.gate(sym::type_ascription, lhs.span);
Ok(lhs)
}
/// Parse `& mut? <expr>` or `& raw [ const | mut ] <expr>`.
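/// For example, `&mut x` produces an `ExprKind::AddrOf` with `BorrowKind::Ref`,
/// while `&raw const x` produces a `BorrowKind::Raw` borrow and gates the
/// `raw_ref_op` feature.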
fn parse_borrow_expr(&mut self, lo: Span) -> PResult<'a, (Span, ExprKind)> {
self.expect_and()?;
let has_lifetime = self.token.is_lifetime() && self.look_ahead(1, |t| t != &token::Colon);
let lifetime = has_lifetime.then(|| self.expect_lifetime()); // For recovery, see below.
let (borrow_kind, mutbl) = self.parse_borrow_modifiers(lo);
let expr = self.parse_prefix_expr(None);
let (hi, expr) = self.interpolated_or_expr_span(expr)?;
let span = lo.to(hi);
if let Some(lt) = lifetime {
self.error_remove_borrow_lifetime(span, lt.ident.span);
}
Ok((span, ExprKind::AddrOf(borrow_kind, mutbl, expr)))
}
fn error_remove_borrow_lifetime(&self, span: Span, lt_span: Span) {
self.struct_span_err(span, "borrow expressions cannot be annotated with lifetimes")
.span_label(lt_span, "annotated with lifetime here")
.span_suggestion(
lt_span,
"remove the lifetime annotation",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
/// Parse `mut?` or `raw [ const | mut ]`.
fn parse_borrow_modifiers(&mut self, lo: Span) -> (ast::BorrowKind, ast::Mutability) {
if self.check_keyword(kw::Raw) && self.look_ahead(1, Token::is_mutability) {
// `raw [ const | mut ]`.
let found_raw = self.eat_keyword(kw::Raw);
assert!(found_raw);
let mutability = self.parse_const_or_mut().unwrap();
self.sess.gated_spans.gate(sym::raw_ref_op, lo.to(self.prev_token.span));
(ast::BorrowKind::Raw, mutability)
} else {
// `mut?`
(ast::BorrowKind::Ref, self.parse_mutability())
}
}
/// Parses `a.b` or `a(13)` or `a[4]` or just `a`.
fn parse_dot_or_call_expr(&mut self, attrs: Option<AttrWrapper>) -> PResult<'a, P<Expr>> {
let attrs = self.parse_or_use_outer_attributes(attrs)?;
self.collect_tokens_for_expr(attrs, |this, attrs| {
let base = this.parse_bottom_expr();
let (span, base) = this.interpolated_or_expr_span(base)?;
this.parse_dot_or_call_expr_with(base, span, attrs)
})
}
pub(super) fn parse_dot_or_call_expr_with(
&mut self,
e0: P<Expr>,
lo: Span,
mut attrs: Vec<ast::Attribute>,
) -> PResult<'a, P<Expr>> {
// Stitch the list of outer attributes onto the return value.
// A little bit ugly, but the best way given the current code
// structure
self.parse_dot_or_call_expr_with_(e0, lo).map(|expr| {
expr.map(|mut expr| {
attrs.extend::<Vec<_>>(expr.attrs.into());
expr.attrs = attrs.into();
expr
})
})
}
fn parse_dot_or_call_expr_with_(&mut self, mut e: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
loop {
if self.eat(&token::Question) {
// `expr?`
e = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Try(e), AttrVec::new());
continue;
}
if self.eat(&token::Dot) {
// expr.f
e = self.parse_dot_suffix_expr(lo, e)?;
continue;
}
if self.expr_is_complete(&e) {
return Ok(e);
}
e = match self.token.kind {
token::OpenDelim(token::Paren) => self.parse_fn_call_expr(lo, e),
token::OpenDelim(token::Bracket) => self.parse_index_expr(lo, e)?,
_ => return Ok(e),
}
}
}
fn look_ahead_type_ascription_as_field(&mut self) -> bool {
self.look_ahead(1, |t| t.is_ident())
&& self.look_ahead(2, |t| t == &token::Colon)
&& self.look_ahead(3, |t| t.can_begin_expr())
}
fn parse_dot_suffix_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
match self.token.uninterpolate().kind {
token::Ident(..) => self.parse_dot_suffix(base, lo),
token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) => {
Ok(self.parse_tuple_field_access_expr(lo, base, symbol, suffix, None))
}
token::Literal(token::Lit { kind: token::Float, symbol, suffix }) => {
Ok(self.parse_tuple_field_access_expr_float(lo, base, symbol, suffix))
}
_ => {
self.error_unexpected_after_dot();
Ok(base)
}
}
}
fn error_unexpected_after_dot(&self) {
// FIXME Could factor this out into non_fatal_unexpected or something.
let actual = pprust::token_to_string(&self.token);
self.struct_span_err(self.token.span, &format!("unexpected token: `{}`", actual)).emit();
}
// We need an identifier or integer, but the next token is a float.
// Break the float into components to extract the identifier or integer.
// FIXME: With current `TokenCursor` it's hard to break tokens into more than 2
// parts unless those parts are processed immediately. `TokenCursor` should either
// support pushing "future tokens" (would be also helpful to `break_and_eat`), or
// we should break everything including floats into more basic proc-macro style
// tokens in the lexer (probably preferable).
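// For example, in `x.1.2` the lexer produces the single float token `1.2`,
// which must be split back into `1`, `.`, and `2` to express two nested
// tuple field accesses.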
fn parse_tuple_field_access_expr_float(
&mut self,
lo: Span,
base: P<Expr>,
float: Symbol,
suffix: Option<Symbol>,
) -> P<Expr> {
#[derive(Debug)]
enum FloatComponent {
IdentLike(String),
Punct(char),
}
use FloatComponent::*;
let float_str = float.as_str();
let mut components = Vec::new();
let mut ident_like = String::new();
for c in float_str.chars() {
if c == '_' || c.is_ascii_alphanumeric() {
ident_like.push(c);
} else if matches!(c, '.' | '+' | '-') {
if !ident_like.is_empty() {
components.push(IdentLike(mem::take(&mut ident_like)));
}
components.push(Punct(c));
} else {
panic!("unexpected character in a float token: {:?}", c)
}
}
if !ident_like.is_empty() {
components.push(IdentLike(ident_like));
}
// With proc macros, the span can refer to anything; the source may be too short,
// or too long, or non-ASCII. It only makes sense to break our span into components
// if its underlying text is identical to our float literal.
let span = self.token.span;
let can_take_span_apart =
|| self.span_to_snippet(span).as_deref() == Ok(float_str).as_deref();
match &*components {
// 1e2
[IdentLike(i)] => {
self.parse_tuple_field_access_expr(lo, base, Symbol::intern(&i), suffix, None)
}
// 1.
[IdentLike(i), Punct('.')] => {
let (ident_span, dot_span) = if can_take_span_apart() {
let (span, ident_len) = (span.data(), BytePos::from_usize(i.len()));
let ident_span = span.with_hi(span.lo + ident_len);
let dot_span = span.with_lo(span.lo + ident_len);
(ident_span, dot_span)
} else {
(span, span)
};
assert!(suffix.is_none());
let symbol = Symbol::intern(&i);
self.token = Token::new(token::Ident(symbol, false), ident_span);
let next_token = (Token::new(token::Dot, dot_span), self.token_spacing);
self.parse_tuple_field_access_expr(lo, base, symbol, None, Some(next_token))
}
// 1.2 | 1.2e3
[IdentLike(i1), Punct('.'), IdentLike(i2)] => {
let (ident1_span, dot_span, ident2_span) = if can_take_span_apart() {
let (span, ident1_len) = (span.data(), BytePos::from_usize(i1.len()));
let ident1_span = span.with_hi(span.lo + ident1_len);
let dot_span = span
.with_lo(span.lo + ident1_len)
.with_hi(span.lo + ident1_len + BytePos(1));
let ident2_span = self.token.span.with_lo(span.lo + ident1_len + BytePos(1));
(ident1_span, dot_span, ident2_span)
} else {
(span, span, span)
};
let symbol1 = Symbol::intern(&i1);
self.token = Token::new(token::Ident(symbol1, false), ident1_span);
// This needs to be `Spacing::Alone` to prevent regressions.
// See issue #76399 and PR #76285 for more details
let next_token1 = (Token::new(token::Dot, dot_span), Spacing::Alone);
let base1 =
self.parse_tuple_field_access_expr(lo, base, symbol1, None, Some(next_token1));
let symbol2 = Symbol::intern(&i2);
let next_token2 = Token::new(token::Ident(symbol2, false), ident2_span);
self.bump_with((next_token2, self.token_spacing)); // `.`
self.parse_tuple_field_access_expr(lo, base1, symbol2, suffix, None)
}
// 1e+ | 1e- (recovered)
[IdentLike(_), Punct('+' | '-')] |
// 1e+2 | 1e-2
[IdentLike(_), Punct('+' | '-'), IdentLike(_)] |
// 1.2e+ | 1.2e-
[IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-')] |
// 1.2e+3 | 1.2e-3
[IdentLike(_), Punct('.'), IdentLike(_), Punct('+' | '-'), IdentLike(_)] => {
// See the FIXME about `TokenCursor` above.
self.error_unexpected_after_dot();
base
}
_ => panic!("unexpected components in a float token: {:?}", components),
}
}
fn parse_tuple_field_access_expr(
&mut self,
lo: Span,
base: P<Expr>,
field: Symbol,
suffix: Option<Symbol>,
next_token: Option<(Token, Spacing)>,
) -> P<Expr> {
match next_token {
Some(next_token) => self.bump_with(next_token),
None => self.bump(),
}
let span = self.prev_token.span;
let field = ExprKind::Field(base, Ident::new(field, span));
self.expect_no_suffix(span, "a tuple index", suffix);
self.mk_expr(lo.to(span), field, AttrVec::new())
}
/// Parse a function call expression, `expr(...)`.
fn parse_fn_call_expr(&mut self, lo: Span, fun: P<Expr>) -> P<Expr> {
let snapshot = if self.token.kind == token::OpenDelim(token::Paren)
&& self.look_ahead_type_ascription_as_field()
{
Some((self.clone(), fun.kind.clone()))
} else {
None
};
let open_paren = self.token.span;
let mut seq = self.parse_paren_expr_seq().map(|args| {
self.mk_expr(lo.to(self.prev_token.span), self.mk_call(fun, args), AttrVec::new())
});
if let Some(expr) =
self.maybe_recover_struct_lit_bad_delims(lo, open_paren, &mut seq, snapshot)
{
return expr;
}
self.recover_seq_parse_error(token::Paren, lo, seq)
}
/// If we encounter a parser state that looks like the user has written a `struct` literal with
/// parentheses instead of braces, recover the parser state and provide suggestions.
#[instrument(skip(self, seq, snapshot), level = "trace")]
fn maybe_recover_struct_lit_bad_delims(
&mut self,
lo: Span,
open_paren: Span,
seq: &mut PResult<'a, P<Expr>>,
snapshot: Option<(Self, ExprKind)>,
) -> Option<P<Expr>> {
match (seq.as_mut(), snapshot) {
(Err(ref mut err), Some((mut snapshot, ExprKind::Path(None, path)))) => {
let name = pprust::path_to_string(&path);
snapshot.bump(); // `(`
match snapshot.parse_struct_fields(path, false, token::Paren) {
Ok((fields, ..)) if snapshot.eat(&token::CloseDelim(token::Paren)) => {
// We are certain we have `Enum::Foo(a: 3, b: 4)`, suggest
// `Enum::Foo { a: 3, b: 4 }` or `Enum::Foo(3, 4)`.
*self = snapshot;
let close_paren = self.prev_token.span;
let span = lo.to(self.prev_token.span);
if !fields.is_empty() {
err.cancel();
let mut err = self.struct_span_err(
span,
"invalid `struct` delimiters or `fn` call arguments",
);
err.multipart_suggestion(
&format!("if `{}` is a struct, use braces as delimiters", name),
vec![
(open_paren, " { ".to_string()),
(close_paren, " }".to_string()),
],
Applicability::MaybeIncorrect,
);
err.multipart_suggestion(
&format!("if `{}` is a function, use the arguments directly", name),
fields
.into_iter()
.map(|field| (field.span.until(field.expr.span), String::new()))
.collect(),
Applicability::MaybeIncorrect,
);
err.emit();
} else {
err.emit();
}
return Some(self.mk_expr_err(span));
}
Ok(_) => {}
Err(mut err) => err.emit(),
}
}
_ => {}
}
None
}
/// Parse an indexing expression `expr[...]`.
fn parse_index_expr(&mut self, lo: Span, base: P<Expr>) -> PResult<'a, P<Expr>> {
self.bump(); // `[`
let index = self.parse_expr()?;
self.expect(&token::CloseDelim(token::Bracket))?;
Ok(self.mk_expr(lo.to(self.prev_token.span), self.mk_index(base, index), AttrVec::new()))
}
/// Assuming we have just parsed `.`, continue parsing into an expression.
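/// For example, this handles `.await` (on Rust 2018+), field accesses like `.f`,
/// and method calls like `.f::<T>(args)`.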
fn parse_dot_suffix(&mut self, self_arg: P<Expr>, lo: Span) -> PResult<'a, P<Expr>> {
if self.token.uninterpolated_span().rust_2018() && self.eat_keyword(kw::Await) {
return Ok(self.mk_await_expr(self_arg, lo));
}
let fn_span_lo = self.token.span;
let mut segment = self.parse_path_segment(PathStyle::Expr, None)?;
self.check_trailing_angle_brackets(&segment, &[&token::OpenDelim(token::Paren)]);
self.check_turbofish_missing_angle_brackets(&mut segment);
if self.check(&token::OpenDelim(token::Paren)) {
// Method call `expr.f()`
let mut args = self.parse_paren_expr_seq()?;
args.insert(0, self_arg);
let fn_span = fn_span_lo.to(self.prev_token.span);
let span = lo.to(self.prev_token.span);
Ok(self.mk_expr(span, ExprKind::MethodCall(segment, args, fn_span), AttrVec::new()))
} else {
// Field access `expr.f`
if let Some(args) = segment.args {
self.struct_span_err(
args.span(),
"field expressions cannot have generic arguments",
)
.emit();
}
let span = lo.to(self.prev_token.span);
Ok(self.mk_expr(span, ExprKind::Field(self_arg, segment.ident), AttrVec::new()))
}
}
/// At the bottom (top?) of the precedence hierarchy,
/// parses things like parenthesized exprs, macros, `return`, etc.
///
/// N.B., this does not parse outer attributes, and is private because it only works
/// correctly if called from `parse_dot_or_call_expr()`.
fn parse_bottom_expr(&mut self) -> PResult<'a, P<Expr>> {
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole_expr!(self);
// Outer attributes are already parsed and will be
// added to the return value after the fact.
//
// Therefore, prevent sub-parser from parsing
// attributes by giving them an empty "already-parsed" list.
let attrs = AttrVec::new();
// Note: when adding new syntax here, don't forget to adjust `TokenKind::can_begin_expr()`.
let lo = self.token.span;
if let token::Literal(_) = self.token.kind {
// This match arm is a special-case of the `_` match arm below and
// could be removed without changing functionality, but it's faster
// to have it here, especially for programs with large constants.
self.parse_lit_expr(attrs)
} else if self.check(&token::OpenDelim(token::Paren)) {
self.parse_tuple_parens_expr(attrs)
} else if self.check(&token::OpenDelim(token::Brace)) {
self.parse_block_expr(None, lo, BlockCheckMode::Default, attrs)
} else if self.check(&token::BinOp(token::Or)) || self.check(&token::OrOr) {
self.parse_closure_expr(attrs)
} else if self.check(&token::OpenDelim(token::Bracket)) {
self.parse_array_or_repeat_expr(attrs, token::Bracket)
} else if self.check_path() {
self.parse_path_start_expr(attrs)
} else if self.check_keyword(kw::Move) || self.check_keyword(kw::Static) {
self.parse_closure_expr(attrs)
} else if self.eat_keyword(kw::If) {
self.parse_if_expr(attrs)
} else if self.check_keyword(kw::For) {
if self.choose_generics_over_qpath(1) {
// NOTE(Centril, eddyb): DO NOT REMOVE! Beyond providing parser recovery,
// this is an insurance policy in case we allow qpaths in (tuple-)struct patterns.
// When `for <Foo as Bar>::Proj in $expr $block` is wanted,
// you can disambiguate in favor of a pattern with `(...)`.
self.recover_quantified_closure_expr(attrs)
} else {
assert!(self.eat_keyword(kw::For));
self.parse_for_expr(None, self.prev_token.span, attrs)
}
} else if self.eat_keyword(kw::While) {
self.parse_while_expr(None, self.prev_token.span, attrs)
} else if let Some(label) = self.eat_label() {
self.parse_labeled_expr(label, attrs, true)
} else if self.eat_keyword(kw::Loop) {
self.parse_loop_expr(None, self.prev_token.span, attrs)
} else if self.eat_keyword(kw::Continue) {
let kind = ExprKind::Continue(self.eat_label());
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
} else if self.eat_keyword(kw::Match) {
let match_sp = self.prev_token.span;
self.parse_match_expr(attrs).map_err(|mut err| {
err.span_label(match_sp, "while parsing this match expression");
err
})
} else if self.eat_keyword(kw::Unsafe) {
self.parse_block_expr(None, lo, BlockCheckMode::Unsafe(ast::UserProvided), attrs)
} else if self.check_inline_const(0) {
self.parse_const_block(lo.to(self.token.span), false)
} else if self.is_do_catch_block() {
self.recover_do_catch(attrs)
} else if self.is_try_block() {
self.expect_keyword(kw::Try)?;
self.parse_try_block(lo, attrs)
} else if self.eat_keyword(kw::Return) {
self.parse_return_expr(attrs)
} else if self.eat_keyword(kw::Break) {
self.parse_break_expr(attrs)
} else if self.eat_keyword(kw::Yield) {
self.parse_yield_expr(attrs)
} else if self.eat_keyword(kw::Let) {
self.parse_let_expr(attrs)
} else if self.eat_keyword(kw::Underscore) {
Ok(self.mk_expr(self.prev_token.span, ExprKind::Underscore, attrs))
} else if !self.unclosed_delims.is_empty() && self.check(&token::Semi) {
// Don't complain about bare semicolons after unclosed braces
// recovery in order to keep the error count down. Fixing the
// delimiters will possibly also fix the bare semicolon found in
// expression context. For example, silence the following error:
//
// error: expected expression, found `;`
// --> file.rs:2:13
// |
// 2 | foo(bar(;
// | ^ expected expression
self.bump();
Ok(self.mk_expr_err(self.token.span))
} else if self.token.uninterpolated_span().rust_2018() {
// `Span::rust_2018()` is somewhat expensive; don't get it repeatedly.
if self.check_keyword(kw::Async) {
if self.is_async_block() {
// Check for `async {` and `async move {`.
self.parse_async_block(attrs)
} else {
self.parse_closure_expr(attrs)
}
} else if self.eat_keyword(kw::Await) {
self.recover_incorrect_await_syntax(lo, self.prev_token.span, attrs)
} else {
self.parse_lit_expr(attrs)
}
} else {
self.parse_lit_expr(attrs)
}
}
fn parse_lit_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
match self.parse_opt_lit() {
Some(literal) => {
let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Lit(literal), attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
None => self.try_macro_suggestion(),
}
}
fn parse_tuple_parens_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect(&token::OpenDelim(token::Paren))?;
let (es, trailing_comma) = match self.parse_seq_to_end(
&token::CloseDelim(token::Paren),
SeqSep::trailing_allowed(token::Comma),
|p| p.parse_expr_catch_underscore(),
) {
Ok(x) => x,
Err(err) => return Ok(self.recover_seq_parse_error(token::Paren, lo, Err(err))),
};
let kind = if es.len() == 1 && !trailing_comma {
// `(e)` is parenthesized `e`.
ExprKind::Paren(es.into_iter().next().unwrap())
} else {
// `(e,)` is a tuple with only one field, `e`.
ExprKind::Tup(es)
};
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
fn parse_array_or_repeat_expr(
&mut self,
attrs: AttrVec,
close_delim: token::DelimToken,
) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `[` or other open delim
let close = &token::CloseDelim(close_delim);
let kind = if self.eat(close) {
// Empty vector
ExprKind::Array(Vec::new())
} else {
// Non-empty vector
let first_expr = self.parse_expr()?;
if self.eat(&token::Semi) {
// Repeating array syntax: `[ 0; 512 ]`
let count = self.parse_anon_const_expr()?;
self.expect(close)?;
ExprKind::Repeat(first_expr, count)
} else if self.eat(&token::Comma) {
// Vector with two or more elements.
let sep = SeqSep::trailing_allowed(token::Comma);
let (remaining_exprs, _) = self.parse_seq_to_end(close, sep, |p| p.parse_expr())?;
let mut exprs = vec![first_expr];
exprs.extend(remaining_exprs);
ExprKind::Array(exprs)
} else {
// Vector with one element
self.expect(close)?;
ExprKind::Array(vec![first_expr])
}
};
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
fn parse_path_start_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let (qself, path) = if self.eat_lt() {
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
(Some(qself), path)
} else {
(None, self.parse_path(PathStyle::Expr)?)
};
let lo = path.span;
// `!`, as an operator, is prefix, so we know this isn't that.
let (hi, kind) = if self.eat(&token::Not) {
// MACRO INVOCATION expression
if qself.is_some() {
self.struct_span_err(path.span, "macros cannot use qualified paths").emit();
}
let mac = MacCall {
path,
args: self.parse_mac_args()?,
prior_type_ascription: self.last_type_ascription,
};
(self.prev_token.span, ExprKind::MacCall(mac))
} else if self.check(&token::OpenDelim(token::Brace)) {
if let Some(expr) = self.maybe_parse_struct_expr(qself.as_ref(), &path, &attrs) {
if qself.is_some() {
self.sess.gated_spans.gate(sym::more_qualified_paths, path.span);
}
return expr;
} else {
(path.span, ExprKind::Path(qself, path))
}
} else {
(path.span, ExprKind::Path(qself, path))
};
let expr = self.mk_expr(lo.to(hi), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `'label: $expr`. The label is already parsed.
fn parse_labeled_expr(
&mut self,
label: Label,
attrs: AttrVec,
consume_colon: bool,
) -> PResult<'a, P<Expr>> {
let lo = label.ident.span;
let label = Some(label);
let ate_colon = self.eat(&token::Colon);
let expr = if self.eat_keyword(kw::While) {
self.parse_while_expr(label, lo, attrs)
} else if self.eat_keyword(kw::For) {
self.parse_for_expr(label, lo, attrs)
} else if self.eat_keyword(kw::Loop) {
self.parse_loop_expr(label, lo, attrs)
} else if self.check(&token::OpenDelim(token::Brace)) || self.token.is_whole_block() {
self.parse_block_expr(label, lo, BlockCheckMode::Default, attrs)
} else {
let msg = "expected `while`, `for`, `loop` or `{` after a label";
self.struct_span_err(self.token.span, msg).span_label(self.token.span, msg).emit();
// Continue as an expression in an effort to recover on `'label: non_block_expr`.
self.parse_expr()
}?;
if !ate_colon && consume_colon {
self.error_labeled_expr_must_be_followed_by_colon(lo, expr.span);
}
Ok(expr)
}
fn error_labeled_expr_must_be_followed_by_colon(&self, lo: Span, span: Span) {
self.struct_span_err(span, "labeled expression must be followed by `:`")
.span_label(lo, "the label")
.span_suggestion_short(
lo.shrink_to_hi(),
"add `:` after the label",
": ".to_string(),
Applicability::MachineApplicable,
)
.note("labels are used before loops and blocks, allowing e.g., `break 'label` to them")
.emit();
}
/// Recover on the syntax `do catch { ... }` suggesting `try { ... }` instead.
fn recover_do_catch(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.bump(); // `do`
self.bump(); // `catch`
let span_dc = lo.to(self.prev_token.span);
self.struct_span_err(span_dc, "found removed `do catch` syntax")
.span_suggestion(
span_dc,
"replace with the new syntax",
"try".to_string(),
Applicability::MachineApplicable,
)
.note("following RFC #2388, the new non-placeholder syntax is `try`")
.emit();
self.parse_try_block(lo, attrs)
}
/// Parse an expression if the token can begin one.
fn parse_expr_opt(&mut self) -> PResult<'a, Option<P<Expr>>> {
Ok(if self.token.can_begin_expr() { Some(self.parse_expr()?) } else { None })
}
/// Parse `"return" expr?`.
fn parse_return_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Ret(self.parse_expr_opt()?);
let expr = self.mk_expr(lo.to(self.prev_token.span), kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `"break" (('label (:? expr)?) | expr?)` with `"break"` token already eaten.
/// If the label is followed immediately by a `:` token, the label and `:` are
/// parsed as part of the expression (i.e. a labeled loop). The language team has
/// decided in #87026 to require parentheses as a visual aid to avoid confusion if
/// the break expression of an unlabeled break is a labeled loop (as in
/// `break 'lbl: loop {}`); a labeled break with an unlabeled loop as its value
/// expression only gets a warning for compatibility reasons; and a labeled break
/// with a labeled loop does not even get a warning because there is no ambiguity.
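/// For example, `break 'outer: loop {}` triggers the parenthesization error below,
/// while `break 'outer loop {}` only buffers the `BREAK_WITH_LABEL_AND_LOOP` lint.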
fn parse_break_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let mut label = self.eat_label();
let kind = if label.is_some() && self.token == token::Colon {
// The value expression can be a labeled loop, see issue #86948, e.g.:
// `loop { break 'label: loop { break 'label 42; }; }`
let lexpr = self.parse_labeled_expr(label.take().unwrap(), AttrVec::new(), true)?;
self.struct_span_err(
lexpr.span,
"parentheses are required around this expression to avoid confusion with a labeled break expression",
)
.multipart_suggestion(
"wrap the expression in parentheses",
vec![
(lexpr.span.shrink_to_lo(), "(".to_string()),
(lexpr.span.shrink_to_hi(), ")".to_string()),
],
Applicability::MachineApplicable,
)
.emit();
Some(lexpr)
} else if self.token != token::OpenDelim(token::Brace)
|| !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
{
let expr = self.parse_expr_opt()?;
if let Some(ref expr) = expr {
if label.is_some()
&& matches!(
expr.kind,
ExprKind::While(_, _, None)
| ExprKind::ForLoop(_, _, _, None)
| ExprKind::Loop(_, None)
| ExprKind::Block(_, None)
)
{
self.sess.buffer_lint_with_diagnostic(
BREAK_WITH_LABEL_AND_LOOP,
lo.to(expr.span),
ast::CRATE_NODE_ID,
"this labeled break expression is easy to confuse with an unlabeled break with a labeled value expression",
BuiltinLintDiagnostics::BreakWithLabelAndLoop(expr.span),
);
}
}
expr
} else {
None
};
let expr = self.mk_expr(lo.to(self.prev_token.span), ExprKind::Break(label, kind), attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Parse `"yield" expr?`.
fn parse_yield_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let kind = ExprKind::Yield(self.parse_expr_opt()?);
let span = lo.to(self.prev_token.span);
self.sess.gated_spans.gate(sym::generators, span);
let expr = self.mk_expr(span, kind, attrs);
self.maybe_recover_from_bad_qpath(expr, true)
}
/// Returns a string literal if the next token is a string literal.
/// On failure, returns `Err(Some(lit))` if the next token is a literal of the wrong kind,
/// and `Err(None)` if the next token is not a literal at all.
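/// For example, `"foo"` yields `Ok(..)`, `42` yields `Err(Some(lit))`, and
/// `foo` yields `Err(None)`.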
pub fn parse_str_lit(&mut self) -> Result<ast::StrLit, Option<Lit>> {
match self.parse_opt_lit() {
Some(lit) => match lit.kind {
ast::LitKind::Str(symbol_unescaped, style) => Ok(ast::StrLit {
style,
symbol: lit.token.symbol,
suffix: lit.token.suffix,
span: lit.span,
symbol_unescaped,
}),
_ => Err(Some(lit)),
},
None => Err(None),
}
}
pub(super) fn parse_lit(&mut self) -> PResult<'a, Lit> {
self.parse_opt_lit().ok_or_else(|| {
if let token::Interpolated(inner) = &self.token.kind {
let expr = match inner.as_ref() {
token::NtExpr(expr) => Some(expr),
token::NtLiteral(expr) => Some(expr),
_ => None,
};
if let Some(expr) = expr {
if matches!(expr.kind, ExprKind::Err) {
self.diagnostic()
.delay_span_bug(self.token.span, &"invalid interpolated expression");
return self.diagnostic().struct_dummy();
}
}
}
let msg = format!("unexpected token: {}", super::token_descr(&self.token));
self.struct_span_err(self.token.span, &msg)
})
}
/// Matches `lit = true | false | token_lit`.
/// Returns `None` if the next token is not a literal.
pub(super) fn parse_opt_lit(&mut self) -> Option<Lit> {
let mut recovered = None;
if self.token == token::Dot {
// Attempt to recover `.4` as `0.4`. We don't currently have any syntax where
// dot would follow an optional literal, so we do this unconditionally.
recovered = self.look_ahead(1, |next_token| {
if let token::Literal(token::Lit { kind: token::Integer, symbol, suffix }) =
next_token.kind
{
if self.token.span.hi() == next_token.span.lo() {
let s = String::from("0.") + symbol.as_str();
let kind = TokenKind::lit(token::Float, Symbol::intern(&s), suffix);
return Some(Token::new(kind, self.token.span.to(next_token.span)));
}
}
None
});
if let Some(token) = &recovered {
self.bump();
self.error_float_lits_must_have_int_part(&token);
}
}
let token = recovered.as_ref().unwrap_or(&self.token);
match Lit::from_token(token) {
Ok(lit) => {
self.bump();
Some(lit)
}
Err(LitError::NotLiteral) => None,
Err(err) => {
let span = token.span;
let lit = match token.kind {
token::Literal(lit) => lit,
_ => unreachable!(),
};
self.bump();
self.report_lit_error(err, lit, span);
// Pack possible quotes and prefixes from the original literal into
// the error literal's symbol so they can be pretty-printed faithfully.
let suffixless_lit = token::Lit::new(lit.kind, lit.symbol, None);
let symbol = Symbol::intern(&suffixless_lit.to_string());
let lit = token::Lit::new(token::Err, symbol, lit.suffix);
Some(Lit::from_lit_token(lit, span).unwrap_or_else(|_| unreachable!()))
}
}
}
fn error_float_lits_must_have_int_part(&self, token: &Token) {
self.struct_span_err(token.span, "float literals must have an integer part")
.span_suggestion(
token.span,
"must have an integer part",
pprust::token_to_string(token).into(),
Applicability::MachineApplicable,
)
.emit();
}
fn report_lit_error(&self, err: LitError, lit: token::Lit, span: Span) {
// Checks if `s` looks like i32 or u1234 etc.
fn looks_like_width_suffix(first_chars: &[char], s: &str) -> bool {
s.len() > 1 && s.starts_with(first_chars) && s[1..].chars().all(|c| c.is_ascii_digit())
}
let token::Lit { kind, suffix, .. } = lit;
match err {
// `NotLiteral` is not an error by itself, so we don't report
// it and give the parser opportunity to try something else.
LitError::NotLiteral => {}
// `LexerError` *is* an error, but it was already reported
// by lexer, so here we don't report it the second time.
LitError::LexerError => {}
LitError::InvalidSuffix => {
self.expect_no_suffix(
span,
&format!("{} {} literal", kind.article(), kind.descr()),
suffix,
);
}
LitError::InvalidIntSuffix => {
let suf = suffix.expect("suffix error with no suffix");
let suf = suf.as_str();
if looks_like_width_suffix(&['i', 'u'], &suf) {
// If it looks like a width, try to be helpful.
let msg = format!("invalid width `{}` for integer literal", &suf[1..]);
self.struct_span_err(span, &msg)
.help("valid widths are 8, 16, 32, 64 and 128")
.emit();
} else {
let msg = format!("invalid suffix `{}` for number literal", suf);
self.struct_span_err(span, &msg)
.span_label(span, format!("invalid suffix `{}`", suf))
.help("the suffix must be one of the numeric types (`u32`, `isize`, `f32`, etc.)")
.emit();
}
}
LitError::InvalidFloatSuffix => {
let suf = suffix.expect("suffix error with no suffix");
let suf = suf.as_str();
if looks_like_width_suffix(&['f'], suf) {
// If it looks like a width, try to be helpful.
let msg = format!("invalid width `{}` for float literal", &suf[1..]);
self.struct_span_err(span, &msg).help("valid widths are 32 and 64").emit();
} else {
let msg = format!("invalid suffix `{}` for float literal", suf);
self.struct_span_err(span, &msg)
.span_label(span, format!("invalid suffix `{}`", suf))
.help("valid suffixes are `f32` and `f64`")
.emit();
}
}
LitError::NonDecimalFloat(base) => {
let descr = match base {
16 => "hexadecimal",
8 => "octal",
2 => "binary",
_ => unreachable!(),
};
self.struct_span_err(span, &format!("{} float literal is not supported", descr))
.span_label(span, "not supported")
.emit();
}
LitError::IntTooLarge => {
self.struct_span_err(span, "integer literal is too large").emit();
}
}
}
pub(super) fn expect_no_suffix(&self, sp: Span, kind: &str, suffix: Option<Symbol>) {
if let Some(suf) = suffix {
let mut err = if kind == "a tuple index"
&& [sym::i32, sym::u32, sym::isize, sym::usize].contains(&suf)
{
// #59553: warn instead of reject out of hand to allow the fix to percolate
// through the ecosystem when people fix their macros
let mut err = self
.sess
.span_diagnostic
.struct_span_warn(sp, &format!("suffixes on {} are invalid", kind));
err.note(&format!(
"`{}` is *temporarily* accepted on tuple index fields as it was \
incorrectly accepted on stable for a few releases",
suf,
));
err.help(
"on proc macros, you'll want to use `syn::Index::from` or \
`proc_macro::Literal::*_unsuffixed` for code that will desugar \
to tuple field access",
);
err.note(
"see issue #60210 <https://github.com/rust-lang/rust/issues/60210> \
for more information",
);
err
} else {
self.struct_span_err(sp, &format!("suffixes on {} are invalid", kind))
};
err.span_label(sp, format!("invalid suffix `{}`", suf));
err.emit();
}
}
/// Matches `'-' lit | lit` (cf. `ast_validation::AstValidator::check_expr_within_pat`).
/// Keep this in sync with `Token::can_begin_literal_maybe_minus`.
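/// For example, `-1` is parsed as the literal `1` wrapped in `UnOp::Neg`.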
pub fn parse_literal_maybe_minus(&mut self) -> PResult<'a, P<Expr>> {
maybe_whole_expr!(self);
let lo = self.token.span;
let minus_present = self.eat(&token::BinOp(token::Minus));
let lit = self.parse_lit()?;
let expr = self.mk_expr(lit.span, ExprKind::Lit(lit), AttrVec::new());
if minus_present {
Ok(self.mk_expr(
lo.to(self.prev_token.span),
self.mk_unary(UnOp::Neg, expr),
AttrVec::new(),
))
} else {
Ok(expr)
}
}
fn is_array_like_block(&mut self) -> bool {
self.look_ahead(1, |t| matches!(t.kind, TokenKind::Ident(..) | TokenKind::Literal(_)))
&& self.look_ahead(2, |t| t == &token::Comma)
&& self.look_ahead(3, |t| t.can_begin_expr())
}
/// Emits a suggestion if it looks like the user meant an array but
/// accidentally used braces, causing the code to be interpreted as a block
/// expression.
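/// For example, `{ 1, 2, 3 }` was likely meant to be the array `[1, 2, 3]`.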
fn maybe_suggest_brackets_instead_of_braces(
&mut self,
lo: Span,
attrs: AttrVec,
) -> Option<P<Expr>> {
let mut snapshot = self.clone();
match snapshot.parse_array_or_repeat_expr(attrs, token::Brace) {
Ok(arr) => {
let hi = snapshot.prev_token.span;
self.struct_span_err(
arr.span,
"this code is interpreted as a block expression, not an array",
)
.multipart_suggestion(
"try using [] instead of {}",
vec![(lo, "[".to_owned()), (hi, "]".to_owned())],
Applicability::MaybeIncorrect,
)
.note("to define an array, one would use square brackets instead of curly braces")
.emit();
*self = snapshot;
Some(self.mk_expr_err(arr.span))
}
Err(mut e) => {
e.cancel();
None
}
}
}
/// Parses a block or unsafe block.
pub(super) fn parse_block_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
blk_mode: BlockCheckMode,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
if self.is_array_like_block() {
if let Some(arr) = self.maybe_suggest_brackets_instead_of_braces(lo, attrs.clone()) {
return Ok(arr);
}
}
if let Some(label) = opt_label {
self.sess.gated_spans.gate(sym::label_break_value, label.ident.span);
}
if self.token.is_whole_block() {
self.struct_span_err(self.token.span, "cannot use a `block` macro fragment here")
.span_label(lo.to(self.token.span), "the `block` fragment is within this context")
.emit();
}
let (inner_attrs, blk) = self.parse_block_common(lo, blk_mode)?;
attrs.extend(inner_attrs);
Ok(self.mk_expr(blk.span, ExprKind::Block(blk, opt_label), attrs))
}
/// Recover on an explicitly quantified closure expression, e.g., `for<'a> |x: &'a u8| *x + 1`.
fn recover_quantified_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
let _ = self.parse_late_bound_lifetime_defs()?;
let span_for = lo.to(self.prev_token.span);
let closure = self.parse_closure_expr(attrs)?;
self.struct_span_err(span_for, "cannot introduce explicit parameters for a closure")
.span_label(closure.span, "the parameters are attached to this closure")
.span_suggestion(
span_for,
"remove the parameters",
String::new(),
Applicability::MachineApplicable,
)
.emit();
Ok(self.mk_expr_err(lo.to(closure.span)))
}
/// Parses a closure expression (e.g., `move |args| expr`).
fn parse_closure_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
let movability =
if self.eat_keyword(kw::Static) { Movability::Static } else { Movability::Movable };
let asyncness = if self.token.uninterpolated_span().rust_2018() {
self.parse_asyncness()
} else {
Async::No
};
let capture_clause = self.parse_capture_clause()?;
let decl = self.parse_fn_block_decl()?;
let decl_hi = self.prev_token.span;
let mut body = match decl.output {
FnRetTy::Default(_) => {
let restrictions = self.restrictions - Restrictions::STMT_EXPR;
self.parse_expr_res(restrictions, None)?
}
_ => {
// If an explicit return type is given, require a block to appear (RFC 968).
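// For example, `|x| -> u32 { x + 1 }` is accepted, but `|x| -> u32 x + 1` is not.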
let body_lo = self.token.span;
self.parse_block_expr(None, body_lo, BlockCheckMode::Default, AttrVec::new())?
}
};
if let Async::Yes { span, .. } = asyncness {
// Feature-gate `async ||` closures.
self.sess.gated_spans.gate(sym::async_closure, span);
}
if self.token.kind == TokenKind::Semi && self.token_cursor.frame.delim == DelimToken::Paren
{
// It is likely that the closure body is a block, but with the braces
// removed. We will recover and eat the following statements later in
// the parsing process.
body = self.mk_expr_err(body.span);
}
let body_span = body.span;
let closure = self.mk_expr(
lo.to(body.span),
ExprKind::Closure(capture_clause, asyncness, movability, decl, body, lo.to(decl_hi)),
attrs,
);
// Disable recovery for closure body
let spans =
ClosureSpans { whole_closure: closure.span, closing_pipe: decl_hi, body: body_span };
self.current_closure = Some(spans);
Ok(closure)
}
/// Parses an optional `move` prefix to a closure-like construct.
fn parse_capture_clause(&mut self) -> PResult<'a, CaptureBy> {
if self.eat_keyword(kw::Move) {
// Check for `move async` and recover
if self.check_keyword(kw::Async) {
let move_async_span = self.token.span.with_lo(self.prev_token.span.data().lo);
Err(self.incorrect_move_async_order_found(move_async_span))
} else {
Ok(CaptureBy::Value)
}
} else {
Ok(CaptureBy::Ref)
}
}
/// Parses the `|arg, arg|` header of a closure.
fn parse_fn_block_decl(&mut self) -> PResult<'a, P<FnDecl>> {
let inputs = if self.eat(&token::OrOr) {
Vec::new()
} else {
self.expect(&token::BinOp(token::Or))?;
let args = self
.parse_seq_to_before_tokens(
&[&token::BinOp(token::Or), &token::OrOr],
SeqSep::trailing_allowed(token::Comma),
TokenExpectType::NoExpect,
|p| p.parse_fn_block_param(),
)?
.0;
self.expect_or()?;
args
};
let output =
self.parse_ret_ty(AllowPlus::Yes, RecoverQPath::Yes, RecoverReturnSign::Yes)?;
Ok(P(FnDecl { inputs, output }))
}
/// Parses a parameter in a closure header (e.g., `|arg, arg|`).
fn parse_fn_block_param(&mut self) -> PResult<'a, Param> {
let lo = self.token.span;
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let pat = this.parse_pat_no_top_alt(PARAM_EXPECTED)?;
let ty = if this.eat(&token::Colon) {
this.parse_ty()?
} else {
this.mk_ty(this.prev_token.span, TyKind::Infer)
};
Ok((
Param {
attrs: attrs.into(),
ty,
pat,
span: lo.to(this.token.span),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::MaybeComma,
))
})
}
/// Parses an `if` expression (`if` token already eaten).
fn parse_if_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let cond = self.parse_cond_expr()?;
let missing_then_block_binop_span = || {
match cond.kind {
ExprKind::Binary(Spanned { span: binop_span, .. }, _, ref right)
if let ExprKind::Block(..) = right.kind => Some(binop_span),
_ => None
}
};
// Verify that the parsed `if` condition makes sense as a condition. If it is a block, then
// verify that the last statement is either an implicit return (no `;`) or an explicit
// return. This won't catch blocks with an explicit `return`, but that would be caught by
// the dead code lint.
let thn = if self.token.is_keyword(kw::Else) || !cond.returns() {
if let Some(binop_span) = missing_then_block_binop_span() {
self.error_missing_if_then_block(lo, None, Some(binop_span)).emit();
self.mk_block_err(cond.span)
} else {
self.error_missing_if_cond(lo, cond.span)
}
} else {
let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let not_block = self.token != token::OpenDelim(token::Brace);
let block = self.parse_block().map_err(|err| {
if not_block {
self.error_missing_if_then_block(lo, Some(err), missing_then_block_binop_span())
} else {
err
}
})?;
self.error_on_if_block_attrs(lo, false, block.span, &attrs);
block
};
let els = if self.eat_keyword(kw::Else) { Some(self.parse_else_expr()?) } else { None };
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::If(cond, thn, els), attrs))
}
fn error_missing_if_then_block(
&self,
if_span: Span,
err: Option<DiagnosticBuilder<'a>>,
binop_span: Option<Span>,
) -> DiagnosticBuilder<'a> {
let msg = "this `if` expression has a condition, but no block";
let mut err = if let Some(mut err) = err {
err.span_label(if_span, msg);
err
} else {
self.struct_span_err(if_span, msg)
};
if let Some(binop_span) = binop_span {
err.span_help(binop_span, "maybe you forgot the right operand of the condition?");
}
err
}
fn error_missing_if_cond(&self, lo: Span, span: Span) -> P<ast::Block> {
let sp = self.sess.source_map().next_point(lo);
self.struct_span_err(sp, "missing condition for `if` expression")
.span_label(sp, "expected if condition here")
.emit();
self.mk_block_err(span)
}
/// Parses the condition of a `if` or `while` expression.
fn parse_cond_expr(&mut self) -> PResult<'a, P<Expr>> {
let cond = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
if let ExprKind::Let(..) = cond.kind {
// Remove the last feature gating of a `let` expression since it's stable.
self.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
}
Ok(cond)
}
/// Parses a `let $pat = $expr` pseudo-expression.
/// The `let` token has already been eaten.
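/// For example, the `let Some(x) = opt` in an `if` condition reaches this method;
/// `parse_cond_expr` then removes the feature gate again, since `if let` is stable.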
fn parse_let_expr(&mut self, attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.prev_token.span;
let pat = self.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?;
self.expect(&token::Eq)?;
let expr = self.with_res(self.restrictions | Restrictions::NO_STRUCT_LITERAL, |this| {
this.parse_assoc_expr_with(1 + prec_let_scrutinee_needs_par(), None.into())
})?;
let span = lo.to(expr.span);
self.sess.gated_spans.gate(sym::let_chains, span);
Ok(self.mk_expr(span, ExprKind::Let(pat, expr, span), attrs))
}
/// Parses an `else { ... }` expression (`else` token already eaten).
fn parse_else_expr(&mut self) -> PResult<'a, P<Expr>> {
let ctx_span = self.prev_token.span; // `else`
let attrs = self.parse_outer_attributes()?.take_for_recovery(); // For recovery.
let expr = if self.eat_keyword(kw::If) {
self.parse_if_expr(AttrVec::new())?
} else {
let blk = self.parse_block()?;
self.mk_expr(blk.span, ExprKind::Block(blk, None), AttrVec::new())
};
self.error_on_if_block_attrs(ctx_span, true, expr.span, &attrs);
Ok(expr)
}
fn error_on_if_block_attrs(
&self,
ctx_span: Span,
is_ctx_else: bool,
branch_span: Span,
attrs: &[ast::Attribute],
) {
let (span, last) = match attrs {
[] => return,
[x0 @ xn] | [x0, .., xn] => (x0.span.to(xn.span), xn.span),
};
let ctx = if is_ctx_else { "else" } else { "if" };
self.struct_span_err(last, "outer attributes are not allowed on `if` and `else` branches")
.span_label(branch_span, "the attributes are attached to this branch")
.span_label(ctx_span, format!("the branch belongs to this `{}`", ctx))
.span_suggestion(
span,
"remove the attributes",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
/// Parses `for <src_pat> in <src_expr> <src_loop_block>` (`for` token already eaten).
fn parse_for_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
// Record whether we are about to parse `for (`.
// This is used below for recovery in case of `for ( $stuff ) $block`
// in which case we will suggest `for $stuff $block`.
let begin_paren = match self.token.kind {
token::OpenDelim(token::Paren) => Some(self.token.span),
_ => None,
};
let pat = self.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?;
if !self.eat_keyword(kw::In) {
self.error_missing_in_for_loop();
}
self.check_for_for_in_in_typo(self.prev_token.span);
let expr = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
let pat = self.recover_parens_around_for_head(pat, begin_paren);
let (iattrs, loop_block) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
let kind = ExprKind::ForLoop(pat, expr, loop_block, opt_label);
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
}
fn error_missing_in_for_loop(&mut self) {
let (span, msg, sugg) = if self.token.is_ident_named(sym::of) {
// Possibly using JS syntax (#75311).
let span = self.token.span;
self.bump();
(span, "try using `in` here instead", "in")
} else {
(self.prev_token.span.between(self.token.span), "try adding `in` here", " in ")
};
self.struct_span_err(span, "missing `in` in `for` loop")
.span_suggestion_short(
span,
msg,
sugg.into(),
// Has been misleading, at least in the past (closed Issue #48492).
Applicability::MaybeIncorrect,
)
.emit();
}
/// Parses a `while` or `while let` expression (`while` token already eaten).
fn parse_while_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
let cond = self.parse_cond_expr()?;
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::While(cond, body, opt_label), attrs))
}
/// Parses `loop { ... }` (`loop` token already eaten).
fn parse_loop_expr(
&mut self,
opt_label: Option<Label>,
lo: Span,
mut attrs: AttrVec,
) -> PResult<'a, P<Expr>> {
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
Ok(self.mk_expr(lo.to(self.prev_token.span), ExprKind::Loop(body, opt_label), attrs))
}
fn eat_label(&mut self) -> Option<Label> {
self.token.lifetime().map(|ident| {
self.bump();
Label { ident }
})
}
/// Parses a `match ... { ... }` expression (`match` token already eaten).
fn parse_match_expr(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let match_span = self.prev_token.span;
let lo = self.prev_token.span;
let scrutinee = self.parse_expr_res(Restrictions::NO_STRUCT_LITERAL, None)?;
if let Err(mut e) = self.expect(&token::OpenDelim(token::Brace)) {
if self.token == token::Semi {
e.span_suggestion_short(
match_span,
"try removing this `match`",
String::new(),
Applicability::MaybeIncorrect, // speculative
);
}
return Err(e);
}
attrs.extend(self.parse_inner_attributes()?);
let mut arms: Vec<Arm> = Vec::new();
while self.token != token::CloseDelim(token::Brace) {
match self.parse_arm() {
Ok(arm) => arms.push(arm),
Err(mut e) => {
// Recover by skipping to the end of the block.
e.emit();
self.recover_stmt();
let span = lo.to(self.token.span);
if self.token == token::CloseDelim(token::Brace) {
self.bump();
}
return Ok(self.mk_expr(span, ExprKind::Match(scrutinee, arms), attrs));
}
}
}
let hi = self.token.span;
self.bump();
Ok(self.mk_expr(lo.to(hi), ExprKind::Match(scrutinee, arms), attrs))
}
/// Attempt to recover from match arm body with statements and no surrounding braces.
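/// For example, `pat => stmt1; stmt2,` is recovered by suggesting braces,
/// i.e. `pat => { stmt1; stmt2 }`; with a single statement, replacing the
/// `;` with a `,` is suggested instead.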
fn parse_arm_body_missing_braces(
&mut self,
first_expr: &P<Expr>,
arrow_span: Span,
) -> Option<P<Expr>> {
if self.token.kind != token::Semi {
return None;
}
let start_snapshot = self.clone();
let semi_sp = self.token.span;
self.bump(); // `;`
let mut stmts =
vec![self.mk_stmt(first_expr.span, ast::StmtKind::Expr(first_expr.clone()))];
let err = |this: &mut Parser<'_>, stmts: Vec<ast::Stmt>| {
let span = stmts[0].span.to(stmts[stmts.len() - 1].span);
let mut err = this.struct_span_err(span, "`match` arm body without braces");
let (these, s, are) =
if stmts.len() > 1 { ("these", "s", "are") } else { ("this", "", "is") };
err.span_label(
span,
&format!(
"{these} statement{s} {are} not surrounded by a body",
these = these,
s = s,
are = are
),
);
err.span_label(arrow_span, "while parsing the `match` arm starting here");
if stmts.len() > 1 {
err.multipart_suggestion(
&format!("surround the statement{} with a body", s),
vec![
(span.shrink_to_lo(), "{ ".to_string()),
(span.shrink_to_hi(), " }".to_string()),
],
Applicability::MachineApplicable,
);
} else {
err.span_suggestion(
semi_sp,
"use a comma to end a `match` arm expression",
",".to_string(),
Applicability::MachineApplicable,
);
}
err.emit();
this.mk_expr_err(span)
};
// We might have either a `,` -> `;` typo, or a block without braces. We need
// a more subtle parsing strategy.
loop {
if self.token.kind == token::CloseDelim(token::Brace) {
// We have reached the closing brace of the `match` expression.
return Some(err(self, stmts));
}
if self.token.kind == token::Comma {
*self = start_snapshot;
return None;
}
let pre_pat_snapshot = self.clone();
match self.parse_pat_no_top_alt(None) {
Ok(_pat) => {
if self.token.kind == token::FatArrow {
// Reached arm end.
*self = pre_pat_snapshot;
return Some(err(self, stmts));
}
}
Err(mut err) => {
err.cancel();
}
}
*self = pre_pat_snapshot;
match self.parse_stmt_without_recovery(true, ForceCollect::No) {
// Consume statements for as long as possible.
Ok(Some(stmt)) => {
stmts.push(stmt);
}
Ok(None) => {
*self = start_snapshot;
break;
}
// We could parse neither yet another statement missing its
// enclosing block, nor the next arm's pattern or closing brace.
Err(mut stmt_err) => {
stmt_err.cancel();
*self = start_snapshot;
break;
}
}
}
None
}
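/// Parses a single `match` arm, e.g. `pat if guard => expr,`.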
pub(super) fn parse_arm(&mut self) -> PResult<'a, Arm> {
let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
let pat = this.parse_pat_allow_top_alt(None, RecoverComma::Yes, RecoverColon::Yes)?;
let guard = if this.eat_keyword(kw::If) {
let if_span = this.prev_token.span;
let cond = this.parse_expr()?;
if let ExprKind::Let(..) = cond.kind {
// Remove the last feature gating of a `let` expression since it's stable.
this.sess.gated_spans.ungate_last(sym::let_chains, cond.span);
let span = if_span.to(cond.span);
this.sess.gated_spans.gate(sym::if_let_guard, span);
}
Some(cond)
} else {
None
};
let arrow_span = this.token.span;
if let Err(mut err) = this.expect(&token::FatArrow) {
// We might have a `=>` -> `=` or `->` typo (issue #89396).
if TokenKind::FatArrow
.similar_tokens()
.map_or(false, |similar_tokens| similar_tokens.contains(&this.token.kind))
{
err.span_suggestion(
this.token.span,
"try using a fat arrow here",
"=>".to_string(),
Applicability::MaybeIncorrect,
);
err.emit();
this.bump();
} else {
return Err(err);
}
}
let arm_start_span = this.token.span;
let expr = this.parse_expr_res(Restrictions::STMT_EXPR, None).map_err(|mut err| {
err.span_label(arrow_span, "while parsing the `match` arm starting here");
err
})?;
let require_comma = classify::expr_requires_semi_to_be_stmt(&expr)
&& this.token != token::CloseDelim(token::Brace);
let hi = this.prev_token.span;
if require_comma {
let sm = this.sess.source_map();
if let Some(body) = this.parse_arm_body_missing_braces(&expr, arrow_span) {
let span = body.span;
return Ok((
ast::Arm {
attrs: attrs.into(),
pat,
guard,
body,
span,
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::None,
));
}
this.expect_one_of(&[token::Comma], &[token::CloseDelim(token::Brace)]).map_err(
|mut err| {
match (sm.span_to_lines(expr.span), sm.span_to_lines(arm_start_span)) {
(Ok(ref expr_lines), Ok(ref arm_start_lines))
if arm_start_lines.lines[0].end_col
== expr_lines.lines[0].end_col
&& expr_lines.lines.len() == 2
&& this.token == token::FatArrow =>
{
// We check whether there's any trailing code in the parse span,
// if there isn't, we very likely have the following:
//
// X | &Y => "y"
// | -- - missing comma
// | |
// | arrow_span
// X | &X => "x"
// | - ^^ self.token.span
// | |
// | parsed until here as `"y" & X`
err.span_suggestion_short(
arm_start_span.shrink_to_hi(),
"missing a comma here to end this `match` arm",
",".to_owned(),
Applicability::MachineApplicable,
);
}
_ => {
err.span_label(
arrow_span,
"while parsing the `match` arm starting here",
);
}
}
err
},
)?;
} else {
this.eat(&token::Comma);
}
Ok((
ast::Arm {
attrs: attrs.into(),
pat,
guard,
body: expr,
span: lo.to(hi),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::None,
))
})
}
/// Parses a `try {...}` expression (`try` token already eaten).
fn parse_try_block(&mut self, span_lo: Span, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
if self.eat_keyword(kw::Catch) {
let mut error = self.struct_span_err(
self.prev_token.span,
"keyword `catch` cannot follow a `try` block",
);
error.help("try using `match` on the result of the `try` block instead");
error.emit();
Err(error)
} else {
let span = span_lo.to(body.span);
self.sess.gated_spans.gate(sym::try_blocks, span);
Ok(self.mk_expr(span, ExprKind::TryBlock(body), attrs))
}
}
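/// Returns whether the upcoming tokens form `do catch { ... }`; this is
/// detected only so the removed `do catch` syntax can be recovered with an
/// error elsewhere.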
fn is_do_catch_block(&self) -> bool {
self.token.is_keyword(kw::Do)
&& self.is_keyword_ahead(1, &[kw::Catch])
&& self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
&& !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL)
}
fn is_try_block(&self) -> bool {
self.token.is_keyword(kw::Try)
&& self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
&& self.token.uninterpolated_span().rust_2018()
}
/// Parses an `async move? {...}` expression.
fn parse_async_block(&mut self, mut attrs: AttrVec) -> PResult<'a, P<Expr>> {
let lo = self.token.span;
self.expect_keyword(kw::Async)?;
let capture_clause = self.parse_capture_clause()?;
let (iattrs, body) = self.parse_inner_attrs_and_block()?;
attrs.extend(iattrs);
let kind = ExprKind::Async(capture_clause, DUMMY_NODE_ID, body);
Ok(self.mk_expr(lo.to(self.prev_token.span), kind, attrs))
}
fn is_async_block(&self) -> bool {
self.token.is_keyword(kw::Async)
&& ((
// `async move {`
self.is_keyword_ahead(1, &[kw::Move])
&& self.look_ahead(2, |t| *t == token::OpenDelim(token::Brace))
) || (
// `async {`
self.look_ahead(1, |t| *t == token::OpenDelim(token::Brace))
))
}
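/// Heuristic: returns `true` when the upcoming `{ ... }` cannot be a block,
/// e.g. `{ ident, ` or `{ ident: expr, `, and is therefore likely a struct
/// literal.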
fn is_certainly_not_a_block(&self) -> bool {
self.look_ahead(1, |t| t.is_ident())
&& (
// `{ ident, ` cannot start a block.
self.look_ahead(2, |t| t == &token::Comma)
|| self.look_ahead(2, |t| t == &token::Colon)
&& (
// `{ ident: token, ` cannot start a block.
self.look_ahead(4, |t| t == &token::Comma) ||
// `{ ident: ` cannot start a block unless it's a type ascription `ident: Type`.
self.look_ahead(3, |t| !t.can_begin_type())
)
)
}
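/// Attempts to parse `path { ... }` as a struct literal expression.
/// Returns `None` when struct literals are not allowed in this position
/// and the braces may instead begin a block.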
fn maybe_parse_struct_expr(
&mut self,
qself: Option<&ast::QSelf>,
path: &ast::Path,
attrs: &AttrVec,
) -> Option<PResult<'a, P<Expr>>> {
let struct_allowed = !self.restrictions.contains(Restrictions::NO_STRUCT_LITERAL);
if struct_allowed || self.is_certainly_not_a_block() {
if let Err(err) = self.expect(&token::OpenDelim(token::Brace)) {
return Some(Err(err));
}
let expr = self.parse_struct_expr(qself.cloned(), path.clone(), attrs.clone(), true);
if let (Ok(expr), false) = (&expr, struct_allowed) {
// This is a struct literal, but we can't accept one here.
self.error_struct_lit_not_allowed_here(path.span, expr.span);
}
return Some(expr);
}
None
}
fn error_struct_lit_not_allowed_here(&self, lo: Span, sp: Span) {
self.struct_span_err(sp, "struct literals are not allowed here")
.multipart_suggestion(
"surround the struct literal with parentheses",
vec![(lo.shrink_to_lo(), "(".to_string()), (sp.shrink_to_hi(), ")".to_string())],
Applicability::MachineApplicable,
)
.emit();
}
pub(super) fn parse_struct_fields(
&mut self,
pth: ast::Path,
recover: bool,
close_delim: token::DelimToken,
) -> PResult<'a, (Vec<ExprField>, ast::StructRest, bool)> {
let mut fields = Vec::new();
let mut base = ast::StructRest::None;
let mut recover_async = false;
let mut async_block_err = |e: &mut DiagnosticBuilder<'_>, span: Span| {
recover_async = true;
e.span_label(span, "`async` blocks are only allowed in Rust 2018 or later");
e.help(&format!("set `edition = \"{}\"` in `Cargo.toml`", LATEST_STABLE_EDITION));
e.note("for more on editions, read https://doc.rust-lang.org/edition-guide");
};
while self.token != token::CloseDelim(close_delim) {
if self.eat(&token::DotDot) {
let exp_span = self.prev_token.span;
// We permit `.. }` on the left-hand side of a destructuring assignment.
if self.check(&token::CloseDelim(close_delim)) {
base = ast::StructRest::Rest(self.prev_token.span.shrink_to_hi());
break;
}
match self.parse_expr() {
Ok(e) => base = ast::StructRest::Base(e),
Err(mut e) if recover => {
e.emit();
self.recover_stmt();
}
Err(e) => return Err(e),
}
self.recover_struct_comma_after_dotdot(exp_span);
break;
}
let recovery_field = self.find_struct_error_after_field_looking_code();
let parsed_field = match self.parse_expr_field() {
Ok(f) => Some(f),
Err(mut e) => {
if pth == kw::Async {
async_block_err(&mut e, pth.span);
} else {
e.span_label(pth.span, "while parsing this struct");
}
e.emit();
// If the next token is a comma, then try to parse
// what comes next as additional fields, rather than
// bailing out until next `}`.
if self.token != token::Comma {
self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
if self.token != token::Comma {
break;
}
}
None
}
};
match self.expect_one_of(&[token::Comma], &[token::CloseDelim(close_delim)]) {
Ok(_) => {
if let Some(f) = parsed_field.or(recovery_field) {
// Only include the field if there's no parse error for the field name.
fields.push(f);
}
}
Err(mut e) => {
if pth == kw::Async {
async_block_err(&mut e, pth.span);
} else {
e.span_label(pth.span, "while parsing this struct");
if let Some(f) = recovery_field {
fields.push(f);
e.span_suggestion(
self.prev_token.span.shrink_to_hi(),
"try adding a comma",
",".into(),
Applicability::MachineApplicable,
);
}
}
if !recover {
return Err(e);
}
e.emit();
self.recover_stmt_(SemiColonMode::Comma, BlockMode::Ignore);
self.eat(&token::Comma);
}
}
}
Ok((fields, base, recover_async))
}
/// Precondition: already parsed the '{'.
pub(super) fn parse_struct_expr(
&mut self,
qself: Option<ast::QSelf>,
pth: ast::Path,
attrs: AttrVec,
recover: bool,
) -> PResult<'a, P<Expr>> {
let lo = pth.span;
let (fields, base, recover_async) =
self.parse_struct_fields(pth.clone(), recover, token::Brace)?;
let span = lo.to(self.token.span);
self.expect(&token::CloseDelim(token::Brace))?;
let expr = if recover_async {
ExprKind::Err
} else {
ExprKind::Struct(P(ast::StructExpr { qself, path: pth, fields, rest: base }))
};
Ok(self.mk_expr(span, expr, attrs))
}
/// Used in case of an error after field-looking code: `S { foo: () with a }`.
fn find_struct_error_after_field_looking_code(&self) -> Option<ExprField> {
match self.token.ident() {
Some((ident, is_raw))
if (is_raw || !ident.is_reserved())
&& self.look_ahead(1, |t| *t == token::Colon) =>
{
Some(ast::ExprField {
ident,
span: self.token.span,
expr: self.mk_expr_err(self.token.span),
is_shorthand: false,
attrs: AttrVec::new(),
id: DUMMY_NODE_ID,
is_placeholder: false,
})
}
_ => None,
}
}
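/// Errors on a trailing comma after the base struct, e.g. `S { ..base, }`,
/// noting that the base must be last, and recovers.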
fn recover_struct_comma_after_dotdot(&mut self, span: Span) {
if self.token != token::Comma {
return;
}
self.struct_span_err(
span.to(self.prev_token.span),
"cannot use a comma after the base struct",
)
.span_suggestion_short(
self.token.span,
"remove this comma",
String::new(),
Applicability::MachineApplicable,
)
.note("the base struct must always be the last field")
.emit();
self.recover_stmt();
}
/// Parses `ident (COLON expr)?`.
fn parse_expr_field(&mut self) -> PResult<'a, ExprField> {
| /// Check for `=`. This means the source incorrectly attempts to
/// initialize a field with an `=` rather than a `:`.
fn error_on_eq_field_init(&self, field_name: Ident) {
if self.token != token::Eq {
return;
}
self.struct_span_err(self.token.span, "expected `:`, found `=`")
.span_suggestion(
field_name.span.shrink_to_hi().to(self.token.span),
"replace equals symbol with a colon",
":".to_string(),
Applicability::MachineApplicable,
)
.emit();
}
fn err_dotdotdot_syntax(&self, span: Span) {
self.struct_span_err(span, "unexpected token: `...`")
.span_suggestion(
span,
"use `..` for an exclusive range",
"..".to_owned(),
Applicability::MaybeIncorrect,
)
.span_suggestion(
span,
"or `..=` for an inclusive range",
"..=".to_owned(),
Applicability::MaybeIncorrect,
)
.emit();
}
fn err_larrow_operator(&self, span: Span) {
self.struct_span_err(span, "unexpected token: `<-`")
.span_suggestion(
span,
"if you meant to write a comparison against a negative value, add a \
space in between `<` and `-`",
"< -".to_string(),
Applicability::MaybeIncorrect,
)
.emit();
}
fn mk_assign_op(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
ExprKind::AssignOp(binop, lhs, rhs)
}
fn mk_range(
&mut self,
start: Option<P<Expr>>,
end: Option<P<Expr>>,
limits: RangeLimits,
) -> ExprKind {
if end.is_none() && limits == RangeLimits::Closed {
self.inclusive_range_with_incorrect_end(self.prev_token.span);
ExprKind::Err
} else {
ExprKind::Range(start, end, limits)
}
}
fn mk_unary(&self, unop: UnOp, expr: P<Expr>) -> ExprKind {
ExprKind::Unary(unop, expr)
}
fn mk_binary(&self, binop: BinOp, lhs: P<Expr>, rhs: P<Expr>) -> ExprKind {
ExprKind::Binary(binop, lhs, rhs)
}
fn mk_index(&self, expr: P<Expr>, idx: P<Expr>) -> ExprKind {
ExprKind::Index(expr, idx)
}
fn mk_call(&self, f: P<Expr>, args: Vec<P<Expr>>) -> ExprKind {
ExprKind::Call(f, args)
}
fn mk_await_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> {
let span = lo.to(self.prev_token.span);
let await_expr = self.mk_expr(span, ExprKind::Await(self_arg), AttrVec::new());
self.recover_from_await_method_call();
await_expr
}
crate fn mk_expr(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> {
P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None })
}
pub(super) fn mk_expr_err(&self, span: Span) -> P<Expr> {
self.mk_expr(span, ExprKind::Err, AttrVec::new())
}
/// Create expression span ensuring the span of the parent node
/// is larger than the span of lhs and rhs, including the attributes.
fn mk_expr_sp(&self, lhs: &P<Expr>, lhs_span: Span, rhs_span: Span) -> Span {
lhs.attrs
.iter()
.find(|a| a.style == AttrStyle::Outer)
.map_or(lhs_span, |a| a.span)
.to(rhs_span)
}
fn collect_tokens_for_expr(
&mut self,
attrs: AttrWrapper,
f: impl FnOnce(&mut Self, Vec<ast::Attribute>) -> PResult<'a, P<Expr>>,
) -> PResult<'a, P<Expr>> {
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let res = f(this, attrs)?;
let trailing = if this.restrictions.contains(Restrictions::STMT_EXPR)
&& this.token.kind == token::Semi
{
TrailingToken::Semi
} else {
// FIXME - pass this through from the place where we know
// we need a comma, rather than assuming that `#[attr] expr,`
// always captures a trailing comma
TrailingToken::MaybeComma
};
Ok((res, trailing))
})
}
}
| let attrs = self.parse_outer_attributes()?;
self.collect_tokens_trailing_token(attrs, ForceCollect::No, |this, attrs| {
let lo = this.token.span;
// Check if a colon (or an erroneous `=`) exists one ahead. This means we're parsing a fieldname.
let is_shorthand = !this.look_ahead(1, |t| t == &token::Colon || t == &token::Eq);
let (ident, expr) = if is_shorthand {
// Mimic `x: x` for the `x` field shorthand.
let ident = this.parse_ident_common(false)?;
let path = ast::Path::from_ident(ident);
(ident, this.mk_expr(ident.span, ExprKind::Path(None, path), AttrVec::new()))
} else {
let ident = this.parse_field_name()?;
this.error_on_eq_field_init(ident);
this.bump(); // `:`
(ident, this.parse_expr()?)
};
Ok((
ast::ExprField {
ident,
span: lo.to(expr.span),
expr,
is_shorthand,
attrs: attrs.into(),
id: DUMMY_NODE_ID,
is_placeholder: false,
},
TrailingToken::MaybeComma,
))
})
}
|
api_op_CreateReplaceRootVolumeTask.go | // Code generated by smithy-go-codegen DO NOT EDIT.
package ec2
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
"github.com/aws/aws-sdk-go-v2/service/ec2/types"
"github.com/aws/smithy-go/middleware"
smithyhttp "github.com/aws/smithy-go/transport/http"
)
// Creates a root volume replacement task for an Amazon EC2 instance. The root
// volume can either be restored to its initial launch state, or it can be restored
// using a specific snapshot. For more information, see Replace a root volume
// (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-restoring-volume.html#replace-root)
// in the Amazon Elastic Compute Cloud User Guide.
func (c *Client) CreateReplaceRootVolumeTask(ctx context.Context, params *CreateReplaceRootVolumeTaskInput, optFns ...func(*Options)) (*CreateReplaceRootVolumeTaskOutput, error) {
if params == nil {
params = &CreateReplaceRootVolumeTaskInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CreateReplaceRootVolumeTask", params, optFns, c.addOperationCreateReplaceRootVolumeTaskMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CreateReplaceRootVolumeTaskOutput)
out.ResultMetadata = metadata
return out, nil
}
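// Example usage (an illustrative sketch only, not part of the generated code;
// it assumes the aws and config packages from aws-sdk-go-v2 are imported, and
// the instance ID is hypothetical):
//
//	cfg, err := config.LoadDefaultConfig(context.TODO())
//	if err != nil {
//		// handle error
//	}
//	client := ec2.NewFromConfig(cfg)
//	out, err := client.CreateReplaceRootVolumeTask(context.TODO(),
//		&ec2.CreateReplaceRootVolumeTaskInput{
//			InstanceId: aws.String("i-0123456789abcdef0"),
//		})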
type CreateReplaceRootVolumeTaskInput struct {
// The ID of the instance for which to replace the root volume.
//
// This member is required.
InstanceId *string
// Unique, case-sensitive identifier you provide to ensure the idempotency of the
// request. If you do not specify a client token, a randomly generated token is
// used for the request to ensure idempotency. For more information, see Ensuring
// idempotency
// (https://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html).
ClientToken *string
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have the
// required permissions, the error response is DryRunOperation. Otherwise, it is
// UnauthorizedOperation.
DryRun *bool
// The ID of the snapshot from which to restore the replacement root volume. If you
// want to restore the volume to the initial launch state, omit this parameter.
SnapshotId *string
// The tags to apply to the root volume replacement task.
TagSpecifications []types.TagSpecification
noSmithyDocumentSerde
}
type CreateReplaceRootVolumeTaskOutput struct {
// Information about the root volume replacement task.
ReplaceRootVolumeTask *types.ReplaceRootVolumeTask
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCreateReplaceRootVolumeTaskMiddlewares(stack *middleware.Stack, options Options) (err error) {
err = stack.Serialize.Add(&awsEc2query_serializeOpCreateReplaceRootVolumeTask{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsEc2query_deserializeOpCreateReplaceRootVolumeTask{}, middleware.After)
if err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil {
return err
}
if err = addRetryMiddlewares(stack, options); err != nil {
return err
}
if err = addHTTPSignerV4Middleware(stack, options); err != nil {
return err
}
if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil {
return err
}
if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil {
return err
}
if err = addClientUserAgent(stack); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addIdempotencyToken_opCreateReplaceRootVolumeTaskMiddleware(stack, options); err != nil {
return err
}
if err = addOpCreateReplaceRootVolumeTaskValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCreateReplaceRootVolumeTask(options.Region), middleware.Before); err != nil {
return err
}
if err = addRequestIDRetrieverMiddleware(stack); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
return nil
}
type idempotencyToken_initializeOpCreateReplaceRootVolumeTask struct {
tokenProvider IdempotencyTokenProvider
}
func (*idempotencyToken_initializeOpCreateReplaceRootVolumeTask) ID() string {
return "OperationIdempotencyTokenAutoFill"
}
func (m *idempotencyToken_initializeOpCreateReplaceRootVolumeTask) HandleInitialize(ctx context.Context, in middleware.InitializeInput, next middleware.InitializeHandler) (
out middleware.InitializeOutput, metadata middleware.Metadata, err error,
) {
if m.tokenProvider == nil {
return next.HandleInitialize(ctx, in)
}
input, ok := in.Parameters.(*CreateReplaceRootVolumeTaskInput)
if !ok {
return out, metadata, fmt.Errorf("expected middleware input to be of type *CreateReplaceRootVolumeTaskInput")
}
if input.ClientToken == nil |
return next.HandleInitialize(ctx, in)
}
func addIdempotencyToken_opCreateReplaceRootVolumeTaskMiddleware(stack *middleware.Stack, cfg Options) error {
return stack.Initialize.Add(&idempotencyToken_initializeOpCreateReplaceRootVolumeTask{tokenProvider: cfg.IdempotencyTokenProvider}, middleware.Before)
}
func newServiceMetadataMiddleware_opCreateReplaceRootVolumeTask(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
SigningName: "ec2",
OperationName: "CreateReplaceRootVolumeTask",
}
}
| {
t, err := m.tokenProvider.GetIdempotencyToken()
if err != nil {
return out, metadata, err
}
input.ClientToken = &t
} |
pat.rs | use super::{Parser, PathStyle};
use crate::{maybe_recover_from_interpolated_ty_qpath, maybe_whole};
use rustc_ast::ast::{self, AttrVec, Attribute, FieldPat, MacCall, Pat, PatKind, RangeEnd};
use rustc_ast::ast::{BindingMode, Expr, ExprKind, Ident, Mutability, Path, QSelf, RangeSyntax};
use rustc_ast::mut_visit::{noop_visit_mac, noop_visit_pat, MutVisitor};
use rustc_ast::ptr::P;
use rustc_ast::token;
use rustc_ast_pretty::pprust;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, PResult};
use rustc_span::source_map::{respan, Span, Spanned};
use rustc_span::symbol::{kw, sym};
type Expected = Option<&'static str>;
/// `Expected` for function and lambda parameter patterns.
pub(super) const PARAM_EXPECTED: Expected = Some("parameter name");
const WHILE_PARSING_OR_MSG: &str = "while parsing this or-pattern starting here";
/// Whether or not an or-pattern should be gated when occurring in the current context.
#[derive(PartialEq)]
pub(super) enum GateOr {
Yes,
No,
}
/// Whether or not to recover a `,` when parsing or-patterns.
#[derive(PartialEq, Copy, Clone)]
enum RecoverComma {
Yes,
No,
}
impl<'a> Parser<'a> {
/// Parses a pattern.
///
/// Corresponds to `pat<no_top_alt>` in RFC 2535 and does not admit or-patterns
/// at the top level. Used when parsing the parameters of lambda expressions,
/// functions, function pointers, and `pat` macro fragments.
pub fn parse_pat(&mut self, expected: Expected) -> PResult<'a, P<Pat>> {
self.parse_pat_with_range_pat(true, expected)
}
/// Entry point to the main pattern parser.
/// Corresponds to `top_pat` in RFC 2535 and allows or-pattern at the top level.
pub(super) fn parse_top_pat(&mut self, gate_or: GateOr) -> PResult<'a, P<Pat>> {
// Allow a '|' before the pats (RFCs 1925, 2530, and 2535).
let gated_leading_vert = self.eat_or_separator(None) && gate_or == GateOr::Yes;
let leading_vert_span = self.prev_token.span;
// Parse the possibly-or-pattern.
let pat = self.parse_pat_with_or(None, gate_or, RecoverComma::Yes)?;
// If we parsed a leading `|` which should be gated,
// and no other gated or-pattern has been parsed thus far,
// then we should really gate the leading `|`.
// This complicated procedure is done purely for diagnostics UX.
if gated_leading_vert && self.sess.gated_spans.is_ungated(sym::or_patterns) {
self.sess.gated_spans.gate(sym::or_patterns, leading_vert_span);
}
Ok(pat)
}
/// Parse the pattern for a function or function pointer parameter.
/// Special recovery is provided for or-patterns and leading `|`.
pub(super) fn parse_fn_param_pat(&mut self) -> PResult<'a, P<Pat>> {
self.recover_leading_vert(None, "not allowed in a parameter pattern");
let pat = self.parse_pat_with_or(PARAM_EXPECTED, GateOr::No, RecoverComma::No)?;
if let PatKind::Or(..) = &pat.kind {
self.ban_illegal_fn_param_or_pat(&pat);
}
Ok(pat)
}
/// Ban `A | B` immediately in a parameter pattern and suggest wrapping in parens.
fn ban_illegal_fn_param_or_pat(&self, pat: &Pat) {
let msg = "wrap the pattern in parentheses";
let fix = format!("({})", pprust::pat_to_string(pat));
self.struct_span_err(pat.span, "an or-pattern parameter must be wrapped in parentheses")
.span_suggestion(pat.span, msg, fix, Applicability::MachineApplicable)
.emit();
}
/// Parses a pattern, that may be a or-pattern (e.g. `Foo | Bar` in `Some(Foo | Bar)`).
/// Corresponds to `pat<allow_top_alt>` in RFC 2535.
fn parse_pat_with_or(
&mut self,
expected: Expected,
gate_or: GateOr,
rc: RecoverComma,
) -> PResult<'a, P<Pat>> {
// Parse the first pattern (`p_0`).
let first_pat = self.parse_pat(expected)?;
self.maybe_recover_unexpected_comma(first_pat.span, rc)?;
// If the next token is not a `|`,
// this is not an or-pattern and we should exit here.
if !self.check(&token::BinOp(token::Or)) && self.token != token::OrOr {
return Ok(first_pat);
}
// Parse the patterns `p_1 | ... | p_n` where `n > 0`.
let lo = first_pat.span;
let mut pats = vec![first_pat];
while self.eat_or_separator(Some(lo)) {
let pat = self.parse_pat(expected).map_err(|mut err| {
err.span_label(lo, WHILE_PARSING_OR_MSG);
err
})?;
self.maybe_recover_unexpected_comma(pat.span, rc)?;
pats.push(pat);
}
let or_pattern_span = lo.to(self.prev_token.span);
// Feature gate the or-pattern if instructed:
if gate_or == GateOr::Yes {
self.sess.gated_spans.gate(sym::or_patterns, or_pattern_span);
}
Ok(self.mk_pat(or_pattern_span, PatKind::Or(pats)))
}
/// Eat the or-pattern `|` separator.
/// If instead a `||` token is encountered, recover and pretend we parsed `|`.
fn eat_or_separator(&mut self, lo: Option<Span>) -> bool {
if self.recover_trailing_vert(lo) {
return false;
}
match self.token.kind {
token::OrOr => {
// Found `||`; Recover and pretend we parsed `|`.
self.ban_unexpected_or_or(lo);
self.bump();
true
}
_ => self.eat(&token::BinOp(token::Or)),
}
}
/// Recover if `|` or `||` is the current token and we have one of the
/// tokens `=>`, `if`, `=`, `:`, `;`, `,`, `]`, `)`, or `}` ahead of us.
///
/// These tokens all indicate that we reached the end of the or-pattern
/// list and can now reliably say that the `|` was an illegal trailing vert.
/// Note that there are more tokens such as `@` for which we know that the `|`
/// is an illegal parse. However, the user's intent is less clear in that case.
fn recover_trailing_vert(&mut self, lo: Option<Span>) -> bool {
let is_end_ahead = self.look_ahead(1, |token| match &token.uninterpolate().kind {
token::FatArrow // e.g. `a | => 0,`.
| token::Ident(kw::If, false) // e.g. `a | if expr`.
| token::Eq // e.g. `let a | = 0`.
| token::Semi // e.g. `let a |;`.
| token::Colon // e.g. `let a | :`.
| token::Comma // e.g. `let (a |,)`.
| token::CloseDelim(token::Bracket) // e.g. `let [a | ]`.
| token::CloseDelim(token::Paren) // e.g. `let (a | )`.
| token::CloseDelim(token::Brace) => true, // e.g. `let A { f: a | }`.
_ => false,
});
match (is_end_ahead, &self.token.kind) {
(true, token::BinOp(token::Or)) | (true, token::OrOr) => {
self.ban_illegal_vert(lo, "trailing", "not allowed in an or-pattern");
self.bump();
true
}
_ => false,
}
}
/// We have parsed `||` instead of `|`. Error and suggest `|` instead.
fn ban_unexpected_or_or(&mut self, lo: Option<Span>) {
let mut err = self.struct_span_err(self.token.span, "unexpected token `||` after pattern");
err.span_suggestion(
self.token.span,
"use a single `|` to separate multiple alternative patterns",
"|".to_owned(),
Applicability::MachineApplicable,
);
if let Some(lo) = lo {
err.span_label(lo, WHILE_PARSING_OR_MSG);
}
err.emit();
}
/// Some special error handling for the "top-level" patterns in a match arm,
/// `for` loop, `let`, &c. (in contrast to subpatterns within such).
fn maybe_recover_unexpected_comma(&mut self, lo: Span, rc: RecoverComma) -> PResult<'a, ()> {
if rc == RecoverComma::No || self.token != token::Comma {
return Ok(());
}
// An unexpected comma after a top-level pattern is a clue that the
// user (perhaps more accustomed to some other language) forgot the
// parentheses in what should have been a tuple pattern; return a
// suggestion-enhanced error here rather than choking on the comma later.
let comma_span = self.token.span;
self.bump();
if let Err(mut err) = self.skip_pat_list() {
// We didn't expect this to work anyway; we just wanted to advance to the
// end of the comma-sequence so we know the span to suggest parenthesizing.
err.cancel();
}
let seq_span = lo.to(self.prev_token.span);
let mut err = self.struct_span_err(comma_span, "unexpected `,` in pattern");
if let Ok(seq_snippet) = self.span_to_snippet(seq_span) {
err.span_suggestion(
seq_span,
"try adding parentheses to match on a tuple...",
format!("({})", seq_snippet),
Applicability::MachineApplicable,
)
.span_suggestion(
seq_span,
"...or a vertical bar to match on multiple alternatives",
seq_snippet.replace(",", " |"),
Applicability::MachineApplicable,
);
}
Err(err)
}
/// Parse and throw away a parenthesized comma-separated
/// sequence of patterns until `)` is reached.
fn skip_pat_list(&mut self) -> PResult<'a, ()> {
while !self.check(&token::CloseDelim(token::Paren)) {
self.parse_pat(None)?;
if !self.eat(&token::Comma) {
return Ok(());
}
}
Ok(())
}
/// Recursive possibly-or-pattern parser with recovery for an erroneous leading `|`.
/// See `parse_pat_with_or` for details on parsing or-patterns.
fn parse_pat_with_or_inner(&mut self) -> PResult<'a, P<Pat>> {
self.recover_leading_vert(None, "only allowed in a top-level pattern");
self.parse_pat_with_or(None, GateOr::Yes, RecoverComma::No)
}
/// Recover if `|` or `||` is here.
/// The user is thinking that a leading `|` is allowed in this position.
fn recover_leading_vert(&mut self, lo: Option<Span>, ctx: &str) {
if let token::BinOp(token::Or) | token::OrOr = self.token.kind {
self.ban_illegal_vert(lo, "leading", ctx);
self.bump();
}
}
/// A `|` or possibly `||` token shouldn't be here. Ban it.
fn ban_illegal_vert(&mut self, lo: Option<Span>, pos: &str, ctx: &str) {
let span = self.token.span;
let mut err = self.struct_span_err(span, &format!("a {} `|` is {}", pos, ctx));
err.span_suggestion(
span,
&format!("remove the `{}`", pprust::token_to_string(&self.token)),
String::new(),
Applicability::MachineApplicable,
);
if let Some(lo) = lo {
err.span_label(lo, WHILE_PARSING_OR_MSG);
}
if let token::OrOr = self.token.kind {
err.note("alternatives in or-patterns are separated with `|`, not `||`");
}
err.emit();
}
/// Parses a pattern, with a setting whether modern range patterns (e.g., `a..=b`, `a..b` are
/// allowed).
fn parse_pat_with_range_pat(
&mut self,
allow_range_pat: bool,
expected: Expected,
) -> PResult<'a, P<Pat>> {
maybe_recover_from_interpolated_ty_qpath!(self, true);
maybe_whole!(self, NtPat, |x| x);
let lo = self.token.span;
let pat = if self.check(&token::BinOp(token::And)) || self.token.kind == token::AndAnd {
self.parse_pat_deref(expected)?
} else if self.check(&token::OpenDelim(token::Paren)) {
self.parse_pat_tuple_or_parens()?
} else if self.check(&token::OpenDelim(token::Bracket)) {
// Parse `[pat, pat,...]` as a slice pattern.
let (pats, _) =
self.parse_delim_comma_seq(token::Bracket, |p| p.parse_pat_with_or_inner())?;
PatKind::Slice(pats)
} else if self.check(&token::DotDot) && !self.is_pat_range_end_start(1) {
// A rest pattern `..`.
self.bump(); // `..`
PatKind::Rest
} else if let Some(form) = self.parse_range_end() {
self.parse_pat_range_to(form)? // `..=X`, `...X`, or `..X`.
} else if self.eat_keyword(kw::Underscore) {
// Parse _
PatKind::Wild
} else if self.eat_keyword(kw::Mut) {
self.parse_pat_ident_mut()?
} else if self.eat_keyword(kw::Ref) {
// Parse ref ident @ pat / ref mut ident @ pat
let mutbl = self.parse_mutability();
self.parse_pat_ident(BindingMode::ByRef(mutbl))?
} else if self.eat_keyword(kw::Box) {
// Parse `box pat`
let pat = self.parse_pat_with_range_pat(false, None)?;
self.sess.gated_spans.gate(sym::box_patterns, lo.to(self.prev_token.span));
PatKind::Box(pat)
} else if self.can_be_ident_pat() {
// Parse `ident @ pat`
// This can give false positives and parse nullary enums,
// they are dealt with later in resolve.
self.parse_pat_ident(BindingMode::ByValue(Mutability::Not))?
} else if self.is_start_of_pat_with_path() {
// Parse pattern starting with a path
let (qself, path) = if self.eat_lt() {
// Parse a qualified path
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
(Some(qself), path)
} else {
// Parse an unqualified path
(None, self.parse_path(PathStyle::Expr)?)
};
let span = lo.to(self.prev_token.span);
if qself.is_none() && self.check(&token::Not) {
self.parse_pat_mac_invoc(path)?
} else if let Some(form) = self.parse_range_end() {
let begin = self.mk_expr(span, ExprKind::Path(qself, path), AttrVec::new());
self.parse_pat_range_begin_with(begin, form)?
} else if self.check(&token::OpenDelim(token::Brace)) {
self.parse_pat_struct(qself, path)?
} else if self.check(&token::OpenDelim(token::Paren)) {
self.parse_pat_tuple_struct(qself, path)?
} else {
PatKind::Path(qself, path)
}
} else {
// Try to parse everything else as literal with optional minus
match self.parse_literal_maybe_minus() {
Ok(begin) => match self.parse_range_end() {
Some(form) => self.parse_pat_range_begin_with(begin, form)?,
None => PatKind::Lit(begin),
},
Err(err) => return self.fatal_unexpected_non_pat(err, expected),
}
};
let pat = self.mk_pat(lo.to(self.prev_token.span), pat);
let pat = self.maybe_recover_from_bad_qpath(pat, true)?;
let pat = self.recover_intersection_pat(pat)?;
if !allow_range_pat {
self.ban_pat_range_if_ambiguous(&pat)
}
Ok(pat)
}
/// Try to recover the more general form `intersect ::= $pat_lhs @ $pat_rhs`.
///
/// Allowed binding patterns generated by `binding ::= ref? mut? $ident @ $pat_rhs`
/// should already have been parsed by this point,
/// if the next token is `@` then we can try to parse the more general form.
///
/// Consult `parse_pat_ident` for the `binding` grammar.
///
/// The notion of intersection patterns is found in
/// e.g. [F#][and] where they are called AND-patterns.
///
/// [and]: https://docs.microsoft.com/en-us/dotnet/fsharp/language-reference/pattern-matching
fn recover_intersection_pat(&mut self, lhs: P<Pat>) -> PResult<'a, P<Pat>> {
if self.token.kind != token::At {
// Next token is not `@` so it's not going to be an intersection pattern.
return Ok(lhs);
}
// At this point we attempt to parse `@ $pat_rhs` and emit an error.
self.bump(); // `@`
let mut rhs = self.parse_pat(None)?;
let sp = lhs.span.to(rhs.span);
if let PatKind::Ident(_, _, ref mut sub @ None) = rhs.kind {
// The user inverted the order, so help them fix that.
let mut applicability = Applicability::MachineApplicable;
// FIXME(bindings_after_at): Remove this code when stabilizing the feature.
lhs.walk(&mut |p| match p.kind {
// `check_match` is unhappy if the subpattern has a binding anywhere.
PatKind::Ident(..) => {
applicability = Applicability::MaybeIncorrect;
false // Short-circuit.
}
_ => true,
});
let lhs_span = lhs.span;
// Move the LHS into the RHS as a subpattern.
// The RHS is now the full pattern.
*sub = Some(lhs);
self.struct_span_err(sp, "pattern on wrong side of `@`")
.span_label(lhs_span, "pattern on the left, should be on the right")
.span_label(rhs.span, "binding on the right, should be on the left")
.span_suggestion(sp, "switch the order", pprust::pat_to_string(&rhs), applicability)
.emit();
} else {
// The special case above doesn't apply so we may have e.g. `A(x) @ B(y)`.
rhs.kind = PatKind::Wild;
self.struct_span_err(sp, "left-hand side of `@` must be a binding")
.span_label(lhs.span, "interpreted as a pattern, not a binding")
.span_label(rhs.span, "also a pattern")
.note("bindings are `x`, `mut x`, `ref x`, and `ref mut x`")
.emit();
}
rhs.span = sp;
Ok(rhs)
}
/// Ban a range pattern if it has an ambiguous interpretation.
fn ban_pat_range_if_ambiguous(&self, pat: &Pat) {
match pat.kind {
PatKind::Range(
..,
Spanned { node: RangeEnd::Included(RangeSyntax::DotDotDot), .. },
) => return,
PatKind::Range(..) => {}
_ => return,
}
self.struct_span_err(pat.span, "the range pattern here has ambiguous interpretation")
.span_suggestion(
pat.span,
"add parentheses to clarify the precedence",
format!("({})", pprust::pat_to_string(&pat)),
// "ambiguous interpretation" implies that we have to be guessing
Applicability::MaybeIncorrect,
)
.emit();
}
/// Parse `&pat` / `&mut pat`.
fn parse_pat_deref(&mut self, expected: Expected) -> PResult<'a, PatKind> {
self.expect_and()?;
self.recover_lifetime_in_deref_pat();
let mutbl = self.parse_mutability();
let subpat = self.parse_pat_with_range_pat(false, expected)?;
Ok(PatKind::Ref(subpat, mutbl))
}
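/// Recovers from an unexpected lifetime in a `&`-pattern, e.g. `&'a pat`,
/// by erroring and suggesting that the lifetime be removed.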
fn recover_lifetime_in_deref_pat(&mut self) {
if let token::Lifetime(name) = self.token.kind {
self.bump(); // `'a`
let span = self.prev_token.span;
self.struct_span_err(span, &format!("unexpected lifetime `{}` in pattern", name))
.span_suggestion(
span,
"remove the lifetime",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
}
/// Parse a tuple or parenthesis pattern.
fn parse_pat_tuple_or_parens(&mut self) -> PResult<'a, PatKind> {
let (fields, trailing_comma) =
self.parse_paren_comma_seq(|p| p.parse_pat_with_or_inner())?;
// Here, `(pat,)` is a tuple pattern.
// For backward compatibility, `(..)` is a tuple pattern as well.
Ok(if fields.len() == 1 && !(trailing_comma || fields[0].is_rest()) {
PatKind::Paren(fields.into_iter().next().unwrap())
} else {
PatKind::Tuple(fields)
})
}
/// Parse a mutable binding with the `mut` token already eaten.
fn parse_pat_ident_mut(&mut self) -> PResult<'a, PatKind> {
let mut_span = self.prev_token.span;
if self.eat_keyword(kw::Ref) {
return self.recover_mut_ref_ident(mut_span);
}
self.recover_additional_muts();
// Make sure we don't allow e.g. `let mut $p;` where `$p:pat`.
if let token::Interpolated(ref nt) = self.token.kind {
if let token::NtPat(_) = **nt {
self.expected_ident_found().emit();
}
}
// Parse the pattern we hope to be an identifier.
let mut pat = self.parse_pat(Some("identifier"))?;
// If we don't have `mut $ident (@ pat)?`, error.
if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind {
// Don't recurse into the subpattern.
// `mut` on the outer binding doesn't affect the inner bindings.
*m = Mutability::Mut;
} else {
// Add `mut` to any binding in the parsed pattern.
let changed_any_binding = Self::make_all_value_bindings_mutable(&mut pat);
self.ban_mut_general_pat(mut_span, &pat, changed_any_binding);
}
Ok(pat.into_inner().kind)
}
/// Recover on `mut ref? ident @ pat` and suggest
/// that the order of `mut` and `ref` is incorrect.
fn recover_mut_ref_ident(&mut self, lo: Span) -> PResult<'a, PatKind> {
let mutref_span = lo.to(self.prev_token.span);
self.struct_span_err(mutref_span, "the order of `mut` and `ref` is incorrect")
.span_suggestion(
mutref_span,
"try switching the order",
"ref mut".into(),
Applicability::MachineApplicable,
)
.emit();
self.parse_pat_ident(BindingMode::ByRef(Mutability::Mut))
}
/// Turn all by-value immutable bindings in a pattern into mutable bindings.
/// Returns `true` if any change was made.
fn make_all_value_bindings_mutable(pat: &mut P<Pat>) -> bool {
struct AddMut(bool);
impl MutVisitor for AddMut {
fn visit_mac(&mut self, mac: &mut MacCall) {
noop_visit_mac(mac, self);
}
fn visit_pat(&mut self, pat: &mut P<Pat>) {
if let PatKind::Ident(BindingMode::ByValue(m @ Mutability::Not), ..) = &mut pat.kind
{
self.0 = true;
*m = Mutability::Mut;
}
noop_visit_pat(pat, self);
}
}
let mut add_mut = AddMut(false);
add_mut.visit_pat(pat);
add_mut.0
}
/// Error on `mut $pat` where `$pat` is not an ident.
fn ban_mut_general_pat(&self, lo: Span, pat: &Pat, changed_any_binding: bool) {
let span = lo.to(pat.span);
let fix = pprust::pat_to_string(&pat);
let (problem, suggestion) = if changed_any_binding {
("`mut` must be attached to each individual binding", "add `mut` to each binding")
} else {
("`mut` must be followed by a named binding", "remove the `mut` prefix")
};
self.struct_span_err(span, problem) | }
/// Eat any extraneous `mut`s and error + recover if we ate any.
fn recover_additional_muts(&mut self) {
let lo = self.token.span;
while self.eat_keyword(kw::Mut) {}
if lo == self.token.span {
return;
}
let span = lo.to(self.prev_token.span);
self.struct_span_err(span, "`mut` on a binding may not be repeated")
.span_suggestion(
span,
"remove the additional `mut`s",
String::new(),
Applicability::MachineApplicable,
)
.emit();
}
/// Parse macro invocation
fn parse_pat_mac_invoc(&mut self, path: Path) -> PResult<'a, PatKind> {
self.bump();
let args = self.parse_mac_args()?;
let mac = MacCall { path, args, prior_type_ascription: self.last_type_ascription };
Ok(PatKind::MacCall(mac))
}
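/// Cancels the given error and replaces it with an
/// "expected <thing>, found <token>" error at the current token.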
fn fatal_unexpected_non_pat(
&mut self,
mut err: DiagnosticBuilder<'a>,
expected: Expected,
) -> PResult<'a, P<Pat>> {
err.cancel();
let expected = expected.unwrap_or("pattern");
let msg = format!("expected {}, found {}", expected, super::token_descr(&self.token));
let mut err = self.struct_span_err(self.token.span, &msg);
err.span_label(self.token.span, format!("expected {}", expected));
let sp = self.sess.source_map().start_point(self.token.span);
if let Some(sp) = self.sess.ambiguous_block_expr_parse.borrow().get(&sp) {
self.sess.expr_parentheses_needed(&mut err, *sp, None);
}
Err(err)
}
/// Parses the range pattern end form `".." | "..." | "..=" ;`.
fn parse_range_end(&mut self) -> Option<Spanned<RangeEnd>> {
let re = if self.eat(&token::DotDotDot) {
RangeEnd::Included(RangeSyntax::DotDotDot)
} else if self.eat(&token::DotDotEq) {
RangeEnd::Included(RangeSyntax::DotDotEq)
} else if self.eat(&token::DotDot) {
self.sess.gated_spans.gate(sym::exclusive_range_pattern, self.prev_token.span);
RangeEnd::Excluded
} else {
return None;
};
Some(respan(self.prev_token.span, re))
}
/// Parse a range pattern `$begin $form $end?` where `$form = ".." | "..." | "..=" ;`.
/// `$begin $form` has already been parsed.
fn parse_pat_range_begin_with(
&mut self,
begin: P<Expr>,
re: Spanned<RangeEnd>,
) -> PResult<'a, PatKind> {
let end = if self.is_pat_range_end_start(0) {
// Parsing e.g. `X..=Y`.
Some(self.parse_pat_range_end()?)
} else {
// Parsing e.g. `X..`.
self.sess.gated_spans.gate(sym::half_open_range_patterns, begin.span.to(re.span));
if let RangeEnd::Included(_) = re.node {
// FIXME(Centril): Consider semantic errors instead in `ast_validation`.
// Possibly also do this for `X..=` in *expression* contexts.
self.error_inclusive_range_with_no_end(re.span);
}
None
};
Ok(PatKind::Range(Some(begin), end, re))
}
pub(super) fn error_inclusive_range_with_no_end(&self, span: Span) {
struct_span_err!(self.sess.span_diagnostic, span, E0586, "inclusive range with no end")
.span_suggestion_short(
span,
"use `..` instead",
"..".to_string(),
Applicability::MachineApplicable,
)
.note("inclusive ranges must be bounded at the end (`..=b` or `a..=b`)")
.emit();
}
/// Parse a range-to pattern, `..X` or `..=X` where `X` remains to be parsed.
///
/// The form `...X` is prohibited to reduce confusion with the potential
/// expression syntax `...expr` for splatting in expressions.
fn parse_pat_range_to(&mut self, mut re: Spanned<RangeEnd>) -> PResult<'a, PatKind> {
let end = self.parse_pat_range_end()?;
self.sess.gated_spans.gate(sym::half_open_range_patterns, re.span.to(self.prev_token.span));
if let RangeEnd::Included(ref mut syn @ RangeSyntax::DotDotDot) = &mut re.node {
*syn = RangeSyntax::DotDotEq;
self.struct_span_err(re.span, "range-to patterns with `...` are not allowed")
.span_suggestion_short(
re.span,
"use `..=` instead",
"..=".to_string(),
Applicability::MachineApplicable,
)
.emit();
}
Ok(PatKind::Range(None, Some(end), re))
}
/// Is the token `dist` away from the current suitable as the start of a range patterns end?
fn is_pat_range_end_start(&self, dist: usize) -> bool {
self.look_ahead(dist, |t| {
t.is_path_start() // e.g. `MY_CONST`;
|| t.kind == token::Dot // e.g. `.5` for recovery;
|| t.can_begin_literal_maybe_minus() // e.g. `42`.
|| t.is_whole_expr()
})
}
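/// Parses the end expression of a range pattern: either a (possibly
/// qualified) path, or a literal with an optional leading minus.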
fn parse_pat_range_end(&mut self) -> PResult<'a, P<Expr>> {
if self.check_path() {
let lo = self.token.span;
let (qself, path) = if self.eat_lt() {
// Parse a qualified path
let (qself, path) = self.parse_qpath(PathStyle::Expr)?;
(Some(qself), path)
} else {
// Parse an unqualified path
(None, self.parse_path(PathStyle::Expr)?)
};
let hi = self.prev_token.span;
Ok(self.mk_expr(lo.to(hi), ExprKind::Path(qself, path), AttrVec::new()))
} else {
self.parse_literal_maybe_minus()
}
}
/// Is this the start of a pattern beginning with a path?
fn is_start_of_pat_with_path(&mut self) -> bool {
self.check_path()
// Just for recovery (see `can_be_ident_pat`).
|| self.token.is_ident() && !self.token.is_bool_lit() && !self.token.is_keyword(kw::In)
}
/// Would `parse_pat_ident` be appropriate here?
fn can_be_ident_pat(&mut self) -> bool {
self.check_ident()
&& !self.token.is_bool_lit() // Avoid `true` or `false` as a binding as it is a literal.
&& !self.token.is_path_segment_keyword() // Avoid e.g. `Self` as it is a path.
// Avoid `in`. Due to recovery in the list parser this messes with `for ( $pat in $expr )`.
&& !self.token.is_keyword(kw::In)
&& self.look_ahead(1, |t| match t.kind { // Try to do something more complex?
token::OpenDelim(token::Paren) // A tuple struct pattern.
| token::OpenDelim(token::Brace) // A struct pattern.
| token::DotDotDot | token::DotDotEq | token::DotDot // A range pattern.
| token::ModSep // A tuple / struct variant pattern.
| token::Not => false, // A macro expanding to a pattern.
_ => true,
})
}
/// Parses `ident` or `ident @ pat`.
/// Used by the `copy foo` and `ref foo` patterns to give a good
/// error message when parsing mistakes like `ref foo(a, b)`.
fn parse_pat_ident(&mut self, binding_mode: BindingMode) -> PResult<'a, PatKind> {
let ident = self.parse_ident()?;
let sub = if self.eat(&token::At) {
Some(self.parse_pat(Some("binding pattern"))?)
} else {
None
};
// Just to be friendly, if they write something like `ref Some(i)`,
// we end up here with `(` as the current token.
// This shortly leads to a parse error. Note that if there is no explicit
// binding mode then we do not end up here, because the lookahead
// will direct us over to `parse_enum_variant()`.
if self.token == token::OpenDelim(token::Paren) {
return Err(self
.struct_span_err(self.prev_token.span, "expected identifier, found enum pattern"));
}
Ok(PatKind::Ident(binding_mode, ident, sub))
}
/// Parse a struct ("record") pattern (e.g. `Foo { ... }` or `Foo::Bar { ... }`).
fn parse_pat_struct(&mut self, qself: Option<QSelf>, path: Path) -> PResult<'a, PatKind> {
if qself.is_some() {
return self.error_qpath_before_pat(&path, "{");
}
self.bump();
let (fields, etc) = self.parse_pat_fields().unwrap_or_else(|mut e| {
e.emit();
self.recover_stmt();
(vec![], true)
});
self.bump();
Ok(PatKind::Struct(path, fields, etc))
}
/// Parse tuple struct or tuple variant pattern (e.g. `Foo(...)` or `Foo::Bar(...)`).
fn parse_pat_tuple_struct(&mut self, qself: Option<QSelf>, path: Path) -> PResult<'a, PatKind> {
if qself.is_some() {
return self.error_qpath_before_pat(&path, "(");
}
let (fields, _) = self.parse_paren_comma_seq(|p| p.parse_pat_with_or_inner())?;
Ok(PatKind::TupleStruct(path, fields))
}
/// Error when there's a qualified path, e.g. `<Foo as Bar>::Baz`
/// as the path of e.g., a tuple or record struct pattern.
fn error_qpath_before_pat(&mut self, path: &Path, token: &str) -> PResult<'a, PatKind> {
let msg = &format!("unexpected `{}` after qualified path", token);
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, msg);
err.span_label(path.span, "the qualified path");
Err(err)
}
/// Parses the fields of a struct-like pattern.
fn parse_pat_fields(&mut self) -> PResult<'a, (Vec<FieldPat>, bool)> {
let mut fields = Vec::new();
let mut etc = false;
let mut ate_comma = true;
let mut delayed_err: Option<DiagnosticBuilder<'a>> = None;
let mut etc_span = None;
while self.token != token::CloseDelim(token::Brace) {
let attrs = match self.parse_outer_attributes() {
Ok(attrs) => attrs,
Err(err) => {
if let Some(mut delayed) = delayed_err {
delayed.emit();
}
return Err(err);
}
};
let lo = self.token.span;
// check that a comma comes after every field
if !ate_comma {
let err = self.struct_span_err(self.prev_token.span, "expected `,`");
if let Some(mut delayed) = delayed_err {
delayed.emit();
}
return Err(err);
}
ate_comma = false;
if self.check(&token::DotDot) || self.token == token::DotDotDot {
etc = true;
let mut etc_sp = self.token.span;
self.recover_one_fewer_dotdot();
self.bump(); // `..` || `...`
if self.token == token::CloseDelim(token::Brace) {
etc_span = Some(etc_sp);
break;
}
let token_str = super::token_descr(&self.token);
let msg = &format!("expected `}}`, found {}", token_str);
let mut err = self.struct_span_err(self.token.span, msg);
err.span_label(self.token.span, "expected `}`");
let mut comma_sp = None;
if self.token == token::Comma {
// Issue #49257
let nw_span = self.sess.source_map().span_until_non_whitespace(self.token.span);
etc_sp = etc_sp.to(nw_span);
err.span_label(
etc_sp,
"`..` must be at the end and cannot have a trailing comma",
);
comma_sp = Some(self.token.span);
self.bump();
ate_comma = true;
}
etc_span = Some(etc_sp.until(self.token.span));
if self.token == token::CloseDelim(token::Brace) {
// If the struct looks otherwise well formed, recover and continue.
if let Some(sp) = comma_sp {
err.span_suggestion_short(
sp,
"remove this comma",
String::new(),
Applicability::MachineApplicable,
);
}
err.emit();
break;
} else if self.token.is_ident() && ate_comma {
// Accept fields coming after `..,`.
// This way we avoid "pattern missing fields" errors afterwards.
// We delay this error until the end in order to have a span for a
// suggested fix.
if let Some(mut delayed_err) = delayed_err {
delayed_err.emit();
return Err(err);
} else {
delayed_err = Some(err);
}
} else {
if let Some(mut err) = delayed_err {
err.emit();
}
return Err(err);
}
}
fields.push(match self.parse_pat_field(lo, attrs) {
Ok(field) => field,
Err(err) => {
if let Some(mut delayed_err) = delayed_err {
delayed_err.emit();
}
return Err(err);
}
});
ate_comma = self.eat(&token::Comma);
}
if let Some(mut err) = delayed_err {
if let Some(etc_span) = etc_span {
err.multipart_suggestion(
"move the `..` to the end of the field list",
vec![
(etc_span, String::new()),
(self.token.span, format!("{}.. }}", if ate_comma { "" } else { ", " })),
],
Applicability::MachineApplicable,
);
}
err.emit();
}
Ok((fields, etc))
}
/// Recover on `...` as if it were `..` to avoid further errors.
/// See issue #46718.
fn recover_one_fewer_dotdot(&self) {
if self.token != token::DotDotDot {
return;
}
self.struct_span_err(self.token.span, "expected field pattern, found `...`")
.span_suggestion(
self.token.span,
"to omit remaining fields, use one fewer `.`",
"..".to_owned(),
Applicability::MachineApplicable,
)
.emit();
}
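/// Parses a single field pattern: either `fieldname: pat` or the shorthand
/// `(box)? (ref)? (mut)? fieldname`.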
fn parse_pat_field(&mut self, lo: Span, attrs: Vec<Attribute>) -> PResult<'a, FieldPat> {
// Check if a colon exists one ahead. This means we're parsing a fieldname.
let hi;
let (subpat, fieldname, is_shorthand) = if self.look_ahead(1, |t| t == &token::Colon) {
// Parsing a pattern of the form `fieldname: pat`.
let fieldname = self.parse_field_name()?;
self.bump();
let pat = self.parse_pat_with_or_inner()?;
hi = pat.span;
(pat, fieldname, false)
} else {
// Parsing a pattern of the form `(box) (ref) (mut) fieldname`.
let is_box = self.eat_keyword(kw::Box);
let boxed_span = self.token.span;
let is_ref = self.eat_keyword(kw::Ref);
let is_mut = self.eat_keyword(kw::Mut);
let fieldname = self.parse_ident()?;
hi = self.prev_token.span;
let bind_type = match (is_ref, is_mut) {
(true, true) => BindingMode::ByRef(Mutability::Mut),
(true, false) => BindingMode::ByRef(Mutability::Not),
(false, true) => BindingMode::ByValue(Mutability::Mut),
(false, false) => BindingMode::ByValue(Mutability::Not),
};
let fieldpat = self.mk_pat_ident(boxed_span.to(hi), bind_type, fieldname);
let subpat =
if is_box { self.mk_pat(lo.to(hi), PatKind::Box(fieldpat)) } else { fieldpat };
(subpat, fieldname, true)
};
Ok(FieldPat {
ident: fieldname,
pat: subpat,
is_shorthand,
attrs: attrs.into(),
id: ast::DUMMY_NODE_ID,
span: lo.to(hi),
is_placeholder: false,
})
}
pub(super) fn mk_pat_ident(&self, span: Span, bm: BindingMode, ident: Ident) -> P<Pat> {
self.mk_pat(span, PatKind::Ident(bm, ident, None))
}
fn mk_pat(&self, span: Span, kind: PatKind) -> P<Pat> {
P(Pat { kind, span, id: ast::DUMMY_NODE_ID })
}
} | .span_suggestion(span, suggestion, fix, Applicability::MachineApplicable)
.note("`mut` may be followed by `variable` and `variable @ pattern`")
.emit(); |
irasshai.rs | use crate::model::hitogata::Hitogata;
use crate::omomuki::Result;
use crate::Tumori;
#[derive(Clone, Debug)]
pub struct Irasshai {}
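// Intent that replies with the character's "irasshai" (welcome) greeting.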
impl Tumori for Irasshai {
fn kotafu(&self, _: &Hitogata) -> Box<dyn Tumori> {
return Box::new(self.clone());
}
fn get_kotae(&self, chara: &Hitogata) -> Result |
}
| {
return Result::Message((chara.kaeshi.aisatsu.kuru.irasshai)());
} |
analyticsItem.go | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package insights
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Properties that define an Analytics item that is associated to an Application Insights component.
// API Version: 2015-05-01.
type AnalyticsItem struct {
pulumi.CustomResourceState
// The content of this item
Content pulumi.StringPtrOutput `pulumi:"content"`
// The user-defined name of the item.
Name pulumi.StringPtrOutput `pulumi:"name"`
// A set of properties that can be defined in the context of a specific item type. Each type may have its own properties.
Properties ApplicationInsightsComponentAnalyticsItemPropertiesResponseOutput `pulumi:"properties"`
// Enum indicating if this item definition is owned by a specific user or is shared between all users with access to the Application Insights component.
Scope pulumi.StringPtrOutput `pulumi:"scope"`
// Date and time in UTC when this item was created.
TimeCreated pulumi.StringOutput `pulumi:"timeCreated"`
// Date and time in UTC of the last modification that was made to this item.
TimeModified pulumi.StringOutput `pulumi:"timeModified"`
// Enum indicating the type of the Analytics item.
Type pulumi.StringPtrOutput `pulumi:"type"`
// This instance's version of the data model. This can change as new features are added.
Version pulumi.StringOutput `pulumi:"version"`
}
// NewAnalyticsItem registers a new resource with the given unique name, arguments, and options.
func NewAnalyticsItem(ctx *pulumi.Context,
name string, args *AnalyticsItemArgs, opts ...pulumi.ResourceOption) (*AnalyticsItem, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.ResourceGroupName == nil {
return nil, errors.New("invalid value for required argument 'ResourceGroupName'")
}
if args.ResourceName == nil {
return nil, errors.New("invalid value for required argument 'ResourceName'")
}
if args.ScopePath == nil {
return nil, errors.New("invalid value for required argument 'ScopePath'")
}
aliases := pulumi.Aliases([]pulumi.Alias{
{
Type: pulumi.String("azure-nextgen:insights:AnalyticsItem"),
},
{
Type: pulumi.String("azure-native:insights/v20150501:AnalyticsItem"),
},
{
Type: pulumi.String("azure-nextgen:insights/v20150501:AnalyticsItem"),
},
})
opts = append(opts, aliases)
var resource AnalyticsItem
err := ctx.RegisterResource("azure-native:insights:AnalyticsItem", name, args, &resource, opts...)
if err != nil |
return &resource, nil
}
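// Illustrative usage sketch (not part of the generated SDK): one plausible
// call to NewAnalyticsItem. The resource group, component name, scope path,
// and query content below are assumed placeholder values, not values taken
// from this codebase.
func exampleNewAnalyticsItem(ctx *pulumi.Context) (*AnalyticsItem, error) {
	return NewAnalyticsItem(ctx, "exampleItem", &AnalyticsItemArgs{
		ResourceGroupName: pulumi.String("example-rg"),        // assumed
		ResourceName:      pulumi.String("example-component"), // assumed
		ScopePath:         pulumi.String("analyticsItems"),    // assumed path segment
		Scope:             pulumi.String("shared"),
		Type:              pulumi.String("query"),
		Content:           pulumi.String("requests"),
	})
}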
// GetAnalyticsItem gets an existing AnalyticsItem resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetAnalyticsItem(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *AnalyticsItemState, opts ...pulumi.ResourceOption) (*AnalyticsItem, error) {
var resource AnalyticsItem
err := ctx.ReadResource("azure-native:insights:AnalyticsItem", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering AnalyticsItem resources.
type analyticsItemState struct {
}
type AnalyticsItemState struct {
}
func (AnalyticsItemState) ElementType() reflect.Type {
return reflect.TypeOf((*analyticsItemState)(nil)).Elem()
}
type analyticsItemArgs struct {
// The content of this item
Content *string `pulumi:"content"`
// Internally assigned unique id of the item definition.
Id *string `pulumi:"id"`
// The user-defined name of the item.
Name *string `pulumi:"name"`
// Flag indicating whether or not to force save an item. This allows overriding an item if it already exists.
OverrideItem *bool `pulumi:"overrideItem"`
// A set of properties that can be defined in the context of a specific item type. Each type may have its own properties.
Properties *ApplicationInsightsComponentAnalyticsItemProperties `pulumi:"properties"`
// The name of the resource group. The name is case insensitive.
ResourceGroupName string `pulumi:"resourceGroupName"`
// The name of the Application Insights component resource.
ResourceName string `pulumi:"resourceName"`
// Enum indicating if this item definition is owned by a specific user or is shared between all users with access to the Application Insights component.
Scope *string `pulumi:"scope"`
// Enum indicating if this item definition is owned by a specific user or is shared between all users with access to the Application Insights component.
ScopePath string `pulumi:"scopePath"`
// Enum indicating the type of the Analytics item.
Type *string `pulumi:"type"`
}
// The set of arguments for constructing a AnalyticsItem resource.
type AnalyticsItemArgs struct {
// The content of this item
Content pulumi.StringPtrInput
// Internally assigned unique id of the item definition.
Id pulumi.StringPtrInput
// The user-defined name of the item.
Name pulumi.StringPtrInput
// Flag indicating whether or not to force save an item. This allows overriding an item if it already exists.
OverrideItem pulumi.BoolPtrInput
// A set of properties that can be defined in the context of a specific item type. Each type may have its own properties.
Properties ApplicationInsightsComponentAnalyticsItemPropertiesPtrInput
// The name of the resource group. The name is case insensitive.
ResourceGroupName pulumi.StringInput
// The name of the Application Insights component resource.
ResourceName pulumi.StringInput
// Enum indicating if this item definition is owned by a specific user or is shared between all users with access to the Application Insights component.
Scope pulumi.StringPtrInput
// Enum indicating if this item definition is owned by a specific user or is shared between all users with access to the Application Insights component.
ScopePath pulumi.StringInput
// Enum indicating the type of the Analytics item.
Type pulumi.StringPtrInput
}
func (AnalyticsItemArgs) ElementType() reflect.Type {
return reflect.TypeOf((*analyticsItemArgs)(nil)).Elem()
}
type AnalyticsItemInput interface {
pulumi.Input
ToAnalyticsItemOutput() AnalyticsItemOutput
ToAnalyticsItemOutputWithContext(ctx context.Context) AnalyticsItemOutput
}
func (*AnalyticsItem) ElementType() reflect.Type {
return reflect.TypeOf((*AnalyticsItem)(nil))
}
func (i *AnalyticsItem) ToAnalyticsItemOutput() AnalyticsItemOutput {
return i.ToAnalyticsItemOutputWithContext(context.Background())
}
func (i *AnalyticsItem) ToAnalyticsItemOutputWithContext(ctx context.Context) AnalyticsItemOutput {
return pulumi.ToOutputWithContext(ctx, i).(AnalyticsItemOutput)
}
type AnalyticsItemOutput struct{ *pulumi.OutputState }
func (AnalyticsItemOutput) ElementType() reflect.Type {
return reflect.TypeOf((*AnalyticsItem)(nil))
}
func (o AnalyticsItemOutput) ToAnalyticsItemOutput() AnalyticsItemOutput {
return o
}
func (o AnalyticsItemOutput) ToAnalyticsItemOutputWithContext(ctx context.Context) AnalyticsItemOutput {
return o
}
func init() {
pulumi.RegisterOutputType(AnalyticsItemOutput{})
}
| {
return nil, err
} |
index.d.ts | // Type definitions for word-extractor 1.0
// Project: https://github.com/morungos/node-word-extractor
// Definitions by: Rodrigo Saboya <https://github.com/saboya>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
declare class WordExtractor {
extract(documentPath: string | Uint8Array): Promise<WordExtractor.Document>;
}
export = WordExtractor;
declare namespace WordExtractor {
class Document {
getBody(): string;
getFootnotes(): string;
getHeaders(options?: { includeFooters?: boolean | undefined }): string;
getFooters(): string;
getAnnotations(): string; | getTextboxes(options?: { includeHeadersAndFooters?: boolean | undefined; includeBody?: boolean | undefined }): string;
getEndNotes(): string;
}
} | |
journey.rs | use axum::{extract::Extension, Json};
use domain::{NewJourney, UsersManager};
use serde::{Deserialize, Serialize};
use types::my_journey_destiny::to_name;
use types::my_source::to_source_name;
use types::ID;
use validator::{Validate, ValidationError};
use crate::{
app_request::{AuthUser, ValidatedJson},
app_response::AppError,
AppState,
};
#[derive(Default, Deserialize, Debug, Clone, Validate)]
pub struct JourneyForm {
#[validate(length(min = 5, max = 40, message = "title length(5-40)"))]
title: String,
#[validate(url)]
image_url: String,
#[validate(url)]
link: String,
#[validate(custom(function = "validate_source", message = "source is not correct"))]
source: u8,
#[validate(custom(
function = "validate_journey_destiny",
message = "journey_destiny is not correct"
))]
journey_destiny: String,
}
fn validate_source(source: u8) -> Result<(), ValidationError> {
if to_source_name(source.into()).is_empty() {
return Err(ValidationError::new("source"));
}
Ok(())
}
fn validate_journey_destiny(journey_destiny: &str) -> Result<(), ValidationError> {
if to_name(journey_destiny).is_empty() {
return Err(ValidationError::new("journey_destiny"));
}
Ok(())
}
#[derive(Serialize)]
pub struct JourneyResponse {
id: ID,
}
#[tracing::instrument(skip(auth_user, state))]
pub async fn | (
ValidatedJson(form): ValidatedJson<JourneyForm>,
AuthUser(auth_user): AuthUser,
Extension(state): Extension<AppState>,
) -> Result<Json<JourneyResponse>, AppError> {
let adventures_manager = &state.adventures_manager;
let users_manager = &state.users_manager;
let user = users_manager
.get_user_by_username(auth_user.get_name())
.await?;
let new_journey = NewJourney {
title: form.title,
image_url: form.image_url,
link: form.link,
source: form.source.into(),
journey_destiny: form.journey_destiny,
};
let id = user.add_journey(new_journey, adventures_manager).await?;
Ok(JourneyResponse { id }.into())
}
| journey |
issue-53807.rs | // Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub fn main() {
let maybe = Some(vec![true, true]);
loop {
if let Some(thing) = maybe |
}
}
| {
} |
email.go | /*
* https://github.com/gcloudplatform/email/blob/master/email.go
*/
package email
import (
"bytes"
"encoding/base64"
"fmt"
"math/rand"
"mime"
"net/mail"
"net/smtp"
"path/filepath"
"strings"
)
type Mail struct {
smtpServer string
fromMail *mail.Address
password string
}
type Attachment struct {
Filename string
Data []byte
Inline bool
ContentType string
}
func | (smtpServer, password string, mailAddress *mail.Address) *Mail {
return &Mail{
smtpServer: smtpServer,
fromMail: mailAddress,
password: password,
}
}
func (m *Mail) Send(title, body string, toEmail []*mail.Address) error {
auth := smtp.PlainAuth(
"",
m.fromMail.Address,
m.password,
m.smtpServer,
)
to := ""
var toEmails []string
for _, e := range toEmail {
to += "," + e.String()
toEmails = append(toEmails, e.Address)
}
if to != "" {
to = to[1:]
}
buf := bytes.NewBuffer(nil)
buf.WriteString(fmt.Sprintf("From: %s\r\n", m.fromMail.String()))
buf.WriteString(fmt.Sprintf("To: %s\r\n", to))
buf.WriteString(fmt.Sprintf("Subject: %s\r\n", strings.Trim(mime.QEncoding.Encode("utf-8", title), "\"")))
buf.WriteString("MIME-Version: 1.0\r\n")
buf.WriteString("Content-Type: text/html; charset=\"utf-8\"\r\n")
buf.WriteString("Content-Transfer-Encoding: base64\r\n")
bodybase64 := base64.StdEncoding.EncodeToString([]byte(body))
buf.WriteString(bodybase64)
//fmt.Println(buf.String())
err := smtp.SendMail(
m.smtpServer,
auth,
m.fromMail.Address,
toEmails,
buf.Bytes(),
)
return err
}
// For the multipart email format, see
// https://support.microsoft.com/zh-cn/kb/969854
// https://tools.ietf.org/html/rfc1341
func (m *Mail) SendWithAttachment(title, body string, toEmail []*mail.Address, attachment []*Attachment) error {
auth := smtp.PlainAuth(
"",
m.fromMail.Address,
m.password,
m.smtpServer,
)
to := ""
var toEmails []string
for _, e := range toEmail {
to += "," + e.String()
toEmails = append(toEmails, e.Address)
}
if to != "" {
to = to[1:]
}
buf := bytes.NewBuffer(nil)
buf.WriteString(fmt.Sprintf("From: %s\r\n", m.fromMail.String()))
buf.WriteString(fmt.Sprintf("To: %s\r\n", to))
buf.WriteString(fmt.Sprintf("Subject: %s\r\n", strings.Trim(mime.QEncoding.Encode("utf-8", title), "\"")))
buf.WriteString("MIME-Version: 1.0\r\n")
boundary := ""
boundary = genBoundary(28)
buf.WriteString(fmt.Sprintf("Content-Type: multipart/mixed; boundary=\"%s\"\r\n\r\n", boundary))
buf.WriteString("This is a message with multiple parts in MIME format.\r\n")
buf.WriteString(fmt.Sprintf("--%s\r\n", boundary))
bodybase64 := base64.StdEncoding.EncodeToString([]byte(body))
buf.WriteString("Content-Type: text/html; charset=\"utf-8\"\r\n")
buf.WriteString("Content-Transfer-Encoding: base64\r\n")
buf.WriteString(fmt.Sprintf("\r\n%s\r\n", bodybase64))
for _, attach := range attachment {
buf.WriteString(fmt.Sprintf("\r\n--%s\r\n", boundary))
if attach.Inline {
buf.WriteString("Content-Type: message/rfc822\r\n")
buf.WriteString(fmt.Sprintf("Content-Disposition: inline; filename=\"%s\"\r\n\r\n", attach.Filename))
buf.Write(attach.Data)
} else {
if attach.ContentType == "" {
ext := filepath.Ext(attach.Filename)
mimetype := mime.TypeByExtension(ext)
if mimetype != "" {
attach.ContentType = mimetype
} else {
attach.ContentType = "application/octet-stream"
}
}
buf.WriteString(fmt.Sprintf("Content-Type: %s\r\n", attach.ContentType))
buf.WriteString("Content-Transfer-Encoding: base64\r\n")
buf.WriteString(fmt.Sprintf("Content-Disposition: attachment; filename=\"%s\"\r\n\r\n", attach.Filename))
b := make([]byte, base64.StdEncoding.EncodedLen(len(attach.Data)))
base64.StdEncoding.Encode(b, attach.Data)
// write base64 content in lines of up to 76 chars
for i, l := 0, len(b); i < l; i++ {
buf.WriteByte(b[i])
if (i+1)%76 == 0 {
buf.WriteString("\r\n")
}
}
}
}
buf.WriteString(fmt.Sprintf("\r\n--%s--", boundary))
//fmt.Println(buf.String())
err := smtp.SendMail(
m.smtpServer,
auth,
m.fromMail.Address,
toEmails,
buf.Bytes(),
)
return err
}
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
const letterLen = 62
func genBoundary(n int) string {
b := make([]byte, n)
for i := range b {
b[i] = letterBytes[rand.Intn(letterLen)]
}
return string(b)
}
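// Illustrative usage sketch (not part of the original package): wires New and
// Send together. The SMTP host, credentials, and addresses are assumptions.
func exampleSend() error {
	from := &mail.Address{Name: "Sender", Address: "sender@example.com"}
	m := New("smtp.example.com:587", "app-password", from)
	to := []*mail.Address{{Name: "Recipient", Address: "recipient@example.com"}}
	return m.Send("Hello", "<p>Hi there</p>", to)
}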
| New |
completion.go | package cmd
import (
"fmt"
"log"
"os"
"path"
"github.com/spf13/cobra"
)
// Plugin structure for zsh
type Plugin struct {
name string
path string
}
func (plug *Plugin) script() string {
return path.Join(plug.path, fmt.Sprintf("_%s", plug.name))
}
// completionCmd represents the completion command
var completionCmd = &cobra.Command{
Use: "completion",
Short: "Generates zsh completion scripts",
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
zsh := os.Getenv("ZSH")
if zsh == "" {
log.Fatal(fmt.Errorf("could not find the ZSH environment variable, is ZSH installed?"))
}
plugin := &Plugin{
name: rootCmd.Name(),
path: path.Join(zsh, "/completions"),
}
if _, err := os.Stat(plugin.path); os.IsNotExist(err) {
	if err := os.MkdirAll(plugin.path, 0700); err != nil {
		log.Fatal(err)
	}
}
if err := rootCmd.GenZshCompletionFile(plugin.script()); err != nil {
log.Fatal(err)
}
fmt.Printf("a zsh completion file has been generated in %s \n", plugin.path)
fmt.Println()
fmt.Println("to utilize the plugin, please add 'compinit' to the end of your .zshrc file")
},
}
func | () {
rootCmd.AddCommand(completionCmd)
}
| init |
line.rs | use crate::*;
pub const MAX_HEADER_DEPTH: usize = 8;
/// a parsed line
#[derive(Debug, PartialEq, Eq, Clone)]
pub enum Line<'a> {
Normal(Composite<'a>),
TableRow(TableRow<'a>), // a normal table row, with cells having content
TableRule(TableRule), // a separator/border in a table, optionally defining alignments
HorizontalRule, // a horizontal line dividing the screen
CodeFence(Composite<'a>),
}
impl Line<'_> {
pub fn from(md: &str) -> Line<'_> {
LineParser::from(md).line()
}
#[inline(always)]
pub fn char_length(&self) -> usize {
match self {
Line::Normal(composite) => composite.char_length(),
Line::TableRow(row) => row.cells.iter().fold(0, |s, c| s + c.char_length()),
_ => 0, // no known char length for table format lines
}
}
pub fn new_paragraph(compounds: Vec<Compound<'_>>) -> Line<'_> {
Line::Normal(Composite {
style: CompositeStyle::Paragraph,
compounds,
})
}
pub fn empty_code_fence() -> Line<'static> {
Line::CodeFence(Composite {
style: CompositeStyle::Paragraph,
compounds: vec![],
})
}
pub fn new_code_fence(compounds: Vec<Compound<'_>>) -> Line<'_> {
Line::CodeFence(Composite {
style: CompositeStyle::Paragraph,
compounds,
})
}
pub fn new_code(compound: Compound<'_>) -> Line<'_> {
Line::Normal(Composite {
style: CompositeStyle::Code,
compounds: vec![compound],
})
}
pub fn new_quote(compounds: Vec<Compound<'_>>) -> Line<'_> {
Line::Normal(Composite {
style: CompositeStyle::Quote,
compounds,
})
}
pub fn new_list_item(compounds: Vec<Compound<'_>>) -> Line<'_> {
Line::Normal(Composite {
style: CompositeStyle::ListItem,
compounds,
})
}
pub fn new_header(level: u8, compounds: Vec<Compound<'_>>) -> Line<'_> {
Line::Normal(Composite {
style: CompositeStyle::Header(level),
compounds,
})
}
pub fn new_table_row(cells: Vec<Composite<'_>>) -> Line<'_> {
Line::TableRow(TableRow { cells })
}
pub fn new_table_alignments(cells: Vec<Alignment>) -> Line<'static> {
Line::TableRule(TableRule { cells })
}
#[inline(always)]
pub fn is_table_row(&self) -> bool {
matches!(self, Line::TableRow(_))
}
#[inline(always)]
#[allow(clippy::match_like_matches_macro)]
pub fn is_table_part(&self) -> bool {
match self {
Line::Normal(_) => false,
_ => true,
}
}
#[inline(always)]
pub fn is_code(&self) -> bool {
match self {
Line::Normal(composite) => composite.is_code(),
_ => false,
}
}
}
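// Added illustrative check: exercises only the constructors and predicates
// defined above; assumes the crate's `Compound::raw_str` helper.
#[test]
pub fn construct_lines() {
    let line = Line::new_code(Compound::raw_str("let x = 1;"));
    assert!(line.is_code());
    assert!(!line.is_table_row());
}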
#[test]
pub fn count_chars() | {
assert_eq!(Line::from("τ").char_length(), 1);
assert_eq!(Line::from("τ:`2π`").char_length(), 4);
assert_eq!(Line::from("* item").char_length(), 4);
}
|
|
me_zrl_bound_evolvers.py | #!/usr/bin/env python
"""@package docstring
File: me_zrl_bound_evolvers.py
Author: Adam Lamson
Email: [email protected]
Description:
"""
import numpy as np
# from scipy.integrate import dblquad
from .me_helpers import dr_dt, convert_sol_to_geom
from .me_zrl_odes import (rod_geom_derivs_zrl, calc_moment_derivs_zrl,
calc_moment_derivs_zrl_B_terms,
calc_boundary_derivs_zrl)
from .me_zrl_helpers import (avg_force_zrl,
prep_zrl_bound_evolver,
get_zrl_moments_and_boundary_terms)
from .rod_steric_forces import calc_wca_force_torque
from .me_zrl_evolvers import prep_zrl_evolver
def evolver_zrl_bound(sol, fric_coeff, params):
"""!Calculate all time derivatives necessary to solve the moment expansion
evolution of the Fokker-Planck equation of zero rest length (zrl) crosslinkers
bound to moving rods. d<var> denotes the time derivative of the
corresponding variable.
@param sol: Solution vector to solve_ivp
@param fric_coeff: friction coefficients of rod
@param params: Constant parameters of the simulation
@return: Time-derivatives of all time varying quantities in a flattened
array
"""
# Define useful parameters for functions
hL_i, hL_j = (.5 * params['L_i'], .5 * params['L_j'])
ks = params['ks']
r_i, r_j, u_i, u_j = convert_sol_to_geom(sol)
r_ij = r_j - r_i
(scalar_geom, q_arr, Q_arr) = prep_zrl_bound_evolver(sol, params)
(mu_kl, B_terms) = get_zrl_moments_and_boundary_terms(sol)
if mu_kl[0] < 0.:
mu_kl[0] = 0.
if mu_kl[4] < 0.:
mu_kl[4] = 0.
if mu_kl[5] < 0.:
|
# Get average force of crosslinkers on rod2
f_ij = avg_force_zrl(r_ij, u_i, u_j, mu_kl[0], mu_kl[1], mu_kl[2], ks)
# Evolution of rod positions
dgeom = rod_geom_derivs_zrl(f_ij, r_ij, u_i, u_j, scalar_geom,
mu_kl, fric_coeff, ks)
# Evolution of moments
dmu_kl = calc_moment_derivs_zrl_B_terms(mu_kl, scalar_geom,
q_arr, B_terms, params)
# Evolution of boundary condtions
dB_terms = calc_boundary_derivs_zrl(B_terms, scalar_geom, Q_arr, params)
dsol = np.concatenate((dgeom, dmu_kl, dB_terms))
return dsol
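# Illustrative driver (not part of the original module): evolver_zrl_bound has
# the right-hand-side shape scipy expects once wrapped to a (t, y) signature.
# `sol0`, `fric_coeff`, and `params` are assumed placeholders here.
def _example_integrate(sol0, fric_coeff, params, t_final=10.0):
    """Sketch: integrate the moment-expansion ODEs with solve_ivp."""
    from scipy.integrate import solve_ivp
    return solve_ivp(lambda t, y: evolver_zrl_bound(y, fric_coeff, params),
                     (0.0, t_final), sol0)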
##########################################
| mu_kl[5] = 0. |
local.strategy.ts | import { Injectable, UnauthorizedException } from '@nestjs/common';
import { PassportStrategy } from '@nestjs/passport';
import { Strategy } from 'passport-local';
import { MessageHelper } from 'src/helpers/message.helper';
import { AuthService } from '../auth.service';
@Injectable()
export class LocalStrategy extends PassportStrategy(Strategy) { | super({ usernameField: 'email' });
}
async validate(email: string, password: string) {
const user = await this.authService.validateUser(email, password);
if (!user)
throw new UnauthorizedException(MessageHelper.PASSWORD_OR_EMAIL_INVALID);
return user;
}
} | constructor(private readonly authService: AuthService) { |
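// Illustrative wiring (assumed module, not from this repo): the strategy only
// takes effect once registered as a provider alongside PassportModule, e.g.:
//
//   @Module({
//     imports: [PassportModule],
//     providers: [AuthService, LocalStrategy],
//   })
//   export class AuthModule {}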
apply_cfg.rs | use std::marker::PhantomData;
use futures::future::Future;
use futures::{try_ready, Async, IntoFuture, Poll};
use crate::cell::Cell;
use crate::{IntoService, NewService, Service};
/// Convert `Fn(&Config, &mut Service) -> Future<Service>` fn to a NewService
pub fn apply_cfg<F, C, T, R, S>(srv: T, f: F) -> ApplyConfigService<F, C, T, R, S>
where
F: FnMut(&C, &mut T) -> R,
T: Service,
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
ApplyConfigService {
f: Cell::new(f),
srv: Cell::new(srv.into_service()),
_t: PhantomData,
}
}
/// Convert `Fn(&Config) -> Future<Service>` fn to NewService
pub struct ApplyConfigService<F, C, T, R, S>
where
F: FnMut(&C, &mut T) -> R,
T: Service,
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
f: Cell<F>,
srv: Cell<T>,
_t: PhantomData<(C, R, S)>,
}
impl<F, C, T, R, S> Clone for ApplyConfigService<F, C, T, R, S>
where
F: FnMut(&C, &mut T) -> R,
T: Service,
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
fn clone(&self) -> Self {
ApplyConfigService {
f: self.f.clone(),
srv: self.srv.clone(),
_t: PhantomData,
}
}
}
impl<F, C, T, R, S> NewService for ApplyConfigService<F, C, T, R, S>
where
F: FnMut(&C, &mut T) -> R,
T: Service,
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
type Config = C;
type Request = S::Request;
type Response = S::Response;
type Error = S::Error;
type Service = S;
type InitError = R::Error;
type Future = FnNewServiceConfigFut<R, S>;
fn new_service(&self, cfg: &C) -> Self::Future {
FnNewServiceConfigFut {
fut: unsafe { (self.f.get_mut_unsafe())(cfg, self.srv.get_mut_unsafe()) }
.into_future(),
_t: PhantomData,
}
}
}
pub struct FnNewServiceConfigFut<R, S>
where
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
fut: R::Future,
_t: PhantomData<(S,)>,
}
impl<R, S> Future for FnNewServiceConfigFut<R, S>
where
R: IntoFuture,
R::Item: IntoService<S>,
S: Service,
{
type Item = S;
type Error = R::Error;
fn | (&mut self) -> Poll<Self::Item, Self::Error> {
Ok(Async::Ready(try_ready!(self.fut.poll()).into_service()))
}
}
| poll |
sim_msb_gps.py | import logging
import zmq
import sys
import time
import uptime
import pickle
from datetime import datetime
from os import path
try:
from gps_config import (init, GPS_TOPIC)
except ImportError as e:
    raise Exception('failed to import init method') from e
def gen_gps_message():
|
def main():
config = init()
connect_to = f'{config["ipc_protocol"]}:{config["ipc_port"]}'
logging.debug(f'connecting to {connect_to} for zeroMQ IPC')
ctx = zmq.Context()
zmq_socket = ctx.socket(zmq.PUB)
try:
zmq_socket.connect(connect_to)
except Exception as e:
logging.fatal('failed to connect to zeroMQ socket for IPC')
sys.exit(-1)
logging.debug('connected to zeroMQ IPC socket')
logging.debug('entering endless loop')
try:
while True:
# Do stuff
data = gen_gps_message()
if config['print']: print(f'gps: {data}')
zmq_socket.send_multipart(
[
GPS_TOPIC,
pickle.dumps(
data
)
]
)
time.sleep(1)
# zmq_socket.send_pyobj(data)
except StopIteration:
logging.fatal("GPSD has terminated")
except KeyboardInterrupt:
logging.info('goodbye')
sys.exit(0)
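# Illustrative counterpart (not in the original script): a minimal zeroMQ
# subscriber for the messages published above. The endpoint default is an
# assumption; in practice reuse the value built from init() in main().
def example_subscriber(connect_to='ipc:///tmp/msb'):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.SUB)
    sock.connect(connect_to)
    sock.setsockopt(zmq.SUBSCRIBE, GPS_TOPIC)  # filter to the GPS topic only
    topic, payload = sock.recv_multipart()
    return pickle.loads(payload)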
if __name__ == '__main__':
main()
| return [
time.time(),
uptime.uptime(),
{
"class": "TPV",
"device": "/dev/ttyACM0",
"mode": 1,
"timestamp" : time.time(),
"leapseconds": 18,
"lat" : 8.66645,
"lon" : 53.5555,
"alt" : 6.5546,
}
] |
set.rs | use std::fmt;
use std::hash::Hash;
use std::iter::FromIterator;
use super::map::SsoHashMap;
/// Small-storage-optimized implementation of a set.
///
/// Stores elements in a small array up to a certain length
/// and switches to `HashSet` when that length is exceeded.
//
// FIXME: Implements a subset of the HashSet API.
//
// Missing HashSet API:
// all hasher-related
// try_reserve
// shrink_to (unstable)
// drain_filter (unstable)
// replace
// get_or_insert/get_or_insert_owned/get_or_insert_with (unstable)
// difference/symmetric_difference/intersection/union
// is_disjoint/is_subset/is_superset
// PartialEq/Eq (requires SsoHashMap implementation)
// BitOr/BitAnd/BitXor/Sub
#[derive(Clone)]
pub struct SsoHashSet<T> {
map: SsoHashMap<T, ()>,
}
/// Adapter function to convert the result
/// returned by `SsoHashMap` functions into
/// the result `SsoHashSet` should return.
#[inline(always)]
fn entry_to_key<K, V>((k, _v): (K, V)) -> K {
k
}
impl<T> SsoHashSet<T> {
/// Creates an empty `SsoHashSet`.
#[inline]
pub fn new() -> Self {
Self { map: SsoHashMap::new() }
}
/// Creates an empty `SsoHashSet` with the specified capacity.
#[inline]
pub fn with_capacity(cap: usize) -> Self {
Self { map: SsoHashMap::with_capacity(cap) }
}
/// Clears the set, removing all values.
#[inline]
pub fn clear(&mut self) {
self.map.clear()
}
/// Returns the number of elements the set can hold without reallocating.
#[inline]
pub fn capacity(&self) -> usize {
self.map.capacity()
}
/// Returns the number of elements in the set.
#[inline]
pub fn len(&self) -> usize {
self.map.len()
}
/// Returns `true` if the set contains no elements.
#[inline]
pub fn is_empty(&self) -> bool {
self.map.is_empty()
}
/// An iterator visiting all elements in arbitrary order.
/// The iterator element type is `&'a T`.
#[inline]
pub fn iter(&self) -> impl Iterator<Item = &T> {
self.into_iter()
}
/// Clears the set, returning all elements in an iterator.
#[inline]
pub fn drain(&mut self) -> impl Iterator<Item = T> + '_ {
self.map.drain().map(entry_to_key)
}
}
impl<T: Eq + Hash> SsoHashSet<T> {
/// Reserves capacity for at least `additional` more elements to be inserted
/// in the `SsoHashSet`. The collection may reserve more space to avoid
/// frequent reallocations.
#[inline]
pub fn reserve(&mut self, additional: usize) {
self.map.reserve(additional)
}
/// Shrinks the capacity of the set as much as possible. It will drop
/// down as much as possible while maintaining the internal rules
/// and possibly leaving some space in accordance with the resize policy.
#[inline]
pub fn shrink_to_fit(&mut self) {
self.map.shrink_to_fit()
}
/// Retains only the elements specified by the predicate.
#[inline]
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
self.map.retain(|k, _v| f(k))
}
/// Removes and returns the value in the set, if any, that is equal to the given one.
#[inline]
pub fn take(&mut self, value: &T) -> Option<T> {
self.map.remove_entry(value).map(entry_to_key)
}
/// Returns a reference to the value in the set, if any, that is equal to the given value.
#[inline]
pub fn get(&self, value: &T) -> Option<&T> {
self.map.get_key_value(value).map(entry_to_key)
}
/// Adds a value to the set.
///
/// If the set did not have this value present, `true` is returned.
///
/// If the set did have this value present, `false` is returned.
#[inline]
pub fn insert(&mut self, elem: T) -> bool {
self.map.insert(elem, ()).is_none()
}
/// Removes a value from the set. Returns whether the value was
/// present in the set.
#[inline]
pub fn remove(&mut self, value: &T) -> bool {
self.map.remove(value).is_some()
}
/// Returns `true` if the set contains a value.
#[inline]
pub fn contains(&self, value: &T) -> bool {
self.map.contains_key(value)
}
}
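#[cfg(test)]
mod usage_sketch {
    // Added illustrative test: a usage sketch exercising only the API
    // defined above in this file.
    use super::SsoHashSet;

    #[test]
    fn insert_take_roundtrip() {
        let mut set = SsoHashSet::new();
        assert!(set.insert(1)); // newly inserted values return true
        assert!(!set.insert(1)); // duplicates return false
        assert!(set.contains(&1));
        assert_eq!(set.take(&1), Some(1));
        assert!(set.is_empty());
    }
}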
impl<T: Eq + Hash> FromIterator<T> for SsoHashSet<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> SsoHashSet<T> {
let mut set: SsoHashSet<T> = Default::default();
set.extend(iter);
set
}
}
impl<T> Default for SsoHashSet<T> {
#[inline]
fn default() -> Self {
Self::new()
}
}
impl<T: Eq + Hash> Extend<T> for SsoHashSet<T> {
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = T>,
{
for val in iter.into_iter() {
self.insert(val);
}
}
#[inline]
fn extend_one(&mut self, item: T) {
self.insert(item);
}
#[inline]
fn extend_reserve(&mut self, additional: usize) {
self.map.extend_reserve(additional)
}
}
impl<'a, T> Extend<&'a T> for SsoHashSet<T>
where
T: 'a + Eq + Hash + Copy,
{
#[inline]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
self.extend(iter.into_iter().cloned());
}
#[inline]
fn extend_one(&mut self, &item: &'a T) {
self.insert(item);
}
#[inline]
fn extend_reserve(&mut self, additional: usize) |
}
impl<T> IntoIterator for SsoHashSet<T> {
type IntoIter = std::iter::Map<<SsoHashMap<T, ()> as IntoIterator>::IntoIter, fn((T, ())) -> T>;
type Item = <Self::IntoIter as Iterator>::Item;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.map.into_iter().map(entry_to_key)
}
}
impl<'a, T> IntoIterator for &'a SsoHashSet<T> {
type IntoIter = std::iter::Map<
<&'a SsoHashMap<T, ()> as IntoIterator>::IntoIter,
fn((&'a T, &'a ())) -> &'a T,
>;
type Item = <Self::IntoIter as Iterator>::Item;
#[inline]
fn into_iter(self) -> Self::IntoIter {
self.map.iter().map(entry_to_key)
}
}
impl<T> fmt::Debug for SsoHashSet<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_set().entries(self.iter()).finish()
}
}
| {
Extend::<T>::extend_reserve(self, additional)
} |
IpsecVpnTunnelProfilesTypes.go | /* Copyright © 2019 VMware, Inc. All Rights Reserved.
SPDX-License-Identifier: BSD-2-Clause */
// Code generated. DO NOT EDIT.
/*
* Data type definitions file for service: IpsecVpnTunnelProfiles.
* Includes binding types of a structures and enumerations defined in the service.
* Shared by client-side stubs and server-side skeletons to ensure type
* compatibility.
*/
package infra
import (
"reflect"
"github.com/vmware/vsphere-automation-sdk-go/services/nsxt/model"
"github.com/vmware/vsphere-automation-sdk-go/runtime/bindings"
"github.com/vmware/vsphere-automation-sdk-go/runtime/data"
"github.com/vmware/vsphere-automation-sdk-go/runtime/protocol"
)
func ipsecVpnTunnelProfilesDeleteInputType() bindings.StructType {
fields := make(map[string]bindings.BindingType)
fieldNameMap := make(map[string]string)
fields["tunnel_profile_id"] = bindings.NewStringType()
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
var validators = []bindings.Validator{}
return bindings.NewStructType("operation-input", fields, reflect.TypeOf(data.StructValue{}), fieldNameMap, validators)
}
func i | ) bindings.BindingType {
return bindings.NewVoidType()
}
func ipsecVpnTunnelProfilesDeleteRestMetadata() protocol.OperationRestMetadata {
fields := map[string]bindings.BindingType{}
fieldNameMap := map[string]string{}
paramsTypeMap := map[string]bindings.BindingType{}
pathParams := map[string]string{}
queryParams := map[string]string{}
headerParams := map[string]string{}
fields["tunnel_profile_id"] = bindings.NewStringType()
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
paramsTypeMap["tunnel_profile_id"] = bindings.NewStringType()
paramsTypeMap["tunnelProfileId"] = bindings.NewStringType()
pathParams["tunnel_profile_id"] = "tunnelProfileId"
resultHeaders := map[string]string{}
errorHeaders := map[string]string{}
return protocol.NewOperationRestMetadata(
fields,
fieldNameMap,
paramsTypeMap,
pathParams,
queryParams,
headerParams,
"",
"",
"DELETE",
"/policy/api/v1/infra/ipsec-vpn-tunnel-profiles/{tunnelProfileId}",
resultHeaders,
204,
errorHeaders,
map[string]int{"InvalidRequest": 400,"Unauthorized": 403,"ServiceUnavailable": 503,"InternalServerError": 500,"NotFound": 404})
}
func ipsecVpnTunnelProfilesGetInputType() bindings.StructType {
fields := make(map[string]bindings.BindingType)
fieldNameMap := make(map[string]string)
fields["tunnel_profile_id"] = bindings.NewStringType()
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
var validators = []bindings.Validator{}
return bindings.NewStructType("operation-input", fields, reflect.TypeOf(data.StructValue{}), fieldNameMap, validators)
}
func ipsecVpnTunnelProfilesGetOutputType() bindings.BindingType {
return bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
}
func ipsecVpnTunnelProfilesGetRestMetadata() protocol.OperationRestMetadata {
fields := map[string]bindings.BindingType{}
fieldNameMap := map[string]string{}
paramsTypeMap := map[string]bindings.BindingType{}
pathParams := map[string]string{}
queryParams := map[string]string{}
headerParams := map[string]string{}
fields["tunnel_profile_id"] = bindings.NewStringType()
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
paramsTypeMap["tunnel_profile_id"] = bindings.NewStringType()
paramsTypeMap["tunnelProfileId"] = bindings.NewStringType()
pathParams["tunnel_profile_id"] = "tunnelProfileId"
resultHeaders := map[string]string{}
errorHeaders := map[string]string{}
return protocol.NewOperationRestMetadata(
fields,
fieldNameMap,
paramsTypeMap,
pathParams,
queryParams,
headerParams,
"",
"",
"GET",
"/policy/api/v1/infra/ipsec-vpn-tunnel-profiles/{tunnelProfileId}",
resultHeaders,
200,
errorHeaders,
map[string]int{"InvalidRequest": 400,"Unauthorized": 403,"ServiceUnavailable": 503,"InternalServerError": 500,"NotFound": 404})
}
func ipsecVpnTunnelProfilesListInputType() bindings.StructType {
fields := make(map[string]bindings.BindingType)
fieldNameMap := make(map[string]string)
fields["cursor"] = bindings.NewOptionalType(bindings.NewStringType())
fields["include_mark_for_delete_objects"] = bindings.NewOptionalType(bindings.NewBooleanType())
fields["included_fields"] = bindings.NewOptionalType(bindings.NewStringType())
fields["page_size"] = bindings.NewOptionalType(bindings.NewIntegerType())
fields["sort_ascending"] = bindings.NewOptionalType(bindings.NewBooleanType())
fields["sort_by"] = bindings.NewOptionalType(bindings.NewStringType())
fieldNameMap["cursor"] = "Cursor"
fieldNameMap["include_mark_for_delete_objects"] = "IncludeMarkForDeleteObjects"
fieldNameMap["included_fields"] = "IncludedFields"
fieldNameMap["page_size"] = "PageSize"
fieldNameMap["sort_ascending"] = "SortAscending"
fieldNameMap["sort_by"] = "SortBy"
var validators = []bindings.Validator{}
return bindings.NewStructType("operation-input", fields, reflect.TypeOf(data.StructValue{}), fieldNameMap, validators)
}
func ipsecVpnTunnelProfilesListOutputType() bindings.BindingType {
return bindings.NewReferenceType(model.IPSecVpnTunnelProfileListResultBindingType)
}
func ipsecVpnTunnelProfilesListRestMetadata() protocol.OperationRestMetadata {
fields := map[string]bindings.BindingType{}
fieldNameMap := map[string]string{}
paramsTypeMap := map[string]bindings.BindingType{}
pathParams := map[string]string{}
queryParams := map[string]string{}
headerParams := map[string]string{}
fields["cursor"] = bindings.NewOptionalType(bindings.NewStringType())
fields["include_mark_for_delete_objects"] = bindings.NewOptionalType(bindings.NewBooleanType())
fields["included_fields"] = bindings.NewOptionalType(bindings.NewStringType())
fields["page_size"] = bindings.NewOptionalType(bindings.NewIntegerType())
fields["sort_ascending"] = bindings.NewOptionalType(bindings.NewBooleanType())
fields["sort_by"] = bindings.NewOptionalType(bindings.NewStringType())
fieldNameMap["cursor"] = "Cursor"
fieldNameMap["include_mark_for_delete_objects"] = "IncludeMarkForDeleteObjects"
fieldNameMap["included_fields"] = "IncludedFields"
fieldNameMap["page_size"] = "PageSize"
fieldNameMap["sort_ascending"] = "SortAscending"
fieldNameMap["sort_by"] = "SortBy"
paramsTypeMap["included_fields"] = bindings.NewOptionalType(bindings.NewStringType())
paramsTypeMap["page_size"] = bindings.NewOptionalType(bindings.NewIntegerType())
paramsTypeMap["include_mark_for_delete_objects"] = bindings.NewOptionalType(bindings.NewBooleanType())
paramsTypeMap["cursor"] = bindings.NewOptionalType(bindings.NewStringType())
paramsTypeMap["sort_by"] = bindings.NewOptionalType(bindings.NewStringType())
paramsTypeMap["sort_ascending"] = bindings.NewOptionalType(bindings.NewBooleanType())
queryParams["cursor"] = "cursor"
queryParams["sort_ascending"] = "sort_ascending"
queryParams["included_fields"] = "included_fields"
queryParams["sort_by"] = "sort_by"
queryParams["include_mark_for_delete_objects"] = "include_mark_for_delete_objects"
queryParams["page_size"] = "page_size"
resultHeaders := map[string]string{}
errorHeaders := map[string]string{}
return protocol.NewOperationRestMetadata(
fields,
fieldNameMap,
paramsTypeMap,
pathParams,
queryParams,
headerParams,
"",
"",
"GET",
"/policy/api/v1/infra/ipsec-vpn-tunnel-profiles",
resultHeaders,
200,
errorHeaders,
map[string]int{"InvalidRequest": 400,"Unauthorized": 403,"ServiceUnavailable": 503,"InternalServerError": 500,"NotFound": 404})
}
func ipsecVpnTunnelProfilesPatchInputType() bindings.StructType {
fields := make(map[string]bindings.BindingType)
fieldNameMap := make(map[string]string)
fields["tunnel_profile_id"] = bindings.NewStringType()
fields["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
fieldNameMap["ip_sec_vpn_tunnel_profile"] = "IpSecVpnTunnelProfile"
var validators = []bindings.Validator{}
return bindings.NewStructType("operation-input", fields, reflect.TypeOf(data.StructValue{}), fieldNameMap, validators)
}
func ipsecVpnTunnelProfilesPatchOutputType() bindings.BindingType {
return bindings.NewVoidType()
}
func ipsecVpnTunnelProfilesPatchRestMetadata() protocol.OperationRestMetadata {
fields := map[string]bindings.BindingType{}
fieldNameMap := map[string]string{}
paramsTypeMap := map[string]bindings.BindingType{}
pathParams := map[string]string{}
queryParams := map[string]string{}
headerParams := map[string]string{}
fields["tunnel_profile_id"] = bindings.NewStringType()
fields["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
fieldNameMap["ip_sec_vpn_tunnel_profile"] = "IpSecVpnTunnelProfile"
paramsTypeMap["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
paramsTypeMap["tunnel_profile_id"] = bindings.NewStringType()
paramsTypeMap["tunnelProfileId"] = bindings.NewStringType()
pathParams["tunnel_profile_id"] = "tunnelProfileId"
resultHeaders := map[string]string{}
errorHeaders := map[string]string{}
return protocol.NewOperationRestMetadata(
fields,
fieldNameMap,
paramsTypeMap,
pathParams,
queryParams,
headerParams,
"",
"ip_sec_vpn_tunnel_profile",
"PATCH",
"/policy/api/v1/infra/ipsec-vpn-tunnel-profiles/{tunnelProfileId}",
resultHeaders,
204,
errorHeaders,
map[string]int{"InvalidRequest": 400,"Unauthorized": 403,"ServiceUnavailable": 503,"InternalServerError": 500,"NotFound": 404})
}
func ipsecVpnTunnelProfilesUpdateInputType() bindings.StructType {
fields := make(map[string]bindings.BindingType)
fieldNameMap := make(map[string]string)
fields["tunnel_profile_id"] = bindings.NewStringType()
fields["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
fieldNameMap["ip_sec_vpn_tunnel_profile"] = "IpSecVpnTunnelProfile"
var validators = []bindings.Validator{}
return bindings.NewStructType("operation-input", fields, reflect.TypeOf(data.StructValue{}), fieldNameMap, validators)
}
func ipsecVpnTunnelProfilesUpdateOutputType() bindings.BindingType {
return bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
}
func ipsecVpnTunnelProfilesUpdateRestMetadata() protocol.OperationRestMetadata {
fields := map[string]bindings.BindingType{}
fieldNameMap := map[string]string{}
paramsTypeMap := map[string]bindings.BindingType{}
pathParams := map[string]string{}
queryParams := map[string]string{}
headerParams := map[string]string{}
fields["tunnel_profile_id"] = bindings.NewStringType()
fields["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
fieldNameMap["tunnel_profile_id"] = "TunnelProfileId"
fieldNameMap["ip_sec_vpn_tunnel_profile"] = "IpSecVpnTunnelProfile"
paramsTypeMap["ip_sec_vpn_tunnel_profile"] = bindings.NewReferenceType(model.IPSecVpnTunnelProfileBindingType)
paramsTypeMap["tunnel_profile_id"] = bindings.NewStringType()
paramsTypeMap["tunnelProfileId"] = bindings.NewStringType()
pathParams["tunnel_profile_id"] = "tunnelProfileId"
resultHeaders := map[string]string{}
errorHeaders := map[string]string{}
return protocol.NewOperationRestMetadata(
fields,
fieldNameMap,
paramsTypeMap,
pathParams,
queryParams,
headerParams,
"",
"ip_sec_vpn_tunnel_profile",
"PUT",
"/policy/api/v1/infra/ipsec-vpn-tunnel-profiles/{tunnelProfileId}",
resultHeaders,
200,
errorHeaders,
map[string]int{"InvalidRequest": 400,"Unauthorized": 403,"ServiceUnavailable": 503,"InternalServerError": 500,"NotFound": 404})
}
| psecVpnTunnelProfilesDeleteOutputType( |
jsPlumbHandle.js | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import 'jquery-ui/ui/widgets/draggable'
import 'jquery-ui/ui/widgets/droppable'
import 'jquery-ui/ui/widgets/resizable'
import Vue from 'vue'
import _ from 'lodash'
import i18n from '@/module/i18n'
import { jsPlumb } from 'jsplumb'
import DragZoom from './dragZoom'
import store from '@/conf/home/store'
import router from '@/conf/home/router'
import { uuid, findComponentDownward } from '@/module/util/'
import {
tasksAll,
rtTasksTpl,
setSvgColor,
saveTargetarr,
rtTargetarrArr,
computeScale
} from './util'
import mStart from '@/conf/home/pages/projects/pages/definition/pages/list/_source/start'
let JSP = function () {
this.dag = {}
this.selectedElement = {}
this.config = {
// Whether to drag
isDrag: true,
// Whether to allow connection
isAttachment: false,
// Whether to drag a new node
isNewNodes: true,
// Whether to support double-click node events
isDblclick: true,
// Whether to support right-click menu events
isContextmenu: true,
// Whether to allow click events
isClick: false
}
}
/**
* dag init
*/
JSP.prototype.init = function ({ dag, instance, options }) {
// Get the dag component instance
this.dag = dag
// Get jsplumb instance
this.JspInstance = instance
// Get JSP options
this.options = options || {}
// Register jsplumb connection type and configuration
this.JspInstance.registerConnectionType('basic', {
anchor: 'Continuous',
connector: 'Bezier' // Line type
})
// Initial configuration
this.setConfig({
isDrag: !store.state.dag.isDetails,
isAttachment: false,
isNewNodes: !store.state.dag.isDetails, // Permissions.getAuth() === false ? false : !store.state.dag.isDetails,
isDblclick: true,
isContextmenu: true,
isClick: false
})
// Monitor line click
this.JspInstance.bind('click', e => {
if (this.config.isClick) {
this.connectClick(e)
}
})
// Drag and drop
if (this.config.isNewNodes) {
DragZoom.init()
}
}
/**
* set config attribute
*/
JSP.prototype.setConfig = function (o) {
this.config = Object.assign(this.config, {}, o)
}
/**
* Node binding event
*/
JSP.prototype.tasksEvent = function (selfId) {
const tasks = $(`#${selfId}`)
// Bind right event
tasks.on('contextmenu', e => {
this.tasksContextmenu(e)
return false
})
// Binding double click event
tasks.find('.icos').bind('dblclick', e => {
this.tasksDblclick(e)
})
// Binding click event
tasks.on('click', e => {
this.tasksClick(e)
})
}
/**
* Dag node drag and drop processing
*/
JSP.prototype.draggable = function () {
if (this.config.isNewNodes) {
let selfId
const self = this
$('.toolbar-btn .roundedRect').draggable({
scope: 'plant',
helper: 'clone',
containment: $('.dag-model'),
stop: function (e, ui) {
},
drag: function () {
$('body').find('.tooltip.fade.top.in').remove()
}
})
$('#canvas').droppable({
scope: 'plant',
drop: function (ev, ui) {
let id = 'tasks-' + Math.ceil(Math.random() * 100000) // eslint-disable-line
let scale = computeScale($(this))
scale = scale || 1
// Get mouse coordinates and after scale coordinate
const left = parseInt(ui.offset.left - $(this).offset().left) / scale
const top = parseInt(ui.offset.top - $(this).offset().top) / scale
// Generate template node
$('#canvas').append(rtTasksTpl({
id: id,
name: id,
x: left,
y: top,
isAttachment: self.config.isAttachment,
taskType: findComponentDownward(self.dag.$root, 'dag-chart').dagBarId
}))
// Get the generated node
const thisDom = jsPlumb.getSelector('.statemachine-demo .w')
// Generating a connection node
self.JspInstance.batch(() => {
self.initNode(thisDom[thisDom.length - 1])
})
selfId = id
self.tasksEvent(selfId)
// Only pop up the node form if the DOM structure was generated
if ($(`#${selfId}`).html()) {
// dag event
findComponentDownward(self.dag.$root, 'dag-chart')._createNodes({
id: selfId
})
}
}
})
}
}
/**
* Echo json processing and old data structure processing
*/
JSP.prototype.jsonHandle = function ({ largeJson, locations }) {
_.map(largeJson, v => {
// Generate template
$('#canvas').append(rtTasksTpl({
id: v.id,
name: v.name,
x: locations[v.id].x,
y: locations[v.id].y,
targetarr: locations[v.id].targetarr,
isAttachment: this.config.isAttachment,
taskType: v.type,
runFlag: v.runFlag,
nodenumber: locations[v.id].nodenumber,
successNode: v.conditionResult === undefined ? '' : v.conditionResult.successNode[0],
failedNode: v.conditionResult === undefined ? '' : v.conditionResult.failedNode[0]
}))
// contextmenu event
$(`#${v.id}`).on('contextmenu', e => {
this.tasksContextmenu(e)
return false
})
// dblclick event
$(`#${v.id}`).find('.icos').bind('dblclick', e => {
this.tasksDblclick(e)
})
// click event
$(`#${v.id}`).bind('click', e => {
this.tasksClick(e)
})
})
}
/**
* Initialize a single node
*/
JSP.prototype.initNode = function (el) {
// Whether to drag
if (this.config.isDrag) {
this.JspInstance.draggable(el, {
containment: 'dag-container'
})
}
// Node attribute configuration
this.JspInstance.makeSource(el, {
filter: '.ep',
anchor: 'Continuous',
connectorStyle: {
stroke: '#2d8cf0',
strokeWidth: 2,
outlineStroke: 'transparent',
outlineWidth: 4
},
// Note: enabling connectionType here leaked connections, so it stays disabled
// connectionType: "basic",
extract: {
action: 'the-action'
},
maxConnections: -1
})
// Node connection property configuration
this.JspInstance.makeTarget(el, {
dropOptions: { hoverClass: 'dragHover' },
anchor: 'Continuous',
allowLoopback: false // Disallow a node connecting to itself
})
this.JspInstance.fire('jsPlumbDemoNodeAdded', el)
}
/**
* Node right click menu
*/
JSP.prototype.tasksContextmenu = function (event) {
if (this.config.isContextmenu) {
const routerName = router.history.current.name
// state
const isOne = routerName === 'projects-definition-details' && this.dag.releaseState !== 'NOT_RELEASE'
// hide
const isTwo = store.state.dag.isDetails
const html = [
`<a href="javascript:" id="startRunning" class="${isOne ? '' : 'disbled'}"><em class="ans-icon-play"></em><span>${i18n.$t('Start')}</span></a>`,
`<a href="javascript:" id="editNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-edit"></em><span>${i18n.$t('Edit')}</span></a>`,
`<a href="javascript:" id="copyNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-copy"></em><span>${i18n.$t('Copy')}</span></a>`,
`<a href="javascript:" id="removeNodes" class="${isTwo ? 'disbled' : ''}"><em class="ans-icon-trash"></em><span>${i18n.$t('Delete')}</span></a>`
]
const operationHtml = () => {
return html.join('')
}
| const $contextmenu = $('#contextmenu')
const $name = $(`#${$id}`).find('.name-p').text()
const $left = e.pageX + document.body.scrollLeft - 5
const $top = e.pageY + document.body.scrollTop - 5
$contextmenu.css({
left: $left,
top: $top,
visibility: 'visible'
})
// Action bar
$contextmenu.html('').append(operationHtml)
if (isOne) {
// start run
$('#startRunning').on('click', () => {
const name = store.state.dag.name
const id = router.history.current.params.id
store.dispatch('dag/getStartCheck', { processDefinitionId: id }).then(res => {
const modal = Vue.$modal.dialog({
closable: false,
showMask: true,
escClose: true,
className: 'v-modal-custom',
transitionName: 'opacityp',
render (h) {
return h(mStart, {
on: {
onUpdate () {
modal.remove()
},
close () {
modal.remove()
}
},
props: {
item: {
id: id,
name: name
},
startNodeList: $name,
sourceType: 'contextmenu'
}
})
}
})
}).catch(e => {
Vue.$message.error(e.msg || '')
})
})
}
if (!isTwo) {
// edit node
$('#editNodes').click(ev => {
findComponentDownward(this.dag.$root, 'dag-chart')._createNodes({
id: $id,
type: $(`#${$id}`).attr('data-tasks-type')
})
})
// delete node
$('#removeNodes').click(ev => {
this.removeNodes($id)
})
// copy node
$('#copyNodes').click(res => {
this.copyNodes($id)
})
}
}
}
/**
* Node double click event
*/
JSP.prototype.tasksDblclick = function (e) {
// Open the node editor on double click when enabled
if (this.config.isDblclick) {
const id = $(e.currentTarget.offsetParent).attr('id')
findComponentDownward(this.dag.$root, 'dag-chart')._createNodes({
id: id,
type: $(`#${id}`).attr('data-tasks-type')
})
}
}
/**
* Node click event
*/
JSP.prototype.tasksClick = function (e) {
let $id
const self = this
const $body = $('body')
if (this.config.isClick) {
const $connect = this.selectedElement.connect
$('.w').removeClass('jtk-tasks-active')
$(e.currentTarget).addClass('jtk-tasks-active')
if ($connect) {
setSvgColor($connect, '#2d8cf0')
this.selectedElement.connect = null
}
this.selectedElement.id = $(e.currentTarget).attr('id')
// Unbind copy and paste events
$body.unbind('copy').unbind('paste')
// Copy binding id
$id = self.selectedElement.id
$body.bind({
copy: function () {
$id = self.selectedElement.id
},
paste: function () {
$id && self.copyNodes($id)
}
})
}
}
/**
* Remove binding events
* paste
*/
JSP.prototype.removePaste = function () {
const $body = $('body')
// Unbind copy and paste events
$body.unbind('copy').unbind('paste')
// Remove selected node parameters
this.selectedElement.id = null
// Remove node selection effect
$('.w').removeClass('jtk-tasks-active')
}
/**
* Line click event
*/
JSP.prototype.connectClick = function (e) {
// Set svg color
setSvgColor(e, '#0097e0')
const $id = this.selectedElement.id
if ($id) {
$(`#${$id}`).removeClass('jtk-tasks-active')
this.selectedElement.id = null
}
this.selectedElement.connect = e
}
/**
* toolbarEvent
* @param {Pointer}
*/
JSP.prototype.handleEventPointer = function (is) {
this.setConfig({
isClick: is,
isAttachment: false
})
}
/**
* toolbarEvent
* @param {Line}
*/
JSP.prototype.handleEventLine = function (is) {
const wDom = $('.w')
this.setConfig({
isAttachment: is
})
is ? wDom.addClass('jtk-ep') : wDom.removeClass('jtk-ep')
}
/**
* toolbarEvent
* @param {Remove}
*/
JSP.prototype.handleEventRemove = function () {
const $id = this.selectedElement.id || null
const $connect = this.selectedElement.connect || null
if ($id) {
this.removeNodes(this.selectedElement.id)
} else {
this.removeConnect($connect)
}
// Monitor whether to edit DAG
store.commit('dag/setIsEditDag', true)
}
/**
* Delete node
*/
JSP.prototype.removeNodes = function ($id) {
// Delete node processing(data-targetarr)
_.map(tasksAll(), v => {
let targetarr = v.targetarr.split(',')
if (targetarr.length) {
let newArr = _.filter(targetarr, v1 => v1 !== $id)
$(`#${v.id}`).attr('data-targetarr', newArr.toString())
}
})
// delete node
this.JspInstance.remove($id)
// delete dom
$(`#${$id}`).remove()
// callback onRemoveNodes event
this.options && this.options.onRemoveNodes && this.options.onRemoveNodes($id)
let connects = []
_.map(this.JspInstance.getConnections(), v => {
connects.push({
endPointSourceId: v.sourceId,
endPointTargetId: v.targetId
})
})
// Storage line dependence
store.commit('dag/setConnects', connects)
}
/**
* Delete connection
*/
JSP.prototype.removeConnect = function ($connect) {
if (!$connect) {
return
}
// Remove connections and remove node and node dependencies
let targetId = $connect.targetId
let sourceId = $connect.sourceId
let targetarr = rtTargetarrArr(targetId)
if (targetarr.length) {
targetarr = _.filter(targetarr, v => v !== sourceId)
$(`#${targetId}`).attr('data-targetarr', targetarr.toString())
}
if ($(`#${sourceId}`).attr('data-tasks-type') === 'CONDITIONS') {
$(`#${sourceId}`).attr('data-nodenumber', Number($(`#${sourceId}`).attr('data-nodenumber')) - 1)
}
this.JspInstance.deleteConnection($connect)
this.selectedElement = {}
}
/**
* Copy node
*/
JSP.prototype.copyNodes = function ($id) {
let newNodeInfo = _.cloneDeep(_.find(store.state.dag.tasks, v => v.id === $id))
const newNodePors = store.state.dag.locations[$id]
// Nodes that have not been saved cannot be copied
if (!newNodePors) {
return
}
// Generate random id
const newUuId = `${uuid() + uuid()}`
const id = newNodeInfo.id.length > 8 ? newNodeInfo.id.substr(0, 7) : newNodeInfo.id
const name = newNodeInfo.name.length > 8 ? newNodeInfo.name.substr(0, 7) : newNodeInfo.name
// new id
const newId = `${id || ''}-${newUuId}`
// new name
const newName = `${name || ''}-${newUuId}`
// coordinate x
const newX = newNodePors.x + 100
// coordinate y
const newY = newNodePors.y + 40
// Generate template node
$('#canvas').append(rtTasksTpl({
id: newId,
name: newName,
x: newX,
y: newY,
isAttachment: this.config.isAttachment,
taskType: newNodeInfo.type
}))
// Get the generated node
const thisDom = jsPlumb.getSelector('.statemachine-demo .w')
// Copy node information
newNodeInfo = Object.assign(newNodeInfo, {
id: newId,
name: newName
})
// Add new node
store.commit('dag/addTasks', newNodeInfo)
// Add node location information
store.commit('dag/addLocations', {
[newId]: {
name: newName,
targetarr: '',
nodenumber: 0,
x: newX,
y: newY
}
})
// Generating a connection node
this.JspInstance.batch(() => {
this.initNode(thisDom[thisDom.length - 1])
// Add events to nodes
this.tasksEvent(newId)
})
}
/**
* toolbarEvent
* @param {Screen}
*/
JSP.prototype.handleEventScreen = function ({ item, is }) {
let screenOpen = true
if (is) {
item.icon = 'ans-icon-min'
screenOpen = true
} else {
item.icon = 'ans-icon-max'
screenOpen = false
}
const $mainLayoutModel = $('.main-layout-model')
if (screenOpen) {
$mainLayoutModel.addClass('dag-screen')
} else {
$mainLayoutModel.removeClass('dag-screen')
}
}
/**
* save task
* @param tasks
* @param locations
* @param connects
*/
JSP.prototype.saveStore = function () {
return new Promise((resolve, reject) => {
const connects = []
const locations = {}
const tasks = []
const is = (id) => {
return !!_.filter(tasksAll(), v => v.id === id).length
}
// task
_.map(_.cloneDeep(store.state.dag.tasks), v => {
if (is(v.id)) {
let preTasks = []
let id = $(`#${v.id}`)
let tar = id.attr('data-targetarr')
let idDep = tar ? id.attr('data-targetarr').split(',') : []
if (idDep.length) {
_.map(idDep, v1 => {
preTasks.push($(`#${v1}`).find('.name-p').text())
})
}
let tasksParam = _.assign(v, {
preTasks: preTasks
})
// Sub-workflow has no retries and interval
if (v.type === 'SUB_PROCESS') {
tasksParam = _.omit(tasksParam, ['maxRetryTimes', 'retryInterval'])
}
tasks.push(tasksParam)
}
})
_.map(this.JspInstance.getConnections(), v => {
connects.push({
endPointSourceId: v.sourceId,
endPointTargetId: v.targetId
})
})
_.map(tasksAll(), v => {
locations[v.id] = {
name: v.name,
targetarr: v.targetarr,
nodenumber: v.nodenumber,
x: v.x,
y: v.y
}
})
let targetArrBool = false
_.forEach(locations, item => {
if (item.targetarr) {
targetArrBool = true
return false
}
})
if (connects.length && !targetArrBool) {
Vue.$message.warning(`${i18n.$t('The workflow canvas is abnormal and cannot be saved, please recreate')}`)
return false
}
// Storage node
store.commit('dag/setTasks', tasks)
// Store coordinate information
store.commit('dag/setLocations', locations)
// Storage line dependence
store.commit('dag/setConnects', connects)
resolve({
connects: connects,
tasks: tasks,
locations: locations
})
})
}
/**
* Event processing
*/
JSP.prototype.handleEvent = function () {
this.JspInstance.bind('beforeDrop', function (info) {
console.log(info)
const rtTargetArr = (id) => {
let ids = $(`#${id}`).attr('data-targetarr')
return ids ? ids.split(',') : []
}
let sourceId = info['sourceId'] // source (outgoing) node
let targetId = info['targetId'] // target (incoming) node
console.log(sourceId, targetId)
let rtTargetArrs = rtTargetArr(targetId)
let rtSouceArrs = rtTargetArr(sourceId)
/**
* Recursive search for nodes
*/
let recursiveVal
const recursiveTargetarr = (arr, targetId) => {
for (let i in arr) {
if (arr[i] === targetId) {
recursiveVal = targetId
} else {
let targetArr = rtTargetArr(arr[i])
recursiveTargetarr(targetArr, targetId)
}
}
return recursiveVal
}
// Connection to connected nodes is not allowed
if (_.findIndex(rtTargetArrs, v => v === sourceId) !== -1) {
console.log(rtTargetArrs,'not allowed')
return false
}
// Recursive form to find if the target Targetarr has a sourceId
if (recursiveTargetarr(rtSouceArrs, targetId)) {
console.log('has a sourceId')
return false
}
if ($(`#${sourceId}`).attr('data-tasks-type') === 'CONDITIONS' && parseInt($(`#${sourceId}`).attr('data-nodenumber')) === 2) {
return false
} else {
console.log('data-nodenumber')
$(`#${sourceId}`).attr('data-nodenumber', parseInt($(`#${sourceId}`).attr('data-nodenumber')) + 1)
}
// Storage node dependency information
saveTargetarr(sourceId, targetId)
// Monitor whether to edit DAG
store.commit('dag/setIsEditDag', true)
return true
})
}
/**
* Backfill data processing
*/
JSP.prototype.jspBackfill = function ({ connects, locations, largeJson }) {
// Backfill nodes
this.jsonHandle({
largeJson: largeJson,
locations: locations
})
const wNodes = jsPlumb.getSelector('.statemachine-demo .w')
// Backfill line
this.JspInstance.batch(() => {
for (let i = 0; i < wNodes.length; i++) {
this.initNode(wNodes[i])
}
_.map(connects, v => {
let sourceId = v.endPointSourceId.split('-')
let targetId = v.endPointTargetId.split('-')
if (sourceId.length === 4 && targetId.length === 4) {
sourceId = `${sourceId[0]}-${sourceId[1]}-${sourceId[2]}`
targetId = `${targetId[0]}-${targetId[1]}-${targetId[2]}`
} else {
sourceId = v.endPointSourceId
targetId = v.endPointTargetId
}
if($(`#${sourceId}`).attr('data-tasks-type') === 'CONDITIONS' && $(`#${sourceId}`).attr('data-successnode') === $(`#${targetId}`).find('.name-p').text()) {
this.JspInstance.connect({
source: sourceId,
target: targetId,
type: 'basic',
paintStyle: { strokeWidth: 2, stroke: '#4caf50' },
hoverPaintStyle: {stroke: '#ccc', strokeWidth: 3},
overlays:[["Label", { label: i18n.$t('success'), location:0.5, id:"label"} ]]
})
} else if($(`#${sourceId}`).attr('data-tasks-type') === 'CONDITIONS' && $(`#${sourceId}`).attr('data-failednode') === $(`#${targetId}`).find('.name-p').text()) {
this.JspInstance.connect({
source: sourceId,
target: targetId,
type: 'basic',
paintStyle: { strokeWidth: 2, stroke: '#252d39' },
hoverPaintStyle: {stroke: '#ccc', strokeWidth: 3},
overlays:[["Label", { label: i18n.$t('failed'), location:0.5, id:"label"} ]]
})
} else {
this.JspInstance.connect({
source: sourceId,
target: targetId,
type: 'basic',
paintStyle: { strokeWidth: 2, stroke: '#2d8cf0' },
hoverPaintStyle: {stroke: '#ccc', strokeWidth: 3}
})
}
})
})
jsPlumb.fire('jsPlumbDemoLoaded', this.JspInstance)
// Connection monitoring
this.handleEvent()
// Drag and drop new nodes
this.draggable()
}
export default new JSP() | const e = event
const $id = e.currentTarget.id |
utils_test.go | // Copyright (c) 2017 Intel Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
package utils
import (
"io/ioutil"
"os"
"path/filepath"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
)
func TestFileCopySuccessful(t *testing.T) {
fileContent := "testContent"
srcFile, err := ioutil.TempFile("", "test_src_copy")
if err != nil {
t.Fatal(err)
}
defer os.Remove(srcFile.Name())
defer srcFile.Close()
dstFile, err := ioutil.TempFile("", "test_dst_copy")
if err != nil {
t.Fatal(err)
}
defer os.Remove(dstFile.Name())
dstPath := dstFile.Name()
if err := dstFile.Close(); err != nil {
t.Fatal(err)
}
if _, err := srcFile.WriteString(fileContent); err != nil {
t.Fatal(err)
}
if err := FileCopy(srcFile.Name(), dstPath); err != nil {
t.Fatal(err)
}
dstContent, err := ioutil.ReadFile(dstPath)
if err != nil {
t.Fatal(err)
}
if string(dstContent) != fileContent {
t.Fatalf("Got %q\nExpecting %q", string(dstContent), fileContent)
}
srcInfo, err := srcFile.Stat()
if err != nil {
t.Fatal(err)
}
dstInfo, err := os.Stat(dstPath)
if err != nil {
t.Fatal(err)
}
if dstInfo.Mode() != srcInfo.Mode() {
t.Fatalf("Got FileMode %d\nExpecting FileMode %d", dstInfo.Mode(), srcInfo.Mode())
}
if dstInfo.IsDir() != srcInfo.IsDir() {
t.Fatalf("Got IsDir() = %t\nExpecting IsDir() = %t", dstInfo.IsDir(), srcInfo.IsDir())
}
if dstInfo.Size() != srcInfo.Size() {
t.Fatalf("Got Size() = %d\nExpecting Size() = %d", dstInfo.Size(), srcInfo.Size())
}
}
func TestFileCopySourceEmptyFailure(t *testing.T) {
if err := FileCopy("", "testDst"); err == nil {
t.Fatal("This test should fail because source path is empty")
}
}
func TestFileCopyDestinationEmptyFailure(t *testing.T) {
if err := FileCopy("testSrc", ""); err == nil {
t.Fatal("This test should fail because destination path is empty")
}
}
func TestFileCopySourceNotExistFailure(t *testing.T) {
srcFile, err := ioutil.TempFile("", "test_src_copy")
if err != nil {
t.Fatal(err)
}
srcPath := srcFile.Name()
if err := srcFile.Close(); err != nil {
t.Fatal(err)
}
if err := os.Remove(srcPath); err != nil {
t.Fatal(err)
}
if err := FileCopy(srcPath, "testDest"); err == nil {
t.Fatal("This test should fail because source file does not exist")
}
}
func TestGenerateRandomBytes(t *testing.T) {
bytesNeeded := 8
randBytes, err := GenerateRandomBytes(bytesNeeded)
if err != nil {
t.Fatal(err)
}
if len(randBytes) != bytesNeeded {
t.Fatalf("Failed to generate %d random bytes", bytesNeeded)
}
}
func TestReverseString(t *testing.T) {
str := "Teststr"
reversed := ReverseString(str)
if reversed != "rtstseT" {
t.Fatal("Incorrect String Reversal")
}
}
func TestWriteToFile(t *testing.T) {
err := WriteToFile("/file-does-not-exist", []byte("test-data"))
assert.NotNil(t, err)
tmpFile, err := ioutil.TempFile("", "test_append_file")
assert.Nil(t, err)
filename := tmpFile.Name()
defer os.Remove(filename)
tmpFile.Close()
testData := []byte("test-data")
err = WriteToFile(filename, testData)
assert.Nil(t, err)
data, err := ioutil.ReadFile(filename)
assert.Nil(t, err)
assert.True(t, reflect.DeepEqual(testData, data))
}
func TestConstraintsToVCPUs(t *testing.T) |
func TestGetVirtDriveNameInvalidIndex(t *testing.T) {
_, err := GetVirtDriveName(-1)
if err == nil {
t.Fatal("This test should fail because the drive index is negative")
}
}
func TestGetVirtDriveName(t *testing.T) {
tests := []struct {
index int
expectedDrive string
}{
{0, "vda"},
{25, "vdz"},
{27, "vdab"},
{704, "vdaac"},
{18277, "vdzzz"},
}
for _, test := range tests {
driveName, err := GetVirtDriveName(test.index)
if err != nil {
t.Fatal(err)
}
if driveName != test.expectedDrive {
t.Fatalf("Incorrect drive Name: Got: %s, Expecting :%s", driveName, test.expectedDrive)
}
}
}
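// A sketch of the bijective base-26 scheme the table above exercises
// (an assumption about GetVirtDriveName's internals, not a copy of them):
//
//	name := ""
//	for i := index; i >= 0; i = i/26 - 1 {
//		name = string(rune('a'+i%26)) + name
//	}
//	return "vd" + name, nil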
func TestGetSCSIIdLun(t *testing.T) {
tests := []struct {
index int
expectedScsiID int
expectedLun int
}{
{0, 0, 0},
{1, 0, 1},
{2, 0, 2},
{255, 0, 255},
{256, 1, 0},
{257, 1, 1},
{258, 1, 2},
{512, 2, 0},
{513, 2, 1},
}
for _, test := range tests {
scsiID, lun, err := GetSCSIIdLun(test.index)
assert.Nil(t, err)
if scsiID != test.expectedScsiID && lun != test.expectedLun {
t.Fatalf("Expecting scsi-id:lun %d:%d, Got %d:%d", test.expectedScsiID, test.expectedLun, scsiID, lun)
}
}
_, _, err := GetSCSIIdLun(maxSCSIDevices + 1)
assert.NotNil(t, err)
}
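// The pairs above follow the presumed mapping scsiID = index / 256,
// lun = index % 256, i.e. at most 256 LUNs per SCSI target.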
func TestGetSCSIAddress(t *testing.T) {
tests := []struct {
index int
expectedSCSIAddress string
}{
{0, "0:0"},
{200, "0:200"},
{255, "0:255"},
{258, "1:2"},
{512, "2:0"},
}
for _, test := range tests {
scsiAddr, err := GetSCSIAddress(test.index)
assert.Nil(t, err)
assert.Equal(t, scsiAddr, test.expectedSCSIAddress)
}
}
func TestBuildSocketPath(t *testing.T) {
assert := assert.New(t)
type testData struct {
elems []string
valid bool
expected string
}
longPath := strings.Repeat("/a", 106/2)
longestPath := longPath + "a"
pathTooLong := filepath.Join(longestPath, "x")
data := []testData{
{[]string{""}, false, ""},
{[]string{"a"}, true, "a"},
{[]string{"/a"}, true, "/a"},
{[]string{"a", "b", "c"}, true, "a/b/c"},
{[]string{"a", "/b", "c"}, true, "a/b/c"},
{[]string{"/a", "b", "c"}, true, "/a/b/c"},
{[]string{"/a", "/b", "/c"}, true, "/a/b/c"},
{[]string{longPath}, true, longPath},
{[]string{longestPath}, true, longestPath},
{[]string{pathTooLong}, false, ""},
}
for i, d := range data {
result, err := BuildSocketPath(d.elems...)
if d.valid {
assert.NoErrorf(err, "test %d, data %+v", i, d)
} else {
assert.Errorf(err, "test %d, data %+v", i, d)
}
assert.NotNil(result)
assert.Equal(d.expected, result)
}
}
| {
assert := assert.New(t)
vcpus := ConstraintsToVCPUs(0, 100)
assert.Zero(vcpus)
vcpus = ConstraintsToVCPUs(100, 0)
assert.Zero(vcpus)
expectedVCPUs := uint(4)
vcpus = ConstraintsToVCPUs(4000, 1000)
assert.Equal(expectedVCPUs, vcpus)
vcpus = ConstraintsToVCPUs(4000, 1200)
assert.Equal(expectedVCPUs, vcpus)
} |
RobotLibrary.py | ########################################################################
# Copyright 2019 Roku, Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
########################################################################
from robot.api.deco import keyword
from Library.webDriver import WebDriver
from robot.libraries.BuiltIn import BuiltIn
from time import sleep
from robot.api import logger
import subprocess
import json
from datetime import datetime, timedelta
class RobotLibrary:
ROBOT_LIBRARY_SCOPE = 'GLOBAL'
ROBOT_LISTENER_API_VERSION = 2
def __init__(self, ip, timeout = 0, pressDelay = 0, path = ""):
self._process = None
if len(path) > 0:
self._process = subprocess.Popen(path)
self.ROBOT_LIBRARY_LISTENER = self
self._client = WebDriver(ip, timeout, pressDelay)
self.markTimer()
def close(self):
self._client.quiet()
if self._process is not None:
self._process.kill()
@keyword("Mark timer")
def markTimer(self):
self._startTime = datetime.now()
@keyword("Get timer")
def getTimer(self):
currentTime = datetime.now()
delta = currentTime - self._startTime
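# Dividing a timedelta by timedelta(milliseconds=1) yields the elapsed time in milliseconds as a float.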
return int(delta / timedelta(milliseconds=1))
@keyword("Side load")
def sideLoad(self, path, user, password):
multipart_form_data = {
'channel': ('channel.zip', open(path, 'rb')),
'username': (None, user),
'password': (None, password)
}
response = self._client.side_load(multipart_form_data)
self._checkResponse(response)
@keyword("Launch the channel")
def launchTheChannel(self, channel_code, contentId = "", mediaType = ""):
launch_response = self._client.send_launch_channel(channel_code, contentId, mediaType)
self._checkResponse(launch_response)
@keyword("Get apps")
def getApps(self):
apps_response = self._client.get_apps()
self._checkResponse(apps_response)
res = json.loads(apps_response.text)
return res['value']
@keyword("Verify is channel exist")
def verifyIsChannelExist(self, apps, id):
for app in apps:
if app['ID'] == id:
return True
raise Exception("Channel doesn't exist")
@keyword("Verify is screen loaded")
def verifyIsScreenLoaded(self, data: object, retries = 10, delay = 1):
while retries > 0:
ui_layout_response = self._client.get_ui_element(data)
if ui_layout_response.status_code != 200:
retries -= 1
sleep(delay)
else:
return True
raise Exception("Can't find element")
@keyword("Send key")
def pressBtn(self, key_press: str, delay = 2):
sleep(delay)
key_press_response = self._client.send_keypress(key_press)
self._checkResponse(key_press_response)
@keyword("Send word")
def sendWord(self, word: str, delay = 2):
sleep(delay)
for c in word:
sleep(0.5)
key_press_response = self._client.send_keypress(f"LIT_{c}")
self._checkResponse(key_press_response)
@keyword("Send keys")
def sendButtonSequence(self, sequence, delay = 2):
sleep(delay)
key_press_response = self._client.send_sequence(sequence)
self._checkResponse(key_press_response)
@keyword("Get element")
def getElement(self, data: object, delay = 1):
sleep(delay)
ui_layout_response = self._client.get_ui_element(data)
self._checkResponse(ui_layout_response)
res = json.loads(ui_layout_response.text)
return res['value']
@keyword("Get elements")
def getElements(self, data: object, delay = 1):
sleep(delay)
ui_layout_response = self._client.get_ui_elements(data)
self._checkResponse(ui_layout_response)
res = json.loads(ui_layout_response.text)
return res['value']
@keyword("Get focused element")
def getFocusedElement(self):
ui_layout_response = self._client.get_active_element()
self._checkResponse(ui_layout_response)
res = json.loads(ui_layout_response.text)
return res['value']
@keyword("Verify is channel loaded")
def verifyIsChannelLoaded(self, id, retries = 10, delay = 1):
while retries > 0:
app_response = self._client.get_current_app()
self._checkResponse(app_response)
res = json.loads(app_response.text)
if res['value']['ID'] != id:
retries -= 1
sleep(delay)
else:
return True
raise Exception("Channel isn't launched")
@keyword("Get current channel info")
def getCurrentChannelInfo(self):
app_response = self._client.get_current_app()
self._checkResponse(app_response)
res = json.loads(app_response.text)
return res['value']
@keyword("Get device info")
def getDeviceInfo(self): | self._checkResponse(response)
res = json.loads(response.text)
return res['value']
@keyword("Get player info")
def getPlayerInfo(self):
response = self._client.get_player_info()
self._checkResponse(response)
res = json.loads(response.text)
value = res['value']
value['Position'] = int(self._getMsFromString(value['Position']))
value['Duration'] = int(self._getMsFromString(value['Duration']))
return value
@keyword("Verify is playback started")
def verifyIsPlaybackStarted(self, retries = 10, delay = 1):
while retries > 0:
response = self._client.get_player_info()
if response.status_code != 200 or json.loads(response.text)['value']['State'] != 'play':
retries -= 1
sleep(delay)
else:
return True
raise Exception("Invalid player state")
@keyword("Set timeout")
def setTimeout(self, timeout: int):
response = self._client.set_timeouts("implicit", timeout)
self._checkResponse(response)
@keyword("Set press delay")
def setDelay(self, delay: int):
response = self._client.set_timeouts("pressDelay", delay)
self._checkResponse(response)
@keyword("Get attribute")
def getAttribute(self, element, attr):
for attrObj in element['Attrs']:
if attrObj['Name']["Local"] == attr:
return attrObj['Value']
raise Exception("Can't find attribute")
@keyword("Input deep linking data")
def inputDeepLinkingData(self, channelId, contentId, mediaType):
launch_response = self._client.send_input_data(channelId, contentId, mediaType)
self._checkResponse(launch_response)
def _checkResponse(self, response):
if response.status_code == 400:
raise Exception(response.text)
elif response.status_code != 200:
res = json.loads(response.text)
raise Exception(res['value']['message'])
def _getMsFromString(self, time_str):
data = time_str.split(' ')
return data[0]
ScrollerWithCustomProps.js | import React, { forwardRef } from 'react';
import ScrollableContentWrapper from '../../components/ScrollableContentWrapper';
const ScrollerWithCustomProps = forwardRef(function ScrollerWithCustomProps(props, ref) {
return (
<ScrollableContentWrapper | renderTrackHorizontal={(props) => <div {...props} style={{ display: 'none' }} className='track-horizontal' />}
/>
);
});
export default ScrollerWithCustomProps; | {...props}
ref={ref}
renderView={({ style, ...props }) => <div {...props} style={{ ...style }} />} |
data_prepare.py | # 1 read file
# 2 clean data
# 3 tokenize
# 4 eliminate stop words
# 5 calculate tfidf matrix
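# Illustrative sketch of steps 1-5 (assumes scikit-learn is available; not part of the original file):
# from sklearn.feature_extraction.text import TfidfVectorizer
# docs = open(file_path).read().splitlines()                        # steps 1-2
# tfidf = TfidfVectorizer(stop_words='english').fit_transform(docs) # steps 3-5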
def | (file_path):
| read_file |
recipe-475169.py | #!/usr/bin/env python
# mixnmatch.py - find combination of files/dirs that sum below a given threshold
# -- Jose Fonseca
import os
import os.path
import optparse
import sys
from sets import ImmutableSet as set
def get_size(path, block_size):
if os.path.isdir(path):
result = 0
for name in os.listdir(path):
size = get_size(os.path.join(path, name), block_size)
size = (size + block_size - 1)//block_size*block_size
result += size
return result
else:
return os.path.getsize(path)
def mix_and_match(limit, items, verbose = False):
# filter items
items = [(size, name) for size, name in items if size <= limit]
# sort them by size
items.sort(lambda (xsize, xname), (ysize, yname): cmp(xsize, ysize))
# initialize variables
added_collections = dict([(set([name]), size) for size, name in items])
collections = added_collections
while True:
if verbose:
sys.stderr.write("%d\n" % len(collections))
# find unique combinations of the recent collections
new_collections = {}
for names1, size1 in added_collections.iteritems():
for size2, name2 in items:
size3 = size1 + size2
if size3 > limit:
# we can break here as all collections that follow are
# bigger in size due to the sorting above
break
if name2 in names1:
continue
names3 = names1.union(set([name2]))
if names3 in new_collections:
continue
new_collections[names3] = size3
if len(new_collections) == 0:
break
collections.update(new_collections)
added_collections = new_collections
return [(size, names) for names, size in collections.iteritems()]
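# Illustrative example (hypothetical sizes): with limit=10 and
# items=[(4, 'a'), (6, 'b'), (7, 'c')], the returned collections are the three
# singletons plus (10, set(['a', 'b'])); 4+7 and 6+7 exceed the limit.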
def main():
|
if __name__ == '__main__':
main()
| parser = optparse.OptionParser(usage="\n\t%prog [options] path ...")
parser.add_option(
'-l', '--limit',
type="int", dest="limit", default=4700000000,
help="total size limit")
parser.add_option(
'-B', '--block-size',
type="int", dest="size", default=2048,
help="use this block size")
parser.add_option(
'-s', '--show',
type="int", dest="show", default=10,
help="number of combinations to show")
parser.add_option(
'-v', '--verbose',
action="store_true", dest="verbose", default=False,
help="verbose output")
(options, args) = parser.parse_args(sys.argv[1:])
limit = options.limit
block_size = options.size
items = [(get_size(arg, block_size), arg) for arg in args]
collections = mix_and_match(limit, items, options.verbose)
collections.sort(lambda (xsize, xnames), (ysize, ynames): cmp(xsize, ysize))
if options.show != 0:
collections = collections[-options.show:]
for size, names in collections:
percentage = 100.0*float(size)/float(limit)
try:
sys.stdout.write("%10d\t%02.2f%%\t%s\n" % (size, percentage, " ".join(names)))
except IOError:
# ignore broken pipe
pass |
__init__.py | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
# | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
__version__ = pbr.version.VersionInfo('python-freezerclient').version_string() | # http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT |
MyVariant.js | import React, { PropTypes } from 'react';
import {Link} from 'react-router-dom';
import { connect } from 'react-redux';
import axios from 'axios';
import CountUp from 'react-countup';
import Chart from './Chart';
import Map from './Map';
class MyVariant extends React.Component {
constructor(props) {
super(props);
this.state={
sessionsURL:'https://gasuperproxy-1470690417190.appspot.com/query?id=ahxzfmdhc3VwZXJwcm94eS0xNDcwNjkwNDE3MTkwchULEghBcGlRdWVyeRiAgICA-MKECgw',
analyticsURL: 'https://gasuperproxy-1470690417190.appspot.com/query?id=ahxzfmdhc3VwZXJwcm94eS0xNDcwNjkwNDE3MTkwchULEghBcGlRdWVyeRiAgIDA05CWCQw',
realtimeURL:'https://gasuperproxy-1470690417190.appspot.com/query?id=ahxzfmdhc3VwZXJwcm94eS0xNDcwNjkwNDE3MTkwchULEghBcGlRdWVyeRiAgICgwteGCgw',
pagesURL:'https://gasuperproxy-1470690417190.appspot.com/query?id=ahxzfmdhc3VwZXJwcm94eS0xNDcwNjkwNDE3MTkwchULEghBcGlRdWVyeRiAgIDAk4eHCgw',
actionsURL:'https://gasuperproxy-1470690417190.appspot.com/query?id=ahxzfmdhc3VwZXJwcm94eS0xNDcwNjkwNDE3MTkwchULEghBcGlRdWVyeRiAgIDA05CWCgw',
activeUsers: 0,
totalUsers: 0,
results:[],
lastActiveUsers:0,
mapData:[],
pages: [],
activeUsersHistory:[],
timer: null,
devices:[],
totalSessions: 0,
}
this.fetchAnalyticsData = this.fetchAnalyticsData.bind(this);
this.fetchRealtimeUsers = this.fetchRealtimeUsers.bind(this);
this.shapeMapData = this.shapeMapData.bind(this);
// this.addComma = this.addComma.bind(this);
this.getUniqueItemsInTopPages = this.getUniqueItemsInTopPages.bind(this);
this.drawPages = this.drawPages.bind(this);
this.drawActions = this.drawActions.bind(this);
}
// addComma(number){
// return number.toString().replace(/\B(?=(\d{3})+(?!\d))/g, ",");
// }
getUniqueItemsInTopPages(list){
let pages =[]
for (var i = 0; i < this.state.results.length; i++) {
if (this.state.results[i][5] === '/') {
this.state.results[i][5] = 'mygene.info/'
}
pages.push(this.state.results[i][5])
}
pages = pages.filter((x, i, a) => a.indexOf(x) == i)
this.setState({
'pages': pages
})
}
drawPages(){
axios.get(this.state.pagesURL).then(response=>{
let res =[];
let arr = response.data.rows;
res.push(['Endpoint', 'Sessions']);
for (var i = 0; i < arr.length; i++) {
res.push([arr[i][0],parseFloat(arr[i][1])]);
}
google.charts.load('current', {packages: ['corechart', 'bar']});
google.charts.setOnLoadCallback(drawBasic);
function drawBasic() {
var data = google.visualization.arrayToDataTable(res);
var options = {
legend: {position: 'none'},
title: 'Top 5 Endpoints',
chartArea: {width: '50%'},
hAxis: {
title: 'Sessions',
minValue: 0
},
vAxis: {
title: 'Endpoint'
},
'tooltip' : {
isHtml: true
}
};
var chart = new google.visualization.BarChart(document.getElementById('chart_pages'));
chart.draw(data, options);
}
})
}
drawActions(){
axios.get(this.state.actionsURL).then(response=>{
let res =[];
let arr = response.data.rows;
res.push(['Action', 'Sessions']);
for (var i = 0; i < arr.length; i++) {
res.push([arr[i][0],parseFloat(arr[i][1])]);
}
google.charts.load('current', {packages: ['corechart', 'bar']});
google.charts.setOnLoadCallback(drawBasic);
function drawBasic() {
var data = google.visualization.arrayToDataTable(res);
var options = {
legend: {position: 'none'},
title: 'Top 5 Requests',
chartArea: {width: '50%'},
hAxis: {
title: 'Sessions',
minValue: 0
},
vAxis: {
title: 'Request'
},
'tooltip' : {
isHtml: true
}
};
var chart = new google.visualization.BarChart(document.getElementById('chart_actions'));
chart.draw(data, options);
}
})
}
fetchAnalyticsData(){
var self = this;
axios.get(self.state.analyticsURL).then(res=>{
this.setState({
'results': res.data
})
this.shapeMapData();
}).catch(err=>{
throw err;
})
axios.get(self.state.sessionsURL).then(res=>{
let users = parseInt(res.data['totalsForAllResults']['ga:sessions']);
this.props.pushReqData(users);
this.setState({
'totalSessions': users
})
}).catch(err=>{
throw err;
})
}
fetchRealtimeUsers(){
axios.get(this.state.realtimeURL).then(response=>{
let users = parseInt(response.data.totalsForAllResults['rt:activeUsers']);
this.setState({
activeUsers: users
});
this.state.activeUsersHistory.push(users)
if (this.state.activeUsersHistory.length > 10) {
this.state.activeUsersHistory.shift();
}
this.props.updateHistory(users);
this.props.sendChartData(this.props.mvHistory);
}).catch(err=>{
throw err;
})
}
shapeMapData(){
let res =[]
let arr = this.state.results.rows;
for (var i = 0; i < arr.length; i++) {
let lat = parseFloat(arr[i][3]);
let long = parseFloat(arr[i][2]);
let obj ={'api':'MyVariant','name': arr[i][1]+', '+arr[i][0],'coordinates':[lat,long],'users': arr[i][4] };
res.push(obj);
}
this.setState({
'mapData': res
});
this.props.sendMapData(this.state.mapData);
this.props.pushMapData(this.state.mapData);
}
componentDidMount(){
var self = this;
this.fetchAnalyticsData();
this.fetchRealtimeUsers();
this.drawPages();
this.drawActions();
this.timer = setInterval(function(){
self.setState({
lastActiveUsers: self.state.activeUsers
})
self.fetchRealtimeUsers();
}, 60000);
}
componentWillUnmount() {
clearInterval(this.timer);
this.props.sendMapData([]);
}
render() {
return (
<section className="margin0Auto padding20 centerText mV-back2">
<div className="container">
<div className="row">
<div className="col-sm-12 col-md-12 col-lg-12">
<img src="/static/img/screw.png" className="screwTopRight"/>
<img src="/static/img/screw.png" className="screwTopLeft"/>
<img src="/static/img/screw.png" className="screwBottomRight"/>
<img src="/static/img/screw.png" className="screwBottomLeft"/>
<div className=" row activeUsersBoxTest">
<div className="col-sm-12 col-md-4 col-lg-4">
<img src="/static/img/myvariant-text.png" width="300px" className="margin20 dropShadow"/>
<h4 className="whiteText">Active Users Right Now</h4>
<CountUp className="whiteText activeUsers-MyVariant"
start={this.state.lastActiveUsers}
end={this.state.activeUsers}
duration={3}
separator=","/>
</div>
<Chart panel="MyVariant" className="col-sm-12 col-md-4 col-lg-4"/>
<div className="col-sm-12 col-md-4 col-lg-4 text-center">
<button style={{'marginBottom':'10px'}} className='btn btn-outline-dark refreshBtn' onClick={this.fetchAnalyticsData}>Refresh</button>
<h1 className="text-muted whiteGlass font-weight-bold">
<CountUp className="text-muted"
start={0}
end={this.state.totalSessions}
duration={3}
separator=","/>
</h1>
<h5 style={{color:'#b1b1b1'}}>
Requests in the Last 30 Days
</h5>
</div>
</div>
</div>
<div id='charts' className='activeUsersBoxTest col-sm-12 col-md-12 col-lg-12' style={{display:'flex', flexWrap:'wrap'}}>
<img src="/static/img/screw.png" className="screwTopRight"/>
<img src="/static/img/screw.png" className="screwTopLeft"/>
<img src="/static/img/screw.png" className="screwBottomRight"/>
<img src="/static/img/screw.png" className="screwBottomLeft"/>
<div id="chart_pages" style={{flex:1}}></div>
<div id="chart_actions" style={{flex:1}}></div>
</div>
<div className='activeUsersBoxTest col-sm-12 col-md-12 col-lg-12 mapContainer'>
<img src="/static/img/screw.png" className="screwTopRight"/>
<img src="/static/img/screw.png" className="screwTopLeft"/>
<img src="/static/img/screw.png" className="screwBottomRight"/>
<img src="/static/img/screw.png" className="screwBottomLeft"/>
<Map color='#83fb48' api='MV'/>
</div>
</div>
</div>
</section>
);
}
}
function mapStateToProps(state) {
return {
//will make the user object from redux store available as props.user to this component
user : state.user,
mvHistory: state.mvHistory
}
}
function | (dispatch) {
return {
sendMapData: (value)=>{
const action = {type: "UPDATE-MAP", payload: value};
dispatch(action);
},
sendMap100Users: (value)=>{
const action = {type: "UPDATE-MAP-100", payload: value};
dispatch(action);
},
sendChartData: (value)=>{
const action = {type: "UPDATE-CHART", payload: value};
dispatch(action);
},
updateHistory: (value)=>{
const action = {type: "PUSH-TO-MVHISTORY", payload: value};
dispatch(action);
},
pushReqData: (value)=>{
const action = {type: "MV-REQUESTS", payload: value};
dispatch(action);
},
pushMapData: (value)=>{
const action = {type: "MV-MAP", payload: value};
dispatch(action);
}
}
}
export default connect(
mapStateToProps, mapDispatchToProps
)(MyVariant)
| mapDispatchToProps |
task_5.py | maxdivider = 20
def task5(maxdivider):
num = 11
test = 1
while test != 0:
test = 0
for div in range(1, maxdivider + 1):
test += num%div
num += 1
return num - 1 |
print(task5(maxdivider)) |
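# Illustrative alternative (assumes Python 3.5+ for math.gcd; not part of the original):
# the same answer via the LCM of 1..maxdivider, avoiding the brute-force scan.
# from math import gcd
# from functools import reduce
# print(reduce(lambda a, b: a * b // gcd(a, b), range(1, maxdivider + 1), 1))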
|
import_manager.py | from app.lib.dns.helpers.shared import SharedHelper
import os
import datetime
import json
import progressbar
from app import db
class DNSImportManager(SharedHelper):
IMPORT_TYPE_ZONE = 1
IMPORT_TYPE_RECORD = 2
@property
def last_error(self):
return self.__last_error
@last_error.setter
def last_error(self, value):
self.__last_error = value
def __init__(self, dns_zones, dns_records, users):
self.__last_error = ''
self.__dns_zones = dns_zones
self.__dns_records = dns_records
self.__zone_headers = ['domain', 'active', 'catch_all', 'forwarding', 'regex', 'master', 'tags']
self.__record_headers = ['domain', 'id', 'ttl', 'cls', 'type', 'active', 'data', 'is_conditional', 'conditional_count', 'conditional_limit', 'conditional_reset', 'conditional_data']
self.__users = users
def identify(self, csvfile):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
header = self._load_csv_header(csvfile)
zone_header_count = 0
record_header_count = 0
for column in header:
if column in self.__zone_headers:
zone_header_count += 1
if column in self.__record_headers:
record_header_count += 1
if zone_header_count == len(self.__zone_headers):
return self.IMPORT_TYPE_ZONE
elif record_header_count == len(self.__record_headers):
return self.IMPORT_TYPE_RECORD
self.last_error = 'If you are uploading a ZONE file these are the required columns: {0}. If you are uploading a RECORD file then the required columns are: {1}.'.format(', '.join(self.__zone_headers), ', '.join(self.__record_headers))
return False
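# For reference, a zone CSV is expected to start with the header row
# (matching __zone_headers above):
#   domain,active,catch_all,forwarding,regex,master,tags
# and a record CSV with (matching __record_headers):
#   domain,id,ttl,cls,type,active,data,is_conditional,conditional_count,conditional_limit,conditional_reset,conditional_data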
def review(self, csvfile, type, user_id, show_progressbar=False):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
lines = self._load_csv(csvfile)
if len(lines) == 0:
self.last_error = 'CSV is empty'
return False
user = self.__users.get_user(user_id)
if not user:
self.last_error = 'Could not find user with ID {0}'.format(user_id)
return False
all_errors = []
errors = []
rows = []
if type == self.IMPORT_TYPE_ZONE:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_zones(rows, user, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_records(rows, user, show_progressbar=show_progressbar)
all_errors += errors
# Sort errors per row number.
all_errors = sorted(all_errors, key=lambda k: k['row'])
return {
'data': rows,
'errors': all_errors
}
def run(self, data, type, user_id, show_progressbar=False):
errors = []
if type == self.IMPORT_TYPE_ZONE:
self.__import_zones(data, user_id, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
self.__import_records(data, user_id, errors, show_progressbar=show_progressbar)
return errors if len(errors) > 0 else True
def __import_zones(self, zones, user_id, show_progressbar=False, batch_size=100):
"""
This function has been heavily optimised: when I first tried to import 250k domains its ETA was 1.5h, which
isn't very practical. The main assumption made here is that by the time this function is called, all
validation checks will have already been completed.
"""
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
count = 0
unique_tags = []
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
self.__zone_update_or_create(
zone_to_import['domain'],
zone_to_import['active'],
zone_to_import['catch_all'],
zone_to_import['forwarding'],
zone_to_import['regex'],
zone_to_import['master'],
user_id,
id=zone_to_import['id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
unique_tags = list(set(unique_tags + zone_to_import['tags']))
db.session.commit()
if show_progressbar:
widget[0] = progressbar.FormatLabel('Re-mapping zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user_id)
zone_ids = []
i = 0
for zone_to_import in list(zones):
i += 1
bar.update(i) if show_progressbar else False
zone_to_import['id'] = domain_mapping[zone_to_import['domain']] if zone_to_import['domain'] in domain_mapping else 0
zone_ids.append(zone_to_import['id'])
self.__zone_clear_tags(zone_ids, show_progressbar=show_progressbar, widget=widget)
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing tags')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
self.__tags_create(user_id, unique_tags)
tag_mapping = self.__get_tag_mapping(user_id)
count = 0
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
tags = {}
for tag in zone_to_import['tags']:
tags[tag] = tag_mapping[tag]
self.__zone_save_tags(zone_to_import['id'], tags, autocommit=False)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __import_records(self, records, user_id, errors, show_progressbar=False, batch_size = 100):
domain_mapping = self.__get_domain_mapping(user_id)
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
count = 0
for record_to_import in records:
count += 1
bar.update(count) if show_progressbar else False
# First, get the zone.
zone_id = domain_mapping[record_to_import['domain']] if record_to_import['domain'] in domain_mapping else None
if not zone_id:
# At this point all zones should exist.
errors.append('Could not find zone: {0}'.format(record_to_import['domain']))
continue
data = json.dumps(record_to_import['data']) if isinstance(record_to_import['data'], dict) else record_to_import['data']
conditional_data = json.dumps(record_to_import['conditional_data']) if isinstance(record_to_import['conditional_data'], dict) else record_to_import['conditional_data']
self.__record_update_or_create(
zone_id,
record_to_import['ttl'],
record_to_import['cls'],
record_to_import['type'],
record_to_import['active'],
data,
record_to_import['is_conditional'],
record_to_import['conditional_count'],
record_to_import['conditional_limit'],
record_to_import['conditional_reset'],
conditional_data,
id=record_to_import['record_id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __process_zones(self, zones, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
user_base_domain = '.' + self.__dns_zones.get_base_domain(user.admin, user.username)
count = 0
for zone in zones:
count += 1
bar.update(count) if show_progressbar else False
active = True if zone['active'] in ['1', 'yes', 'true'] else False
catch_all = True if zone['catch_all'] in ['1', 'yes', 'true'] else False
forwarding = True if zone['forwarding'] in ['1', 'yes', 'true'] else False
regex = True if zone['regex'] in ['1', 'yes', 'true'] else False
master = True if zone['master'] in ['1', 'yes', 'true'] else False
tags = zone['tags'].split(',')
# Trim each element.
tags = [tag.strip() for tag in tags]
# Remove empty elements.
tags = list(filter(None, tags))
is_valid = True
if not user.admin:
if zone['domain'][-len(user_base_domain):] != user_base_domain and user_base_domain != '.' + zone['domain']:
is_valid = False
errors.append({'row': zone['row'], 'error': 'Zone {0} does not match your assigned master domain'.format(zone['domain'])})
if is_valid:
domain = {
'id': domain_mapping[zone['domain']] if zone['domain'] in domain_mapping else 0,
'domain': zone['domain'],
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'tags': tags
}
items.append(domain)
return items, errors
def __process_records(self, records, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
domain_mapping_reverse = self.__get_domain_mapping(user.id, reverse=True)
count = 0
for record in records:
count += 1
bar.update(count) if show_progressbar else False
record_errors = []
active = True if record['active'] in ['1', 'yes', 'true'] else False
zone_id = self.__process_record_zone(record, record_errors, domain_mapping)
record_id = self.__process_record_id(record, zone_id, record_errors, domain_mapping_reverse)
ttl = self.__process_record_ttl(record, record_errors)
cls = self.__process_record_cls(record, record_errors)
type = self.__process_record_type(record, record_errors)
is_conditional = True if record['is_conditional'] in ['1', 'yes', 'true'] else False
conditional_reset = True if record['conditional_reset'] in ['1', 'yes', 'true'] else False
conditional_count = self.__process_number(record, record_errors, 'conditional_count')
conditional_limit = self.__process_number(record, record_errors, 'conditional_limit')
data = {}
conditional_data = {}
if len(type) > 0:
data = self.__process_record_data(record, type, record_errors)
if is_conditional:
conditional_data = self.__process_record_data(record, type, record_errors, is_conditional=True)
if len(record_errors) == 0:
items.append({
'record_id': record_id,
'zone_id': zone_id,
'domain': record['domain'],
'active': active,
'ttl': ttl,
'cls': cls,
'type': type,
'data': data,
'is_conditional': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data
})
else:
errors += record_errors
return items, errors
def __process_number(self, record, errors, attribute):
value = record[attribute]
if len(value) == 0 or value.isdigit() is False:
errors.append({'row': record['row'], 'error': 'Invalid {0} value: {1}'.format(attribute, value)})
return 0
return int(value)
def __process_record_id(self, record, zone_id, errors, domain_mapping):
zone_id = zone_id if zone_id > 0 else None
record_id = 0
if len(record['id']) > 0:
if not record['id'].isdigit():
errors.append({'row': record['row'], 'error': 'Invalid record id: {0}'.format(record['id'])})
return 0
record_id = int(record['id'])
if record_id > 0:
record_exists = self.__record_exists(record_id, dns_zone_id=zone_id)
if not record_exists:
# Record not found - treat as new.
return 0
if zone_id:
domain = domain_mapping[zone_id] if zone_id in domain_mapping else None
if not domain:
errors.append({'row': record['row'], 'error': 'Zone {0} not found'.format(record['domain'])})
return 0
if record['domain'] != domain:
errors.append({'row': record['row'], 'error': 'Record {0} does not belong to zone {1}'.format(record_id, zone_id)})
return 0
return record_id
def __process_record_zone(self, record, errors, domain_mapping):
zone_id = domain_mapping[record['domain']] if record['domain'] in domain_mapping else 0
if zone_id == 0:
errors.append({'row': record['row'], 'error': 'Zone not found: {0}'.format(record['domain'])})
return zone_id
def __record_exists(self, dns_record_id, dns_zone_id=None):
params = {'id': dns_record_id}
sql = "SELECT COUNT(id) AS c FROM dns_records WHERE id = :id"
if dns_zone_id is not None:
params['dns_zone_id'] = dns_zone_id
sql += " AND dns_zone_id = :dns_zone_id"
result = db.session.execute(sql, params).first()
return result[0] > 0 if result is not None else False
def __process_record_ttl(self, record, errors):
ttl = 0
if not record['ttl'].isdigit():
errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
else:
ttl = int(record['ttl'])
if ttl < 0:
errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
return ttl
def __process_record_cls(self, record, errors):
cls = ''
if not record['cls'] in self.__dns_records.get_classes():
errors.append({'row': record['row'], 'error': 'Invalid class: {0}'.format(record['cls'])})
else:
cls = record['cls']
return cls
def __process_record_type(self, record, errors):
type = ''
if not record['type'] in self.__dns_records.get_types():
errors.append({'row': record['row'], 'error': 'Invalid type: {0}'.format(record['type'])})
else:
type = record['type']
return type
def __properties_to_dict(self, record, errors, is_conditional=False):
attribute = 'conditional_data' if is_conditional else 'data'
rows = record[attribute].split("\n")
properties = {}
for row in rows:
parts = row.split('=', 1)
if len(parts) != 2:
errors.append({'row': record['row'], 'error': 'Invalid record property: {0}'.format(row)})
continue
name = parts[0].lower().strip()
value = parts[1].strip()
properties[name] = value
return properties
def __process_record_data(self, record, type, errors, is_conditional=False):
record_properties = self.__properties_to_dict(record, errors, is_conditional=is_conditional)
required_properties = self.__dns_records.get_record_type_properties(type, clean=True)
data = {}
for property_name, property_type in required_properties.items():
if not property_name in record_properties:
errors.append({'row': record['row'], 'error': 'Missing record property: {0}'.format(property_name)})
continue
value = record_properties[property_name]
if (property_type == 'int') and (isinstance(value, str)):
if not value.isdigit():
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
value = int(value)
if (property_type == 'str') and (len(value) == 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
elif (property_type == 'int') and (value < 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
data[property_name] = value
return data
def __categorise_rows(self, rows, type):
|
def __get_domain_mapping(self, user_id, reverse=False):
result = db.session.execute(
"SELECT id, domain FROM dns_zones WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
if reverse:
mapping[row[0]] = row[1]
else:
mapping[row[1]] = row[0]
return mapping
def __get_tag_mapping(self, user_id):
result = db.session.execute(
"SELECT id, name FROM tags WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
mapping[row[1]] = row[0]
return mapping
def __zone_update_or_create(self, domain, active, catch_all, forwarding, regex, master, user_id, id=None, autocommit=True):
params = {
'domain': domain,
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'user_id': user_id,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_zones (domain, active, catch_all, forwarding, regex, master, user_id, updated_at, created_at)" \
"VALUES(:domain, :active, :catch_all, :forwarding, :regex, :master, :user_id, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_zones SET domain = :domain, active = :active, catch_all = :catch_all, forwarding = :forwarding, regex = :regex, master = :master, user_id = :user_id, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __record_update_or_create(self, zone_id, ttl, cls, type, active, data, is_conditional, conditional_count,
conditional_limit, conditional_reset, conditional_data, id=None, autocommit=True):
params = {
'zone_id': zone_id,
'ttl': ttl,
'cls': cls,
'type': type,
'active': active,
'data': data,
'has_conditional_responses': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_records (dns_zone_id, ttl, cls, type, data, active, has_conditional_responses, conditional_count, conditional_limit, conditional_reset, conditional_data, updated_at, created_at) " \
"VALUES(:zone_id, :ttl, :cls, :type, :data, :active, :has_conditional_responses, :conditional_count, :conditional_limit, :conditional_reset, :conditional_data, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_records SET dns_zone_id = :zone_id, ttl = :ttl, cls = :cls, type = :type, data = :data, active = :active, has_conditional_responses = :has_conditional_responses, conditional_count = :conditional_count, conditional_limit = :conditional_limit, conditional_reset = :conditional_reset, conditional_data = :conditional_data, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __tags_create(self, user_id, tags):
for tag in tags:
name = tag.strip().lower()
result = db.session.execute(
"SELECT id FROM tags WHERE name = :name AND user_id = :user_id",
{'name': name, 'user_id': user_id}
).first()
if result is None:
params = {
'user_id': user_id,
'name': tag,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO tags (user_id, name, created_at, updated_at) VALUES(:user_id, :name, :created_at, :updated_at)"
db.session.execute(sql, params)
db.session.commit()
return True
def __zone_save_tags(self, zone_id, tags, autocommit=True):
for name, id in tags.items():
params = {
'dns_zone_id': zone_id,
'tag_id': id,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO dns_zone_tags (dns_zone_id, tag_id, created_at, updated_at) VALUES(:dns_zone_id, :tag_id, :created_at, :updated_at)"
db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __zone_clear_tags(self, zone_ids, batch_size=100, show_progressbar=False, widget=None):
batches = list(self.__chunks(zone_ids, batch_size))
if show_progressbar:
widget[0] = progressbar.FormatLabel('Removing existing tags')
bar = progressbar.ProgressBar(max_value=len(batches), widgets=widget)
count = 0
for batch in batches:
count += 1
bar.update(count) if show_progressbar else False
i = 0
params = {}
for id in batch:
i += 1
params['param' + str(i)] = id
bind = [':' + v for v in params.keys()]
sql = "DELETE FROM dns_zone_tags WHERE dns_zone_id IN({0})".format(', '.join(bind))
db.session.execute(sql, params)
db.session.commit()
return True
def __chunks(self, data, size):
# From https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
for i in range(0, len(data), size):
yield data[i:i + size]
| data = []
for i, row in enumerate(rows):
# Error row is +1 because the first row is the header which was removed.
actual_row = i + 1
if type == self.IMPORT_TYPE_ZONE:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'active': row['active'].strip().lower(),
'catch_all': row['catch_all'].strip().lower(),
'forwarding': row['forwarding'].strip().lower(),
'regex': row['regex'].strip().lower(),
'master': row['master'].strip().lower(),
'tags': row['tags'].strip()
})
elif type == self.IMPORT_TYPE_RECORD:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'id': row['id'].strip(),
'ttl': row['ttl'].strip().lower(),
'cls': row['cls'].strip().upper(),
'type': row['type'].strip().upper(),
'active': row['active'].strip().lower(),
'data': row['data'].strip(),
'is_conditional': row['is_conditional'].strip().lower(),
'conditional_count': row['conditional_count'].strip().lower(),
'conditional_limit': row['conditional_limit'].strip().lower(),
'conditional_reset': row['conditional_reset'].strip().lower(),
'conditional_data': row['conditional_data'].strip(),
})
return data |
cryptoutils.go | /*
Copyright SecureKey Technologies Inc. All Rights Reserved.
SPDX-License-Identifier: Apache-2.0
*/
package cryptoutil
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"encoding/pem"
"io"
"github.com/aiguo186/fabric-sdk-go-gm/internal/github.com/tjfoc/gmsm/sm2"
tls "github.com/aiguo186/fabric-sdk-go-gm/internal/github.com/tjfoc/gmtls"
"github.com/aiguo186/fabric-sdk-go-gm/pkg/common/logging"
"github.com/aiguo186/fabric-sdk-go-gm/pkg/common/providers/core"
"github.com/pkg/errors"
factory "github.com/aiguo186/fabric-sdk-go-gm/internal/github.com/aiguo186/fabric-ca/sdkpatch/cryptosuitebridge"
)
var logger = logging.NewLogger("fabsdk/core")
// GetPrivateKeyFromCert will return private key represented by SKI in cert's public key
func | (cert []byte, cs core.CryptoSuite) (core.Key, error) {
// get the public key in the right format
certPubK, err := GetPublicKeyFromCert(cert, cs)
if err != nil {
return nil, errors.WithMessage(err, "Failed to import certificate's public key")
}
if certPubK == nil || certPubK.SKI() == nil {
return nil, errors.New("Failed to get SKI")
}
// Get the key given the SKI value
key, err := cs.GetKey(certPubK.SKI())
if err != nil {
return nil, errors.WithMessage(err, "Could not find matching key for SKI")
}
if key != nil && !key.Private() {
return nil, errors.Errorf("Found key is not private, SKI: %s", certPubK.SKI())
}
return key, nil
}
// GetPublicKeyFromCert will return public key the from cert
func GetPublicKeyFromCert(cert []byte, cs core.CryptoSuite) (core.Key, error) {
dcert, _ := pem.Decode(cert)
if dcert == nil {
return nil, errors.Errorf("Unable to decode cert bytes [%v]", cert)
}
x509Cert, err := sm2.ParseCertificate(dcert.Bytes)
if err != nil {
return nil, errors.Errorf("Unable to parse cert from decoded bytes: %s", err)
}
// get the public key in the right format
key, err := cs.KeyImport(x509Cert, factory.GetX509PublicKeyImportOpts(true))
if err != nil {
return nil, errors.WithMessage(err, "Failed to import certificate's public key")
}
return key, nil
}
// X509KeyPair will return cer/key pair used for mutual TLS
func X509KeyPair(certPEMBlock []byte, pk core.Key, cs core.CryptoSuite) (tls.Certificate, error) {
fail := func(err error) (tls.Certificate, error) { return tls.Certificate{}, err }
var cert tls.Certificate
for {
var certDERBlock *pem.Block
certDERBlock, certPEMBlock = pem.Decode(certPEMBlock)
if certDERBlock == nil {
break
}
if certDERBlock.Type == "CERTIFICATE" {
cert.Certificate = append(cert.Certificate, certDERBlock.Bytes)
} else {
logger.Debugf("Skipping block type: %s", certDERBlock.Type)
}
}
if len(cert.Certificate) == 0 {
return fail(errors.New("No certs available from bytes"))
}
// We are parsing public key for TLS to find its type
x509Cert, err := sm2.ParseCertificate(cert.Certificate[0])
if err != nil {
return fail(err)
}
switch x509Cert.PublicKey.(type) {
case *rsa.PublicKey:
cert.PrivateKey = &PrivateKey{cs, pk, &rsa.PublicKey{}}
case *ecdsa.PublicKey:
cert.PrivateKey = &PrivateKey{cs, pk, &ecdsa.PublicKey{}}
default:
return fail(errors.New("tls: unknown public key algorithm"))
}
return cert, nil
}
//PrivateKey is signer implementation for golang client TLS
type PrivateKey struct {
cryptoSuite core.CryptoSuite
key core.Key
publicKey crypto.PublicKey
}
// Public returns the public key corresponding to private key
func (priv *PrivateKey) Public() crypto.PublicKey {
return priv.publicKey
}
// Sign signs msg with priv, reading randomness from rand. If opts is a
// *PSSOptions then the PSS algorithm will be used, otherwise PKCS#1 v1.5 will
// be used. This method is intended to support keys where the private part is
// kept in, for example, a hardware module.
func (priv *PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) ([]byte, error) {
if priv.cryptoSuite == nil {
return nil, errors.New("Crypto suite not set")
}
if priv.key == nil {
return nil, errors.New("Private key not set")
}
return priv.cryptoSuite.Sign(priv.key, msg, opts)
}
| GetPrivateKeyFromCert |
mod.rs | // Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*
# check.rs
Within the check phase of type check, we check each item one at a time
(bodies of function expressions are checked as part of the containing
function). Inference is used to supply types wherever they are
unknown.
By far the most complex case is checking the body of a function. This
can be broken down into several distinct phases:
- gather: creates type variables to represent the type of each local
variable and pattern binding.
- main: the main pass does the lion's share of the work: it
determines the types of all expressions, resolves
methods, checks for most invalid conditions, and so forth. In
some cases, where a type is unknown, it may create a type or region
variable and use that as the type of an expression.
In the process of checking, various constraints will be placed on
these type variables through the subtyping relationships requested
through the `demand` module. The `infer` module is in charge
of resolving those constraints.
- regionck: after main is complete, the regionck pass goes over all
types looking for regions and making sure that they did not escape
into places they are not in scope. This may also influence the
final assignments of the various region variables if there is some
flexibility.
- vtable: finds and records the impls to use for each trait bound that
appears on a type parameter.
- writeback: writes the final types within a function body, replacing
type variables with their final inferred types. These final types
are written into the `tcx.node_types` table, which should *never* contain
any reference to a type variable.
## Intermediate types
While type checking a function, the intermediate types for the
expressions, blocks, and so forth contained within the function are
stored in `fcx.node_types` and `fcx.node_substs`. These types
may contain unresolved type variables. After type checking is
complete, the functions in the writeback module are used to take the
types from this table, resolve them, and then write them into their
permanent home in the type context `tcx`.
This means that during inferencing you should use `fcx.write_ty()`
and `fcx.expr_ty()` / `fcx.node_ty()` to write/obtain the types of
nodes within the function.
The types of top-level items, which never contain unbound type
variables, are stored directly into the `tcx` tables.
N.B.: A type variable is not the same thing as a type parameter. A
type variable is rather an "instance" of a type parameter: that is,
given a generic function `fn foo<T>(t: T)`: while checking the
function `foo`, the type `ty_param(0)` refers to the type `T`, which
is treated in abstract. When `foo()` is called, however, `T` will be
substituted for a fresh type variable `N`. This variable will
eventually be resolved to some concrete type (which might itself be
a type parameter).
*/
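// Illustrative sketch (not from the original source): how a type variable
// introduced during the "main" pass is constrained and later written back.
//
//     fn example() {
//         let mut v = Vec::new(); // gather: `v` gets type `Vec<?T>` (fresh type variable)
//         v.push(1u32);           // main: records the constraint `?T == u32`
//     }                           // writeback: `tcx.node_types` ends up holding `Vec<u32>`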
pub use self::Expectation::*;
use self::autoderef::Autoderef;
use self::callee::DeferredCallResolution;
use self::coercion::{CoerceMany, DynamicCoerceMany};
pub use self::compare_method::{compare_impl_method, compare_const_impl};
use self::method::MethodCallee;
use self::TupleArgumentsFlag::*;
use astconv::AstConv;
use hir::GenericArg;
use hir::def::Def;
use hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use std::slice;
use namespace::Namespace;
use rustc::infer::{self, InferCtxt, InferOk, RegionVariableOrigin};
use rustc::infer::opaque_types::OpaqueTypeDecl;
use rustc::infer::type_variable::{TypeVariableOrigin};
use rustc::middle::region;
use rustc::mir::interpret::{ConstValue, GlobalId};
use rustc::ty::subst::{CanonicalUserSubsts, UnpackedKind, Subst, Substs,
UserSelfTy, UserSubsts};
use rustc::traits::{self, ObligationCause, ObligationCauseCode, TraitEngine};
use rustc::ty::{self, Ty, TyCtxt, GenericParamDefKind, Visibility, ToPredicate, RegionKind};
use rustc::ty::adjustment::{Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability};
use rustc::ty::fold::TypeFoldable;
use rustc::ty::query::Providers;
use rustc::ty::util::{Representability, IntTypeExt, Discr};
use errors::{Applicability, DiagnosticBuilder, DiagnosticId};
use require_c_abi_if_variadic;
use session::{CompileIncomplete, config, Session};
use TypeAndSubsts;
use lint;
use util::common::{ErrorReported, indenter};
use util::nodemap::{DefIdMap, DefIdSet, FxHashMap, FxHashSet, NodeMap};
use std::cell::{Cell, RefCell, Ref, RefMut};
use rustc_data_structures::sync::Lrc;
use std::collections::hash_map::Entry;
use std::cmp;
use std::fmt::Display;
use std::iter;
use std::mem::replace;
use std::ops::{self, Deref};
use rustc_target::spec::abi::Abi;
use syntax::ast;
use syntax::attr;
use syntax::source_map::DUMMY_SP;
use syntax::source_map::original_sp;
use syntax::feature_gate::{GateIssue, emit_feature_err};
use syntax::ptr::P;
use syntax::symbol::{Symbol, LocalInternedString, keywords};
use syntax::util::lev_distance::find_best_match_for_name;
use syntax_pos::{self, BytePos, Span, MultiSpan};
use rustc::hir::intravisit::{self, Visitor, NestedVisitorMap};
use rustc::hir::itemlikevisit::ItemLikeVisitor;
use rustc::hir::Node;
use rustc::hir::{self, PatKind, ItemKind};
use rustc::middle::lang_items;
mod autoderef;
pub mod dropck;
pub mod _match;
pub mod writeback;
mod regionck;
pub mod coercion;
pub mod demand;
pub mod method;
mod upvar;
mod wfcheck;
mod cast;
mod closure;
mod callee;
mod compare_method;
mod generator_interior;
mod intrinsic;
mod op;
/// The type of a local binding, including the revealed type for anon types.
#[derive(Copy, Clone)]
pub struct LocalTy<'tcx> {
decl_ty: Ty<'tcx>,
revealed_ty: Ty<'tcx>
}
/// A wrapper for InferCtxt's `in_progress_tables` field.
#[derive(Copy, Clone)]
struct MaybeInProgressTables<'a, 'tcx: 'a> {
maybe_tables: Option<&'a RefCell<ty::TypeckTables<'tcx>>>,
}
impl<'a, 'tcx> MaybeInProgressTables<'a, 'tcx> {
fn borrow(self) -> Ref<'a, ty::TypeckTables<'tcx>> {
match self.maybe_tables {
Some(tables) => tables.borrow(),
None => {
bug!("MaybeInProgressTables: inh/fcx.tables.borrow() with no tables")
}
}
}
fn borrow_mut(self) -> RefMut<'a, ty::TypeckTables<'tcx>> {
match self.maybe_tables {
Some(tables) => tables.borrow_mut(),
None => {
bug!("MaybeInProgressTables: inh/fcx.tables.borrow_mut() with no tables")
}
}
}
}
/// `Inherited` holds the fields shared between the `FnCtxt` of the
/// function being type-checked and the `FnCtxt`s of any
/// closures defined within the function. For example:
///
/// fn foo() {
/// bar(move|| { ... })
/// }
///
/// Here, the function `foo()` and the closure passed to
/// `bar()` will each have their own `FnCtxt`, but they will
/// share the inherited fields.
pub struct Inherited<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: InferCtxt<'a, 'gcx, 'tcx>,
tables: MaybeInProgressTables<'a, 'tcx>,
locals: RefCell<NodeMap<LocalTy<'tcx>>>,
fulfillment_cx: RefCell<Box<dyn TraitEngine<'tcx>>>,
// When we process a call like `c()` where `c` is a closure type,
// we may not have decided yet whether `c` is a `Fn`, `FnMut`, or
// `FnOnce` closure. In that case, we defer full resolution of the
// call until upvar inference can kick in and make the
// decision. We keep these deferred resolutions grouped by the
// def-id of the closure, so that once we decide, we can easily go
// back and process them.
deferred_call_resolutions: RefCell<DefIdMap<Vec<DeferredCallResolution<'gcx, 'tcx>>>>,
deferred_cast_checks: RefCell<Vec<cast::CastCheck<'tcx>>>,
deferred_generator_interiors: RefCell<Vec<(hir::BodyId, Ty<'tcx>)>>,
// Opaque types found in explicit return types and their
// associated fresh inference variable. Writeback resolves these
// variables to get the concrete type, which can be used to
// 'de-opaque' OpaqueTypeDecl, after typeck is done with all functions.
opaque_types: RefCell<DefIdMap<OpaqueTypeDecl<'tcx>>>,
/// Each type parameter has an implicit region bound that
/// indicates it must outlive at least the function body (the user
/// may specify stronger requirements). This field indicates the
/// region of the callee. If it is `None`, then the parameter
/// environment is for an item or something where the "callee" is
/// not clear.
implicit_region_bound: Option<ty::Region<'tcx>>,
body_id: Option<hir::BodyId>,
}
impl<'a, 'gcx, 'tcx> Deref for Inherited<'a, 'gcx, 'tcx> {
type Target = InferCtxt<'a, 'gcx, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.infcx
}
}
/// When type-checking an expression, we propagate downward
/// whatever type hint we are able in the form of an `Expectation`.
#[derive(Copy, Clone, Debug)]
pub enum Expectation<'tcx> {
/// We know nothing about what type this expression should have.
NoExpectation,
/// This expression is an `if` condition, it must resolve to `bool`.
ExpectIfCondition,
/// This expression should have the type given (or some subtype)
ExpectHasType(Ty<'tcx>),
/// This expression will be cast to the `Ty`
ExpectCastableToType(Ty<'tcx>),
/// This rvalue expression will be wrapped in `&` or `Box` and coerced
/// to `&Ty` or `Box<Ty>`, respectively. `Ty` is `[A]` or `Trait`.
ExpectRvalueLikeUnsized(Ty<'tcx>),
}
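// Illustrative mapping (a sketch, not exhaustive): for `let x: u8 = e;` the
// expression `e` is checked under `ExpectHasType(u8)`; for `e as u8` it is
// checked under `ExpectCastableToType(u8)`; and when `&[1, 2, 3]` is coerced
// to `&[isize]`, the inner array expression gets
// `ExpectRvalueLikeUnsized([isize])`.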
impl<'a, 'gcx, 'tcx> Expectation<'tcx> {
// Disregard "castable to" expectations because they
// can lead us astray. Consider for example `if cond
// {22} else {c} as u8` -- if we propagate the
// "castable to u8" constraint to 22, it will pick the
// type 22u8, which is overly constrained (c might not
// be a u8). In effect, the problem is that the
// "castable to" expectation is not the tightest thing
// we can say, so we want to drop it in this case.
// The tightest thing we can say is "must unify with
// else branch". Note that in the case of a "has type"
// constraint, this limitation does not hold.
// If the expected type is just a type variable, then don't use
// an expected type. Otherwise, we might write parts of the type
// when checking the 'then' block which are incompatible with the
// 'else' branch.
fn adjust_for_branches(&self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
match *self {
ExpectHasType(ety) => {
let ety = fcx.shallow_resolve(ety);
if !ety.is_ty_var() {
ExpectHasType(ety)
} else {
NoExpectation
}
}
ExpectRvalueLikeUnsized(ety) => {
ExpectRvalueLikeUnsized(ety)
}
_ => NoExpectation
}
}
/// Provide an expectation for an rvalue expression given an *optional*
/// hint, which is not required for type safety (the resulting type might
/// be checked higher up, as is the case with `&expr` and `box expr`), but
/// is useful in determining the concrete type.
///
/// The primary use case is where the expected type is a fat pointer,
/// like `&[isize]`. For example, consider the following statement:
///
/// let x: &[isize] = &[1, 2, 3];
///
/// In this case, the expected type for the `&[1, 2, 3]` expression is
/// `&[isize]`. If however we were to say that `[1, 2, 3]` has the
/// expectation `ExpectHasType([isize])`, that would be too strong --
/// `[1, 2, 3]` does not have the type `[isize]` but rather `[isize; 3]`.
/// It is only the `&[1, 2, 3]` expression as a whole that can be coerced
/// to the type `&[isize]`. Therefore, we propagate this more limited hint,
/// which still is useful, because it informs integer literals and the like.
/// See the test case `test/run-pass/coerce-expect-unsized.rs` and #20169
/// for examples of where this comes up.
fn rvalue_hint(fcx: &FnCtxt<'a, 'gcx, 'tcx>, ty: Ty<'tcx>) -> Expectation<'tcx> {
match fcx.tcx.struct_tail(ty).sty {
ty::Slice(_) | ty::Str | ty::Dynamic(..) => {
ExpectRvalueLikeUnsized(ty)
}
_ => ExpectHasType(ty)
}
}
// Resolves `expected` by a single level if it is a variable. If
// there is no expected type or resolution is not possible (e.g.,
// no constraints yet present), just returns `None`.
fn resolve(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Expectation<'tcx> {
match self {
NoExpectation => NoExpectation,
ExpectIfCondition => ExpectIfCondition,
ExpectCastableToType(t) => {
ExpectCastableToType(fcx.resolve_type_vars_if_possible(&t))
}
ExpectHasType(t) => {
ExpectHasType(fcx.resolve_type_vars_if_possible(&t))
}
ExpectRvalueLikeUnsized(t) => {
ExpectRvalueLikeUnsized(fcx.resolve_type_vars_if_possible(&t))
}
}
}
fn to_option(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
NoExpectation => None,
ExpectIfCondition => Some(fcx.tcx.types.bool),
ExpectCastableToType(ty) |
ExpectHasType(ty) |
ExpectRvalueLikeUnsized(ty) => Some(ty),
}
}
/// It sometimes happens that we want to turn an expectation into
/// a **hard constraint** (i.e., something that must be satisfied
/// for the program to type-check). `only_has_type` will return
/// such a constraint, if it exists.
fn only_has_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>) -> Option<Ty<'tcx>> {
match self.resolve(fcx) {
ExpectHasType(ty) => Some(ty),
ExpectIfCondition => Some(fcx.tcx.types.bool),
NoExpectation | ExpectCastableToType(_) | ExpectRvalueLikeUnsized(_) => None,
}
}
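// e.g. under `ExpectCastableToType(u8)`, `to_option` above yields `Some(u8)`
// as a mere hint, while `only_has_type` yields `None`: a cast target is not
// a hard constraint on the expression's own type.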
/// Like `only_has_type`, but instead of returning `None` if no
/// hard constraint exists, creates a fresh type variable.
fn coercion_target_type(self, fcx: &FnCtxt<'a, 'gcx, 'tcx>, span: Span) -> Ty<'tcx> {
self.only_has_type(fcx)
.unwrap_or_else(|| fcx.next_ty_var(TypeVariableOrigin::MiscVariable(span)))
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Needs {
MutPlace,
None
}
impl Needs {
fn maybe_mut_place(m: hir::Mutability) -> Self {
match m {
hir::MutMutable => Needs::MutPlace,
hir::MutImmutable => Needs::None,
}
}
}
#[derive(Copy, Clone)]
pub struct UnsafetyState {
pub def: ast::NodeId,
pub unsafety: hir::Unsafety,
pub unsafe_push_count: u32,
from_fn: bool
}
impl UnsafetyState {
pub fn function(unsafety: hir::Unsafety, def: ast::NodeId) -> UnsafetyState {
UnsafetyState { def: def, unsafety: unsafety, unsafe_push_count: 0, from_fn: true }
}
pub fn recurse(&mut self, blk: &hir::Block) -> UnsafetyState {
match self.unsafety {
// If this is unsafe, and the outer function was already marked as
// unsafe, we shouldn't attribute the unsafeness to the block. This
// way the block can be warned about instead of ignoring this
// extraneous block (functions are never warned about).
hir::Unsafety::Unsafe if self.from_fn => *self,
unsafety => {
let (unsafety, def, count) = match blk.rules {
hir::PushUnsafeBlock(..) =>
(unsafety, blk.id, self.unsafe_push_count.checked_add(1).unwrap()),
hir::PopUnsafeBlock(..) =>
(unsafety, blk.id, self.unsafe_push_count.checked_sub(1).unwrap()),
hir::UnsafeBlock(..) =>
(hir::Unsafety::Unsafe, blk.id, self.unsafe_push_count),
hir::DefaultBlock =>
(unsafety, self.def, self.unsafe_push_count),
};
UnsafetyState{ def,
unsafety,
unsafe_push_count: count,
from_fn: false }
}
}
}
}
#[derive(Debug, Copy, Clone)]
pub enum PlaceOp {
Deref,
Index
}
/// Tracks whether executing a node may exit normally (versus
/// return/break/panic, which "diverge", leaving dead code in their
/// wake). Tracked semi-automatically (through type variables marked
/// as diverging), with some manual adjustments for control-flow
/// primitives (approximating a CFG).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum Diverges {
/// Potentially unknown, some cases converge,
/// others require a CFG to determine them.
Maybe,
/// Definitely known to diverge and therefore
/// not reach the next sibling or its parent.
Always,
/// Same as `Always` but with a reachability
/// warning already emitted
WarnedAlways
}
// Convenience impls for combining `Diverges`.
impl ops::BitAnd for Diverges {
type Output = Self;
fn bitand(self, other: Self) -> Self {
cmp::min(self, other)
}
}
impl ops::BitOr for Diverges {
type Output = Self;
fn bitor(self, other: Self) -> Self {
cmp::max(self, other)
}
}
impl ops::BitAndAssign for Diverges {
fn bitand_assign(&mut self, other: Self) {
*self = *self & other;
}
}
impl ops::BitOrAssign for Diverges {
fn bitor_assign(&mut self, other: Self) {
*self = *self | other;
}
}
impl Diverges {
fn always(self) -> bool {
self >= Diverges::Always
}
}
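// A quick sketch of the lattice: `&` takes the minimum (both operands must
// diverge for the combination to diverge, as with the two arms of an `if`),
// while `|` takes the maximum (sequencing: one diverging part taints the
// rest). So `Always & Maybe == Maybe`, but `Maybe | Always == Always`.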
pub struct BreakableCtxt<'gcx: 'tcx, 'tcx> {
may_break: bool,
// this is `None` for loops where break with a value is illegal,
// such as `while`, `for`, and `while let`
coerce: Option<DynamicCoerceMany<'gcx, 'tcx>>,
}
pub struct EnclosingBreakables<'gcx: 'tcx, 'tcx> {
stack: Vec<BreakableCtxt<'gcx, 'tcx>>,
by_id: NodeMap<usize>,
}
impl<'gcx, 'tcx> EnclosingBreakables<'gcx, 'tcx> {
fn find_breakable(&mut self, target_id: ast::NodeId) -> &mut BreakableCtxt<'gcx, 'tcx> {
let ix = *self.by_id.get(&target_id).unwrap_or_else(|| {
bug!("could not find enclosing breakable with id {}", target_id);
});
&mut self.stack[ix]
}
}
#[derive(Debug)]
struct PathSeg(DefId, usize);
pub struct FnCtxt<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
body_id: ast::NodeId,
/// The parameter environment used for proving trait obligations
/// in this function. This can change when we descend into
/// closures (as they bring new things into scope), hence it is
/// not part of `Inherited` (as of the time of this writing,
/// closures do not yet change the environment, but they will
/// eventually).
param_env: ty::ParamEnv<'tcx>,
// Number of errors that had been reported when we started
// checking this function. On exit, if we find that *more* errors
// have been reported, we will skip regionck and other work that
// expects the types within the function to be consistent.
err_count_on_creation: usize,
ret_coercion: Option<RefCell<DynamicCoerceMany<'gcx, 'tcx>>>,
yield_ty: Option<Ty<'tcx>>,
ps: RefCell<UnsafetyState>,
/// Whether the last checked node generates a divergence (e.g.,
/// `return` will set this to Always). In general, when entering
/// an expression or other node in the tree, the initial value
/// indicates whether prior parts of the containing expression may
/// have diverged. It is then typically set to `Maybe` (and the
/// old value remembered) for processing the subparts of the
/// current expression. As each subpart is processed, they may set
/// the flag to `Always` etc. Finally, at the end, we take the
/// result and "union" it with the original value, so that when we
/// return the flag indicates if any subpart of the parent
/// expression (up to and including this part) has diverged. So,
/// if you read it after evaluating a subexpression `X`, the value
/// you get indicates whether any subexpression that was
/// evaluated up to and including `X` diverged.
///
/// We currently use this flag only for diagnostic purposes:
///
/// - To warn about unreachable code: if, after processing a
/// sub-expression but before we have applied the effects of the
/// current node, we see that the flag is set to `Always`, we
/// can issue a warning. This corresponds to something like
/// `foo(return)`; we warn on the `foo()` expression. (We then
/// update the flag to `WarnedAlways` to suppress duplicate
/// reports.) Similarly, if we traverse to a fresh statement (or
/// tail expression) from a `Always` setting, we will issue a
/// warning. This corresponds to something like `{return;
/// foo();}` or `{return; 22}`, where we would warn on the
/// `foo()` or `22`.
///
/// An expression represents dead-code if, after checking it,
/// the diverges flag is set to something other than `Maybe`.
diverges: Cell<Diverges>,
/// Whether any child nodes have any type errors.
has_errors: Cell<bool>,
enclosing_breakables: RefCell<EnclosingBreakables<'gcx, 'tcx>>,
inh: &'a Inherited<'a, 'gcx, 'tcx>,
}
impl<'a, 'gcx, 'tcx> Deref for FnCtxt<'a, 'gcx, 'tcx> {
type Target = Inherited<'a, 'gcx, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.inh
}
}
/// Helper type of a temporary returned by Inherited::build(...).
/// Necessary because we can't write the following bound:
/// F: for<'b, 'tcx> where 'gcx: 'tcx FnOnce(Inherited<'b, 'gcx, 'tcx>).
pub struct InheritedBuilder<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
infcx: infer::InferCtxtBuilder<'a, 'gcx, 'tcx>,
def_id: DefId,
}
impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
pub fn build(tcx: TyCtxt<'a, 'gcx, 'gcx>, def_id: DefId)
-> InheritedBuilder<'a, 'gcx, 'tcx> {
let hir_id_root = if def_id.is_local() {
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let hir_id = tcx.hir.definitions().node_to_hir_id(node_id);
DefId::local(hir_id.owner)
} else {
def_id
};
InheritedBuilder {
infcx: tcx.infer_ctxt().with_fresh_in_progress_tables(hir_id_root),
def_id,
}
}
}
impl<'a, 'gcx, 'tcx> InheritedBuilder<'a, 'gcx, 'tcx> {
fn enter<F, R>(&'tcx mut self, f: F) -> R
where F: for<'b> FnOnce(Inherited<'b, 'gcx, 'tcx>) -> R
{
let def_id = self.def_id;
self.infcx.enter(|infcx| f(Inherited::new(infcx, def_id)))
}
}
impl<'a, 'gcx, 'tcx> Inherited<'a, 'gcx, 'tcx> {
fn new(infcx: InferCtxt<'a, 'gcx, 'tcx>, def_id: DefId) -> Self {
let tcx = infcx.tcx;
let item_id = tcx.hir.as_local_node_id(def_id);
let body_id = item_id.and_then(|id| tcx.hir.maybe_body_owned_by(id));
let implicit_region_bound = body_id.map(|body_id| {
let body = tcx.hir.body(body_id);
tcx.mk_region(ty::ReScope(region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite
}))
});
Inherited {
tables: MaybeInProgressTables {
maybe_tables: infcx.in_progress_tables,
},
infcx,
fulfillment_cx: RefCell::new(TraitEngine::new(tcx)),
locals: RefCell::new(NodeMap()),
deferred_call_resolutions: RefCell::new(DefIdMap()),
deferred_cast_checks: RefCell::new(Vec::new()),
deferred_generator_interiors: RefCell::new(Vec::new()),
opaque_types: RefCell::new(DefIdMap()),
implicit_region_bound,
body_id,
}
}
fn register_predicate(&self, obligation: traits::PredicateObligation<'tcx>) {
debug!("register_predicate({:?})", obligation);
if obligation.has_escaping_bound_vars() {
span_bug!(obligation.cause.span, "escaping bound vars in predicate {:?}",
obligation);
}
self.fulfillment_cx
.borrow_mut()
.register_predicate_obligation(self, obligation);
}
fn register_predicates<I>(&self, obligations: I)
where I: IntoIterator<Item = traits::PredicateObligation<'tcx>>
{
for obligation in obligations {
self.register_predicate(obligation);
}
}
fn register_infer_ok_obligations<T>(&self, infer_ok: InferOk<'tcx, T>) -> T {
self.register_predicates(infer_ok.obligations);
infer_ok.value
}
fn normalize_associated_types_in<T>(&self,
span: Span,
body_id: ast::NodeId,
param_env: ty::ParamEnv<'tcx>,
value: &T) -> T
where T : TypeFoldable<'tcx>
{
let ok = self.partially_normalize_associated_types_in(span, body_id, param_env, value);
self.register_infer_ok_obligations(ok)
}
}
struct CheckItemTypesVisitor<'a, 'tcx: 'a> { tcx: TyCtxt<'a, 'tcx, 'tcx> }
impl<'a, 'tcx> ItemLikeVisitor<'tcx> for CheckItemTypesVisitor<'a, 'tcx> {
fn visit_item(&mut self, i: &'tcx hir::Item) {
check_item_type(self.tcx, i);
}
fn visit_trait_item(&mut self, _: &'tcx hir::TraitItem) { }
fn visit_impl_item(&mut self, _: &'tcx hir::ImplItem) { }
}
pub fn check_wf_new<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), ErrorReported> {
tcx.sess.track_errors(|| {
let mut visit = wfcheck::CheckTypeWellFormedVisitor::new(tcx);
tcx.hir.krate().visit_all_item_likes(&mut visit.as_deep_visitor());
})
}
pub fn check_item_types<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), ErrorReported> {
tcx.sess.track_errors(|| {
tcx.hir.krate().visit_all_item_likes(&mut CheckItemTypesVisitor { tcx });
})
}
pub fn check_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) -> Result<(), CompileIncomplete> {
tcx.typeck_item_bodies(LOCAL_CRATE)
}
fn typeck_item_bodies<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, crate_num: CrateNum)
-> Result<(), CompileIncomplete>
{
debug_assert!(crate_num == LOCAL_CRATE);
Ok(tcx.sess.track_errors(|| {
tcx.par_body_owners(|body_owner_def_id| {
ty::query::queries::typeck_tables_of::ensure(tcx, body_owner_def_id);
});
})?)
}
fn check_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_item_well_formed(tcx, def_id);
}
fn check_trait_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_trait_item(tcx, def_id);
}
fn check_impl_item_well_formed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, def_id: DefId) {
wfcheck::check_impl_item(tcx, def_id);
}
pub fn provide(providers: &mut Providers) {
method::provide(providers);
*providers = Providers {
typeck_item_bodies,
typeck_tables_of,
has_typeck_tables,
adt_destructor,
used_trait_imports,
check_item_well_formed,
check_trait_item_well_formed,
check_impl_item_well_formed,
..*providers
};
}
fn adt_destructor<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Option<ty::Destructor> {
tcx.calculate_dtor(def_id, &mut dropck::check_drop_impl)
}
/// If this def-id is a "primary tables entry", returns `Some((body_id, decl))`
/// with information about its body-id and fn-decl (if any). Otherwise,
/// returns `None`.
///
/// If this function returns "some", then `typeck_tables(def_id)` will
/// succeed; if it returns `None`, then `typeck_tables(def_id)` may or
/// may not succeed. In some cases where this function returns `None`
/// (notably closures), `typeck_tables(def_id)` would wind up
/// redirecting to the owning function.
fn primary_body_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: ast::NodeId)
-> Option<(hir::BodyId, Option<&'tcx hir::FnDecl>)>
{
match tcx.hir.get(id) {
Node::Item(item) => {
match item.node {
hir::ItemKind::Const(_, body) |
hir::ItemKind::Static(_, _, body) =>
Some((body, None)),
hir::ItemKind::Fn(ref decl, .., body) =>
Some((body, Some(decl))),
_ =>
None,
}
}
Node::TraitItem(item) => {
match item.node {
hir::TraitItemKind::Const(_, Some(body)) =>
Some((body, None)),
hir::TraitItemKind::Method(ref sig, hir::TraitMethod::Provided(body)) =>
Some((body, Some(&sig.decl))),
_ =>
None,
}
}
Node::ImplItem(item) => {
match item.node {
hir::ImplItemKind::Const(_, body) =>
Some((body, None)),
hir::ImplItemKind::Method(ref sig, body) =>
Some((body, Some(&sig.decl))),
_ =>
None,
}
}
Node::AnonConst(constant) => Some((constant.body, None)),
_ => None,
}
}
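// For instance, `const C: u32 = 1;` yields `Some((body, None))` here, while
// `fn f(x: u32) -> u32 { x }` yields `Some((body, Some(decl)))`, and a `use`
// item yields `None`.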
fn has_typeck_tables<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> bool {
// Closures' tables come from their outermost function,
// as they are part of the same "inference environment".
let outer_def_id = tcx.closure_base_def_id(def_id);
if outer_def_id != def_id {
return tcx.has_typeck_tables(outer_def_id);
}
let id = tcx.hir.as_local_node_id(def_id).unwrap();
primary_body_of(tcx, id).is_some()
}
fn used_trait_imports<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> Lrc<DefIdSet> {
tcx.typeck_tables_of(def_id).used_trait_imports.clone()
}
fn typeck_tables_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId)
-> &'tcx ty::TypeckTables<'tcx> {
// Closures' tables come from their outermost function,
// as they are part of the same "inference environment".
let outer_def_id = tcx.closure_base_def_id(def_id);
if outer_def_id != def_id {
return tcx.typeck_tables_of(outer_def_id);
}
let id = tcx.hir.as_local_node_id(def_id).unwrap();
let span = tcx.hir.span(id);
// Figure out what primary body this item has.
let (body_id, fn_decl) = primary_body_of(tcx, id).unwrap_or_else(|| {
span_bug!(span, "can't type-check body of {:?}", def_id);
});
let body = tcx.hir.body(body_id);
let tables = Inherited::build(tcx, def_id).enter(|inh| {
let param_env = tcx.param_env(def_id);
let fcx = if let Some(decl) = fn_decl {
let fn_sig = tcx.fn_sig(def_id);
check_abi(tcx, span, fn_sig.abi());
// Compute the fty from point of view of inside the fn.
let fn_sig =
tcx.liberate_late_bound_regions(def_id, &fn_sig);
let fn_sig =
inh.normalize_associated_types_in(body.value.span,
body_id.node_id,
param_env,
&fn_sig);
let fcx = check_fn(&inh, param_env, fn_sig, decl, id, body, None).0;
fcx
} else {
let fcx = FnCtxt::new(&inh, param_env, body.value.id);
let expected_type = tcx.type_of(def_id);
let expected_type = fcx.normalize_associated_types_in(body.value.span, &expected_type);
fcx.require_type_is_sized(expected_type, body.value.span, traits::ConstSized);
let revealed_ty = if tcx.features().impl_trait_in_bindings {
fcx.instantiate_opaque_types_from_value(
id,
&expected_type
)
} else {
expected_type
};
// Gather locals in statics (because of block expressions).
GatherLocalsVisitor { fcx: &fcx, parent_id: id, }.visit_body(body);
fcx.check_expr_coercable_to_type(&body.value, revealed_ty);
fcx
};
// All type checking constraints were added, try to fallback unsolved variables.
fcx.select_obligations_where_possible(false);
let mut fallback_has_occurred = false;
for ty in &fcx.unsolved_variables() {
fallback_has_occurred |= fcx.fallback_if_possible(ty);
}
fcx.select_obligations_where_possible(fallback_has_occurred);
// Even though coercion casts provide type hints, we check casts after fallback for
// backwards compatibility. This makes fallback a stronger type hint than a cast coercion.
fcx.check_casts();
// Closure and generator analysis may run after fallback
// because they don't constrain other type variables.
fcx.closure_analyze(body);
assert!(fcx.deferred_call_resolutions.borrow().is_empty());
fcx.resolve_generator_interiors(def_id);
fcx.select_all_obligations_or_error();
if fn_decl.is_some() {
fcx.regionck_fn(id, body);
} else {
fcx.regionck_expr(body);
}
fcx.resolve_type_vars_in_body(body)
});
// Consistency check: verify that our TypeckTables instance can hold all
// the ItemLocalIds it will need to hold.
assert_eq!(tables.local_id_root,
Some(DefId::local(tcx.hir.definitions().node_to_hir_id(id).owner)));
tables
}
fn check_abi<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, span: Span, abi: Abi) {
if !tcx.sess.target.target.is_abi_supported(abi) {
struct_span_err!(tcx.sess, span, E0570,
"The ABI `{}` is not supported for the current target", abi).emit()
}
}
struct GatherLocalsVisitor<'a, 'gcx: 'a+'tcx, 'tcx: 'a> {
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
parent_id: ast::NodeId,
}
impl<'a, 'gcx, 'tcx> GatherLocalsVisitor<'a, 'gcx, 'tcx> {
fn assign(&mut self, span: Span, nid: ast::NodeId, ty_opt: Option<LocalTy<'tcx>>) -> Ty<'tcx> {
match ty_opt {
None => {
// infer the variable's type
let var_ty = self.fcx.next_ty_var(TypeVariableOrigin::TypeInference(span));
self.fcx.locals.borrow_mut().insert(nid, LocalTy {
decl_ty: var_ty,
revealed_ty: var_ty
});
var_ty
}
Some(typ) => {
// take type that the user specified
self.fcx.locals.borrow_mut().insert(nid, typ);
typ.revealed_ty
}
}
}
}
impl<'a, 'gcx, 'tcx> Visitor<'gcx> for GatherLocalsVisitor<'a, 'gcx, 'tcx> {
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> {
NestedVisitorMap::None
}
// Add explicitly-declared locals.
fn visit_local(&mut self, local: &'gcx hir::Local) {
let local_ty = match local.ty {
Some(ref ty) => {
let o_ty = self.fcx.to_ty(&ty);
let revealed_ty = if self.fcx.tcx.features().impl_trait_in_bindings {
self.fcx.instantiate_opaque_types_from_value(
self.parent_id,
&o_ty
)
} else {
o_ty
};
let c_ty = self.fcx.inh.infcx.canonicalize_user_type_annotation(&revealed_ty);
debug!("visit_local: ty.hir_id={:?} o_ty={:?} revealed_ty={:?} c_ty={:?}",
ty.hir_id, o_ty, revealed_ty, c_ty);
self.fcx.tables.borrow_mut().user_provided_tys_mut().insert(ty.hir_id, c_ty);
Some(LocalTy { decl_ty: o_ty, revealed_ty })
},
None => None,
};
self.assign(local.span, local.id, local_ty);
debug!("Local variable {:?} is assigned type {}",
local.pat,
self.fcx.ty_to_string(
self.fcx.locals.borrow().get(&local.id).unwrap().clone().decl_ty));
intravisit::walk_local(self, local);
}
// Add pattern bindings.
fn visit_pat(&mut self, p: &'gcx hir::Pat) {
if let PatKind::Binding(_, _, ident, _) = p.node {
let var_ty = self.assign(p.span, p.id, None);
if !self.fcx.tcx.features().unsized_locals {
self.fcx.require_type_is_sized(var_ty, p.span,
traits::VariableType(p.id));
}
debug!("Pattern binding {} is assigned to {} with type {:?}",
ident,
self.fcx.ty_to_string(
self.fcx.locals.borrow().get(&p.id).unwrap().clone().decl_ty),
var_ty);
}
intravisit::walk_pat(self, p);
}
// Don't descend into the bodies of nested closures
fn visit_fn(&mut self, _: intravisit::FnKind<'gcx>, _: &'gcx hir::FnDecl,
_: hir::BodyId, _: Span, _: ast::NodeId) { }
}
/// When `check_fn` is invoked on a generator (i.e., a body that
/// includes yield), it returns back some information about the yield
/// points.
struct GeneratorTypes<'tcx> {
/// Type of value that is yielded.
yield_ty: ty::Ty<'tcx>,
/// Types that are captured (see `GeneratorInterior` for more).
interior: ty::Ty<'tcx>,
/// Indicates if the generator is movable or static (immovable)
movability: hir::GeneratorMovability,
}
/// Helper used for fns and closures. Does the grungy work of checking a function
/// body and returns the function context used for that purpose, since in the case of a fn item
/// there is still a bit more to do.
///
/// * ...
/// * inherited: other fields inherited from the enclosing fn (if any)
fn check_fn<'a, 'gcx, 'tcx>(inherited: &'a Inherited<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
fn_sig: ty::FnSig<'tcx>,
decl: &'gcx hir::FnDecl,
fn_id: ast::NodeId,
body: &'gcx hir::Body,
can_be_generator: Option<hir::GeneratorMovability>)
-> (FnCtxt<'a, 'gcx, 'tcx>, Option<GeneratorTypes<'tcx>>)
{
let mut fn_sig = fn_sig.clone();
debug!("check_fn(sig={:?}, fn_id={}, param_env={:?})", fn_sig, fn_id, param_env);
// Create the function context. This is either derived from scratch or,
// in the case of closures, based on the outer context.
let mut fcx = FnCtxt::new(inherited, param_env, body.value.id);
*fcx.ps.borrow_mut() = UnsafetyState::function(fn_sig.unsafety, fn_id);
let declared_ret_ty = fn_sig.output();
fcx.require_type_is_sized(declared_ret_ty, decl.output.span(), traits::SizedReturnType);
let revealed_ret_ty = fcx.instantiate_opaque_types_from_value(fn_id, &declared_ret_ty);
fcx.ret_coercion = Some(RefCell::new(CoerceMany::new(revealed_ret_ty)));
fn_sig = fcx.tcx.mk_fn_sig(
fn_sig.inputs().iter().cloned(),
revealed_ret_ty,
fn_sig.variadic,
fn_sig.unsafety,
fn_sig.abi
);
let span = body.value.span;
if body.is_generator && can_be_generator.is_some() {
let yield_ty = fcx.next_ty_var(TypeVariableOrigin::TypeInference(span));
fcx.require_type_is_sized(yield_ty, span, traits::SizedYieldType);
fcx.yield_ty = Some(yield_ty);
}
let outer_def_id = fcx.tcx.closure_base_def_id(fcx.tcx.hir.local_def_id(fn_id));
let outer_node_id = fcx.tcx.hir.as_local_node_id(outer_def_id).unwrap();
GatherLocalsVisitor { fcx: &fcx, parent_id: outer_node_id, }.visit_body(body);
// Add formal parameters.
for (arg_ty, arg) in fn_sig.inputs().iter().zip(&body.arguments) {
// Check the pattern.
fcx.check_pat_walk(&arg.pat, arg_ty,
ty::BindingMode::BindByValue(hir::Mutability::MutImmutable), true);
// Check that argument is Sized.
// The check for a non-trivial pattern is a hack to avoid duplicate warnings
// for simple cases like `fn foo(x: Trait)`,
// where we would error once on the parameter as a whole, and once on the binding `x`.
if arg.pat.simple_ident().is_none() && !fcx.tcx.features().unsized_locals {
fcx.require_type_is_sized(arg_ty, decl.output.span(), traits::SizedArgumentType);
}
fcx.write_ty(arg.hir_id, arg_ty);
}
let fn_hir_id = fcx.tcx.hir.node_to_hir_id(fn_id);
inherited.tables.borrow_mut().liberated_fn_sigs_mut().insert(fn_hir_id, fn_sig);
fcx.check_return_expr(&body.value);
// We insert the deferred_generator_interiors entry after visiting the body.
// This ensures that all nested generators appear before the entry of this generator.
// resolve_generator_interiors relies on this property.
let gen_ty = if can_be_generator.is_some() && body.is_generator {
let interior = fcx.next_ty_var(TypeVariableOrigin::MiscVariable(span));
fcx.deferred_generator_interiors.borrow_mut().push((body.id(), interior));
Some(GeneratorTypes {
yield_ty: fcx.yield_ty.unwrap(),
interior,
movability: can_be_generator.unwrap(),
})
} else {
None
};
// Finalize the return check by taking the LUB of the return types
// we saw and assigning it to the expected return type. This isn't
// really expected to fail, since the coercions would have failed
// earlier when trying to find a LUB.
//
// However, the behavior around `!` is sort of complex. In the
// event that the `actual_return_ty` comes back as `!`, that
// indicates that the fn either does not return or "returns" only
// values of type `!`. In this case, if there is an expected
// return type that is *not* `!`, that should be ok. But if the
// return type is being inferred, we want to "fallback" to `!`:
//
// let x = move || panic!();
//
// To allow for that, I am creating a type variable with diverging
// fallback. This was deemed ever so slightly better than unifying
// the return value with `!` because it allows for the caller to
// make more assumptions about the return type (e.g., they could do
//
// let y: Option<u32> = Some(x());
//
// which would then cause this return type to become `u32`, not
// `!`).
let coercion = fcx.ret_coercion.take().unwrap().into_inner();
let mut actual_return_ty = coercion.complete(&fcx);
if actual_return_ty.is_never() {
actual_return_ty = fcx.next_diverging_ty_var(
TypeVariableOrigin::DivergingFn(span));
}
fcx.demand_suptype(span, revealed_ret_ty, actual_return_ty);
// Check that the main return type implements the termination trait.
if let Some(term_id) = fcx.tcx.lang_items().termination() {
if let Some((id, _, entry_type)) = *fcx.tcx.sess.entry_fn.borrow() {
if id == fn_id {
if let config::EntryFnType::Main = entry_type {
let substs = fcx.tcx.mk_substs_trait(declared_ret_ty, &[]);
let trait_ref = ty::TraitRef::new(term_id, substs);
let return_ty_span = decl.output.span();
let cause = traits::ObligationCause::new(
return_ty_span, fn_id, ObligationCauseCode::MainFunctionType);
inherited.register_predicate(
traits::Obligation::new(
cause, param_env, trait_ref.to_predicate()));
}
}
}
}
// Check that a function marked as `#[panic_implementation]` has signature `fn(&PanicInfo) -> !`
if let Some(panic_impl_did) = fcx.tcx.lang_items().panic_impl() {
if panic_impl_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(panic_info_did) = fcx.tcx.lang_items().panic_info() {
// At this point we don't care if there are duplicate handlers or if the
// handler has the wrong signature, as this value will only be used when
// writing metadata, and that only happens if compilation succeeded.
fcx.tcx.sess.has_panic_handler.try_set_same(true);
if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
);
}
let inputs = fn_sig.inputs();
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_panic_info = match inputs[0].sty {
ty::Ref(region, ty, mutbl) => match ty.sty {
ty::Adt(ref adt, _) => {
adt.did == panic_info_did &&
mutbl == hir::Mutability::MutImmutable &&
*region != RegionKind::ReStatic
},
_ => false,
},
_ => false,
};
if !arg_is_panic_info {
fcx.tcx.sess.span_err(
decl.inputs[0].span,
"argument should be `&PanicInfo`",
);
}
if let Node::Item(item) = fcx.tcx.hir.get(fn_id) {
if let ItemKind::Fn(_, _, ref generics, _) = item.node {
if !generics.params.is_empty() {
fcx.tcx.sess.span_err(
span,
"should have no type parameters",
);
}
}
}
} else {
let span = fcx.tcx.sess.source_map().def_span(span);
fcx.tcx.sess.span_err(span, "function should have one argument");
}
} else {
fcx.tcx.sess.err("language item required, but not found: `panic_info`");
}
}
}
// Check that a function marked as `#[alloc_error_handler]` has signature `fn(Layout) -> !`
if let Some(alloc_error_handler_did) = fcx.tcx.lang_items().oom() {
if alloc_error_handler_did == fcx.tcx.hir.local_def_id(fn_id) {
if let Some(alloc_layout_did) = fcx.tcx.lang_items().alloc_layout() {
if declared_ret_ty.sty != ty::Never {
fcx.tcx.sess.span_err(
decl.output.span(),
"return type should be `!`",
);
}
let inputs = fn_sig.inputs();
let span = fcx.tcx.hir.span(fn_id);
if inputs.len() == 1 {
let arg_is_alloc_layout = match inputs[0].sty {
ty::Adt(ref adt, _) => {
adt.did == alloc_layout_did
},
_ => false,
};
if !arg_is_alloc_layout {
fcx.tcx.sess.span_err(
decl.inputs[0].span,
"argument should be `Layout`",
);
}
if let Node::Item(item) = fcx.tcx.hir.get(fn_id) {
if let ItemKind::Fn(_, _, ref generics, _) = item.node {
if !generics.params.is_empty() {
fcx.tcx.sess.span_err(
span,
"`#[alloc_error_handler]` function should have no type \
parameters",
);
}
}
}
} else {
let span = fcx.tcx.sess.source_map().def_span(span);
fcx.tcx.sess.span_err(span, "function should have one argument");
}
} else {
fcx.tcx.sess.err("language item required, but not found: `alloc_layout`");
}
}
}
(fcx, gen_ty)
}
fn check_struct<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: ast::NodeId,
span: Span) {
let def_id = tcx.hir.local_def_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
if def.repr.simd() {
check_simd(tcx, span, def_id);
}
check_transparent(tcx, span, def_id);
check_packed(tcx, span, def_id);
}
fn check_union<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
id: ast::NodeId,
span: Span) {
let def_id = tcx.hir.local_def_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
check_representable(tcx, span, def_id);
check_packed(tcx, span, def_id);
}
pub fn check_item_type<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, it: &'tcx hir::Item) {
debug!(
"check_item_type(it.id={}, it.name={})",
it.id,
tcx.item_path_str(tcx.hir.local_def_id(it.id))
);
let _indenter = indenter();
match it.node {
// Consts can play a role in type-checking, so they are included here.
hir::ItemKind::Static(..) => {
let def_id = tcx.hir.local_def_id(it.id);
tcx.typeck_tables_of(def_id);
maybe_check_static_with_link_section(tcx, def_id, it.span);
}
hir::ItemKind::Const(..) => {
tcx.typeck_tables_of(tcx.hir.local_def_id(it.id));
}
hir::ItemKind::Enum(ref enum_definition, _) => {
check_enum(tcx, it.span, &enum_definition.variants, it.id);
}
hir::ItemKind::Fn(..) => {} // entirely within check_item_body
hir::ItemKind::Impl(.., ref impl_item_refs) => {
debug!("ItemKind::Impl {} with id {}", it.name, it.id);
let impl_def_id = tcx.hir.local_def_id(it.id);
if let Some(impl_trait_ref) = tcx.impl_trait_ref(impl_def_id) {
check_impl_items_against_trait(
tcx,
it.span,
impl_def_id,
impl_trait_ref,
impl_item_refs,
);
let trait_def_id = impl_trait_ref.def_id;
check_on_unimplemented(tcx, trait_def_id, it);
}
}
hir::ItemKind::Trait(..) => {
let def_id = tcx.hir.local_def_id(it.id);
check_on_unimplemented(tcx, def_id, it);
}
hir::ItemKind::Struct(..) => {
check_struct(tcx, it.id, it.span);
}
hir::ItemKind::Union(..) => {
check_union(tcx, it.id, it.span);
}
hir::ItemKind::Existential(..) | hir::ItemKind::Ty(..) => {
let def_id = tcx.hir.local_def_id(it.id);
let pty_ty = tcx.type_of(def_id);
let generics = tcx.generics_of(def_id);
check_bounds_are_used(tcx, &generics, pty_ty);
}
hir::ItemKind::ForeignMod(ref m) => {
check_abi(tcx, it.span, m.abi);
if m.abi == Abi::RustIntrinsic {
for item in &m.items {
intrinsic::check_intrinsic_type(tcx, item);
}
} else if m.abi == Abi::PlatformIntrinsic {
for item in &m.items {
intrinsic::check_platform_intrinsic_type(tcx, item);
}
} else {
for item in &m.items {
let generics = tcx.generics_of(tcx.hir.local_def_id(item.id));
if generics.params.len() - generics.own_counts().lifetimes != 0 {
let mut err = struct_span_err!(
tcx.sess,
item.span,
E0044,
"foreign items may not have type parameters"
);
err.span_label(item.span, "can't have type parameters");
// FIXME: once we start storing spans for type arguments, turn this into a
// suggestion.
err.help(
"use specialization instead of type parameters by replacing them \
with concrete types like `u32`",
);
err.emit();
}
if let hir::ForeignItemKind::Fn(ref fn_decl, _, _) = item.node {
require_c_abi_if_variadic(tcx, fn_decl, m.abi, item.span);
}
}
}
}
_ => { /* nothing to do */ }
}
}
fn maybe_check_static_with_link_section(tcx: TyCtxt, id: DefId, span: Span) {
// Only restricted on wasm32 target for now
if !tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
return
}
// If `#[link_section]` is missing, then nothing to verify
let attrs = tcx.codegen_fn_attrs(id);
if attrs.link_section.is_none() {
return
}
// For the wasm32 target, statics with `#[link_section]` are placed into
// custom sections of the final output file, but this isn't like custom
// sections of other executable formats. Namely, we can only embed a list
// of bytes, nothing with pointers to anything else or relocations. If any
// relocations show up, reject them here.
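// As an illustration, `#[link_section = "..."] static A: &'static u8 = &1;`
// would be rejected on wasm32 below: its initializer holds a pointer, which
// shows up as a relocation in the allocation.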
let instance = ty::Instance::mono(tcx, id);
let cid = GlobalId {
instance,
promoted: None
};
let param_env = ty::ParamEnv::reveal_all();
if let Ok(static_) = tcx.const_eval(param_env.and(cid)) {
let alloc = if let ConstValue::ByRef(_, allocation, _) = static_.val {
allocation
} else {
bug!("Matching on non-ByRef static")
};
if alloc.relocations.len() != 0 {
let msg = "statics with a custom `#[link_section]` must be a \
simple list of bytes on the wasm target with no \
extra levels of indirection such as references";
tcx.sess.span_err(span, msg);
}
}
}
fn check_on_unimplemented<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def_id: DefId,
item: &hir::Item) {
let item_def_id = tcx.hir.local_def_id(item.id);
// an error would be reported if this fails.
let _ = traits::OnUnimplementedDirective::of_item(tcx, trait_def_id, item_def_id);
}
fn report_forbidden_specialization<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_item: &hir::ImplItem,
parent_impl: DefId)
{
let mut err = struct_span_err!(
tcx.sess, impl_item.span, E0520,
"`{}` specializes an item from a parent `impl`, but \
that item is not marked `default`",
impl_item.ident);
err.span_label(impl_item.span, format!("cannot specialize default item `{}`",
impl_item.ident));
match tcx.span_of_impl(parent_impl) {
Ok(span) => {
err.span_label(span, "parent `impl` is here");
err.note(&format!("to specialize, `{}` in the parent `impl` must be marked `default`",
impl_item.ident));
}
Err(cname) => {
err.note(&format!("parent implementation is in crate `{}`", cname));
}
}
err.emit();
}
fn check_specialization_validity<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
trait_def: &ty::TraitDef,
trait_item: &ty::AssociatedItem,
impl_id: DefId,
impl_item: &hir::ImplItem)
{
let ancestors = trait_def.ancestors(tcx, impl_id);
let kind = match impl_item.node {
hir::ImplItemKind::Const(..) => ty::AssociatedKind::Const,
hir::ImplItemKind::Method(..) => ty::AssociatedKind::Method,
hir::ImplItemKind::Existential(..) => ty::AssociatedKind::Existential,
hir::ImplItemKind::Type(_) => ty::AssociatedKind::Type
};
let parent = ancestors.defs(tcx, trait_item.ident, kind, trait_def.def_id).nth(1)
.map(|node_item| node_item.map(|parent| parent.defaultness));
if let Some(parent) = parent {
if tcx.impl_item_is_final(&parent) {
report_forbidden_specialization(tcx, impl_item, parent.node.def_id());
}
}
}
fn check_impl_items_against_trait<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
impl_span: Span,
impl_id: DefId,
impl_trait_ref: ty::TraitRef<'tcx>,
impl_item_refs: &[hir::ImplItemRef]) {
let impl_span = tcx.sess.source_map().def_span(impl_span);
// If the trait reference itself is erroneous (so the compilation is going
// to fail), skip checking the items here -- the `impl_item` table in `tcx`
// isn't populated for such impls.
if impl_trait_ref.references_error() { return; }
// Locate trait definition and items
let trait_def = tcx.trait_def(impl_trait_ref.def_id);
let mut overridden_associated_type = None;
let impl_items = || impl_item_refs.iter().map(|iiref| tcx.hir.impl_item(iiref.id));
// Check existing impl methods to see if they are both present in trait
// and compatible with trait signature
for impl_item in impl_items() {
let ty_impl_item = tcx.associated_item(tcx.hir.local_def_id(impl_item.id));
let ty_trait_item = tcx.associated_items(impl_trait_ref.def_id)
.find(|ac| Namespace::from(&impl_item.node) == Namespace::from(ac.kind) &&
tcx.hygienic_eq(ty_impl_item.ident, ac.ident, impl_trait_ref.def_id))
.or_else(|| {
// Not compatible, but needed for the error message
tcx.associated_items(impl_trait_ref.def_id)
.find(|ac| tcx.hygienic_eq(ty_impl_item.ident, ac.ident, impl_trait_ref.def_id))
});
// Check that impl definition matches trait definition
if let Some(ty_trait_item) = ty_trait_item {
match impl_item.node {
hir::ImplItemKind::Const(..) => {
// Find associated const definition.
if ty_trait_item.kind == ty::AssociatedKind::Const {
compare_const_impl(tcx,
&ty_impl_item,
impl_item.span,
&ty_trait_item,
impl_trait_ref);
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0323,
"item `{}` is an associated const, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
// We can only get the spans from local trait definition
// Same for E0324 and E0325
if let Some(trait_span) = tcx.hir.span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
hir::ImplItemKind::Method(..) => {
let trait_span = tcx.hir.span_if_local(ty_trait_item.def_id);
if ty_trait_item.kind == ty::AssociatedKind::Method {
compare_impl_method(tcx,
&ty_impl_item,
impl_item.span,
&ty_trait_item,
impl_trait_ref,
trait_span);
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0324,
"item `{}` is an associated method, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
if let Some(trait_span) = tcx.hir.span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
hir::ImplItemKind::Existential(..) |
hir::ImplItemKind::Type(_) => {
if ty_trait_item.kind == ty::AssociatedKind::Type {
if ty_trait_item.defaultness.has_value() {
overridden_associated_type = Some(impl_item);
}
} else {
let mut err = struct_span_err!(tcx.sess, impl_item.span, E0325,
"item `{}` is an associated type, \
which doesn't match its trait `{}`",
ty_impl_item.ident,
impl_trait_ref);
err.span_label(impl_item.span, "does not match trait");
if let Some(trait_span) = tcx.hir.span_if_local(ty_trait_item.def_id) {
err.span_label(trait_span, "item in trait");
}
err.emit()
}
}
}
check_specialization_validity(tcx, trait_def, &ty_trait_item, impl_id, impl_item);
}
}
// Check for missing items from trait
let mut missing_items = Vec::new();
let mut invalidated_items = Vec::new();
let associated_type_overridden = overridden_associated_type.is_some();
for trait_item in tcx.associated_items(impl_trait_ref.def_id) {
let is_implemented = trait_def.ancestors(tcx, impl_id)
.defs(tcx, trait_item.ident, trait_item.kind, impl_trait_ref.def_id)
.next()
.map(|node_item| !node_item.node.is_from_trait())
.unwrap_or(false);
if !is_implemented && !tcx.impl_is_default(impl_id) {
if !trait_item.defaultness.has_value() {
missing_items.push(trait_item);
} else if associated_type_overridden {
invalidated_items.push(trait_item.ident);
}
}
}
if !missing_items.is_empty() {
let mut err = struct_span_err!(tcx.sess, impl_span, E0046,
"not all trait items implemented, missing: `{}`",
missing_items.iter()
.map(|trait_item| trait_item.ident.to_string())
.collect::<Vec<_>>().join("`, `"));
err.span_label(impl_span, format!("missing `{}` in implementation",
missing_items.iter()
.map(|trait_item| trait_item.ident.to_string())
.collect::<Vec<_>>().join("`, `")));
for trait_item in missing_items {
if let Some(span) = tcx.hir.span_if_local(trait_item.def_id) {
err.span_label(span, format!("`{}` from trait", trait_item.ident));
} else {
err.note_trait_signature(trait_item.ident.to_string(),
trait_item.signature(&tcx));
}
}
err.emit();
}
if !invalidated_items.is_empty() {
let invalidator = overridden_associated_type.unwrap();
span_err!(tcx.sess, invalidator.span, E0399,
"the following trait items need to be reimplemented \
as `{}` was overridden: `{}`",
invalidator.ident,
invalidated_items.iter()
.map(|name| name.to_string())
.collect::<Vec<_>>().join("`, `"))
}
}
/// Checks whether a type can be represented in memory. In particular, it
/// identifies types that contain themselves without indirection through a
/// pointer, which would mean their size is unbounded.
fn check_representable<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
item_def_id: DefId)
-> bool {
let rty = tcx.type_of(item_def_id);
// Check that it is possible to represent this type. This call identifies
// (1) types that contain themselves and (2) types that contain a different
// recursive type. It is only necessary to throw an error on those that
// contain themselves. For case 2, there must be an inner type that will be
// caught by case 1.
match rty.is_representable(tcx, sp) {
Representability::SelfRecursive(spans) => {
let mut err = tcx.recursive_type_with_infinite_size_error(item_def_id);
for span in spans {
err.span_label(span, "recursive without indirection");
}
err.emit();
return false
}
Representability::Representable | Representability::ContainsRecursive => (),
}
return true
}
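// e.g. `struct List { next: List }` is `SelfRecursive` and rejected above,
// while `struct List { next: Option<Box<List>> }` is `Representable` thanks
// to the indirection through `Box`.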
pub fn check_simd<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let t = tcx.type_of(def_id);
if let ty::Adt(def, substs) = t.sty {
if def.is_struct() {
let fields = &def.non_enum_variant().fields;
if fields.is_empty() {
span_err!(tcx.sess, sp, E0075, "SIMD vector cannot be empty");
return;
}
let e = fields[0].ty(tcx, substs);
if !fields.iter().all(|f| f.ty(tcx, substs) == e) {
struct_span_err!(tcx.sess, sp, E0076, "SIMD vector should be homogeneous")
.span_label(sp, "SIMD elements must have the same type")
.emit();
return;
}
match e.sty {
ty::Param(_) => { /* struct<T>(T, T, T, T) is ok */ }
_ if e.is_machine() => { /* struct(u8, u8, u8, u8) is ok */ }
_ => {
span_err!(tcx.sess, sp, E0077,
"SIMD vector element type should be machine type");
return;
}
}
}
}
}
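// e.g. `#[repr(simd)] struct F32x4(f32, f32, f32, f32);` passes all three
// checks above, whereas a heterogeneous `struct Mixed(f32, u32, f32, f32)`
// would trip E0076.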
fn check_packed<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let repr = tcx.adt_def(def_id).repr;
if repr.packed() {
for attr in tcx.get_attrs(def_id).iter() {
for r in attr::find_repr_attrs(&tcx.sess.parse_sess, attr) {
if let attr::ReprPacked(pack) = r {
if pack != repr.pack {
struct_span_err!(tcx.sess, sp, E0634,
"type has conflicting packed representation hints").emit();
}
}
}
}
if repr.align > 0 {
struct_span_err!(tcx.sess, sp, E0587,
"type has conflicting packed and align representation hints").emit();
}
else if check_packed_inner(tcx, def_id, &mut Vec::new()) {
struct_span_err!(tcx.sess, sp, E0588,
"packed type cannot transitively contain a `[repr(align)]` type").emit();
}
}
}
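// Illustrative E0588 case: `#[repr(packed)] struct Outer(Inner);` where
// `Inner` is `#[repr(align(8))]` -- `check_packed_inner` below walks the
// field types transitively to find such a conflict.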
fn check_packed_inner<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
def_id: DefId,
stack: &mut Vec<DefId>) -> bool {
let t = tcx.type_of(def_id);
if stack.contains(&def_id) {
debug!("check_packed_inner: {:?} is recursive", t);
return false;
}
if let ty::Adt(def, substs) = t.sty {
if def.is_struct() || def.is_union() {
if tcx.adt_def(def.did).repr.align > 0 {
return true;
}
// push struct def_id before checking fields
stack.push(def_id);
for field in &def.non_enum_variant().fields {
let f = field.ty(tcx, substs);
if let ty::Adt(def, _) = f.sty {
if check_packed_inner(tcx, def.did, stack) {
return true;
}
}
}
// only need to pop if not early out
stack.pop();
}
}
false
}
fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: DefId) {
let adt = tcx.adt_def(def_id);
if !adt.repr.transparent() {
return;
}
// For each field, figure out if it's known to be a ZST and align(1)
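// e.g. `#[repr(transparent)] struct Grams(f32, PhantomData<u8>);` is fine:
// `PhantomData<u8>` is a ZST with alignment 1, leaving exactly one
// non-zero-sized field (the `f32`).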
let field_infos = adt.non_enum_variant().fields.iter().map(|field| {
let ty = field.ty(tcx, Substs::identity_for_item(tcx, field.did));
let param_env = tcx.param_env(field.did);
let layout = tcx.layout_of(param_env.and(ty));
// We are currently checking the type this field came from, so it must be local
let span = tcx.hir.span_if_local(field.did).unwrap();
let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
let align1 = layout.map(|layout| layout.align.abi() == 1).unwrap_or(false);
(span, zst, align1)
});
let non_zst_fields = field_infos.clone().filter(|(_span, zst, _align1)| !*zst);
let non_zst_count = non_zst_fields.clone().count();
if non_zst_count != 1 {
let field_spans: Vec<_> = non_zst_fields.map(|(span, _zst, _align1)| span).collect();
struct_span_err!(tcx.sess, sp, E0690,
"transparent struct needs exactly one non-zero-sized field, but has {}",
non_zst_count)
.span_note(field_spans, "non-zero-sized field")
.emit();
}
for (span, zst, align1) in field_infos {
if zst && !align1 {
span_err!(tcx.sess, span, E0691,
"zero-sized field in transparent struct has alignment larger than 1");
}
}
}
#[allow(trivial_numeric_casts)]
pub fn check_enum<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
sp: Span,
vs: &'tcx [hir::Variant],
id: ast::NodeId) {
let def_id = tcx.hir.local_def_id(id);
let def = tcx.adt_def(def_id);
def.destructor(tcx); // force the destructor to be evaluated
if vs.is_empty() {
let attributes = tcx.get_attrs(def_id);
if let Some(attr) = attr::find_by_name(&attributes, "repr") {
struct_span_err!(
tcx.sess, attr.span, E0084,
"unsupported representation for zero-variant enum")
.span_label(sp, "zero-variant enum")
.emit();
}
}
let repr_type_ty = def.repr.discr_type().to_ty(tcx);
if repr_type_ty == tcx.types.i128 || repr_type_ty == tcx.types.u128 {
if !tcx.features().repr128 {
emit_feature_err(&tcx.sess.parse_sess,
"repr128",
sp,
GateIssue::Language,
"repr with 128-bit type is unstable");
}
}
for v in vs {
if let Some(ref e) = v.node.disr_expr {
tcx.typeck_tables_of(tcx.hir.local_def_id(e.id));
}
}
let mut disr_vals: Vec<Discr<'tcx>> = Vec::with_capacity(vs.len());
for (discr, v) in def.discriminants(tcx).zip(vs) {
// Check for duplicate discriminant values
if let Some(i) = disr_vals.iter().position(|&x| x.val == discr.val) {
let variant_i_node_id = tcx.hir.as_local_node_id(def.variants[i].did).unwrap();
let variant_i = tcx.hir.expect_variant(variant_i_node_id);
let i_span = match variant_i.node.disr_expr {
Some(ref expr) => tcx.hir.span(expr.id),
None => tcx.hir.span(variant_i_node_id)
};
let span = match v.node.disr_expr {
Some(ref expr) => tcx.hir.span(expr.id),
None => v.span
};
struct_span_err!(tcx.sess, span, E0081,
"discriminant value `{}` already exists", disr_vals[i])
.span_label(i_span, format!("first use of `{}`", disr_vals[i]))
.span_label(span , format!("enum already has `{}`", disr_vals[i]))
.emit();
}
disr_vals.push(discr);
}
check_representable(tcx, sp, def_id);
}
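// e.g. `enum E { A = 1, B = 1 }` takes the E0081 path above: the second `1`
// matches an earlier entry in `disr_vals`, so both uses get labeled spans.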
impl<'a, 'gcx, 'tcx> AstConv<'gcx, 'tcx> for FnCtxt<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'tcx> { self.tcx }
fn get_type_parameter_bounds(&self, _: Span, def_id: DefId)
-> ty::GenericPredicates<'tcx>
{
let tcx = self.tcx;
let node_id = tcx.hir.as_local_node_id(def_id).unwrap();
let item_id = tcx.hir.ty_param_owner(node_id);
let item_def_id = tcx.hir.local_def_id(item_id);
let generics = tcx.generics_of(item_def_id);
let index = generics.param_def_id_to_index[&def_id];
ty::GenericPredicates {
parent: None,
predicates: self.param_env.caller_bounds.iter().filter_map(|&predicate| {
match predicate {
ty::Predicate::Trait(ref data)
if data.skip_binder().self_ty().is_param(index) => {
// HACK(eddyb) should get the original `Span`.
let span = tcx.def_span(def_id);
Some((predicate, span))
}
_ => None
}
}).collect()
}
}
fn re_infer(&self, span: Span, def: Option<&ty::GenericParamDef>)
-> Option<ty::Region<'tcx>> {
let v = match def {
Some(def) => infer::EarlyBoundRegion(span, def.name),
None => infer::MiscVariable(span)
};
Some(self.next_region_var(v))
}
fn ty_infer(&self, span: Span) -> Ty<'tcx> {
self.next_ty_var(TypeVariableOrigin::TypeInference(span))
}
fn ty_infer_for_def(&self,
ty_param_def: &ty::GenericParamDef,
span: Span) -> Ty<'tcx> {
if let UnpackedKind::Type(ty) = self.var_for_def(span, ty_param_def).unpack() {
return ty;
}
unreachable!()
}
fn projected_ty_from_poly_trait_ref(&self,
span: Span,
item_def_id: DefId,
poly_trait_ref: ty::PolyTraitRef<'tcx>)
-> Ty<'tcx>
{
let (trait_ref, _) =
self.replace_late_bound_regions_with_fresh_var(
span,
infer::LateBoundRegionConversionTime::AssocTypeProjection(item_def_id),
&poly_trait_ref);
self.tcx().mk_projection(item_def_id, trait_ref.substs)
}
fn normalize_ty(&self, span: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
if ty.has_escaping_bound_vars() {
ty // FIXME: normalization and escaping regions
} else {
self.normalize_associated_types_in(span, &ty)
}
}
fn set_tainted_by_errors(&self) {
self.infcx.set_tainted_by_errors()
}
fn record_ty(&self, hir_id: hir::HirId, ty: Ty<'tcx>, _span: Span) {
self.write_ty(hir_id, ty)
}
}
/// Controls whether the arguments are tupled. This is used for the call
/// operator.
///
/// Tupling means that all call-side arguments are packed into a tuple and
/// passed as a single parameter. For example, if tupling is enabled, this
/// function:
///
/// fn f(x: (isize, isize))
///
/// Can be called as:
///
/// f(1, 2);
///
/// Instead of:
///
/// f((1, 2));
#[derive(Clone, Eq, PartialEq)]
enum TupleArgumentsFlag {
DontTupleArguments,
TupleArguments,
}
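// A hedged sketch of why tupling exists: a call `f(1, 2)` dispatched through
// the `Fn` traits is effectively `Fn::call(&f, (1, 2))`, so under
// `TupleArguments` the call-site arguments are checked against a single
// tuple-typed parameter.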
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
pub fn new(inh: &'a Inherited<'a, 'gcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
body_id: ast::NodeId)
-> FnCtxt<'a, 'gcx, 'tcx> {
FnCtxt {
body_id,
param_env,
err_count_on_creation: inh.tcx.sess.err_count(),
ret_coercion: None,
yield_ty: None,
ps: RefCell::new(UnsafetyState::function(hir::Unsafety::Normal,
ast::CRATE_NODE_ID)),
diverges: Cell::new(Diverges::Maybe),
has_errors: Cell::new(false),
enclosing_breakables: RefCell::new(EnclosingBreakables {
stack: Vec::new(),
by_id: NodeMap(),
}),
inh,
}
}
pub fn sess(&self) -> &Session {
&self.tcx.sess
}
pub fn err_count_since_creation(&self) -> usize {
self.tcx.sess.err_count() - self.err_count_on_creation
}
/// Produce warning on the given node, if the current point in the
/// function is unreachable, and there hasn't been another warning.
fn warn_if_unreachable(&self, id: ast::NodeId, span: Span, kind: &str) {
if self.diverges.get() == Diverges::Always {
self.diverges.set(Diverges::WarnedAlways);
debug!("warn_if_unreachable: id={:?} span={:?} kind={}", id, span, kind);
self.tcx().lint_node(
lint::builtin::UNREACHABLE_CODE,
id, span,
&format!("unreachable {}", kind));
}
}
pub fn cause(&self,
span: Span,
code: ObligationCauseCode<'tcx>)
-> ObligationCause<'tcx> {
ObligationCause::new(span, self.body_id, code)
}
pub fn misc(&self, span: Span) -> ObligationCause<'tcx> {
self.cause(span, ObligationCauseCode::MiscObligation)
}
/// Resolves type variables in `ty` if possible. Unlike the infcx
/// version (resolve_type_vars_if_possible), this version will
/// also select obligations if it seems useful, in an effort
/// to get more type information.
fn resolve_type_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> {
debug!("resolve_type_vars_with_obligations(ty={:?})", ty);
// No Infer()? Nothing needs doing.
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// If `ty` is a type variable, see whether we already know what it is.
ty = self.resolve_type_vars_if_possible(&ty);
if !ty.has_infer_types() {
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
return ty;
}
// If not, try resolving pending obligations as much as
// possible. This can help substantially when there are
// indirect dependencies that don't seem worth tracking
// precisely.
self.select_obligations_where_possible(false);
ty = self.resolve_type_vars_if_possible(&ty);
debug!("resolve_type_vars_with_obligations: ty={:?}", ty);
ty
}
fn record_deferred_call_resolution(&self,
closure_def_id: DefId,
r: DeferredCallResolution<'gcx, 'tcx>) {
let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.entry(closure_def_id).or_default().push(r);
}
fn remove_deferred_call_resolutions(&self,
closure_def_id: DefId)
-> Vec<DeferredCallResolution<'gcx, 'tcx>>
{
let mut deferred_call_resolutions = self.deferred_call_resolutions.borrow_mut();
deferred_call_resolutions.remove(&closure_def_id).unwrap_or(vec![])
}
pub fn tag(&self) -> String {
let self_ptr: *const FnCtxt = self;
format!("{:?}", self_ptr)
}
pub fn local_ty(&self, span: Span, nid: ast::NodeId) -> LocalTy<'tcx> {
self.locals.borrow().get(&nid).cloned().unwrap_or_else(||
span_bug!(span, "no type for local variable {}",
self.tcx.hir.node_to_string(nid))
)
}
#[inline]
pub fn write_ty(&self, id: hir::HirId, ty: Ty<'tcx>) {
debug!("write_ty({:?}, {:?}) in fcx {}",
id, self.resolve_type_vars_if_possible(&ty), self.tag());
self.tables.borrow_mut().node_types_mut().insert(id, ty);
if ty.references_error() {
self.has_errors.set(true);
self.set_tainted_by_errors();
}
}
pub fn write_field_index(&self, node_id: ast::NodeId, index: usize) {
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.tables.borrow_mut().field_indices_mut().insert(hir_id, index);
}
pub fn write_method_call(&self,
hir_id: hir::HirId,
method: MethodCallee<'tcx>) {
debug!("write_method_call(hir_id={:?}, method={:?})", hir_id, method);
self.tables
.borrow_mut()
.type_dependent_defs_mut()
.insert(hir_id, Def::Method(method.def_id));
self.write_substs(hir_id, method.substs);
// When the method is confirmed, the `method.substs` includes
// parameters from not just the method, but also the impl of
// the method -- in particular, the `Self` type will be fully
// resolved. However, those are not something that the "user
// specified" -- i.e., those types come from the inferred type
// of the receiver, not something the user wrote. So when we
// create the user-substs, we want to replace those earlier
// types with just the types that the user actually wrote --
// that is, those that appear on the *method itself*.
//
// As an example, if the user wrote something like
// `foo.bar::<u32>(...)` -- the `Self` type here will be the
// type of `foo` (possibly adjusted), but we don't want to
// include that. We want just the `[_, u32]` part.
if !method.substs.is_noop() {
let method_generics = self.tcx.generics_of(method.def_id);
if !method_generics.params.is_empty() {
let user_substs = self.infcx.probe(|_| {
let just_method_substs = Substs::for_item(self.tcx, method.def_id, |param, _| {
let i = param.index as usize;
if i < method_generics.parent_count {
self.infcx.var_for_def(DUMMY_SP, param)
} else {
method.substs[i]
}
});
self.infcx.canonicalize_user_type_annotation(&UserSubsts {
substs: just_method_substs,
user_self_ty: None, // not relevant here
})
});
debug!("write_method_call: user_substs = {:?}", user_substs);
self.write_user_substs(hir_id, user_substs);
}
}
}
pub fn write_substs(&self, node_id: hir::HirId, substs: &'tcx Substs<'tcx>) {
if !substs.is_noop() {
debug!("write_substs({:?}, {:?}) in fcx {}",
node_id,
substs,
self.tag());
self.tables.borrow_mut().node_substs_mut().insert(node_id, substs);
}
}
/// Given the substs that we just converted from the HIR, try to
/// canonicalize them and store them as user-given substitutions
/// (i.e., substitutions that must be respected by the NLL check).
///
/// This should be invoked **before any unifications have
/// occurred**, so that annotations like `Vec<_>` are preserved
/// properly.
pub fn write_user_substs_from_substs(
&self,
hir_id: hir::HirId,
substs: &'tcx Substs<'tcx>,
user_self_ty: Option<UserSelfTy<'tcx>>,
) {
debug!(
"write_user_substs_from_substs({:?}, {:?}) in fcx {}",
hir_id,
substs,
self.tag(),
);
if !substs.is_noop() {
let user_substs = self.infcx.canonicalize_user_type_annotation(&UserSubsts {
substs,
user_self_ty,
});
debug!("instantiate_value_path: user_substs = {:?}", user_substs);
self.write_user_substs(hir_id, user_substs);
}
}
pub fn write_user_substs(&self, hir_id: hir::HirId, substs: CanonicalUserSubsts<'tcx>) {
debug!(
"write_user_substs({:?}, {:?}) in fcx {}",
hir_id,
substs,
self.tag(),
);
if !substs.is_identity() {
self.tables.borrow_mut().user_substs_mut().insert(hir_id, substs);
} else {
debug!("write_user_substs: skipping identity substs");
}
}
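    // Illustrative sketch (an assumed user-side scenario, not compiler
    // data): for a call `f(&s)` with `s: String` where `f` expects `&str`,
    // the coercion machinery produces a deref-and-reborrow adjustment chain
    // for the argument expression, and this method records (or composes)
    // that chain against the expression's `HirId`.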
pub fn apply_adjustments(&self, expr: &hir::Expr, adj: Vec<Adjustment<'tcx>>) {
debug!("apply_adjustments(expr={:?}, adj={:?})", expr, adj);
if adj.is_empty() {
return;
}
match self.tables.borrow_mut().adjustments_mut().entry(expr.hir_id) {
Entry::Vacant(entry) => { entry.insert(adj); },
Entry::Occupied(mut entry) => {
debug!(" - composing on top of {:?}", entry.get());
match (&entry.get()[..], &adj[..]) {
// Applying any adjustment on top of a NeverToAny
// is a valid NeverToAny adjustment, because it can't
// be reached.
(&[Adjustment { kind: Adjust::NeverToAny, .. }], _) => return,
(&[
Adjustment { kind: Adjust::Deref(_), .. },
Adjustment { kind: Adjust::Borrow(AutoBorrow::Ref(..)), .. },
], &[
Adjustment { kind: Adjust::Deref(_), .. },
.. // Any following adjustments are allowed.
]) => {
// A reborrow has no effect before a dereference.
}
// FIXME: currently we never try to compose autoderefs
// and ReifyFnPointer/UnsafeFnPointer, but we could.
_ =>
bug!("while adjusting {:?}, can't compose {:?} and {:?}",
expr, entry.get(), adj)
};
*entry.get_mut() = adj;
}
}
}
    /// Basically, whenever we are converting from a type scheme into the
    /// fn body space, we also want to normalize associated types. This
    /// function combines substitution and normalization.
fn instantiate_type_scheme<T>(&self,
span: Span,
substs: &Substs<'tcx>,
value: &T)
-> T
where T : TypeFoldable<'tcx>
{
let value = value.subst(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &value);
debug!("instantiate_type_scheme(value={:?}, substs={:?}) = {:?}",
value,
substs,
result);
result
}
/// As `instantiate_type_scheme`, but for the bounds found in a
/// generic type scheme.
fn instantiate_bounds(&self, span: Span, def_id: DefId, substs: &Substs<'tcx>)
-> ty::InstantiatedPredicates<'tcx> {
let bounds = self.tcx.predicates_of(def_id);
let result = bounds.instantiate(self.tcx, substs);
let result = self.normalize_associated_types_in(span, &result);
debug!("instantiate_bounds(bounds={:?}, substs={:?}) = {:?}",
bounds,
substs,
result);
result
}
    /// Replaces the opaque types from the given value with type variables,
    /// and records the `OpaqueTypeMap` for later use during writeback. See
    /// `InferCtxt::instantiate_opaque_types` for more details.
fn instantiate_opaque_types_from_value<T: TypeFoldable<'tcx>>(
&self,
parent_id: ast::NodeId,
value: &T,
) -> T {
let parent_def_id = self.tcx.hir.local_def_id(parent_id);
debug!("instantiate_opaque_types_from_value(parent_def_id={:?}, value={:?})",
parent_def_id,
value);
let (value, opaque_type_map) = self.register_infer_ok_obligations(
self.instantiate_opaque_types(
parent_def_id,
self.body_id,
self.param_env,
value,
)
);
let mut opaque_types = self.opaque_types.borrow_mut();
for (ty, decl) in opaque_type_map {
let old_value = opaque_types.insert(ty, decl);
assert!(old_value.is_none(), "instantiated twice: {:?}/{:?}", ty, decl);
}
value
}
fn normalize_associated_types_in<T>(&self, span: Span, value: &T) -> T
where T : TypeFoldable<'tcx>
{
self.inh.normalize_associated_types_in(span, self.body_id, self.param_env, value)
}
fn normalize_associated_types_in_as_infer_ok<T>(&self, span: Span, value: &T)
-> InferOk<'tcx, T>
where T : TypeFoldable<'tcx>
{
self.inh.partially_normalize_associated_types_in(span,
self.body_id,
self.param_env,
value)
}
pub fn require_type_meets(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>,
def_id: DefId)
{
self.register_bound(
ty,
def_id,
traits::ObligationCause::new(span, self.body_id, code));
}
pub fn require_type_is_sized(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>)
{
let lang_item = self.tcx.require_lang_item(lang_items::SizedTraitLangItem);
self.require_type_meets(ty, span, code, lang_item);
}
pub fn register_bound(&self,
ty: Ty<'tcx>,
def_id: DefId,
cause: traits::ObligationCause<'tcx>)
{
self.fulfillment_cx.borrow_mut()
.register_bound(self, self.param_env, ty, def_id, cause);
}
pub fn to_ty(&self, ast_t: &hir::Ty) -> Ty<'tcx> {
let t = AstConv::ast_ty_to_ty(self, ast_t);
self.register_wf_obligation(t, ast_t.span, traits::MiscObligation);
t
}
pub fn to_ty_saving_user_provided_ty(&self, ast_ty: &hir::Ty) -> Ty<'tcx> {
let ty = self.to_ty(ast_ty);
// If the type given by the user has free regions, save it for
// later, since NLL would like to enforce those. Also pass in
// types that involve projections, since those can resolve to
// `'static` bounds (modulo #54940, which hopefully will be
// fixed by the time you see this comment, dear reader,
// although I have my doubts). Other sorts of things are
// already sufficiently enforced with erased regions. =)
if ty.has_free_regions() || ty.has_projections() {
let c_ty = self.infcx.canonicalize_response(&ty);
self.tables.borrow_mut().user_provided_tys_mut().insert(ast_ty.hir_id, c_ty);
}
ty
}
pub fn node_ty(&self, id: hir::HirId) -> Ty<'tcx> {
match self.tables.borrow().node_types().get(id) {
Some(&t) => t,
None if self.is_tainted_by_errors() => self.tcx.types.err,
None => {
let node_id = self.tcx.hir.hir_to_node_id(id);
bug!("no type for node {}: {} in fcx {}",
node_id, self.tcx.hir.node_to_string(node_id),
self.tag());
}
}
}
    /// Registers an obligation for checking later, during regionck, that
    /// the type `ty` is well-formed.
pub fn register_wf_obligation(&self,
ty: Ty<'tcx>,
span: Span,
code: traits::ObligationCauseCode<'tcx>)
{
// WF obligations never themselves fail, so no real need to give a detailed cause:
let cause = traits::ObligationCause::new(span, self.body_id, code);
self.register_predicate(traits::Obligation::new(cause,
self.param_env,
ty::Predicate::WellFormed(ty)));
}
/// Registers obligations that all types appearing in `substs` are well-formed.
pub fn add_wf_bounds(&self, substs: &Substs<'tcx>, expr: &hir::Expr) {
for ty in substs.types() {
self.register_wf_obligation(ty, expr.span, traits::MiscObligation);
}
}
    /// Given a fully substituted set of bounds (`predicates`), creates and
    /// registers suitable trait/region obligations.
///
/// For example, if there is a function:
///
/// ```
/// fn foo<'a,T:'a>(...)
/// ```
///
/// and a reference:
///
/// ```
/// let f = foo;
/// ```
///
/// Then we will create a fresh region variable `'$0` and a fresh type variable `$1` for `'a`
/// and `T`. This routine will add a region obligation `$1:'$0` and register it locally.
pub fn add_obligations_for_parameters(&self,
cause: traits::ObligationCause<'tcx>,
predicates: &ty::InstantiatedPredicates<'tcx>)
{
assert!(!predicates.has_escaping_bound_vars());
debug!("add_obligations_for_parameters(predicates={:?})",
predicates);
for obligation in traits::predicates_for_generics(cause, self.param_env, predicates) {
self.register_predicate(obligation);
}
}
// FIXME(arielb1): use this instead of field.ty everywhere
    // Only for fields! Does not apply to methods.
// Indifferent to privacy flags
pub fn field_ty(&self,
span: Span,
field: &'tcx ty::FieldDef,
substs: &Substs<'tcx>)
-> Ty<'tcx>
{
self.normalize_associated_types_in(span, &field.ty(self.tcx, substs))
}
fn check_casts(&self) {
let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
for cast in deferred_cast_checks.drain(..) {
cast.check(self);
}
}
fn resolve_generator_interiors(&self, def_id: DefId) {
let mut generators = self.deferred_generator_interiors.borrow_mut();
for (body_id, interior) in generators.drain(..) {
self.select_obligations_where_possible(false);
generator_interior::resolve_interior(self, def_id, body_id, interior);
}
}
    // Tries to apply a fallback to `ty` if it is an unsolved variable.
    // Non-numerics get replaced with `!` or `()` (depending on whether
    // `feature(never_type)` is enabled); unconstrained ints get `i32`, and
    // unconstrained floats get `f64`.
    // Fallback becomes very dubious if we have encountered type-checking
    // errors. In that case, we fall back to `Error`.
    // The return value indicates whether fallback has occurred.
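    //
    // For example (user-side, illustrative only):
    //
    //     let x = 42;     // `{integer}` is unconstrained: falls back to `i32`
    //     let y = 2.5;    // `{float}` is unconstrained: falls back to `f64`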
fn fallback_if_possible(&self, ty: Ty<'tcx>) -> bool {
use rustc::ty::error::UnconstrainedNumeric::Neither;
use rustc::ty::error::UnconstrainedNumeric::{UnconstrainedInt, UnconstrainedFloat};
assert!(ty.is_ty_infer());
let fallback = match self.type_is_unconstrained_numeric(ty) {
_ if self.is_tainted_by_errors() => self.tcx().types.err,
UnconstrainedInt => self.tcx.types.i32,
UnconstrainedFloat => self.tcx.types.f64,
Neither if self.type_var_diverges(ty) => self.tcx.mk_diverging_default(),
Neither => return false,
};
debug!("default_type_parameters: defaulting `{:?}` to `{:?}`", ty, fallback);
self.demand_eqtype(syntax_pos::DUMMY_SP, ty, fallback);
true
}
fn select_all_obligations_or_error(&self) {
debug!("select_all_obligations_or_error");
if let Err(errors) = self.fulfillment_cx.borrow_mut().select_all_or_error(&self) {
self.report_fulfillment_errors(&errors, self.inh.body_id, false);
}
}
/// Select as many obligations as we can at present.
fn select_obligations_where_possible(&self, fallback_has_occurred: bool) {
if let Err(errors) = self.fulfillment_cx.borrow_mut().select_where_possible(self) {
self.report_fulfillment_errors(&errors, self.inh.body_id, fallback_has_occurred);
}
}
/// For the overloaded place expressions (`*x`, `x[3]`), the trait
/// returns a type of `&T`, but the actual type we assign to the
/// *expression* is `T`. So this function just peels off the return
/// type by one layer to yield `T`.
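    ///
    /// For example (a hedged sketch): for `v[3]` with `v: Vec<u8>`, the
    /// overloaded `Index::index` returns `&u8`, but the indexing
    /// *expression* itself is assigned the type `u8`.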
fn make_overloaded_place_return_type(&self,
method: MethodCallee<'tcx>)
-> ty::TypeAndMut<'tcx>
{
// extract method return type, which will be &T;
let ret_ty = method.sig.output();
// method returns &T, but the type as visible to user is T, so deref
ret_ty.builtin_deref(true).unwrap()
}
fn lookup_indexing(&self,
expr: &hir::Expr,
base_expr: &'gcx hir::Expr,
base_ty: Ty<'tcx>,
idx_ty: Ty<'tcx>,
needs: Needs)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
// FIXME(#18741) -- this is almost but not quite the same as the
// autoderef that normal method probing does. They could likely be
// consolidated.
let mut autoderef = self.autoderef(base_expr.span, base_ty);
let mut result = None;
while result.is_none() && autoderef.next().is_some() {
result = self.try_index_step(expr, base_expr, &autoderef, needs, idx_ty);
}
autoderef.finalize();
result
}
/// To type-check `base_expr[index_expr]`, we progressively autoderef
/// (and otherwise adjust) `base_expr`, looking for a type which either
/// supports builtin indexing or overloaded indexing.
/// This loop implements one step in that search; the autoderef loop
/// is implemented by `lookup_indexing`.
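    ///
    /// For example (illustrative): for `v[0]` with `v: Vec<i32>`, the outer
    /// loop autoderefs `Vec<i32>` to `[i32]`, where the overloaded `Index`
    /// impl is found; for `a[0]` with `a: [i32; 3]`, the `unsize` step below
    /// first coerces `[i32; 3]` to `[i32]`.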
fn try_index_step(&self,
expr: &hir::Expr,
base_expr: &hir::Expr,
autoderef: &Autoderef<'a, 'gcx, 'tcx>,
needs: Needs,
index_ty: Ty<'tcx>)
-> Option<(/*index type*/ Ty<'tcx>, /*element type*/ Ty<'tcx>)>
{
let adjusted_ty = autoderef.unambiguous_final_ty();
debug!("try_index_step(expr={:?}, base_expr={:?}, adjusted_ty={:?}, \
index_ty={:?})",
expr,
base_expr,
adjusted_ty,
index_ty);
for &unsize in &[false, true] {
let mut self_ty = adjusted_ty;
if unsize {
// We only unsize arrays here.
if let ty::Array(element_ty, _) = adjusted_ty.sty {
self_ty = self.tcx.mk_slice(element_ty);
} else {
continue;
}
}
            // If the lookup succeeds, write the callee into the table and
            // extract the index/element types from the method signature.
let input_ty = self.next_ty_var(TypeVariableOrigin::AutoDeref(base_expr.span));
let method = self.try_overloaded_place_op(
expr.span, self_ty, &[input_ty], needs, PlaceOp::Index);
let result = method.map(|ok| {
debug!("try_index_step: success, using overloaded indexing");
let method = self.register_infer_ok_obligations(ok);
let mut adjustments = autoderef.adjust_steps(needs);
if let ty::Ref(region, _, r_mutbl) = method.sig.inputs()[0].sty {
let mutbl = match r_mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
// Indexing can be desugared to a method call,
// so maybe we could use two-phase here.
// See the documentation of AllowTwoPhase for why that's
// not the case today.
allow_two_phase_borrow: AllowTwoPhase::No,
}
};
adjustments.push(Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
target: self.tcx.mk_ref(region, ty::TypeAndMut {
mutbl: r_mutbl,
ty: adjusted_ty
})
});
}
if unsize {
adjustments.push(Adjustment {
kind: Adjust::Unsize,
target: method.sig.inputs()[0]
});
}
self.apply_adjustments(base_expr, adjustments);
self.write_method_call(expr.hir_id, method);
(input_ty, self.make_overloaded_place_return_type(method).ty)
});
if result.is_some() {
return result;
}
}
None
}
fn resolve_place_op(&self, op: PlaceOp, is_mut: bool) -> (Option<DefId>, ast::Ident) {
let (tr, name) = match (op, is_mut) {
(PlaceOp::Deref, false) =>
(self.tcx.lang_items().deref_trait(), "deref"),
(PlaceOp::Deref, true) =>
(self.tcx.lang_items().deref_mut_trait(), "deref_mut"),
(PlaceOp::Index, false) =>
(self.tcx.lang_items().index_trait(), "index"),
(PlaceOp::Index, true) =>
(self.tcx.lang_items().index_mut_trait(), "index_mut"),
};
(tr, ast::Ident::from_str(name))
}
fn try_overloaded_place_op(&self,
span: Span,
base_ty: Ty<'tcx>,
arg_tys: &[Ty<'tcx>],
needs: Needs,
op: PlaceOp)
-> Option<InferOk<'tcx, MethodCallee<'tcx>>>
{
debug!("try_overloaded_place_op({:?},{:?},{:?},{:?})",
span,
base_ty,
needs,
op);
// Try Mut first, if needed.
let (mut_tr, mut_op) = self.resolve_place_op(op, true);
let method = match (needs, mut_tr) {
(Needs::MutPlace, Some(trait_did)) => {
self.lookup_method_in_trait(span, mut_op, trait_did, base_ty, Some(arg_tys))
}
_ => None,
};
// Otherwise, fall back to the immutable version.
let (imm_tr, imm_op) = self.resolve_place_op(op, false);
let method = match (method, imm_tr) {
(None, Some(trait_did)) => {
self.lookup_method_in_trait(span, imm_op, trait_did, base_ty, Some(arg_tys))
}
(method, _) => method,
};
method
}
fn check_method_argument_types(&self,
sp: Span,
expr_sp: Span,
method: Result<MethodCallee<'tcx>, ()>,
args_no_rcvr: &'gcx [hir::Expr],
tuple_arguments: TupleArgumentsFlag,
expected: Expectation<'tcx>)
-> Ty<'tcx> {
let has_error = match method {
Ok(method) => {
method.substs.references_error() || method.sig.references_error()
}
Err(_) => true
};
if has_error {
let err_inputs = self.err_args(args_no_rcvr.len());
let err_inputs = match tuple_arguments {
DontTupleArguments => err_inputs,
TupleArguments => vec![self.tcx.intern_tup(&err_inputs[..])],
};
self.check_argument_types(sp, expr_sp, &err_inputs[..], &[], args_no_rcvr,
false, tuple_arguments, None);
return self.tcx.types.err;
}
let method = method.unwrap();
// HACK(eddyb) ignore self in the definition (see above).
let expected_arg_tys = self.expected_inputs_for_expected_output(
sp,
expected,
method.sig.output(),
&method.sig.inputs()[1..]
);
self.check_argument_types(sp, expr_sp, &method.sig.inputs()[1..], &expected_arg_tys[..],
args_no_rcvr, method.sig.variadic, tuple_arguments,
self.tcx.hir.span_if_local(method.def_id));
method.sig.output()
}
/// Generic function that factors out common logic from function calls,
/// method calls and overloaded operators.
fn check_argument_types(&self,
sp: Span,
expr_sp: Span,
fn_inputs: &[Ty<'tcx>],
mut expected_arg_tys: &[Ty<'tcx>],
args: &'gcx [hir::Expr],
variadic: bool,
tuple_arguments: TupleArgumentsFlag,
def_span: Option<Span>) {
let tcx = self.tcx;
// Grab the argument types, supplying fresh type variables
// if the wrong number of arguments were supplied
let supplied_arg_count = if tuple_arguments == DontTupleArguments {
args.len()
} else {
1
};
// All the input types from the fn signature must outlive the call
// so as to validate implied bounds.
for &fn_input_ty in fn_inputs {
self.register_wf_obligation(fn_input_ty, sp, traits::MiscObligation);
}
let expected_arg_count = fn_inputs.len();
let param_count_error = |expected_count: usize,
arg_count: usize,
error_code: &str,
variadic: bool,
sugg_unit: bool| {
let mut err = tcx.sess.struct_span_err_with_code(sp,
&format!("this function takes {}{} but {} {} supplied",
if variadic {"at least "} else {""},
potentially_plural_count(expected_count, "parameter"),
potentially_plural_count(arg_count, "parameter"),
if arg_count == 1 {"was"} else {"were"}),
DiagnosticId::Error(error_code.to_owned()));
if let Some(def_s) = def_span.map(|sp| tcx.sess.source_map().def_span(sp)) {
err.span_label(def_s, "defined here");
}
if sugg_unit {
let sugg_span = tcx.sess.source_map().end_point(expr_sp);
// remove closing `)` from the span
let sugg_span = sugg_span.shrink_to_lo();
err.span_suggestion_with_applicability(
sugg_span,
"expected the unit value `()`; create it with empty parentheses",
String::from("()"),
Applicability::MachineApplicable);
} else {
err.span_label(sp, format!("expected {}{}",
if variadic {"at least "} else {""},
potentially_plural_count(expected_count, "parameter")));
}
err.emit();
};
let formal_tys = if tuple_arguments == TupleArguments {
let tuple_type = self.structurally_resolved_type(sp, fn_inputs[0]);
match tuple_type.sty {
ty::Tuple(arg_types) if arg_types.len() != args.len() => {
param_count_error(arg_types.len(), args.len(), "E0057", false, false);
expected_arg_tys = &[];
self.err_args(args.len())
}
ty::Tuple(arg_types) => {
expected_arg_tys = match expected_arg_tys.get(0) {
Some(&ty) => match ty.sty {
ty::Tuple(ref tys) => &tys,
_ => &[]
},
None => &[]
};
arg_types.to_vec()
}
_ => {
span_err!(tcx.sess, sp, E0059,
"cannot use call notation; the first type parameter \
for the function trait is neither a tuple nor unit");
expected_arg_tys = &[];
self.err_args(args.len())
}
}
} else if expected_arg_count == supplied_arg_count {
fn_inputs.to_vec()
} else if variadic {
if supplied_arg_count >= expected_arg_count {
fn_inputs.to_vec()
} else {
param_count_error(expected_arg_count, supplied_arg_count, "E0060", true, false);
expected_arg_tys = &[];
self.err_args(supplied_arg_count)
}
} else {
// is the missing argument of type `()`?
let sugg_unit = if expected_arg_tys.len() == 1 && supplied_arg_count == 0 {
self.resolve_type_vars_if_possible(&expected_arg_tys[0]).is_unit()
} else if fn_inputs.len() == 1 && supplied_arg_count == 0 {
self.resolve_type_vars_if_possible(&fn_inputs[0]).is_unit()
} else {
false
};
param_count_error(expected_arg_count, supplied_arg_count, "E0061", false, sugg_unit);
expected_arg_tys = &[];
self.err_args(supplied_arg_count)
};
// If there is no expectation, expect formal_tys.
let expected_arg_tys = if !expected_arg_tys.is_empty() {
expected_arg_tys
} else {
&formal_tys
};
debug!("check_argument_types: formal_tys={:?}",
formal_tys.iter().map(|t| self.ty_to_string(*t)).collect::<Vec<String>>());
// Check the arguments.
// We do this in a pretty awful way: first we typecheck any arguments
// that are not closures, then we typecheck the closures. This is so
// that we have more information about the types of arguments when we
// typecheck the functions. This isn't really the right way to do this.
for &check_closures in &[false, true] {
debug!("check_closures={}", check_closures);
// More awful hacks: before we check argument types, try to do
// an "opportunistic" vtable resolution of any trait bounds on
// the call. This helps coercions.
if check_closures {
self.select_obligations_where_possible(false);
}
            // For variadic functions, we don't have a declared type for all of
            // the arguments, hence we only do our usual type checking with
            // the arguments whose types we do know.
let t = if variadic {
expected_arg_count
} else if tuple_arguments == TupleArguments {
args.len()
            } else {
                supplied_arg_count
            };
for (i, arg) in args.iter().take(t).enumerate() {
// Warn only for the first loop (the "no closures" one).
// Closure arguments themselves can't be diverging, but
// a previous argument can, e.g. `foo(panic!(), || {})`.
if !check_closures {
self.warn_if_unreachable(arg.id, arg.span, "expression");
}
let is_closure = match arg.node {
hir::ExprKind::Closure(..) => true,
_ => false
};
if is_closure != check_closures {
continue;
}
debug!("checking the argument");
let formal_ty = formal_tys[i];
// The special-cased logic below has three functions:
// 1. Provide as good of an expected type as possible.
let expected = Expectation::rvalue_hint(self, expected_arg_tys[i]);
let checked_ty = self.check_expr_with_expectation(&arg, expected);
// 2. Coerce to the most detailed type that could be coerced
// to, which is `expected_ty` if `rvalue_hint` returns an
// `ExpectHasType(expected_ty)`, or the `formal_ty` otherwise.
let coerce_ty = expected.only_has_type(self).unwrap_or(formal_ty);
// We're processing function arguments so we definitely want to use
// two-phase borrows.
self.demand_coerce(&arg, checked_ty, coerce_ty, AllowTwoPhase::Yes);
// 3. Relate the expected type and the formal one,
// if the expected type was used for the coercion.
self.demand_suptype(arg.span, formal_ty, coerce_ty);
}
}
// We also need to make sure we at least write the ty of the other
// arguments which we skipped above.
if variadic {
fn variadic_error<'tcx>(s: &Session, span: Span, t: Ty<'tcx>, cast_ty: &str) {
use structured_errors::{VariadicError, StructuredDiagnostic};
VariadicError::new(s, span, t, cast_ty).diagnostic().emit();
}
for arg in args.iter().skip(expected_arg_count) {
let arg_ty = self.check_expr(&arg);
// There are a few types which get autopromoted when passed via varargs
// in C but we just error out instead and require explicit casts.
let arg_ty = self.structurally_resolved_type(arg.span, arg_ty);
match arg_ty.sty {
ty::Float(ast::FloatTy::F32) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_double");
}
ty::Int(ast::IntTy::I8) | ty::Int(ast::IntTy::I16) | ty::Bool => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_int");
}
ty::Uint(ast::UintTy::U8) | ty::Uint(ast::UintTy::U16) => {
variadic_error(tcx.sess, arg.span, arg_ty, "c_uint");
}
ty::FnDef(..) => {
let ptr_ty = self.tcx.mk_fn_ptr(arg_ty.fn_sig(self.tcx));
let ptr_ty = self.resolve_type_vars_if_possible(&ptr_ty);
variadic_error(tcx.sess, arg.span, arg_ty, &ptr_ty.to_string());
}
_ => {}
}
}
}
}
fn err_args(&self, len: usize) -> Vec<Ty<'tcx>> {
vec![self.tcx.types.err; len]
}
// AST fragment checking
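    // For example (user-side, illustrative): in `let x: u16 = 7;` the
    // unsuffixed literal `7` takes its type from the expectation (`u16`);
    // with no expectation it becomes a fresh integer variable, which may
    // later fall back to `i32`.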
fn check_lit(&self,
lit: &ast::Lit,
expected: Expectation<'tcx>)
-> Ty<'tcx>
{
let tcx = self.tcx;
match lit.node {
ast::LitKind::Str(..) => tcx.mk_static_str(),
ast::LitKind::ByteStr(ref v) => {
tcx.mk_imm_ref(tcx.types.re_static,
tcx.mk_array(tcx.types.u8, v.len() as u64))
}
ast::LitKind::Byte(_) => tcx.types.u8,
ast::LitKind::Char(_) => tcx.types.char,
ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => tcx.mk_mach_int(t),
ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => tcx.mk_mach_uint(t),
ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
ty::Int(_) | ty::Uint(_) => Some(ty),
ty::Char => Some(tcx.types.u8),
ty::RawPtr(..) => Some(tcx.types.usize),
ty::FnDef(..) | ty::FnPtr(_) => Some(tcx.types.usize),
_ => None
}
});
opt_ty.unwrap_or_else(
|| tcx.mk_int_var(self.next_int_var_id()))
}
ast::LitKind::Float(_, t) => tcx.mk_mach_float(t),
ast::LitKind::FloatUnsuffixed(_) => {
let opt_ty = expected.to_option(self).and_then(|ty| {
match ty.sty {
ty::Float(_) => Some(ty),
_ => None
}
});
opt_ty.unwrap_or_else(
|| tcx.mk_float_var(self.next_float_var_id()))
}
ast::LitKind::Bool(_) => tcx.types.bool
}
}
fn check_expr_eq_type(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) {
let ty = self.check_expr_with_hint(expr, expected);
self.demand_eqtype(expr.span, expected, ty);
}
pub fn check_expr_has_type_or_error(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
self.check_expr_meets_expectation_or_error(expr, ExpectHasType(expected))
}
fn check_expr_meets_expectation_or_error(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let expected_ty = expected.to_option(&self).unwrap_or(self.tcx.types.bool);
let mut ty = self.check_expr_with_expectation(expr, expected);
// While we don't allow *arbitrary* coercions here, we *do* allow
// coercions from ! to `expected`.
if ty.is_never() {
assert!(!self.tables.borrow().adjustments().contains_key(expr.hir_id),
"expression with never type wound up being adjusted");
let adj_ty = self.next_diverging_ty_var(
TypeVariableOrigin::AdjustmentType(expr.span));
self.apply_adjustments(expr, vec![Adjustment {
kind: Adjust::NeverToAny,
target: adj_ty
}]);
ty = adj_ty;
}
if let Some(mut err) = self.demand_suptype_diag(expr.span, expected_ty, ty) {
// Add help to type error if this is an `if` condition with an assignment
if let (ExpectIfCondition, &hir::ExprKind::Assign(ref lhs, ref rhs))
= (expected, &expr.node)
{
let msg = "try comparing for equality";
if let (Ok(left), Ok(right)) = (
self.tcx.sess.source_map().span_to_snippet(lhs.span),
self.tcx.sess.source_map().span_to_snippet(rhs.span))
{
err.span_suggestion_with_applicability(
expr.span,
msg,
format!("{} == {}", left, right),
Applicability::MaybeIncorrect);
} else {
err.help(msg);
}
}
err.emit();
}
ty
}
fn check_expr_coercable_to_type(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
let ty = self.check_expr_with_hint(expr, expected);
// checks don't need two phase
self.demand_coerce(expr, ty, expected, AllowTwoPhase::No)
}
fn check_expr_with_hint(&self,
expr: &'gcx hir::Expr,
expected: Ty<'tcx>) -> Ty<'tcx> {
self.check_expr_with_expectation(expr, ExpectHasType(expected))
}
fn check_expr_with_expectation(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>) -> Ty<'tcx> {
self.check_expr_with_expectation_and_needs(expr, expected, Needs::None)
}
fn check_expr(&self, expr: &'gcx hir::Expr) -> Ty<'tcx> {
self.check_expr_with_expectation(expr, NoExpectation)
}
fn check_expr_with_needs(&self, expr: &'gcx hir::Expr, needs: Needs) -> Ty<'tcx> {
self.check_expr_with_expectation_and_needs(expr, NoExpectation, needs)
}
    // Determines the `self` type, using fresh variables for all variables
    // declared on the impl declaration, e.g., `impl<A, B> Trait for Vec<(A, B)>`
    // would yield the self type `Vec<($0, $1)>`, where `$0` and `$1` are
    // freshly instantiated type variables.
pub fn impl_self_ty(&self,
span: Span, // (potential) receiver for this impl
did: DefId)
-> TypeAndSubsts<'tcx> {
let ity = self.tcx.type_of(did);
debug!("impl_self_ty: ity={:?}", ity);
let substs = self.fresh_substs_for_item(span, did);
let substd_ty = self.instantiate_type_scheme(span, &substs, &ity);
        TypeAndSubsts { substs, ty: substd_ty }
}
/// Unifies the output type with the expected type early, for more coercions
/// and forward type information on the input expressions.
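    ///
    /// For example (a hedged sketch; `mk` is a hypothetical function):
    ///
    ///     fn mk<T>(x: T) -> Vec<T> { vec![x] }
    ///     let v: Vec<u8> = mk(0);
    ///
    /// Unifying the formal return type `Vec<T>` with the expected `Vec<u8>`
    /// lets the argument expectation for `0` become `u8` before the
    /// argument is checked.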
fn expected_inputs_for_expected_output(&self,
call_span: Span,
expected_ret: Expectation<'tcx>,
formal_ret: Ty<'tcx>,
formal_args: &[Ty<'tcx>])
-> Vec<Ty<'tcx>> {
let formal_ret = self.resolve_type_vars_with_obligations(formal_ret);
let ret_ty = match expected_ret.only_has_type(self) {
Some(ret) => ret,
None => return Vec::new()
};
let expect_args = self.fudge_regions_if_ok(&RegionVariableOrigin::Coercion(call_span), || {
// Attempt to apply a subtyping relationship between the formal
// return type (likely containing type variables if the function
// is polymorphic) and the expected return type.
// No argument expectations are produced if unification fails.
let origin = self.misc(call_span);
let ures = self.at(&origin, self.param_env).sup(ret_ty, &formal_ret);
// FIXME(#27336) can't use ? here, Try::from_error doesn't default
// to identity so the resulting type is not constrained.
match ures {
Ok(ok) => {
// Process any obligations locally as much as
// we can. We don't care if some things turn
// out unconstrained or ambiguous, as we're
// just trying to get hints here.
self.save_and_restore_in_snapshot_flag(|_| {
let mut fulfill = TraitEngine::new(self.tcx);
for obligation in ok.obligations {
fulfill.register_predicate_obligation(self, obligation);
}
fulfill.select_where_possible(self)
}).map_err(|_| ())?;
}
Err(_) => return Err(()),
}
// Record all the argument types, with the substitutions
// produced from the above subtyping unification.
Ok(formal_args.iter().map(|ty| {
self.resolve_type_vars_if_possible(ty)
}).collect())
}).unwrap_or_default();
debug!("expected_inputs_for_expected_output(formal={:?} -> {:?}, expected={:?} -> {:?})",
formal_args, formal_ret,
expect_args, expected_ret);
expect_args
}
// Checks a method call.
fn check_method_call(&self,
expr: &'gcx hir::Expr,
segment: &hir::PathSegment,
span: Span,
args: &'gcx [hir::Expr],
expected: Expectation<'tcx>,
needs: Needs) -> Ty<'tcx> {
let rcvr = &args[0];
let rcvr_t = self.check_expr_with_needs(&rcvr, needs);
// no need to check for bot/err -- callee does that
let rcvr_t = self.structurally_resolved_type(args[0].span, rcvr_t);
let method = match self.lookup_method(rcvr_t,
segment,
span,
expr,
rcvr) {
Ok(method) => {
self.write_method_call(expr.hir_id, method);
Ok(method)
}
Err(error) => {
if segment.ident.name != keywords::Invalid.name() {
self.report_method_error(span,
rcvr_t,
segment.ident,
Some(rcvr),
error,
Some(args));
}
Err(())
}
};
// Call the generic checker.
self.check_method_argument_types(span,
expr.span,
method,
&args[1..],
DontTupleArguments,
expected)
}
fn check_return_expr(&self, return_expr: &'gcx hir::Expr) {
let ret_coercion =
self.ret_coercion
.as_ref()
.unwrap_or_else(|| span_bug!(return_expr.span,
"check_return_expr called outside fn body"));
let ret_ty = ret_coercion.borrow().expected_ty();
        let return_expr_ty = self.check_expr_with_hint(return_expr, ret_ty);
ret_coercion.borrow_mut()
.coerce(self,
&self.cause(return_expr.span,
ObligationCauseCode::ReturnType(return_expr.id)),
return_expr,
return_expr_ty);
}
    // A generic function for checking the `then` and `else` branches of
    // an `if` or `if`-`else` expression.
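    //
    // For example (illustrative):
    //
    //     let n = if flag { 1 } else { return };
    //
    // The `else` branch has type `!`, which coerces into the common type
    // computed from the `then` branch's integer type.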
fn check_then_else(&self,
cond_expr: &'gcx hir::Expr,
then_expr: &'gcx hir::Expr,
opt_else_expr: Option<&'gcx hir::Expr>,
sp: Span,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let cond_ty = self.check_expr_meets_expectation_or_error(cond_expr, ExpectIfCondition);
let cond_diverges = self.diverges.get();
self.diverges.set(Diverges::Maybe);
let expected = expected.adjust_for_branches(self);
let then_ty = self.check_expr_with_expectation(then_expr, expected);
let then_diverges = self.diverges.get();
self.diverges.set(Diverges::Maybe);
// We've already taken the expected type's preferences
// into account when typing the `then` branch. To figure
// out the initial shot at a LUB, we thus only consider
// `expected` if it represents a *hard* constraint
// (`only_has_type`); otherwise, we just go with a
// fresh type variable.
let coerce_to_ty = expected.coercion_target_type(self, sp);
let mut coerce: DynamicCoerceMany = CoerceMany::new(coerce_to_ty);
let if_cause = self.cause(sp, ObligationCauseCode::IfExpression);
coerce.coerce(self, &if_cause, then_expr, then_ty);
if let Some(else_expr) = opt_else_expr {
let else_ty = self.check_expr_with_expectation(else_expr, expected);
let else_diverges = self.diverges.get();
coerce.coerce(self, &if_cause, else_expr, else_ty);
// We won't diverge unless both branches do (or the condition does).
self.diverges.set(cond_diverges | then_diverges & else_diverges);
} else {
let else_cause = self.cause(sp, ObligationCauseCode::IfExpressionWithNoElse);
coerce.coerce_forced_unit(self, &else_cause, &mut |_| (), true);
// If the condition is false we can't diverge.
self.diverges.set(cond_diverges);
}
let result_ty = coerce.complete(self);
if cond_ty.references_error() {
self.tcx.types.err
} else {
result_ty
}
}
// Check field access expressions
fn check_field(&self,
expr: &'gcx hir::Expr,
needs: Needs,
base: &'gcx hir::Expr,
field: ast::Ident) -> Ty<'tcx> {
let expr_t = self.check_expr_with_needs(base, needs);
let expr_t = self.structurally_resolved_type(base.span,
expr_t);
let mut private_candidate = None;
let mut autoderef = self.autoderef(expr.span, expr_t);
while let Some((base_t, _)) = autoderef.next() {
match base_t.sty {
ty::Adt(base_def, substs) if !base_def.is_enum() => {
debug!("struct named {:?}", base_t);
let (ident, def_scope) =
self.tcx.adjust_ident(field, base_def.did, self.body_id);
let fields = &base_def.non_enum_variant().fields;
if let Some(index) = fields.iter().position(|f| f.ident.modern() == ident) {
let field = &fields[index];
let field_ty = self.field_ty(expr.span, field, substs);
// Save the index of all fields regardless of their visibility in case
// of error recovery.
self.write_field_index(expr.id, index);
if field.vis.is_accessible_from(def_scope, self.tcx) {
let adjustments = autoderef.adjust_steps(needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize();
self.tcx.check_stability(field.did, Some(expr.id), expr.span);
return field_ty;
}
private_candidate = Some((base_def.did, field_ty));
}
}
ty::Tuple(ref tys) => {
let fstr = field.as_str();
if let Ok(index) = fstr.parse::<usize>() {
if fstr == index.to_string() {
if let Some(field_ty) = tys.get(index) {
let adjustments = autoderef.adjust_steps(needs);
self.apply_adjustments(base, adjustments);
autoderef.finalize();
self.write_field_index(expr.id, index);
return field_ty;
}
}
}
}
_ => {}
}
}
autoderef.unambiguous_final_ty();
if let Some((did, field_ty)) = private_candidate {
let struct_path = self.tcx().item_path_str(did);
let mut err = struct_span_err!(self.tcx().sess, expr.span, E0616,
"field `{}` of struct `{}` is private",
field, struct_path);
// Also check if an accessible method exists, which is often what is meant.
if self.method_exists(field, expr_t, expr.id, false) {
err.note(&format!("a method `{}` also exists, perhaps you wish to call it", field));
}
err.emit();
field_ty
} else if field.name == keywords::Invalid.name() {
self.tcx().types.err
} else if self.method_exists(field, expr_t, expr.id, true) {
type_error_struct!(self.tcx().sess, field.span, expr_t, E0615,
"attempted to take value of method `{}` on type `{}`",
field, expr_t)
.help("maybe a `()` to call it is missing?")
.emit();
self.tcx().types.err
} else {
if !expr_t.is_primitive_ty() {
let mut err = self.no_such_field_err(field.span, field, expr_t);
match expr_t.sty {
ty::Adt(def, _) if !def.is_enum() => {
if let Some(suggested_field_name) =
Self::suggest_field_name(def.non_enum_variant(),
&field.as_str(), vec![]) {
err.span_label(field.span,
format!("did you mean `{}`?", suggested_field_name));
} else {
err.span_label(field.span, "unknown field");
let struct_variant_def = def.non_enum_variant();
let field_names = self.available_field_names(struct_variant_def);
if !field_names.is_empty() {
err.note(&format!("available fields are: {}",
self.name_series_display(field_names)));
}
};
}
ty::Array(_, len) => {
if let (Some(len), Ok(user_index)) = (
len.assert_usize(self.tcx),
field.as_str().parse::<u64>()
) {
let base = self.tcx.hir.node_to_pretty_string(base.id);
let help = "instead of using tuple indexing, use array indexing";
let suggestion = format!("{}[{}]", base, field);
let applicability = if len < user_index {
Applicability::MachineApplicable
} else {
Applicability::MaybeIncorrect
};
err.span_suggestion_with_applicability(
expr.span, help, suggestion, applicability
);
}
}
ty::RawPtr(..) => {
let base = self.tcx.hir.node_to_pretty_string(base.id);
let msg = format!("`{}` is a native pointer; try dereferencing it", base);
let suggestion = format!("(*{}).{}", base, field);
err.span_suggestion_with_applicability(
field.span,
&msg,
suggestion,
Applicability::MaybeIncorrect,
);
}
_ => {}
}
err
} else {
type_error_struct!(self.tcx().sess, field.span, expr_t, E0610,
"`{}` is a primitive type and therefore doesn't have fields",
expr_t)
}.emit();
self.tcx().types.err
}
}
    // Returns a hint about the closest match among the field names.
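    // (E.g., a typo like `colr` against a struct with a `color` field would
    // yield `Some` of that field's name; illustrative only.)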
fn suggest_field_name(variant: &'tcx ty::VariantDef,
field: &str,
skip: Vec<LocalInternedString>)
-> Option<Symbol> {
let names = variant.fields.iter().filter_map(|field| {
// ignore already set fields and private fields from non-local crates
if skip.iter().any(|x| *x == field.ident.as_str()) ||
(variant.did.krate != LOCAL_CRATE && field.vis != Visibility::Public) {
None
} else {
Some(&field.ident.name)
}
});
find_best_match_for_name(names, field, None)
}
fn available_field_names(&self, variant: &'tcx ty::VariantDef) -> Vec<ast::Name> {
variant.fields.iter().filter(|field| {
let def_scope = self.tcx.adjust_ident(field.ident, variant.did, self.body_id).1;
field.vis.is_accessible_from(def_scope, self.tcx)
})
.map(|field| field.ident.name)
.collect()
}
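    // For example (illustrative): seven names display as
    // "`a`, `b`, `c`, `d`, `e` ... and 2 others", while exactly six names
    // are shown in full, so that a single field is never elided.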
fn name_series_display(&self, names: Vec<ast::Name>) -> String {
// dynamic limit, to never omit just one field
let limit = if names.len() == 6 { 6 } else { 5 };
let mut display = names.iter().take(limit)
.map(|n| format!("`{}`", n)).collect::<Vec<_>>().join(", ");
if names.len() > limit {
display = format!("{} ... and {} others", display, names.len() - limit);
}
display
}
fn no_such_field_err<T: Display>(&self, span: Span, field: T, expr_t: &ty::TyS)
-> DiagnosticBuilder {
type_error_struct!(self.tcx().sess, span, expr_t, E0609,
"no field `{}` on type `{}`",
field, expr_t)
}
fn report_unknown_field(&self,
ty: Ty<'tcx>,
variant: &'tcx ty::VariantDef,
field: &hir::Field,
skip_fields: &[hir::Field],
kind_name: &str) {
let mut err = self.type_error_struct_with_diag(
field.ident.span,
|actual| match ty.sty {
ty::Adt(adt, ..) if adt.is_enum() => {
struct_span_err!(self.tcx.sess, field.ident.span, E0559,
"{} `{}::{}` has no field named `{}`",
kind_name, actual, variant.name, field.ident)
}
_ => {
struct_span_err!(self.tcx.sess, field.ident.span, E0560,
"{} `{}` has no field named `{}`",
kind_name, actual, field.ident)
}
},
ty);
// prevent all specified fields from being suggested
let skip_fields = skip_fields.iter().map(|ref x| x.ident.as_str());
if let Some(field_name) = Self::suggest_field_name(variant,
&field.ident.as_str(),
skip_fields.collect()) {
err.span_label(field.ident.span,
format!("field does not exist - did you mean `{}`?", field_name));
} else {
match ty.sty {
ty::Adt(adt, ..) => {
if adt.is_enum() {
err.span_label(field.ident.span,
format!("`{}::{}` does not have this field",
ty, variant.name));
} else {
err.span_label(field.ident.span,
format!("`{}` does not have this field", ty));
}
let available_field_names = self.available_field_names(variant);
if !available_field_names.is_empty() {
err.note(&format!("available fields are: {}",
self.name_series_display(available_field_names)));
}
}
_ => bug!("non-ADT passed to report_unknown_field")
}
};
err.emit();
}
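    // For example (user-side, illustrative): given `struct S { a: u8, b: u8 }`,
    // the expression `S { a: 1 }` typechecks the field `a` and then reports
    // E0063 for the missing field `b`.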
fn check_expr_struct_fields(&self,
adt_ty: Ty<'tcx>,
expected: Expectation<'tcx>,
expr_id: ast::NodeId,
span: Span,
variant: &'tcx ty::VariantDef,
ast_fields: &'gcx [hir::Field],
check_completeness: bool) -> bool {
let tcx = self.tcx;
let adt_ty_hint =
self.expected_inputs_for_expected_output(span, expected, adt_ty, &[adt_ty])
.get(0).cloned().unwrap_or(adt_ty);
        // Re-link the regions that `expected_inputs_for_expected_output` can erase.
self.demand_eqtype(span, adt_ty_hint, adt_ty);
let (substs, adt_kind, kind_name) = match &adt_ty.sty {
&ty::Adt(adt, substs) => {
(substs, adt.adt_kind(), adt.variant_descr())
}
_ => span_bug!(span, "non-ADT passed to check_expr_struct_fields")
};
let mut remaining_fields = variant.fields.iter().enumerate().map(|(i, field)|
(field.ident.modern(), (i, field))
).collect::<FxHashMap<_, _>>();
let mut seen_fields = FxHashMap::default();
let mut error_happened = false;
// Typecheck each field.
for field in ast_fields {
let ident = tcx.adjust_ident(field.ident, variant.did, self.body_id).0;
let field_type = if let Some((i, v_field)) = remaining_fields.remove(&ident) {
seen_fields.insert(ident, field.span);
self.write_field_index(field.id, i);
                // We don't look at stability attributes on
                // struct-like enums (yet...), but it's definitely not
                // a bug to construct one.
if adt_kind != ty::AdtKind::Enum {
tcx.check_stability(v_field.did, Some(expr_id), field.span);
}
self.field_ty(field.span, v_field, substs)
} else {
error_happened = true;
if let Some(prev_span) = seen_fields.get(&ident) {
let mut err = struct_span_err!(self.tcx.sess,
field.ident.span,
E0062,
"field `{}` specified more than once",
ident);
err.span_label(field.ident.span, "used more than once");
err.span_label(*prev_span, format!("first use of `{}`", ident));
err.emit();
} else {
self.report_unknown_field(adt_ty, variant, field, ast_fields, kind_name);
}
tcx.types.err
};
// Make sure to give a type to the field even if there's
// an error, so we can continue typechecking
self.check_expr_coercable_to_type(&field.expr, field_type);
}
        // Make sure the programmer specified the correct number of fields.
if kind_name == "union" {
if ast_fields.len() != 1 {
tcx.sess.span_err(span, "union expressions should have exactly one field");
}
} else if check_completeness && !error_happened && !remaining_fields.is_empty() {
let len = remaining_fields.len();
let mut displayable_field_names = remaining_fields
.keys()
.map(|ident| ident.as_str())
.collect::<Vec<_>>();
displayable_field_names.sort();
let truncated_fields_error = if len <= 3 {
String::new()
} else {
format!(" and {} other field{}", (len - 3), if len - 3 == 1 {""} else {"s"})
};
let remaining_fields_names = displayable_field_names.iter().take(3)
.map(|n| format!("`{}`", n))
.collect::<Vec<_>>()
.join(", ");
struct_span_err!(tcx.sess, span, E0063,
"missing field{} {}{} in initializer of `{}`",
if remaining_fields.len() == 1 { "" } else { "s" },
remaining_fields_names,
truncated_fields_error,
adt_ty)
.span_label(span, format!("missing {}{}",
remaining_fields_names,
truncated_fields_error))
.emit();
}
error_happened
}
fn check_struct_fields_on_error(&self,
fields: &'gcx [hir::Field],
base_expr: &'gcx Option<P<hir::Expr>>) {
for field in fields {
self.check_expr(&field.expr);
}
if let Some(ref base) = *base_expr {
self.check_expr(&base);
}
}
pub fn check_struct_path(&self,
qpath: &hir::QPath,
node_id: ast::NodeId)
-> Option<(&'tcx ty::VariantDef, Ty<'tcx>)> {
let path_span = match *qpath {
hir::QPath::Resolved(_, ref path) => path.span,
hir::QPath::TypeRelative(ref qself, _) => qself.span
};
let (def, ty) = self.finish_resolving_struct_path(qpath, path_span, node_id);
let variant = match def {
Def::Err => {
self.set_tainted_by_errors();
return None;
}
Def::Variant(..) => {
match ty.sty {
ty::Adt(adt, substs) => {
Some((adt.variant_of_def(def), adt.did, substs))
}
_ => bug!("unexpected type: {:?}", ty.sty)
}
}
Def::Struct(..) | Def::Union(..) | Def::TyAlias(..) |
Def::AssociatedTy(..) | Def::SelfTy(..) => {
match ty.sty {
ty::Adt(adt, substs) if !adt.is_enum() => {
Some((adt.non_enum_variant(), adt.did, substs))
}
_ => None,
}
}
_ => bug!("unexpected definition: {:?}", def)
};
if let Some((variant, did, substs)) = variant {
debug!("check_struct_path: did={:?} substs={:?}", did, substs);
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.write_user_substs_from_substs(hir_id, substs, None);
// Check bounds on type arguments used in the path.
let bounds = self.instantiate_bounds(path_span, did, substs);
let cause = traits::ObligationCause::new(path_span, self.body_id,
traits::ItemObligation(did));
self.add_obligations_for_parameters(cause, &bounds);
Some((variant, ty))
} else {
struct_span_err!(self.tcx.sess, path_span, E0071,
"expected struct, variant or union type, found {}",
ty.sort_string(self.tcx))
.span_label(path_span, "not a struct")
.emit();
None
}
}
fn check_expr_struct(&self,
expr: &hir::Expr,
expected: Expectation<'tcx>,
qpath: &hir::QPath,
fields: &'gcx [hir::Field],
base_expr: &'gcx Option<P<hir::Expr>>) -> Ty<'tcx>
{
// Find the relevant variant
let (variant, adt_ty) =
if let Some(variant_ty) = self.check_struct_path(qpath, expr.id) {
variant_ty
} else {
self.check_struct_fields_on_error(fields, base_expr);
return self.tcx.types.err;
};
let path_span = match *qpath {
hir::QPath::Resolved(_, ref path) => path.span,
hir::QPath::TypeRelative(ref qself, _) => qself.span
};
        // Prohibit struct expressions when the non-exhaustive flag is set.
let adt = adt_ty.ty_adt_def().expect("`check_struct_path` returned non-ADT type");
if !adt.did.is_local() && variant.is_field_list_non_exhaustive() {
span_err!(self.tcx.sess, expr.span, E0639,
"cannot create non-exhaustive {} using struct expression",
adt.variant_descr());
}
let error_happened = self.check_expr_struct_fields(adt_ty, expected, expr.id, path_span,
variant, fields, base_expr.is_none());
if let &Some(ref base_expr) = base_expr {
// If check_expr_struct_fields hit an error, do not attempt to populate
// the fields with the base_expr. This could cause us to hit errors later
// when certain fields are assumed to exist that in fact do not.
if !error_happened {
self.check_expr_has_type_or_error(base_expr, adt_ty);
match adt_ty.sty {
ty::Adt(adt, substs) if adt.is_struct() => {
let fru_field_types = adt.non_enum_variant().fields.iter().map(|f| {
self.normalize_associated_types_in(expr.span, &f.ty(self.tcx, substs))
}).collect();
self.tables
.borrow_mut()
.fru_field_types_mut()
.insert(expr.hir_id, fru_field_types);
}
_ => {
span_err!(self.tcx.sess, base_expr.span, E0436,
"functional record update syntax requires a struct");
}
}
}
}
self.require_type_is_sized(adt_ty, expr.span, traits::StructInitializerSized);
adt_ty
}
    /// Invariant:
    /// If an expression has any sub-expressions that result in a type error,
    /// inspecting that expression's type with `ty.references_error()` will return
    /// true. Likewise, if an expression is known to diverge, its type will be
    /// `!` (n.b.: since Rust is strict, `!` can appear in the type of an
    /// expression that does not, itself, diverge: for example, `fn() -> !`).
    /// Note that inspecting a type's structure *directly* may expose the fact
    /// that there are actually multiple representations for `Error`, so avoid
    /// that when err needs to be handled differently.
fn check_expr_with_expectation_and_needs(&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
needs: Needs) -> Ty<'tcx> {
debug!(">> typechecking: expr={:?} expected={:?}",
expr, expected);
// Warn for expressions after diverging siblings.
self.warn_if_unreachable(expr.id, expr.span, "expression");
// Hide the outer diverging and has_errors flags.
let old_diverges = self.diverges.get();
let old_has_errors = self.has_errors.get();
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
let ty = self.check_expr_kind(expr, expected, needs);
// Warn for non-block expressions with diverging children.
match expr.node {
hir::ExprKind::Block(..) |
hir::ExprKind::Loop(..) | hir::ExprKind::While(..) |
hir::ExprKind::If(..) | hir::ExprKind::Match(..) => {}
_ => self.warn_if_unreachable(expr.id, expr.span, "expression")
}
// Any expression that produces a value of type `!` must have diverged
if ty.is_never() {
self.diverges.set(self.diverges.get() | Diverges::Always);
}
        // Record the type, which applies its effects.
// We need to do this after the warning above, so that
// we don't warn for the diverging expression itself.
self.write_ty(expr.hir_id, ty);
// Combine the diverging and has_error flags.
self.diverges.set(self.diverges.get() | old_diverges);
self.has_errors.set(self.has_errors.get() | old_has_errors);
debug!("type of {} is...", self.tcx.hir.node_to_string(expr.id));
debug!("... {:?}, expected is {:?}", ty, expected);
ty
}
fn check_expr_kind(
&self,
expr: &'gcx hir::Expr,
expected: Expectation<'tcx>,
needs: Needs
) -> Ty<'tcx> {
debug!(
"check_expr_kind(expr={:?}, expected={:?}, needs={:?})",
expr,
expected,
needs,
);
let tcx = self.tcx;
let id = expr.id;
match expr.node {
hir::ExprKind::Box(ref subexpr) => {
let expected_inner = expected.to_option(self).map_or(NoExpectation, |ty| {
match ty.sty {
ty::Adt(def, _) if def.is_box()
=> Expectation::rvalue_hint(self, ty.boxed_ty()),
_ => NoExpectation
}
});
let referent_ty = self.check_expr_with_expectation(subexpr, expected_inner);
tcx.mk_box(referent_ty)
}
hir::ExprKind::Lit(ref lit) => {
self.check_lit(&lit, expected)
}
hir::ExprKind::Binary(op, ref lhs, ref rhs) => {
self.check_binop(expr, op, lhs, rhs)
}
hir::ExprKind::AssignOp(op, ref lhs, ref rhs) => {
self.check_binop_assign(expr, op, lhs, rhs)
}
hir::ExprKind::Unary(unop, ref oprnd) => {
let expected_inner = match unop {
hir::UnNot | hir::UnNeg => {
expected
}
hir::UnDeref => {
NoExpectation
}
};
let needs = match unop {
hir::UnDeref => needs,
_ => Needs::None
};
let mut oprnd_t = self.check_expr_with_expectation_and_needs(&oprnd,
expected_inner,
needs);
if !oprnd_t.references_error() {
oprnd_t = self.structurally_resolved_type(expr.span, oprnd_t);
match unop {
hir::UnDeref => {
if let Some(mt) = oprnd_t.builtin_deref(true) {
oprnd_t = mt.ty;
} else if let Some(ok) = self.try_overloaded_deref(
expr.span, oprnd_t, needs) {
let method = self.register_infer_ok_obligations(ok);
if let ty::Ref(region, _, mutbl) = method.sig.inputs()[0].sty {
let mutbl = match mutbl {
hir::MutImmutable => AutoBorrowMutability::Immutable,
hir::MutMutable => AutoBorrowMutability::Mutable {
// (It shouldn't actually matter for unary ops whether
// we enable two-phase borrows or not, since a unary
// op has no additional operands.)
allow_two_phase_borrow: AllowTwoPhase::No,
}
};
self.apply_adjustments(oprnd, vec![Adjustment {
kind: Adjust::Borrow(AutoBorrow::Ref(region, mutbl)),
target: method.sig.inputs()[0]
}]);
}
oprnd_t = self.make_overloaded_place_return_type(method).ty;
self.write_method_call(expr.hir_id, method);
} else {
type_error_struct!(tcx.sess, expr.span, oprnd_t, E0614,
"type `{}` cannot be dereferenced",
oprnd_t).emit();
oprnd_t = tcx.types.err;
}
}
hir::UnNot => {
let result = self.check_user_unop(expr, oprnd_t, unop);
                            // If it's builtin, we can reuse the type; this helps inference.
if !(oprnd_t.is_integral() || oprnd_t.sty == ty::Bool) {
oprnd_t = result;
}
}
hir::UnNeg => {
let result = self.check_user_unop(expr, oprnd_t, unop);
                            // If it's builtin, we can reuse the type; this helps inference.
if !(oprnd_t.is_integral() || oprnd_t.is_fp()) {
oprnd_t = result;
}
}
}
}
oprnd_t
}
hir::ExprKind::AddrOf(mutbl, ref oprnd) => {
let hint = expected.only_has_type(self).map_or(NoExpectation, |ty| {
match ty.sty {
ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
if oprnd.is_place_expr() {
// Places may legitimately have unsized types.
// For example, dereferences of a fat pointer and
// the last field of a struct can be unsized.
ExpectHasType(ty)
} else {
Expectation::rvalue_hint(self, ty)
}
}
_ => NoExpectation
}
});
let needs = Needs::maybe_mut_place(mutbl);
let ty = self.check_expr_with_expectation_and_needs(&oprnd, hint, needs);
                let tm = ty::TypeAndMut { ty, mutbl };
if tm.ty.references_error() {
tcx.types.err
} else {
// Note: at this point, we cannot say what the best lifetime
// is to use for resulting pointer. We want to use the
// shortest lifetime possible so as to avoid spurious borrowck
// errors. Moreover, the longest lifetime will depend on the
// precise details of the value whose address is being taken
// (and how long it is valid), which we don't know yet until type
// inference is complete.
//
// Therefore, here we simply generate a region variable. The
// region inferencer will then select the ultimate value.
// Finally, borrowck is charged with guaranteeing that the
// value whose address was taken can actually be made to live
// as long as it needs to live.
let region = self.next_region_var(infer::AddrOfRegion(expr.span));
tcx.mk_ref(region, tm)
}
}
hir::ExprKind::Path(ref qpath) => {
let (def, opt_ty, segs) = self.resolve_ty_and_def_ufcs(qpath, expr.id, expr.span);
let ty = if def != Def::Err {
self.instantiate_value_path(segs, opt_ty, def, expr.span, id).0
} else {
self.set_tainted_by_errors();
tcx.types.err
};
// We always require that the type provided as the value for
// a type parameter outlives the moment of instantiation.
let substs = self.tables.borrow().node_substs(expr.hir_id);
self.add_wf_bounds(substs, expr);
ty
}
hir::ExprKind::InlineAsm(_, ref outputs, ref inputs) => {
for expr in outputs.iter().chain(inputs.iter()) {
self.check_expr(expr);
}
tcx.mk_unit()
}
hir::ExprKind::Break(destination, ref expr_opt) => {
if let Ok(target_id) = destination.target_id {
let (e_ty, cause);
if let Some(ref e) = *expr_opt {
// If this is a break with a value, we need to type-check
// the expression. Get an expected type from the loop context.
let opt_coerce_to = {
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
enclosing_breakables.find_breakable(target_id)
.coerce
.as_ref()
.map(|coerce| coerce.expected_ty())
};
// If the loop context is not a `loop { }`, then break with
// a value is illegal, and `opt_coerce_to` will be `None`.
// Just set expectation to error in that case.
let coerce_to = opt_coerce_to.unwrap_or(tcx.types.err);
// Recurse without `enclosing_breakables` borrowed.
e_ty = self.check_expr_with_hint(e, coerce_to);
cause = self.misc(e.span);
} else {
// Otherwise, this is a break *without* a value. That's
// always legal, and is equivalent to `break ()`.
e_ty = tcx.mk_unit();
cause = self.misc(expr.span);
}
// Now that we have type-checked `expr_opt`, borrow
                // the `enclosing_breakables` field and let's coerce the
// type of `expr_opt` into what is expected.
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
let ctxt = enclosing_breakables.find_breakable(target_id);
if let Some(ref mut coerce) = ctxt.coerce {
if let Some(ref e) = *expr_opt {
coerce.coerce(self, &cause, e, e_ty);
} else {
assert!(e_ty.is_unit());
coerce.coerce_forced_unit(self, &cause, &mut |_| (), true);
}
} else {
// If `ctxt.coerce` is `None`, we can just ignore
                // the type of the expression. This is because
// either this was a break *without* a value, in
// which case it is always a legal type (`()`), or
// else an error would have been flagged by the
// `loops` pass for using break with an expression
// where you are not supposed to.
assert!(expr_opt.is_none() || self.tcx.sess.err_count() > 0);
}
ctxt.may_break = true;
// the type of a `break` is always `!`, since it diverges
tcx.types.never
} else {
// Otherwise, we failed to find the enclosing loop;
// this can only happen if the `break` was not
// inside a loop at all, which is caught by the
// loop-checking pass.
if self.tcx.sess.err_count() == 0 {
self.tcx.sess.delay_span_bug(expr.span,
"break was outside loop, but no error was emitted");
}
// We still need to assign a type to the inner expression to
// prevent the ICE in #43162.
if let Some(ref e) = *expr_opt {
self.check_expr_with_hint(e, tcx.types.err);
// ... except when we try to 'break rust;'.
// ICE this expression in particular (see #43162).
if let hir::ExprKind::Path(hir::QPath::Resolved(_, ref path)) = e.node {
if path.segments.len() == 1 && path.segments[0].ident.name == "rust" {
fatally_break_rust(self.tcx.sess);
}
}
}
// There was an error, make typecheck fail
tcx.types.err
}
}
hir::ExprKind::Continue(destination) => {
if destination.target_id.is_ok() {
tcx.types.never
} else {
// There was an error, make typecheck fail
tcx.types.err
}
}
hir::ExprKind::Ret(ref expr_opt) => {
if self.ret_coercion.is_none() {
struct_span_err!(self.tcx.sess, expr.span, E0572,
"return statement outside of function body").emit();
} else if let Some(ref e) = *expr_opt {
self.check_return_expr(e);
} else {
let mut coercion = self.ret_coercion.as_ref().unwrap().borrow_mut();
let cause = self.cause(expr.span, ObligationCauseCode::ReturnNoExpression);
coercion.coerce_forced_unit(self, &cause, &mut |_| (), true);
}
tcx.types.never
}
hir::ExprKind::Assign(ref lhs, ref rhs) => {
let lhs_ty = self.check_expr_with_needs(&lhs, Needs::MutPlace);
let rhs_ty = self.check_expr_coercable_to_type(&rhs, lhs_ty);
match expected {
ExpectIfCondition => {
                        self.tcx.sess.delay_span_bug(lhs.span, "invalid lhs expression in if; \
                                                     expected error elsewhere");
}
_ => {
// Only check this if not in an `if` condition, as the
// mistyped comparison help is more appropriate.
if !lhs.is_place_expr() {
struct_span_err!(self.tcx.sess, expr.span, E0070,
"invalid left-hand side expression")
.span_label(expr.span, "left-hand of expression not valid")
.emit();
}
}
}
self.require_type_is_sized(lhs_ty, lhs.span, traits::AssignmentLhsSized);
if lhs_ty.references_error() || rhs_ty.references_error() {
tcx.types.err
} else {
tcx.mk_unit()
}
}
hir::ExprKind::If(ref cond, ref then_expr, ref opt_else_expr) => {
self.check_then_else(&cond, then_expr, opt_else_expr.as_ref().map(|e| &**e),
expr.span, expected)
}
hir::ExprKind::While(ref cond, ref body, _) => {
let ctxt = BreakableCtxt {
// cannot use break with a value from a while loop
coerce: None,
may_break: false, // Will get updated if/when we find a `break`.
};
let (ctxt, ()) = self.with_breakable_ctxt(expr.id, ctxt, || {
self.check_expr_has_type_or_error(&cond, tcx.types.bool);
let cond_diverging = self.diverges.get();
self.check_block_no_value(&body);
// We may never reach the body so it diverging means nothing.
self.diverges.set(cond_diverging);
});
if ctxt.may_break {
// No way to know whether it's diverging because
// of a `break` or an outer `break` or `return`.
self.diverges.set(Diverges::Maybe);
}
self.tcx.mk_unit()
}
hir::ExprKind::Loop(ref body, _, source) => {
let coerce = match source {
// you can only use break with a value from a normal `loop { }`
hir::LoopSource::Loop => {
let coerce_to = expected.coercion_target_type(self, body.span);
Some(CoerceMany::new(coerce_to))
}
hir::LoopSource::WhileLet |
hir::LoopSource::ForLoop => {
None
}
};
let ctxt = BreakableCtxt {
coerce,
may_break: false, // Will get updated if/when we find a `break`.
};
let (ctxt, ()) = self.with_breakable_ctxt(expr.id, ctxt, || {
self.check_block_no_value(&body);
});
if ctxt.may_break {
// No way to know whether it's diverging because
// of a `break` or an outer `break` or `return`.
self.diverges.set(Diverges::Maybe);
}
// If we permit break with a value, then result type is
// the LUB of the breaks (possibly ! if none); else, it
// is nil. This makes sense because infinite loops
// (which would have type !) are only possible iff we
// permit break with a value [1].
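            //
            // For example:
            //
            //     let x = loop { break 5; };  // `x: i32`, the LUB of the `break` values
            //     let y = loop {};            // `y: !`, no breaks so the loop diverges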
if ctxt.coerce.is_none() && !ctxt.may_break {
// [1]
self.tcx.sess.delay_span_bug(body.span, "no coercion, but loop may not break");
}
ctxt.coerce.map(|c| c.complete(self)).unwrap_or_else(|| self.tcx.mk_unit())
}
hir::ExprKind::Match(ref discrim, ref arms, match_src) => {
self.check_match(expr, &discrim, arms, expected, match_src)
}
hir::ExprKind::Closure(capture, ref decl, body_id, _, gen) => {
self.check_expr_closure(expr, capture, &decl, body_id, gen, expected)
}
hir::ExprKind::Block(ref body, _) => {
self.check_block_with_expected(&body, expected)
}
hir::ExprKind::Call(ref callee, ref args) => {
self.check_call(expr, &callee, args, expected)
}
hir::ExprKind::MethodCall(ref segment, span, ref args) => {
self.check_method_call(expr, segment, span, args, expected, needs)
}
hir::ExprKind::Cast(ref e, ref t) => {
// Find the type of `e`. Supply hints based on the type we are casting to,
// if appropriate.
let t_cast = self.to_ty_saving_user_provided_ty(t);
let t_cast = self.resolve_type_vars_if_possible(&t_cast);
let t_expr = self.check_expr_with_expectation(e, ExpectCastableToType(t_cast));
let t_cast = self.resolve_type_vars_if_possible(&t_cast);
// Eagerly check for some obvious errors.
if t_expr.references_error() || t_cast.references_error() {
tcx.types.err
} else {
// Defer other checks until we're done type checking.
let mut deferred_cast_checks = self.deferred_cast_checks.borrow_mut();
match cast::CastCheck::new(self, e, t_expr, t_cast, t.span, expr.span) {
Ok(cast_check) => {
deferred_cast_checks.push(cast_check);
t_cast
}
Err(ErrorReported) => {
tcx.types.err
}
}
}
}
hir::ExprKind::Type(ref e, ref t) => {
let ty = self.to_ty_saving_user_provided_ty(&t);
self.check_expr_eq_type(&e, ty);
ty
}
hir::ExprKind::Array(ref args) => {
let uty = expected.to_option(self).and_then(|uty| {
match uty.sty {
ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
});
let element_ty = if !args.is_empty() {
let coerce_to = uty.unwrap_or_else(
|| self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span)));
let mut coerce = CoerceMany::with_coercion_sites(coerce_to, args);
assert_eq!(self.diverges.get(), Diverges::Maybe);
for e in args {
let e_ty = self.check_expr_with_hint(e, coerce_to);
let cause = self.misc(e.span);
coerce.coerce(self, &cause, e, e_ty);
}
coerce.complete(self)
} else {
self.next_ty_var(TypeVariableOrigin::TypeInference(expr.span))
};
tcx.mk_array(element_ty, args.len() as u64)
}
hir::ExprKind::Repeat(ref element, ref count) => {
let count_def_id = tcx.hir.local_def_id(count.id);
let param_env = ty::ParamEnv::empty();
let substs = Substs::identity_for_item(tcx.global_tcx(), count_def_id);
let instance = ty::Instance::resolve(
tcx.global_tcx(),
param_env,
count_def_id,
substs,
).unwrap();
let global_id = GlobalId {
instance,
promoted: None
};
let count = tcx.const_eval(param_env.and(global_id));
let uty = match expected {
ExpectHasType(uty) => {
match uty.sty {
ty::Array(ty, _) | ty::Slice(ty) => Some(ty),
_ => None
}
}
_ => None
};
let (element_ty, t) = match uty {
Some(uty) => {
self.check_expr_coercable_to_type(&element, uty);
(uty, uty)
}
None => {
let ty = self.next_ty_var(TypeVariableOrigin::MiscVariable(element.span));
let element_ty = self.check_expr_has_type_or_error(&element, ty);
(element_ty, ty)
}
};
if let Ok(count) = count {
let zero_or_one = count.assert_usize(tcx).map_or(false, |count| count <= 1);
if !zero_or_one {
                        // For `[foo; n]` where `n > 1`, `foo` must have
                        // Copy type:
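                        // e.g. `[Box::new(0); 2]` is rejected because
                        // `Box<i32>` is not `Copy`, while a count of 0 or 1
                        // requires no copies and is accepted.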
let lang_item = self.tcx.require_lang_item(lang_items::CopyTraitLangItem);
self.require_type_meets(t, expr.span, traits::RepeatVec, lang_item);
}
}
if element_ty.references_error() {
tcx.types.err
} else if let Ok(count) = count {
tcx.mk_ty(ty::Array(t, count))
} else {
tcx.types.err
}
}
hir::ExprKind::Tup(ref elts) => {
let flds = expected.only_has_type(self).and_then(|ty| {
let ty = self.resolve_type_vars_with_obligations(ty);
match ty.sty {
ty::Tuple(ref flds) => Some(&flds[..]),
_ => None
}
});
let elt_ts_iter = elts.iter().enumerate().map(|(i, e)| {
let t = match flds {
Some(ref fs) if i < fs.len() => {
let ety = fs[i];
self.check_expr_coercable_to_type(&e, ety);
ety
}
_ => {
self.check_expr_with_expectation(&e, NoExpectation)
}
};
t
});
let tuple = tcx.mk_tup(elt_ts_iter);
if tuple.references_error() {
tcx.types.err
} else {
self.require_type_is_sized(tuple, expr.span, traits::TupleInitializerSized);
tuple
}
}
hir::ExprKind::Struct(ref qpath, ref fields, ref base_expr) => {
self.check_expr_struct(expr, expected, qpath, fields, base_expr)
}
hir::ExprKind::Field(ref base, field) => {
self.check_field(expr, needs, &base, field)
}
hir::ExprKind::Index(ref base, ref idx) => {
let base_t = self.check_expr_with_needs(&base, needs);
let idx_t = self.check_expr(&idx);
if base_t.references_error() {
base_t
} else if idx_t.references_error() {
idx_t
} else {
let base_t = self.structurally_resolved_type(base.span, base_t);
match self.lookup_indexing(expr, base, base_t, idx_t, needs) {
Some((index_ty, element_ty)) => {
// two-phase not needed because index_ty is never mutable
self.demand_coerce(idx, idx_t, index_ty, AllowTwoPhase::No);
element_ty
}
None => {
let mut err =
type_error_struct!(tcx.sess, expr.span, base_t, E0608,
"cannot index into a value of type `{}`",
base_t);
// Try to give some advice about indexing tuples.
if let ty::Tuple(..) = base_t.sty {
let mut needs_note = true;
// If the index is an integer, we can show the actual
// fixed expression:
if let hir::ExprKind::Lit(ref lit) = idx.node {
if let ast::LitKind::Int(i,
ast::LitIntType::Unsuffixed) = lit.node {
let snip = tcx.sess.source_map().span_to_snippet(base.span);
if let Ok(snip) = snip {
err.span_suggestion_with_applicability(
expr.span,
"to access tuple elements, use",
format!("{}.{}", snip, i),
Applicability::MachineApplicable);
needs_note = false;
}
}
}
if needs_note {
err.help("to access tuple elements, use tuple indexing \
syntax (e.g. `tuple.0`)");
}
}
err.emit();
self.tcx.types.err
}
}
}
}
hir::ExprKind::Yield(ref value) => {
match self.yield_ty {
Some(ty) => {
self.check_expr_coercable_to_type(&value, ty);
}
None => {
struct_span_err!(self.tcx.sess, expr.span, E0627,
"yield statement outside of generator literal").emit();
}
}
tcx.mk_unit()
}
}
}
// Finish resolving a path in a struct expression or pattern `S::A { .. }` if necessary.
// The newly resolved definition is written into `type_dependent_defs`.
fn finish_resolving_struct_path(&self,
qpath: &hir::QPath,
path_span: Span,
node_id: ast::NodeId)
-> (Def, Ty<'tcx>)
{
match *qpath {
hir::QPath::Resolved(ref maybe_qself, ref path) => {
let self_ty = maybe_qself.as_ref().map(|qself| self.to_ty(qself));
let ty = AstConv::def_to_ty(self, self_ty, path, true);
(path.def, ty)
}
hir::QPath::TypeRelative(ref qself, ref segment) => {
let ty = self.to_ty(qself);
let def = if let hir::TyKind::Path(hir::QPath::Resolved(_, ref path)) = qself.node {
path.def
} else {
Def::Err
};
let (ty, def) = AstConv::associated_path_def_to_ty(self, node_id, path_span,
ty, def, segment);
// Write back the new resolution.
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def);
(def, ty)
}
}
}
// Resolve associated value path into a base type and associated constant or method definition.
// The newly resolved definition is written into `type_dependent_defs`.
pub fn resolve_ty_and_def_ufcs<'b>(&self,
qpath: &'b hir::QPath,
node_id: ast::NodeId,
span: Span)
-> (Def, Option<Ty<'tcx>>, &'b [hir::PathSegment])
{
let (ty, item_segment) = match *qpath {
hir::QPath::Resolved(ref opt_qself, ref path) => {
return (path.def,
opt_qself.as_ref().map(|qself| self.to_ty(qself)),
&path.segments[..]);
}
hir::QPath::TypeRelative(ref qself, ref segment) => {
(self.to_ty(qself), segment)
}
};
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
if let Some(cached_def) = self.tables.borrow().type_dependent_defs().get(hir_id) {
// Return directly on cache hit. This is useful to avoid doubly reporting
// errors with default match binding modes. See #44614.
return (*cached_def, Some(ty), slice::from_ref(&**item_segment))
}
let item_name = item_segment.ident;
let def = match self.resolve_ufcs(span, item_name, ty, node_id) {
Ok(def) => def,
Err(error) => {
let def = match error {
method::MethodError::PrivateMatch(def, _) => def,
_ => Def::Err,
};
if item_name.name != keywords::Invalid.name() {
self.report_method_error(span, ty, item_name, None, error, None);
}
def
}
};
// Write back the new resolution.
self.tables.borrow_mut().type_dependent_defs_mut().insert(hir_id, def);
(def, Some(ty), slice::from_ref(&**item_segment))
}
pub fn check_decl_initializer(&self,
local: &'gcx hir::Local,
init: &'gcx hir::Expr) -> Ty<'tcx>
{
// FIXME(tschottdorf): contains_explicit_ref_binding() must be removed
// for #42640 (default match binding modes).
//
// See #44848.
let ref_bindings = local.pat.contains_explicit_ref_binding();
let local_ty = self.local_ty(init.span, local.id).revealed_ty;
if let Some(m) = ref_bindings {
// Somewhat subtle: if we have a `ref` binding in the pattern,
// we want to avoid introducing coercions for the RHS. This is
// both because it helps preserve sanity and, in the case of
// ref mut, for soundness (issue #23116). In particular, in
// the latter case, we need to be clear that the type of the
// referent for the reference that results is *equal to* the
// type of the place it is referencing, and not some
// supertype thereof.
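            //
            // For example, given `let ref mut v = vec![1, 2, 3];`, the
            // initializer is checked without coercions so that `*v` has
            // exactly the type `Vec<i32>` of the place it borrows.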
let init_ty = self.check_expr_with_needs(init, Needs::maybe_mut_place(m));
self.demand_eqtype(init.span, local_ty, init_ty);
init_ty
} else {
self.check_expr_coercable_to_type(init, local_ty)
}
}
pub fn check_decl_local(&self, local: &'gcx hir::Local) {
let t = self.local_ty(local.span, local.id).decl_ty;
self.write_ty(local.hir_id, t);
if let Some(ref init) = local.init {
let init_ty = self.check_decl_initializer(local, &init);
if init_ty.references_error() {
self.write_ty(local.hir_id, init_ty);
}
}
self.check_pat_walk(&local.pat, t,
ty::BindingMode::BindByValue(hir::Mutability::MutImmutable),
true);
let pat_ty = self.node_ty(local.pat.hir_id);
if pat_ty.references_error() {
self.write_ty(local.hir_id, pat_ty);
}
}
pub fn check_stmt(&self, stmt: &'gcx hir::Stmt) {
// Don't do all the complex logic below for DeclItem.
match stmt.node {
hir::StmtKind::Decl(ref decl, _) => {
if let hir::DeclKind::Item(_) = decl.node {
return
}
}
hir::StmtKind::Expr(..) | hir::StmtKind::Semi(..) => {}
}
self.warn_if_unreachable(stmt.node.id(), stmt.span, "statement");
// Hide the outer diverging and has_errors flags.
let old_diverges = self.diverges.get();
let old_has_errors = self.has_errors.get();
self.diverges.set(Diverges::Maybe);
self.has_errors.set(false);
match stmt.node {
hir::StmtKind::Decl(ref decl, _) => {
match decl.node {
hir::DeclKind::Local(ref l) => {
self.check_decl_local(&l);
}
hir::DeclKind::Item(_) => {/* ignore for now */}
}
}
hir::StmtKind::Expr(ref expr, _) => {
// Check with expected type of ()
self.check_expr_has_type_or_error(&expr, self.tcx.mk_unit());
}
hir::StmtKind::Semi(ref expr, _) => {
self.check_expr(&expr);
}
}
// Combine the diverging and has_error flags.
self.diverges.set(self.diverges.get() | old_diverges);
self.has_errors.set(self.has_errors.get() | old_has_errors);
}
pub fn check_block_no_value(&self, blk: &'gcx hir::Block) {
let unit = self.tcx.mk_unit();
let ty = self.check_block_with_expected(blk, ExpectHasType(unit));
// if the block produces a `!` value, that can always be
// (effectively) coerced to unit.
if !ty.is_never() {
self.demand_suptype(blk.span, unit, ty);
}
}
fn check_block_with_expected(&self,
blk: &'gcx hir::Block,
expected: Expectation<'tcx>) -> Ty<'tcx> {
let prev = {
let mut fcx_ps = self.ps.borrow_mut();
let unsafety_state = fcx_ps.recurse(blk);
replace(&mut *fcx_ps, unsafety_state)
};
// In some cases, blocks have just one exit, but other blocks
// can be targeted by multiple breaks. This can happen both
// with labeled blocks as well as when we desugar
// a `try { ... }` expression.
//
// Example 1:
//
// 'a: { if true { break 'a Err(()); } Ok(()) }
//
// Here we would wind up with two coercions, one from
// `Err(())` and the other from the tail expression
// `Ok(())`. If the tail expression is omitted, that's a
// "forced unit" -- unless the block diverges, in which
// case we can ignore the tail expression (e.g., `'a: {
// break 'a 22; }` would not force the type of the block
// to be `()`).
let tail_expr = blk.expr.as_ref();
let coerce_to_ty = expected.coercion_target_type(self, blk.span);
let coerce = if blk.targeted_by_break {
CoerceMany::new(coerce_to_ty)
} else {
let tail_expr: &[P<hir::Expr>] = match tail_expr {
Some(e) => slice::from_ref(e),
None => &[],
};
CoerceMany::with_coercion_sites(coerce_to_ty, tail_expr)
};
let prev_diverges = self.diverges.get();
let ctxt = BreakableCtxt {
coerce: Some(coerce),
may_break: false,
};
let (ctxt, ()) = self.with_breakable_ctxt(blk.id, ctxt, || {
for s in &blk.stmts {
self.check_stmt(s);
}
// check the tail expression **without** holding the
// `enclosing_breakables` lock below.
let tail_expr_ty = tail_expr.map(|t| self.check_expr_with_expectation(t, expected));
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
let ctxt = enclosing_breakables.find_breakable(blk.id);
let coerce = ctxt.coerce.as_mut().unwrap();
if let Some(tail_expr_ty) = tail_expr_ty {
let tail_expr = tail_expr.unwrap();
let cause = self.cause(tail_expr.span,
ObligationCauseCode::BlockTailExpression(blk.id));
coerce.coerce(self,
&cause,
tail_expr,
tail_expr_ty);
} else {
// Subtle: if there is no explicit tail expression,
// that is typically equivalent to a tail expression
// of `()` -- except if the block diverges. In that
// case, there is no value supplied from the tail
// expression (assuming there are no other breaks,
// this implies that the type of the block will be
// `!`).
//
// #41425 -- label the implicit `()` as being the
// "found type" here, rather than the "expected type".
//
// #44579 -- if the block was recovered during parsing,
// the type would be nonsensical and it is not worth it
// to perform the type check, so we avoid generating the
// diagnostic output.
if !self.diverges.get().always() && !blk.recovered {
coerce.coerce_forced_unit(self, &self.misc(blk.span), &mut |err| {
if let Some(expected_ty) = expected.only_has_type(self) {
self.consider_hint_about_removing_semicolon(blk,
expected_ty,
err);
}
}, false);
}
}
});
if ctxt.may_break {
// If we can break from the block, then the block's exit is always reachable
// (... as long as the entry is reachable) - regardless of the tail of the block.
self.diverges.set(prev_diverges);
}
let mut ty = ctxt.coerce.unwrap().complete(self);
if self.has_errors.get() || ty.references_error() {
ty = self.tcx.types.err
}
self.write_ty(blk.hir_id, ty);
*self.ps.borrow_mut() = prev;
ty
}
/// Given a `NodeId`, return the `FnDecl` of the method it is enclosed by and whether a
/// suggestion can be made, `None` otherwise.
pub fn get_fn_decl(&self, blk_id: ast::NodeId) -> Option<(hir::FnDecl, bool)> {
// Get enclosing Fn, if it is a function or a trait method, unless there's a `loop` or
// `while` before reaching it, as block tail returns are not available in them.
if let Some(fn_id) = self.tcx.hir.get_return_block(blk_id) {
let parent = self.tcx.hir.get(fn_id);
if let Node::Item(&hir::Item {
name, node: hir::ItemKind::Fn(ref decl, ..), ..
}) = parent {
decl.clone().and_then(|decl| {
// This is less than ideal, it will not suggest a return type span on any
// method called `main`, regardless of whether it is actually the entry point,
// but it will still present it as the reason for the expected type.
Some((decl, name != Symbol::intern("main")))
})
} else if let Node::TraitItem(&hir::TraitItem {
node: hir::TraitItemKind::Method(hir::MethodSig {
ref decl, ..
}, ..), ..
}) = parent {
decl.clone().and_then(|decl| {
Some((decl, true))
})
} else if let Node::ImplItem(&hir::ImplItem {
node: hir::ImplItemKind::Method(hir::MethodSig {
ref decl, ..
}, ..), ..
}) = parent {
decl.clone().and_then(|decl| {
Some((decl, false))
})
} else {
None
}
} else {
None
}
}
/// On implicit return expressions with mismatched types, provide the following suggestions:
///
/// - Point out the method's return type as the reason for the expected type
/// - Possible missing semicolon
/// - Possible missing return type if the return type is the default, and not `fn main()`
pub fn suggest_mismatched_types_on_tail(&self,
err: &mut DiagnosticBuilder<'tcx>,
expression: &'gcx hir::Expr,
expected: Ty<'tcx>,
found: Ty<'tcx>,
cause_span: Span,
blk_id: ast::NodeId) {
self.suggest_missing_semicolon(err, expression, expected, cause_span);
if let Some((fn_decl, can_suggest)) = self.get_fn_decl(blk_id) {
self.suggest_missing_return_type(err, &fn_decl, expected, found, can_suggest);
}
self.suggest_ref_or_into(err, expression, expected, found);
}
pub fn suggest_ref_or_into(
&self,
err: &mut DiagnosticBuilder<'tcx>,
expr: &hir::Expr,
expected: Ty<'tcx>,
found: Ty<'tcx>,
) {
if let Some((sp, msg, suggestion)) = self.check_ref(expr, found, expected) {
err.span_suggestion_with_applicability(
sp,
msg,
suggestion,
Applicability::MachineApplicable,
);
} else if !self.check_for_cast(err, expr, found, expected) {
let methods = self.get_conversion_methods(expr.span, expected, found);
if let Ok(expr_text) = self.sess().source_map().span_to_snippet(expr.span) {
let mut suggestions = iter::repeat(&expr_text).zip(methods.iter())
.filter_map(|(receiver, method)| {
let method_call = format!(".{}()", method.ident);
if receiver.ends_with(&method_call) {
None // do not suggest code that is already there (#53348)
} else {
let method_call_list = [".to_vec()", ".to_string()"];
if receiver.ends_with(".clone()")
&& method_call_list.contains(&method_call.as_str()) {
let max_len = receiver.rfind(".").unwrap();
Some(format!("{}{}", &receiver[..max_len], method_call))
}
else {
Some(format!("{}{}", receiver, method_call))
}
}
}).peekable();
if suggestions.peek().is_some() {
err.span_suggestions_with_applicability(
expr.span,
"try using a conversion method",
suggestions,
Applicability::MaybeIncorrect,
);
}
}
}
}
/// A common error is to forget to add a semicolon at the end of a block:
///
/// ```
/// fn foo() {
/// bar_that_returns_u32()
/// }
/// ```
///
/// This routine checks if the return expression in a block would make sense on its own as a
/// statement and the return type has been left as default or has been specified as `()`. If so,
/// it suggests adding a semicolon.
fn suggest_missing_semicolon(&self,
err: &mut DiagnosticBuilder<'tcx>,
expression: &'gcx hir::Expr,
expected: Ty<'tcx>,
cause_span: Span) {
if expected.is_unit() {
            // `BlockTailExpression` is only relevant if the tail expr would be
// useful on its own.
match expression.node {
hir::ExprKind::Call(..) |
hir::ExprKind::MethodCall(..) |
hir::ExprKind::If(..) |
hir::ExprKind::While(..) |
hir::ExprKind::Loop(..) |
hir::ExprKind::Match(..) |
hir::ExprKind::Block(..) => {
let sp = self.tcx.sess.source_map().next_point(cause_span);
err.span_suggestion_with_applicability(
sp,
"try adding a semicolon",
";".to_string(),
Applicability::MachineApplicable);
}
_ => (),
}
}
}
/// A possible error is to forget to add a return type that is needed:
///
/// ```
/// fn foo() {
/// bar_that_returns_u32()
/// }
/// ```
///
/// This routine checks if the return type is left as default, the method is not part of an
/// `impl` block and that it isn't the `main` method. If so, it suggests setting the return
/// type.
fn suggest_missing_return_type(&self,
err: &mut DiagnosticBuilder<'tcx>,
fn_decl: &hir::FnDecl,
expected: Ty<'tcx>,
found: Ty<'tcx>,
can_suggest: bool) {
// Only suggest changing the return type for methods that
// haven't set a return type at all (and aren't `fn main()` or an impl).
match (&fn_decl.output, found.is_suggestable(), can_suggest, expected.is_unit()) {
(&hir::FunctionRetTy::DefaultReturn(span), true, true, true) => {
err.span_suggestion_with_applicability(
span,
"try adding a return type",
format!("-> {} ", self.resolve_type_vars_with_obligations(found)),
Applicability::MachineApplicable);
}
(&hir::FunctionRetTy::DefaultReturn(span), false, true, true) => {
err.span_label(span, "possibly return type missing here?");
}
(&hir::FunctionRetTy::DefaultReturn(span), _, false, true) => {
// `fn main()` must return `()`, do not suggest changing return type
err.span_label(span, "expected `()` because of default return type");
}
// expectation was caused by something else, not the default return
(&hir::FunctionRetTy::DefaultReturn(_), _, _, false) => {}
(&hir::FunctionRetTy::Return(ref ty), _, _, _) => {
// Only point to return type if the expected type is the return type, as if they
// are not, the expectation must have been caused by something else.
debug!("suggest_missing_return_type: return type {:?} node {:?}", ty, ty.node);
let sp = ty.span;
let ty = AstConv::ast_ty_to_ty(self, ty);
debug!("suggest_missing_return_type: return type sty {:?}", ty.sty);
debug!("suggest_missing_return_type: expected type sty {:?}", ty.sty);
if ty.sty == expected.sty {
err.span_label(sp, format!("expected `{}` because of return type",
expected));
}
}
}
}
/// A common error is to add an extra semicolon:
///
/// ```
/// fn foo() -> usize {
/// 22;
/// }
/// ```
///
/// This routine checks if the final statement in a block is an
/// expression with an explicit semicolon whose type is compatible
/// with `expected_ty`. If so, it suggests removing the semicolon.
fn consider_hint_about_removing_semicolon(&self,
blk: &'gcx hir::Block,
expected_ty: Ty<'tcx>,
err: &mut DiagnosticBuilder) {
// Be helpful when the user wrote `{... expr;}` and
// taking the `;` off is enough to fix the error.
let last_stmt = match blk.stmts.last() {
Some(s) => s,
None => return,
};
let last_expr = match last_stmt.node {
hir::StmtKind::Semi(ref e, _) => e,
_ => return,
};
let last_expr_ty = self.node_ty(last_expr.hir_id);
if self.can_sub(self.param_env, last_expr_ty, expected_ty).is_err() {
return;
}
let original_span = original_sp(last_stmt.span, blk.span);
let span_semi = original_span.with_lo(original_span.hi() - BytePos(1));
err.span_suggestion_with_applicability(
span_semi,
"consider removing this semicolon",
String::new(),
Applicability::MachineApplicable);
}
fn def_ids_for_path_segments(&self,
segments: &[hir::PathSegment],
def: Def)
-> Vec<PathSeg> {
// We need to extract the type parameters supplied by the user in
// the path `path`. Due to the current setup, this is a bit of a
        // tricky process; the problem is that resolve only tells us the
// end-point of the path resolution, and not the intermediate steps.
// Luckily, we can (at least for now) deduce the intermediate steps
// just from the end-point.
//
// There are basically four cases to consider:
//
// 1. Reference to a constructor of enum variant or struct:
//
// struct Foo<T>(...)
// enum E<T> { Foo(...) }
//
// In these cases, the parameters are declared in the type
// space.
//
// 2. Reference to a fn item or a free constant:
//
// fn foo<T>() { }
//
// In this case, the path will again always have the form
// `a::b::foo::<T>` where only the final segment should have
// type parameters. However, in this case, those parameters are
// declared on a value, and hence are in the `FnSpace`.
//
// 3. Reference to a method or an associated constant:
//
// impl<A> SomeStruct<A> {
// fn foo<B>(...)
// }
//
// Here we can have a path like
// `a::b::SomeStruct::<A>::foo::<B>`, in which case parameters
// may appear in two places. The penultimate segment,
// `SomeStruct::<A>`, contains parameters in TypeSpace, and the
// final segment, `foo::<B>` contains parameters in fn space.
//
// 4. Reference to a local variable
//
// Local variables can't have any type parameters.
//
// The first step then is to categorize the segments appropriately.
assert!(!segments.is_empty());
let last = segments.len() - 1;
let mut path_segs = vec![];
match def {
// Case 1. Reference to a struct/variant constructor.
Def::StructCtor(def_id, ..) |
Def::VariantCtor(def_id, ..) |
Def::SelfCtor(.., def_id) => {
// Everything but the final segment should have no
// parameters at all.
let generics = self.tcx.generics_of(def_id);
// Variant and struct constructors use the
// generics of their parent type definition.
let generics_def_id = generics.parent.unwrap_or(def_id);
path_segs.push(PathSeg(generics_def_id, last));
}
// Case 2. Reference to a top-level value.
Def::Fn(def_id) |
Def::Const(def_id) |
Def::Static(def_id, _) => {
path_segs.push(PathSeg(def_id, last));
}
// Case 3. Reference to a method or associated const.
Def::Method(def_id) |
Def::AssociatedConst(def_id) => {
if segments.len() >= 2 {
let generics = self.tcx.generics_of(def_id);
path_segs.push(PathSeg(generics.parent.unwrap(), last - 1));
}
path_segs.push(PathSeg(def_id, last));
}
// Case 4. Local variable, no generics.
Def::Local(..) | Def::Upvar(..) => {}
_ => bug!("unexpected definition: {:?}", def),
}
debug!("path_segs = {:?}", path_segs);
path_segs
}
// Instantiates the given path, which must refer to an item with the given
// number of type parameters and type.
pub fn instantiate_value_path(&self,
segments: &[hir::PathSegment],
self_ty: Option<Ty<'tcx>>,
def: Def,
span: Span,
node_id: ast::NodeId)
-> (Ty<'tcx>, Def) {
debug!(
"instantiate_value_path(segments={:?}, self_ty={:?}, def={:?}, node_id={})",
segments,
self_ty,
def,
node_id,
);
let path_segs = self.def_ids_for_path_segments(segments, def);
let mut user_self_ty = None;
match def {
Def::Method(def_id) |
Def::AssociatedConst(def_id) => {
let container = self.tcx.associated_item(def_id).container;
match container {
ty::TraitContainer(trait_did) => {
callee::check_legal_trait_for_method_call(self.tcx, span, trait_did)
}
ty::ImplContainer(impl_def_id) => {
if segments.len() == 1 {
// `<T>::assoc` will end up here, and so
                            // can `T::assoc`. If this came from an
// inherent impl, we need to record the
// `T` for posterity (see `UserSelfTy` for
// details).
let self_ty = self_ty.expect("UFCS sugared assoc missing Self");
user_self_ty = Some(UserSelfTy {
impl_def_id,
self_ty,
});
}
}
}
}
_ => {}
}
// Now that we have categorized what space the parameters for each
// segment belong to, let's sort out the parameters that the user
// provided (if any) into their appropriate spaces. We'll also report
// errors if type parameters are provided in an inappropriate place.
let generic_segs = path_segs.iter().map(|PathSeg(_, index)| index)
.collect::<FxHashSet<_>>();
AstConv::prohibit_generics(self, segments.iter().enumerate().filter_map(|(index, seg)| {
if !generic_segs.contains(&index) {
Some(seg)
} else {
None
}
}));
match def {
Def::Local(nid) | Def::Upvar(nid, ..) => {
let ty = self.local_ty(span, nid).decl_ty;
let ty = self.normalize_associated_types_in(span, &ty);
self.write_ty(self.tcx.hir.node_to_hir_id(node_id), ty);
return (ty, def);
}
_ => {}
}
// Now we have to compare the types that the user *actually*
// provided against the types that were *expected*. If the user
// did not provide any types, then we want to substitute inference
// variables. If the user provided some types, we may still need
// to add defaults. If the user provided *too many* types, that's
// a problem.
let mut infer_args_for_err = FxHashSet::default();
for &PathSeg(def_id, index) in &path_segs {
let seg = &segments[index];
let generics = self.tcx.generics_of(def_id);
// Argument-position `impl Trait` is treated as a normal generic
// parameter internally, but we don't allow users to specify the
// parameter's value explicitly, so we have to do some error-
// checking here.
let suppress_errors = AstConv::check_generic_arg_count_for_call(
self.tcx,
span,
&generics,
&seg,
false, // `is_method_call`
);
if suppress_errors {
infer_args_for_err.insert(index);
self.set_tainted_by_errors(); // See issue #53251.
}
}
let has_self = path_segs.last().map(|PathSeg(def_id, _)| {
self.tcx.generics_of(*def_id).has_self
}).unwrap_or(false);
let mut new_def = def;
let (def_id, ty) = if let Def::SelfCtor(impl_def_id) = def {
let ty = self.impl_self_ty(span, impl_def_id).ty;
match ty.ty_adt_def() {
Some(adt_def) if adt_def.is_struct() => {
let variant = adt_def.non_enum_variant();
new_def = Def::StructCtor(variant.did, variant.ctor_kind);
(variant.did, self.tcx.type_of(variant.did))
}
_ => {
(impl_def_id, self.tcx.types.err)
}
}
} else {
let def_id = def.def_id();
// The things we are substituting into the type should not contain
            // escaping late-bound regions, nor should the base type scheme.
let ty = self.tcx.type_of(def_id);
(def_id, ty)
};
let substs = AstConv::create_substs_for_generic_args(
self.tcx,
def_id,
&[][..],
has_self,
self_ty,
// Provide the generic args, and whether types should be inferred.
|def_id| {
if let Some(&PathSeg(_, index)) = path_segs.iter().find(|&PathSeg(did, _)| {
*did == def_id
}) {
// If we've encountered an `impl Trait`-related error, we're just
// going to infer the arguments for better error messages.
if !infer_args_for_err.contains(&index) {
// Check whether the user has provided generic arguments.
if let Some(ref data) = segments[index].args {
return (Some(data), segments[index].infer_types);
}
}
return (None, segments[index].infer_types);
}
(None, true)
},
// Provide substitutions for parameters for which (valid) arguments have been provided.
|param, arg| {
match (¶m.kind, arg) {
(GenericParamDefKind::Lifetime, GenericArg::Lifetime(lt)) => {
AstConv::ast_region_to_region(self, lt, Some(param)).into()
}
(GenericParamDefKind::Type { .. }, GenericArg::Type(ty)) => {
self.to_ty(ty).into()
}
_ => unreachable!(),
}
},
// Provide substitutions for parameters for which arguments are inferred.
|substs, param, infer_types| {
match param.kind {
GenericParamDefKind::Lifetime => {
self.re_infer(span, Some(param)).unwrap().into()
}
GenericParamDefKind::Type { has_default, .. } => {
if !infer_types && has_default {
                    // If we have a default, then it doesn't matter that we're not
// inferring the type arguments: we provide the default where any
// is missing.
let default = self.tcx.type_of(param.def_id);
self.normalize_ty(
span,
default.subst_spanned(self.tcx, substs.unwrap(), Some(span))
).into()
} else {
// If no type arguments were provided, we have to infer them.
// This case also occurs as a result of some malformed input, e.g.
// a lifetime argument being given instead of a type parameter.
// Using inference instead of `Error` gives better error messages.
self.var_for_def(span, param)
}
}
}
},
);
assert!(!substs.has_escaping_bound_vars());
assert!(!ty.has_escaping_bound_vars());
// Write the "user substs" down first thing for later.
let hir_id = self.tcx.hir.node_to_hir_id(node_id);
self.write_user_substs_from_substs(hir_id, substs, user_self_ty);
// Add all the obligations that are required, substituting and
// normalized appropriately.
let bounds = self.instantiate_bounds(span, def_id, &substs);
self.add_obligations_for_parameters(
traits::ObligationCause::new(span, self.body_id, traits::ItemObligation(def_id)),
&bounds);
// Substitute the values for the type parameters into the type of
// the referenced item.
let ty_substituted = self.instantiate_type_scheme(span, &substs, &ty);
if let Some(UserSelfTy { impl_def_id, self_ty }) = user_self_ty {
// In the case of `Foo<T>::method` and `<Foo<T>>::method`, if `method`
// is inherent, there is no `Self` parameter, instead, the impl needs
// type parameters, which we can infer by unifying the provided `Self`
// with the substituted impl type.
let ty = self.tcx.type_of(impl_def_id);
let impl_ty = self.instantiate_type_scheme(span, &substs, &ty);
match self.at(&self.misc(span), self.param_env).sup(impl_ty, self_ty) {
Ok(ok) => self.register_infer_ok_obligations(ok),
Err(_) => {
span_bug!(span,
"instantiate_value_path: (UFCS) {:?} was a subtype of {:?} but now is not?",
self_ty,
impl_ty);
}
}
}
self.check_rustc_args_require_const(def_id, node_id, span);
debug!("instantiate_value_path: type of {:?} is {:?}",
node_id,
ty_substituted);
self.write_substs(hir_id, substs);
(ty_substituted, new_def)
}
fn check_rustc_args_require_const(&self,
def_id: DefId,
node_id: ast::NodeId,
span: Span) {
// We're only interested in functions tagged with
// #[rustc_args_required_const], so ignore anything that's not.
if !self.tcx.has_attr(def_id, "rustc_args_required_const") {
return
}
// If our calling expression is indeed the function itself, we're good!
// If not, generate an error that this can only be called directly.
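        // For example, with `#[rustc_args_required_const(0)] fn f(x: usize) {}`,
        // a direct call `f(0)` is accepted, but `let g = f;` is rejected
        // because `f` would escape as a function pointer.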
if let Node::Expr(expr) = self.tcx.hir.get(self.tcx.hir.get_parent_node(node_id)) {
if let hir::ExprKind::Call(ref callee, ..) = expr.node {
if callee.id == node_id {
return
}
}
}
self.tcx.sess.span_err(span, "this function can only be invoked \
directly, not through a function pointer");
}
// Resolves `typ` by a single level if `typ` is a type variable.
// If no resolution is possible, then an error is reported.
// Numeric inference variables may be left unresolved.
pub fn structurally_resolved_type(&self, sp: Span, ty: Ty<'tcx>) -> Ty<'tcx> {
let ty = self.resolve_type_vars_with_obligations(ty);
if !ty.is_ty_var() {
ty
} else {
if !self.is_tainted_by_errors() {
self.need_type_info_err((**self).body_id, sp, ty)
.note("type must be known at this point")
.emit();
}
self.demand_suptype(sp, self.tcx.types.err, ty);
self.tcx.types.err
}
}
fn with_breakable_ctxt<F: FnOnce() -> R, R>(&self, id: ast::NodeId,
ctxt: BreakableCtxt<'gcx, 'tcx>, f: F)
-> (BreakableCtxt<'gcx, 'tcx>, R) {
let index;
{
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
index = enclosing_breakables.stack.len();
enclosing_breakables.by_id.insert(id, index);
enclosing_breakables.stack.push(ctxt);
}
let result = f();
let ctxt = {
let mut enclosing_breakables = self.enclosing_breakables.borrow_mut();
debug_assert!(enclosing_breakables.stack.len() == index + 1);
enclosing_breakables.by_id.remove(&id).expect("missing breakable context");
enclosing_breakables.stack.pop().expect("missing breakable context")
};
(ctxt, result)
}
}
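/// Checks that every type parameter declared by an item is actually used;
/// e.g. `type Foo<T> = u32;` is rejected with E0091 because `T` is unused.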
pub fn check_bounds_are_used<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
generics: &ty::Generics,
ty: Ty<'tcx>) {
let own_counts = generics.own_counts();
debug!("check_bounds_are_used(n_tps={}, ty={:?})", own_counts.types, ty);
if own_counts.types == 0 {
return;
}
// Make a vector of booleans initially false, set to true when used.
let mut types_used = vec![false; own_counts.types];
for leaf_ty in ty.walk() {
if let ty::Param(ty::ParamTy { idx, .. }) = leaf_ty.sty {
debug!("Found use of ty param num {}", idx);
types_used[idx as usize - own_counts.lifetimes] = true;
} else if let ty::Error = leaf_ty.sty {
// If there is already another error, do not emit
// an error for not using a type Parameter.
assert!(tcx.sess.err_count() > 0);
return;
}
}
let types = generics.params.iter().filter(|param| match param.kind {
ty::GenericParamDefKind::Type { .. } => true,
_ => false,
});
for (&used, param) in types_used.iter().zip(types) {
if !used {
let id = tcx.hir.as_local_node_id(param.def_id).unwrap();
let span = tcx.hir.span(id);
struct_span_err!(tcx.sess, span, E0091, "type parameter `{}` is unused", param.name)
.span_label(span, "unused type parameter")
.emit();
}
}
}
fn fatally_break_rust(sess: &Session) {
let handler = sess.diagnostic();
handler.span_bug_no_panic(
MultiSpan::new(),
"It looks like you're trying to break rust; would you like some ICE?",
);
handler.note_without_error("the compiler expectedly panicked. this is a feature.");
handler.note_without_error(
"we would appreciate a joke overview: \
https://github.com/rust-lang/rust/issues/43162#issuecomment-320764675"
);
handler.note_without_error(&format!("rustc {} running on {}",
option_env!("CFG_VERSION").unwrap_or("unknown_version"),
::session::config::host_triple(),
));
}
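/// Renders a count with a pluralized noun, e.g. `potentially_plural_count(1, "argument")`
/// yields `"1 argument"` while `potentially_plural_count(3, "argument")` yields `"3 arguments"`.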
fn potentially_plural_count(count: usize, word: &str) -> String {
format!("{} {}{}", count, word, if count == 1 { "" } else { "s" })
}
orahlp_test.go | // Copyright 2019 Tamás Gulácsi
//
// SPDX-License-Identifier: UPL-1.0
package oracledb
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
)
func TestMapToSlice(t *testing.T) {
for i, tc := range []struct {
in, await string
params []interface{}
}{
{
`SELECT NVL(MAX(F_dazon), :dazon) FROM T_spl_level
WHERE (F_spl_azon = :lev_azon OR --:lev_azon OR
F_ssz = 0 AND F_lev_azon = /*:lev_azon*/:lev_azon)`,
`SELECT NVL(MAX(F_dazon), :1) FROM T_spl_level
WHERE (F_spl_azon = :2 OR --:lev_azon OR
F_ssz = 0 AND F_lev_azon = /*:lev_azon*/:3)`,
[]interface{}{"dazon", "lev_azon", "lev_azon"},
},
{
`INSERT INTO PERSON(NAME) VALUES('hello') RETURNING ID INTO :ID`,
`INSERT INTO PERSON(NAME) VALUES('hello') RETURNING ID INTO :1`,
[]interface{}{"ID"},
},
{
`DECLARE
i1 PLS_INTEGER;
i2 PLS_INTEGER;
v001 BRUNO.DB_WEB_ELEKTR.KOTVENY_REC_TYP;
BEGIN
v001.dijkod := :p002#dijkod;
DB_web.sendpreoffer_31101(p_kotveny=>v001);
:p002#dijkod := v001.dijkod;
END;
`,
`DECLARE
i1 PLS_INTEGER;
i2 PLS_INTEGER;
v001 BRUNO.DB_WEB_ELEKTR.KOTVENY_REC_TYP;
BEGIN
v001.dijkod := :1;
DB_web.sendpreoffer_31101(p_kotveny=>v001);
:2 := v001.dijkod;
END;
`,
[]interface{}{"p002#dijkod", "p002#dijkod"},
},
} {
got, params := MapToSlice(tc.in, func(s string) interface{} { return s })
d := cmp.Diff(tc.await, got)
if d != "" {
| if !reflect.DeepEqual(params, tc.params) {
t.Errorf("%d. params: got\n\t%#v,\nwanted\n\t%#v.", i, params, tc.params)
}
}
}
| t.Errorf("%d. diff:\n%s", i, d)
}
|
util.go | package util
// IsNotEqual does not purely compare equality the way reflect.DeepEqual does.
// It returns false if a and b are equal.
// It returns false if a and b are not equal but a is a zero value (see IsZeroValue).
// It returns true if a and b are not equal and a is not a zero value.
func IsNotEqual(a, b interface{}) bool {
	if a != b {
		return !IsZeroValue(a)
	}
	return false
}
// IsZeroValue returns true if input interface is the corresponding zero value
func IsZeroValue(i interface{}) bool {
if i == nil {
return true
} // nil interface
if i == "" {
return true
} // zero value of a string
if i == 0.0 {
return true
} // zero value of a float64
if i == 0 {
return true
} // zero value of an int
if i == false {
return true
} // zero value of a boolean
return false
}
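
// Example behaviour:
//
//	IsNotEqual("a", "a") // false: the values are equal
//	IsNotEqual("", "b")  // false: the values differ, but "" is a zero value
//	IsNotEqual("a", "b") // true:  the values differ and "a" is non-zero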
sound_slice.py | from pydub import AudioSegment
import json
import re
timing_src = [
"./inputs/timing/04-JHN-01-timing.txt",
"./inputs/timing/04-JHN-02-timing.txt",
"./inputs/timing/04-JHN-03-timing.txt",
"./inputs/timing/04-JHN-04-timing.txt",
"./inputs/timing/04-JHN-05-timing.txt",
"./inputs/timing/04-JHN-06-timing.txt",
"./inputs/timing/04-JHN-07-timing.txt",
"./inputs/timing/04-JHN-08-timing.txt",
"./inputs/timing/04-JHN-09-timing.txt",
"./inputs/timing/04-JHN-10-timing.txt",
"./inputs/timing/04-JHN-11-timing.txt", | "./inputs/timing/04-JHN-16-timing.txt",
"./inputs/timing/04-JHN-17-timing.txt",
"./inputs/timing/04-JHN-18-timing.txt",
"./inputs/timing/04-JHN-19-timing.txt",
"./inputs/timing/04-JHN-20-timing.txt",
"./inputs/timing/04-JHN-21-timing.txt"
]
audio_src = [
"./inputs/mp3/44-JHNgul-01.mp3",
"./inputs/mp3/44-JHNgul-02.mp3",
"./inputs/mp3/44-JHNgul-03.mp3",
"./inputs/mp3/44-JHNgul-04.mp3",
"./inputs/mp3/44-JHNgul-05.mp3",
"./inputs/mp3/44-JHNgul-06.mp3",
"./inputs/mp3/44-JHNgul-07.mp3",
"./inputs/mp3/44-JHNgul-08.mp3",
"./inputs/mp3/44-JHNgul-09.mp3",
"./inputs/mp3/44-JHNgul-10.mp3",
"./inputs/mp3/44-JHNgul-11.mp3",
"./inputs/mp3/44-JHNgul-12.mp3",
"./inputs/mp3/44-JHNgul-13.mp3",
"./inputs/mp3/44-JHNgul-14.mp3",
"./inputs/mp3/44-JHNgul-15.mp3",
"./inputs/mp3/44-JHNgul-16.mp3",
"./inputs/mp3/44-JHNgul-17.mp3",
"./inputs/mp3/44-JHNgul-18.mp3",
"./inputs/mp3/44-JHNgul-19.mp3",
"./inputs/mp3/44-JHNgul-20.mp3",
"./inputs/mp3/44-JHNgul-21.mp3"
]
def get_audio(i):
if not audio[i]:
audio[i] = AudioSegment.from_mp3(audio_src[i])
return audio[i]
pages_src = "./inputs/story_data.json"
timing_raw = []
pages_raw = ""
audio = [None for _ in audio_src]
for t in timing_src:
with open(t) as file:
timing_raw.append(file.read())
#for a in audio_src:
# audio.append(AudioSegment.from_mp3(a))
with open(pages_src) as file:
pages_raw = file.read()
segments = [None] * len(timing_raw)
def get_segment(i):
if not segments[i]:
segments[i] = []
raw = timing_raw[i]
raw = raw.replace('\ufeff','')
raw = raw.strip()
timings = raw.split("\n")
timings = [x.split("\t") for x in timings]
timings = [(float(x[0]), float(x[1]), x[2]) for x in timings]
timings = [(int(x[0] * 1000), int(x[1] * 1000), x[2]) for x in timings]
timings = [x for x in timings if x[2][0].isdigit()]
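        # e.g. the raw line "12.345\t15.0\t16a" becomes (12345, 15000, "16a");
        # entries whose label does not start with a digit are discarded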
#print(timings)
timings2 = []
curr_verse = 0
curr_start = 0
curr_end = 0
for x in timings:
verse = int(re.match(r"[0-9]+",x[2]).group(0))
if verse != curr_verse:
timings2.append((curr_start,curr_end,curr_verse))
curr_verse = verse
curr_start = x[0]
curr_end = x[1]
timings2.append((curr_start,curr_end,curr_verse))
timings = timings2[1:]
for t in timings:
#print(t)
start = t[0]
end = t[1]
seg = get_audio(i)[start:end]
segments[i].append(seg)
return segments[i]
# assumes that start and end are in the same chapter
def get_seg(ref_start,ref_end):
seg = AudioSegment.empty()
m_start = re.match(r"([0-9]+):([0-9]+)",ref_start)
m_end = re.match(r"([0-9]+):([0-9]+)",ref_end)
book = int(m_start.group(1))
verse_start = int(m_start.group(2))
verse_end = int(m_end.group(2))
#print(book,verse_start,verse_end)
for verse in range(verse_start,verse_end+1):
seg += get_segment(book-1)[verse-1]
return seg
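# e.g. get_seg("3:16", "3:18") returns the audio for verses 16-18 of
# chapter 3, concatenated in verse order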
def format_book_title(t):
return re.sub(r"[ -]",'_',t)
# produce audio files for a story
def segment_story(story):
durations = [] # in millis
filenames = []
for p in story["pages"]:
seg = get_seg(p["ref_start"],p["ref_end"])
filename = "./outputs/{0}_{1:02d}.mp3".format(format_book_title(story["title"]),p["page"])
file = open(filename,"w+")
file.write(' ')
file.close()
seg.export(filename, format="mp3")
print(filename)
durations.append(len(seg))
filenames.append(filename)
return filenames, durations
if __name__ == "__main__":
stories = json.loads(pages_raw)
for story in stories["storyCollection"]:
segment_story(story["story"])
# print by verse
'''
for i in range(len(segments)):
for j in range(len(segments[i])):
#print(i,j)
filename = "./outputs/{0:02d}_{1:02d}.mp3".format(i+1,j+1)
file = open(filename,"w+")
file.write(' ')
file.close()
segments[i][j].export(filename, format="mp3")
print(filename)
'''
test_epi_catalog_listing.py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, MN Technique and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('EPI Catalog Listing')
class TestEPICatalogListing(unittest.TestCase):
	pass
|
table.go | // Copyright (c) 2016-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package iptables
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"reflect"
"regexp"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
log "github.com/sirupsen/logrus"
"github.com/projectcalico/libcalico-go/lib/set"
)
const (
MaxChainNameLength = 28
minPostWriteInterval = 50 * time.Millisecond
)
var (
// List of all the top-level kernel-created chains by iptables table.
tableToKernelChains = map[string][]string{
"filter": []string{"INPUT", "FORWARD", "OUTPUT"},
"nat": []string{"PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"},
"mangle": []string{"PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"},
"raw": []string{"PREROUTING", "OUTPUT"},
}
// chainCreateRegexp matches iptables-save output lines for chain forward reference lines.
// It captures the name of the chain.
chainCreateRegexp = regexp.MustCompile(`^:(\S+)`)
// appendRegexp matches an iptables-save output line for an append operation.
appendRegexp = regexp.MustCompile(`^-A (\S+)`)
// Prometheus metrics.
countNumRestoreCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_calls",
Help: "Number of iptables-restore calls.",
})
countNumRestoreErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_restore_errors",
Help: "Number of iptables-restore errors.",
})
countNumSaveCalls = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_calls",
Help: "Number of iptables-save calls.",
})
countNumSaveErrors = prometheus.NewCounter(prometheus.CounterOpts{
Name: "felix_iptables_save_errors",
Help: "Number of iptables-save errors.",
})
gaugeNumChains = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_chains",
Help: "Number of active iptables chains.",
}, []string{"ip_version", "table"})
gaugeNumRules = prometheus.NewGaugeVec(prometheus.GaugeOpts{
Name: "felix_iptables_rules",
Help: "Number of active iptables rules.",
}, []string{"ip_version", "table"})
countNumLinesExecuted = prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "felix_iptables_lines_executed",
Help: "Number of iptables rule updates executed.",
}, []string{"ip_version", "table"})
)
func init() {
prometheus.MustRegister(countNumRestoreCalls)
prometheus.MustRegister(countNumRestoreErrors)
prometheus.MustRegister(countNumSaveCalls)
prometheus.MustRegister(countNumSaveErrors)
prometheus.MustRegister(gaugeNumChains)
prometheus.MustRegister(gaugeNumRules)
prometheus.MustRegister(countNumLinesExecuted)
}
// Table represents a single one of the iptables tables, i.e. "raw", "nat", "filter", etc. It
// caches the desired state of that table, then attempts to bring it into sync when Apply() is
// called.
//
// API Model
//
// Table supports two classes of operation: "rule insertions" and "full chain updates".
//
// As the name suggests, rule insertions allow for inserting one or more rules into a pre-existing
// chain. Rule insertions are intended to be used to hook kernel chains (such as "FORWARD") in
// order to direct them to a Felix-owned chain. It is important to minimise the use of rule
// insertions because the top-level chains are shared resources, which can be modified by other
// applications. In addition, rule insertions are harder to clean up after an upgrade to a new
// version of Felix (because we need a way to recognise our rules in a crowded chain).
//
// Full chain updates replace the entire contents of a Felix-owned chain with a new set of rules.
// Limiting the operation to "replace whole chain" in this way significantly simplifies the API.
// Although the API operates on full chains, the dataplane write logic tries to avoid rewriting
// a whole chain if only part of it has changed (this was not the case in Felix 1.4). This
// prevents iptables counters from being reset unnecessarily.
//
// In either case, the actual dataplane updates are deferred until the next call to Apply() so
// chain updates and insertions may occur in any order as long as they are consistent (i.e. there
// are no references to non-existent chains) by the time Apply() is called.
//
// Design
//
// We had several goals in designing the iptables machinery in 2.0.0:
//
// (1) High performance. Felix needs to handle high churn of endpoints and rules.
//
// (2) Ability to restore rules, even if other applications accidentally break them: we found that
// other applications sometimes misuse iptables-save and iptables-restore to do a read, modify,
// write cycle. That behaviour is not safe under concurrent modification.
//
// (3) Avoid rewriting rules that haven't changed so that we don't reset iptables counters.
//
// (4) Avoid parsing iptables commands (for example, the output from iptables/iptables-save).
// This is very hard to do robustly because iptables rules do not necessarily round-trip through
// the kernel in the same form. In addition, the format could easily change due to changes or
// fixes in the iptables/iptables-save command.
//
// (5) Support for graceful restart. I.e. deferring potentially incorrect updates until we're
// in-sync with the datastore. For example, if we have 100 endpoints on a host, after a restart
// we don't want to write a "dispatch" chain when we learn about the first endpoint (possibly
// replacing an existing one that had all 100 endpoints in place and causing traffic to glitch);
// instead, we want to defer until we've seen all 100 and then do the write.
//
// (6) Improved handling of rule inserts vs Felix 1.4.x. Previous versions of Felix sometimes
// inserted special-case rules that were not marked as Calico rules in any sensible way making
// cleanup of those rules after an upgrade difficult.
//
// Implementation
//
// For high performance (goal 1), we use iptables-restore to do bulk updates to iptables. This is
// much faster than individual iptables calls.
//
// To allow us to restore rules after they are clobbered by another process (goal 2), we cache
// them at this layer. This means that we don't need a mechanism to ask the other layers of Felix
// to do a resync. Note: Table doesn't start a thread of its own so it relies on the main event
// loop to trigger any dataplane resync polls.
//
// There is tension between goals 3 and 4. In order to avoid full rewrites (goal 3), we need to
// know what rules are in place, but we also don't want to parse them to find out (goal 4)! As
// a compromise, we deterministically calculate an ID for each rule and store it in an iptables
// comment. Then, when we want to know what rules are in place, we _do_ parse the output from
// iptables-save, but only to read back the rule IDs. That limits the amount of parsing we need
// to do and keeps it manageable/robust.
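//
// For example (illustrative), a rule rendered with its tracking hash comment
// might look like:
//
//     -A cali-fw-eth0 -m comment --comment "cali:fn5tmFoM4CB6-cUV" -j DROP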
//
// To support graceful restart (goal 5), we defer updates to the dataplane until Apply() is called,
// then we do an atomic update using iptables-restore. As long as the first Apply() call is
// after we're in sync, the dataplane won't be touched until the right time. Felix 1.4.x had a
// more complex mechanism to support partial updates during the graceful restart period but
// Felix 2.0.0 resyncs so quickly that the added complexity is not justified.
//
// To make it easier to manage rule insertions (goal 6), we add rule IDs to those too. With
// rule IDs in place, we can easily distinguish Calico rules from non-Calico rules without needing
// to know exactly which rules to expect. To deal with cleanup after upgrade from older versions
// that did not write rule IDs, we support special-case regexes to detect our old rules.
//
// Thread safety
//
// Table doesn't do any internal synchronization; its methods should only be called from one
// thread.  To avoid conflicts in the dataplane itself, there should only be one instance of
// Table for each iptables table in an application.
type Table struct {
Name string
IPVersion uint8
// featureDetector detects the features of the dataplane.
featureDetector *FeatureDetector
// chainToInsertedRules maps from chain name to a list of rules to be inserted at the start
// of that chain. Rules are written with rule hash comments. The Table cleans up inserted
// rules with unknown hashes.
chainToInsertedRules map[string][]Rule
dirtyInserts set.Set
// chainNameToChain contains the desired state of our iptables chains, indexed by
// chain name.  Each chain's rules are rendered as iptables fragments, such as
// "--match foo --jump DROP" (i.e. omitting the action and chain name, which are calculated
// as needed).
chainNameToChain map[string]*Chain
dirtyChains set.Set
inSyncWithDataPlane bool
// chainToDataplaneHashes contains the rule hashes that we think are in the dataplane.
// It is updated when we write to the dataplane but it can also be read back and compared
// to what we calculate from chainNameToChain.
chainToDataplaneHashes map[string][]string
// chainToFullRules contains the full rules for any chains that we may be hooking into, mapped from chain name
// to slices of rules in that chain.
chainToFullRules map[string][]string
// hashCommentPrefix holds the prefix that we prepend to our rule-tracking hashes.
hashCommentPrefix string
// hashCommentRegexp matches the rule-tracking comment, capturing the rule hash.
hashCommentRegexp *regexp.Regexp
// ourChainsRegexp matches the names of chains that are "ours", i.e. start with one of our
// prefixes.
ourChainsRegexp *regexp.Regexp
// oldInsertRegexp matches inserted rules from old, pre-rule-hash versions of Felix.
oldInsertRegexp *regexp.Regexp
// nftablesMode should be set to true if iptables is using the nftables backend.
nftablesMode bool
iptablesRestoreCmd string
iptablesSaveCmd string
// insertMode is either "insert" or "append"; whether we insert our rules or append them
// to top-level chains.
insertMode string
// Record when we did our most recent reads and writes of the table. We use these to
// calculate the next time we should force a refresh.
lastReadTime time.Time
lastWriteTime time.Time
initialPostWriteInterval time.Duration
postWriteInterval time.Duration
refreshInterval time.Duration
// calicoXtablesLock, if enabled, is our implementation of the xtables lock.
calicoXtablesLock sync.Locker
// lockTimeout is the timeout used for iptables-restore's native xtables lock implementation.
lockTimeout time.Duration
// lockProbeInterval is the lock probe interval used for iptables-restore's native xtables lock
// implementation.
lockProbeInterval time.Duration
logCxt *log.Entry
gaugeNumChains prometheus.Gauge
gaugeNumRules prometheus.Gauge
countNumLinesExecuted prometheus.Counter
// Reusable buffer for writing to iptables.
restoreInputBuffer RestoreInputBuilder
// Factory for making commands, used by UTs to shim exec.Command().
newCmd cmdFactory
// Shims for time.XXX functions:
timeSleep func(d time.Duration)
timeNow func() time.Time
// lookPath is a shim for exec.LookPath.
lookPath func(file string) (string, error)
}
type TableOptions struct {
HistoricChainPrefixes []string
ExtraCleanupRegexPattern string
BackendMode string
InsertMode string
RefreshInterval time.Duration
PostWriteInterval time.Duration
// LockTimeout is the timeout to use for iptables-restore's native xtables lock.
LockTimeout time.Duration
// LockProbeInterval is the probe interval to use for iptables-restore's native xtables lock.
LockProbeInterval time.Duration
// NewCmdOverride for tests, if non-nil, factory to use instead of the real exec.Command()
NewCmdOverride cmdFactory
// SleepOverride for tests, if non-nil, replacement for time.Sleep()
SleepOverride func(d time.Duration)
// NowOverride for tests, if non-nil, replacement for time.Now()
NowOverride func() time.Time
// LookPathOverride for tests, if non-nil, replacement for exec.LookPath()
LookPathOverride func(file string) (string, error)
}
func NewTable(
name string,
ipVersion uint8,
hashPrefix string,
iptablesWriteLock sync.Locker,
detector *FeatureDetector,
options TableOptions,
) *Table {
// Calculate the regex used to match the hash comment. The comment looks like this:
// --comment "cali:abcd1234_-".
hashCommentRegexp := regexp.MustCompile(`--comment "?` + hashPrefix + `([a-zA-Z0-9_-]+)"?`)
ourChainsPattern := "^(" + strings.Join(options.HistoricChainPrefixes, "|") + ")"
ourChainsRegexp := regexp.MustCompile(ourChainsPattern)
oldInsertRegexpParts := []string{}
for _, prefix := range options.HistoricChainPrefixes {
part := fmt.Sprintf("(?:-j|--jump) %s", prefix)
oldInsertRegexpParts = append(oldInsertRegexpParts, part)
}
if options.ExtraCleanupRegexPattern != "" {
oldInsertRegexpParts = append(oldInsertRegexpParts,
options.ExtraCleanupRegexPattern)
}
oldInsertPattern := strings.Join(oldInsertRegexpParts, "|")
oldInsertRegexp := regexp.MustCompile(oldInsertPattern)
// Pre-populate the insert table with empty lists for each kernel chain. This ensures that we
// clean up any chains that we hooked on a previous run.
inserts := map[string][]Rule{}
dirtyInserts := set.New()
for _, kernelChain := range tableToKernelChains[name] {
inserts[kernelChain] = []Rule{}
dirtyInserts.Add(kernelChain)
}
var insertMode string
switch options.InsertMode {
case "", "insert":
insertMode = "insert"
case "append":
insertMode = "append"
default:
log.WithField("insertMode", options.InsertMode).Panic("Unknown insert mode")
}
if options.PostWriteInterval <= minPostWriteInterval {
log.WithFields(log.Fields{
"setValue": options.PostWriteInterval,
"default": minPostWriteInterval,
}).Info("PostWriteInterval too small, defaulting.")
options.PostWriteInterval = minPostWriteInterval
}
// Allow override of exec.Command() and time.Sleep() for test purposes.
newCmd := newRealCmd
if options.NewCmdOverride != nil {
newCmd = options.NewCmdOverride
}
sleep := time.Sleep
if options.SleepOverride != nil {
sleep = options.SleepOverride
}
now := time.Now
if options.NowOverride != nil {
now = options.NowOverride
}
lookPath := exec.LookPath
if options.LookPathOverride != nil {
lookPath = options.LookPathOverride
}
table := &Table{
Name: name,
IPVersion: ipVersion,
featureDetector: detector,
chainToInsertedRules: inserts,
dirtyInserts: dirtyInserts,
chainNameToChain: map[string]*Chain{},
dirtyChains: set.New(),
chainToDataplaneHashes: map[string][]string{},
chainToFullRules: map[string][]string{},
logCxt: log.WithFields(log.Fields{
"ipVersion": ipVersion,
"table": name,
}),
hashCommentPrefix: hashPrefix,
hashCommentRegexp: hashCommentRegexp,
ourChainsRegexp: ourChainsRegexp,
oldInsertRegexp: oldInsertRegexp,
insertMode: insertMode,
// Initialise the write tracking as if we'd just done a write; this will trigger
// us to recheck the dataplane at exponentially increasing intervals at startup.
// Note: if we didn't do this, the calculation logic would need to be modified
// to cope with zero values for these fields.
lastWriteTime: now(),
initialPostWriteInterval: options.PostWriteInterval,
postWriteInterval: options.PostWriteInterval,
refreshInterval: options.RefreshInterval,
calicoXtablesLock: iptablesWriteLock,
lockTimeout: options.LockTimeout,
lockProbeInterval: options.LockProbeInterval,
newCmd: newCmd,
timeSleep: sleep,
timeNow: now,
lookPath: lookPath,
gaugeNumChains: gaugeNumChains.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
gaugeNumRules: gaugeNumRules.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
countNumLinesExecuted: countNumLinesExecuted.WithLabelValues(fmt.Sprintf("%d", ipVersion), name),
}
table.restoreInputBuffer.NumLinesWritten = table.countNumLinesExecuted
iptablesVariant := strings.ToLower(options.BackendMode)
if iptablesVariant == "" {
iptablesVariant = "legacy"
}
if iptablesVariant == "nft" {
log.Info("Enabling iptables-in-nftables-mode workarounds.")
table.nftablesMode = true
}
table.iptablesRestoreCmd = table.findBestBinary(ipVersion, iptablesVariant, "restore")
table.iptablesSaveCmd = table.findBestBinary(ipVersion, iptablesVariant, "save")
return table
}
// findBestBinary tries to find an iptables binary for the specific variant (legacy/nftables mode) and returns the name
// of the binary. Falls back on iptables-restore/iptables-save if the specific variant isn't available.
// Panics if no binary can be found.
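// For example (illustrative): with ipVersion 6, backend mode "nft" and "save", the
// candidates tried in order are "ip6tables-nft-save", then "ip6tables-save".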
func (t *Table) findBestBinary(ipVersion uint8, backendMode, saveOrRestore string) string {
verInfix := ""
if ipVersion == 6 {
verInfix = "6"
}
candidates := []string{
"ip" + verInfix + "tables-" + backendMode + "-" + saveOrRestore,
"ip" + verInfix + "tables-" + saveOrRestore,
}
logCxt := log.WithFields(log.Fields{
"ipVersion": ipVersion,
"backendMode": backendMode,
"saveOrRestore": saveOrRestore,
"candidates": candidates,
})
for _, candidate := range candidates {
_, err := t.lookPath(candidate)
if err == nil {
logCxt.WithField("command", candidate).Info("Looked up iptables command")
return candidate
}
}
logCxt.Panic("Failed to find iptables command")
return ""
}
func (t *Table) SetRuleInsertions(chainName string, rules []Rule) {
t.logCxt.WithField("chainName", chainName).Debug("Updating rule insertions")
oldRules := t.chainToInsertedRules[chainName]
t.chainToInsertedRules[chainName] = rules
numRulesDelta := len(rules) - len(oldRules)
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyInserts.Add(chainName)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("insertion")
}
func (t *Table) UpdateChains(chains []*Chain) {
for _, chain := range chains {
t.UpdateChain(chain)
}
}
func (t *Table) UpdateChain(chain *Chain) {
t.logCxt.WithField("chainName", chain.Name).Info("Queueing update of chain.")
oldNumRules := 0
if oldChain := t.chainNameToChain[chain.Name]; oldChain != nil {
oldNumRules = len(oldChain.Rules)
}
t.chainNameToChain[chain.Name] = chain
numRulesDelta := len(chain.Rules) - oldNumRules
t.gaugeNumRules.Add(float64(numRulesDelta))
t.dirtyChains.Add(chain.Name)
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain update")
}
func (t *Table) RemoveChains(chains []*Chain) {
for _, chain := range chains {
t.RemoveChainByName(chain.Name)
}
}
func (t *Table) RemoveChainByName(name string) {
t.logCxt.WithField("chainName", name).Info("Queueing deletion of chain.")
if oldChain, known := t.chainNameToChain[name]; known {
t.gaugeNumRules.Sub(float64(len(oldChain.Rules)))
delete(t.chainNameToChain, name)
t.dirtyChains.Add(name)
}
// Defensive: make sure we re-read the dataplane state before we make updates. While the
// code was originally designed not to need this, we found that other users of
// iptables-restore can still clobber our updates so it's safest to re-read the state before
// each write.
t.InvalidateDataplaneCache("chain removal")
}
func (t *Table) loadDataplaneState() {
// Refresh the cache of feature data.
t.featureDetector.RefreshFeatures()
// Load the hashes from the dataplane.
t.logCxt.Info("Loading current iptables state and checking it is correct.")
t.lastReadTime = t.timeNow()
dataplaneHashes, dataplaneRules := t.getHashesAndRulesFromDataplane()
// Check that the rules we think we've programmed are still there and mark any inconsistent
// chains for refresh.
for chainName, expectedHashes := range t.chainToDataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain; no point in flagging it as
// out-of-sync.
logCxt.Debug("Skipping known-dirty chain")
continue
}
dpHashes := dataplaneHashes[chainName]
if !t.ourChainsRegexp.MatchString(chainName) {
// Not one of our chains so it may be one that we're inserting rules into.
insertedRules := t.chainToInsertedRules[chainName]
if len(insertedRules) == 0 {
// This chain shouldn't have any inserts; make sure that's the
// case. This also covers the scenario where a chain was removed,
// making dpHashes nil.
dataplaneHasInserts := false
for _, hash := range dpHashes {
if hash != "" {
dataplaneHasInserts = true
break
}
}
if dataplaneHasInserts {
logCxt.WithField("actualRuleIDs", dpHashes).Warn(
"Chain had unexpected inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
continue
}
// Re-calculate the expected rule insertions based on the current length
// of the chain (since other processes may have inserted/removed rules
// from the chain, throwing off the numbers).
expectedHashes, _ = t.expectedHashesForInsertChain(
chainName,
numEmptyStrings(dpHashes),
)
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.WithFields(log.Fields{
"expectedRuleIDs": expectedHashes,
"actualRuleIDs": dpHashes,
}).Warn("Detected out-of-sync inserts, marking for resync")
t.dirtyInserts.Add(chainName)
}
} else {
// One of our chains, should match exactly.
if !reflect.DeepEqual(dpHashes, expectedHashes) {
logCxt.Warn("Detected out-of-sync Calico chain, marking for resync")
t.dirtyChains.Add(chainName)
}
}
}
// Now scan for chains that shouldn't be there and mark for deletion.
t.logCxt.Debug("Scanning for unexpected iptables chains")
for chainName, dataplaneHashes := range dataplaneHashes {
logCxt := t.logCxt.WithField("chainName", chainName)
if t.dirtyChains.Contains(chainName) || t.dirtyInserts.Contains(chainName) {
// Already an update pending for this chain.
logCxt.Debug("Skipping known-dirty chain")
continue
}
if _, ok := t.chainToDataplaneHashes[chainName]; ok {
// Chain expected, we'll have checked its contents above.
logCxt.Debug("Skipping expected chain")
continue
}
if !t.ourChainsRegexp.MatchString(chainName) {
// Non-calico chain that is not tracked in chainToDataplaneHashes. We
// haven't seen the chain before and we haven't been asked to insert
// anything into it. Check that it doesn't have any rule insertions in it
// from a previous run of Felix.
for _, hash := range dataplaneHashes {
if hash != "" {
logCxt.Info("Found unexpected insert, marking for cleanup")
t.dirtyInserts.Add(chainName)
break
}
}
continue
}
// Chain exists in dataplane but not in memory, mark as dirty so we'll clean it up.
logCxt.Info("Found unexpected chain, marking for cleanup")
t.dirtyChains.Add(chainName)
}
t.logCxt.Debug("Finished loading iptables state")
t.chainToDataplaneHashes = dataplaneHashes
t.chainToFullRules = dataplaneRules
t.inSyncWithDataPlane = true
}
// expectedHashesForInsertChain calculates the expected hashes for a whole top-level chain
// given our inserts. If we're in append mode, that consists of numNonCalicoRules empty strings
// followed by our hashes; in insert mode, the opposite way round. To avoid recalculation, it
// returns the rule hashes as a second output.
func (t *Table) expectedHashesForInsertChain(
chainName string,
numNonCalicoRules int,
) (allHashes, ourHashes []string) {
insertedRules := t.chainToInsertedRules[chainName]
allHashes = make([]string, len(insertedRules)+numNonCalicoRules)
features := t.featureDetector.GetFeatures()
ourHashes = calculateRuleInsertHashes(chainName, insertedRules, features)
offset := 0
if t.insertMode == "append" {
log.Debug("In append mode, returning our hashes at end.")
offset = numNonCalicoRules
}
for i, hash := range ourHashes {
allHashes[i+offset] = hash
}
return
}
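// exampleInsertChainHashLayout is an illustrative sketch (not called by the real code;
// the hash values are hypothetical) of the layout calculated above: with two non-Calico
// rules in the chain, insert mode places our hashes first, append mode places them last.
func exampleInsertChainHashLayout(insertMode string) []string {
	ourHashes := []string{"h1", "h2"} // hypothetical rule hashes
	numNonCalicoRules := 2
	allHashes := make([]string, len(ourHashes)+numNonCalicoRules)
	offset := 0
	if insertMode == "append" {
		offset = numNonCalicoRules
	}
	for i, hash := range ourHashes {
		allHashes[i+offset] = hash
	}
	return allHashes // insert mode: ["h1" "h2" "" ""]; append mode: ["" "" "h1" "h2"]
}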
// getHashesAndRulesFromDataplane loads the current state of our table. It parses out the hashes that we
// add to rules and, for chains that we insert into, the full rules. The 'hashes' map contains an entry for each chain
// in the table. Each entry is a slice containing the hashes for the rules in that chain. Rules with no hashes are
// represented by an empty string. The 'rules' map contains an entry for each non-Calico chain in the table that
// contains inserts. It is used to generate deletes using the full rule, rather than deletes by line number, to avoid
// race conditions on chains we don't fully control.
func (t *Table) getHashesAndRulesFromDataplane() (hashes map[string][]string, rules map[string][]string) {
retries := 3
retryDelay := 100 * time.Millisecond
// Retry a few times before we panic. This deals with any transient errors and it prevents
// us from spamming a panic into the log when we're being gracefully shut down by a SIGTERM.
for {
hashes, rules, err := t.attemptToGetHashesAndRulesFromDataplane()
if err != nil {
countNumSaveErrors.Inc()
var stderr string
if ee, ok := err.(*exec.ExitError); ok {
stderr = string(ee.Stderr)
}
t.logCxt.WithError(err).WithField("stderr", stderr).Warnf("%s command failed", t.iptablesSaveCmd)
if retries > 0 {
retries--
t.timeSleep(retryDelay)
retryDelay *= 2
} else {
t.logCxt.Panicf("%s command failed after retries", t.iptablesSaveCmd)
}
continue
}
return hashes, rules
}
}
// attemptToGetHashesAndRulesFromDataplane starts an iptables-save subprocess and feeds its output to
// readHashesAndRulesFrom() via a pipe. It handles the various error cases.
func (t *Table) attemptToGetHashesAndRulesFromDataplane() (hashes map[string][]string, rules map[string][]string, err error) {
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
countNumSaveCalls.Inc()
stdout, err := cmd.StdoutPipe()
if err != nil {
log.WithError(err).Warnf("Failed to get stdout pipe for %s", t.iptablesSaveCmd)
return
}
err = cmd.Start()
if err != nil {
// Failed even before we started, close the pipe. (This would normally be done
// by Wait().)
log.WithError(err).Warnf("Failed to start %s", t.iptablesSaveCmd)
closeErr := stdout.Close()
if closeErr != nil {
log.WithError(closeErr).Warn("Error closing stdout after Start() failed.")
}
return
}
hashes, rules, err = t.readHashesAndRulesFrom(stdout)
if err != nil {
// In case readHashesAndRulesFrom() returned due to an error that didn't cause the
// process to exit, kill it now.
log.WithError(err).Warnf("Killing %s process after a failure", t.iptablesSaveCmd)
killErr := cmd.Kill()
if killErr != nil {
// If we don't know what state the process is in, we can't Wait() on it.
log.WithError(killErr).Panicf(
"Failed to kill %s process after failure.", t.iptablesSaveCmd)
}
}
waitErr := cmd.Wait()
if waitErr != nil {
log.WithError(waitErr).Warn("iptables save failed")
if err == nil {
err = waitErr
}
}
return
}
// readHashesAndRulesFrom scans the given reader containing iptables-save output for this table, extracting
// our rule hashes and, for all chains we insert into, the full rules. Entries in the returned map are indexed by
// chain name. For rules that we wrote, the hash is extracted from a comment that we added to the rule.
// For rules written by previous versions of Felix, it returns a dummy non-empty value. For rules not written by
// Felix, it returns an empty string. Hence, each returned slice has one entry per rule in its chain,
// whether or not the rule was written by Felix.
func (t *Table) readHashesAndRulesFrom(r io.ReadCloser) (hashes map[string][]string, rules map[string][]string, err error) {
hashes = map[string][]string{}
rules = map[string][]string{}
scanner := bufio.NewScanner(r)
// Keep track of which non-Calico chains have Calico inserts. If a chain does not have inserts,
// we'll remove the full rules for that chain below.
chainHasCalicoRule := set.New()
// Figure out if debug logging is enabled so we can skip some WithFields() calls in the
// tight loop below if the log wouldn't be emitted anyway.
debug := log.GetLevel() >= log.DebugLevel
for scanner.Scan() {
// Read the next line of the output.
line := scanner.Bytes()
// Look for lines of the form ":chain-name - [0:0]", which are forward declarations
// for (possibly empty) chains.
logCxt := t.logCxt
if debug {
// Avoid stringifying the line (and hence copying it) unless we're at debug
// level.
logCxt = logCxt.WithField("line", string(line))
logCxt.Debug("Parsing line")
}
captures := chainCreateRegexp.FindSubmatch(line)
if captures != nil {
// Chain forward-reference, make sure the chain exists.
chainName := string(captures[1])
if debug {
logCxt.WithField("chainName", chainName).Debug("Found forward-reference")
}
hashes[chainName] = []string{}
continue
}
// Look for append lines, such as "-A chain-name -m foo --foo bar"; these are the
// actual rules.
captures = appendRegexp.FindSubmatch(line)
if captures == nil {
// Skip any non-append lines.
logCxt.Debug("Not an append, skipping")
continue
}
chainName := string(captures[1])
// Look for one of our hashes on the rule. We record a zero hash for unknown rules
// so that they get cleaned up. Note: we're implicitly capturing the first match
// of the regex. When writing the rules, we ensure that the hash is written as the
// first comment.
hash := ""
captures = t.hashCommentRegexp.FindSubmatch(line)
if captures != nil {
hash = string(captures[1])
if debug {
logCxt.WithField("hash", hash).Debug("Found hash in rule")
}
chainHasCalicoRule.Add(chainName)
} else if t.oldInsertRegexp.Find(line) != nil {
logCxt.WithFields(log.Fields{
"rule": line,
"chainName": chainName,
}).Info("Found inserted rule from previous Felix version, marking for cleanup.")
hash = "OLD INSERT RULE"
chainHasCalicoRule.Add(chainName)
}
hashes[chainName] = append(hashes[chainName], hash)
// Not our chain so cache the full rule in case we need to generate deletes later on.
// After scanning the input, we prune any chains of full rules that do not contain inserts.
if !t.ourChainsRegexp.MatchString(chainName) {
// Only store the full rule for Calico rules. Otherwise, we just use the placeholder "-".
fullRule := "-"
if captures := t.hashCommentRegexp.FindSubmatch(line); captures != nil {
fullRule = string(line)
} else if t.oldInsertRegexp.Find(line) != nil {
fullRule = string(line)
}
rules[chainName] = append(rules[chainName], fullRule)
}
}
if scanner.Err() != nil {
log.WithError(scanner.Err()).Error("Failed to read hashes from dataplane")
return nil, nil, scanner.Err()
}
// Remove the full rules for any non-Calico chains that have no Calico inserts.
for chainName := range rules {
if !chainHasCalicoRule.Contains(chainName) {
delete(rules, chainName)
}
}
t.logCxt.Debugf("Read hashes from dataplane: %#v", hashes)
t.logCxt.Debugf("Read rules from dataplane: %#v", rules)
return hashes, rules, nil
}
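// exampleReadHash is an illustrative sketch (assuming the "cali:" hash prefix and a
// made-up rule) of how a rule hash is recovered from one iptables-save append line.
func exampleReadHash() string {
	line := `-A FORWARD -m comment --comment "cali:AbCd1234_-xYz" -j cali-FORWARD`
	re := regexp.MustCompile(`--comment "?cali:([a-zA-Z0-9_-]+)"?`)
	if captures := re.FindStringSubmatch(line); captures != nil {
		return captures[1] // "AbCd1234_-xYz"
	}
	return "" // not a Calico rule; recorded as a zero hash by the parser above
}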
func (t *Table) InvalidateDataplaneCache(reason string) {
logCxt := t.logCxt.WithField("reason", reason)
if !t.inSyncWithDataPlane {
logCxt.Debug("Would invalidate dataplane cache but it was already invalid.")
return
}
logCxt.Info("Invalidating dataplane cache")
t.inSyncWithDataPlane = false
}
func (t *Table) Apply() (rescheduleAfter time.Duration) {
now := t.timeNow()
// We _think_ we're in sync, check if there are any reasons to think we might
// not be in sync.
lastReadToNow := now.Sub(t.lastReadTime)
invalidated := false
if t.refreshInterval > 0 && lastReadToNow > t.refreshInterval {
// Too long since we've forced a refresh.
t.InvalidateDataplaneCache("refresh timer")
invalidated = true
}
// To work around the possibility of another process clobbering our updates, we refresh the
// dataplane after we do a write at exponentially increasing intervals. We do a refresh
// whenever the post-write interval has elapsed, doubling the interval each time (so
// refreshes become less frequent, capping at one hour).
for t.postWriteInterval != 0 &&
t.postWriteInterval < time.Hour &&
!now.Before(t.lastWriteTime.Add(t.postWriteInterval)) {
t.postWriteInterval *= 2
t.logCxt.WithField("newPostWriteInterval", t.postWriteInterval).Debug("Updating post-write interval")
if !invalidated {
t.InvalidateDataplaneCache("post update")
invalidated = true
}
}
// Retry until we succeed. There are several reasons that updating iptables may fail:
//
// - A concurrent write may invalidate iptables-restore's compare-and-swap; this manifests
// as a failure on the COMMIT line.
// - Another process may have clobbered some of our state, resulting in inconsistencies
// in what we try to program. This could manifest in a number of ways depending on what
// the other process did.
// - Random transient failure.
//
// It's also possible that we're bugged and trying to write bad data so we give up
// eventually.
retries := 10
backoffTime := 1 * time.Millisecond
failedAtLeastOnce := false
for {
if !t.inSyncWithDataPlane {
// We have reason to believe that our picture of the dataplane is out of
// sync. Refresh it. This may mark more chains as dirty.
t.loadDataplaneState()
}
if err := t.applyUpdates(); err != nil {
if retries > 0 {
retries--
t.logCxt.WithError(err).Warn("Failed to program iptables, will retry")
t.timeSleep(backoffTime)
backoffTime *= 2
t.logCxt.WithError(err).Warn("Retrying...")
failedAtLeastOnce = true
continue
} else {
t.logCxt.WithError(err).Error("Failed to program iptables, loading diags before panic.")
cmd := t.newCmd(t.iptablesSaveCmd, "-t", t.Name)
output, err2 := cmd.Output()
if err2 != nil {
t.logCxt.WithError(err2).Error("Failed to load iptables state")
} else {
t.logCxt.WithField("iptablesState", string(output)).Error("Current state of iptables")
}
t.logCxt.WithError(err).Panic("Failed to program iptables, giving up after retries")
}
}
if failedAtLeastOnce {
t.logCxt.Warn("Succeeded after retry.")
}
break
}
t.gaugeNumChains.Set(float64(len(t.chainNameToChain)))
// Check whether we need to be rescheduled and how soon.
if t.refreshInterval > 0 {
// Refresh interval is set, start with that.
lastReadToNow = now.Sub(t.lastReadTime)
rescheduleAfter = t.refreshInterval - lastReadToNow
}
if t.postWriteInterval < time.Hour {
postWriteResched := t.lastWriteTime.Add(t.postWriteInterval).Sub(now)
if postWriteResched <= 0 {
rescheduleAfter = 1 * time.Millisecond
} else if t.refreshInterval <= 0 || postWriteResched < rescheduleAfter {
rescheduleAfter = postWriteResched
}
}
return
}
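// examplePostWriteBackoff is an illustrative sketch (hypothetical 50ms starting value)
// of the post-write refresh schedule used by Apply: each time the interval expires we
// double it, so refreshes become exponentially less frequent, capped at an hour.
func examplePostWriteBackoff() []time.Duration {
	interval := 50 * time.Millisecond // hypothetical initial post-write interval
	var schedule []time.Duration
	for interval < time.Hour {
		schedule = append(schedule, interval)
		interval *= 2
	}
	return schedule // 50ms, 100ms, 200ms, ... while below one hour
}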
func (t *Table) applyUpdates() error {
// If needed, detect the dataplane features.
features := t.featureDetector.GetFeatures()
// Build up the iptables-restore input in an in-memory buffer. This allows us to log out the exact input after
// a failure, which has proven to be a very useful diagnostic tool.
buf := &t.restoreInputBuffer
buf.Reset() // Defensive.
// iptables-restore commands live in per-table transactions.
buf.StartTransaction(t.Name)
// Make a pass over the dirty chains and generate a forward reference for any that we're about to update.
// Writing a forward reference ensures that the chain exists and that it is empty.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
chainNeedsToBeFlushed := false
if t.nftablesMode {
// iptables-nft-restore <v1.8.3 has a bug (https://bugzilla.netfilter.org/show_bug.cgi?id=1348)
// where only the first replace command sets the rule index. Work around that by refreshing the
// whole chain using a flush.
chain := t.chainNameToChain[chainName]
currentHashes := chain.RuleHashes(features)
previousHashes := t.chainToDataplaneHashes[chainName]
t.logCxt.WithFields(log.Fields{
"previous": previousHashes,
"current": currentHashes,
}).Debug("Comparing old to new hashes.")
if len(previousHashes) > 0 && reflect.DeepEqual(currentHashes, previousHashes) {
// Chain is already correct, skip it.
log.Debug("Chain already correct")
return set.RemoveItem
}
chainNeedsToBeFlushed = true
} else if _, ok := t.chainNameToChain[chainName]; !ok {
// About to delete this chain, flush it first to sever dependencies.
chainNeedsToBeFlushed = true
} else if _, ok := t.chainToDataplaneHashes[chainName]; !ok {
// Chain doesn't exist in dataplane, mark it for creation.
chainNeedsToBeFlushed = true
}
if chainNeedsToBeFlushed {
buf.WriteForwardReference(chainName)
}
return nil
})
// Make a second pass over the dirty chains. This time, we write out the rule changes.
newHashes := map[string][]string{}
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if chain, ok := t.chainNameToChain[chainName]; ok {
// Chain update or creation. Scan the chain against its previous hashes
// and replace/append/delete as appropriate.
var previousHashes []string
if t.nftablesMode {
// Due to a bug in iptables nft mode, force a whole-chain rewrite. (See above.)
previousHashes = nil
} else {
// In iptables legacy mode, we compare the rules one by one and apply deltas rule by rule.
previousHashes = t.chainToDataplaneHashes[chainName]
}
currentHashes := chain.RuleHashes(features)
newHashes[chainName] = currentHashes
for i := 0; i < len(previousHashes) || i < len(currentHashes); i++ {
var line string
if i < len(previousHashes) && i < len(currentHashes) {
if previousHashes[i] == currentHashes[i] {
continue
}
// Hash doesn't match, replace the rule.
ruleNum := i + 1 // 1-indexed.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderReplace(chainName, ruleNum, prefixFrag, features)
} else if i < len(previousHashes) {
// previousHashes was longer, remove the old rules from the end.
ruleNum := len(currentHashes) + 1 // 1-indexed
line = t.renderDeleteByIndexLine(chainName, ruleNum)
} else {
// currentHashes was longer. Append.
prefixFrag := t.commentFrag(currentHashes[i])
line = chain.Rules[i].RenderAppend(chainName, prefixFrag, features)
}
buf.WriteLine(line)
}
}
return nil // Delay clearing the set until we've programmed iptables.
})
// Make a copy of our full rules map and keep track of all changes made while processing dirtyInserts.
// When we've successfully updated iptables, we'll update our cache of chainToFullRules with this map.
newChainToFullRules := map[string][]string{}
for chain, rules := range t.chainToFullRules {
newChainToFullRules[chain] = make([]string, len(rules))
copy(newChainToFullRules[chain], rules)
}
// Now calculate iptables updates for our inserted rules, which are used to hook top-level chains.
var deleteRenderingErr error
var line string
t.dirtyInserts.Iter(func(item interface{}) error {
chainName := item.(string)
previousHashes := t.chainToDataplaneHashes[chainName]
newRules := newChainToFullRules[chainName]
// Calculate the hashes for our inserted rules.
newChainHashes, newRuleHashes := t.expectedHashesForInsertChain(
chainName, numEmptyStrings(previousHashes))
if reflect.DeepEqual(newChainHashes, previousHashes) {
// Chain is in sync, skip to next one.
return nil
}
// For simplicity, if we've discovered that we're out-of-sync, remove all our
// rules from this chain, then re-insert/re-append them below.
for i := 0; i < len(previousHashes); i++ {
if previousHashes[i] != "" {
line, deleteRenderingErr = t.renderDeleteByValueLine(chainName, i)
if deleteRenderingErr != nil {
return set.StopIteration
}
buf.WriteLine(line)
}
}
// Go over our slice of "new" rules and create a copy of the slice with just the rules we didn't empty out.
copyOfNewRules := []string{}
for _, rule := range newRules {
if rule != "" {
copyOfNewRules = append(copyOfNewRules, rule)
}
}
newRules = copyOfNewRules
rules := t.chainToInsertedRules[chainName]
insertRuleLines := make([]string, len(rules))
if t.insertMode == "insert" {
t.logCxt.Debug("Rendering insert rules.")
// Since each insert is pushed onto the top of the chain, do the inserts in
// reverse order so that they end up in the correct order in the final
// state of the chain.
for i := len(rules) - 1; i >= 0; i-- {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderInsert(chainName, prefixFrag, features)
buf.WriteLine(line)
insertRuleLines[i] = line
}
newRules = append(insertRuleLines, newRules...)
} else {
t.logCxt.Debug("Rendering append rules.")
for i := 0; i < len(rules); i++ {
prefixFrag := t.commentFrag(newRuleHashes[i])
line := rules[i].RenderAppend(chainName, prefixFrag, features)
buf.WriteLine(line)
insertRuleLines[i] = line
}
newRules = append(newRules, insertRuleLines...)
}
newHashes[chainName] = newChainHashes
newChainToFullRules[chainName] = newRules
return nil // Delay clearing the set until we've programmed iptables.
})
// If rendering a delete by line number reached an unexpected state, error out so applyUpdates() can be retried.
if deleteRenderingErr != nil {
return deleteRenderingErr
}
if t.nftablesMode {
// The nftables version of iptables-restore requires that chains are unreferenced at the start of the
// transaction before they can be deleted (i.e. it doesn't seem to update the reference calculation as
// rules are deleted). Close the current transaction and open a new one for the deletions in order to
// refresh its state. The buffer will discard a no-op transaction so we don't need to check.
t.logCxt.Debug("In nftables mode, restarting transaction between updates and deletions.")
buf.EndTransaction()
buf.StartTransaction(t.Name)
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if _, ok := t.chainNameToChain[chainName]; !ok {
// Chain deletion
buf.WriteForwardReference(chainName)
}
return nil // Delay clearing the set until we've programmed iptables.
})
}
// Do deletions at the end. This ensures that we don't try to delete any chains that
// are still referenced (because we'll have removed the references in the modify pass
// above). Note: if a chain is being deleted at the same time as a chain that it refers to
// then we'll issue a create+flush instruction in the very first pass, which will sever the
// references.
t.dirtyChains.Iter(func(item interface{}) error {
chainName := item.(string)
if _, ok := t.chainNameToChain[chainName]; !ok {
// Chain deletion
buf.WriteLine(fmt.Sprintf("--delete-chain %s", chainName))
newHashes[chainName] = nil
}
return nil // Delay clearing the set until we've programmed iptables.
})
buf.EndTransaction()
if buf.Empty() {
t.logCxt.Debug("Update ended up being no-op, skipping call to ip(6)tables-restore.")
} else {
// Get the contents of the buffer ready to send to iptables-restore. Warning: for perf, this is directly
// accessing the buffer's internal array; don't touch the buffer after this point.
inputBytes := buf.GetBytesAndReset()
if log.GetLevel() >= log.DebugLevel {
// Only convert (potentially very large slice) to string at debug level.
inputStr := string(inputBytes)
t.logCxt.WithField("iptablesInput", inputStr).Debug("Writing to iptables")
}
var outputBuf, errBuf bytes.Buffer
args := []string{"--noflush", "--verbose"}
if features.RestoreSupportsLock {
// Versions of iptables-restore that support the xtables lock also make it impossible to disable. Make
// sure that we configure it to retry, with a short retry interval (the default is to try to
// acquire the lock only once).
lockTimeout := t.lockTimeout.Seconds()
if lockTimeout <= 0 {
// Before iptables-restore added lock support, we were able to disable the lock completely, which
// was indicated by a value <=0 (and was our default). Newer versions of iptables-restore require the
// lock so we override the default and set it to 10s.
lockTimeout = 10
}
lockProbeMicros := t.lockProbeInterval.Nanoseconds() / 1000
timeoutStr := fmt.Sprintf("%.0f", lockTimeout)
intervalStr := fmt.Sprintf("%d", lockProbeMicros)
args = append(args,
"--wait", timeoutStr, // seconds
"--wait-interval", intervalStr, // microseconds
)
log.WithFields(log.Fields{
"timeoutSecs": timeoutStr,
"probeIntervalMicros": intervalStr,
}).Debug("Using native iptables-restore xtables lock.")
}
cmd := t.newCmd(t.iptablesRestoreCmd, args...)
cmd.SetStdin(bytes.NewReader(inputBytes))
cmd.SetStdout(&outputBuf)
cmd.SetStderr(&errBuf)
countNumRestoreCalls.Inc()
// Note: calicoXtablesLock will be a dummy lock if our xtables lock is disabled (i.e. if iptables-restore
// supports the xtables lock itself, or if our implementation is disabled by config).
t.calicoXtablesLock.Lock()
err := cmd.Run()
t.calicoXtablesLock.Unlock()
if err != nil {
// To log out the input, we must convert to string here since, after we return, the buffer can be re-used
// (and the logger may convert to string on a background thread).
inputStr := string(inputBytes)
t.logCxt.WithFields(log.Fields{
"output": outputBuf.String(),
"errorOutput": errBuf.String(),
"error": err,
"input": inputStr,
}).Warn("Failed to execute ip(6)tables-restore command")
t.inSyncWithDataPlane = false
countNumRestoreErrors.Inc()
return err
}
t.lastWriteTime = t.timeNow()
t.postWriteInterval = t.initialPostWriteInterval
}
// Now we've successfully updated iptables, clear the dirty sets. We do this even if we
// found there was nothing to do above, since we may have found out that a dirty chain
// was actually a no-op update.
t.dirtyChains = set.New()
t.dirtyInserts = set.New()
// Store off the updates.
for chainName, hashes := range newHashes {
if hashes == nil {
delete(t.chainToDataplaneHashes, chainName)
} else {
t.chainToDataplaneHashes[chainName] = hashes
}
}
t.chainToFullRules = newChainToFullRules
return nil
}
func (t *Table) commentFrag(hash string) string {
return fmt.Sprintf(`-m comment --comment "%s%s"`, t.hashCommentPrefix, hash)
}
// renderDeleteByIndexLine produces a delete line by rule number. This function is used for cali chains.
func (t *Table) renderDeleteByIndexLine(chainName string, ruleNum int) string {
return fmt.Sprintf("-D %s %d", chainName, ruleNum)
}
// renderDeleteByValueLine produces a delete line by the full rule at the given rule number. This function is
// used for non-Calico chains.
func (t *Table) renderDeleteByValueLine(chainName string, ruleNum int) (string, error) {
// For non-cali chains, get the rule by number but delete using the full rule instead of rule number.
rules, ok := t.chainToFullRules[chainName]
if !ok || ruleNum >= len(rules) {
return "", fmt.Errorf("Rendering delete for non-existent rule: Rule %d in %q", ruleNum, chainName)
}
rule := rules[ruleNum]
// Make the append a delete.
return strings.Replace(rule, "-A", "-D", 1), nil
}
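// exampleRenderDeleteByValue is an illustrative sketch (hypothetical rule text) of the
// append-to-delete rewrite above: the cached "-A" line is replayed as a "-D" line.
func exampleRenderDeleteByValue() string {
	rule := `-A FORWARD -m comment --comment "cali:AbCd1234_-xYz" -j cali-FORWARD`
	return strings.Replace(rule, "-A", "-D", 1) // "-D FORWARD -m comment ..."
}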
func | (chainName string, rules []Rule, features *Features) []string {
chain := Chain{
Name: chainName,
Rules: rules,
}
return (&chain).RuleHashes(features)
}
func numEmptyStrings(strs []string) int {
count := 0
for _, s := range strs {
if s == "" {
count++
}
}
return count
}
| calculateRuleInsertHashes |
api.go | package web
import (
"encoding/json"
"fmt"
"github.com/co0p/patchy/internal/domain"
"io/ioutil"
"net/http"
)
type ApiHandler struct {
Usecase domain.PatchUsecase
}
type ApiRequest struct {
Repository string `json:"repository"`
OriginBranch string `json:"origin_branch"`
TargetBranch string `json:"target_branch"`
}
type ApiResponse struct {
Repository string `json:"repository"`
OriginBranch string `json:"origin_branch"`
TargetBranch string `json:"target_branch"`
Diff string `json:"diff"`
}
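// ServeHTTP decodes a JSON patch request, validates it and responds with the generated
// diff as JSON. An illustrative exchange (hypothetical repository and branch names):
//
//	POST {"repository": "https://example.com/repo.git", "origin_branch": "main", "target_branch": "feature"}
//	=> 200 {"repository": "https://example.com/repo.git", "origin_branch": "main", "target_branch": "feature", "diff": "..."}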
func (h *ApiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if r.Method != "POST" {
http.Error(w, "only POST allowed", http.StatusMethodNotAllowed)
return
}
bytes, err := ioutil.ReadAll(r.Body)
defer r.Body.Close()
if err != nil {
http.Error(w, "failed to get data", http.StatusBadRequest)
return
}
var request ApiRequest
err = json.Unmarshal(bytes, &request)
if err != nil {
http.Error(w, "failed to parse data", http.StatusBadRequest)
return
}
patchRequest := domain.PatchRequest{
Repository: request.Repository,
OriginBranch: request.OriginBranch,
TargetBranch: request.TargetBranch,
}
if !patchRequest.Valid() {
http.Error(w, "invalid request", http.StatusBadRequest)
return
}
patch, err := h.Usecase.Patch(patchRequest)
if err != nil {
fmt.Errorf("failed to generate patch: %v", err)
http.Error(w, "failed to generate patch", http.StatusBadRequest)
return
}
response := ApiResponse{
Repository: request.Repository,
OriginBranch: request.OriginBranch,
TargetBranch: request.TargetBranch,
Diff: patch.Diff, | }
jsonResponse, err := json.Marshal(response)
if err != nil {
fmt.Errorf("failed to marshal response: %v", err)
http.Error(w, "failed to generate path", http.StatusInternalServerError)
return
}
header := w.Header()
header.Set("Content-Type", "application/json")
w.Write(jsonResponse)
} | |
conf.go | /******************************************
*FileName: conf.go
*Author: Liu han
*Date: 2016-12-9
*Description: read conf file
*******************************************/
package api
import (
"bufio"
"errors"
"os"
"strconv"
"strings"
)
var ConfigFile = "./conf/app.conf"
// var ConfigFile = "./test.conf" //for test
type ConfigInterface interface {
//Set(key, val string) error // support section::key type in given key when using ini type.
String(key string) string // support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same.
Strings(key string) []string //get string slice
Int(key string) (int, error)
Int64(key string) (int64, error)
Bool(key string) (bool, error)
Float(key string) (float64, error)
DefaultString(key string, defaultval string) string // support section::key type in key string when using ini and json type; Int,Int64,Bool,Float,DIY are same.
DefaultStrings(key string, defaultval []string) []string //get string slice
DefaultInt(key string, defaultval int) int
DefaultInt64(key string, defaultval int64) int64
DefaultBool(key string, defaultval bool) bool
DefaultFloat(key string, defaultval float64) float64
//DIY(key string) (interface{}, error)
//GetSection(section string) (map[string]string, error)
//SaveConfigFile(filename string) error
}
type Key struct {
Name string
Value string
}
type Config struct {
File string
Keys map[string]string
}
var AppConfig ConfigInterface
func init() |
func (c *Config) Parse() error {
isEnd := false
f, err := os.Open(c.File)
if err != nil {
return errors.New("open file " + c.File + " failed")
}
defer f.Close()
buf := bufio.NewReader(f)
for {
line, err := buf.ReadString('\n')
if err != nil {
if line != "" {
isEnd = true
} else {
break
}
}
line = strings.TrimSpace(line)
if isCommentOut(line) {
continue
}
firstIndex := strings.Index(line, "=")
if firstIndex < 1 {
continue
} else {
c.Keys[strings.Trim(line[:firstIndex], "\" ")] = strings.Trim(line[firstIndex+1:], "\" ")
}
if isEnd {
break
}
}
return nil
}
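// exampleParsedLine is an illustrative sketch (hypothetical key and value) of how Parse
// splits a `key = "value"` line on the first "=" and trims surrounding quotes/spaces.
func exampleParsedLine() (string, string) {
	line := `httpport = "8080"` // hypothetical conf line
	i := strings.Index(line, "=")
	return strings.Trim(line[:i], "\" "), strings.Trim(line[i+1:], "\" ") // "httpport", "8080"
}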
func isCommentOut(line string) bool {
return strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") || strings.HasPrefix(line, "//") || strings.HasPrefix(line, "*")
}
func (c *Config) String(key string) string {
return c.Keys[key]
}
func (c *Config) Strings(key string) []string {
if c.Keys[key] == "" {
return make([]string, 0)
} else {
return strings.Split(c.Keys[key], " ")
}
}
func (c *Config) Int(key string) (int, error) {
return strconv.Atoi(c.Keys[key])
}
func (c *Config) Int64(key string) (int64, error) {
return strconv.ParseInt(c.Keys[key], 10, 64)
}
func (c *Config) Bool(key string) (bool, error) {
return strconv.ParseBool(c.Keys[key])
}
func (c *Config) Float(key string) (float64, error) {
return strconv.ParseFloat(c.Keys[key], 64)
}
func (c *Config) DefaultString(key string, defaultval string) string {
if c.String(key) == "" {
return defaultval
} else {
return c.String(key)
}
}
func (c *Config) DefaultStrings(key string, defaultval []string) []string {
if len(c.Strings(key)) < 1 {
return defaultval
} else {
return c.Strings(key)
}
}
func (c *Config) DefaultInt(key string, defaultval int) int {
if value, err := c.Int(key); err != nil {
return defaultval
} else {
return value
}
}
func (c *Config) DefaultInt64(key string, defaultval int64) int64 {
if value, err := c.Int64(key); err != nil {
return defaultval
} else {
return value
}
}
func (c *Config) DefaultBool(key string, defaultval bool) bool {
if value, err := c.Bool(key); err != nil {
return defaultval
} else {
return value
}
}
func (c *Config) DefaultFloat(key string, defaultval float64) float64 {
if value, err := c.Float(key); err != nil {
return defaultval
} else {
return value
}
}
| {
config := &Config{ConfigFile, make(map[string]string)}
if err := config.Parse(); err != nil {
panic(err)
}
AppConfig = config
} |
utils.py | import tensorflow as tf
#from tensorflow.python.ops.rnn_cell import *
#from tensorflow.python.ops.rnn_cell_impl import _Linear
from tensorflow.contrib.rnn.python.ops.core_rnn_cell import *
#from tensorflow import keras
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope as vs
#from keras import backend as K
def din_attention(query, facts, attention_size, mask=None, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
print ("query_size mismatch")
query = tf.concat(values = [
query,
query,
], axis=1)
if time_major:
# (T,B,D) => (B,T,D)
facts = array_ops.transpose(facts, [1, 0, 2])
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
query_size = query.get_shape().as_list()[-1]
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
if mask is not None:
mask = tf.equal(mask, tf.ones_like(mask))
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
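# Shape walk-through for din_attention above (illustrative; B = batch size,
# T = sequence length, D = hidden size): query is [B, D] and facts is [B, T, D].
# The tiled/reshaped queries become [B, T, D], din_all is [B, T, 4D], the three
# dense layers reduce it to [B, T, 1], and the reshaped scores are [B, 1, T].
# In 'SUM' mode, matmul of [B, 1, T] by [B, T, D] yields the weighted sum [B, 1, D].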
class VecAttGRUCell(RNNCell):
"""Gated Recurrent Unit cell (cf. http://arxiv.org/abs/1406.1078).
Args:
num_units: int, The number of units in the GRU cell.
activation: Nonlinearity to use. Default: `tanh`.
reuse: (optional) Python boolean describing whether to reuse variables
in an existing scope. If not `True`, and the existing scope already has
the given variables, an error is raised.
kernel_initializer: (optional) The initializer to use for the weight and
projection matrices.
bias_initializer: (optional) The initializer to use for the bias.
"""
def __init__(self,
num_units,
activation=None,
reuse=None,
kernel_initializer=None,
bias_initializer=None):
super(VecAttGRUCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._activation = activation or math_ops.tanh
self._kernel_initializer = kernel_initializer
self._bias_initializer = bias_initializer
self._gate_linear = None
self._candidate_linear = None
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def __call__(self, inputs, state, att_score):
return self.call(inputs, state, att_score)
def call(self, inputs, state, att_score=None):
"""Gated recurrent unit (GRU) with nunits cells."""
if self._gate_linear is None:
bias_ones = self._bias_initializer
if self._bias_initializer is None:
bias_ones = init_ops.constant_initializer(1.0, dtype=inputs.dtype)
with vs.variable_scope("gates"): # Reset gate and update gate.
self._gate_linear = _Linear(
[inputs, state],
2 * self._num_units,
True,
bias_initializer=bias_ones,
kernel_initializer=self._kernel_initializer)
value = math_ops.sigmoid(self._gate_linear([inputs, state]))
r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
r_state = r * state
if self._candidate_linear is None:
with vs.variable_scope("candidate"):
self._candidate_linear = _Linear(
[inputs, r_state],
self._num_units,
True,
bias_initializer=self._bias_initializer,
kernel_initializer=self._kernel_initializer)
c = self._activation(self._candidate_linear([inputs, r_state]))
u = (1.0 - att_score) * u
new_h = u * state + (1 - u) * c
return new_h, new_h
def prelu(_x, scope=''):
"""parametric ReLU activation"""
with tf.variable_scope(name_or_scope=scope, default_name="prelu"):
_alpha = tf.get_variable("prelu_"+scope, shape=_x.get_shape()[-1],
dtype=_x.dtype, initializer=tf.constant_initializer(0.1))
return tf.maximum(0.0, _x) + _alpha * tf.minimum(0.0, _x)
def calc_auc(raw_arr):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
arr = sorted(raw_arr, key=lambda d:d[0], reverse=True)
pos, neg = 0., 0.
for record in arr:
if record[1] == 1.:
pos += 1
else:
neg += 1
fp, tp = 0., 0.
xy_arr = []
for record in arr:
if record[1] == 1.:
tp += 1
else:
fp += 1
xy_arr.append([fp/neg, tp/pos])
auc = 0.
prev_x = 0.
prev_y = 0.
for x, y in xy_arr:
if x != prev_x:
auc += ((x - prev_x) * (y + prev_y) / 2.)
prev_x = x
prev_y = y
return auc
def calc_gauc(raw_arr, nick_index):
"""Summary
Args:
raw_arr (TYPE): Description
Returns:
TYPE: Description
"""
last_index = 0
gauc = 0.
pv_sum = 0
for idx in range(len(nick_index)):
if nick_index[idx] != nick_index[last_index]:
input_arr = raw_arr[last_index:idx]
auc_val=calc_auc(input_arr)
if auc_val >= 0.0:
gauc += auc_val * len(input_arr)
pv_sum += len(input_arr)
else:
pv_sum += len(input_arr)
last_index = idx
return gauc / pv_sum
def attention(query, facts, attention_size, mask, stag='null', mode='LIST', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = array_ops.transpose(facts, [1, 0, 2])
mask = tf.equal(mask, tf.ones_like(mask))
hidden_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
input_size = query.get_shape().as_list()[-1]
# Trainable parameters
w1 = tf.Variable(tf.random_normal([hidden_size, attention_size], stddev=0.1))
w2 = tf.Variable(tf.random_normal([input_size, attention_size], stddev=0.1))
b = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
v = tf.Variable(tf.random_normal([attention_size], stddev=0.1))
with tf.name_scope('v'):
# Applying fully connected layer with non-linear activation to each of the B*T timestamps;
# the shape of `tmp` is (B,T,D)*(D,A)=(B,T,A), where A=attention_size
tmp1 = tf.tensordot(facts, w1, axes=1)
tmp2 = tf.tensordot(query, w2, axes=1)
tmp2 = tf.reshape(tmp2, [-1, 1, tf.shape(tmp2)[-1]])
tmp = tf.tanh((tmp1 + tmp2) + b)
# For each of the timestamps its vector of size A from `tmp` is reduced with `v` vector
v_dot_tmp = tf.tensordot(tmp, v, axes=1, name='v_dot_tmp') # (B,T) shape
key_masks = mask # [B, 1, T]
# key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(v_dot_tmp) * (-2 ** 32 + 1)
v_dot_tmp = tf.where(key_masks, v_dot_tmp, paddings) # [B, 1, T]
alphas = tf.nn.softmax(v_dot_tmp, name='alphas') # (B,T) shape
# Output of (Bi-)RNN is reduced with attention vector; the result has (B,D) shape
#output = tf.reduce_sum(facts * tf.expand_dims(alphas, -1), 1)
output = facts * tf.expand_dims(alphas, -1)
output = tf.reshape(output, tf.shape(facts))
# output = output / (facts.get_shape().as_list()[-1] ** 0.5)
if not return_alphas:
return output
else:
return output, alphas
def din_fcn_attention(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False, forCnn=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
if time_major:
# (T,B,D) => (B,T,D)
facts = array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
query_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, 80, activation=tf.nn.sigmoid, name='f1_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, 40, activation=tf.nn.sigmoid, name='f2_att' + stag)
d_layer_3_all = tf.layers.dense(d_layer_2_all, 1, activation=None, name='f3_att' + stag)
d_layer_3_all = tf.reshape(d_layer_3_all, [-1, 1, tf.shape(facts)[1]])
scores = d_layer_3_all
# Mask
if mask is not None:
# key_masks = tf.sequence_mask(facts_length, tf.shape(facts)[1]) # [B, T]
key_masks = tf.expand_dims(mask, 1) # [B, 1, T]
paddings = tf.ones_like(scores) * (-2 ** 32 + 1)
if not forCnn:
scores = tf.where(key_masks, scores, paddings) # [B, 1, T]
# Scale
# scores = scores / (facts.get_shape().as_list()[-1] ** 0.5)
# Activation
if softmax_stag:
scores = tf.nn.softmax(scores) # [B, 1, T]
# Weighted sum
if mode == 'SUM':
output = tf.matmul(scores, facts) # [B, 1, H]
# output = tf.reshape(output, [-1, tf.shape(facts)[-1]])
else:
scores = tf.reshape(scores, [-1, tf.shape(facts)[1]])
output = facts * tf.expand_dims(scores, -1)
output = tf.reshape(output, tf.shape(facts))
if return_alphas:
return output, scores
return output
def self_attention(facts, ATTENTION_SIZE, mask, stag='null'):
|
def self_all_attention(facts, ATTENTION_SIZE, mask, stag='null'):
if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch,
ATTENTION_SIZE, mask, softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention
def din_fcn_shine(query, facts, attention_size, mask, stag='null', mode='SUM', softmax_stag=1, time_major=False, return_alphas=False):
if isinstance(facts, tuple):
# In case of Bi-RNN, concatenate the forward and the backward RNN outputs.
facts = tf.concat(facts, 2)
if time_major:
# (T,B,D) => (B,T,D)
facts = array_ops.transpose(facts, [1, 0, 2])
# Trainable parameters
mask = tf.equal(mask, tf.ones_like(mask))
facts_size = facts.get_shape().as_list()[-1] # D value - hidden size of the RNN layer
query_size = query.get_shape().as_list()[-1]
query = tf.layers.dense(query, facts_size, activation=None, name='f1_trans_shine' + stag)
query = prelu(query)
queries = tf.tile(query, [1, tf.shape(facts)[1]])
queries = tf.reshape(queries, tf.shape(facts))
din_all = tf.concat([queries, facts, queries-facts, queries*facts], axis=-1)
d_layer_1_all = tf.layers.dense(din_all, facts_size, activation=tf.nn.sigmoid, name='f1_shine_att' + stag)
d_layer_2_all = tf.layers.dense(d_layer_1_all, facts_size, activation=tf.nn.sigmoid, name='f2_shine_att' + stag)
d_layer_2_all = tf.reshape(d_layer_2_all, tf.shape(facts))
output = d_layer_2_all
return output
| if len(facts.get_shape().as_list()) == 2:
facts = tf.expand_dims(facts, 1)
def cond(batch, output, i):
return tf.less(i, tf.shape(batch)[1])
def body(batch, output, i):
self_attention_tmp = din_fcn_attention(batch[:, i, :], batch[:, 0:i+1, :],
ATTENTION_SIZE, mask[:, 0:i+1], softmax_stag=1, stag=stag,
mode='LIST')
self_attention_tmp = tf.reduce_sum(self_attention_tmp, 1)
output = output.write(i, self_attention_tmp)
return batch, output, i + 1
output_ta = tf.TensorArray(dtype=tf.float32,
size=0,
dynamic_size=True,
element_shape=(facts[:, 0, :].get_shape()))
_, output_op, _ = tf.while_loop(cond, body, [facts, output_ta, 0])
self_attention = output_op.stack()
self_attention = tf.transpose(self_attention, perm = [1, 0, 2])
return self_attention |
get_deployment_apm_resource_info_responses.go | // Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
// Code generated by go-swagger; DO NOT EDIT.
package deployments
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"fmt"
"io"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/elastic/cloud-sdk-go/pkg/models"
)
// GetDeploymentApmResourceInfoReader is a Reader for the GetDeploymentApmResourceInfo structure.
type GetDeploymentApmResourceInfoReader struct {
formats strfmt.Registry
}
// ReadResponse reads a server response into the received o.
func (o *GetDeploymentApmResourceInfoReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
switch response.Code() {
case 200:
result := NewGetDeploymentApmResourceInfoOK()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return result, nil
case 404:
result := NewGetDeploymentApmResourceInfoNotFound()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
case 500:
result := NewGetDeploymentApmResourceInfoInternalServerError()
if err := result.readResponse(response, consumer, o.formats); err != nil {
return nil, err
}
return nil, result
default:
return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code())
}
}
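// Note: this follows the usual go-swagger convention -- the 200 case returns
// (result, nil), while the 404 and 500 cases return (nil, result), which works
// because those generated response types implement the error interface.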
// NewGetDeploymentApmResourceInfoOK creates a GetDeploymentApmResourceInfoOK with default headers values
func NewGetDeploymentApmResourceInfoOK() *GetDeploymentApmResourceInfoOK {
return &GetDeploymentApmResourceInfoOK{}
}
/*GetDeploymentApmResourceInfoOK handles this case with default header values.
Standard response.
*/
type GetDeploymentApmResourceInfoOK struct {
Payload *models.ApmResourceInfo
}
func (o *GetDeploymentApmResourceInfoOK) Error() string {
return fmt.Sprintf("[GET /deployments/{deployment_id}/apm/{ref_id}][%d] getDeploymentApmResourceInfoOK %+v", 200, o.Payload)
}
func (o *GetDeploymentApmResourceInfoOK) GetPayload() *models.ApmResourceInfo {
return o.Payload
}
func (o *GetDeploymentApmResourceInfoOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
o.Payload = new(models.ApmResourceInfo)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetDeploymentApmResourceInfoNotFound creates a GetDeploymentApmResourceInfoNotFound with default headers values
func NewGetDeploymentApmResourceInfoNotFound() *GetDeploymentApmResourceInfoNotFound {
return &GetDeploymentApmResourceInfoNotFound{}
}
/*GetDeploymentApmResourceInfoNotFound handles this case with default header values.
The Deployment specified by {deployment_id} cannot be found. (code: `deployments.deployment_not_found`)
*/
type GetDeploymentApmResourceInfoNotFound struct {
/*The error codes associated with the response
*/
XCloudErrorCodes string
Payload *models.BasicFailedReply
}
func (o *GetDeploymentApmResourceInfoNotFound) Error() string {
return fmt.Sprintf("[GET /deployments/{deployment_id}/apm/{ref_id}][%d] getDeploymentApmResourceInfoNotFound %+v", 404, o.Payload)
}
func (o *GetDeploymentApmResourceInfoNotFound) GetPayload() *models.BasicFailedReply {
return o.Payload
}
func (o *GetDeploymentApmResourceInfoNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header x-cloud-error-codes
o.XCloudErrorCodes = response.GetHeader("x-cloud-error-codes")
o.Payload = new(models.BasicFailedReply)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
// NewGetDeploymentApmResourceInfoInternalServerError creates a GetDeploymentApmResourceInfoInternalServerError with default headers values
func NewGetDeploymentApmResourceInfoInternalServerError() *GetDeploymentApmResourceInfoInternalServerError |
/*GetDeploymentApmResourceInfoInternalServerError handles this case with default header values.
We have failed you. (code: `deployments.deployment_resource_no_longer_exists`)
*/
type GetDeploymentApmResourceInfoInternalServerError struct {
/*The error codes associated with the response
*/
XCloudErrorCodes string
Payload *models.BasicFailedReply
}
func (o *GetDeploymentApmResourceInfoInternalServerError) Error() string {
return fmt.Sprintf("[GET /deployments/{deployment_id}/apm/{ref_id}][%d] getDeploymentApmResourceInfoInternalServerError %+v", 500, o.Payload)
}
func (o *GetDeploymentApmResourceInfoInternalServerError) GetPayload() *models.BasicFailedReply {
return o.Payload
}
func (o *GetDeploymentApmResourceInfoInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
// response header x-cloud-error-codes
o.XCloudErrorCodes = response.GetHeader("x-cloud-error-codes")
o.Payload = new(models.BasicFailedReply)
// response payload
if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
return err
}
return nil
}
| {
return &GetDeploymentApmResourceInfoInternalServerError{}
} |
__init__.py | """CouchDB Models"""
from kai.model.blog import Article
from kai.model.documentation import Documentation
from kai.model.generics import Comment, Rating
from kai.model.human import Human
from kai.model.paste import Paste | from kai.model.snippet import Snippet
from kai.model.traceback import Traceback |
|
homography.py |
# coding: utf-8
# In[1]:
import numpy as np
def get_homograph(u,v):
|
def interpolation(img, new_x, new_y):
fx = round(new_x - int(new_x), 2)
fy = round(new_y - int(new_y), 2)
p = np.zeros((3,))
p += (1 - fx) * (1 - fy) * img[int(new_y), int(new_x)]
p += (1 - fx) * fy * img[int(new_y) + 1, int(new_x)]
p += fx * (1 - fy) * img[int(new_y), int(new_x) + 1]
p += fx * fy * img[int(new_y) + 1, int(new_x) + 1]
return p
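# Worked example for the bilinear weights above: with fx = 0.25 and fy = 0.5
# the four neighbours are weighted 0.375, 0.375, 0.125 and 0.125 (in the same
# order as the four accumulations), and the weights always sum to 1.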
def forward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v)
i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for i in range(i1_range):
for j in range(i0_range):
tmp2 = np.dot(matrix, np.array([[j+i0_min, i+i1_min, 1]]).T)
x, y = int(tmp2[0][0] / tmp2[2][0]), int(tmp2[1][0] / tmp2[2][0])
canvas[y][x] = input_image[i+i1_min][j+i0_min]
return canvas
def backward_warping(u,v,input_image,canvas):
matrix = get_homograph(u,v) # v: output, u: input
i0_max = u[0:4,0:1].max()
i0_min = u[0:4,0:1].min()
i1_max = u[0:4,1:2].max()
i1_min = u[0:4,1:2].min()
i0_range = i0_max-i0_min
i1_range = i1_max-i1_min
for j in range(i1_range):
for i in range(i0_range):
new_pos = np.dot(matrix, np.array([[i+i0_min, j+i1_min, 1]]).T)
new_x, new_y = new_pos[0][0] / new_pos[2][0], new_pos[1][0] / new_pos[2][0]
res = interpolation(input_image, new_x, new_y)
canvas[j+i1_min][i+i0_min] = res
return canvas | A = np.array([[u[0][0], u[0][1], 1, 0, 0, 0, -1 * u[0][0] * v[0][0], -1 * u[0][1] * v[0][0]],
[0, 0, 0, u[0][0], u[0][1], 1, -1 * u[0][0] * v[0][1], -1 * u[0][1] * v[0][1]],
[u[1][0], u[1][1], 1, 0, 0, 0, -1 * u[1][0] * v[1][0], -1 * u[1][1] * v[1][0]],
[0, 0, 0, u[1][0], u[1][1], 1, -1 * u[1][0] * v[1][1], -1 * u[1][1] * v[1][1]],
[u[2][0], u[2][1], 1, 0, 0, 0, -1 * u[2][0] * v[2][0], -1 * u[2][1] * v[2][0]],
[0, 0, 0, u[2][0], u[2][1], 1, -1 * u[2][0] * v[2][1], -1 * u[2][1] * v[2][1]],
[u[3][0], u[3][1], 1, 0, 0, 0, -1 * u[3][0] * v[3][0], -1 * u[3][1] * v[3][0]],
[0, 0, 0, u[3][0], u[3][1], 1, -1 * u[3][0] * v[3][1], -1 * u[3][1] * v[3][1]]
])
b = np.array([[v[0][0]],
[v[0][1]],
[v[1][0]],
[v[1][1]],
[v[2][0]],
[v[2][1]],
[v[3][0]],
[v[3][1]]
])
tmp = np.dot(np.linalg.inv(A), b)
H = np.array([[tmp[0][0], tmp[1][0], tmp[2][0]],
[tmp[3][0], tmp[4][0], tmp[5][0]],
[tmp[6][0], tmp[7][0], 1]
])
return H |
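# The block above is the standard DLT construction: fixing h33 = 1 leaves 8
# unknowns, and each of the 4 correspondences u[i] -> v[i] contributes two
# linear equations, giving the 8x8 system A h = b that is solved by direct
# inversion. A becomes singular (and this fails) if any three of the four
# source points are collinear.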
acciones.js | const AddCliente = 'ADD_CLIENTE' | const DeleteProveedor = 'DELETE_PROVEEDOR'
export default {
AddCliente,
EditCliente,
DeleteCliente,
AddProveedor,
EditProveedor,
DeleteProveedor
} | const EditCliente = 'EDIT_CLIENTE'
const DeleteCliente = 'DELETE_CLIENTE'
const AddProveedor = 'ADD_PROVEEDOR'
const EditProveedor = 'EDIT_PROVEEDOR' |
run_mtsv1.py | # encoding=utf8
# This is a temporary fix to import the module from the parent folder
# It will be removed when the package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
import logging
from NiaPy.algorithms.other import MultipleTrajectorySearchV1
from NiaPy.benchmarks.utility import TaskConvPrint, TaskConvPlot, OptimizationType
from margparser import getDictArgs
logging.basicConfig()
logger = logging.getLogger('examples')
logger.setLevel('INFO')
# For reproducible results
class MinMB(object):
def __init__(self):
self.Lower = -11
self.Upper = 11
def function(self):
def evaluate(D, sol):
val = 0.0
for i in range(D): val = val + sol[i] * sol[i]
return val
return evaluate
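# MinMB is the classic sphere benchmark, f(x) = sum_i x_i^2, whose minimum is
# 0 at the origin, so the runs below should converge towards zero.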
class MaxMB(MinMB):
def function(self):
f = MinMB.function(self)
def e(D, sol): return -f(D, sol)
return e
def simple_example(runs=10, D=10, nFES=50000, nGEN=10000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
for i in range(runs):
algo = MultipleTrajectorySearchV1(D=D, nFES=nFES, nGEN=nGEN, n=15, C_a=1, C_r=0.5, optType=optType, benchmark=optFunc())
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def logging_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPrint(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
def plot_example(D=10, nFES=50000, nGEN=100000, seed=None, optType=OptimizationType.MINIMIZATION, optFunc=MinMB, **kn):
task = TaskConvPlot(D=D, nFES=nFES, nGEN=nGEN, optType=optType, benchmark=optFunc())
algo = MultipleTrajectorySearchV1(task=task, n=15, C_a=1, C_r=0.5)
best = algo.run()
logger.info('%s %s' % (best[0], best[1]))
input('Press [enter] to continue')
def getOptType(strtype):
if strtype == 'min': return OptimizationType.MINIMIZATION, MinMB
elif strtype == 'max': return OptimizationType.MAXIMIZATION, MaxMB
else: return None
if __name__ == '__main__':
pargs = getDictArgs(sys.argv[1:])
optType, optFunc = getOptType(pargs.pop('optType', 'min'))
if not pargs['runType']: simple_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'log': logging_example(optType=optType, optFunc=optFunc, **pargs)
elif pargs['runType'] == 'plot': |
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| plot_example(optType=optType, optFunc=optFunc, **pargs) |
stop_action_factory.py | from pyopteryx.factories.loop_action_factories.abstract_loop_action_factory import AbstractLoopActionFactory
from pyopteryx.utils.builder_utils import add_activity_to_task, add_reply_entry
class StopLoopActionFactory(AbstractLoopActionFactory):
def __init__(self, action, xml_cache, input_data, processor, mapping_cache):
super().__init__(action=action, xml_cache=xml_cache, input_data=input_data, processor=processor,
mapping_cache=mapping_cache)
def | (self):
add_activity_to_task(task_activities=self.task_activities,
activity_name=self.activity_name,
host_demand_mean="0.0",
hide_activity=True)
add_reply_entry(processor=self.processor, activity_name=self.activity_name)
| add_action |
index.ts | /**********************************************************************************
* MIT License *
* *
* Copyright (c) 2021 Hyperjump Technology *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all *
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
**********************************************************************************/
import events from '../../events'
import { Notification } from '../../interfaces/notification'
import { Probe } from '../../interfaces/probe'
import type { ServerAlertState } from '../../interfaces/probe-status'
import { ProbeRequestResponse } from '../../interfaces/request'
import validateResponse, {
ValidatedResponse,
} from '../../plugins/validate-response'
import { getEventEmitter } from '../../utils/events'
import { log } from '../../utils/pino'
import { RequestLog } from '../logger'
import { sendAlerts } from '../notification'
import { processThresholds } from '../notification/process-server-status'
import { probing } from './probing'
import { logResponseTime } from '../logger/response-time-log'
// TODO: move this to interface file?
interface ProbeStatusProcessed {
probe: Probe
statuses?: ServerAlertState[]
notifications: Notification[]
validatedResponseStatuses: ValidatedResponse[]
requestIndex: number
}
interface ProbeSendNotification extends Omit<ProbeStatusProcessed, 'statuses'> {
index: number
probeState?: ServerAlertState
}
// Process probe thresholds, then send out notifications/alerts.
async function checkThresholdsAndSendAlert(
data: ProbeStatusProcessed,
requestLog: RequestLog
) {
const {
probe,
statuses,
notifications,
requestIndex,
validatedResponseStatuses,
} = data
const probeSendNotification = async (data: ProbeSendNotification) => {
const eventEmitter = getEventEmitter()
const {
index,
probe,
probeState,
notifications,
requestIndex,
validatedResponseStatuses,
} = data
const statusString = probeState?.state ?? 'UP'
const url = probe.requests[requestIndex].url ?? ''
const validation =
validatedResponseStatuses.find(
(validateResponse: ValidatedResponse) =>
validateResponse.alert.query === probeState?.alertQuery
) || validatedResponseStatuses[index]
eventEmitter.emit(events.probe.notification.willSend, {
probeID: probe.id,
url: url,
probeState: statusString,
validation,
})
if ((notifications?.length ?? 0) > 0) {
await sendAlerts({
probeID: probe.id,
url: url,
probeState: statusString,
notifications: notifications ?? [],
validation,
})
}
}
statuses
?.filter((probeState) => probeState.shouldSendNotification)
?.forEach((probeState, index) => {
probeSendNotification({
index,
probe,
probeState,
notifications,
requestIndex,
validatedResponseStatuses,
}).catch((error: Error) => log.error(error.message))
requestLog.addNotifications(
(notifications ?? []).map((notification) => ({
notification,
type:
probeState?.state === 'DOWN' ? 'NOTIFY-INCIDENT' : 'NOTIFY-RECOVER',
alertQuery: probeState?.alertQuery || '',
}))
)
})
}
/**
 * doProbe sends out the http request(s) for a single probe
 * @param {number} checkOrder the order in which this probe is processed
 * @param {object} probe the probe configuration to run
 * @param {array} notifications the notification channels to alert
 * @param {boolean} verboseLogs whether to store all requests to the database
*/
export async function | (
checkOrder: number,
probe: Probe,
notifications: Notification[],
verboseLogs: boolean
) {
const eventEmitter = getEventEmitter()
const responses = []
for (
let requestIndex = 0;
requestIndex < probe.requests.length;
requestIndex++
) {
const request = probe.requests[requestIndex]
const requestLog = new RequestLog(probe, requestIndex, checkOrder)
try {
// intentionally wait for a request to finish before processing next request in loop
// eslint-disable-next-line no-await-in-loop
const probeRes: ProbeRequestResponse = await probing(request, responses)
logResponseTime(probeRes)
eventEmitter.emit(events.probe.response.received, {
probe,
requestIndex,
response: probeRes,
})
// Add to an array to be accessed by another request
responses.push(probeRes)
requestLog.setResponse(probeRes)
// store request error log
      if ([0, 1, 2, 3, 599].includes(probeRes.status)) {
const errorMessageMap: Record<number, string> = {
0: 'URI not found',
1: 'Connection reset',
2: 'Connection refused',
3: 'Unknown error',
599: 'Request Timed out',
}
requestLog.addError(errorMessageMap[probeRes.status])
}
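      // The values handled above (0, 1, 2, 3, 599) appear to be internal
      // pseudo-status codes for connection-level failures rather than real
      // HTTP status codes, which is why they get explicit error messages.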
// combine global probe alerts with all individual request alerts
const probeAlerts = probe.alerts ?? []
const combinedAlerts = probeAlerts.concat(...(request.alerts || []))
// Responses have been processed and validated
const validatedResponse = validateResponse(combinedAlerts, probeRes)
requestLog.addAlerts(
validatedResponse
.filter((item) => item.isAlertTriggered)
.map((item) => item.alert)
)
// done probing, got some result, process it, check for thresholds and notifications
const statuses = processThresholds({
probe,
requestIndex,
validatedResponse,
})
// Done processing results, check if need to send out alerts
checkThresholdsAndSendAlert(
{
probe,
statuses,
notifications,
requestIndex,
validatedResponseStatuses: validatedResponse,
},
requestLog
).catch((error) => {
requestLog.addError(error.message)
})
// Exit the loop if there is any alert triggered
if (validatedResponse.some((item) => item.isAlertTriggered)) {
break
}
} catch (error) {
requestLog.addError((error as any).message)
break
} finally {
requestLog.print()
if (verboseLogs || requestLog.hasIncidentOrRecovery) {
requestLog.saveToDatabase().catch((error) => log.error(error.message))
}
}
}
}
| doProbe |
getReplicationStorageClassificationMapping.ts | // *** WARNING: this file was generated by the Pulumi SDK Generator. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
import * as pulumi from "@pulumi/pulumi";
import { input as inputs, output as outputs, enums } from "../../types";
import * as utilities from "../../utilities";
/**
* Storage mapping object.
*/
export function getReplicationStorageClassificationMapping(args: GetReplicationStorageClassificationMappingArgs, opts?: pulumi.InvokeOptions): Promise<GetReplicationStorageClassificationMappingResult> {
if (!opts) {
opts = {}
}
if (!opts.version) {
opts.version = utilities.getVersion();
}
return pulumi.runtime.invoke("azure-native:recoveryservices/v20210301:getReplicationStorageClassificationMapping", {
"fabricName": args.fabricName,
"resourceGroupName": args.resourceGroupName,
"resourceName": args.resourceName,
"storageClassificationMappingName": args.storageClassificationMappingName,
"storageClassificationName": args.storageClassificationName,
}, opts);
}
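// A minimal usage sketch (all argument values below are hypothetical):
//
//   const mapping = await getReplicationStorageClassificationMapping({
//       fabricName: "myFabric",
//       resourceGroupName: "myResourceGroup",
//       resourceName: "myVault",
//       storageClassificationMappingName: "myMapping",
//       storageClassificationName: "myClassification",
//   });
//   console.log(mapping.properties);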
export interface GetReplicationStorageClassificationMappingArgs {
/**
* Fabric name.
*/
fabricName: string;
/**
* The name of the resource group where the recovery services vault is present.
*/
resourceGroupName: string;
/**
* The name of the recovery services vault.
*/
resourceName: string;
/**
* Storage classification mapping name. | storageClassificationMappingName: string;
/**
* Storage classification name.
*/
storageClassificationName: string;
}
/**
* Storage mapping object.
*/
export interface GetReplicationStorageClassificationMappingResult {
/**
* Resource Id
*/
readonly id: string;
/**
* Resource Location
*/
readonly location?: string;
/**
* Resource Name
*/
readonly name: string;
/**
* Properties of the storage mapping object.
*/
readonly properties: outputs.recoveryservices.v20210301.StorageClassificationMappingPropertiesResponse;
/**
* Resource Type
*/
readonly type: string;
} | */ |
device.rs | use auxil::ShaderStage;
use hal::{
adapter::MemoryProperties, buffer, device, format, image, memory, pass, pool, pso,
pso::VertexInputRate, query, queue::QueueFamilyId, window,
};
use winapi::{
shared::{dxgi, dxgiformat, dxgitype, minwindef::TRUE, windef::HWND, winerror},
um::{d3d11, d3d11_1, d3d11sdklayers, d3dcommon},
};
use wio::com::ComPtr;
use std::{
fmt, mem,
ops::Range,
ptr,
sync::{Arc, Weak},
};
use parking_lot::{Condvar, Mutex, RwLock};
use crate::{
conv,
debug::{set_debug_name, set_debug_name_with_suffix, verify_debug_ascii},
internal, shader, Backend, Buffer, BufferView, CommandBuffer, CommandPool, ComputePipeline,
DescriptorContent, DescriptorIndex, DescriptorPool, DescriptorSet, DescriptorSetInfo,
DescriptorSetLayout, Fence, Framebuffer, GraphicsPipeline, Image, ImageView, InternalBuffer,
InternalImage, Memory, MultiStageData, PipelineLayout, QueryPool, RawFence,
RegisterAccumulator, RegisterData, RenderPass, ResourceIndex, Sampler, Semaphore, ShaderModule,
SubpassDesc, ViewInfo,
};
//TODO: expose coherent type 0x2 when it's properly supported
const BUFFER_TYPE_MASK: u32 = 0x1 | 0x4;
struct InputLayout {
raw: ComPtr<d3d11::ID3D11InputLayout>,
required_bindings: u32,
max_vertex_bindings: u32,
topology: d3d11::D3D11_PRIMITIVE_TOPOLOGY,
vertex_strides: Vec<u32>,
}
#[derive(Clone)]
pub struct DepthStencilState {
pub raw: ComPtr<d3d11::ID3D11DepthStencilState>,
pub stencil_ref: pso::State<pso::StencilValue>,
pub read_only: bool,
}
pub struct Device {
raw: ComPtr<d3d11::ID3D11Device>,
raw1: Option<ComPtr<d3d11_1::ID3D11Device1>>,
pub(crate) context: ComPtr<d3d11::ID3D11DeviceContext>,
features: hal::Features,
memory_properties: MemoryProperties,
pub(crate) internal: Arc<internal::Internal>,
}
impl fmt::Debug for Device {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.write_str("Device")
}
}
impl Drop for Device {
fn drop(&mut self) {
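        // If the D3D11 debug layer is available, report any objects still
        // alive at device teardown so leaks show up in the debug output.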
if let Ok(debug) = self.raw.cast::<d3d11sdklayers::ID3D11Debug>() {
unsafe {
debug.ReportLiveDeviceObjects(d3d11sdklayers::D3D11_RLDO_DETAIL);
}
}
}
}
unsafe impl Send for Device {}
unsafe impl Sync for Device {}
impl Device {
pub fn new(
device: ComPtr<d3d11::ID3D11Device>,
device1: Option<ComPtr<d3d11_1::ID3D11Device1>>,
context: ComPtr<d3d11::ID3D11DeviceContext>,
features: hal::Features,
downlevel: hal::DownlevelProperties,
memory_properties: MemoryProperties,
feature_level: u32,
) -> Self {
Device {
internal: Arc::new(internal::Internal::new(&device, features, feature_level, downlevel)),
raw: device,
raw1: device1,
context,
features,
memory_properties,
}
}
pub fn as_raw(&self) -> *mut d3d11::ID3D11Device {
self.raw.as_raw()
}
fn create_rasterizer_state(
&self,
rasterizer_desc: &pso::Rasterizer,
multisampling_desc: &Option<pso::Multisampling>,
) -> Result<ComPtr<d3d11::ID3D11RasterizerState>, pso::CreationError> {
let mut rasterizer = ptr::null_mut();
let desc = conv::map_rasterizer_desc(rasterizer_desc, multisampling_desc);
let hr = unsafe {
self.raw
.CreateRasterizerState(&desc, &mut rasterizer as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(rasterizer) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_blend_state(
&self,
blend_desc: &pso::BlendDesc,
multisampling: &Option<pso::Multisampling>,
) -> Result<ComPtr<d3d11::ID3D11BlendState>, pso::CreationError> {
let mut blend = ptr::null_mut();
let desc = conv::map_blend_desc(blend_desc, multisampling);
let hr = unsafe {
self.raw
.CreateBlendState(&desc, &mut blend as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(blend) })
} else {
Err(pso::CreationError::Other)
}
}
fn create_depth_stencil_state(
&self,
depth_desc: &pso::DepthStencilDesc,
) -> Result<DepthStencilState, pso::CreationError> {
let mut depth = ptr::null_mut();
let (desc, stencil_ref, read_only) = conv::map_depth_stencil_desc(depth_desc);
let hr = unsafe {
self.raw
.CreateDepthStencilState(&desc, &mut depth as *mut *mut _ as *mut *mut _)
};
if winerror::SUCCEEDED(hr) {
Ok(DepthStencilState {
raw: unsafe { ComPtr::from_raw(depth) },
stencil_ref,
read_only,
})
} else {
Err(pso::CreationError::Other)
}
}
fn create_input_layout(
&self,
vs: ComPtr<d3dcommon::ID3DBlob>,
vertex_buffers: &[pso::VertexBufferDesc],
attributes: &[pso::AttributeDesc],
input_assembler: &pso::InputAssemblerDesc,
vertex_semantic_remapping: auxil::FastHashMap<u32, Option<(u32, u32)>>,
) -> Result<InputLayout, pso::CreationError> {
let mut layout = ptr::null_mut();
let mut vertex_strides = Vec::new();
let mut required_bindings = 0u32;
let mut max_vertex_bindings = 0u32;
for buffer in vertex_buffers {
required_bindings |= 1 << buffer.binding as u32;
max_vertex_bindings = max_vertex_bindings.max(1u32 + buffer.binding as u32);
while vertex_strides.len() <= buffer.binding as usize {
vertex_strides.push(0);
}
vertex_strides[buffer.binding as usize] = buffer.stride;
}
// See [`shader::introspect_spirv_vertex_semantic_remapping`] for details of why this is needed.
let semantics: Vec<_> = attributes
.iter()
.map(
|attrib| match vertex_semantic_remapping.get(&attrib.location) {
Some(Some((major, minor))) => {
let name = std::borrow::Cow::Owned(format!("TEXCOORD{}_\0", major));
let location = *minor;
(name, location)
}
_ => {
let name = std::borrow::Cow::Borrowed("TEXCOORD\0");
let location = attrib.location;
(name, location)
}
},
)
.collect();
let input_elements = attributes
.iter()
.zip(semantics.iter())
.filter_map(|(attrib, (semantic_name, semantic_index))| {
let buffer_desc = match vertex_buffers
.iter()
.find(|buffer_desc| buffer_desc.binding == attrib.binding)
{
Some(buffer_desc) => buffer_desc,
None => {
// TODO:
// error!("Couldn't find associated vertex buffer description {:?}", attrib.binding);
return Some(Err(pso::CreationError::Other));
}
};
let (slot_class, step_rate) = match buffer_desc.rate {
VertexInputRate::Vertex => (d3d11::D3D11_INPUT_PER_VERTEX_DATA, 0),
VertexInputRate::Instance(divisor) => {
(d3d11::D3D11_INPUT_PER_INSTANCE_DATA, divisor)
}
};
let format = attrib.element.format;
Some(Ok(d3d11::D3D11_INPUT_ELEMENT_DESC {
SemanticName: semantic_name.as_ptr() as *const _, // Semantic name used by SPIRV-Cross
SemanticIndex: *semantic_index,
Format: match conv::map_format(format) {
Some(fm) => fm,
None => {
// TODO:
// error!("Unable to find DXGI format for {:?}", format);
return Some(Err(pso::CreationError::Other));
}
},
InputSlot: attrib.binding as _,
AlignedByteOffset: attrib.element.offset,
InputSlotClass: slot_class,
InstanceDataStepRate: step_rate as _,
}))
})
.collect::<Result<Vec<_>, _>>()?;
let hr = unsafe {
self.raw.CreateInputLayout(
input_elements.as_ptr(),
input_elements.len() as _,
vs.GetBufferPointer(),
vs.GetBufferSize(),
&mut layout as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
let topology = conv::map_topology(input_assembler);
Ok(InputLayout {
raw: unsafe { ComPtr::from_raw(layout) },
required_bindings,
max_vertex_bindings,
topology,
vertex_strides,
})
} else {
error!("CreateInputLayout error 0x{:X}", hr);
Err(pso::CreationError::Other)
}
}
fn create_vertex_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11VertexShader>, pso::CreationError> {
let mut vs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateVertexShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut vs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(vs) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::VERTEX,
String::from("Failed to create a vertex shader"),
))
}
}
fn create_pixel_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11PixelShader>, pso::CreationError> {
let mut ps = ptr::null_mut();
let hr = unsafe {
self.raw.CreatePixelShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut ps as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(ps) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::FRAGMENT,
String::from("Failed to create a pixel shader"),
))
}
}
fn create_geometry_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11GeometryShader>, pso::CreationError> {
let mut gs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateGeometryShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut gs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(gs) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::GEOMETRY,
String::from("Failed to create a geometry shader"),
))
}
}
fn create_hull_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11HullShader>, pso::CreationError> {
let mut hs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateHullShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut hs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(hs) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::HULL,
String::from("Failed to create a hull shader"),
))
}
}
fn create_domain_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11DomainShader>, pso::CreationError> {
let mut ds = ptr::null_mut();
let hr = unsafe {
self.raw.CreateDomainShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut ds as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(ds) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::DOMAIN,
String::from("Failed to create a domain shader"),
))
}
}
fn create_compute_shader(
&self,
blob: ComPtr<d3dcommon::ID3DBlob>,
) -> Result<ComPtr<d3d11::ID3D11ComputeShader>, pso::CreationError> {
let mut cs = ptr::null_mut();
let hr = unsafe {
self.raw.CreateComputeShader(
blob.GetBufferPointer(),
blob.GetBufferSize(),
ptr::null_mut(),
&mut cs as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(cs) })
} else {
Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::COMPUTE,
String::from("Failed to create a compute shader"),
))
}
}
// TODO: fix return type..
fn extract_entry_point(
stage: ShaderStage,
source: &pso::EntryPoint<Backend>,
layout: &PipelineLayout,
features: &hal::Features,
device_feature_level: u32,
) -> Result<Option<ComPtr<d3dcommon::ID3DBlob>>, pso::CreationError> {
// TODO: entrypoint stuff
match *source.module {
ShaderModule::Dxbc(ref _shader) => Err(pso::CreationError::ShaderCreationError(
pso::ShaderStageFlags::ALL,
String::from("DXBC modules are not supported yet"),
)),
ShaderModule::Spirv(ref raw_data) => Ok(shader::compile_spirv_entrypoint(
raw_data,
stage,
source,
layout,
features,
device_feature_level,
)?),
}
}
fn view_image_as_shader_resource(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11ShaderResourceView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
if desc.Format == dxgiformat::DXGI_FORMAT_D32_FLOAT_S8X24_UINT {
desc.Format = dxgiformat::DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS;
}
#[allow(non_snake_case)]
let MostDetailedMip = info.levels.start as _;
#[allow(non_snake_case)]
let MipLevels = (info.levels.end - info.levels.start) as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_SRV {
MostDetailedMip,
MipLevels,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 if info.kind.num_samples() > 1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_SRV {
UnusedField_NothingToDefine: 0,
}
}
image::ViewKind::D2 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::D2Array if info.kind.num_samples() > 1 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_SRV {
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2Array => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_SRV {
MostDetailedMip,
MipLevels,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::Cube => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBE;
*unsafe { desc.u.TextureCube_mut() } = d3d11::D3D11_TEXCUBE_SRV {
MostDetailedMip,
MipLevels,
}
}
image::ViewKind::CubeArray => {
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY;
*unsafe { desc.u.TextureCubeArray_mut() } = d3d11::D3D11_TEXCUBE_ARRAY_SRV {
MostDetailedMip,
MipLevels,
First2DArrayFace: FirstArraySlice,
NumCubes: ArraySize / 6,
}
}
}
let mut srv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateShaderResourceView(
info.resource,
&desc,
&mut srv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(srv) })
} else {
Err(image::ViewCreationError::Unsupported)
}
}
fn view_image_as_unordered_access(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11UnorderedAccessView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
#[allow(non_snake_case)]
let MipSlice = info.levels.start as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_UAV {
MipSlice: info.levels.start as _,
}
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_UAV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_UAV {
MipSlice: info.levels.start as _,
}
}
image::ViewKind::D2Array => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_UAV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_UAV {
MipSlice,
FirstWSlice: FirstArraySlice,
WSize: ArraySize,
}
}
_ => unimplemented!(),
}
let mut uav = ptr::null_mut();
let hr = unsafe {
self.raw.CreateUnorderedAccessView(
info.resource,
&desc,
&mut uav as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(uav) })
} else {
error!("CreateUnorderedAccessView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
pub(crate) fn view_image_as_render_target(
&self,
info: &ViewInfo,
) -> Result<ComPtr<d3d11::ID3D11RenderTargetView>, image::ViewCreationError> {
let mut desc: d3d11::D3D11_RENDER_TARGET_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
#[allow(non_snake_case)]
let MipSlice = info.levels.start as _;
#[allow(non_snake_case)]
let FirstArraySlice = info.layers.start as _;
#[allow(non_snake_case)]
let ArraySize = (info.layers.end - info.layers.start) as _;
match info.view_kind {
image::ViewKind::D1 => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1D;
*unsafe { desc.u.Texture1D_mut() } = d3d11::D3D11_TEX1D_RTV { MipSlice }
}
image::ViewKind::D1Array => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE1DARRAY;
*unsafe { desc.u.Texture1DArray_mut() } = d3d11::D3D11_TEX1D_ARRAY_RTV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
image::ViewKind::D2 => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_RTV {
UnusedField_NothingToDefine: 0,
}
} else {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_RTV { MipSlice }
}
}
image::ViewKind::D2Array => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_RTV {
FirstArraySlice,
ArraySize,
}
} else {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_RTV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
}
image::ViewKind::D3 => {
desc.ViewDimension = d3d11::D3D11_RTV_DIMENSION_TEXTURE3D;
*unsafe { desc.u.Texture3D_mut() } = d3d11::D3D11_TEX3D_RTV {
MipSlice,
FirstWSlice: FirstArraySlice,
WSize: ArraySize,
}
}
_ => unimplemented!(),
}
let mut rtv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateRenderTargetView(
info.resource,
&desc,
&mut rtv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(rtv) })
} else {
error!("CreateRenderTargetView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
fn view_image_as_depth_stencil(
&self,
info: &ViewInfo,
read_only_stencil: Option<bool>,
) -> Result<ComPtr<d3d11::ID3D11DepthStencilView>, image::ViewCreationError> {
#![allow(non_snake_case)]
let MipSlice = info.levels.start as _;
let FirstArraySlice = info.layers.start as _;
let ArraySize = (info.layers.end - info.layers.start) as _;
assert_eq!(info.levels.start + 1, info.levels.end);
assert!(info.layers.end <= info.kind.num_layers());
let mut desc: d3d11::D3D11_DEPTH_STENCIL_VIEW_DESC = unsafe { mem::zeroed() };
desc.Format = info.format;
if let Some(stencil) = read_only_stencil {
desc.Flags = match stencil {
true => d3d11::D3D11_DSV_READ_ONLY_DEPTH | d3d11::D3D11_DSV_READ_ONLY_STENCIL,
false => d3d11::D3D11_DSV_READ_ONLY_DEPTH,
}
}
match info.view_kind {
image::ViewKind::D2 => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DMS;
*unsafe { desc.u.Texture2DMS_mut() } = d3d11::D3D11_TEX2DMS_DSV {
UnusedField_NothingToDefine: 0,
}
} else {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2D;
*unsafe { desc.u.Texture2D_mut() } = d3d11::D3D11_TEX2D_DSV { MipSlice }
}
}
image::ViewKind::D2Array => {
if info.kind.num_samples() > 1 {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY;
*unsafe { desc.u.Texture2DMSArray_mut() } = d3d11::D3D11_TEX2DMS_ARRAY_DSV {
FirstArraySlice,
ArraySize,
}
} else {
desc.ViewDimension = d3d11::D3D11_DSV_DIMENSION_TEXTURE2DARRAY;
*unsafe { desc.u.Texture2DArray_mut() } = d3d11::D3D11_TEX2D_ARRAY_DSV {
MipSlice,
FirstArraySlice,
ArraySize,
}
}
}
_ => unimplemented!(),
}
let mut dsv = ptr::null_mut();
let hr = unsafe {
self.raw.CreateDepthStencilView(
info.resource,
&desc,
&mut dsv as *mut *mut _ as *mut *mut _,
)
};
if winerror::SUCCEEDED(hr) {
Ok(unsafe { ComPtr::from_raw(dsv) })
} else {
error!("CreateDepthStencilView failed: 0x{:x}", hr);
Err(image::ViewCreationError::Unsupported)
}
}
pub(crate) fn create_swapchain_impl(
&self,
config: &window::SwapchainConfig,
window_handle: HWND,
factory: ComPtr<dxgi::IDXGIFactory>,
) -> Result<(ComPtr<dxgi::IDXGISwapChain>, dxgiformat::DXGI_FORMAT), window::SwapchainError>
{
// TODO: use IDXGIFactory2 for >=11.1
// TODO: this function should be able to fail (Result)?
debug!("{:#?}", config);
let non_srgb_format = conv::map_format_nosrgb(config.format).unwrap();
let mut desc = dxgi::DXGI_SWAP_CHAIN_DESC {
BufferDesc: dxgitype::DXGI_MODE_DESC {
Width: config.extent.width,
Height: config.extent.height,
// TODO: should this grab max value of all monitor hz? vsync
// will clamp to current monitor anyways?
RefreshRate: dxgitype::DXGI_RATIONAL {
Numerator: 1,
Denominator: 60,
},
Format: non_srgb_format,
ScanlineOrdering: dxgitype::DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED,
Scaling: dxgitype::DXGI_MODE_SCALING_UNSPECIFIED,
},
SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
Count: 1,
Quality: 0,
},
BufferUsage: dxgitype::DXGI_USAGE_RENDER_TARGET_OUTPUT,
BufferCount: config.image_count,
OutputWindow: window_handle,
// TODO:
Windowed: TRUE,
// TODO:
SwapEffect: dxgi::DXGI_SWAP_EFFECT_DISCARD,
Flags: 0,
};
let dxgi_swapchain = {
let mut swapchain: *mut dxgi::IDXGISwapChain = ptr::null_mut();
let hr = unsafe {
factory.CreateSwapChain(
self.raw.as_raw() as *mut _,
&mut desc as *mut _,
&mut swapchain as *mut *mut _ as *mut *mut _,
)
};
assert_eq!(hr, winerror::S_OK);
unsafe { ComPtr::from_raw(swapchain) }
};
Ok((dxgi_swapchain, non_srgb_format))
}
}
impl device::Device<Backend> for Device {
unsafe fn allocate_memory(
&self,
mem_type: hal::MemoryTypeId,
size: u64,
) -> Result<Memory, device::AllocationError> {
let properties = self.memory_properties.memory_types[mem_type.0].properties;
let host_ptr = if properties.contains(hal::memory::Properties::CPU_VISIBLE) {
let mut data = vec![0u8; size as usize];
let ptr = data.as_mut_ptr();
mem::forget(data);
ptr
} else {
ptr::null_mut()
};
Ok(Memory {
properties,
size,
host_ptr,
local_buffers: Arc::new(RwLock::new(thunderdome::Arena::new())),
})
}
unsafe fn create_command_pool(
&self,
_family: QueueFamilyId,
_create_flags: pool::CommandPoolCreateFlags,
) -> Result<CommandPool, device::OutOfMemory> {
// TODO:
Ok(CommandPool {
device: self.raw.clone(),
device1: self.raw1.clone(),
internal: Arc::clone(&self.internal),
})
}
unsafe fn destroy_command_pool(&self, _pool: CommandPool) {
// automatic
}
unsafe fn create_render_pass<'a, Ia, Is, Id>(
&self,
attachments: Ia,
subpasses: Is,
_dependencies: Id,
) -> Result<RenderPass, device::OutOfMemory>
where
Ia: Iterator<Item = pass::Attachment>,
Is: Iterator<Item = pass::SubpassDesc<'a>>,
{
Ok(RenderPass {
attachments: attachments.collect(),
subpasses: subpasses
.map(|desc| SubpassDesc {
color_attachments: desc.colors.to_vec(),
depth_stencil_attachment: desc.depth_stencil.cloned(),
input_attachments: desc.inputs.to_vec(),
resolve_attachments: desc.resolves.to_vec(),
})
.collect(),
})
}
unsafe fn create_pipeline_layout<'a, Is, Ic>(
&self,
set_layouts: Is,
_push_constant_ranges: Ic,
) -> Result<PipelineLayout, device::OutOfMemory>
where
Is: Iterator<Item = &'a DescriptorSetLayout>,
Ic: Iterator<Item = (pso::ShaderStageFlags, Range<u32>)>,
{
let mut res_offsets = MultiStageData::<RegisterData<RegisterAccumulator>>::default();
let mut sets = Vec::new();
for set_layout in set_layouts {
sets.push(DescriptorSetInfo {
bindings: Arc::clone(&set_layout.bindings),
registers: res_offsets.advance(&set_layout.pool_mapping),
});
}
res_offsets.map_other(|data| {
// These use <= because this tells us the _next_ register, so maximum usage will be equal to the limit.
//
// Leave one slot for push constants
assert!(
data.c.res_index as u32
<= d3d11::D3D11_COMMONSHADER_CONSTANT_BUFFER_API_SLOT_COUNT - 1,
"{} bound constant buffers exceeds limit of {}",
data.c.res_index as u32,
d3d11::D3D11_COMMONSHADER_CONSTANT_BUFFER_API_SLOT_COUNT - 1,
);
assert!(
data.s.res_index as u32 <= d3d11::D3D11_COMMONSHADER_SAMPLER_REGISTER_COUNT,
"{} bound samplers exceeds limit of {}",
data.s.res_index as u32,
d3d11::D3D11_COMMONSHADER_SAMPLER_REGISTER_COUNT,
);
assert!(
data.t.res_index as u32 <= d3d11::D3D11_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT,
"{} bound sampled textures and read-only buffers exceeds limit of {}",
data.t.res_index as u32,
d3d11::D3D11_COMMONSHADER_INPUT_RESOURCE_REGISTER_COUNT,
);
assert!(
data.u.res_index as u32 <= d3d11::D3D11_PS_CS_UAV_REGISTER_COUNT,
"{} bound storage textures and read-write buffers exceeds limit of {}",
data.u.res_index as u32,
d3d11::D3D11_PS_CS_UAV_REGISTER_COUNT,
);
});
Ok(PipelineLayout { sets })
}
unsafe fn create_pipeline_cache(
&self,
_data: Option<&[u8]>,
) -> Result<(), device::OutOfMemory> {
Ok(())
}
unsafe fn get_pipeline_cache_data(&self, _cache: &()) -> Result<Vec<u8>, device::OutOfMemory> {
//empty
Ok(Vec::new())
}
unsafe fn destroy_pipeline_cache(&self, _: ()) {
//empty
}
unsafe fn merge_pipeline_caches<'a, I>(
&self,
_: &mut (),
_: I,
) -> Result<(), device::OutOfMemory>
where
I: Iterator<Item = &'a ()>,
{
//empty
Ok(())
}
unsafe fn create_graphics_pipeline<'a>(
&self,
desc: &pso::GraphicsPipelineDesc<'a, Backend>,
_cache: Option<&()>,
) -> Result<GraphicsPipeline, pso::CreationError> {
let features = &self.features;
let build_shader =
|stage: ShaderStage, source: Option<&pso::EntryPoint<'a, Backend>>| match source {
Some(src) => Self::extract_entry_point(
stage,
src,
desc.layout,
features,
self.internal.device_feature_level,
),
None => Ok(None),
};
let (layout, vs, gs, hs, ds) = match desc.primitive_assembler {
pso::PrimitiveAssemblerDesc::Vertex {
buffers,
attributes,
ref input_assembler,
ref vertex,
ref tessellation,
ref geometry,
} => {
let vertex_semantic_remapping = match vertex.module {
ShaderModule::Spirv(spirv) => {
shader::introspect_spirv_vertex_semantic_remapping(spirv)?
}
_ => unimplemented!(),
};
let vs = build_shader(ShaderStage::Vertex, Some(&vertex))?.unwrap();
let gs = build_shader(ShaderStage::Geometry, geometry.as_ref())?;
let layout = self.create_input_layout(
vs.clone(),
buffers,
attributes,
input_assembler,
vertex_semantic_remapping,
)?;
let vs = self.create_vertex_shader(vs)?;
let gs = if let Some(blob) = gs {
Some(self.create_geometry_shader(blob)?)
} else {
None
};
let (hs, ds) = if let Some(ts) = tessellation {
let hs = build_shader(ShaderStage::Hull, Some(&ts.0))?.unwrap();
let ds = build_shader(ShaderStage::Domain, Some(&ts.1))?.unwrap();
(
Some(self.create_hull_shader(hs)?),
Some(self.create_domain_shader(ds)?),
)
} else {
(None, None)
};
(layout, vs, gs, hs, ds)
}
pso::PrimitiveAssemblerDesc::Mesh { .. } => {
return Err(pso::CreationError::UnsupportedPipeline)
}
};
let ps = build_shader(ShaderStage::Fragment, desc.fragment.as_ref())?;
let ps = if let Some(blob) = ps {
Some(self.create_pixel_shader(blob)?)
} else {
None
};
let rasterizer_state =
self.create_rasterizer_state(&desc.rasterizer, &desc.multisampling)?;
let blend_state = self.create_blend_state(&desc.blender, &desc.multisampling)?;
let depth_stencil_state = Some(self.create_depth_stencil_state(&desc.depth_stencil)?);
match desc.label {
Some(label) if verify_debug_ascii(label) => {
let mut name = label.to_string();
set_debug_name_with_suffix(&blend_state, &mut name, " -- Blend State");
set_debug_name_with_suffix(&rasterizer_state, &mut name, " -- Rasterizer State");
set_debug_name_with_suffix(&layout.raw, &mut name, " -- Input Layout");
if let Some(ref dss) = depth_stencil_state {
set_debug_name_with_suffix(&dss.raw, &mut name, " -- Depth Stencil State");
}
}
_ => {}
}
Ok(GraphicsPipeline {
vs,
gs,
ds,
hs,
ps,
topology: layout.topology,
input_layout: layout.raw,
rasterizer_state,
blend_state,
depth_stencil_state,
baked_states: desc.baked_states.clone(),
required_bindings: layout.required_bindings,
max_vertex_bindings: layout.max_vertex_bindings,
strides: layout.vertex_strides,
})
}
unsafe fn create_compute_pipeline<'a>(
&self,
desc: &pso::ComputePipelineDesc<'a, Backend>,
_cache: Option<&()>,
) -> Result<ComputePipeline, pso::CreationError> {
let features = &self.features;
let build_shader =
|stage: ShaderStage, source: Option<&pso::EntryPoint<'a, Backend>>| match source {
Some(src) => Self::extract_entry_point(
stage,
src,
desc.layout,
features,
self.internal.device_feature_level,
),
None => Ok(None),
};
let cs = build_shader(ShaderStage::Compute, Some(&desc.shader))?.unwrap();
let cs = self.create_compute_shader(cs)?;
Ok(ComputePipeline { cs })
}
unsafe fn create_framebuffer<I>(
&self,
_renderpass: &RenderPass,
_attachments: I,
extent: image::Extent,
) -> Result<Framebuffer, device::OutOfMemory> {
Ok(Framebuffer {
layers: extent.depth as _,
})
}
unsafe fn create_shader_module(
&self,
raw_data: &[u32],
) -> Result<ShaderModule, device::ShaderError> {
Ok(ShaderModule::Spirv(raw_data.into()))
}
unsafe fn create_buffer(
&self,
size: u64,
usage: buffer::Usage,
_sparse: memory::SparseFlags,
) -> Result<Buffer, buffer::CreationError> {
use buffer::Usage;
let mut bind = 0;
if usage.contains(Usage::UNIFORM) {
bind |= d3d11::D3D11_BIND_CONSTANT_BUFFER;
}
if usage.contains(Usage::VERTEX) {
bind |= d3d11::D3D11_BIND_VERTEX_BUFFER;
}
if usage.contains(Usage::INDEX) {
bind |= d3d11::D3D11_BIND_INDEX_BUFFER;
}
// TODO: >=11.1
if usage.intersects(
Usage::UNIFORM_TEXEL | Usage::STORAGE_TEXEL | Usage::TRANSFER_SRC | Usage::STORAGE,
) {
bind |= d3d11::D3D11_BIND_SHADER_RESOURCE;
}
if usage.intersects(Usage::TRANSFER_DST | Usage::STORAGE) {
bind |= d3d11::D3D11_BIND_UNORDERED_ACCESS;
}
// if `D3D11_BIND_CONSTANT_BUFFER` intersects with any other bind flag, we need to handle
// it by creating two buffers. one with `D3D11_BIND_CONSTANT_BUFFER` and one with the rest
let needs_disjoint_cb = bind & d3d11::D3D11_BIND_CONSTANT_BUFFER != 0
&& bind != d3d11::D3D11_BIND_CONSTANT_BUFFER;
if needs_disjoint_cb {
bind ^= d3d11::D3D11_BIND_CONSTANT_BUFFER;
}
fn up_align(x: u64, alignment: u64) -> u64 {
(x + alignment - 1) & !(alignment - 1)
}
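        // e.g. up_align(13, 16) == 16 and up_align(32, 16) == 32; this trick
        // only works for power-of-two alignments, which holds for the 4 and
        // 16 used below.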
        // constant buffer sizes need to be divisible by 16
let size = if usage.contains(Usage::UNIFORM) {
up_align(size, 16)
} else {
up_align(size, 4)
};
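        // No ID3D11Buffer is created here: `raw` stays null and the actual
        // D3D11 resource is only created later in `bind_buffer_memory`, once
        // the memory properties and any initial data are known.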
Ok(Buffer {
internal: InternalBuffer {
raw: ptr::null_mut(),
disjoint_cb: if needs_disjoint_cb {
Some(ptr::null_mut())
} else {
None
},
srv: None,
uav: None,
usage,
debug_name: None,
},
bound_range: 0..0,
local_memory_arena: Weak::new(),
memory_index: None,
is_coherent: false,
memory_ptr: ptr::null_mut(),
bind,
requirements: memory::Requirements {
size,
alignment: 4,
type_mask: BUFFER_TYPE_MASK,
},
})
}
unsafe fn get_buffer_requirements(&self, buffer: &Buffer) -> memory::Requirements {
buffer.requirements
}
unsafe fn bind_buffer_memory(
&self,
memory: &Memory,
offset: u64,
buffer: &mut Buffer,
) -> Result<(), device::BindError> {
debug!(
"usage={:?}, props={:b}",
buffer.internal.usage, memory.properties
);
#[allow(non_snake_case)]
let mut MiscFlags = if buffer.bind
& (d3d11::D3D11_BIND_SHADER_RESOURCE | d3d11::D3D11_BIND_UNORDERED_ACCESS)
!= 0
{
d3d11::D3D11_RESOURCE_MISC_BUFFER_ALLOW_RAW_VIEWS
} else {
0
};
if buffer.internal.usage.contains(buffer::Usage::INDIRECT) {
MiscFlags |= d3d11::D3D11_RESOURCE_MISC_DRAWINDIRECT_ARGS;
}
let initial_data = if memory.host_ptr.is_null() {
None
} else {
Some(d3d11::D3D11_SUBRESOURCE_DATA {
pSysMem: memory.host_ptr.offset(offset as isize) as *const _,
SysMemPitch: 0,
SysMemSlicePitch: 0,
})
};
//TODO: check `memory.properties.contains(memory::Properties::DEVICE_LOCAL)` ?
let raw = {
// device local memory
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
Usage: d3d11::D3D11_USAGE_DEFAULT,
BindFlags: buffer.bind,
CPUAccessFlags: 0,
MiscFlags,
StructureByteStride: if buffer.internal.usage.contains(buffer::Usage::TRANSFER_SRC)
{
4
} else {
0
},
};
let mut raw: *mut d3d11::ID3D11Buffer = ptr::null_mut();
let hr = self.raw.CreateBuffer(
&desc,
initial_data.as_ref().map_or(ptr::null_mut(), |id| id),
&mut raw as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name(&*raw, name);
}
ComPtr::from_raw(raw)
};
let disjoint_cb = if buffer.internal.disjoint_cb.is_some() {
let desc = d3d11::D3D11_BUFFER_DESC {
ByteWidth: buffer.requirements.size as _,
Usage: d3d11::D3D11_USAGE_DEFAULT,
BindFlags: d3d11::D3D11_BIND_CONSTANT_BUFFER,
CPUAccessFlags: 0,
MiscFlags: 0,
StructureByteStride: 0,
};
let mut disjoint_raw: *mut d3d11::ID3D11Buffer = ptr::null_mut();
let hr = self.raw.CreateBuffer(
&desc,
initial_data.as_ref().map_or(ptr::null_mut(), |id| id),
&mut disjoint_raw as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name_with_suffix(&*disjoint_raw, name, " -- Constant Buffer");
}
Some(disjoint_raw)
} else {
None
};
let srv = if buffer.bind & d3d11::D3D11_BIND_SHADER_RESOURCE != 0 {
let mut desc = mem::zeroed::<d3d11::D3D11_SHADER_RESOURCE_VIEW_DESC>();
desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS;
desc.ViewDimension = d3dcommon::D3D11_SRV_DIMENSION_BUFFEREX;
*desc.u.BufferEx_mut() = d3d11::D3D11_BUFFEREX_SRV {
FirstElement: 0,
NumElements: buffer.requirements.size as u32 / 4,
Flags: d3d11::D3D11_BUFFEREX_SRV_FLAG_RAW,
};
let mut srv: *mut d3d11::ID3D11ShaderResourceView = ptr::null_mut();
let hr = self.raw.CreateShaderResourceView(
raw.as_raw() as *mut _,
&desc,
&mut srv as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateShaderResourceView failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name_with_suffix(&*srv, name, " -- SRV");
}
Some(srv)
} else {
None
};
let uav = if buffer.bind & d3d11::D3D11_BIND_UNORDERED_ACCESS != 0 {
let mut desc = mem::zeroed::<d3d11::D3D11_UNORDERED_ACCESS_VIEW_DESC>();
desc.Format = dxgiformat::DXGI_FORMAT_R32_TYPELESS;
desc.ViewDimension = d3d11::D3D11_UAV_DIMENSION_BUFFER;
*desc.u.Buffer_mut() = d3d11::D3D11_BUFFER_UAV {
FirstElement: 0,
NumElements: buffer.requirements.size as u32 / 4,
Flags: d3d11::D3D11_BUFFER_UAV_FLAG_RAW,
};
let mut uav: *mut d3d11::ID3D11UnorderedAccessView = ptr::null_mut();
let hr = self.raw.CreateUnorderedAccessView(
raw.as_raw() as *mut _,
&desc,
&mut uav as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateUnorderedAccessView failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
if let Some(ref mut name) = buffer.internal.debug_name {
set_debug_name_with_suffix(&*uav, name, " -- UAV");
}
Some(uav)
} else {
None
};
let internal = InternalBuffer {
raw: raw.into_raw(),
disjoint_cb,
srv,
uav,
usage: buffer.internal.usage,
debug_name: buffer.internal.debug_name.take(),
};
let range = offset..offset + buffer.requirements.size;
let memory_index = memory.bind_buffer(range.clone(), internal.clone());
buffer.internal = internal;
buffer.is_coherent = memory
.properties
.contains(hal::memory::Properties::COHERENT);
buffer.memory_ptr = memory.host_ptr;
buffer.bound_range = range;
buffer.local_memory_arena = Arc::downgrade(&memory.local_buffers);
buffer.memory_index = Some(memory_index);
Ok(())
}
unsafe fn create_buffer_view(
&self,
_buffer: &Buffer,
_format: Option<format::Format>,
_range: buffer::SubRange,
) -> Result<BufferView, buffer::ViewCreationError> {
unimplemented!()
}
unsafe fn create_image(
&self,
kind: image::Kind,
mip_levels: image::Level,
format: format::Format,
_tiling: image::Tiling,
usage: image::Usage,
_sparse: memory::SparseFlags,
view_caps: image::ViewCapabilities,
) -> Result<Image, image::CreationError> {
let surface_desc = format.base_format().0.desc();
let bytes_per_texel = surface_desc.bits / 8;
let ext = kind.extent();
let size = (ext.width * ext.height * ext.depth) as u64 * bytes_per_texel as u64;
let bind = conv::map_image_usage(usage, surface_desc, self.internal.device_feature_level);
debug!("{:b}", bind);
Ok(Image {
internal: InternalImage {
raw: ptr::null_mut(),
copy_srv: None,
srv: None,
unordered_access_views: Vec::new(),
depth_stencil_views: Vec::new(),
render_target_views: Vec::new(),
debug_name: None,
},
decomposed_format: conv::DecomposedDxgiFormat::UNKNOWN,
kind,
mip_levels,
format,
usage,
view_caps,
bind,
requirements: memory::Requirements {
                size,
alignment: 4,
type_mask: 0x1, // device-local only
},
})
}
unsafe fn get_image_requirements(&self, image: &Image) -> memory::Requirements {
image.requirements
}
unsafe fn get_image_subresource_footprint(
&self,
_image: &Image,
_sub: image::Subresource,
) -> image::SubresourceFootprint {
unimplemented!()
}
unsafe fn bind_image_memory(
&self,
memory: &Memory,
_offset: u64,
image: &mut Image,
) -> Result<(), device::BindError> {
use image::Usage;
use memory::Properties;
let base_format = image.format.base_format();
let format_desc = base_format.0.desc();
let compressed = format_desc.is_compressed();
let depth = image.format.is_depth();
let stencil = image.format.is_stencil();
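        // Map gfx-hal memory properties onto the closest D3D11 usage class:
        //   DEVICE_LOCAL                              -> DEFAULT (GPU only)
        //   DEVICE_LOCAL | CPU_VISIBLE | CPU_CACHED   -> DYNAMIC (CPU write-only)
        //   CPU_VISIBLE | CPU_CACHED                  -> STAGING (CPU read/write, no bind flags)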
let (bind, usage, cpu) = if memory.properties == Properties::DEVICE_LOCAL {
(image.bind, d3d11::D3D11_USAGE_DEFAULT, 0)
} else if memory.properties
== (Properties::DEVICE_LOCAL | Properties::CPU_VISIBLE | Properties::CPU_CACHED)
{
(
image.bind,
d3d11::D3D11_USAGE_DYNAMIC,
d3d11::D3D11_CPU_ACCESS_WRITE,
)
} else if memory.properties == (Properties::CPU_VISIBLE | Properties::CPU_CACHED) {
(
0,
d3d11::D3D11_USAGE_STAGING,
d3d11::D3D11_CPU_ACCESS_READ | d3d11::D3D11_CPU_ACCESS_WRITE,
)
} else {
unimplemented!()
};
let dxgi_format = conv::map_format(image.format).unwrap();
let decomposed = conv::DecomposedDxgiFormat::from_dxgi_format(dxgi_format);
assert!(
memory.host_ptr.is_null(),
"Images can only be allocated from device-local memory"
);
let initial_data_ptr = ptr::null_mut();
let mut resource = ptr::null_mut();
let view_kind = match image.kind {
image::Kind::D1(width, layers) => {
let desc = d3d11::D3D11_TEXTURE1D_DESC {
Width: width,
MipLevels: image.mip_levels as _,
ArraySize: layers as _,
Format: decomposed.typeless,
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: 0,
};
let hr = self.raw.CreateTexture1D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture1D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D1Array
}
image::Kind::D2(width, height, layers, samples) => {
let desc = d3d11::D3D11_TEXTURE2D_DESC {
Width: width,
Height: height,
MipLevels: image.mip_levels as _,
ArraySize: layers as _,
Format: decomposed.typeless,
SampleDesc: dxgitype::DXGI_SAMPLE_DESC {
Count: samples as _,
Quality: 0,
},
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: {
let mut flags = 0;
if image.view_caps.contains(image::ViewCapabilities::KIND_CUBE) {
flags |= d3d11::D3D11_RESOURCE_MISC_TEXTURECUBE;
}
flags
},
};
let hr = self.raw.CreateTexture2D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture2D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D2Array
}
image::Kind::D3(width, height, depth) => {
let desc = d3d11::D3D11_TEXTURE3D_DESC {
Width: width,
Height: height,
Depth: depth,
MipLevels: image.mip_levels as _,
Format: decomposed.typeless,
Usage: usage,
BindFlags: bind,
CPUAccessFlags: cpu,
MiscFlags: 0,
};
let hr = self.raw.CreateTexture3D(
&desc,
initial_data_ptr,
&mut resource as *mut *mut _ as *mut *mut _,
);
if !winerror::SUCCEEDED(hr) {
error!("CreateTexture3D failed: 0x{:x}", hr);
return Err(device::BindError::WrongMemory);
}
image::ViewKind::D3
}
};
let mut unordered_access_views = Vec::new();
if image.usage.contains(Usage::TRANSFER_DST)
&& !compressed
&& !depth
&& self.internal.downlevel.storage_images
{
for mip in 0..image.mip_levels {
let view = ViewInfo {
                    resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
// TODO: we should be using `uav_format` rather than `copy_uav_format`, and share
// the UAVs when the formats are identical
format: decomposed.copy_uav.unwrap(),
levels: mip..(mip + 1),
layers: 0..image.kind.num_layers(),
};
let uav = self
.view_image_as_unordered_access(&view)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(&uav, &format!("{} -- UAV Mip {}", name, mip));
}
unordered_access_views.push(uav);
}
}
let (copy_srv, srv) = if image.usage.contains(image::Usage::TRANSFER_SRC) {
let mut view = ViewInfo {
                resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.copy_srv.unwrap(),
levels: 0..image.mip_levels,
layers: 0..image.kind.num_layers(),
};
let copy_srv = if !compressed {
Some(
self.view_image_as_shader_resource(&view)
.map_err(|_| device::BindError::WrongMemory)?,
)
} else {
None
};
view.format = decomposed.srv.unwrap();
let srv = if !depth && !stencil {
Some(
self.view_image_as_shader_resource(&view)
.map_err(|_| device::BindError::WrongMemory)?,
)
} else {
None
};
(copy_srv, srv)
} else {
(None, None)
};
let mut render_target_views = Vec::new();
if (image.usage.contains(image::Usage::COLOR_ATTACHMENT)
|| image.usage.contains(image::Usage::TRANSFER_DST))
&& !compressed
&& !depth
{
for layer in 0..image.kind.num_layers() {
for mip in 0..image.mip_levels {
let view = ViewInfo {
resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.rtv.unwrap(),
levels: mip..(mip + 1),
layers: layer..(layer + 1),
};
let rtv = self
.view_image_as_render_target(&view)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(
&rtv,
&format!("{} -- RTV Mip {} Layer {}", name, mip, layer),
);
}
render_target_views.push(rtv);
}
}
        }
let mut depth_stencil_views = Vec::new();
if depth {
for layer in 0..image.kind.num_layers() {
for mip in 0..image.mip_levels {
let view = ViewInfo {
resource,
kind: image.kind,
caps: image::ViewCapabilities::empty(),
view_kind,
format: decomposed.dsv.unwrap(),
levels: mip..(mip + 1),
layers: layer..(layer + 1),
};
let dsv = self
.view_image_as_depth_stencil(&view, None)
.map_err(|_| device::BindError::WrongMemory)?;
if let Some(ref name) = image.internal.debug_name {
set_debug_name(
&dsv,
&format!("{} -- DSV Mip {} Layer {}", name, mip, layer),
);
}
depth_stencil_views.push(dsv);
}
}
}
if let Some(ref mut name) = image.internal.debug_name {
set_debug_name(&*resource, name);
if let Some(ref copy_srv) = copy_srv {
set_debug_name_with_suffix(copy_srv, name, " -- Copy SRV");
}
if let Some(ref srv) = srv {
set_debug_name_with_suffix(srv, name, " -- SRV");
}
}
let internal = InternalImage {
raw: resource,
copy_srv,
srv,
unordered_access_views,
depth_stencil_views,
render_target_views,
debug_name: image.internal.debug_name.take(),
};
image.decomposed_format = decomposed;
image.internal = internal;
Ok(())
}
unsafe fn create_image_view(
&self,
image: &Image,
view_kind: image::ViewKind,
format: format::Format,
_swizzle: format::Swizzle,
range: image::SubresourceRange,
) -> Result<ImageView, image::ViewCreationError> {
let is_array = image.kind.num_layers() > 1;
let num_levels = range.resolve_level_count(image.mip_levels);
let num_layers = range.resolve_layer_count(image.kind.num_layers());
let info = ViewInfo {
resource: image.internal.raw,
kind: image.kind,
caps: image.view_caps,
// D3D11 doesn't allow looking at a single slice of an array as a non-array
view_kind: if is_array && view_kind == image::ViewKind::D2 {
image::ViewKind::D2Array
} else if is_array && view_kind == image::ViewKind::D1 {
image::ViewKind::D1Array
} else {
view_kind
},
format: conv::map_format(format).ok_or(image::ViewCreationError::BadFormat(format))?,
levels: range.level_start..range.level_start + num_levels,
layers: range.layer_start..range.layer_start + num_layers,
};
let srv_info = ViewInfo {
format: conv::viewable_format(info.format),
..info.clone()
};
let mut debug_name = image.internal.debug_name.clone();
Ok(ImageView {
subresource: d3d11::D3D11CalcSubresource(
0,
range.layer_start as _,
range.level_start as _,
),
format,
srv_handle: if image.usage.intersects(image::Usage::SAMPLED) {
let srv = self.view_image_as_shader_resource(&srv_info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&srv, name, " -- SRV");
}
Some(srv.into_raw())
} else {
None
},
rtv_handle: if image.usage.contains(image::Usage::COLOR_ATTACHMENT) {
let rtv = self.view_image_as_render_target(&info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&rtv, name, " -- RTV");
}
Some(rtv.into_raw())
} else {
None
},
uav_handle: if image.usage.contains(image::Usage::STORAGE) {
let uav = self.view_image_as_unordered_access(&info)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&uav, name, " -- UAV");
}
Some(uav.into_raw())
} else {
None
},
dsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT) {
let dsv = self.view_image_as_depth_stencil(&info, None)?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&dsv, name, " -- DSV");
}
Some(dsv.into_raw())
} else {
None
},
rodsv_handle: if image.usage.contains(image::Usage::DEPTH_STENCIL_ATTACHMENT)
&& self.internal.downlevel.read_only_depth_stencil
{
let rodsv =
self.view_image_as_depth_stencil(&info, Some(image.format.is_stencil()))?;
if let Some(ref mut name) = debug_name {
set_debug_name_with_suffix(&rodsv, name, " -- DSV");
}
Some(rodsv.into_raw())
} else {
None
},
owned: true,
})
}
unsafe fn create_sampler(
&self,
info: &image::SamplerDesc,
) -> Result<Sampler, device::AllocationError> {
assert!(info.normalized);
let op = match info.comparison {
Some(_) => d3d11::D3D11_FILTER_REDUCTION_TYPE_COMPARISON,
None => d3d11::D3D11_FILTER_REDUCTION_TYPE_STANDARD,
};
let desc = d3d11::D3D11_SAMPLER_DESC {
Filter: conv::map_filter(
info.min_filter,
info.mag_filter,
info.mip_filter,
op,
info.anisotropy_clamp,
),
AddressU: conv::map_wrapping(info.wrap_mode.0),
AddressV: conv::map_wrapping(info.wrap_mode.1),
AddressW: conv::map_wrapping(info.wrap_mode.2),
MipLODBias: info.lod_bias.0,
MaxAnisotropy: info.anisotropy_clamp.map_or(0, |aniso| aniso as u32),
ComparisonFunc: info.comparison.map_or(0, |comp| conv::map_comparison(comp)),
BorderColor: info.border.into(),
MinLOD: info.lod_range.start.0,
MaxLOD: info.lod_range.end.0,
};
let mut sampler = ptr::null_mut();
let hr = self
.raw
.CreateSamplerState(&desc, &mut sampler as *mut *mut _ as *mut *mut _);
        assert!(winerror::SUCCEEDED(hr), "CreateSamplerState failed: 0x{:x}", hr);
Ok(Sampler {
sampler_handle: ComPtr::from_raw(sampler),
})
}
unsafe fn create_descriptor_pool<I>(
&self,
_max_sets: usize,
ranges: I,
_flags: pso::DescriptorPoolCreateFlags,
) -> Result<DescriptorPool, device::OutOfMemory>
where
I: Iterator<Item = pso::DescriptorRangeDesc>,
{
let mut total = RegisterData::default();
for range in ranges {
let content = DescriptorContent::from(range.ty);
total.add_content_many(content, range.count as DescriptorIndex);
}
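        // D3D11 binds registers per shader stage; reserve one copy of each
        // register per stage (VS, HS, DS, GS, PS, CS).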
let max_stages = 6;
let count = total.sum() * max_stages;
Ok(DescriptorPool::with_capacity(count))
}
unsafe fn create_descriptor_set_layout<'a, I, J>(
&self,
layout_bindings: I,
_immutable_samplers: J,
) -> Result<DescriptorSetLayout, device::OutOfMemory>
where
I: Iterator<Item = pso::DescriptorSetLayoutBinding>,
J: Iterator<Item = &'a Sampler>,
{
let mut total = MultiStageData::<RegisterData<_>>::default();
let mut bindings = layout_bindings.collect::<Vec<_>>();
for binding in bindings.iter() {
let content = DescriptorContent::from(binding.ty);
// If this binding is used by the graphics pipeline and is a UAV, it belongs to the "Output Merger"
// stage, so we only put them in the fragment stage to save redundant descriptor allocations.
let stage_flags = if content.contains(DescriptorContent::UAV)
&& binding
.stage_flags
.intersects(pso::ShaderStageFlags::ALL - pso::ShaderStageFlags::COMPUTE)
{
let mut stage_flags = pso::ShaderStageFlags::FRAGMENT;
stage_flags.set(
pso::ShaderStageFlags::COMPUTE,
binding.stage_flags.contains(pso::ShaderStageFlags::COMPUTE),
);
stage_flags
} else {
binding.stage_flags
};
total.add_content_many(content, stage_flags, binding.count as _);
}
bindings.sort_by_key(|a| a.binding);
let accum = total.map_register(|count| RegisterAccumulator {
res_index: *count as ResourceIndex,
});
Ok(DescriptorSetLayout {
bindings: Arc::new(bindings),
pool_mapping: accum.to_mapping(),
})
}
unsafe fn write_descriptor_set<'a, I>(&self, op: pso::DescriptorSetWrite<'a, Backend, I>)
where
I: Iterator<Item = pso::Descriptor<'a, Backend>>,
{
// Get baseline mapping
let mut mapping = op
.set
.layout
.pool_mapping
.map_register(|mapping| mapping.offset);
// Iterate over layout bindings until the first binding is found.
let binding_start = op
.set
.layout
.bindings
.iter()
.position(|binding| binding.binding == op.binding)
.unwrap();
// If we've skipped layout bindings, we need to add them to get the correct binding offset
for binding in &op.set.layout.bindings[..binding_start] {
let content = DescriptorContent::from(binding.ty);
mapping.add_content_many(content, binding.stage_flags, binding.count as _);
}
// We start at the given binding index and array index
let mut binding_index = binding_start;
let mut array_index = op.array_offset;
// If we're skipping array indices in the current binding, we need to add them to get the correct binding offset
if array_index > 0 {
let binding: &pso::DescriptorSetLayoutBinding = &op.set.layout.bindings[binding_index];
let content = DescriptorContent::from(binding.ty);
mapping.add_content_many(content, binding.stage_flags, array_index as _);
}
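        // Hypothetical example: with bindings [0: CBV x1, 1: SRV x3] and a write
        // starting at binding 1, array_offset 2, the adjustments above advance
        // `mapping` past the CBV and the first two SRV slots, so the first
        // descriptor written below lands in the third SRV slot.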
// Iterate over the descriptors, figuring out the corresponding binding, and adding
// it to the set of bindings.
//
// When we hit the end of an array of descriptors and there are still descriptors left
// over, we will spill into writing the next binding.
for descriptor in op.descriptors {
let binding: &pso::DescriptorSetLayoutBinding = &op.set.layout.bindings[binding_index];
let handles = match descriptor {
pso::Descriptor::Buffer(buffer, ref _sub) => RegisterData {
c: match buffer.internal.disjoint_cb {
Some(dj_buf) => dj_buf as *mut _,
None => buffer.internal.raw as *mut _,
},
t: buffer.internal.srv.map_or(ptr::null_mut(), |p| p as *mut _),
u: buffer.internal.uav.map_or(ptr::null_mut(), |p| p as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Image(image, _layout) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.map_or(ptr::null_mut(), |h| h as *mut _),
u: image.uav_handle.map_or(ptr::null_mut(), |h| h as *mut _),
s: ptr::null_mut(),
},
pso::Descriptor::Sampler(sampler) => RegisterData {
c: ptr::null_mut(),
t: ptr::null_mut(),
u: ptr::null_mut(),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::CombinedImageSampler(image, _layout, sampler) => RegisterData {
c: ptr::null_mut(),
t: image.srv_handle.map_or(ptr::null_mut(), |h| h as *mut _),
u: image.uav_handle.map_or(ptr::null_mut(), |h| h as *mut _),
s: sampler.sampler_handle.as_raw() as *mut _,
},
pso::Descriptor::TexelBuffer(_buffer_view) => unimplemented!(),
};
let content = DescriptorContent::from(binding.ty);
if content.contains(DescriptorContent::CBV) {
let offsets = mapping.map_other(|map| map.c);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.c);
            }
if content.contains(DescriptorContent::SRV) {
let offsets = mapping.map_other(|map| map.t);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.t);
            }
if content.contains(DescriptorContent::UAV) {
// If this binding is used by the graphics pipeline and is a UAV, it belongs to the "Output Merger"
// stage, so we only put them in the fragment stage to save redundant descriptor allocations.
let stage_flags = if binding
.stage_flags
.intersects(pso::ShaderStageFlags::ALL - pso::ShaderStageFlags::COMPUTE)
{
let mut stage_flags = pso::ShaderStageFlags::FRAGMENT;
stage_flags.set(
pso::ShaderStageFlags::COMPUTE,
binding.stage_flags.contains(pso::ShaderStageFlags::COMPUTE),
);
stage_flags
} else {
binding.stage_flags
};
let offsets = mapping.map_other(|map| map.u);
op.set.assign_stages(&offsets, stage_flags, handles.u);
            }
if content.contains(DescriptorContent::SAMPLER) {
let offsets = mapping.map_other(|map| map.s);
op.set
.assign_stages(&offsets, binding.stage_flags, handles.s);
            }
mapping.add_content_many(content, binding.stage_flags, 1);
array_index += 1;
if array_index >= binding.count {
// We've run out of array to write to, we should overflow to the next binding.
array_index = 0;
binding_index += 1;
}
}
}
unsafe fn copy_descriptor_set<'a>(&self, _op: pso::DescriptorSetCopy<'a, Backend>) {
unimplemented!()
/*
for offset in 0 .. copy.count {
let (dst_ty, dst_handle_offset, dst_second_handle_offset) = copy
.dst_set
.get_handle_offset(copy.dst_binding + offset as u32);
let (src_ty, src_handle_offset, src_second_handle_offset) = copy
.src_set
.get_handle_offset(copy.src_binding + offset as u32);
assert_eq!(dst_ty, src_ty);
let dst_handle = copy.dst_set.handles.offset(dst_handle_offset as isize);
let src_handle = copy.dst_set.handles.offset(src_handle_offset as isize);
match dst_ty {
pso::DescriptorType::Image {
ty: pso::ImageDescriptorType::Sampled { with_sampler: true }
} => {
let dst_second_handle = copy
.dst_set
.handles
.offset(dst_second_handle_offset as isize);
let src_second_handle = copy
.dst_set
.handles
.offset(src_second_handle_offset as isize);
*dst_handle = *src_handle;
*dst_second_handle = *src_second_handle;
}
_ => *dst_handle = *src_handle,
}
}*/
}
unsafe fn map_memory(
&self,
memory: &mut Memory,
segment: memory::Segment,
) -> Result<*mut u8, device::MapError> {
Ok(memory.host_ptr.offset(segment.offset as isize))
}
unsafe fn unmap_memory(&self, _memory: &mut Memory) {
// persistent mapping FTW
}
unsafe fn flush_mapped_memory_ranges<'a, I>(&self, ranges: I) -> Result<(), device::OutOfMemory>
where
I: Iterator<Item = (&'a Memory, memory::Segment)>,
{
let _scope = debug_scope!(&self.context, "FlushMappedRanges");
// go through every range we wrote to
for (memory, ref segment) in ranges {
let range = memory.resolve(segment);
let _scope = debug_scope!(&self.context, "Range({:?})", range);
memory.flush(&self.context, range);
}
Ok(())
}
unsafe fn invalidate_mapped_memory_ranges<'a, I>(
&self,
ranges: I,
) -> Result<(), device::OutOfMemory>
where
I: Iterator<Item = (&'a Memory, memory::Segment)>,
{
let _scope = debug_scope!(&self.context, "InvalidateMappedRanges");
// go through every range we want to read from
for (memory, ref segment) in ranges {
let range = memory.resolve(segment);
let _scope = debug_scope!(&self.context, "Range({:?})", range);
memory.invalidate(
&self.context,
range,
self.internal.working_buffer.clone(),
self.internal.working_buffer_size,
);
}
Ok(())
}
fn create_semaphore(&self) -> Result<Semaphore, device::OutOfMemory> {
// TODO:
Ok(Semaphore)
}
fn create_fence(&self, signalled: bool) -> Result<Fence, device::OutOfMemory> {
Ok(Arc::new(RawFence {
mutex: Mutex::new(signalled),
condvar: Condvar::new(),
}))
}
unsafe fn reset_fence(&self, fence: &mut Fence) -> Result<(), device::OutOfMemory> {
*fence.mutex.lock() = false;
Ok(())
}
unsafe fn wait_for_fence(
&self,
fence: &Fence,
timeout_ns: u64,
) -> Result<bool, device::WaitError> {
use std::time::{Duration, Instant};
debug!("wait_for_fence {:?} for {} ns", fence, timeout_ns);
let mut guard = fence.mutex.lock();
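        // Vulkan-style timeout semantics: 0 polls the current state, u64::MAX blocks
        // indefinitely, and anything else waits until the deadline and reports a timeout.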
match timeout_ns {
0 => Ok(*guard),
            u64::MAX => {
while !*guard {
fence.condvar.wait(&mut guard);
}
Ok(true)
}
_ => {
                let total = Duration::from_nanos(timeout_ns);
let now = Instant::now();
while !*guard {
let duration = match total.checked_sub(now.elapsed()) {
Some(dur) => dur,
None => return Ok(false),
};
let result = fence.condvar.wait_for(&mut guard, duration);
if result.timed_out() {
return Ok(false);
}
}
Ok(true)
}
}
}
unsafe fn get_fence_status(&self, fence: &Fence) -> Result<bool, device::DeviceLost> |
fn create_event(&self) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn get_event_status(&self, _event: &()) -> Result<bool, device::WaitError> {
unimplemented!()
}
unsafe fn set_event(&self, _event: &mut ()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn reset_event(&self, _event: &mut ()) -> Result<(), device::OutOfMemory> {
unimplemented!()
}
unsafe fn free_memory(&self, mut memory: Memory) {
if !memory.host_ptr.is_null() {
let _vec =
Vec::from_raw_parts(memory.host_ptr, memory.size as usize, memory.size as usize);
// let it drop
memory.host_ptr = ptr::null_mut();
}
for (_, (_range, mut internal)) in memory.local_buffers.write().drain() {
internal.release_resources()
}
}
unsafe fn create_query_pool(
&self,
_query_ty: query::Type,
_count: query::Id,
) -> Result<QueryPool, query::CreationError> {
unimplemented!()
}
unsafe fn destroy_query_pool(&self, _pool: QueryPool) {
unimplemented!()
}
unsafe fn get_query_pool_results(
&self,
_pool: &QueryPool,
_queries: Range<query::Id>,
_data: &mut [u8],
_stride: buffer::Stride,
_flags: query::ResultFlags,
) -> Result<bool, device::WaitError> {
unimplemented!()
}
unsafe fn destroy_shader_module(&self, _shader_lib: ShaderModule) {}
unsafe fn destroy_render_pass(&self, _rp: RenderPass) {
//unimplemented!()
}
unsafe fn destroy_pipeline_layout(&self, _layout: PipelineLayout) {
//unimplemented!()
}
unsafe fn destroy_graphics_pipeline(&self, _pipeline: GraphicsPipeline) {}
unsafe fn destroy_compute_pipeline(&self, _pipeline: ComputePipeline) {}
unsafe fn destroy_framebuffer(&self, _fb: Framebuffer) {}
unsafe fn destroy_buffer(&self, buffer: Buffer) {
let mut internal = buffer.internal;
if internal.raw.is_null() {
return;
}
let arena_arc = match buffer.local_memory_arena.upgrade() {
Some(arena) => arena,
// Memory is destroyed before the buffer, we've already been destroyed.
None => return,
};
let mut arena = arena_arc.write();
let memory_index = buffer.memory_index.expect("Buffer's memory index unset");
// Drop the internal stored by the arena on the floor, it owns nothing.
let _ = arena.remove(memory_index);
// Release all memory owned by this buffer
internal.release_resources();
}
unsafe fn destroy_buffer_view(&self, _view: BufferView) {
//unimplemented!()
}
unsafe fn destroy_image(&self, mut image: Image) {
image.internal.release_resources();
}
unsafe fn destroy_image_view(&self, _view: ImageView) {
//unimplemented!()
}
unsafe fn destroy_sampler(&self, _sampler: Sampler) {}
unsafe fn destroy_descriptor_pool(&self, _pool: DescriptorPool) {
//unimplemented!()
}
unsafe fn destroy_descriptor_set_layout(&self, _layout: DescriptorSetLayout) {
//unimplemented!()
}
unsafe fn destroy_fence(&self, _fence: Fence) {
// unimplemented!()
}
unsafe fn destroy_semaphore(&self, _semaphore: Semaphore) {
//unimplemented!()
}
unsafe fn destroy_event(&self, _event: ()) {
//unimplemented!()
}
fn wait_idle(&self) -> Result<(), device::OutOfMemory> {
Ok(())
// unimplemented!()
}
unsafe fn set_image_name(&self, image: &mut Image, name: &str) {
if !verify_debug_ascii(name) {
return;
}
image.internal.debug_name = Some(name.to_string());
}
unsafe fn set_buffer_name(&self, buffer: &mut Buffer, name: &str) {
if !verify_debug_ascii(name) {
return;
}
buffer.internal.debug_name = Some(name.to_string());
}
unsafe fn set_command_buffer_name(&self, command_buffer: &mut CommandBuffer, name: &str) {
if !verify_debug_ascii(name) {
return;
}
command_buffer.debug_name = Some(name.to_string());
}
unsafe fn set_semaphore_name(&self, _semaphore: &mut Semaphore, _name: &str) {
// TODO
}
unsafe fn set_fence_name(&self, _fence: &mut Fence, _name: &str) {
// TODO
}
unsafe fn set_framebuffer_name(&self, _framebuffer: &mut Framebuffer, _name: &str) {
// TODO
}
unsafe fn set_render_pass_name(&self, _render_pass: &mut RenderPass, _name: &str) {
// TODO
}
unsafe fn set_descriptor_set_name(&self, _descriptor_set: &mut DescriptorSet, _name: &str) {
// TODO
}
unsafe fn set_descriptor_set_layout_name(
&self,
_descriptor_set_layout: &mut DescriptorSetLayout,
_name: &str,
) {
// TODO
}
unsafe fn set_pipeline_layout_name(&self, _pipeline_layout: &mut PipelineLayout, _name: &str) {
// TODO
}
}
| {
Ok(*fence.mutex.lock())
} |
loadelastic-aurora.py | import requests, json, os
import argparse
import pandas as pd
import ijson
import time
# Elasticsearch python libs
from elasticsearch import Elasticsearch
from elasticsearch import helpers
directory = ""
indexName = "aurora-meta2"
typeName = "patient"
THRESHOLD = 10000 # this regulates how much data gets loaded then is processed in a bulk group
PK = "ID"
json_root = "item"
errors = []
def loadit():
|
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-d", required=True, help="dir path to json file(s)")
parser.add_argument("-thres", help="set the batch threshold")
parser.add_argument("-i", help="set the index name")
parser.add_argument("-t", help="set the type")
parser.add_argument("-pk", help="primary key of the record, default 'ID'")
parser.add_argument("-r", help="json root node, default 'item', passing 'NOROOT' will ignore the root item")
args = parser.parse_args()
print("Args:")
print(args)
if args.d:
directory = args.d
if directory[-1] != '/':
directory = directory + '/'
if args.thres:
THRESHOLD = int(args.thres)
print ("Batch threshold: " + str(THRESHOLD))
print(type(THRESHOLD))
if args.i:
indexName = args.i
if args.t:
typeName = args.t
if args.pk:
PK = args.pk
if args.r:
if args.r == "NOROOT":
json_root = "" # ignore the root
else:
json_root = args.r
start = time.time()
loadit()
end = time.time()
print("Elapsed time: {}".format((end-start)))
if len(errors) > 0:
print("The following files failed:")
print(errors)
| es = Elasticsearch([{'host': 'localhost', 'port': '9200'}])
for filename in os.listdir(directory):
if filename.endswith(".json"):
json_filename = directory+filename
print("Loading " + json_filename)
with open(json_filename, 'r') as input_file:
i = 1
batchCtr = 1
bulk_action = []
bulkCount = 0
ij = ijson.items(input_file, json_root)
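                # ijson streams records one at a time, so files larger than RAM
                # can be indexed without a full json.load().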
print(ij)
for rec in ij:
print(rec)
pk = rec['clin'][PK]
print(pk)
bulk = {
"_index" : indexName,
#"_type" : typeName,
"_id" : pk,
"_source" : rec,
}
bulk_action.append(bulk)
i = i + 1
batchCtr = batchCtr + 1
if batchCtr > THRESHOLD:
try:
#print(bulk_action)
bulkCount = bulkCount + batchCtr
rtn_status = helpers.bulk(es, bulk_action)
if rtn_status:
print(rtn_status)
#print ('Imported data ' + str(bulkCount-1) + ' successfully from ' + json_filename)
batchCtr = 1
bulk_action = []
except Exception as ex:
print ("Loading failed for " + json_filename)
errors.append(json_filename)
print ('Error:' + str(ex))
#print ("Loading failed!")
#pass
                if len(bulk_action) > 0:  # flush the final partial batch
try:
rtn_status = helpers.bulk(es, bulk_action)
if rtn_status:
print(rtn_status)
#print ('Imported data ' + str(i-1) + ' successfully from ' + json_filename)
batchCtr = 1
bulk_action = []
except Exception as ex:
print ('Error:' + str(ex))
print ("Loading failed for " + json_filename)
errors.append(json_filename)
#pass |
pager.go | package utils
import (
"bytes"
"fmt"
"math"
"strings"
)
type Pager struct {
Page int
Totalnum int
Pagesize int
urlpath string
urlquery string
nopath bool
}
func NewPager(page, totalnum, pagesize int, url string, nopath ...bool) *Pager {
p := new(Pager)
p.Page = page
p.Totalnum = totalnum
p.Pagesize = pagesize
arr := strings.Split(url, "?")
p.urlpath = arr[0]
if len(arr) > 1 {
p.urlquery = "?" + arr[1]
} else {
p.urlquery = ""
}
if len(nopath) > 0 {
p.nopath = nopath[0]
} else {
p.nopath = false
}
return p
}
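// Example usage (hypothetical values):
//
//	p := NewPager(2, 95, 10, "/posts?tag=go", true)
//	html := p.ToString() // pagination links for pages 1-10, page 2 highlighted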
func (this *Pager) url(page int) string {
	if this.nopath { // query-string mode: append page as a URL parameter instead of a path segment
if this.urlquery != "" {
return fmt.Sprintf("%s%s&page=%d", this.urlpath, this.urlquery, page)
} else {
return fmt.Sprintf("%s?page=%d", this.urlpath, page)
}
} else {
return fmt.Sprintf("%s/page/%d%s", this.urlpath, page, this.urlquery)
}
}
func (this *Pager) ToString() string {
if this.Totalnum <= this.Pagesize {
return ""
}
var buf bytes.Buffer
var from, to, linknum, offset, totalpage int
offset = 5
linknum = 10
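	// Show at most linknum page links, keeping the current page roughly
	// offset links from the left edge of the window.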
totalpage = int(math.Ceil(float64(this.Totalnum) / float64(this.Pagesize)))
if totalpage < linknum {
from = 1
to = totalpage
} else {
from = this.Page - offset
		to = from + linknum - 1 // inclusive window of linknum pages
if from < 1 {
from = 1
to = from + linknum - 1
} else if to > totalpage {
to = totalpage
from = totalpage - linknum + 1
}
}
if this.Page > 1 {
buf.WriteString(fmt.Sprintf("<a class=\"layui-laypage-prev\" href=\"%s\">上一页</a></li>", this.url(this.Page-1)))
} else {
buf.WriteString("<span>上一页</span>")
}
if this.Page > linknum {
buf.WriteString(fmt.Sprintf("<a href=\"%s\" class=\"laypage_first\">1...</a>", this.url(1)))
}
for i := from; i <= to; i++ {
if i == this.Page {
buf.WriteString(fmt.S | (fmt.Sprintf("<a href=\"%s\">%d</a>", this.url(i), i))
}
}
if totalpage > to {
buf.WriteString(fmt.Sprintf("<a class=\"layui-laypage-last\" href=\"%s\">末页</a>", this.url(totalpage)))
}
if this.Page < totalpage {
buf.WriteString(fmt.Sprintf("<a class=\"layui-laypage-next\" href=\"%s\">下一页</a></li>", this.url(this.Page+1)))
} else {
buf.WriteString(fmt.Sprintf("<span>下一页</span>"))
}
return buf.String()
}
| printf("<span class=\"layui-laypage-curr\"><em class=\"layui-laypage-em\"></em><em>%d</em></span>", i))
} else {
buf.WriteString |
damage_digits_spawner.rs | use bevy::{
math::Vec3,
prelude::{
AssetServer, Assets, Commands, ComputedVisibility, Entity, GlobalTransform, Handle,
Transform, Visibility,
},
render::primitives::Aabb,
};
use crate::{
components::{ActiveMotion, DamageDigits},
render::{DamageDigitMaterial, DamageDigitRenderData},
zmo_asset_loader::ZmoAsset,
};
pub struct DamageDigitsSpawner {
pub texture_damage: Handle<DamageDigitMaterial>,
pub texture_damage_player: Handle<DamageDigitMaterial>,
pub texture_miss: Handle<DamageDigitMaterial>,
pub motion: Handle<ZmoAsset>,
}
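// The spawner caches the digit materials and hit motion once at load time, so
// each spawned damage number reuses them via cheap weak handle clones.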
impl DamageDigitsSpawner {
pub fn load(
asset_server: &AssetServer,
damage_digit_materials: &mut Assets<DamageDigitMaterial>,
) -> Self {
Self {
texture_damage: damage_digit_materials.add(DamageDigitMaterial {
texture: asset_server.load("3DDATA/EFFECT/SPECIAL/DIGITNUMBER01.DDS.rgb_texture"),
}),
texture_damage_player: damage_digit_materials.add(DamageDigitMaterial {
texture: asset_server.load("3DDATA/EFFECT/SPECIAL/DIGITNUMBER02.DDS.rgb_texture"),
}),
texture_miss: damage_digit_materials.add(DamageDigitMaterial {
texture: asset_server.load("3DDATA/EFFECT/SPECIAL/DIGITNUMBERMISS.DDS.rgb_texture"),
}),
motion: asset_server.load("3DDATA/EFFECT/SPECIAL/HIT_FIGURE_01.ZMO"),
}
}
pub fn | (
&self,
commands: &mut Commands,
damage: u32,
is_damage_player: bool,
model_height: f32,
) -> Option<Entity> {
Some(
commands
.spawn_bundle((
DamageDigits {
damage,
model_height,
},
DamageDigitRenderData::new(4),
if damage == 0 {
self.texture_miss.clone_weak()
} else if is_damage_player {
self.texture_damage_player.clone_weak()
} else {
self.texture_damage.clone_weak()
},
ActiveMotion::new_once(self.motion.clone_weak()),
Transform::from_translation(Vec3::new(0.0, 0.0, 0.0)),
GlobalTransform::default(),
Aabb::default(),
Visibility::default(),
ComputedVisibility::default(),
))
.id(),
)
}
}
| spawn |
test_deprecations.py | import sqlalchemy as sa
from sqlalchemy import and_
from sqlalchemy import event
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy.ext.declarative import comparable_using
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import aliased
from sqlalchemy.orm import AttributeExtension
from sqlalchemy.orm import attributes
from sqlalchemy.orm import collections
from sqlalchemy.orm import column_property
from sqlalchemy.orm import comparable_property
from sqlalchemy.orm import composite
from sqlalchemy.orm import configure_mappers
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import create_session
from sqlalchemy.orm import defer
from sqlalchemy.orm import deferred
from sqlalchemy.orm import EXT_CONTINUE
from sqlalchemy.orm import identity
from sqlalchemy.orm import instrumentation
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.orm import mapper
from sqlalchemy.orm import MapperExtension
from sqlalchemy.orm import PropComparator
from sqlalchemy.orm import relationship
from sqlalchemy.orm import Session
from sqlalchemy.orm import SessionExtension
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import synonym
from sqlalchemy.orm import undefer
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.orm.collections import collection
from sqlalchemy.orm.util import polymorphic_union
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import assertions
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.schema import Column
from sqlalchemy.testing.schema import Table
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.util.compat import pypy
from . import _fixtures
from .inheritance import _poly_fixtures
from .test_options import PathTest as OptionsPathTest
from .test_transaction import _LocalFixture
class DeprecationWarningsTest(fixtures.DeclarativeMappedTest):
run_setup_classes = "each"
run_setup_mappers = "each"
run_define_tables = "each"
run_create_tables = None
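    # Mappers, classes and tables are rebuilt for every test, since each test
    # installs its own (deprecated) extensions on fresh mappings.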
def test_attribute_extension(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
def remove(self, obj, value, initiator):
pass
def set(self, obj, value, oldvalue, initiator):
pass
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
foo = column_property(
Column("q", Integer), extension=SomeExtension()
)
with assertions.expect_deprecated(
"AttributeExtension.append is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.remove is deprecated. The "
"AttributeExtension class will be removed in a future release.",
"AttributeExtension.set is deprecated. The "
"AttributeExtension class will be removed in a future release.",
):
configure_mappers()
def test_attribute_extension_parameter(self):
class SomeExtension(AttributeExtension):
def append(self, obj, value, initiator):
pass
with assertions.expect_deprecated(
".*The relationship.extension parameter will be removed in a "
"future release."
):
relationship("Bar", extension=SomeExtension)
with assertions.expect_deprecated(
".*The column_property.extension parameter will be removed in a "
"future release."
):
column_property(Column("q", Integer), extension=SomeExtension)
with assertions.expect_deprecated(
".*The composite.extension parameter will be removed in a "
"future release."
):
composite("foo", extension=SomeExtension)
def test_session_extension(self):
class SomeExtension(SessionExtension):
def after_commit(self, session):
pass
def after_rollback(self, session):
pass
def before_flush(self, session, flush_context, instances):
pass
with assertions.expect_deprecated(
".*The Session.extension parameter will be removed",
"SessionExtension.after_commit is deprecated. "
"The SessionExtension class",
"SessionExtension.before_flush is deprecated. "
"The SessionExtension class",
"SessionExtension.after_rollback is deprecated. "
"The SessionExtension class",
):
Session(extension=SomeExtension())
def test_mapper_extension(self):
class SomeExtension(MapperExtension):
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
pass
with assertions.expect_deprecated(
"MapperExtension.init_instance is deprecated. "
"The MapperExtension class",
"MapperExtension.init_failed is deprecated. "
"The MapperExtension class",
".*The mapper.extension parameter will be removed",
):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
__mapper_args__ = {"extension": SomeExtension()}
def test_session_weak_identity_map(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
s = Session(weak_identity_map=True)
is_(s._identity_cls, identity.WeakInstanceDict)
with assertions.expect_deprecated(
"The Session.weak_identity_map parameter as well as"
):
s = Session(weak_identity_map=False)
is_(s._identity_cls, identity.StrongInstanceDict)
s = Session()
is_(s._identity_cls, identity.WeakInstanceDict)
def test_session_prune(self):
s = Session()
with assertions.expect_deprecated(
r"The Session.prune\(\) method is deprecated along with "
"Session.weak_identity_map"
):
s.prune()
def test_session_enable_transaction_accounting(self):
with assertions.expect_deprecated(
"the Session._enable_transaction_accounting parameter is "
"deprecated"
):
Session(_enable_transaction_accounting=False)
def test_session_is_modified(self):
class Foo(self.DeclarativeBasic):
__tablename__ = "foo"
id = Column(Integer, primary_key=True)
f1 = Foo()
s = Session()
with assertions.expect_deprecated(
"The Session.is_modified.passive flag is deprecated"
):
# this flag was for a long time documented as requiring
# that it be set to True, so we've changed the default here
# so that the warning emits
s.is_modified(f1, passive=True)
class DeprecatedAccountingFlagsTest(_LocalFixture):
def test_rollback_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.expire_all()
assert u1.name == "edward"
def test_commit_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = sessionmaker(_enable_transaction_accounting=False)()
u1 = User(name="ed")
sess.add(u1)
sess.commit()
u1.name = "edwardo"
sess.rollback()
testing.db.execute(
users.update(users.c.name == "ed").values(name="edward")
)
assert u1.name == "edwardo"
sess.commit()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("edwardo",)
]
assert u1.name == "edwardo"
sess.delete(u1)
sess.commit()
def test_preflush_no_accounting(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The Session._enable_transaction_accounting parameter"
):
sess = Session(
_enable_transaction_accounting=False,
autocommit=True,
autoflush=False,
)
u1 = User(name="ed")
sess.add(u1)
sess.flush()
sess.begin()
u1.name = "edwardo"
u2 = User(name="some other user")
sess.add(u2)
sess.rollback()
sess.begin()
assert testing.db.execute(select([users.c.name])).fetchall() == [
("ed",)
]
class DeprecatedSessionFeatureTest(_fixtures.FixtureTest):
run_inserts = None
def test_fast_discard_race(self):
# test issue #4068
users, User = self.tables.users, self.classes.User
mapper(User, users)
with testing.expect_deprecated(".*identity map are deprecated"):
sess = Session(weak_identity_map=False)
u1 = User(name="u1")
sess.add(u1)
sess.commit()
u1_state = u1._sa_instance_state
sess.identity_map._dict.pop(u1_state.key)
ref = u1_state.obj
u1_state.obj = lambda: None
u2 = sess.query(User).first()
u1_state._cleanup(ref)
u3 = sess.query(User).first()
is_(u2, u3)
u2_state = u2._sa_instance_state
assert sess.identity_map.contains_state(u2._sa_instance_state)
ref = u2_state.obj
u2_state.obj = lambda: None
u2_state._cleanup(ref)
assert not sess.identity_map.contains_state(u2._sa_instance_state)
def test_is_modified_passive_on(self):
User, Address = self.classes.User, self.classes.Address
users, addresses = self.tables.users, self.tables.addresses
mapper(User, users, properties={"addresses": relationship(Address)})
mapper(Address, addresses)
s = Session()
u = User(name="fred", addresses=[Address(email_address="foo")])
s.add(u)
s.commit()
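        # touch the id attribute so it is loaded; the SQL counts below assume
        # no lazy loads are pending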
u.id
def go():
assert not s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
u.name = "newname"
def go():
assert s.is_modified(u, passive=True)
with testing.expect_deprecated(
".*Session.is_modified.passive flag is deprecated "
):
self.assert_sql_count(testing.db, go, 0)
class StrongIdentityMapTest(_fixtures.FixtureTest):
run_inserts = None
def _strong_ident_fixture(self):
with testing.expect_deprecated(
".*Session.weak_identity_map parameter as well as the"
):
sess = create_session(weak_identity_map=False)
def prune():
with testing.expect_deprecated(".*Session.prune"):
return sess.prune()
return sess, prune
def _event_fixture(self):
session = create_session()
@event.listens_for(session, "pending_to_persistent")
@event.listens_for(session, "deleted_to_persistent")
@event.listens_for(session, "detached_to_persistent")
@event.listens_for(session, "loaded_as_persistent")
def strong_ref_object(sess, instance):
if "refs" not in sess.info:
sess.info["refs"] = refs = set()
else:
refs = sess.info["refs"]
refs.add(instance)
@event.listens_for(session, "persistent_to_detached")
@event.listens_for(session, "persistent_to_deleted")
@event.listens_for(session, "persistent_to_transient")
def deref_object(sess, instance):
sess.info["refs"].discard(instance)
def prune():
if "refs" not in session.info:
return 0
sess_size = len(session.identity_map)
session.info["refs"].clear()
gc_collect()
session.info["refs"] = set(
s.obj() for s in session.identity_map.all_states()
)
return sess_size - len(session.identity_map)
return session, prune
def test_strong_ref_imap(self):
self._test_strong_ref(self._strong_ident_fixture)
def test_strong_ref_events(self):
self._test_strong_ref(self._event_fixture)
def _test_strong_ref(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
# save user
s.add(User(name="u1"))
s.flush()
user = s.query(User).one()
user = None
print(s.identity_map)
gc_collect()
assert len(s.identity_map) == 1
user = s.query(User).one()
assert not s.identity_map._modified
user.name = "u2"
assert s.identity_map._modified
s.flush()
eq_(users.select().execute().fetchall(), [(user.id, "u2")])
def test_prune_imap(self):
self._test_prune(self._strong_ident_fixture)
def test_prune_events(self):
self._test_prune(self._event_fixture)
@testing.fails_if(lambda: pypy, "pypy has a real GC")
@testing.fails_on("+zxjdbc", "http://www.sqlalchemy.org/trac/ticket/1473")
def _test_prune(self, fixture):
s, prune = fixture()
users, User = self.tables.users, self.classes.User
mapper(User, users)
for o in [User(name="u%s" % x) for x in range(10)]:
s.add(o)
# o is still live after this loop...
self.assert_(len(s.identity_map) == 0)
eq_(prune(), 0)
s.flush()
gc_collect()
eq_(prune(), 9)
# o is still in local scope here, so still present
self.assert_(len(s.identity_map) == 1)
id_ = o.id
del o
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
u.name = "squiznart"
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
s.add(User(name="x"))
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
s.flush()
self.assert_(len(s.identity_map) == 1)
eq_(prune(), 1)
self.assert_(len(s.identity_map) == 0)
u = s.query(User).get(id_)
s.delete(u)
del u
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 1)
s.flush()
eq_(prune(), 0)
self.assert_(len(s.identity_map) == 0)
class DeprecatedQueryTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = "once"
run_inserts = "once"
run_deletes = None
@classmethod
def setup_mappers(cls):
cls._setup_stock_mapping()
@classmethod
def _expect_implicit_subquery(cls):
return assertions.expect_deprecated(
"Implicit coercion of SELECT and textual SELECT constructs into "
r"FROM clauses is deprecated; please call \.subquery\(\) on any "
"Core select or ORM Query object in order to produce a "
"subquery object."
)
def test_via_textasfrom_select_from(self):
User = self.classes.User
s = create_session()
with self._expect_implicit_subquery():
eq_(
s.query(User)
.select_from(
text("select * from users").columns(
id=Integer, name=String
)
)
.order_by(User.id)
.all(),
[User(id=7), User(id=8), User(id=9), User(id=10)],
)
def test_query_as_scalar(self):
User = self.classes.User
s = Session()
with assertions.expect_deprecated(
r"The Query.as_scalar\(\) method is deprecated and will "
"be removed in a future release."
):
s.query(User).as_scalar()
def test_select_entity_from_crit(self):
User, users = self.classes.User, self.tables.users
sel = users.select()
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.filter(User.id.in_([7, 8]))
.all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_select_entity_from_select(self):
User, users = self.classes.User, self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User.name).select_entity_from(
users.select().where(users.c.id > 5)
),
"SELECT anon_1.name AS anon_1_name FROM "
"(SELECT users.id AS id, users.name AS name FROM users "
"WHERE users.id > :id_1) AS anon_1",
)
def test_select_entity_from_q_statement(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_entity_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
"FROM (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE anon_1.name = :name_1",
)
def test_select_from_q_statement_no_aliasing(self):
User = self.classes.User
sess = create_session()
q = sess.query(User)
with self._expect_implicit_subquery():
q = sess.query(User).select_from(q.statement)
self.assert_compile(
q.filter(User.name == "ed"),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, (SELECT users.id AS id, users.name AS name FROM "
"users) AS anon_1 WHERE users.name = :name_1",
)
def test_from_alias_three(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(addresses)
.select(
use_labels=True, order_by=[text("ulist.id"), addresses.c.id]
)
)
sess = create_session()
# better way. use select_entity_from()
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses"))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_from_alias_four(self):
User, addresses, users = (
self.classes.User,
self.tables.addresses,
self.tables.users,
)
sess = create_session()
# same thing, but alias addresses, so that the adapter
# generated by select_entity_from() is wrapped within
# the adapter created by contains_eager()
adalias = addresses.alias()
query = (
users.select(users.c.id == 7)
.union(users.select(users.c.id > 7))
.alias("ulist")
.outerjoin(adalias)
.select(use_labels=True, order_by=[text("ulist.id"), adalias.c.id])
)
def go():
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(query)
.options(contains_eager("addresses", alias=adalias))
.all()
)
assert self.static.user_address_result == result
self.assert_sql_count(testing.db, go, 1)
def test_select(self):
users = self.tables.users
sess = create_session()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(users)
.select_entity_from(users.select())
.with_labels()
.statement,
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users, "
"(SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1",
)
def test_join(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
# mapper(User, users, properties={"addresses": relationship(Address)})
# mapper(Address, addresses)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join("addresses")
.add_entity(Address)
.order_by(User.id)
.order_by(Address.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="[email protected]", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=4),
),
],
)
adalias = aliased(Address)
with self._expect_implicit_subquery():
result = (
sess.query(User)
.select_entity_from(sel)
.join(adalias, "addresses")
.add_entity(adalias)
.order_by(User.id)
.order_by(adalias.id)
.all()
)
eq_(
result,
[
(
User(name="jack", id=7),
Address(user_id=7, email_address="[email protected]", id=1),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=2),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=3),
),
(
User(name="ed", id=8),
Address(user_id=8, email_address="[email protected]", id=4),
),
],
)
def test_more_joins(self):
(users, Keyword, User) = (
self.tables.users,
self.classes.Keyword,
self.classes.User,
)
sess = create_session()
sel = users.select(users.c.id.in_([7, 8]))
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords")
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.select_entity_from(sel)
.join("orders", "items", "keywords", aliased=True)
.filter(Keyword.name.in_(["red", "big", "round"]))
.all(),
[User(name="jack", id=7)],
)
def test_join_no_order_by(self):
User, users = self.classes.User, self.tables.users
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with self._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_replace_with_eager(self):
users, Address, User = (
self.tables.users,
self.classes.Address,
self.classes.User,
)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)
.all(),
[
User(id=7, addresses=[Address(id=1)]),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.filter(User.id == 8)
.order_by(User.id)
.all(),
[
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
)
],
)
self.assert_sql_count(testing.db, go, 1)
sess.expunge_all()
def go():
with self._expect_implicit_subquery():
eq_(
sess.query(User)
.options(joinedload("addresses"))
.select_entity_from(sel)
.order_by(User.id)[1],
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
),
)
self.assert_sql_count(testing.db, go, 1)
def test_onclause_conditional_adaption(self):
Item, Order, orders, order_items, User = (
self.classes.Item,
self.classes.Order,
self.tables.orders,
self.tables.order_items,
self.classes.User,
)
sess = Session()
oalias = orders.select()
with self._expect_implicit_subquery():
self.assert_compile(
sess.query(User)
.join(oalias, User.orders)
.join(
Item,
and_(
Order.id == order_items.c.order_id,
order_items.c.item_id == Item.id,
),
from_joinpoint=True,
),
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users JOIN "
"(SELECT orders.id AS id, orders.user_id AS user_id, "
"orders.address_id AS address_id, orders.description "
"AS description, orders.isopen AS isopen FROM orders) "
"AS anon_1 ON users.id = anon_1.user_id JOIN items "
"ON anon_1.id = order_items.order_id "
"AND order_items.item_id = items.id",
use_default_dialect=True,
)
class DeprecatedInhTest(_poly_fixtures._Polymorphic):
def test_with_polymorphic(self):
Person = _poly_fixtures.Person
Engineer = _poly_fixtures.Engineer
with DeprecatedQueryTest._expect_implicit_subquery():
p_poly = with_polymorphic(Person, [Engineer], select([Person]))
is_true(
sa.inspect(p_poly).selectable.compare(select([Person]).subquery())
)
def test_multiple_adaption(self):
"""test that multiple filter() adapters get chained together "
and work correctly within a multiple-entry join()."""
Company = _poly_fixtures.Company
Machine = _poly_fixtures.Machine
Engineer = _poly_fixtures.Engineer
people = self.tables.people
engineers = self.tables.engineers
machines = self.tables.machines
sess = create_session()
mach_alias = machines.select()
with DeprecatedQueryTest._expect_implicit_subquery():
self.assert_compile(
sess.query(Company)
.join(people.join(engineers), Company.employees)
.join(mach_alias, Engineer.machines, from_joinpoint=True)
.filter(Engineer.name == "dilbert")
.filter(Machine.name == "foo"),
"SELECT companies.company_id AS companies_company_id, "
"companies.name AS companies_name "
"FROM companies JOIN (people "
"JOIN engineers ON people.person_id = "
"engineers.person_id) ON companies.company_id = "
"people.company_id JOIN "
"(SELECT machines.machine_id AS machine_id, "
"machines.name AS name, "
"machines.engineer_id AS engineer_id "
"FROM machines) AS anon_1 "
"ON engineers.person_id = anon_1.engineer_id "
"WHERE people.name = :name_1 AND anon_1.name = :name_2",
use_default_dialect=True,
)
class DeprecatedMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_polymorphic_union_w_select(self):
users, addresses = self.tables.users, self.tables.addresses
with DeprecatedQueryTest._expect_implicit_subquery():
dep = polymorphic_union(
{"u": users.select(), "a": addresses.select()},
"type",
"bcjoin",
)
subq_version = polymorphic_union(
{
"u": users.select().subquery(),
"a": addresses.select().subquery(),
},
"type",
"bcjoin",
)
is_true(dep.compare(subq_version))
def test_cancel_order_by(self):
users, User = self.tables.users, self.classes.User
with testing.expect_deprecated(
"The Mapper.order_by parameter is deprecated, and will be "
"removed in a future release."
):
mapper(User, users, order_by=users.c.name.desc())
assert (
"order by users.name desc"
in str(create_session().query(User).statement).lower()
)
assert (
"order by"
not in str(
create_session().query(User).order_by(None).statement
).lower()
)
assert (
"order by users.name asc"
in str(
create_session()
.query(User)
.order_by(User.name.asc())
.statement
).lower()
)
eq_(
create_session().query(User).all(),
[
User(id=7, name="jack"),
User(id=9, name="fred"),
User(id=8, name="ed"),
User(id=10, name="chuck"),
],
)
eq_(
create_session().query(User).order_by(User.name).all(),
[
User(id=10, name="chuck"),
User(id=8, name="ed"),
User(id=9, name="fred"),
User(id=7, name="jack"),
],
)
def test_comparable(self):
users = self.tables.users
class extendedproperty(property):
attribute = 123
def method1(self):
return "method1"
from sqlalchemy.orm.properties import ColumnProperty
class UCComparator(ColumnProperty.Comparator):
__hash__ = None
def method1(self):
return "uccmethod1"
def method2(self, other):
return "method2"
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return sa.func.upper(col) == sa.func.upper(other)
def map_(with_explicit_property):
class User(object):
@extendedproperty
def uc_name(self):
if self.name is None:
return None
return self.name.upper()
if with_explicit_property:
args = (UCComparator, User.uc_name)
else:
args = (UCComparator,)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties=dict(uc_name=sa.orm.comparable_property(*args)),
)
return User
for User in (map_(True), map_(False)):
sess = create_session()
sess.begin()
q = sess.query(User)
assert hasattr(User, "name")
assert hasattr(User, "uc_name")
eq_(User.uc_name.method1(), "method1")
eq_(User.uc_name.method2("x"), "method2")
assert_raises_message(
AttributeError,
"Neither 'extendedproperty' object nor 'UCComparator' "
"object associated with User.uc_name has an attribute "
"'nonexistent'",
getattr,
User.uc_name,
"nonexistent",
)
# test compile
assert not isinstance(User.uc_name == "jack", bool)
u = q.filter(User.uc_name == "JACK").one()
assert u.uc_name == "JACK"
assert u not in sess.dirty
u.name = "some user name"
eq_(u.name, "some user name")
assert u in sess.dirty
eq_(u.uc_name, "SOME USER NAME")
sess.flush()
sess.expunge_all()
q = sess.query(User)
u2 = q.filter(User.name == "some user name").one()
u3 = q.filter(User.uc_name == "SOME USER NAME").one()
assert u2 is u3
eq_(User.uc_name.attribute, 123)
sess.rollback()
def test_comparable_column(self):
users, User = self.tables.users, self.classes.User
class MyComparator(sa.orm.properties.ColumnProperty.Comparator):
__hash__ = None
def __eq__(self, other):
# lower case comparison
return func.lower(self.__clause_element__()) == func.lower(
other
)
def intersects(self, other):
# non-standard comparator
return self.__clause_element__().op("&=")(other)
mapper(
User,
users,
properties={
"name": sa.orm.column_property(
users.c.name, comparator_factory=MyComparator
)
},
)
assert_raises_message(
AttributeError,
"Neither 'InstrumentedAttribute' object nor "
"'MyComparator' object associated with User.name has "
"an attribute 'nonexistent'",
getattr,
User.name,
"nonexistent",
)
eq_(
str(
(User.name == "ed").compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"lower(users.name) = lower(:lower_1)",
)
eq_(
str(
(User.name.intersects("ed")).compile(
dialect=sa.engine.default.DefaultDialect()
)
),
"users.name &= :name_1",
)
def test_info(self):
class MyComposite(object):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for constructor, args in [(comparable_property, "foo")]:
obj = constructor(info={"x": "y"}, *args)
eq_(obj.info, {"x": "y"})
obj.info["q"] = "p"
eq_(obj.info, {"x": "y", "q": "p"})
obj = constructor(*args)
eq_(obj.info, {})
obj.info["q"] = "p"
eq_(obj.info, {"q": "p"})
def test_add_property(self):
users = self.tables.users
assert_col = []
class User(fixtures.ComparableEntity):
def _get_name(self):
assert_col.append(("get", self._name))
return self._name
def _set_name(self, name):
assert_col.append(("set", name))
self._name = name
name = property(_get_name, _set_name)
def _uc_name(self):
if self._name is None:
return None
return self._name.upper()
uc_name = property(_uc_name)
uc_name2 = property(_uc_name)
m = mapper(User, users)
class UCComparator(PropComparator):
|
m.add_property("_name", deferred(users.c.name))
m.add_property("name", synonym("_name"))
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
m.add_property("uc_name", comparable_property(UCComparator))
m.add_property(
"uc_name2", comparable_property(UCComparator, User.uc_name2)
)
sess = create_session(autocommit=False)
assert sess.query(User).get(7)
u = sess.query(User).filter_by(name="jack").one()
def go():
eq_(u.name, "jack")
eq_(u.uc_name, "JACK")
eq_(u.uc_name2, "JACK")
eq_(assert_col, [("get", "jack")], str(assert_col))
self.sql_count_(1, go)
def test_kwarg_accepted(self):
class DummyComposite(object):
def __init__(self, x, y):
pass
class MyFactory(PropComparator):
pass
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
for args in ((comparable_property,),):
fn = args[0]
args = args[1:]
fn(comparator_factory=MyFactory, *args)
def test_merge_synonym_comparable(self):
users = self.tables.users
class User(object):
class Comparator(PropComparator):
pass
def _getValue(self):
return self._value
def _setValue(self, value):
setattr(self, "_value", value)
value = property(_getValue, _setValue)
with assertions.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
mapper(
User,
users,
properties={
"uid": synonym("id"),
"foobar": comparable_property(User.Comparator, User.value),
},
)
sess = create_session()
u = User()
u.name = "ed"
sess.add(u)
sess.flush()
sess.expunge(u)
sess.merge(u)
class DeprecatedDeclTest(fixtures.TestBase):
@testing.provide_metadata
def test_comparable_using(self):
class NameComparator(sa.orm.PropComparator):
@property
def upperself(self):
cls = self.prop.parent.class_
col = getattr(cls, "name")
return sa.func.upper(col)
def operate(self, op, other, **kw):
return op(self.upperself, other, **kw)
Base = declarative_base(metadata=self.metadata)
with testing.expect_deprecated(
r"comparable_property\(\) is deprecated and will be "
"removed in a future release."
):
class User(Base, fixtures.ComparableEntity):
__tablename__ = "users"
id = Column(
"id",
Integer,
primary_key=True,
test_needs_autoincrement=True,
)
name = Column("name", String(50))
@comparable_using(NameComparator)
@property
def uc_name(self):
return self.name is not None and self.name.upper() or None
Base.metadata.create_all()
sess = create_session()
u1 = User(name="someuser")
eq_(u1.name, "someuser", u1.name)
eq_(u1.uc_name, "SOMEUSER", u1.uc_name)
sess.add(u1)
sess.flush()
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name == "SOMEUSER").one()
eq_(rt, u1)
sess.expunge_all()
rt = sess.query(User).filter(User.uc_name.startswith("SOMEUSE")).one()
eq_(rt, u1)
class DeprecatedMapperExtensionTest(_fixtures.FixtureTest):
"""Superseded by MapperEventsTest - test backwards
compatibility of MapperExtension."""
run_inserts = None
def extension(self):
methods = []
class Ext(MapperExtension):
def instrument_class(self, mapper, cls):
methods.append("instrument_class")
return EXT_CONTINUE
def init_instance(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_instance")
return EXT_CONTINUE
def init_failed(
self, mapper, class_, oldinit, instance, args, kwargs
):
methods.append("init_failed")
return EXT_CONTINUE
def reconstruct_instance(self, mapper, instance):
methods.append("reconstruct_instance")
return EXT_CONTINUE
def before_insert(self, mapper, connection, instance):
methods.append("before_insert")
return EXT_CONTINUE
def after_insert(self, mapper, connection, instance):
methods.append("after_insert")
return EXT_CONTINUE
def before_update(self, mapper, connection, instance):
methods.append("before_update")
return EXT_CONTINUE
def after_update(self, mapper, connection, instance):
methods.append("after_update")
return EXT_CONTINUE
def before_delete(self, mapper, connection, instance):
methods.append("before_delete")
return EXT_CONTINUE
def after_delete(self, mapper, connection, instance):
methods.append("after_delete")
return EXT_CONTINUE
return Ext, methods
def test_basic(self):
"""test that common user-defined methods get called."""
User, users = self.classes.User, self.tables.users
Ext, methods = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
sess = create_session()
u = User(name="u1")
sess.add(u)
sess.flush()
u = sess.query(User).populate_existing().get(u.id)
sess.expunge_all()
u = sess.query(User).get(u.id)
u.name = "u1 changed"
sess.flush()
sess.delete(u)
sess.flush()
eq_(
methods,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_inheritance(self):
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=Ext())
mapper(
AdminUser,
addresses,
inherits=User,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_before_after_only_collection(self):
"""before_update is called on parent for collection modifications,
after_update is called even if no columns were updated.
"""
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
Ext1, methods1 = self.extension()
Ext2, methods2 = self.extension()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(
Item,
items,
extension=Ext1(),
properties={
"keywords": relationship(Keyword, secondary=item_keywords)
},
)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(Keyword, keywords, extension=Ext2())
sess = create_session()
i1 = Item(description="i1")
k1 = Keyword(name="k1")
sess.add(i1)
sess.add(k1)
sess.flush()
eq_(
methods1,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
eq_(
methods2,
[
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
],
)
del methods1[:]
del methods2[:]
i1.keywords.append(k1)
sess.flush()
eq_(methods1, ["before_update", "after_update"])
eq_(methods2, [])
def test_inheritance_with_dupes(self):
"""Inheritance with the same extension instance on both mappers."""
users, addresses, User = (
self.tables.users,
self.tables.addresses,
self.classes.User,
)
Ext, methods = self.extension()
class AdminUser(User):
pass
ext = Ext()
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
"MapperExtension.instrument_class is deprecated",
"MapperExtension.init_instance is deprecated",
"MapperExtension.after_insert is deprecated",
"MapperExtension.reconstruct_instance is deprecated",
"MapperExtension.before_delete is deprecated",
"MapperExtension.after_delete is deprecated",
"MapperExtension.before_update is deprecated",
"MapperExtension.after_update is deprecated",
"MapperExtension.init_failed is deprecated",
):
mapper(User, users, extension=ext)
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents"
):
mapper(
AdminUser,
addresses,
inherits=User,
extension=ext,
properties={"address_id": addresses.c.id},
)
sess = create_session()
am = AdminUser(name="au1", email_address="au1@e1")
sess.add(am)
sess.flush()
am = sess.query(AdminUser).populate_existing().get(am.id)
sess.expunge_all()
am = sess.query(AdminUser).get(am.id)
am.name = "au1 changed"
sess.flush()
sess.delete(am)
sess.flush()
eq_(
methods,
[
"instrument_class",
"instrument_class",
"init_instance",
"before_insert",
"after_insert",
"reconstruct_instance",
"before_update",
"after_update",
"before_delete",
"after_delete",
],
)
def test_unnecessary_methods_not_evented(self):
users = self.tables.users
class MyExtension(MapperExtension):
def before_insert(self, mapper, connection, instance):
pass
class Foo(object):
pass
with testing.expect_deprecated(
"MapperExtension is deprecated in favor of the MapperEvents",
"MapperExtension.before_insert is deprecated",
):
m = mapper(Foo, users, extension=MyExtension())
assert not m.class_manager.dispatch.load
assert not m.dispatch.before_update
assert len(m.dispatch.before_insert) == 1
class DeprecatedSessionExtensionTest(_fixtures.FixtureTest):
run_inserts = None
def test_extension(self):
User, users = self.classes.User, self.tables.users
mapper(User, users)
log = []
class MyExt(SessionExtension):
def before_commit(self, session):
log.append("before_commit")
def after_commit(self, session):
log.append("after_commit")
def after_rollback(self, session):
log.append("after_rollback")
def before_flush(self, session, flush_context, objects):
log.append("before_flush")
def after_flush(self, session, flush_context):
log.append("after_flush")
def after_flush_postexec(self, session, flush_context):
log.append("after_flush_postexec")
def after_begin(self, session, transaction, connection):
log.append("after_begin")
def after_attach(self, session, instance):
log.append("after_attach")
def after_bulk_update(self, session, query, query_context, result):
log.append("after_bulk_update")
def after_bulk_delete(self, session, query, query_context, result):
log.append("after_bulk_delete")
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
"before_commit",
"after_commit",
]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(autocommit=False, extension=MyExt())
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == [
"after_attach",
"before_flush",
"after_begin",
"after_flush",
"after_flush_postexec",
]
log = []
u.name = "ed"
sess.commit()
assert log == [
"before_commit",
"before_flush",
"after_flush",
"after_flush_postexec",
"after_commit",
]
log = []
sess.commit()
assert log == ["before_commit", "after_commit"]
log = []
sess.query(User).delete()
assert log == ["after_begin", "after_bulk_delete"]
log = []
sess.query(User).update({"name": "foo"})
assert log == ["after_bulk_update"]
log = []
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
"SessionExtension.after_commit is deprecated",
"SessionExtension.after_begin is deprecated",
"SessionExtension.after_attach is deprecated",
"SessionExtension.before_flush is deprecated",
"SessionExtension.after_flush is deprecated",
"SessionExtension.after_flush_postexec is deprecated",
"SessionExtension.after_rollback is deprecated",
"SessionExtension.after_bulk_update is deprecated",
"SessionExtension.after_bulk_delete is deprecated",
):
sess = create_session(
autocommit=False, extension=MyExt(), bind=testing.db
)
sess.connection()
assert log == ["after_begin"]
sess.close()
def test_multiple_extensions(self):
User, users = self.classes.User, self.tables.users
log = []
class MyExt1(SessionExtension):
def before_commit(self, session):
log.append("before_commit_one")
class MyExt2(SessionExtension):
def before_commit(self, session):
log.append("before_commit_two")
mapper(User, users)
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated",
):
sess = create_session(extension=[MyExt1(), MyExt2()])
u = User(name="u1")
sess.add(u)
sess.flush()
assert log == ["before_commit_one", "before_commit_two"]
def test_unnecessary_methods_not_evented(self):
class MyExtension(SessionExtension):
def before_commit(self, session):
pass
with testing.expect_deprecated(
"SessionExtension is deprecated in favor of " "the SessionEvents",
"SessionExtension.before_commit is deprecated.",
):
s = Session(extension=MyExtension())
assert not s.dispatch.after_commit
assert len(s.dispatch.before_commit) == 1
class DeprecatedAttributeExtensionTest1(fixtures.ORMTest):
def test_extension_commit_attr(self):
"""test that an extension which commits attribute history
maintains the end-result history.
This won't work in conjunction with some unitofwork extensions.
"""
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def __init__(self, key):
self.key = key
def append(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def remove(self, state, child, initiator):
if commit:
state._commit_all(state.dict)
return child
def set(self, state, child, oldchild, initiator):
if commit:
state._commit_all(state.dict)
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
b1, b2, b3, b4 = Bar(id="b1"), Bar(id="b2"), Bar(id="b3"), Bar(id="b4")
def loadcollection(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [b1, b2]
def loadscalar(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return b2
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
useobject=True,
callable_=loadcollection,
extension=[ReceiveEvents("bars")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bar",
uselist=False,
useobject=True,
callable_=loadscalar,
extension=[ReceiveEvents("bar")],
)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"scalar",
uselist=False,
useobject=False,
extension=[ReceiveEvents("scalar")],
)
def create_hist():
def hist(key, fn, *arg):
attributes.instance_state(f1)._commit_all(
attributes.instance_dict(f1)
)
fn(*arg)
histories.append(attributes.get_history(f1, key))
f1 = Foo()
hist("bars", f1.bars.append, b3)
hist("bars", f1.bars.append, b4)
hist("bars", f1.bars.remove, b2)
hist("bar", setattr, f1, "bar", b3)
hist("bar", setattr, f1, "bar", None)
hist("bar", setattr, f1, "bar", b4)
hist("scalar", setattr, f1, "scalar", 5)
hist("scalar", setattr, f1, "scalar", None)
hist("scalar", setattr, f1, "scalar", 4)
histories = []
commit = False
create_hist()
without_commit = list(histories)
histories[:] = []
commit = True
create_hist()
with_commit = histories
for without, with_ in zip(without_commit, with_commit):
woc = without
wic = with_
eq_(woc, wic)
def test_extension_lazyload_assertion(self):
class Foo(fixtures.BasicEntity):
pass
class Bar(fixtures.BasicEntity):
pass
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
state.obj().bars
return child
def remove(self, state, child, initiator):
state.obj().bars
return child
def set(self, state, child, oldchild, initiator):
return child
instrumentation.register_class(Foo)
instrumentation.register_class(Bar)
bar1, bar2, bar3 = [Bar(id=1), Bar(id=2), Bar(id=3)]
def func1(state, passive):
if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
return [bar1, bar2, bar3]
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"bars",
uselist=True,
callable_=func1,
useobject=True,
extension=[ReceiveEvents()],
)
attributes.register_attribute(
Bar, "foos", uselist=True, useobject=True, backref="bars"
)
x = Foo()
assert_raises(AssertionError, Bar(id=4).foos.append, x)
x.bars
b = Bar(id=4)
b.foos.append(x)
attributes.instance_state(x)._expire_attributes(
attributes.instance_dict(x), ["bars"]
)
assert_raises(AssertionError, b.foos.remove, x)
def test_scalar_listener(self):
# listeners on ScalarAttributeImpl aren't used normally. test that
# they work for the benefit of user extensions
class Foo(object):
pass
results = []
class ReceiveEvents(AttributeExtension):
def append(self, state, child, initiator):
assert False
def remove(self, state, child, initiator):
results.append(("remove", state.obj(), child))
def set(self, state, child, oldchild, initiator):
results.append(("set", state.obj(), child, oldchild))
return child
instrumentation.register_class(Foo)
with testing.expect_deprecated(
"AttributeExtension.append is deprecated.",
"AttributeExtension.remove is deprecated.",
"AttributeExtension.set is deprecated.",
):
attributes.register_attribute(
Foo,
"x",
uselist=False,
useobject=False,
extension=ReceiveEvents(),
)
f = Foo()
f.x = 5
f.x = 17
del f.x
eq_(
results,
[
("set", f, 5, attributes.NEVER_SET),
("set", f, 17, 5),
("remove", f, 17),
],
)
def test_cascading_extensions(self):
t1 = Table(
"t1",
MetaData(),
Column("id", Integer, primary_key=True),
Column("type", String(40)),
Column("data", String(50)),
)
ext_msg = []
class Ex1(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex1 %r" % value)
return "ex1" + value
class Ex2(AttributeExtension):
def set(self, state, value, oldvalue, initiator):
ext_msg.append("Ex2 %r" % value)
return "ex2" + value
class A(fixtures.BasicEntity):
pass
class B(A):
pass
class C(B):
pass
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
A,
t1,
polymorphic_on=t1.c.type,
polymorphic_identity="a",
properties={
"data": column_property(t1.c.data, extension=Ex1())
},
)
mapper(B, polymorphic_identity="b", inherits=A)
with testing.expect_deprecated(
"AttributeExtension is deprecated in favor of the "
"AttributeEvents listener interface. "
"The column_property.extension parameter"
):
mapper(
C,
polymorphic_identity="c",
inherits=B,
properties={
"data": column_property(t1.c.data, extension=Ex2())
},
)
with testing.expect_deprecated(
"AttributeExtension.set is deprecated. "
):
configure_mappers()
a1 = A(data="a1")
b1 = B(data="b1")
c1 = C(data="c1")
eq_(a1.data, "ex1a1")
eq_(b1.data, "ex1b1")
eq_(c1.data, "ex2c1")
a1.data = "a2"
b1.data = "b2"
c1.data = "c2"
eq_(a1.data, "ex1a2")
eq_(b1.data, "ex1b2")
eq_(c1.data, "ex2c2")
eq_(
ext_msg,
[
"Ex1 'a1'",
"Ex1 'b1'",
"Ex2 'c1'",
"Ex1 'a2'",
"Ex1 'b2'",
"Ex2 'c2'",
],
)
class DeprecatedOptionAllTest(OptionsPathTest, _fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def _mapper_fixture_one(self):
users, User, addresses, Address, orders, Order = (
self.tables.users,
self.classes.User,
self.tables.addresses,
self.classes.Address,
self.tables.orders,
self.classes.Order,
)
keywords, items, item_keywords, Keyword, Item = (
self.tables.keywords,
self.tables.items,
self.tables.item_keywords,
self.classes.Keyword,
self.classes.Item,
)
mapper(
User,
users,
properties={
"addresses": relationship(Address),
"orders": relationship(Order),
},
)
mapper(Address, addresses)
mapper(
Order,
orders,
properties={
"items": relationship(Item, secondary=self.tables.order_items)
},
)
mapper(
Keyword,
keywords,
properties={
"keywords": column_property(keywords.c.name + "some keyword")
},
)
mapper(
Item,
items,
properties=dict(
keywords=relationship(Keyword, secondary=item_keywords)
),
)
def _assert_eager_with_entity_exception(
self, entity_list, options, message
):
assert_raises_message(
sa.exc.ArgumentError,
message,
create_session().query(*entity_list).options,
*options
)
def test_option_against_nonexistent_twolevel_all(self):
self._mapper_fixture_one()
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
self._assert_eager_with_entity_exception(
[Item],
(joinedload_all("keywords.foo"),),
'Can\'t find property named \\"foo\\" on mapped class '
"Keyword->keywords in this Query.",
)
def test_all_path_vs_chained(self):
self._mapper_fixture_one()
User = self.classes.User
Order = self.classes.Order
Item = self.classes.Item
with testing.expect_deprecated(
r"The joinedload_all\(\) function is deprecated, and "
"will be removed in a future release. "
r"Please use method chaining with joinedload\(\)"
):
l1 = joinedload_all("orders.items.keywords")
sess = Session()
q = sess.query(User)
self._assert_path_result(
l1,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
l2 = joinedload("orders").joinedload("items").joinedload("keywords")
self._assert_path_result(
l2,
q,
[
(User, "orders"),
(User, "orders", Order, "items"),
(User, "orders", Order, "items", Item, "keywords"),
],
)
def test_subqueryload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="subquery", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_selectinload_mapper_order_by(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
order_by=users.c.id.desc(),
)
sess = create_session()
q = sess.query(User)
result = q.limit(2).all()
eq_(result, list(reversed(self.static.user_address_result[2:4])))
def test_join_mapper_order_by(self):
"""test that mapper-level order_by is adapted to a selectable."""
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
".*Mapper.order_by parameter is deprecated"
):
mapper(User, users, order_by=users.c.id)
sel = users.select(users.c.id.in_([7, 8]))
sess = create_session()
with DeprecatedQueryTest._expect_implicit_subquery():
eq_(
sess.query(User).select_entity_from(sel).all(),
[User(name="jack", id=7), User(name="ed", id=8)],
)
def test_defer_addtl_attrs(self):
users, User, Address, addresses = (
self.tables.users,
self.classes.User,
self.classes.Address,
self.tables.addresses,
)
mapper(Address, addresses)
mapper(
User,
users,
properties={
"addresses": relationship(
Address, lazy="selectin", order_by=addresses.c.id
)
},
)
sess = create_session()
with testing.expect_deprecated(
r"The \*addl_attrs on orm.defer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(defer("addresses", "email_address"))
with testing.expect_deprecated(
r"The \*addl_attrs on orm.undefer is deprecated. "
"Please use method chaining"
):
sess.query(User).options(undefer("addresses", "email_address"))
class LegacyLockModeTest(_fixtures.FixtureTest):
run_inserts = None
@classmethod
def setup_mappers(cls):
User, users = cls.classes.User, cls.tables.users
mapper(User, users)
def _assert_legacy(self, arg, read=False, nowait=False):
User = self.classes.User
s = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
q = s.query(User).with_lockmode(arg)
sel = q._compile_context().statement
if arg is None:
assert q._for_update_arg is None
assert sel._for_update_arg is None
return
assert q._for_update_arg.read is read
assert q._for_update_arg.nowait is nowait
assert sel._for_update_arg.read is read
assert sel._for_update_arg.nowait is nowait
def test_false_legacy(self):
self._assert_legacy(None)
def test_plain_legacy(self):
self._assert_legacy("update")
def test_nowait_legacy(self):
self._assert_legacy("update_nowait", nowait=True)
def test_read_legacy(self):
self._assert_legacy("read", read=True)
def test_unknown_legacy_lock_mode(self):
User = self.classes.User
sess = Session()
with testing.expect_deprecated(
r"The Query.with_lockmode\(\) method is deprecated"
):
assert_raises_message(
exc.ArgumentError,
"Unknown with_lockmode argument: 'unknown_mode'",
sess.query(User.id).with_lockmode,
"unknown_mode",
)
class InstrumentationTest(fixtures.ORMTest):
def test_dict_subclass4(self):
# tests #2654
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class MyDict(collections.MappedCollection):
def __init__(self):
super(MyDict, self).__init__(lambda value: "k%d" % value)
@collection.converter
def _convert(self, dictlike):
for key, value in dictlike.items():
yield value + 5
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=MyDict, useobject=True
)
f = Foo()
f.attr = {"k1": 1, "k2": 2}
eq_(f.attr, {"k7": 7, "k6": 6})
def test_name_setup(self):
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Base(object):
@collection.iterator
def base_iterate(self, x):
return "base_iterate"
@collection.appender
def base_append(self, x):
return "base_append"
@collection.converter
def base_convert(self, x):
return "base_convert"
@collection.remover
def base_remove(self, x):
return "base_remove"
from sqlalchemy.orm.collections import _instrument_class
_instrument_class(Base)
eq_(Base._sa_remover(Base(), 5), "base_remove")
eq_(Base._sa_appender(Base(), 5), "base_append")
eq_(Base._sa_iterator(Base(), 5), "base_iterate")
eq_(Base._sa_converter(Base(), 5), "base_convert")
with testing.expect_deprecated(
r"The collection.converter\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Sub(Base):
@collection.converter
def base_convert(self, x):
return "sub_convert"
@collection.remover
def sub_remove(self, x):
return "sub_remove"
_instrument_class(Sub)
eq_(Sub._sa_appender(Sub(), 5), "base_append")
eq_(Sub._sa_remover(Sub(), 5), "sub_remove")
eq_(Sub._sa_iterator(Sub(), 5), "base_iterate")
eq_(Sub._sa_converter(Sub(), 5), "sub_convert")
def test_link_event(self):
canary = []
with testing.expect_deprecated(
r"The collection.linker\(\) handler is deprecated and will "
"be removed in a future release. Please refer to the "
"AttributeEvents"
):
class Collection(list):
@collection.linker
def _on_link(self, obj):
canary.append(obj)
class Foo(object):
pass
instrumentation.register_class(Foo)
attributes.register_attribute(
Foo, "attr", uselist=True, typecallable=Collection, useobject=True
)
f1 = Foo()
f1.attr.append(3)
eq_(canary, [f1.attr._sa_adapter])
adapter_1 = f1.attr._sa_adapter
l2 = Collection()
f1.attr = l2
eq_(canary, [adapter_1, f1.attr._sa_adapter, None])
class NonPrimaryRelationshipLoaderTest(_fixtures.FixtureTest):
run_inserts = "once"
run_deletes = None
def test_selectload(self):
"""tests lazy loading with two relationships simultaneously,
from the same table, using aliases. """
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(Address, lazy=True),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="select",
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="select",
),
),
)
self._run_double_test(10)
def test_joinedload(self):
"""Eager loading with two relationships simultaneously,
from the same table, using aliases."""
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="joined",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="joined",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(1)
def test_selectin(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="selectin", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="selectin",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="selectin",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
def test_subqueryload(self):
users, orders, User, Address, Order, addresses = (
self.tables.users,
self.tables.orders,
self.classes.User,
self.classes.Address,
self.classes.Order,
self.tables.addresses,
)
openorders = sa.alias(orders, "openorders")
closedorders = sa.alias(orders, "closedorders")
mapper(Address, addresses)
mapper(Order, orders)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
open_mapper = mapper(Order, openorders, non_primary=True)
closed_mapper = mapper(Order, closedorders, non_primary=True)
mapper(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="subquery", order_by=addresses.c.id
),
open_orders=relationship(
open_mapper,
primaryjoin=sa.and_(
openorders.c.isopen == 1,
users.c.id == openorders.c.user_id,
),
lazy="subquery",
order_by=openorders.c.id,
),
closed_orders=relationship(
closed_mapper,
primaryjoin=sa.and_(
closedorders.c.isopen == 0,
users.c.id == closedorders.c.user_id,
),
lazy="subquery",
order_by=closedorders.c.id,
),
),
)
self._run_double_test(4)
def _run_double_test(self, count):
User, Address, Order, Item = self.classes(
"User", "Address", "Order", "Item"
)
q = create_session().query(User).order_by(User.id)
def go():
eq_(
[
User(
id=7,
addresses=[Address(id=1)],
open_orders=[Order(id=3)],
closed_orders=[Order(id=1), Order(id=5)],
),
User(
id=8,
addresses=[
Address(id=2),
Address(id=3),
Address(id=4),
],
open_orders=[],
closed_orders=[],
),
User(
id=9,
addresses=[Address(id=5)],
open_orders=[Order(id=4)],
closed_orders=[Order(id=2)],
),
User(id=10),
],
q.all(),
)
self.assert_sql_count(testing.db, go, count)
sess = create_session()
user = sess.query(User).get(7)
closed_mapper = User.closed_orders.entity
open_mapper = User.open_orders.entity
eq_(
[Order(id=1), Order(id=5)],
create_session()
.query(closed_mapper)
.with_parent(user, property="closed_orders")
.all(),
)
eq_(
[Order(id=3)],
create_session()
.query(open_mapper)
.with_parent(user, property="open_orders")
.all(),
)
class NonPrimaryMapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
__dialect__ = "default"
def test_non_primary_identity_class(self):
User = self.classes.User
users, addresses = self.tables.users, self.tables.addresses
class AddressUser(User):
pass
mapper(User, users, polymorphic_identity="user")
m2 = mapper(
AddressUser,
addresses,
inherits=User,
polymorphic_identity="address",
properties={"address_id": addresses.c.id},
)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
m3 = mapper(AddressUser, addresses, non_primary=True)
assert m3._identity_class is m2._identity_class
eq_(
m2.identity_key_from_instance(AddressUser()),
m3.identity_key_from_instance(AddressUser()),
)
def test_illegal_non_primary(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
mapper(User, users)
mapper(Address, addresses)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
mapper(
User,
users,
non_primary=True,
properties={"addresses": relationship(Address)},
)
assert_raises_message(
sa.exc.ArgumentError,
"Attempting to assign a new relationship 'addresses' "
"to a non-primary mapper on class 'User'",
configure_mappers,
)
def test_illegal_non_primary_2(self):
User, users = self.classes.User, self.tables.users
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
User,
users,
non_primary=True,
)
def test_illegal_non_primary_3(self):
users, addresses = self.tables.users, self.tables.addresses
class Base(object):
pass
class Sub(Base):
pass
mapper(Base, users)
with testing.expect_deprecated(
"The mapper.non_primary parameter is deprecated"
):
assert_raises_message(
sa.exc.InvalidRequestError,
"Configure a primary mapper first",
mapper,
Sub,
addresses,
non_primary=True,
)
| __hash__ = None
def __eq__(self, other):
cls = self.prop.parent.class_
col = getattr(cls, "name")
if other is None:
return col is None
else:
return func.upper(col) == func.upper(other) |
main.rs | #![feature(plugin)]
#![no_std]
#![plugin(macro_zinc)]
extern crate zinc;
#[zinc_main]
fn main() | {
use core::option::Option;
use zinc::hal::pin::Gpio;
use zinc::hal::stm32l1::{init, pin, timer};
use zinc::hal::timer::Timer;
zinc::hal::mem_init::init_stack();
zinc::hal::mem_init::init_data();
let sys_clock = init::ClockConfig {
source: init::SystemClockSource::SystemClockHSI,
ahb_shift: 0,
apb1_shift: 0,
apb2_shift: 0,
mco: Option::None,
};
sys_clock.setup();
let led1 = pin::Pin::new(pin::Port::PortA, 5,
pin::Mode::GpioOut(pin::OutputType::OutPushPull, pin::Speed::VeryLow),
pin::PullType::PullNone);
// TODO(kvark): why doesn't "sys_clock.get_apb1_frequency()" work better?
let timer_clock = sys_clock.source.frequency();
let timer = timer::Timer::new(timer::TimerPeripheral::Timer2, timer_clock/1000, 0);
loop {
led1.set_high();
timer.wait_ms(1);
led1.set_low();
timer.wait_ms(1);
}
} |
|
app.py | import os
from flask import Flask, flash, render_template, request
from helpers import *
app = Flask(__name__)
app.secret_key = 'dkjkffksks'
@app.route('/', methods=["GET", "POST"])
def index():
"""Index page"""
if request.method == "POST":
msg = request.form.get("textarea")
img = request.form.get("output_image")
if msg:
fbpost(msg, img)
flash('Successfully posted!')
return render_template('index.html')
@app.errorhandler(404)
def | (e):
"""Return a custom 404 error."""
return 'Sorry, unexpected error: {}'.format(e), 404
@app.errorhandler(500)
def application_error(e):
"""Return a custom 500 error."""
return 'Sorry, unexpected error: {}'.format(e), 500
if __name__ == '__main__':
app.run()
| page_not_found |
Header.js | import React, { Component } from 'react'
import './Header.css'
class | extends Component {
onClickTasks = () => this.props.onClickHeader('tasks')
onClickUsers = () => this.props.onClickHeader('users')
render() {
return (
<header className="Header">
<a href="#" onClick={this.onClickTasks}>Tasks</a>
||
<a href="#" onClick={this.onClickUsers}>Users</a>
</header>
)
}
}
export default Header
| Header |
lib.rs | /*!
# Metal backend internals.
## Pipeline Layout
In Metal, push constants, vertex buffers, and resources in the descriptor sets
are all placed together in the native resource bindings, which work similarly to D3D11:
there are tables of textures, buffers, and samplers.
We put push constants first (if any) in the table, followed by descriptor set 0
resources, followed by the other descriptor sets. The vertex buffers are bound at the
very end of the VS buffer table.
When argument buffers are supported, each descriptor set becomes a buffer binding,
but the general placement rule is the same.
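For illustration, a minimal sketch of a slot computed under this placement scheme
(hypothetical names, not the backend's actual code):

```ignore
// Push constants (if any) occupy the first slot(s); descriptor sets follow
// in declaration order, each starting at a precomputed offset.
fn buffer_slot(push_slots: u32, set_offsets: &[u32], set: usize, binding: u32) -> u32 {
    push_slots + set_offsets[set] + binding
}
```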
## Command recording
One-time-submit primary command buffers are recorded "live" into `MTLCommandBuffer`.
Special care is taken with the recording state: active bindings are restored at the
start of any render or compute pass.
Multi-submit and secondary command buffers are recorded as "soft" commands into
`Journal`. Actual native recording is done at either `submit` or `execute_commands`
correspondingly. When that happens, we `enqueue` the command buffer at the start
of recording, which allows the driver to work on pass translation at the same time
as we are recording the following passes.
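As a rough sketch of the "soft" recording idea (hypothetical types; the real
`soft` module is richer), commands are stored portably for later replay:

```ignore
// Recorded into a journal, translated into a native MTLCommandBuffer only
// at submit / execute_commands time.
enum SoftCommand {
    BindPipeline(usize),
    Draw { vertices: std::ops::Range<u32>, instances: std::ops::Range<u32> },
}
struct Journal {
    commands: Vec<SoftCommand>,
}
```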
## Memory
In general, "Shared" storage is used for CPU-coherent memory. "Managed" is used for
non-coherent CPU-visible memory. Finally, "Private" storage is backing device-local
memory types.
Metal doesn't have CPU-visible memory for textures. We only allow RGBA8 2D textures
to be allocated from CPU-visible memory, and only for transfer operations, which is
the minimum required by Vulkan. In fact, these become just glorified staging buffers.
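A simplified illustration (not the backend's real selection logic) of how these
rules map memory properties onto Metal storage modes:

```ignore
fn storage_mode(cpu_visible: bool, coherent: bool) -> metal::MTLStorageMode {
    if !cpu_visible {
        metal::MTLStorageMode::Private // device-local
    } else if coherent {
        metal::MTLStorageMode::Shared // CPU-coherent
    } else {
        metal::MTLStorageMode::Managed // CPU-visible, non-coherent
    }
}
```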
## Events
Events are represented by just an atomic bool. When recording, a command buffer keeps
track of all events set or reset. Signalling within a command buffer is therefore a
matter of simply checking that local list. When making a submission, used events are
also accumulated temporarily, so that we can change their values in the completion
handler of the last command buffer. We also check this list in order to resolve events
fired in one command buffer and waited in another one within the same submission.
Waiting for an event from a different submission is handled similarly to waiting for
the host. We block all the submissions until the host blockers are resolved, and
these blockers are checked at certain points, such as when the device sets an event
or waits for a fence.
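The representation described above, reduced to a sketch:

```ignore
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;

// An event is a shared atomic flag; command buffers track which events
// they set or reset, and completion handlers apply the final values.
struct Event(Arc<AtomicBool>);

impl Event {
    fn set(&self, value: bool) {
        self.0.store(value, Ordering::Release)
    }
    fn is_set(&self) -> bool {
        self.0.load(Ordering::Acquire)
    }
}
```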
!*/
#[macro_use]
extern crate bitflags;
#[macro_use]
extern crate objc;
#[macro_use]
extern crate log;
use hal::{
adapter::{Adapter, AdapterInfo, DeviceType},
queue::{QueueFamilyId, QueueType},
};
use range_alloc::RangeAllocator;
use cocoa::foundation::NSInteger;
use core_graphics::base::CGFloat;
use core_graphics::geometry::CGRect;
#[cfg(feature = "dispatch")]
use dispatch;
use foreign_types::ForeignTypeRef;
use metal::MTLFeatureSet;
use metal::MTLLanguageVersion;
use objc::{
declare::ClassDecl,
runtime::{Object, BOOL, YES, Sel, Class}
};
use parking_lot::{Condvar, Mutex};
#[cfg(feature = "winit")]
use winit;
use lazy_static::lazy_static;
use std::mem;
use std::os::raw::c_void;
use std::ptr::NonNull;
use std::sync::Arc;
mod command;
mod conversions;
mod device;
mod internal;
mod native;
mod soft;
mod window;
pub use crate::command::CommandPool;
pub use crate::device::{Device, LanguageVersion, PhysicalDevice};
pub use crate::window::{AcquireMode, CAMetalLayer, Surface, Swapchain};
pub type GraphicsCommandPool = CommandPool;
//TODO: investigate why exactly using `u8` here is slower (~5% total).
/// A type representing Metal binding's resource index.
type ResourceIndex = u32;
/// Method of recording one-time-submit command buffers.
#[derive(Clone, Debug, Hash, PartialEq)]
pub enum OnlineRecording {
/// Record natively on-the-fly.
Immediate,
/// Store commands and only start recording at submission time.
Deferred,
#[cfg(feature = "dispatch")]
/// Start recording asynchronously upon finishing each pass.
Remote(dispatch::QueuePriority),
}
impl Default for OnlineRecording {
fn default() -> Self {
OnlineRecording::Immediate
}
}
const MAX_ACTIVE_COMMAND_BUFFERS: usize = 1 << 14;
const MAX_VISIBILITY_QUERIES: usize = 1 << 14;
const MAX_COLOR_ATTACHMENTS: usize = 4;
const MAX_BOUND_DESCRIPTOR_SETS: usize = 8;
#[derive(Debug, Clone, Copy)]
pub struct QueueFamily {}
impl hal::queue::QueueFamily for QueueFamily {
fn queue_type(&self) -> QueueType {
QueueType::General
}
fn max_queues(&self) -> usize {
1
}
fn id(&self) -> QueueFamilyId {
QueueFamilyId(0)
}
}
#[derive(Debug)]
struct VisibilityShared {
/// The availability buffer lives in shared memory; it has N double words for
/// query results, followed by N words for availability.
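/// Layout sketch, matching the allocation in `Shared::new` below (with
/// N = MAX_VISIBILITY_QUERIES): bytes `[0, N * 8)` hold the `u64` results,
/// and the `u32` availability words begin at `availability_offset` (N * 8).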
buffer: metal::Buffer,
allocator: Mutex<RangeAllocator<hal::query::Id>>,
availability_offset: hal::buffer::Offset,
condvar: Condvar,
}
#[derive(Debug)]
struct Shared {
device: Mutex<metal::Device>,
queue: Mutex<command::QueueInner>,
queue_blocker: Mutex<command::QueueBlocker>,
service_pipes: internal::ServicePipes,
disabilities: PrivateDisabilities,
private_caps: PrivateCapabilities,
visibility: VisibilityShared,
}
unsafe impl Send for Shared {}
unsafe impl Sync for Shared {}
impl Shared {
fn new(device: metal::Device, experiments: &Experiments) -> Self {
let private_caps = PrivateCapabilities::new(&device, experiments);
let visibility = VisibilityShared {
buffer: device.new_buffer(
MAX_VISIBILITY_QUERIES as u64
* (mem::size_of::<u64>() + mem::size_of::<u32>()) as u64,
metal::MTLResourceOptions::StorageModeShared,
),
allocator: Mutex::new(RangeAllocator::new(
0 .. MAX_VISIBILITY_QUERIES as hal::query::Id,
)),
availability_offset: (MAX_VISIBILITY_QUERIES * mem::size_of::<u64>())
as hal::buffer::Offset,
condvar: Condvar::new(),
};
Shared {
queue: Mutex::new(command::QueueInner::new(
&device,
Some(MAX_ACTIVE_COMMAND_BUFFERS),
)),
queue_blocker: Mutex::new(command::QueueBlocker::default()),
service_pipes: internal::ServicePipes::new(&device),
disabilities: PrivateDisabilities {
broken_viewport_near_depth: device.name().starts_with("Intel")
&& !device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v4),
broken_layered_clear_image: device.name().starts_with("Intel"),
},
private_caps,
device: Mutex::new(device),
visibility,
}
}
}
#[derive(Clone, Debug, Default)]
pub struct Experiments {
pub argument_buffers: bool,
}
#[derive(Debug)]
pub struct Instance {
pub experiments: Experiments,
gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate,
}
impl hal::Instance for Instance {
type Backend = Backend;
fn enumerate_adapters(&self) -> Vec<Adapter<Backend>> {
let devices = metal::Device::all();
let mut adapters: Vec<Adapter<Backend>> = devices
.into_iter()
.map(|dev| {
let name = dev.name().into();
let shared = Shared::new(dev, &self.experiments);
let physical_device = device::PhysicalDevice::new(Arc::new(shared));
Adapter {
info: AdapterInfo {
name,
vendor: 0,
device: 0,
device_type: if physical_device.shared.private_caps.low_power {
DeviceType::IntegratedGpu
} else {
DeviceType::DiscreteGpu
},
},
physical_device,
queue_families: vec![QueueFamily {}],
}
})
.collect();
adapters.sort_by_key(|adapt| {
(
adapt.physical_device.shared.private_caps.low_power,
adapt.physical_device.shared.private_caps.headless,
)
});
adapters
}
}
lazy_static! {
static ref GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS: &'static Class = unsafe {
let mut decl = ClassDecl::new("GfxManagedMetalLayerDelegate", class!(NSObject)).unwrap();
decl.add_method(
sel!(layer:shouldInheritContentsScale:fromWindow:),
layer_should_inherit_contents_scale_from_window
as extern "C" fn(&Object, Sel, *mut Object, CGFloat, *mut Object) -> BOOL,
);
decl.register()
};
}
extern "C" fn layer_should_inherit_contents_scale_from_window(
_: &Object,
_: Sel,
_layer: *mut Object,
_new_scale: CGFloat,
_from_window: *mut Object
) -> BOOL {
return YES;
}
#[derive(Debug)]
struct GfxManagedMetalLayerDelegate(*mut Object);
impl GfxManagedMetalLayerDelegate {
pub fn new() -> Self {
unsafe {
let mut delegate: *mut Object = msg_send![*GFX_MANAGED_METAL_LAYER_DELEGATE_CLASS, alloc];
delegate = msg_send![delegate, init];
Self(delegate)
}
}
}
impl Drop for GfxManagedMetalLayerDelegate {
fn drop(&mut self) {
unsafe {
let () = msg_send![self.0, release];
}
}
}
unsafe impl Send for GfxManagedMetalLayerDelegate {}
unsafe impl Sync for GfxManagedMetalLayerDelegate {}
impl Instance {
pub fn create(_: &str, _: u32) -> Result<Self, hal::UnsupportedBackend> {
Ok(Instance {
experiments: Experiments::default(),
gfx_managed_metal_layer_delegate: GfxManagedMetalLayerDelegate::new()
})
}
#[cfg(feature = "winit")]
pub fn create_surface(&self, window: &winit::window::Window) -> Surface {
#[cfg(target_os = "ios")]
{
use winit::platform::ios::WindowExtIOS;
self.create_surface_from_uiview(window.ui_view(), false)
}
#[cfg(target_os = "macos")]
{
use winit::platform::macos::WindowExtMacOS;
self.create_surface_from_nsview(window.ns_view(), false)
}
}
#[cfg(target_os = "ios")]
unsafe fn create_from_uiview(&self, uiview: *mut c_void) -> window::SurfaceInner {
let view: cocoa::base::id = mem::transmute(uiview);
if view.is_null() {
panic!("window does not have a valid contentView");
}
let main_layer: CAMetalLayer = msg_send![view, layer];
let class = class!(CAMetalLayer);
let is_valid_layer: BOOL = msg_send![main_layer, isKindOfClass: class];
let render_layer = if is_valid_layer == YES {
main_layer
} else {
// If the main layer is not a CAMetalLayer, we create a CAMetalLayer sublayer and use it instead.
// Unlike on macOS, we cannot replace the main view, as UIView does not allow it (whereas NSView does).
let new_layer: CAMetalLayer = msg_send![class, new];
let bounds: CGRect = msg_send![main_layer, bounds];
let () = msg_send![new_layer, setFrame: bounds];
let () = msg_send![main_layer, addSublayer: new_layer];
new_layer
};
let window: cocoa::base::id = msg_send![view, window];
if !window.is_null() {
let screen: cocoa::base::id = msg_send![window, screen];
assert!(!screen.is_null(), "window is not attached to a screen");
let scale_factor: CGFloat = msg_send![screen, nativeScale];
let () = msg_send![view, setContentScaleFactor: scale_factor];
}
let _: *mut c_void = msg_send![view, retain];
window::SurfaceInner::new(NonNull::new(view), render_layer)
}
#[cfg(target_os = "macos")]
unsafe fn create_from_nsview(&self, nsview: *mut c_void) -> window::SurfaceInner {
let view: cocoa::base::id = mem::transmute(nsview);
if view.is_null() {
panic!("window does not have a valid contentView");
}
let existing: CAMetalLayer = msg_send![view, layer];
let class = class!(CAMetalLayer);
// Deprecated! Clients should use `create_surface_from_layer` instead.
let is_actually_layer: BOOL = msg_send![view, isKindOfClass: class];
if is_actually_layer == YES {
return self.create_from_layer(view);
}
let use_current = if existing.is_null() {
false
} else {
let result: BOOL = msg_send![existing, isKindOfClass: class];
result == YES
};
let render_layer: CAMetalLayer = if use_current {
existing
} else {
let layer: CAMetalLayer = msg_send![class, new];
let () = msg_send![view, setLayer: layer];
let bounds: CGRect = msg_send![view, bounds];
let () = msg_send![layer, setBounds: bounds];
let window: cocoa::base::id = msg_send![view, window];
if !window.is_null() {
let scale_factor: CGFloat = msg_send![window, backingScaleFactor];
let () = msg_send![layer, setContentsScale: scale_factor];
}
let () = msg_send![layer, setDelegate: self.gfx_managed_metal_layer_delegate.0];
layer
};
let _: *mut c_void = msg_send![view, retain];
window::SurfaceInner::new(NonNull::new(view), render_layer)
}
unsafe fn create_from_layer(&self, layer: CAMetalLayer) -> window::SurfaceInner {
let class = class!(CAMetalLayer);
let proper_kind: BOOL = msg_send![layer, isKindOfClass: class];
assert_eq!(proper_kind, YES);
let _: *mut c_void = msg_send![layer, retain];
window::SurfaceInner::new(None, layer)
}
pub fn create_surface_from_layer(
&self,
layer: CAMetalLayer,
enable_signposts: bool,
) -> Surface {
unsafe { self.create_from_layer(layer) }.into_surface(enable_signposts)
}
#[cfg(target_os = "macos")]
pub fn create_surface_from_nsview(
&self,
nsview: *mut c_void,
enable_signposts: bool,
) -> Surface {
unsafe { self.create_from_nsview(nsview) }.into_surface(enable_signposts)
}
#[cfg(target_os = "ios")]
pub fn create_surface_from_uiview(
&self,
uiview: *mut c_void,
enable_signposts: bool,
) -> Surface {
unsafe { self.create_from_uiview(uiview) }.into_surface(enable_signposts)
}
}
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq)]
pub enum Backend {}
impl hal::Backend for Backend {
type PhysicalDevice = device::PhysicalDevice;
type Device = device::Device;
type Surface = window::Surface;
type Swapchain = window::Swapchain;
type QueueFamily = QueueFamily;
type CommandQueue = command::CommandQueue;
type CommandBuffer = command::CommandBuffer;
type Memory = native::Memory;
type CommandPool = command::CommandPool;
type ShaderModule = native::ShaderModule;
type RenderPass = native::RenderPass;
type Framebuffer = native::Framebuffer;
type Buffer = native::Buffer;
type BufferView = native::BufferView;
type Image = native::Image;
type ImageView = native::ImageView;
type Sampler = native::Sampler;
type ComputePipeline = native::ComputePipeline;
type GraphicsPipeline = native::GraphicsPipeline;
type PipelineCache = native::PipelineCache;
type PipelineLayout = native::PipelineLayout;
type DescriptorSetLayout = native::DescriptorSetLayout;
type DescriptorPool = native::DescriptorPool;
type DescriptorSet = native::DescriptorSet;
type Fence = native::Fence;
type Semaphore = native::Semaphore;
type Event = native::Event;
type QueryPool = native::QueryPool;
}
const RESOURCE_HEAP_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v3,
MTLFeatureSet::iOS_GPUFamily2_v3,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::tvOS_GPUFamily1_v2,
];
const ARGUMENT_BUFFER_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::macOS_GPUFamily1_v3,
];
const MUTABLE_COMPARISON_SAMPLER_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::macOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily3_v1,
];
const ASTC_PIXEL_FORMAT_FEATURES: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily2_v1,
MTLFeatureSet::iOS_GPUFamily2_v2,
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily2_v3,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily2_v4,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily1_v1,
MTLFeatureSet::tvOS_GPUFamily1_v2,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::tvOS_GPUFamily2_v1,
];
const R8UNORM_SRGB_ALL: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily2_v3,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily2_v4,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily1_v2,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::tvOS_GPUFamily2_v1,
];
const R8SNORM_NO_RESOLVE: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily1_v2,
MTLFeatureSet::iOS_GPUFamily1_v3,
MTLFeatureSet::iOS_GPUFamily1_v4,
];
const RG8UNORM_SRGB_NO_WRITE: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
MTLFeatureSet::iOS_GPUFamily1_v2,
MTLFeatureSet::iOS_GPUFamily2_v2,
MTLFeatureSet::iOS_GPUFamily1_v3,
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::tvOS_GPUFamily1_v1,
];
const RG8SNORM_NO_RESOLVE: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily1_v2,
MTLFeatureSet::iOS_GPUFamily1_v3,
MTLFeatureSet::iOS_GPUFamily1_v4,
];
const RGBA8_SRGB: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily2_v3,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily2_v4,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily1_v2,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::tvOS_GPUFamily2_v1,
];
const RGB10A2UNORM_ALL: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily2_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
MTLFeatureSet::macOS_GPUFamily1_v2,
MTLFeatureSet::macOS_GPUFamily1_v3,
];
const RGB10A2UINT_COLOR_WRITE: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily2_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
MTLFeatureSet::macOS_GPUFamily1_v2,
MTLFeatureSet::macOS_GPUFamily1_v3,
];
const RG11B10FLOAT_ALL: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily2_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
MTLFeatureSet::macOS_GPUFamily1_v2,
MTLFeatureSet::macOS_GPUFamily1_v3,
];
const RGB9E5FLOAT_ALL: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::iOS_GPUFamily3_v2,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily2_v1,
];
const BGR10A2_ALL: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::iOS_GPUFamily2_v4,
MTLFeatureSet::iOS_GPUFamily3_v3,
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::tvOS_GPUFamily2_v1,
];
const BASE_INSTANCE_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::iOS_GPUFamily3_v1,
];
const DUAL_SOURCE_BLEND_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v4,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::macOS_GPUFamily1_v2,
];
const LAYERED_RENDERING_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily5_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
];
const FUNCTION_SPECIALIZATION_SUPPORT: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily1_v3,
MTLFeatureSet::tvOS_GPUFamily1_v2,
MTLFeatureSet::macOS_GPUFamily1_v2,
];
const DEPTH_CLIP_MODE: &[MTLFeatureSet] = &[
MTLFeatureSet::iOS_GPUFamily4_v1,
MTLFeatureSet::tvOS_GPUFamily1_v3,
MTLFeatureSet::macOS_GPUFamily1_v1,
];
#[derive(Clone, Debug)]
struct PrivateCapabilities {
pub os_is_mac: bool,
os_version: (u32, u32),
msl_version: metal::MTLLanguageVersion,
exposed_queues: usize,
// if TRUE, we'll report `NON_FILL_POLYGON_MODE` feature without the points support
expose_line_mode: bool,
resource_heaps: bool,
argument_buffers: bool,
shared_textures: bool,
mutable_comparison_samplers: bool,
base_instance: bool,
dual_source_blending: bool,
low_power: bool,
headless: bool,
layered_rendering: bool,
function_specialization: bool,
depth_clip_mode: bool,
format_depth24_stencil8: bool,
format_depth32_stencil8_filter: bool,
format_depth32_stencil8_none: bool,
format_min_srgb_channels: u8,
format_b5: bool,
format_bc: bool,
format_eac_etc: bool,
format_astc: bool,
format_r8unorm_srgb_all: bool,
format_r8unorm_srgb_no_write: bool,
format_r8snorm_all: bool,
format_r16_norm_all: bool,
format_rg8unorm_srgb_all: bool,
format_rg8unorm_srgb_no_write: bool,
format_rg8snorm_all: bool,
format_r32_all: bool,
format_r32_no_write: bool,
format_r32float_no_write_no_filter: bool,
format_r32float_no_filter: bool,
format_r32float_all: bool,
format_rgba8_srgb_all: bool,
format_rgba8_srgb_no_write: bool,
format_rgb10a2_unorm_all: bool,
format_rgb10a2_unorm_no_write: bool,
format_rgb10a2_uint_color: bool,
format_rgb10a2_uint_color_write: bool,
format_rg11b10_all: bool,
format_rg11b10_no_write: bool,
format_rgb9e5_all: bool,
format_rgb9e5_no_write: bool,
format_rgb9e5_filter_only: bool,
format_rg32_color: bool,
format_rg32_color_write: bool,
format_rg32float_all: bool,
format_rg32float_color_blend: bool,
format_rg32float_no_filter: bool,
format_rgba32int_color: bool,
format_rgba32int_color_write: bool,
format_rgba32float_color: bool,
format_rgba32float_color_write: bool,
format_rgba32float_all: bool,
format_depth16unorm: bool,
format_depth32float_filter: bool,
format_depth32float_none: bool,
format_bgr10a2_all: bool,
format_bgr10a2_no_write: bool,
max_buffers_per_stage: ResourceIndex,
max_textures_per_stage: ResourceIndex,
max_samplers_per_stage: ResourceIndex,
buffer_alignment: u64,
max_buffer_size: u64,
max_texture_size: u64,
max_texture_3d_size: u64,
max_texture_layers: u64,
max_fragment_input_components: u64,
sample_count_mask: u8,
}
impl PrivateCapabilities {
fn version_at_least(major: u32, minor: u32, needed_major: u32, needed_minor: u32) -> bool {
major > needed_major || (major == needed_major && minor >= needed_minor)
}
fn supports_any(raw: &metal::DeviceRef, feature_sets: &[MTLFeatureSet]) -> bool {
feature_sets
.iter()
.cloned()
.any(|x| raw.supports_feature_set(x))
}
fn new(device: &metal::Device, experiments: &Experiments) -> Self {
#[repr(C)]
#[derive(Clone, Copy, Debug)]
struct NSOperatingSystemVersion {
major: NSInteger,
minor: NSInteger,
patch: NSInteger,
}
let version: NSOperatingSystemVersion = unsafe {
let process_info: *mut Object = msg_send![class!(NSProcessInfo), processInfo];
msg_send![process_info, operatingSystemVersion]
};
let major = version.major as u32;
let minor = version.minor as u32;
let os_is_mac = device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1);
let mut sample_count_mask: u8 = 1 | 4; // 1 and 4 samples are supported on all devices
if device.supports_sample_count(2) {
sample_count_mask |= 2;
}
if device.supports_sample_count(8) {
sample_count_mask |= 8;
}
PrivateCapabilities {
os_is_mac,
os_version: (major, minor),
msl_version: if os_is_mac {
if Self::version_at_least(major, minor, 10, 14) {
MTLLanguageVersion::V2_1
} else if Self::version_at_least(major, minor, 10, 13) {
MTLLanguageVersion::V2_0
} else if Self::version_at_least(major, minor, 10, 12) {
MTLLanguageVersion::V1_2
} else if Self::version_at_least(major, minor, 10, 11) {
MTLLanguageVersion::V1_1
} else {
MTLLanguageVersion::V1_0
}
} else if Self::version_at_least(major, minor, 12, 0) {
MTLLanguageVersion::V2_1
} else if Self::version_at_least(major, minor, 11, 0) {
MTLLanguageVersion::V2_0
} else if Self::version_at_least(major, minor, 10, 0) {
MTLLanguageVersion::V1_2
} else if Self::version_at_least(major, minor, 9, 0) {
MTLLanguageVersion::V1_1
} else {
MTLLanguageVersion::V1_0
},
exposed_queues: 1,
expose_line_mode: true,
resource_heaps: Self::supports_any(&device, RESOURCE_HEAP_SUPPORT),
argument_buffers: experiments.argument_buffers
&& Self::supports_any(&device, ARGUMENT_BUFFER_SUPPORT),
shared_textures: !os_is_mac,
mutable_comparison_samplers: Self::supports_any(
&device,
MUTABLE_COMPARISON_SAMPLER_SUPPORT,
),
base_instance: Self::supports_any(&device, BASE_INSTANCE_SUPPORT),
dual_source_blending: Self::supports_any(&device, DUAL_SOURCE_BLEND_SUPPORT),
low_power: !os_is_mac || device.is_low_power(),
headless: os_is_mac && device.is_headless(),
layered_rendering: Self::supports_any(&device, LAYERED_RENDERING_SUPPORT),
function_specialization: Self::supports_any(&device, FUNCTION_SPECIALIZATION_SUPPORT),
depth_clip_mode: Self::supports_any(&device, DEPTH_CLIP_MODE),
format_depth24_stencil8: os_is_mac && device.d24_s8_supported(),
format_depth32_stencil8_filter: os_is_mac,
format_depth32_stencil8_none: !os_is_mac,
format_min_srgb_channels: if os_is_mac { 4 } else { 1 },
format_b5: !os_is_mac,
format_bc: os_is_mac,
format_eac_etc: !os_is_mac,
format_astc: Self::supports_any(&device, ASTC_PIXEL_FORMAT_FEATURES),
format_r8unorm_srgb_all: Self::supports_any(&device, R8UNORM_SRGB_ALL),
format_r8unorm_srgb_no_write: !Self::supports_any(&device, R8UNORM_SRGB_ALL)
&& !os_is_mac,
format_r8snorm_all: !Self::supports_any(&device, R8SNORM_NO_RESOLVE),
format_r16_norm_all: os_is_mac,
format_rg8unorm_srgb_all: Self::supports_any(&device, RG8UNORM_SRGB_NO_WRITE),
format_rg8unorm_srgb_no_write: !Self::supports_any(&device, RG8UNORM_SRGB_NO_WRITE)
&& !os_is_mac,
format_rg8snorm_all: !Self::supports_any(&device, RG8SNORM_NO_RESOLVE),
format_r32_all: !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_r32_no_write: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_r32float_no_write_no_filter: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
) && !os_is_mac,
format_r32float_no_filter: !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
) && !os_is_mac,
format_r32float_all: os_is_mac,
format_rgba8_srgb_all: Self::supports_any(&device, RGBA8_SRGB),
format_rgba8_srgb_no_write: !Self::supports_any(&device, RGBA8_SRGB),
format_rgb10a2_unorm_all: Self::supports_any(&device, RGB10A2UNORM_ALL),
format_rgb10a2_unorm_no_write: !Self::supports_any(&device, RGB10A2UNORM_ALL),
format_rgb10a2_uint_color: !Self::supports_any(&device, RGB10A2UINT_COLOR_WRITE),
format_rgb10a2_uint_color_write: Self::supports_any(&device, RGB10A2UINT_COLOR_WRITE),
format_rg11b10_all: Self::supports_any(&device, RG11B10FLOAT_ALL),
format_rg11b10_no_write: !Self::supports_any(&device, RG11B10FLOAT_ALL),
format_rgb9e5_all: Self::supports_any(&device, RGB9E5FLOAT_ALL),
format_rgb9e5_no_write: !Self::supports_any(&device, RGB9E5FLOAT_ALL) && !os_is_mac,
format_rgb9e5_filter_only: os_is_mac,
format_rg32_color: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rg32_color_write: !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rg32float_all: os_is_mac,
format_rg32float_color_blend: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rg32float_no_filter: !os_is_mac
&& !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rgba32int_color: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rgba32int_color_write: !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rgba32float_color: Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
),
format_rgba32float_color_write: !Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v1,
MTLFeatureSet::iOS_GPUFamily2_v1,
],
) && !os_is_mac,
format_rgba32float_all: os_is_mac,
format_depth16unorm: device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v2),
format_depth32float_filter: device
.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1),
format_depth32float_none: !device
.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v1),
format_bgr10a2_all: Self::supports_any(&device, BGR10A2_ALL),
format_bgr10a2_no_write: !device
.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v3),
max_buffers_per_stage: 31,
max_textures_per_stage: if os_is_mac { 128 } else { 31 },
max_samplers_per_stage: 16,
buffer_alignment: if os_is_mac { 256 } else { 64 },
max_buffer_size: if device.supports_feature_set(MTLFeatureSet::macOS_GPUFamily1_v2) {
1 << 30 // 1GB on macOS GPUFamily1_v2 and up
} else {
1 << 28 // 256MB otherwise
},
max_texture_size: if Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily3_v1,
MTLFeatureSet::tvOS_GPUFamily2_v1,
MTLFeatureSet::macOS_GPUFamily1_v1,
],
) {
16384
} else if Self::supports_any(
&device,
&[
MTLFeatureSet::iOS_GPUFamily1_v2,
MTLFeatureSet::iOS_GPUFamily2_v2,
MTLFeatureSet::tvOS_GPUFamily1_v1,
],
) {
8192
} else {
4096
},
max_texture_3d_size: 2048,
max_texture_layers: 2048,
max_fragment_input_components: if os_is_mac { 128 } else { 60 },
sample_count_mask,
}
}
fn has_version_at_least(&self, needed_major: u32, needed_minor: u32) -> bool {
let (major, minor) = self.os_version;
Self::version_at_least(major, minor, needed_major, needed_minor)
}
}
#[derive(Clone, Copy, Debug)]
struct PrivateDisabilities {
/// Near depth is not respected properly on some Intel GPUs.
broken_viewport_near_depth: bool,
/// Multi-target clears don't appear to work properly on Intel GPUs.
broken_layered_clear_image: bool,
}
trait AsNative {
type Native;
fn from(native: &Self::Native) -> Self;
fn as_native(&self) -> &Self::Native;
}
pub type BufferPtr = NonNull<metal::MTLBuffer>;
pub type TexturePtr = NonNull<metal::MTLTexture>;
pub type SamplerPtr = NonNull<metal::MTLSamplerState>;
pub type ResourcePtr = NonNull<metal::MTLResource>;
//TODO: make this a generic struct with a single generic implementation
impl AsNative for BufferPtr {
type Native = metal::BufferRef;
#[inline]
fn from(native: &metal::BufferRef) -> Self {
unsafe { NonNull::new_unchecked(native.as_ptr()) }
}
#[inline]
fn as_native(&self) -> &metal::BufferRef {
unsafe { metal::BufferRef::from_ptr(self.as_ptr()) }
}
}
impl AsNative for TexturePtr {
type Native = metal::TextureRef;
#[inline]
fn from(native: &metal::TextureRef) -> Self {
unsafe { NonNull::new_unchecked(native.as_ptr()) }
}
#[inline]
fn as_native(&self) -> &metal::TextureRef {
unsafe { metal::TextureRef::from_ptr(self.as_ptr()) }
}
}
impl AsNative for SamplerPtr {
type Native = metal::SamplerStateRef;
#[inline]
fn from(native: &metal::SamplerStateRef) -> Self {
unsafe { NonNull::new_unchecked(native.as_ptr()) }
}
#[inline]
fn as_native(&self) -> &metal::SamplerStateRef {
unsafe { metal::SamplerStateRef::from_ptr(self.as_ptr()) } | }
}
impl AsNative for ResourcePtr {
type Native = metal::ResourceRef;
#[inline]
fn from(native: &metal::ResourceRef) -> Self {
unsafe { NonNull::new_unchecked(native.as_ptr()) }
}
#[inline]
fn as_native(&self) -> &metal::ResourceRef {
unsafe { metal::ResourceRef::from_ptr(self.as_ptr()) }
}
} | |
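// Sketch for the TODO above (illustrative, not part of this backend): the four
// hand-written `AsNative` impls differ only in their pointer and native ref
// types, so a declarative macro could stamp them out. Kept commented out here
// because the concrete impls already exist above.
//
// macro_rules! impl_as_native {
//     ($ptr:ty, $native:ty) => {
//         impl AsNative for $ptr {
//             type Native = $native;
//             #[inline]
//             fn from(native: &$native) -> Self {
//                 unsafe { NonNull::new_unchecked(native.as_ptr()) }
//             }
//             #[inline]
//             fn as_native(&self) -> &$native {
//                 unsafe { <$native>::from_ptr(self.as_ptr()) }
//             }
//         }
//     };
// }
//
// impl_as_native!(BufferPtr, metal::BufferRef);
// impl_as_native!(TexturePtr, metal::TextureRef);
// impl_as_native!(SamplerPtr, metal::SamplerStateRef);
// impl_as_native!(ResourcePtr, metal::ResourceRef);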
manager_test.go | package cache
import (
"archive/tar"
"bytes"
"compress/gzip"
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strconv"
"sync"
"testing"
"time"
ctdcompression "github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/content/local"
"github.com/containerd/containerd/diff/apply"
"github.com/containerd/containerd/diff/walking"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/leases"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/namespaces"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/containerd/stargz-snapshotter/estargz"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/snapshot"
containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
"github.com/moby/buildkit/util/compression"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/winlayers"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
bolt "go.etcd.io/bbolt"
"golang.org/x/sync/errgroup"
)
type cmOpt struct {
snapshotterName string
snapshotter snapshots.Snapshotter
tmpdir string
}
type cmOut struct {
manager Manager
lm leases.Manager
cs content.Store
}
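// newCacheManager wires a cache Manager on top of throwaway stores (a bolt
// metadata DB, a local content store and the given snapshotter) rooted at a
// temp dir. The returned cleanup runs the accumulated defers in reverse
// order; it is also invoked automatically if construction fails partway.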
func newCacheManager(ctx context.Context, opt cmOpt) (co *cmOut, cleanup func() error, err error) {
ns, ok := namespaces.Namespace(ctx)
if !ok {
return nil, nil, errors.Errorf("namespace required for test")
}
if opt.snapshotterName == "" {
opt.snapshotterName = "native"
}
tmpdir, err := ioutil.TempDir("", "cachemanager")
if err != nil {
return nil, nil, err
}
defers := make([]func() error, 0)
cleanup = func() error {
var err error
for i := range defers {
if err1 := defers[len(defers)-1-i](); err1 != nil && err == nil {
err = err1
}
}
return err
}
defer func() {
if err != nil && cleanup != nil {
cleanup()
}
}()
if opt.tmpdir == "" {
defers = append(defers, func() error {
return os.RemoveAll(tmpdir)
})
} else {
os.RemoveAll(tmpdir)
tmpdir = opt.tmpdir
}
if opt.snapshotter == nil {
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
if err != nil {
return nil, nil, err
}
opt.snapshotter = snapshotter
}
store, err := local.NewStore(tmpdir)
if err != nil {
return nil, nil, err
}
db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil)
if err != nil {
return nil, nil, err
}
defers = append(defers, func() error {
return db.Close()
})
mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{
opt.snapshotterName: opt.snapshotter,
})
if err := mdb.Init(context.TODO()); err != nil {
return nil, nil, err
}
store = containerdsnapshot.NewContentStore(mdb.ContentStore(), ns)
lm := leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), ns)
md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
if err != nil {
return nil, nil, err
}
cm, err := NewManager(ManagerOpt{
Snapshotter: snapshot.FromContainerdSnapshotter(opt.snapshotterName, containerdsnapshot.NSSnapshotter(ns, mdb.Snapshotter(opt.snapshotterName)), nil),
MetadataStore: md,
ContentStore: store,
LeaseManager: lm,
GarbageCollect: mdb.GarbageCollect,
Applier: winlayers.NewFileSystemApplierWithWindows(store, apply.NewFileSystemApplier(store)),
Differ: winlayers.NewWalkingDiffWithWindows(store, walking.NewWalkingDiff(store)),
})
if err != nil {
return nil, nil, err
}
return &cmOut{
manager: cm,
lm: lm,
cs: store,
}, cleanup, nil
}
func TestManager(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
_, err = cm.Get(ctx, "foobar")
require.Error(t, err)
checkDiskUsage(ctx, t, cm, 0, 0)
active, err := cm.New(ctx, nil, nil, CachePolicyRetain)
require.NoError(t, err)
m, err := active.Mount(ctx, false, nil)
require.NoError(t, err)
lm := snapshot.LocalMounter(m)
target, err := lm.Mount()
require.NoError(t, err)
fi, err := os.Stat(target)
require.NoError(t, err)
require.True(t, fi.IsDir())
err = lm.Unmount()
require.NoError(t, err)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, ErrLocked))
checkDiskUsage(ctx, t, cm, 1, 0)
snap, err := active.Commit(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, ErrLocked))
err = snap.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 1)
active, err = cm.GetMutable(ctx, active.ID())
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
snap, err = active.Commit(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
err = snap.(*immutableRef).finalizeLocked(ctx)
require.NoError(t, err)
err = snap.Release(ctx)
require.NoError(t, err)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errNotFound))
_, err = cm.GetMutable(ctx, snap.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errInvalid))
snap, err = cm.Get(ctx, snap.ID())
require.NoError(t, err)
snap2, err := cm.Get(ctx, snap.ID())
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
err = snap.Release(ctx)
require.NoError(t, err)
active2, err := cm.New(ctx, snap2, nil, CachePolicyRetain)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
snap3, err := active2.Commit(ctx)
require.NoError(t, err)
err = snap2.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
err = snap3.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 2)
buf := pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 0)
require.Equal(t, len(buf.all), 2)
err = cm.Close()
require.NoError(t, err)
dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
}
func TestLazyGetByBlob(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
// Test for #2226 (https://github.com/moby/buildkit/issues/2226): create lazy blobs with the same diff ID but
// different digests (due to different compression) and make sure GetByBlob still works
_, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
require.NoError(t, err)
descHandlers := DescHandlers(make(map[digest.Digest]*DescHandler))
descHandlers[desc.Digest] = &DescHandler{}
diffID, err := diffIDFromDescriptor(desc)
require.NoError(t, err)
_, err = cm.GetByBlob(ctx, desc, nil, descHandlers)
require.NoError(t, err)
_, desc2, err := mapToBlob(map[string]string{"foo": "bar"}, false)
require.NoError(t, err) | diffID2, err := diffIDFromDescriptor(desc2)
require.NoError(t, err)
require.NotEqual(t, desc.Digest, desc2.Digest)
require.Equal(t, diffID, diffID2)
_, err = cm.GetByBlob(ctx, desc2, nil, descHandlers2)
require.NoError(t, err)
}
func TestSnapshotExtract(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
}
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
require.NoError(t, err)
snap, err := cm.GetByBlob(ctx, desc, nil)
require.NoError(t, err)
require.True(t, snap.(*immutableRef).getBlobOnly())
b2, desc2, err := mapToBlob(map[string]string{"foo": "bar123"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b2), desc2)
require.NoError(t, err)
snap2, err := cm.GetByBlob(ctx, desc2, snap)
require.NoError(t, err)
size, err := snap2.(*immutableRef).size(ctx)
require.NoError(t, err)
require.Equal(t, int64(len(b2)), size)
require.True(t, snap2.(*immutableRef).getBlobOnly())
dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
checkNumBlobs(ctx, t, co.cs, 2)
err = snap2.Extract(ctx, nil)
require.NoError(t, err)
require.False(t, snap.(*immutableRef).getBlobOnly())
require.False(t, snap2.(*immutableRef).getBlobOnly())
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
buf := pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
require.Equal(t, len(buf.all), 0)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
checkNumBlobs(ctx, t, co.cs, 2)
id := snap.ID()
err = snap.Release(context.TODO())
require.NoError(t, err)
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
snap, err = cm.Get(ctx, id)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
err = snap2.Release(context.TODO())
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 1)
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
require.Equal(t, len(buf.all), 1)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
checkNumBlobs(ctx, t, co.cs, 1)
err = snap.Release(context.TODO())
require.NoError(t, err)
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 0)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
checkNumBlobs(ctx, t, co.cs, 0)
}
func TestExtractOnMutable(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
}
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
active, err := cm.New(ctx, nil, nil)
require.NoError(t, err)
snap, err := active.Commit(ctx)
require.NoError(t, err)
b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
require.NoError(t, err)
b2, desc2, err := mapToBlob(map[string]string{"foo2": "1"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref2", bytes.NewBuffer(b2), desc2)
require.NoError(t, err)
_, err = cm.GetByBlob(ctx, desc2, snap)
require.Error(t, err)
leaseCtx, done, err := leaseutil.WithLease(ctx, co.lm, leases.WithExpiration(0))
require.NoError(t, err)
compressionType := compression.FromMediaType(desc.MediaType)
if compressionType == compression.UnknownCompression {
t.Errorf("unhandled layer media type: %q", desc.MediaType)
}
err = snap.(*immutableRef).setBlob(leaseCtx, compressionType, desc)
done(context.TODO())
require.NoError(t, err)
err = snap.(*immutableRef).setChains(leaseCtx)
require.NoError(t, err)
snap2, err := cm.GetByBlob(ctx, desc2, snap)
require.NoError(t, err)
err = snap.Release(context.TODO())
require.NoError(t, err)
require.True(t, snap2.(*immutableRef).getBlobOnly())
size, err := snap2.(*immutableRef).size(ctx)
require.NoError(t, err)
require.Equal(t, int64(len(b2)), size)
dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
checkNumBlobs(ctx, t, co.cs, 2)
err = snap2.Extract(ctx, nil)
require.NoError(t, err)
require.False(t, snap.(*immutableRef).getBlobOnly())
require.False(t, snap2.(*immutableRef).getBlobOnly())
buf := pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
require.Equal(t, len(buf.all), 0)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
err = snap2.Release(context.TODO())
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 2)
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 0)
require.Equal(t, len(buf.all), 2)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
checkNumBlobs(ctx, t, co.cs, 0)
}
func TestSetBlob(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
require.NoError(t, err)
defer done(context.TODO())
cm := co.manager
active, err := cm.New(ctx, nil, nil)
require.NoError(t, err)
snap, err := active.Commit(ctx)
require.NoError(t, err)
snapRef := snap.(*immutableRef)
require.Equal(t, "", string(snapRef.getDiffID()))
require.Equal(t, "", string(snapRef.getBlob()))
require.Equal(t, "", string(snapRef.getChainID()))
require.Equal(t, "", string(snapRef.getBlobChainID()))
require.False(t, snapRef.getBlobOnly())
ctx, clean, err := leaseutil.WithLease(ctx, co.lm)
require.NoError(t, err)
defer clean(context.TODO())
b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref1", bytes.NewBuffer(b), desc)
require.NoError(t, err)
err = snap.(*immutableRef).setBlob(ctx, compression.UnknownCompression, ocispecs.Descriptor{
Digest: digest.FromBytes([]byte("foobar")),
Annotations: map[string]string{
"containerd.io/uncompressed": digest.FromBytes([]byte("foobar2")).String(),
},
})
require.Error(t, err)
compressionType := compression.FromMediaType(desc.MediaType)
if compressionType == compression.UnknownCompression {
t.Errorf("unhandled layer media type: %q", desc.MediaType)
}
err = snap.(*immutableRef).setBlob(ctx, compressionType, desc)
require.NoError(t, err)
err = snap.(*immutableRef).setChains(ctx)
require.NoError(t, err)
snapRef = snap.(*immutableRef)
require.Equal(t, desc.Annotations["containerd.io/uncompressed"], string(snapRef.getDiffID()))
require.Equal(t, desc.Digest, snapRef.getBlob())
require.Equal(t, desc.MediaType, snapRef.getMediaType())
require.Equal(t, snapRef.getDiffID(), snapRef.getChainID())
require.Equal(t, digest.FromBytes([]byte(desc.Digest+" "+snapRef.getDiffID())), snapRef.getBlobChainID())
require.Equal(t, snap.ID(), snapRef.getSnapshotID())
require.False(t, snapRef.getBlobOnly())
active, err = cm.New(ctx, snap, nil)
require.NoError(t, err)
snap2, err := active.Commit(ctx)
require.NoError(t, err)
b2, desc2, err := mapToBlob(map[string]string{"foo2": "bar2"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref2", bytes.NewBuffer(b2), desc2)
require.NoError(t, err)
compressionType2 := compression.FromMediaType(desc2.MediaType)
if compressionType2 == compression.UnknownCompression {
t.Errorf("unhandled layer media type: %q", desc2.MediaType)
}
err = snap2.(*immutableRef).setBlob(ctx, compressionType2, desc2)
require.NoError(t, err)
err = snap2.(*immutableRef).setChains(ctx)
require.NoError(t, err)
snapRef2 := snap2.(*immutableRef)
require.Equal(t, desc2.Annotations["containerd.io/uncompressed"], string(snapRef2.getDiffID()))
require.Equal(t, desc2.Digest, snapRef2.getBlob())
require.Equal(t, desc2.MediaType, snapRef2.getMediaType())
require.Equal(t, digest.FromBytes([]byte(snapRef.getChainID()+" "+snapRef2.getDiffID())), snapRef2.getChainID())
require.Equal(t, digest.FromBytes([]byte(snapRef.getBlobChainID()+" "+digest.FromBytes([]byte(desc2.Digest+" "+snapRef2.getDiffID())))), snapRef2.getBlobChainID())
require.Equal(t, snap2.ID(), snapRef2.getSnapshotID())
require.False(t, snapRef2.getBlobOnly())
b3, desc3, err := mapToBlob(map[string]string{"foo3": "bar3"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref3", bytes.NewBuffer(b3), desc3)
require.NoError(t, err)
snap3, err := cm.GetByBlob(ctx, desc3, snap)
require.NoError(t, err)
snapRef3 := snap3.(*immutableRef)
require.Equal(t, desc3.Annotations["containerd.io/uncompressed"], string(snapRef3.getDiffID()))
require.Equal(t, desc3.Digest, snapRef3.getBlob())
require.Equal(t, desc3.MediaType, snapRef3.getMediaType())
require.Equal(t, digest.FromBytes([]byte(snapRef.getChainID()+" "+snapRef3.getDiffID())), snapRef3.getChainID())
require.Equal(t, digest.FromBytes([]byte(snapRef.getBlobChainID()+" "+digest.FromBytes([]byte(desc3.Digest+" "+snapRef3.getDiffID())))), snapRef3.getBlobChainID())
require.Equal(t, string(snapRef3.getChainID()), snapRef3.getSnapshotID())
require.True(t, snapRef3.getBlobOnly())
// snap4 is the same as snap2
snap4, err := cm.GetByBlob(ctx, desc2, snap)
require.NoError(t, err)
require.Equal(t, snap2.ID(), snap4.ID())
// snap5 is a different blob but has the same diffID as snap2
b5, desc5, err := mapToBlob(map[string]string{"foo5": "bar5"}, true)
require.NoError(t, err)
desc5.Annotations["containerd.io/uncompressed"] = snapRef2.getDiffID().String()
err = content.WriteBlob(ctx, co.cs, "ref5", bytes.NewBuffer(b5), desc5)
require.NoError(t, err)
snap5, err := cm.GetByBlob(ctx, desc5, snap)
require.NoError(t, err)
snapRef5 := snap5.(*immutableRef)
require.NotEqual(t, snap2.ID(), snap5.ID())
require.Equal(t, snapRef2.getSnapshotID(), snapRef5.getSnapshotID())
require.Equal(t, snapRef2.getDiffID(), snapRef5.getDiffID())
require.Equal(t, desc5.Digest, snapRef5.getBlob())
require.Equal(t, snapRef2.getChainID(), snapRef5.getChainID())
require.NotEqual(t, snapRef2.getBlobChainID(), snapRef5.getBlobChainID())
require.Equal(t, digest.FromBytes([]byte(snapRef.getBlobChainID()+" "+digest.FromBytes([]byte(desc5.Digest+" "+snapRef2.getDiffID())))), snapRef5.getBlobChainID())
// snap6 is a child of snap3
b6, desc6, err := mapToBlob(map[string]string{"foo6": "bar6"}, true)
require.NoError(t, err)
err = content.WriteBlob(ctx, co.cs, "ref6", bytes.NewBuffer(b6), desc6)
require.NoError(t, err)
snap6, err := cm.GetByBlob(ctx, desc6, snap3)
require.NoError(t, err)
snapRef6 := snap6.(*immutableRef)
require.Equal(t, desc6.Annotations["containerd.io/uncompressed"], string(snapRef6.getDiffID()))
require.Equal(t, desc6.Digest, snapRef6.getBlob())
require.Equal(t, digest.FromBytes([]byte(snapRef3.getChainID()+" "+snapRef6.getDiffID())), snapRef6.getChainID())
require.Equal(t, digest.FromBytes([]byte(snapRef3.getBlobChainID()+" "+digest.FromBytes([]byte(snapRef6.getBlob()+" "+snapRef6.getDiffID())))), snapRef6.getBlobChainID())
require.Equal(t, string(snapRef6.getChainID()), snapRef6.getSnapshotID())
require.True(t, snapRef6.getBlobOnly())
_, err = cm.GetByBlob(ctx, ocispecs.Descriptor{
Digest: digest.FromBytes([]byte("notexist")),
Annotations: map[string]string{
"containerd.io/uncompressed": digest.FromBytes([]byte("notexist")).String(),
},
}, snap3)
require.Error(t, err)
clean(context.TODO())
}
func TestPrune(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
active, err := cm.New(ctx, nil, nil)
require.NoError(t, err)
snap, err := active.Commit(ctx)
require.NoError(t, err)
active, err = cm.New(ctx, snap, nil, CachePolicyRetain)
require.NoError(t, err)
snap2, err := active.Commit(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
dirs, err := ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
// prune while refs are still held does nothing
buf := pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
require.Equal(t, len(buf.all), 0)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 2, len(dirs))
err = snap2.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 1)
// prune while a single ref is still held deletes only the released one
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 1, 0)
require.Equal(t, len(buf.all), 1)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 1, len(dirs))
err = snap.Release(ctx)
require.NoError(t, err)
active, err = cm.New(ctx, snap, nil, CachePolicyRetain)
require.NoError(t, err)
snap2, err = active.Commit(ctx)
require.NoError(t, err)
err = snap.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
// prune with only the parent released does nothing
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 2, 0)
require.Equal(t, len(buf.all), 0)
// releasing last reference
err = snap2.Release(ctx)
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 2)
buf = pruneResultBuffer()
err = cm.Prune(ctx, buf.C, client.PruneInfo{})
buf.close()
require.NoError(t, err)
checkDiskUsage(ctx, t, cm, 0, 0)
require.Equal(t, len(buf.all), 2)
dirs, err = ioutil.ReadDir(filepath.Join(tmpdir, "snapshots/snapshots"))
require.NoError(t, err)
require.Equal(t, 0, len(dirs))
}
func TestLazyCommit(t *testing.T) {
t.Parallel()
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
cm := co.manager
active, err := cm.New(ctx, nil, nil, CachePolicyRetain)
require.NoError(t, err)
// after commit mutable is locked
snap, err := active.Commit(ctx)
require.NoError(t, err)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, ErrLocked))
// immutable refs still work
snap2, err := cm.Get(ctx, snap.ID())
require.NoError(t, err)
require.Equal(t, snap.ID(), snap2.ID())
err = snap.Release(ctx)
require.NoError(t, err)
err = snap2.Release(ctx)
require.NoError(t, err)
// immutable refs work after the final release as well
snap, err = cm.Get(ctx, snap.ID())
require.NoError(t, err)
require.Equal(t, snap.ID(), snap2.ID())
// the mutable ref can't be retrieved while an immutable ref is held
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, ErrLocked))
err = snap.Release(ctx)
require.NoError(t, err)
// after release mutable becomes available again
active2, err := cm.GetMutable(ctx, active.ID())
require.NoError(t, err)
require.Equal(t, active2.ID(), active.ID())
// because the ref was taken as mutable, the old immutable refs are cleared
_, err = cm.Get(ctx, snap.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errNotFound))
snap, err = active2.Commit(ctx)
require.NoError(t, err)
// this time finalize commit
err = snap.(*immutableRef).finalizeLocked(ctx)
require.NoError(t, err)
err = snap.Release(ctx)
require.NoError(t, err)
// mutable is gone after finalize
_, err = cm.GetMutable(ctx, active2.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errNotFound))
// immutable still works
snap2, err = cm.Get(ctx, snap.ID())
require.NoError(t, err)
require.Equal(t, snap.ID(), snap2.ID())
err = snap2.Release(ctx)
require.NoError(t, err)
// test restarting after commit
active, err = cm.New(ctx, nil, nil, CachePolicyRetain)
require.NoError(t, err)
// after commit mutable is locked
snap, err = active.Commit(ctx)
require.NoError(t, err)
err = cm.Close()
require.NoError(t, err)
cleanup()
// the snapshotter (especially its internal bbolt store) can't be closed and reopened, so we reuse it
co, cleanup, err = newCacheManager(ctx, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
cm = co.manager
snap2, err = cm.Get(ctx, snap.ID())
require.NoError(t, err)
err = snap2.Release(ctx)
require.NoError(t, err)
active, err = cm.GetMutable(ctx, active.ID())
require.NoError(t, err)
_, err = cm.Get(ctx, snap.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errNotFound))
snap, err = active.Commit(ctx)
require.NoError(t, err)
err = cm.Close()
require.NoError(t, err)
cleanup()
co, cleanup, err = newCacheManager(ctx, cmOpt{
tmpdir: tmpdir,
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm = co.manager
snap2, err = cm.Get(ctx, snap.ID())
require.NoError(t, err)
err = snap2.(*immutableRef).finalizeLocked(ctx)
require.NoError(t, err)
err = snap2.Release(ctx)
require.NoError(t, err)
_, err = cm.GetMutable(ctx, active.ID())
require.Error(t, err)
require.True(t, errors.Is(err, errNotFound))
}
func TestGetRemote(t *testing.T) {
t.Parallel()
// windows fails when lazy blob is being extracted with "invalid windows mount type: 'bind'"
if runtime.GOOS != "linux" {
t.Skipf("unsupported GOOS: %s", runtime.GOOS)
}
ctx := namespaces.WithNamespace(context.Background(), "buildkit-test")
tmpdir, err := ioutil.TempDir("", "cachemanager")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
require.NoError(t, err)
co, cleanup, err := newCacheManager(ctx, cmOpt{
snapshotter: snapshotter,
snapshotterName: "native",
})
require.NoError(t, err)
defer cleanup()
cm := co.manager
ctx, done, err := leaseutil.WithLease(ctx, co.lm, leaseutil.MakeTemporary)
require.NoError(t, err)
defer done(context.TODO())
contentBuffer := contentutil.NewBuffer()
descHandlers := DescHandlers(map[digest.Digest]*DescHandler{})
// make some lazy refs from blobs
expectedContent := map[digest.Digest]struct{}{}
variant := map[digest.Digest]digest.Digest{}
esgz2gzip := map[digest.Digest]digest.Digest{}
var descs []ocispecs.Descriptor
for i := 0; i < 2; i++ {
blobmap := map[string]string{"foo": strconv.Itoa(i)}
blobBytes, desc, err := mapToBlob(blobmap, true)
require.NoError(t, err)
expectedContent[desc.Digest] = struct{}{}
descs = append(descs, desc)
cw, err := contentBuffer.Writer(ctx)
require.NoError(t, err)
_, err = cw.Write(blobBytes)
require.NoError(t, err)
err = cw.Commit(ctx, 0, cw.Digest())
require.NoError(t, err)
descHandlers[desc.Digest] = &DescHandler{
Provider: func(_ session.Group) content.Provider { return contentBuffer },
}
uncompressedBlobBytes, uncompressedDesc, err := mapToBlob(blobmap, false)
require.NoError(t, err)
expectedContent[uncompressedDesc.Digest] = struct{}{}
esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[esgzDgst] = struct{}{}
variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
esgz2gzip[esgzDgst] = desc.Digest
}
// Create 3 levels of mutable refs, where each parent ref has 2 children (this tests parallel creation of
// overlapping blob chains).
lazyRef, err := cm.GetByBlob(ctx, descs[0], nil, descHandlers)
require.NoError(t, err)
refs := []ImmutableRef{lazyRef}
for i := 0; i < 3; i++ {
var newRefs []ImmutableRef
for j, ir := range refs {
for k := 0; k < 2; k++ {
mutRef, err := cm.New(ctx, ir, nil, descHandlers)
require.NoError(t, err)
m, err := mutRef.Mount(ctx, false, nil)
require.NoError(t, err)
lm := snapshot.LocalMounter(m)
target, err := lm.Mount()
require.NoError(t, err)
f, err := os.Create(filepath.Join(target, fmt.Sprintf("%d-%d-%d", i, j, k)))
require.NoError(t, err)
err = os.Chtimes(f.Name(), time.Unix(0, 0), time.Unix(0, 0))
require.NoError(t, err)
_, desc, err := fileToBlob(f, true)
require.NoError(t, err)
expectedContent[desc.Digest] = struct{}{}
uncompressedBlobBytes, uncompressedDesc, err := fileToBlob(f, false)
require.NoError(t, err)
expectedContent[uncompressedDesc.Digest] = struct{}{}
esgzDgst, uncompressedEsgzDgst, err := esgzBlobDigest(uncompressedBlobBytes)
require.NoError(t, err)
expectedContent[esgzDgst] = struct{}{}
variant[uncompressedEsgzDgst] = uncompressedDesc.Digest
esgz2gzip[esgzDgst] = desc.Digest
f.Close()
err = lm.Unmount()
require.NoError(t, err)
immutRef, err := mutRef.Commit(ctx)
require.NoError(t, err)
newRefs = append(newRefs, immutRef)
}
}
refs = newRefs
}
// also test a ref that stays lazy, to get coverage for refs that don't have to be extracted from the snapshotter
lazyRef2, err := cm.GetByBlob(ctx, descs[1], nil, descHandlers)
require.NoError(t, err)
refs = append(refs, lazyRef2)
checkNumBlobs(ctx, t, co.cs, 1)
// Call GetRemote on all the refs
esgzRefs := map[digest.Digest]struct{}{}
var esgzRefsMu sync.Mutex
eg, egctx := errgroup.WithContext(ctx)
for _, ir := range refs {
ir := ir.(*immutableRef)
for _, compressionType := range []compression.Type{compression.Uncompressed, compression.Gzip, compression.EStargz} {
compressionType := compressionType
eg.Go(func() error {
remote, err := ir.GetRemote(egctx, true, compressionType, true, nil)
require.NoError(t, err)
refChain := ir.parentRefChain()
for i, desc := range remote.Descriptors {
switch compressionType {
case compression.Uncompressed:
require.Equal(t, ocispecs.MediaTypeImageLayer, desc.MediaType)
case compression.Gzip:
require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
case compression.EStargz:
require.Equal(t, ocispecs.MediaTypeImageLayerGzip, desc.MediaType)
default:
require.Fail(t, "unhandled media type", compressionType)
}
dgst := desc.Digest
if v, ok := variant[dgst]; ok {
dgst = v
}
require.Contains(t, expectedContent, dgst)
checkDescriptor(ctx, t, co.cs, desc, compressionType)
r := refChain[i]
if compressionType == compression.EStargz {
if digest.Digest(r.getBlob()) == desc.Digest {
esgzRefsMu.Lock()
esgzRefs[desc.Digest] = struct{}{}
esgzRefsMu.Unlock()
}
}
isLazy, err := r.isLazy(egctx)
require.NoError(t, err)
needs, err := needsConversion(desc.MediaType, compressionType)
require.NoError(t, err)
if needs {
require.False(t, isLazy, "layer %q requires conversion so it must be unlazied", desc.Digest)
}
bDesc, err := r.getCompressionBlob(egctx, compressionType)
if isLazy {
require.Error(t, err)
} else {
require.NoError(t, err)
checkDescriptor(ctx, t, co.cs, bDesc, compressionType)
require.Equal(t, desc.Digest, bDesc.Digest)
}
}
return nil
})
}
}
require.NoError(t, eg.Wait())
for dgst := range esgzRefs {
gzipDgst, ok := esgz2gzip[dgst]
require.True(t, ok, "match for gzip blob: %s", dgst)
delete(expectedContent, gzipDgst) // esgz blob is reused also as gzip. duplicated gzip blob is unexpected.
}
// verify there's a 1-to-1 mapping between the content store and what we expected to be there
err = co.cs.Walk(ctx, func(info content.Info) error {
dgst := info.Digest
if v, ok := variant[dgst]; ok {
dgst = v
}
var matched bool
for expected := range expectedContent {
if dgst == expected {
delete(expectedContent, expected)
matched = true
break
}
}
require.True(t, matched, "unexpected blob in content store: %s", info.Digest)
checkInfo(ctx, t, co.cs, info)
return nil
})
require.NoError(t, err)
require.Equal(t, map[digest.Digest]struct{}{}, expectedContent)
}
func checkInfo(ctx context.Context, t *testing.T, cs content.Store, info content.Info) {
if info.Labels == nil {
return
}
uncompressedDgst, ok := info.Labels[containerdUncompressed]
if !ok {
return
}
ra, err := cs.ReaderAt(ctx, ocispecs.Descriptor{Digest: info.Digest})
require.NoError(t, err)
defer ra.Close()
decompressR, err := ctdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
require.NoError(t, err)
diffID := digest.Canonical.Digester()
_, err = io.Copy(diffID.Hash(), decompressR)
require.NoError(t, err)
require.Equal(t, diffID.Digest().String(), uncompressedDgst)
}
func checkDescriptor(ctx context.Context, t *testing.T, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) {
if desc.Annotations == nil {
return
}
// Check annotations exist
uncompressedDgst, ok := desc.Annotations[containerdUncompressed]
require.True(t, ok, "uncompressed digest annotation not found: %q", desc.Digest)
var uncompressedSize int64
if compressionType == compression.EStargz {
_, ok := desc.Annotations[estargz.TOCJSONDigestAnnotation]
require.True(t, ok, "toc digest annotation not found: %q", desc.Digest)
uncompressedSizeS, ok := desc.Annotations[estargz.StoreUncompressedSizeAnnotation]
require.True(t, ok, "uncompressed size annotation not found: %q", desc.Digest)
var err error
uncompressedSize, err = strconv.ParseInt(uncompressedSizeS, 10, 64)
require.NoError(t, err)
}
// Check annotation values are valid
c := new(counter)
ra, err := cs.ReaderAt(ctx, desc)
if err != nil && errdefs.IsNotFound(err) {
return // lazy layer
}
require.NoError(t, err)
defer ra.Close()
decompressR, err := ctdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size()))
require.NoError(t, err)
diffID := digest.Canonical.Digester()
_, err = io.Copy(io.MultiWriter(diffID.Hash(), c), decompressR)
require.NoError(t, err)
require.Equal(t, diffID.Digest().String(), uncompressedDgst)
if compressionType == compression.EStargz {
require.Equal(t, c.size(), uncompressedSize)
}
}
func checkDiskUsage(ctx context.Context, t *testing.T, cm Manager, inuse, unused int) {
du, err := cm.DiskUsage(ctx, client.DiskUsageInfo{})
require.NoError(t, err)
var inuseActual, unusedActual int
for _, r := range du {
if r.InUse {
inuseActual++
} else {
unusedActual++
}
}
require.Equal(t, inuse, inuseActual)
require.Equal(t, unused, unusedActual)
}
func esgzBlobDigest(uncompressedBlobBytes []byte) (digest.Digest, digest.Digest, error) {
buf := new(bytes.Buffer)
compressorFunc, _ := writeEStargz()
w, err := compressorFunc(buf, ocispecs.MediaTypeImageLayerGzip)
if err != nil {
return "", "", err
}
if _, err := io.Copy(w, bytes.NewReader(uncompressedBlobBytes)); err != nil {
return "", "", err
}
if err := w.Close(); err != nil {
return "", "", err
}
b := buf.Bytes()
esgzDgst := digest.FromBytes(b)
ur, err := gzip.NewReader(bytes.NewReader(b))
if err != nil {
return "", "", err
}
defer ur.Close()
uncompressedDgst, err := digest.FromReader(ur)
if err != nil {
return "", "", err
}
return esgzDgst, uncompressedDgst, nil
}
func checkNumBlobs(ctx context.Context, t *testing.T, cs content.Store, expected int) {
c := 0
err := cs.Walk(ctx, func(_ content.Info) error {
c++
return nil
})
require.NoError(t, err)
require.Equal(t, expected, c)
}
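// pruneResultBuffer collects prune results: a goroutine drains every
// client.UsageInfo sent on C into all, and close() must be called before all
// is read so the draining goroutine is known to have finished.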
func pruneResultBuffer() *buf {
b := &buf{C: make(chan client.UsageInfo), closed: make(chan struct{})}
go func() {
for c := range b.C {
b.all = append(b.all, c)
}
close(b.closed)
}()
return b
}
type buf struct {
C chan client.UsageInfo
closed chan struct{}
all []client.UsageInfo
}
func (b *buf) close() {
close(b.C)
<-b.closed
}
type bufferCloser struct {
*bytes.Buffer
}
func (b bufferCloser) Close() error {
return nil
}
func mapToBlob(m map[string]string, compress bool) ([]byte, ocispecs.Descriptor, error) {
buf := bytes.NewBuffer(nil)
sha := digest.SHA256.Digester()
var dest io.WriteCloser = bufferCloser{buf}
if compress {
dest = gzip.NewWriter(buf)
}
tw := tar.NewWriter(io.MultiWriter(sha.Hash(), dest))
for k, v := range m {
if err := tw.WriteHeader(&tar.Header{
Name: k,
Size: int64(len(v)),
}); err != nil {
return nil, ocispecs.Descriptor{}, err
}
if _, err := tw.Write([]byte(v)); err != nil {
return nil, ocispecs.Descriptor{}, err
}
}
if err := tw.Close(); err != nil {
return nil, ocispecs.Descriptor{}, err
}
if err := dest.Close(); err != nil {
return nil, ocispecs.Descriptor{}, err
}
mediaType := ocispecs.MediaTypeImageLayer
if compress {
mediaType = ocispecs.MediaTypeImageLayerGzip
}
return buf.Bytes(), ocispecs.Descriptor{
Digest: digest.FromBytes(buf.Bytes()),
MediaType: mediaType,
Size: int64(buf.Len()),
Annotations: map[string]string{
"containerd.io/uncompressed": sha.Digest().String(),
},
}, nil
}
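// Illustrative usage of mapToBlob (hypothetical helper, not called by the
// tests above): the descriptor digest covers the possibly-compressed bytes,
// while the "containerd.io/uncompressed" annotation carries the tar stream's
// diff ID.
func exampleMapToBlob() error {
	b, desc, err := mapToBlob(map[string]string{"foo": "bar"}, true)
	if err != nil {
		return err
	}
	fmt.Printf("%d bytes, %s, %s\n", len(b), desc.MediaType, desc.Digest)
	return nil
}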
func fileToBlob(file *os.File, compress bool) ([]byte, ocispecs.Descriptor, error) {
buf := bytes.NewBuffer(nil)
sha := digest.SHA256.Digester()
var dest io.WriteCloser = bufferCloser{buf}
if compress {
dest = gzip.NewWriter(buf)
}
tw := tar.NewWriter(io.MultiWriter(sha.Hash(), dest))
info, err := file.Stat()
if err != nil {
return nil, ocispecs.Descriptor{}, err
}
fi, err := tar.FileInfoHeader(info, "")
if err != nil {
return nil, ocispecs.Descriptor{}, err
}
fi.Format = tar.FormatPAX
fi.ModTime = fi.ModTime.Truncate(time.Second)
fi.AccessTime = time.Time{}
fi.ChangeTime = time.Time{}
if err := tw.WriteHeader(fi); err != nil {
return nil, ocispecs.Descriptor{}, err
}
if _, err := io.Copy(tw, file); err != nil {
return nil, ocispecs.Descriptor{}, err
}
if err := tw.Close(); err != nil {
return nil, ocispecs.Descriptor{}, err
}
if err := dest.Close(); err != nil {
return nil, ocispecs.Descriptor{}, err
}
mediaType := ocispecs.MediaTypeImageLayer
if compress {
mediaType = ocispecs.MediaTypeImageLayerGzip
}
return buf.Bytes(), ocispecs.Descriptor{
Digest: digest.FromBytes(buf.Bytes()),
MediaType: mediaType,
Size: int64(buf.Len()),
Annotations: map[string]string{
"containerd.io/uncompressed": sha.Digest().String(),
},
}, nil
} | descHandlers2 := DescHandlers(make(map[digest.Digest]*DescHandler))
descHandlers2[desc2.Digest] = &DescHandler{} |
problem.go | package day257
// SmallestWindowThatMustBeSorted returns the smallest window that must be
// sorted to result in a completely sorted slice.
func SmallestWindowThatMustBeSorted(nums []int) (start, end int) {
maxSoFar := -int(^uint(0)>>1) - 1 // minimum int value (math.MinInt)
for i := range nums {
if maxSoFar < nums[i] {
maxSoFar = nums[i]
} | end = i
}
}
minSoFar := int(^uint(0) >> 1) // maximum int value (math.MaxInt)
for i := range nums {
j := len(nums) - 1 - i
if minSoFar > nums[j] {
minSoFar = nums[j]
}
if nums[j] > minSoFar {
start = j
}
}
return
} | if nums[i] < maxSoFar { |
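// Usage sketch (hypothetical, not part of the original solution): for
// []int{3, 7, 5, 6, 9} only the sub-slice at indices 1 through 3 is out of
// order, so the function returns (1, 3).
func exampleSmallestWindow() {
	start, end := SmallestWindowThatMustBeSorted([]int{3, 7, 5, 6, 9})
	_, _ = start, end // start == 1, end == 3
}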
sha1.js | /*
* A JavaScript implementation of the Secure Hash Algorithm, SHA-1, as defined
* in FIPS PUB 180-1
* Version 2.1a Copyright Paul Johnston 2000 - 2002.
* Other contributors: Greg Holt, Andrew Kepert, Ydnar, Lostinet
* Distributed under the BSD License
* See http://pajhome.org.uk/crypt/md5 for details.
*/
/*
* Configurable variables. You may need to tweak these to be compatible with
* the server-side, but the defaults work in most cases.
*/
var hexcase = 0; /* hex output format. 0 - lowercase; 1 - uppercase */
var b64pad = "="; /* base-64 pad character. "=" for strict RFC compliance */
var chrsz = 8; /* bits per input character. 8 - ASCII; 16 - Unicode */
/*
* These are the functions you'll usually want to call
* They take string arguments and return either hex or base-64 encoded strings
*/
function hex_sha1(s){return binb2hex(core_sha1(str2binb(s),s.length * chrsz));}
function b64_sha1(s){return binb2b64(core_sha1(str2binb(s),s.length * chrsz));}
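/*
 * Illustrative check (not part of the original library): with the default
 * chrsz, hexcase and b64pad settings above, hex_sha1 reproduces the FIPS
 * 180-1 "abc" test vector:
 *
 *   hex_sha1("abc") === "a9993e364706816aba3e25717850c26c9cd0d89d"
 */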
/*
* Calculate the SHA-1 of an array of big-endian words, and a bit length
*/
function core_sha1(x, len)
{
/* append padding */
x[len >> 5] |= 0x80 << (24 - len % 32);
x[((len + 64 >> 9) << 4) + 15] = len;
var w = Array(80);
var a = 1732584193;
var b = -271733879;
var c = -1732584194;
var d = 271733878;
var e = -1009589776;
for(var i = 0; i < x.length; i += 16)
{
var olda = a;
var oldb = b;
var oldc = c;
var oldd = d;
var olde = e;
for(var j = 0; j < 80; j++)
{
if(j < 16) w[j] = x[i + j];
else w[j] = rol(w[j-3] ^ w[j-8] ^ w[j-14] ^ w[j-16], 1);
var t = safe_add(safe_add(rol(a, 5), sha1_ft(j, b, c, d)),
safe_add(safe_add(e, w[j]), sha1_kt(j)));
e = d;
d = c;
c = rol(b, 30);
b = a;
a = t;
}
a = safe_add(a, olda);
b = safe_add(b, oldb);
c = safe_add(c, oldc);
d = safe_add(d, oldd);
e = safe_add(e, olde);
}
return Array(a, b, c, d, e);
}
/*
* Perform the appropriate triplet combination function for the current
* iteration
*/
function sha1_ft(t, b, c, d)
{
if(t < 20) return (b & c) | ((~b) & d);
if(t < 40) return b ^ c ^ d;
if(t < 60) return (b & c) | (b & d) | (c & d);
return b ^ c ^ d;
}
/*
* Determine the appropriate additive constant for the current iteration
*/
function sha1_kt(t)
{
return (t < 20) ? 1518500249 : (t < 40) ? 1859775393 :
(t < 60) ? -1894007588 : -899497514;
}
/*
* Add integers, wrapping at 2^32. This uses 16-bit operations internally
* to work around bugs in some JS interpreters.
*/
function | (x, y)
{
var lsw = (x & 0xFFFF) + (y & 0xFFFF);
var msw = (x >> 16) + (y >> 16) + (lsw >> 16);
return (msw << 16) | (lsw & 0xFFFF);
}
/*
* Bitwise rotate a 32-bit number to the left.
*/
function rol(num, cnt)
{
return (num << cnt) | (num >>> (32 - cnt));
}
/*
* Convert an 8-bit or 16-bit string to an array of big-endian words
 * In the 8-bit mode, characters >255 have their high byte silently ignored.
*/
function str2binb(str)
{
var bin = Array();
var mask = (1 << chrsz) - 1;
for(var i = 0; i < str.length * chrsz; i += chrsz)
bin[i>>5] |= (str.charCodeAt(i / chrsz) & mask) << (32 - chrsz - i%32);
return bin;
}
/*
* Convert an array of big-endian words to a hex string.
*/
function binb2hex(binarray)
{
var hex_tab = hexcase ? "0123456789ABCDEF" : "0123456789abcdef";
var str = "";
for(var i = 0; i < binarray.length * 4; i++)
{
str += hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8+4)) & 0xF) +
hex_tab.charAt((binarray[i>>2] >> ((3 - i%4)*8 )) & 0xF);
}
return str;
}
/*
* Convert an array of big-endian words to a base-64 string
*/
function binb2b64(binarray)
{
var tab = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
var str = "";
for(var i = 0; i < binarray.length * 4; i += 3)
{
var triplet = (((binarray[i >> 2] >> 8 * (3 - i %4)) & 0xFF) << 16)
| (((binarray[i+1 >> 2] >> 8 * (3 - (i+1)%4)) & 0xFF) << 8 )
| ((binarray[i+2 >> 2] >> 8 * (3 - (i+2)%4)) & 0xFF);
for(var j = 0; j < 4; j++)
{
if(i * 8 + j * 6 > binarray.length * 32) str += b64pad;
else str += tab.charAt((triplet >> 6*(3-j)) & 0x3F);
}
}
return str;
}
function limitlength(val) {
var limit = document.f.lenlimit.value;
if(limit < 0) return val;
return val.slice(0,limit);
}
bairros.component.ts | import { NgModule } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { Variable } from '@angular/compiler/src/render3/r3_ast';
import { Component, OnInit, ElementRef, ViewChild } from '@angular/core';
import {ActivatedRoute, Router} from '@angular/router'
import KeenSlider from "keen-slider"
@Component({
selector: 'app-bairros',
templateUrl: './bairros.component.html',
styleUrls: ["../../../node_modules/keen-slider/keen-slider.min.css",
"./bairros.component.scss",]
})
export class BairrosComponent {
public bairroId
constructor(
private activatedRoute: ActivatedRoute
) {this.bairroId=this.activatedRoute.snapshot.paramMap.get('id');
}
ngOnInit() {
}
@ViewChild("sliderRef")
sliderRef!: ElementRef<HTMLElement>
slider: any = null
ngAfterViewInit() {
this.slider = new KeenSlider(this.sliderRef.nativeElement, {
slidesPerView:1,
mode: "free",
spacing: 15,
loop: true,
})
}
ngOnDestroy() {
if (this.slider) this.slider.destroy()
}
}
user-management.component.spec.ts | import { ComponentFixture, TestBed, async, inject, fakeAsync, tick } from '@angular/core/testing';
import { Observable } from 'rxjs/Rx';
import { Headers } from '@angular/http';
import { MindsphereTestModule } from '../../../test.module';
import { Principal } from '../../../../../../main/webapp/app/shared';
import { UserMgmtComponent } from '../../../../../../main/webapp/app/admin/user-management/user-management.component';
import { UserService, User } from '../../../../../../main/webapp/app/shared';
describe('Component Tests', () => {
describe('User Management Component', () => {
let comp: UserMgmtComponent;
let fixture: ComponentFixture<UserMgmtComponent>;
let service: UserService;
let mockPrincipal: any;
beforeEach(async(() => {
TestBed.configureTestingModule({
imports: [MindsphereTestModule],
declarations: [UserMgmtComponent],
providers: [
UserService
]
})
.overrideTemplate(UserMgmtComponent, '')
.compileComponents();
}));
beforeEach(() => {
fixture = TestBed.createComponent(UserMgmtComponent);
comp = fixture.componentInstance;
service = fixture.debugElement.injector.get(UserService);
mockPrincipal = fixture.debugElement.injector.get(Principal);
});
describe('OnInit', () => {
it('Should call load all on init',
inject([],
fakeAsync(() => {
// GIVEN
const headers = new Headers();
headers.append('link', 'link;link');
spyOn(service, 'query').and.returnValue(Observable.of({
json: [new User(123)],
headers
}));
// WHEN
        comp.ngOnInit();
        tick(); // simulate async
        // THEN
        expect(service.query).toHaveBeenCalled();
        expect(comp.users[0]).toEqual(jasmine.objectContaining({id: 123}));
})
)
);
});
describe('setActive', () => {
it('Should update user and call load all',
inject([],
fakeAsync(() => {
// GIVEN
const headers = new Headers();
headers.append('link', 'link;link');
const user = new User(123);
spyOn(service, 'query').and.returnValue(Observable.of({
json: [user],
headers
}));
spyOn(service, 'update').and.returnValue(Observable.of({ status: 200 }));
// WHEN
comp.setActive(user, true);
tick(); // simulate async
// THEN
expect(service.update).toHaveBeenCalledWith(user);
expect(service.query).toHaveBeenCalled();
expect(comp.users[0]).toEqual(jasmine.objectContaining({id: 123}));
})
)
);
});
});
index.js | const verifyXMLSignature = require('./lib/verifyXMLSignature')
const verifyMessage = require('./lib/verifyMessage')
const getUser = require('./lib/getUser')
const assert = require('assert').strict
const { parseStringPromise } = require('xml2js')
const certToPem = require('./lib/certToPem')
const verifyCert = require('./lib/verifyCert')
const makeLogin = function ({
authorityPem,
audience,
callbackUrl
}) {
return async (token) => {
assert(authorityPem, 'Authority certificate required')
assert(audience, 'Audience required')
assert(callbackUrl, 'Callback URL required')
const xmlString = Buffer.from(token, 'base64').toString('utf-8')
const xmlObj = await parseStringPromise(xmlString, { explicitArray: false })
const certFromXML = xmlObj.Response.Signature.KeyInfo.X509Data.X509Certificate
const clientPem = certToPem(certFromXML)
const attributesFromXML = xmlObj.Response.Assertion.AttributeStatement.Attribute
const user = getUser(attributesFromXML)
const assertionsFromXML = {
issuer: xmlObj.Response.Assertion.Issuer,
notBefore: xmlObj.Response.Assertion.Conditions.$.NotBefore,
notOnOrAfter: xmlObj.Response.Assertion.Conditions.$.NotOnOrAfter,
audience: xmlObj.Response.Assertion.Conditions.AudienceRestriction.Audience,
destination: xmlObj.Response.$.Destination,
}
verifyCert(clientPem, authorityPem)
// verifyXMLSignature(xmlString)
verifyMessage(assertionsFromXML, audience, callbackUrl)
return user
}
}
module.exports = makeLogin
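
// Example usage (an illustrative sketch; the option values below are assumptions):
// const login = makeLogin({
//   authorityPem,                                   // trusted IdP certificate, PEM-encoded
//   audience: 'https://sp.example.org',
//   callbackUrl: 'https://sp.example.org/callback'
// })
// login(base64Token).then(user => console.log(user))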
|
mobilenet_v1.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""MobileNet v1.
MobileNet is a general architecture and can be used for multiple use cases.
Depending on the use case, it can use different input layer size and different
head (for example: embeddings, localization and classification).
As described in https://arxiv.org/abs/1704.04861.
MobileNets: Efficient Convolutional Neural Networks for
Mobile Vision Applications
Andrew G. Howard, Menglong Zhu, Bo Chen, Dmitry Kalenichenko, Weijun Wang,
Tobias Weyand, Marco Andreetto, Hartwig Adam
100% Mobilenet V1 (base) with input size 224x224:
See mobilenet_v1()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 864 10,838,016
MobilenetV1/Conv2d_1_depthwise/depthwise: 288 3,612,672
MobilenetV1/Conv2d_1_pointwise/Conv2D: 2,048 25,690,112
MobilenetV1/Conv2d_2_depthwise/depthwise: 576 1,806,336
MobilenetV1/Conv2d_2_pointwise/Conv2D: 8,192 25,690,112
MobilenetV1/Conv2d_3_depthwise/depthwise: 1,152 3,612,672
MobilenetV1/Conv2d_3_pointwise/Conv2D: 16,384 51,380,224
MobilenetV1/Conv2d_4_depthwise/depthwise: 1,152 903,168
MobilenetV1/Conv2d_4_pointwise/Conv2D: 32,768 25,690,112
MobilenetV1/Conv2d_5_depthwise/depthwise: 2,304 1,806,336
MobilenetV1/Conv2d_5_pointwise/Conv2D: 65,536 51,380,224
MobilenetV1/Conv2d_6_depthwise/depthwise: 2,304 451,584
MobilenetV1/Conv2d_6_pointwise/Conv2D: 131,072 25,690,112
MobilenetV1/Conv2d_7_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_7_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_8_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_8_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_9_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_9_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_10_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_10_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_11_depthwise/depthwise: 4,608 903,168
MobilenetV1/Conv2d_11_pointwise/Conv2D: 262,144 51,380,224
MobilenetV1/Conv2d_12_depthwise/depthwise: 4,608 225,792
MobilenetV1/Conv2d_12_pointwise/Conv2D: 524,288 25,690,112
MobilenetV1/Conv2d_13_depthwise/depthwise: 9,216 451,584
MobilenetV1/Conv2d_13_pointwise/Conv2D: 1,048,576 51,380,224
--------------------------------------------------------------------------------
Total: 3,185,088 567,716,352
75% Mobilenet V1 (base) with input size 128x128:
See mobilenet_v1_075()
Layer params macs
--------------------------------------------------------------------------------
MobilenetV1/Conv2d_0/Conv2D: 648 2,654,208
MobilenetV1/Conv2d_1_depthwise/depthwise: 216 884,736
MobilenetV1/Conv2d_1_pointwise/Conv2D: 1,152 4,718,592
MobilenetV1/Conv2d_2_depthwise/depthwise: 432 442,368
MobilenetV1/Conv2d_2_pointwise/Conv2D: 4,608 4,718,592
MobilenetV1/Conv2d_3_depthwise/depthwise: 864 884,736
MobilenetV1/Conv2d_3_pointwise/Conv2D: 9,216 9,437,184
MobilenetV1/Conv2d_4_depthwise/depthwise: 864 221,184
MobilenetV1/Conv2d_4_pointwise/Conv2D: 18,432 4,718,592
MobilenetV1/Conv2d_5_depthwise/depthwise: 1,728 442,368
MobilenetV1/Conv2d_5_pointwise/Conv2D: 36,864 9,437,184
MobilenetV1/Conv2d_6_depthwise/depthwise: 1,728 110,592
MobilenetV1/Conv2d_6_pointwise/Conv2D: 73,728 4,718,592
MobilenetV1/Conv2d_7_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_7_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_8_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_8_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_9_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_9_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_10_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_10_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_11_depthwise/depthwise: 3,456 221,184
MobilenetV1/Conv2d_11_pointwise/Conv2D: 147,456 9,437,184
MobilenetV1/Conv2d_12_depthwise/depthwise: 3,456 55,296
MobilenetV1/Conv2d_12_pointwise/Conv2D: 294,912 4,718,592
MobilenetV1/Conv2d_13_depthwise/depthwise: 6,912 110,592
MobilenetV1/Conv2d_13_pointwise/Conv2D: 589,824 9,437,184
--------------------------------------------------------------------------------
Total: 1,800,144 106,002,432
"""
# Tensorflow mandates these.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import functools
import tensorflow.compat.v1 as tf
import tf_slim as slim
# Conv and DepthSepConv namedtuple define layers of the MobileNet architecture
# Conv defines 3x3 convolution layers
# DepthSepConv defines 3x3 depthwise convolution followed by 1x1 convolution.
# stride is the stride of the convolution
# depth is the number of channels or filters in a layer
Conv = namedtuple('Conv', ['kernel', 'stride', 'depth'])
DepthSepConv = namedtuple('DepthSepConv', ['kernel', 'stride', 'depth'])
# MOBILENETV1_CONV_DEFS specifies the MobileNet body
MOBILENETV1_CONV_DEFS = [
Conv(kernel=[3, 3], stride=2, depth=32),
DepthSepConv(kernel=[3, 3], stride=1, depth=64),
DepthSepConv(kernel=[3, 3], stride=2, depth=128),
DepthSepConv(kernel=[3, 3], stride=1, depth=128),
DepthSepConv(kernel=[3, 3], stride=2, depth=256),
DepthSepConv(kernel=[3, 3], stride=1, depth=256),
DepthSepConv(kernel=[3, 3], stride=2, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=1, depth=512),
DepthSepConv(kernel=[3, 3], stride=2, depth=1024),
DepthSepConv(kernel=[3, 3], stride=1, depth=1024)
]
def _fixed_padding(inputs, kernel_size, rate=1):
"""Pads the input along the spatial dimensions independently of input size.
Pads the input such that if it was used in a convolution with 'VALID' padding,
the output would have the same dimensions as if the unpadded input was used
in a convolution with 'SAME' padding.
Args:
inputs: A tensor of size [batch, height_in, width_in, channels].
kernel_size: The kernel to be used in the conv2d or max_pool2d operation.
rate: An integer, rate for atrous convolution.
Returns:
output: A tensor of size [batch, height_out, width_out, channels] with the
input, either intact (if kernel_size == 1) or padded (if kernel_size > 1).
"""
kernel_size_effective = [kernel_size[0] + (kernel_size[0] - 1) * (rate - 1),
kernel_size[0] + (kernel_size[0] - 1) * (rate - 1)]
pad_total = [kernel_size_effective[0] - 1, kernel_size_effective[1] - 1]
pad_beg = [pad_total[0] // 2, pad_total[1] // 2]
pad_end = [pad_total[0] - pad_beg[0], pad_total[1] - pad_beg[1]]
padded_inputs = tf.pad(
tensor=inputs,
paddings=[[0, 0], [pad_beg[0], pad_end[0]], [pad_beg[1], pad_end[1]],
[0, 0]])
return padded_inputs
def mobilenet_v1_base(inputs,
final_endpoint='Conv2d_13_pointwise',
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
output_stride=None,
use_explicit_padding=False,
scope=None):
"""Mobilenet v1.
Constructs a Mobilenet v1 network from inputs to the given final endpoint.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
final_endpoint: specifies the endpoint to construct the network up to. It
can be one of ['Conv2d_0', 'Conv2d_1_pointwise', 'Conv2d_2_pointwise',
      'Conv2d_3_pointwise', 'Conv2d_4_pointwise', 'Conv2d_5_pointwise',
'Conv2d_6_pointwise', 'Conv2d_7_pointwise', 'Conv2d_8_pointwise',
'Conv2d_9_pointwise', 'Conv2d_10_pointwise', 'Conv2d_11_pointwise',
'Conv2d_12_pointwise', 'Conv2d_13_pointwise'].
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
output_stride: An integer that specifies the requested ratio of input to
output spatial resolution. If not None, then we invoke atrous convolution
if necessary to prevent the network from reducing the spatial resolution
of the activation maps. Allowed values are 8 (accurate fully convolutional
mode), 16 (fast fully convolutional mode), 32 (classification mode).
use_explicit_padding: Use 'VALID' padding for convolutions, but prepad
inputs so that the output dimensions are the same as if 'SAME' padding
were used.
scope: Optional variable_scope.
Returns:
tensor_out: output tensor corresponding to the final_endpoint.
end_points: a set of activations for external use, for example summaries or
losses.
Raises:
ValueError: if final_endpoint is not set to one of the predefined values,
or depth_multiplier <= 0, or the target output_stride is not
allowed.
"""
depth = lambda d: max(int(d * depth_multiplier), min_depth)
end_points = {}
# Used to find thinned depths for each layer.
if depth_multiplier <= 0:
raise ValueError('depth_multiplier is not greater than zero.')
if conv_defs is None:
conv_defs = MOBILENETV1_CONV_DEFS
if output_stride is not None and output_stride not in [8, 16, 32]:
raise ValueError('Only allowed output_stride values are 8, 16, 32.')
padding = 'SAME'
if use_explicit_padding:
padding = 'VALID'
with tf.variable_scope(scope, 'MobilenetV1', [inputs]):
with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding=padding):
# The current_stride variable keeps track of the output stride of the
# activations, i.e., the running product of convolution strides up to the
# current network layer. This allows us to invoke atrous convolution
# whenever applying the next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = inputs
for i, conv_def in enumerate(conv_defs):
end_point_base = 'Conv2d_%d' % i
if output_stride is not None and current_stride == output_stride:
# If we have reached the target output_stride, then we need to employ
# atrous convolution with stride=1 and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= conv_def.stride
else:
layer_stride = conv_def.stride
layer_rate = 1
current_stride *= conv_def.stride
if isinstance(conv_def, Conv):
end_point = end_point_base
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel)
net = slim.conv2d(net, depth(conv_def.depth), conv_def.kernel,
stride=conv_def.stride,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
elif isinstance(conv_def, DepthSepConv):
end_point = end_point_base + '_depthwise'
# By passing filters=None
# separable_conv2d produces only a depthwise convolution layer
if use_explicit_padding:
net = _fixed_padding(net, conv_def.kernel, layer_rate)
net = slim.separable_conv2d(net, None, conv_def.kernel,
depth_multiplier=1,
stride=layer_stride,
rate=layer_rate,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
end_point = end_point_base + '_pointwise'
net = slim.conv2d(net, depth(conv_def.depth), [1, 1],
stride=1,
scope=end_point)
end_points[end_point] = net
if end_point == final_endpoint:
return net, end_points
else:
raise ValueError('Unknown convolution type %s for layer %d'
% (conv_def.ltype, i))
raise ValueError('Unknown final endpoint %s' % final_endpoint)
def mobilenet_v1(inputs,
num_classes=1000,
dropout_keep_prob=0.999,
is_training=True,
min_depth=8,
depth_multiplier=1.0,
conv_defs=None,
prediction_fn=slim.softmax,
spatial_squeeze=True,
reuse=None,
scope='MobilenetV1',
global_pool=False):
"""Mobilenet v1 model for classification.
Args:
inputs: a tensor of shape [batch_size, height, width, channels].
num_classes: number of predicted classes. If 0 or None, the logits layer
is omitted and the input features to the logits layer (before dropout)
are returned instead.
dropout_keep_prob: the percentage of activation values that are retained.
is_training: whether is training or not.
min_depth: Minimum depth value (number of channels) for all convolution ops.
Enforced when depth_multiplier < 1, and not an active constraint when
depth_multiplier >= 1.
depth_multiplier: Float multiplier for the depth (number of channels)
for all convolution ops. The value must be greater than zero. Typical
usage will be to set this value in (0, 1) to reduce the number of
parameters or computation cost of the model.
conv_defs: A list of ConvDef namedtuples specifying the net architecture.
prediction_fn: a function to get predictions out of logits.
    spatial_squeeze: if True, logits is of shape [B, C]; if False, logits is
of shape [B, 1, 1, C], where B is batch_size and C is number of classes.
reuse: whether or not the network and its variables should be reused. To be
able to reuse 'scope' must be given.
scope: Optional variable_scope.
global_pool: Optional boolean flag to control the avgpooling before the
logits layer. If false or unset, pooling is done with a fixed window
that reduces default-sized inputs to 1x1, while larger inputs lead to
larger outputs. If true, any input size is pooled down to 1x1.
Returns:
net: a 2D Tensor with the logits (pre-softmax activations) if num_classes
is a non-zero integer, or the non-dropped-out input to the logits layer
if num_classes is 0 or None.
end_points: a dictionary from components of the network to the corresponding
activation.
Raises:
ValueError: Input rank is invalid.
"""
input_shape = inputs.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError('Invalid input tensor rank, expected 4, was: %d' %
len(input_shape))
with tf.variable_scope(
scope, 'MobilenetV1', [inputs], reuse=reuse) as scope:
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
net, end_points = mobilenet_v1_base(inputs, scope=scope,
min_depth=min_depth,
depth_multiplier=depth_multiplier,
conv_defs=conv_defs)
with tf.variable_scope('Logits'):
if global_pool:
# Global average pooling.
net = tf.reduce_mean(
input_tensor=net, axis=[1, 2], keepdims=True, name='global_pool')
end_points['global_pool'] = net
else:
# Pooling with a fixed kernel size.
kernel_size = _reduced_kernel_size_for_small_input(net, [7, 7])
net = slim.avg_pool2d(net, kernel_size, padding='VALID',
scope='AvgPool_1a')
end_points['AvgPool_1a'] = net
if not num_classes:
return net, end_points
# 1 x 1 x 1024
net = slim.dropout(net, keep_prob=dropout_keep_prob, scope='Dropout_1b')
logits = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,
normalizer_fn=None, scope='Conv2d_1c_1x1')
if spatial_squeeze:
logits = tf.squeeze(logits, [1, 2], name='SpatialSqueeze')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits, scope='Predictions')
return logits, end_points
mobilenet_v1.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
mobilenet_v1_075 = wrapped_partial(mobilenet_v1, depth_multiplier=0.75)
mobilenet_v1_050 = wrapped_partial(mobilenet_v1, depth_multiplier=0.50)
mobilenet_v1_025 = wrapped_partial(mobilenet_v1, depth_multiplier=0.25)
def _reduced_kernel_size_for_small_input(input_tensor, kernel_size):
"""Define kernel size which is automatically reduced for small input.
If the shape of the input images is unknown at graph construction time this
function assumes that the input images are large enough.
Args:
input_tensor: input tensor of size [batch_size, height, width, channels].
kernel_size: desired kernel size of length 2: [kernel_height, kernel_width]
Returns:
a tensor with the kernel size.
"""
shape = input_tensor.get_shape().as_list()
if shape[1] is None or shape[2] is None:
kernel_size_out = kernel_size
else:
kernel_size_out = [min(shape[1], kernel_size[0]),
min(shape[2], kernel_size[1])]
return kernel_size_out
def mobilenet_v1_arg_scope(
is_training=True,
weight_decay=0.00004,
stddev=0.09,
regularize_depthwise=False,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
normalizer_fn=slim.batch_norm):
"""Defines the default MobilenetV1 arg scope.
Args:
is_training: Whether or not we're training the model. If this is set to
None, the parameter is not added to the batch_norm arg_scope.
weight_decay: The weight decay to use for regularizing the model.
    stddev: The standard deviation of the truncated normal weight initializer.
regularize_depthwise: Whether or not apply regularization on depthwise.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
normalizer_fn: Normalization function to apply after convolution.
Returns:
An `arg_scope` to use for the mobilenet v1 model.
"""
batch_norm_params = {
'center': True,
'scale': True,
'decay': batch_norm_decay,
'epsilon': batch_norm_epsilon,
'updates_collections': batch_norm_updates_collections,
}
if is_training is not None:
batch_norm_params['is_training'] = is_training
# Set weight_decay for weights in Conv and DepthSepConv layers.
weights_init = tf.truncated_normal_initializer(stddev=stddev)
regularizer = slim.l2_regularizer(weight_decay)
if regularize_depthwise:
depthwise_regularizer = regularizer
else:
depthwise_regularizer = None
with slim.arg_scope([slim.conv2d, slim.separable_conv2d],
weights_initializer=weights_init,
activation_fn=tf.nn.relu6, normalizer_fn=normalizer_fn):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):
with slim.arg_scope([slim.separable_conv2d],
weights_regularizer=depthwise_regularizer) as sc:
        return sc
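
# Example usage (an illustrative sketch; the placeholder shape is an assumption):
#   images = tf.placeholder(tf.float32, [None, 224, 224, 3])
#   with slim.arg_scope(mobilenet_v1_arg_scope(is_training=False)):
#     logits, end_points = mobilenet_v1(images, num_classes=1000,
#                                       is_training=False)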
xstate.rs | /*!
One-line description.
More detailed description, with
# Example
*/
// use ...
// ------------------------------------------------------------------------------------------------
// Public Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Public Functions
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Implementations
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Types
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Private Functions
// ------------------------------------------------------------------------------------------------
// ------------------------------------------------------------------------------------------------
// Modules
// ------------------------------------------------------------------------------------------------
cadastro_fornecedor.py | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UI/cadastro_fornecedor.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ct_FormFornecedor(object):
    def setupUi(self, ct_FormFornecedor):
ct_FormFornecedor.setObjectName("ct_FormFornecedor")
ct_FormFornecedor.resize(653, 371)
self.fr_FormFornecedor = QtWidgets.QFrame(ct_FormFornecedor)
self.fr_FormFornecedor.setGeometry(QtCore.QRect(0, 0, 1000, 500))
self.fr_FormFornecedor.setStyleSheet("background: #FFF;\n"
"border: none")
self.fr_FormFornecedor.setObjectName("fr_FormFornecedor")
self.lb_FormFornecedor = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor.setGeometry(QtCore.QRect(20, 10, 880, 30))
self.lb_FormFornecedor.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: bold;\n"
"\n"
"border-bottom: 2px solid #A2A2A2\n"
"}")
self.lb_FormFornecedor.setObjectName("lb_FormFornecedor")
self.lb_FormFornecedor_2 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_2.setGeometry(QtCore.QRect(370, 60, 150, 20))
self.lb_FormFornecedor_2.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_2.setObjectName("lb_FormFornecedor_2")
self.tx_NomeFantasia = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_NomeFantasia.setGeometry(QtCore.QRect(370, 80, 271, 25))
self.tx_NomeFantasia.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_NomeFantasia.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase;\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_NomeFantasia.setObjectName("tx_NomeFantasia")
self.lb_FormFornecedor_3 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_3.setGeometry(QtCore.QRect(20, 60, 190, 20))
self.lb_FormFornecedor_3.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_3.setObjectName("lb_FormFornecedor_3")
self.lb_FormFornecedor_5 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_5.setGeometry(QtCore.QRect(20, 120, 196, 20))
self.lb_FormFornecedor_5.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_5.setObjectName("lb_FormFornecedor_5")
self.tx_Telefone = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Telefone.setGeometry(QtCore.QRect(20, 140, 196, 25))
self.tx_Telefone.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Telefone.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Telefone.setPlaceholderText("")
self.tx_Telefone.setObjectName("tx_Telefone")
self.lb_FormFornecedor_8 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_8.setGeometry(QtCore.QRect(20, 180, 630, 30))
self.lb_FormFornecedor_8.setStyleSheet("QLabel{\n"
"font-size: 14px;\n"
"font-family: \"Arial\";\n"
"font-weight: normal;\n"
"\n"
"border-bottom: 2px solid #A2A2A2;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_8.setObjectName("lb_FormFornecedor_8")
self.tx_Cep = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cep.setGeometry(QtCore.QRect(20, 240, 101, 25))
self.tx_Cep.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cep.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cep.setAlignment(QtCore.Qt.AlignCenter)
self.tx_Cep.setObjectName("tx_Cep")
self.lb_FormFornecedor_10 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_10.setGeometry(QtCore.QRect(20, 215, 50, 20))
self.lb_FormFornecedor_10.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_10.setObjectName("lb_FormFornecedor_10")
self.fr_BotoesFormFornecedor = QtWidgets.QFrame(self.fr_FormFornecedor)
self.fr_BotoesFormFornecedor.setGeometry(QtCore.QRect(-340, 340, 1000, 30))
self.fr_BotoesFormFornecedor.setStyleSheet("background:#E1DFE0;\n"
"border: none;")
self.fr_BotoesFormFornecedor.setObjectName("fr_BotoesFormFornecedor")
self.bt_Voltar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Voltar.setGeometry(QtCore.QRect(880, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Voltar.setFont(font)
self.bt_Voltar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Voltar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Voltar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Voltar.setStyleSheet("QPushButton {\n"
"background-color: #1E87F0;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Voltar.setIconSize(QtCore.QSize(75, 35))
self.bt_Voltar.setObjectName("bt_Voltar")
self.bt_Salvar = QtWidgets.QPushButton(self.fr_BotoesFormFornecedor)
self.bt_Salvar.setGeometry(QtCore.QRect(750, 0, 120, 30))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_Salvar.setFont(font)
self.bt_Salvar.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_Salvar.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_Salvar.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_Salvar.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_Salvar.setIconSize(QtCore.QSize(75, 35))
self.bt_Salvar.setObjectName("bt_Salvar")
self.tx_cnpj = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_cnpj.setGeometry(QtCore.QRect(20, 80, 221, 25))
self.tx_cnpj.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_cnpj.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_cnpj.setPlaceholderText("")
self.tx_cnpj.setObjectName("tx_cnpj")
self.lb_FormFornecedor_23 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_23.setGeometry(QtCore.QRect(230, 120, 190, 20))
self.lb_FormFornecedor_23.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_23.setObjectName("lb_FormFornecedor_23")
self.tx_Email = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Email.setGeometry(QtCore.QRect(230, 140, 196, 25))
self.tx_Email.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Email.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" \n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Email.setPlaceholderText("")
self.tx_Email.setObjectName("tx_Email")
self.lb_FormFornecedor_11 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_11.setGeometry(QtCore.QRect(160, 215, 250, 20))
self.lb_FormFornecedor_11.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_11.setObjectName("lb_FormFornecedor_11")
self.tx_Endereco = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Endereco.setGeometry(QtCore.QRect(160, 240, 400, 25))
self.tx_Endereco.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Endereco.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Endereco.setInputMask("")
self.tx_Endereco.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Endereco.setPlaceholderText("")
self.tx_Endereco.setObjectName("tx_Endereco")
self.lb_FormFornecedor_12 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_12.setGeometry(QtCore.QRect(580, 215, 50, 20))
self.lb_FormFornecedor_12.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_12.setObjectName("lb_FormFornecedor_12")
self.tx_Numero = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Numero.setGeometry(QtCore.QRect(580, 240, 70, 25))
self.tx_Numero.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Numero.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Numero.setInputMask("")
self.tx_Numero.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Numero.setPlaceholderText("")
self.tx_Numero.setObjectName("tx_Numero")
self.tx_Bairro = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Bairro.setGeometry(QtCore.QRect(20, 295, 260, 25))
self.tx_Bairro.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Bairro.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Bairro.setInputMask("")
self.tx_Bairro.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Bairro.setPlaceholderText("")
self.tx_Bairro.setObjectName("tx_Bairro")
self.lb_FormFornecedor_13 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_13.setGeometry(QtCore.QRect(20, 270, 120, 20))
self.lb_FormFornecedor_13.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_13.setObjectName("lb_FormFornecedor_13")
self.tx_Cidade = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Cidade.setGeometry(QtCore.QRect(300, 295, 260, 25))
self.tx_Cidade.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Cidade.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Cidade.setInputMask("")
self.tx_Cidade.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Cidade.setPlaceholderText("")
self.tx_Cidade.setObjectName("tx_Cidade")
self.lb_FormFornecedor_14 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_14.setGeometry(QtCore.QRect(300, 270, 120, 20))
self.lb_FormFornecedor_14.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_14.setObjectName("lb_FormFornecedor_14")
self.lb_FormFornecedor_15 = QtWidgets.QLabel(self.fr_FormFornecedor)
self.lb_FormFornecedor_15.setGeometry(QtCore.QRect(580, 270, 70, 20))
self.lb_FormFornecedor_15.setStyleSheet("QLabel{\n"
"font-size: 12px;\n"
"font-family: \"Arial Unicode MS\";\n"
"font-weight: bold;\n"
"color: #797979\n"
"}")
self.lb_FormFornecedor_15.setObjectName("lb_FormFornecedor_15")
self.tx_Estado = QtWidgets.QLineEdit(self.fr_FormFornecedor)
self.tx_Estado.setGeometry(QtCore.QRect(580, 295, 70, 25))
self.tx_Estado.setFocusPolicy(QtCore.Qt.WheelFocus)
self.tx_Estado.setStyleSheet("QLineEdit{\n"
"background: #CFCFCF;\n"
"border-radius: 2px;\n"
"color: #000;\n"
"font: 13px \"Arial\" ;\n"
"text-transform: uppercase\n"
"}\n"
"QLineEdit:Focus {\n"
"border: 1px solid red;\n"
"}")
self.tx_Estado.setInputMask("")
self.tx_Estado.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.tx_Estado.setPlaceholderText("")
self.tx_Estado.setObjectName("tx_Estado")
self.bt_busca_cep = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cep.setGeometry(QtCore.QRect(130, 240, 21, 31))
self.bt_busca_cep.setText("")
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap("UI/../../Imagens/search.png"), QtGui.QIcon.Normal, QtGui.QIcon.Off)
self.bt_busca_cep.setIcon(icon)
self.bt_busca_cep.setObjectName("bt_busca_cep")
self.bt_busca_cnpj = QtWidgets.QPushButton(self.fr_FormFornecedor)
self.bt_busca_cnpj.setGeometry(QtCore.QRect(250, 80, 111, 31))
font = QtGui.QFont()
font.setFamily("Tahoma")
font.setPointSize(10)
font.setBold(True)
font.setWeight(75)
self.bt_busca_cnpj.setFont(font)
self.bt_busca_cnpj.setCursor(QtGui.QCursor(QtCore.Qt.PointingHandCursor))
self.bt_busca_cnpj.setFocusPolicy(QtCore.Qt.NoFocus)
self.bt_busca_cnpj.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)
self.bt_busca_cnpj.setStyleSheet("QPushButton {\n"
"background-color: #7AB32E;\n"
"color: #FFF\n"
" }\n"
"QPushButton:hover{\n"
"background-color: #40a286\n"
"}")
self.bt_busca_cnpj.setIconSize(QtCore.QSize(75, 35))
self.bt_busca_cnpj.setObjectName("bt_busca_cnpj")
self.retranslateUi(ct_FormFornecedor)
QtCore.QMetaObject.connectSlotsByName(ct_FormFornecedor)
ct_FormFornecedor.setTabOrder(self.tx_cnpj, self.tx_NomeFantasia)
ct_FormFornecedor.setTabOrder(self.tx_NomeFantasia, self.tx_Telefone)
ct_FormFornecedor.setTabOrder(self.tx_Telefone, self.tx_Email)
ct_FormFornecedor.setTabOrder(self.tx_Email, self.tx_Cep)
ct_FormFornecedor.setTabOrder(self.tx_Cep, self.bt_busca_cep)
ct_FormFornecedor.setTabOrder(self.bt_busca_cep, self.tx_Endereco)
ct_FormFornecedor.setTabOrder(self.tx_Endereco, self.tx_Numero)
ct_FormFornecedor.setTabOrder(self.tx_Numero, self.tx_Bairro)
ct_FormFornecedor.setTabOrder(self.tx_Bairro, self.tx_Cidade)
ct_FormFornecedor.setTabOrder(self.tx_Cidade, self.tx_Estado)
def retranslateUi(self, ct_FormFornecedor):
_translate = QtCore.QCoreApplication.translate
ct_FormFornecedor.setWindowTitle(_translate("ct_FormFornecedor", "Cadastro Fornecedores"))
self.lb_FormFornecedor.setText(_translate("ct_FormFornecedor", "FICHA CADASTRAL FORNECEDOR"))
self.lb_FormFornecedor_2.setText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.tx_NomeFantasia.setPlaceholderText(_translate("ct_FormFornecedor", "NOME FANTASIA"))
self.lb_FormFornecedor_3.setText(_translate("ct_FormFornecedor", "CNPJ"))
self.lb_FormFornecedor_5.setText(_translate("ct_FormFornecedor", "TELEFONE "))
self.tx_Telefone.setInputMask(_translate("ct_FormFornecedor", "(00) 0000-00000"))
self.tx_Telefone.setText(_translate("ct_FormFornecedor", "() -"))
self.lb_FormFornecedor_8.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.tx_Cep.setInputMask(_translate("ct_FormFornecedor", "99999-999"))
self.tx_Cep.setPlaceholderText(_translate("ct_FormFornecedor", "123456789"))
self.lb_FormFornecedor_10.setText(_translate("ct_FormFornecedor", "CEP"))
self.bt_Voltar.setText(_translate("ct_FormFornecedor", "VOLTAR"))
self.bt_Salvar.setText(_translate("ct_FormFornecedor", "SALVAR"))
self.tx_cnpj.setInputMask(_translate("ct_FormFornecedor", "##.###.###/####-##"))
self.tx_cnpj.setText(_translate("ct_FormFornecedor", "../-----"))
self.lb_FormFornecedor_23.setText(_translate("ct_FormFornecedor", "Email"))
self.lb_FormFornecedor_11.setText(_translate("ct_FormFornecedor", "ENDEREÇO"))
self.lb_FormFornecedor_12.setText(_translate("ct_FormFornecedor", "Nº"))
self.lb_FormFornecedor_13.setText(_translate("ct_FormFornecedor", "BAIRRO"))
self.lb_FormFornecedor_14.setText(_translate("ct_FormFornecedor", "CIDADE"))
self.lb_FormFornecedor_15.setText(_translate("ct_FormFornecedor", "ESTADO"))
self.bt_busca_cep.setAccessibleName(_translate("ct_FormFornecedor", "BUSCA CEP"))
        self.bt_busca_cnpj.setText(_translate("ct_FormFornecedor", "BUSCAR CNPJ"))
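
# Example usage (an illustrative sketch, following the usual pyuic5 pattern):
#   import sys
#   from PyQt5 import QtWidgets
#   app = QtWidgets.QApplication(sys.argv)
#   form = QtWidgets.QWidget()
#   ui = Ui_ct_FormFornecedor()
#   ui.setupUi(form)
#   form.show()
#   sys.exit(app.exec_())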
generic-tag-match.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// run-pass
#![allow(unused_assignments)]
#![allow(non_camel_case_types)]
enum foo<T> { arm(T), }
fn altfoo<T>(f: foo<T>) {
let mut hit = false;
match f { foo::arm::<T>(_x) => { println!("in arm"); hit = true; } }
assert!((hit));
}
pub fn main() { altfoo::<isize>(foo::arm::<isize>(10)); }
bufspan.rs | use std::cmp::Ordering;
use std::fmt::{self, Formatter, Debug};
use std::intrinsics::{assume, move_val_init};
use std::iter::{self, IntoIterator, FromIterator};
use std::mem;
use std::option;
use std::slice;
use std::vec;
use iobuf::Iobuf;
use BufSpan::{Empty, One, Many};
use SpanIter::{Opt, Lot};
use SpanMoveIter::{MoveOpt, MoveLot};
#[cold]
fn bytes_in_vbuf<Buf: Iobuf>(v: &[Buf]) -> usize {
v.into_iter().map(|b| b.len() as usize).sum()
}
#[test]
fn test_bytes_in_vbuf() {
use impls::ROIobuf;
let bs = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
assert_eq!(bytes_in_vbuf(&bs), 9);
assert_eq!(bytes_in_vbuf(&bs[1..]), 5);
assert_eq!(bytes_in_vbuf(&bs[2..]), 1);
}
#[cold]
fn count_bytes_cmp_vbuf<Buf: Iobuf>(v: &[Buf], mut other: usize) -> Ordering {
for b in v {
let len = b.len() as usize;
if len > other { return Ordering::Greater }
other -= len;
}
if other == 0 { Ordering::Equal }
else { Ordering::Less }
}
#[test]
fn test_count_bytes_cmp_vbuf() {
use impls::ROIobuf;
let bs = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"), | ROIobuf::from_str("9"),
];
assert_eq!(count_bytes_cmp_vbuf(&bs, 0 ), Ordering::Greater);
assert_eq!(count_bytes_cmp_vbuf(&bs, 10), Ordering::Less);
assert_eq!(count_bytes_cmp_vbuf(&bs, 9 ), Ordering::Equal);
}
#[cold]
fn byte_equal_slice_vbuf<Buf: Iobuf>(v: &[Buf], mut other: &[u8]) -> bool {
if count_bytes_cmp_vbuf(v, other.len()) != Ordering::Equal {
return false;
}
unsafe {
for b in v {
let b_as_slice = b.as_window_slice();
assume(other.len() >= b_as_slice.len());
let (start, new_other) = other.split_at(b_as_slice.len());
if b_as_slice != start { return false; }
other = new_other;
}
}
true
}
#[test]
fn test_byte_equal_slice_vbuf() {
use impls::ROIobuf;
let bs = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
assert!(byte_equal_slice_vbuf(&bs, b"123456789"));
assert!(!byte_equal_slice_vbuf(&bs, b"123456780"));
assert!(!byte_equal_slice_vbuf(&bs, b"987654321"));
assert!(!byte_equal_slice_vbuf(&bs, b"12345678"));
assert!(!byte_equal_slice_vbuf(&bs, b"23456789"));
}
#[inline]
fn byte_equal_buf_buf<Buf1: Iobuf, Buf2: Iobuf>(x: &Buf1, y: &Buf2) -> bool {
unsafe {
x.as_window_slice() == y.as_window_slice()
}
}
#[inline]
fn byte_equal_buf_vbuf<Buf1: Iobuf, Buf2: Iobuf>(x: &Buf1, y: &[Buf2]) -> bool {
unsafe { byte_equal_slice_vbuf(y, x.as_window_slice()) }
}
#[cold]
fn byte_equal_vbuf_vbuf<Buf1: Iobuf, Buf2: Iobuf>(x: &[Buf1], y: &[Buf2]) -> bool {
if count_bytes_cmp_vbuf(x, bytes_in_vbuf(y)) != Ordering::Equal {
return false;
}
unsafe {
x.into_iter().flat_map(|b| b.as_window_slice().into_iter())
.zip(y.iter().flat_map(|b| b.as_window_slice().into_iter()))
.all(|(x, y)| x == y)
}
}
#[test]
fn test_byte_equal_vbuf_vbuf() {
use impls::ROIobuf;
let b0 = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
let b1 = [
ROIobuf::from_str("12"),
ROIobuf::from_str("34567"),
ROIobuf::from_str("89"),
];
let b2 = [
ROIobuf::from_str("123456789"),
];
let b3 = [
ROIobuf::from_str("123456780"),
];
let b4 = [
ROIobuf::from_str("11111111111111"),
];
assert!(byte_equal_vbuf_vbuf(&b0, &b1));
assert!(byte_equal_vbuf_vbuf(&b1, &b0));
assert!(byte_equal_vbuf_vbuf(&b0, &b2));
assert!(byte_equal_vbuf_vbuf(&b2, &b0));
assert!(byte_equal_vbuf_vbuf(&b1, &b2));
assert!(byte_equal_vbuf_vbuf(&b2, &b1));
assert!(!byte_equal_vbuf_vbuf(&b0, &b3));
assert!(!byte_equal_vbuf_vbuf(&b1, &b3));
assert!(!byte_equal_vbuf_vbuf(&b2, &b3));
assert!(!byte_equal_vbuf_vbuf(&b0, &b4));
assert!(!byte_equal_vbuf_vbuf(&b1, &b4));
assert!(!byte_equal_vbuf_vbuf(&b2, &b4));
assert!(!byte_equal_vbuf_vbuf(&b3, &b4));
}
/// `true` if `v` starts with `other`
#[cold]
fn starts_with_vbuf<Buf: Iobuf>(v: &[Buf], mut other: &[u8]) -> bool {
for b in v {
let b = unsafe { b.as_window_slice() };
match b.len().cmp(&other.len()) {
Ordering::Greater => return b.starts_with(other),
Ordering::Equal => return b == other,
Ordering::Less => {
let (start, new_other) = other.split_at(b.len());
if b != start { return false; }
other = new_other;
}
}
}
// Walked through all of `v`. If `other` is empty, `v` == `other`, and
// therefore, `v` starts with `other`.
other.is_empty()
}
#[test]
fn test_starts_with_vbuf() {
use impls::ROIobuf;
let b0 = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
assert!(starts_with_vbuf(&b0, b"123456789"));
assert!(starts_with_vbuf(&b0, b"12345678"));
assert!(starts_with_vbuf(&b0, b""));
assert!(starts_with_vbuf(&b0, b"12345"));
assert!(!starts_with_vbuf(&b0, b"123456780"));
assert!(!starts_with_vbuf(&b0, b"1234567890"));
assert!(!starts_with_vbuf(&b0, b"123450789"));
assert!(!starts_with_vbuf(&b0, b"2"));
}
#[cold]
fn ends_with_vbuf<Buf: Iobuf>(v: &[Buf], mut other: &[u8]) -> bool {
for b in v.into_iter().rev() {
let b = unsafe { b.as_window_slice() };
match b.len().cmp(&other.len()) {
Ordering::Greater => return b.ends_with(other),
Ordering::Equal => return b == other,
Ordering::Less => {
let (new_other, end) = other.split_at(other.len() - b.len());
if b != end { return false; }
other = new_other;
}
}
}
// Walked through all of `v`. If `other` is empty, `v` == `other`, and
// therefore, `v` ends with `other`.
other.is_empty()
}
#[test]
fn test_ends_with_vbuf() {
use impls::ROIobuf;
let b0 = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
assert!(ends_with_vbuf(&b0, b"123456789"));
assert!(ends_with_vbuf(&b0, b"23456789"));
assert!(ends_with_vbuf(&b0, b"3456789"));
assert!(ends_with_vbuf(&b0, b"456789"));
assert!(ends_with_vbuf(&b0, b"56789"));
assert!(ends_with_vbuf(&b0, b"6789"));
assert!(ends_with_vbuf(&b0, b"9"));
assert!(ends_with_vbuf(&b0, b""));
assert!(!ends_with_vbuf(&b0, b"1234567890"));
assert!(!ends_with_vbuf(&b0, b"023456789"));
assert!(!ends_with_vbuf(&b0, b"123456780"));
assert!(!ends_with_vbuf(&b0, b"123450789"));
assert!(!ends_with_vbuf(&b0, b"987654321"));
}
#[inline]
fn cmp_buf_buf<Buf: Iobuf>(bx: &Buf, by: &Buf) -> Ordering {
unsafe {
bx.as_window_slice().into_iter().cmp(
by.as_window_slice().into_iter())
}
}
#[cold]
fn cmp_buf_vec<Buf: Iobuf>(b: &Buf, v: &[Buf]) -> Ordering {
let mut b = unsafe { b.as_window_slice() };
for x in v {
let x = unsafe { x.as_window_slice() };
if b.len() >= x.len() {
let (start, new_b) = b.split_at(x.len());
match start.into_iter().cmp(x.into_iter()) {
Ordering::Equal => { b = new_b; }
order => return order,
}
} else {
return (&b).into_iter().cmp(x.into_iter());
}
}
if b.is_empty() { Ordering::Equal } else { Ordering::Greater }
}
#[test]
fn test_cmp_buf_vec() {
use impls::ROIobuf;
let b0 = [
ROIobuf::from_str("1234"),
ROIobuf::from_str("5678"),
ROIobuf::from_str("9"),
];
assert_eq!(cmp_buf_vec(&ROIobuf::from_str("123456789"), &b0), Ordering::Equal);
assert_eq!(cmp_buf_vec(&ROIobuf::from_str("12345678"), &b0), Ordering::Less);
assert_eq!(cmp_buf_vec(&ROIobuf::from_str("1234567890"), &b0), Ordering::Greater);
assert_eq!(cmp_buf_vec(&ROIobuf::from_str("023456789"), &b0), Ordering::Less);
assert_eq!(cmp_buf_vec(&ROIobuf::from_str("223456789"), &b0), Ordering::Greater);
}
#[cold]
fn cmp_vec_vec<Buf: Iobuf>(vx: &BufSpan<Buf>, vy: &BufSpan<Buf>) -> Ordering {
vx.iter_bytes().cmp(vy.iter_bytes())
}
/// A span over potentially many Iobufs. This is useful as a "string" type where
/// the contents of the string can come from multiple IObufs, and you want to
/// avoid copying the buffer contents unnecessarily.
///
/// As an optimization, pushing an Iobuf that points to data immediately after
/// the range represented by the last Iobuf pushed will result in just expanding
/// the held Iobuf's range. This prevents allocating lots of unnecessary
/// intermediate buffers, while still maintaining the illusion of "pushing lots
/// of buffers" while incrementally parsing.
///
/// A `BufSpan` is internally represented as either an `Iobuf` or a `Vec<Iobuf>`,
/// depending on how many different buffers were used.
#[derive(Clone)]
pub enum BufSpan<Buf> {
/// A span over 0 bytes.
Empty,
/// A single span over one range.
One (Buf),
/// A span over several backing Iobufs.
Many(Vec<Buf>),
}
impl<Buf: Iobuf> Debug for BufSpan<Buf> {
fn fmt(&self, f: &mut Formatter) -> fmt::Result {
let mut first_time = true;
for b in self {
if !first_time {
try!(write!(f, "\n"));
} else {
first_time = false;
}
try!(b.fmt(f));
}
Ok(())
}
}
impl<Buf: Iobuf> FromIterator<Buf> for BufSpan<Buf> {
#[inline]
fn from_iter<T>(iterator: T) -> Self
where T: IntoIterator<Item=Buf> {
let mut ret = BufSpan::new();
ret.extend(iterator);
ret
}
}
impl<Buf: Iobuf> Extend<Buf> for BufSpan<Buf> {
#[inline]
fn extend<I: IntoIterator<Item=Buf>>(&mut self, iterator: I) {
for x in iterator {
self.push(x);
}
}
}
impl<Buf: Iobuf> IntoIterator for BufSpan<Buf> {
type Item = Buf;
type IntoIter = SpanMoveIter<Buf>;
#[inline]
fn into_iter(self) -> SpanMoveIter<Buf> {
match self {
Empty => MoveOpt(None.into_iter()),
One (b) => MoveOpt(Some(b).into_iter()),
Many(v) => MoveLot(v.into_iter()),
}
}
}
impl<'a, Buf: Iobuf> IntoIterator for &'a BufSpan<Buf> {
type Item = &'a Buf;
type IntoIter = SpanIter<'a, Buf>;
#[inline]
fn into_iter(self) -> SpanIter<'a, Buf> {
match *self {
Empty => Opt(None.into_iter()),
One (ref b) => Opt(Some(b).into_iter()),
Many(ref v) => Lot(v.into_iter()),
}
}
}
impl<Buf: Iobuf> BufSpan<Buf> {
  /// Creates a new, empty `BufSpan`.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let s: BufSpan<ROIobuf<'static>> = BufSpan::new();
/// assert!(s.is_empty());
/// ```
#[inline]
pub fn new() -> Self {
BufSpan::Empty
}
/// Creates a new `BufSpan` from an Iobuf.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
/// use std::iter::IntoIterator;
///
/// let s = BufSpan::from_buf(ROIobuf::from_slice(b"hello"));
/// assert_eq!((&s).into_iter().count(), 1);
/// assert_eq!(s.count_bytes(), 5);
/// ```
#[inline]
pub fn from_buf(b: Buf) -> Self {
if b.is_empty() { Empty } else { One(b) }
}
/// Returns `true` iff the span is over an empty range.
///
/// ```rust
/// use iobuf::{BufSpan, Iobuf, ROIobuf};
///
/// let mut s = BufSpan::new();
///
/// assert!(s.is_empty());
///
/// s.push(ROIobuf::from_str(""));
/// assert!(s.is_empty());
///
/// s.push(ROIobuf::from_str("hello, world!"));
/// assert!(!s.is_empty());
/// ```
#[inline]
pub fn is_empty(&self) -> bool {
match *self {
Empty => true,
_ => false,
}
}
/// The fast path during pushing -- either fills in the first buffer, or
/// extends an existing one.
///
/// Returns `None` if the fast path was taken and nothing more needs to be
/// done. Returns `Some` if we need to do a slow push.
#[inline]
fn try_to_extend(&mut self, b: Buf) -> Option<Buf> {
if b.len() == 0 { return None; }
if self.is_empty() {
// EFFICIENCY HACK: We know we're empty, so we can drop without running
// drop glue. rustc isn't smart enough to figure this out. This will
// stop all drop calls in this function, leaving any dropping that might
// have to happen to `slow_push`.
unsafe {
move_val_init(self, One(b));
return None;
}
}
match *self {
Empty => unreachable!(),
One(ref mut b0) => {
match b0.extend_with(&b) {
Ok (()) => return None,
Err(()) => return Some(b),
}
}
Many(_) => return Some(b),
}
}
/// Appends a buffer to a `BufSpan`. If the buffer is an extension of the
/// previously pushed buffer, the range will be extended. Otherwise, the new
/// non-extension buffer will be added to the end of a vector.
///
/// ```rust
/// use iobuf::{BufSpan, Iobuf, ROIobuf};
/// use std::iter::IntoIterator;
///
/// let mut s = BufSpan::new();
///
/// s.push(ROIobuf::from_str("he"));
/// s.push(ROIobuf::from_str("llo"));
///
/// assert_eq!(s.count_bytes() as usize, "hello".len());
/// assert_eq!((&s).into_iter().count(), 2);
///
/// let mut b0 = ROIobuf::from_str(" world");
/// let mut b1 = b0.clone();
///
/// assert_eq!(b0.resize(2), Ok(()));
/// assert_eq!(b1.advance(2), Ok(()));
///
/// s.push(b0);
/// s.push(b1);
///
/// // b0 and b1 are immediately after each other, and from the same buffer,
/// // so get merged into one Iobuf.
/// assert_eq!(s.count_bytes() as usize, "hello world".len());
/// assert_eq!((&s).into_iter().count(), 3);
/// ```
#[inline(always)]
pub fn push(&mut self, b: Buf) {
if let Some(b) = self.try_to_extend(b) {
self.slow_push(b);
}
}
/// The slow path during a push. This is only taken if a `BufSpan` must span
/// multiple backing buffers.
fn slow_push(&mut self, b: Buf) {
if let Many(ref mut v) = *self {
unsafe {
let last_pos = v.len() - 1;
if let Err(()) = v.get_unchecked_mut(last_pos).extend_with(&b) {
v.push(b);
}
}
} else {
// Need to upgrade from a `One` into a `Many`. This requires replacement.
let this = mem::replace(self, Empty);
// We know that we're empty, therefore no drop glue needs to be run.
unsafe {
move_val_init(self,
if let One(b0) = this {
let mut v = Vec::with_capacity(2);
v.push(b0);
v.push(b);
Many(v)
} else {
unreachable!()
})
}
}
}
/// Returns an iterator over the bytes in the `BufSpan`.
#[inline]
pub fn iter_bytes<'a>(&'a self) -> ByteIter<'a, Buf> {
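    // Free functions coerced to `fn` pointers are used below (rather than
    // closures) so the returned iterator has a nameable type for `ByteIter`.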
#[inline]
fn iter_buf_<B: Iobuf>(buf: &B) -> slice::Iter<u8> {
unsafe { buf.as_window_slice().iter() }
}
#[inline]
fn deref_u8_(x: &u8) -> u8 { *x }
let iter_buf : fn(&Buf) -> slice::Iter<u8> = iter_buf_;
let deref_u8 : fn(&u8) -> u8 = deref_u8_;
self.into_iter().flat_map(iter_buf).map(deref_u8)
}
/// Returns `true` iff the bytes in this `BufSpan` are the same as the bytes
/// in the other `BufSpan`.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf, RWIobuf};
///
/// let a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// let b = BufSpan::from_buf(RWIobuf::from_str_copy("hello"));
///
/// assert!(a.byte_equal(&b));
///
/// let mut c = BufSpan::from_buf(ROIobuf::from_str("hel"));
/// c.push(ROIobuf::from_str("lo"));
///
/// assert!(a.byte_equal(&c)); assert!(c.byte_equal(&a));
///
/// let d = BufSpan::from_buf(ROIobuf::from_str("helo"));
/// assert!(!a.byte_equal(&d));
/// ```
#[inline]
pub fn byte_equal<Buf2: Iobuf>(&self, other: &BufSpan<Buf2>) -> bool {
match (self, other) {
(&Empty , &Empty ) => true,
(&Empty , _ ) => false,
( _ , &Empty ) => false,
(&One (ref x), &One (ref y)) => byte_equal_buf_buf(x, y),
(&One (ref x), &Many(ref y)) => byte_equal_buf_vbuf(x, y),
(&Many(ref x), &One (ref y)) => byte_equal_buf_vbuf(y, x),
(&Many(ref x), &Many(ref y)) => byte_equal_vbuf_vbuf(x, y),
}
}
/// A more efficient version of byte_equal, specialized to work exclusively on
/// slices.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let a = BufSpan::from_buf(ROIobuf::from_str("hello"));
///
/// assert!(a.byte_equal_slice(b"hello"));
/// assert!(!a.byte_equal_slice(b"helo"));
/// ```
#[inline]
pub fn byte_equal_slice(&self, other: &[u8]) -> bool {
match *self {
Empty => other.is_empty(),
One (ref b) => unsafe { b.as_window_slice() == other },
Many(ref v) => byte_equal_slice_vbuf(v, other),
}
}
  /// Counts the number of bytes this `BufSpan` covers. This is
  /// `O(self.into_iter().len())`.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let mut a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// a.push(ROIobuf::from_str(" "));
/// a.push(ROIobuf::from_str("world"));
///
/// assert_eq!(a.count_bytes(), 11); // iterates over the pushed buffers.
/// ```
#[inline]
pub fn count_bytes(&self) -> usize {
// `self.into_iter().map(|b| b.len()).sum()` would be shorter, but I like to
// specialize for the much more common case of empty or singular `BufSpan`s.
match *self {
Empty => 0,
One (ref b) => b.len() as usize,
Many(ref v) => bytes_in_vbuf(v),
}
}
/// Compares the number of bytes in this span with another number, returning
/// how they compare. This is more efficient than calling `count_bytes` and
/// comparing that result, since we might be able to avoid iterating over all
/// the buffers.
///
/// ```rust
/// use std::cmp::Ordering;
/// use iobuf::{BufSpan, ROIobuf};
///
/// let mut a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// a.push(ROIobuf::from_str(" "));
/// a.push(ROIobuf::from_str("world"));
///
/// assert_eq!(a.count_bytes_cmp(0), Ordering::Greater);
/// assert_eq!(a.count_bytes_cmp(11), Ordering::Equal);
/// assert_eq!(a.count_bytes_cmp(9001), Ordering::Less);
/// ```
#[inline]
pub fn count_bytes_cmp(&self, other: usize) -> Ordering {
match *self {
Empty => 0.cmp(&other),
One (ref b) => (b.len() as usize).cmp(&other),
Many(ref v) => count_bytes_cmp_vbuf(v, other),
}
}
/// Extends this span to include the range denoted by another span.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let mut a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// a.push(ROIobuf::from_str(" "));
/// let mut b = BufSpan::from_buf(ROIobuf::from_str("world"));
/// b.push(ROIobuf::from_str("!!!"));
///
/// a.append(b);
///
/// assert!(a.byte_equal_slice(b"hello world!!!"));
/// ```
#[inline]
pub fn append(&mut self, other: Self) {
if self.is_empty() {
unsafe { move_val_init(self, other) }
} else {
self.extend(other.into_iter())
}
}
/// Returns `true` if the span begins with the given bytes.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let mut a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// a.push(ROIobuf::from_str(" "));
/// a.push(ROIobuf::from_str("world!"));
///
/// assert!(a.starts_with(b""));
/// assert!(a.starts_with(b"hel"));
/// assert!(a.starts_with(b"hello "));
/// assert!(a.starts_with(b"hello wor"));
/// assert!(a.starts_with(b"hello world!"));
///
/// assert!(!a.starts_with(b"goodbye"));
/// ```
#[inline]
pub fn starts_with(&self, other: &[u8]) -> bool {
match *self {
Empty => other.is_empty(),
One(ref b) => unsafe { b.as_window_slice().starts_with(other) },
Many(ref v) => starts_with_vbuf(v, other),
}
}
/// Returns `true` if the span ends with the given bytes.
///
/// ```rust
/// use iobuf::{BufSpan, ROIobuf};
///
/// let mut a = BufSpan::from_buf(ROIobuf::from_str("hello"));
/// a.push(ROIobuf::from_str(" "));
/// a.push(ROIobuf::from_str("world!"));
///
/// assert!(a.ends_with(b""));
/// assert!(a.ends_with(b"!"));
/// assert!(a.ends_with(b"rld!"));
/// assert!(a.ends_with(b"lo world!"));
/// assert!(a.ends_with(b"hello world!"));
///
/// assert!(!a.ends_with(b"goodbye"));
/// ```
#[inline]
pub fn ends_with(&self, other: &[u8]) -> bool {
match *self {
Empty => other.is_empty(),
One (ref b) => unsafe { b.as_window_slice().ends_with(other) },
Many(ref v) => ends_with_vbuf(v, other),
}
}
}
impl<Buf: Iobuf> PartialEq for BufSpan<Buf> {
#[inline]
fn eq(&self, other: &Self) -> bool {
self.byte_equal(other)
}
}
impl<Buf: Iobuf> Eq for BufSpan<Buf> {}
impl<Buf: Iobuf> PartialOrd for BufSpan<Buf> {
#[inline]
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(self.cmp(other))
}
}
impl<Buf: Iobuf> Ord for BufSpan<Buf> {
#[inline]
fn cmp(&self, other: &Self) -> Ordering {
match (self, other) {
(&Empty, &Empty) => Ordering::Equal,
(&Empty, _ ) => Ordering::Less,
( _ , &Empty) => Ordering::Greater,
(&One (ref bx), &One (ref by)) => cmp_buf_buf(bx, by),
(&One (ref bx), &Many(ref vy)) => cmp_buf_vec(bx, vy),
(&Many(ref vx), &One (ref by)) => cmp_buf_vec(by, vx).reverse(),
(&Many( _ ), &Many( _ )) => cmp_vec_vec(self, other),
}
}
}
/// An iterator over the bytes in a `BufSpan`.
pub type ByteIter<'a, Buf> =
iter::Map<
iter::FlatMap<
SpanIter<'a, Buf>,
slice::Iter<'a, u8>,
fn(&Buf) -> slice::Iter<u8>>,
fn(&u8) -> u8>;
/// An iterator over references to buffers inside a `BufSpan`.
pub enum SpanIter<'a, Buf: 'a> {
/// An optional item to iterate over.
Opt(option::IntoIter<&'a Buf>),
/// A lot of items to iterate over.
Lot(slice::Iter<'a, Buf>),
}
impl<'a, Buf: Iobuf> Iterator for SpanIter<'a, Buf> {
type Item = &'a Buf;
#[inline(always)]
fn next(&mut self) -> Option<&'a Buf> {
    // I'm counting on this match getting lifted out of the loop with
// loop-invariant code motion.
match *self {
Opt(ref mut iter) => iter.next(),
Lot(ref mut iter) => iter.next(),
}
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
match *self {
Opt(ref iter) => iter.size_hint(),
Lot(ref iter) => iter.size_hint(),
}
}
}
impl<'a, Buf: Iobuf> DoubleEndedIterator for SpanIter<'a, Buf> {
#[inline(always)]
fn next_back(&mut self) -> Option<&'a Buf> {
    // I'm counting on this match getting lifted out of the loop with
// loop-invariant code motion.
match *self {
Opt(ref mut iter) => iter.next_back(),
Lot(ref mut iter) => iter.next_back(),
}
}
}
impl<'a, Buf: Iobuf> ExactSizeIterator for SpanIter<'a, Buf> {}
/// A moving iterator over buffers inside a `BufSpan`.
pub enum SpanMoveIter<Buf> {
/// An optional item to iterate over.
MoveOpt(option::IntoIter<Buf>),
/// A lot of items to iterate over.
MoveLot(vec::IntoIter<Buf>),
}
impl<Buf: Iobuf> Iterator for SpanMoveIter<Buf> {
type Item = Buf;
#[inline(always)]
fn next(&mut self) -> Option<Buf> {
    // I'm counting on this match getting lifted out of the loop with
// loop-invariant code motion.
match *self {
MoveOpt(ref mut iter) => iter.next(),
MoveLot(ref mut iter) => iter.next(),
}
}
#[inline(always)]
fn size_hint(&self) -> (usize, Option<usize>) {
match *self {
MoveOpt(ref iter) => iter.size_hint(),
MoveLot(ref iter) => iter.size_hint(),
}
}
}
impl<Buf: Iobuf> DoubleEndedIterator for SpanMoveIter<Buf> {
#[inline(always)]
fn next_back(&mut self) -> Option<Buf> {
    // I'm counting on this match getting lifted out of the loop with
// loop-invariant code motion.
match *self {
MoveOpt(ref mut iter) => iter.next_back(),
MoveLot(ref mut iter) => iter.next_back(),
}
}
}
impl<Buf: Iobuf> ExactSizeIterator for SpanMoveIter<Buf> {}
#[cfg(test)]
mod bench {
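  // Microbenchmarks for `BufSpan` pushes and extends. They require a nightly
  // toolchain, since `test::Bencher` is unstable; run them with `cargo bench`.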
use test::{black_box, Bencher};
use super::super::iobuf::Iobuf;
use super::super::impls::{ROIobuf, RWIobuf};
use super::BufSpan;
#[bench]
fn create_roiobuf(b: &mut Bencher) {
b.iter(|| {
let buf = ROIobuf::from_str_copy("hello, world!");
black_box(buf);
})
}
#[bench]
fn test_none_to_one(b: &mut Bencher) {
b.iter(|| {
let mut buf = BufSpan::new();
buf.push(ROIobuf::from_str_copy("hello, world!"));
black_box(buf);
})
}
#[bench]
fn test_none_to_one_with_copy(b: &mut Bencher) {
b.iter(|| {
let mut buf = BufSpan::new();
let to_push = ROIobuf::from_str_copy("hello, world!");
buf.push(to_push);
black_box(buf);
})
}
#[bench]
fn test_none_to_many(b: &mut Bencher) {
b.iter(|| {
let mut buf = BufSpan::new();
buf.push(ROIobuf::from_str_copy("hello "));
buf.push(ROIobuf::from_str_copy("world!"));
black_box(buf);
})
}
#[bench]
fn extend_1k_iobuf_0(b: &mut Bencher) {
b.iter(|| {
let source = RWIobuf::new(1024);
let mut i = 0u32;
      for _ in 0..1000 {
unsafe { source.unsafe_poke_be(i, b'a'); }
i += 1;
}
let mut source = source.read_only();
let mut dst = BufSpan::new();
for _ in 0..1000 {
unsafe {
let (start, end) = source.unsafe_split_at(1);
dst.push(start);
source = end;
}
}
black_box(dst);
})
}
#[bench]
fn extend_1k_iobuf_1(b: &mut Bencher) {
b.iter(|| {
let source = RWIobuf::new(1024);
let mut i = 0u32;
for _ in 0..1000 {
unsafe { source.unsafe_poke_be(i, b'a'); }
i += 1;
}
let mut source = source.read_only();
let mut dst = BufSpan::new();
for _ in 0..1000 {
unsafe {
let start = source.unsafe_split_start_at(1);
dst.push(start);
}
}
black_box(dst);
})
}
#[bench]
fn extend_1k_iobuf_2(b: &mut Bencher) {
let source = RWIobuf::new(1024);
let mut i = 0u32;
for _ in 0..500 {
unsafe { source.unsafe_poke_be(i, b'a'); }
i += 1;
}
i = 500;
for _ in 500..1000 {
unsafe { source.unsafe_poke_be(i, b'b'); }
i += 1;
}
b.iter(|| {
let mut source = source.read_only();
let mut dst_a = BufSpan::new();
let mut dst_b = BufSpan::new();
let mut other = BufSpan::new();
for _ in 0..1000 {
unsafe {
let first_letter = source.unsafe_split_start_at(1);
match first_letter.unsafe_peek_be(0) {
b'a' => dst_a.push(first_letter),
b'b' => dst_b.push(first_letter),
_ => other.push(first_letter),
}
}
}
black_box((dst_a, dst_b, other));
})
}
#[bench]
fn extend_1k_iobuf_3(b: &mut Bencher) {
let source = RWIobuf::new(1024);
let mut i = 0u32;
for _ in 0..500 {
unsafe { source.unsafe_poke_be(i, b'a'); }
i += 1;
}
let mut i = 500;
for _ in 500..1000 {
unsafe { source.unsafe_poke_be(i, b'b'); }
i += 1;
}
b.iter(|| {
let mut source = source.read_only();
let mut dst_a = BufSpan::new();
let mut dst_b = BufSpan::new();
let mut other = BufSpan::new();
for _ in 0..1000 {
unsafe {
let first_letter = source.unsafe_split_start_at(1);
let to_push = match first_letter.unsafe_peek_be(0) {
b'a' => &mut dst_a,
b'b' => &mut dst_b,
_ => &mut other,
};
to_push.push(first_letter);
}
}
black_box((dst_a, dst_b, other));
})
}
#[bench]
fn clone_and_drop(b: &mut Bencher) {
let patient_zero = RWIobuf::new(1024);
b.iter(|| {
let clone = patient_zero.clone();
black_box(clone);
})
}
} | |
registry.go | // Copyright 2020 The Cockroach Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.
package rfmt
import (
"reflect"
i "github.com/cockroachdb/redact/interfaces"
)
// RegisterSafeType registers a data type to always be considered safe
// during the production of redactable strings.
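//
// A minimal usage sketch (channelID is a hypothetical caller-side type):
//
//	type channelID int64
//	RegisterSafeType(reflect.TypeOf(channelID(0)))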
func RegisterSafeType(t reflect.Type) {
safeTypeRegistry[t] = true
}
// safeTypeRegistry registers Go data types that are to be always
// considered safe, even when they don't implement SafeValue.
var safeTypeRegistry = map[reflect.Type]bool{}
func isSafeValue(a interface{}) bool {
return safeTypeRegistry[reflect.TypeOf(a)]
}
// redactErrorFn can be injected from an error library
// to render error objects safely.
var redactErrorFn func(err error, p i.SafePrinter, verb rune)
// RegisterRedactErrorFn registers an error redaction function for use
// during automatic redaction by this package.
// Provided e.g. by cockroachdb/errors.
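//
// A registration sketch (the rendering logic shown is illustrative only):
//
//	RegisterRedactErrorFn(func(err error, p i.SafePrinter, verb rune) {
//		p.Print(err.Error())
//	})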
func RegisterRedactErrorFn(fn func(err error, p i.SafePrinter, verb rune)) | {
redactErrorFn = fn
} |
|
requests.go | package snapshots
import (
gophercloud "gophercloud-lc"
"gophercloud-lc/pagination"
)
// ListOptsBuilder allows extensions to add additional parameters to the List
// request.
type ListOptsBuilder interface {
ToVolumeListQuery() (string, error)
}
// ListOpts holds options for listing Snapshots. It is passed to the
// snapshots.List function.
type ListOpts struct {
	// admin-only option. Set it to true to see all tenant snapshots.
AllTenants bool `q:"all_tenants"`
}
// ToVolumeListQuery formats a ListOpts into a query string.
func (opts ListOpts) ToVolumeListQuery() (string, error) {
q, err := gophercloud.BuildQueryString(opts)
return q.String(), err
}
// List returns Snapshots optionally limited by the conditions provided in ListOpts.
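//
// A usage sketch (assumes an authenticated *gophercloud.ServiceClient named
// client; iterating the returned pager is left to the caller):
//
//	pager := List(client, ListOpts{AllTenants: true})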
func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
url := listURL(client)
if opts != nil |
return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
return SnapshotPage{pagination.SinglePageBase(r)}
})
}
| {
query, err := opts.ToVolumeListQuery()
if err != nil {
return pagination.Pager{Err: err}
}
url += query
} |
0008_delete_bannercourse.py | # Generated by Django 2.1.7 on 2019-05-24 00:52
from django.db import migrations
class Migration(migrations.Migration):
| dependencies = [
('courses', '0007_auto_20190520_0046'),
]
operations = [
migrations.DeleteModel(
name='BannerCourse',
),
] |
|
load_data.py | """
MIT License
Copyright (c) 2022, Lawrence Livermore National Security, LLC
Written by Zachariah Carmichael et al.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from xnas.utils import get_logger
logger = get_logger(__name__)
def preprocess(image, label):
import tensorflow as tf
mean = [0.13066044]
std = [0.3081079]
    # converting the dtype rescales uint8 values in [0, 255] to floats in [0., 1.]
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = (image - tf.reshape(mean, [1, 1, 1])) / tf.reshape(std, [1, 1, 1])
label = tf.one_hot(label, depth=10, dtype=tf.int32)
return image, label
def augment(image, label):
import tensorflow as tf
import tensorflow_addons as tfa
pad = 4
# random crop with zero-padding
image = tf.image.resize_with_crop_or_pad(image,
28 + pad * 2,
28 + pad * 2)
image = tf.image.random_crop(image, size=[28, 28, 1])
# random LR flip
image = tf.image.random_flip_left_right(image)
# cutout
image = tfa.image.random_cutout(tf.expand_dims(image, 0), (8, 8))
image = tf.squeeze(image, axis=0)
return image, label
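# A quick sanity check for the pipeline above (illustrative; requires
# tensorflow and tensorflow_datasets to be installed):
#
#   data = load_data()
#   image, label = next(iter(data['train_gen']()))
#   assert image.shape == (28, 28, 1) and label.shape == (10,)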
def load_data():
| def load_train():
import tensorflow as tf
import tensorflow_datasets as tfds
ds_train = tfds.load('mnist', as_supervised=True, split='train')
ds_train = (
ds_train
.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
.cache()
.map(augment, num_parallel_calls=tf.data.AUTOTUNE)
)
return ds_train
def load_test():
import tensorflow as tf
import tensorflow_datasets as tfds
ds_test = tfds.load('mnist', as_supervised=True, split='test')
ds_test = (
ds_test
.map(preprocess, num_parallel_calls=tf.data.AUTOTUNE)
.cache()
)
return ds_test
train_size, valid_size = 60000, 10000
return {
'train_gen': load_train,
'train_size': train_size,
'valid_gen': load_test,
'valid_size': valid_size,
'types': ({'input_0': 'float32'}, 'int32'),
'shapes': ({'input_0': (28, 28, 1)}, (10,)),
} |
|
builder.rs | use secio::SecioKeyPair;
use std::collections::HashMap;
use std::marker::PhantomData;
use std::sync::Arc;
use std::{error, io};
use tokio::codec::{Decoder, Encoder};
use crate::{
service::{Service, ServiceHandle},
session::ProtocolMeta,
};
/// Builder for Service
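///
/// A construction sketch (illustrative; `MyProtocol` and `MyHandle` stand in
/// for types implementing `ProtocolMeta` and `ServiceHandle`):
///
/// ```ignore
/// let service = ServiceBuilder::new()
///     .insert_protocol(MyProtocol)
///     .build(MyHandle);
/// ```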
pub struct ServiceBuilder<T, U> {
inner: HashMap<String, Box<dyn ProtocolMeta<U> + Send + Sync>>,
key_pair: Option<SecioKeyPair>,
forever: bool,
phantom: PhantomData<T>,
}
impl<T, U> ServiceBuilder<T, U>
where
T: ProtocolMeta<U> + Send + Sync + 'static,
U: Decoder<Item = bytes::BytesMut> + Encoder<Item = bytes::Bytes> + Send + 'static,
<U as Decoder>::Error: error::Error + Into<io::Error>,
<U as Encoder>::Error: error::Error + Into<io::Error>,
{
    /// Creates a new, default (empty) builder.
pub fn new() -> Self {
Default::default()
}
/// Combine the configuration of this builder with service handle to create a Service.
pub fn build<H>(self, handle: H) -> Service<H, U>
where
H: ServiceHandle,
{
Service::new(Arc::new(self.inner), handle, self.key_pair, self.forever)
}
/// Insert a custom protocol
pub fn insert_protocol(mut self, protocol: T) -> Self {
self.inner.insert(
protocol.name(),
Box::new(protocol) as Box<dyn ProtocolMeta<_> + Send + Sync>,
);
self
}
/// Enable encrypted communication mode.
///
/// If you do not need encrypted communication, you do not need to call this method
pub fn key_pair(mut self, key_pair: SecioKeyPair) -> Self {
self.key_pair = Some(key_pair);
self
}
    /// By default, the service shuts down once it has no remaining tasks.
    /// Set this to `true` if you do not want the service to shut down.
pub fn | (mut self, forever: bool) -> Self {
self.forever = forever;
self
}
/// Clear all protocols
pub fn clear(&mut self) {
self.inner.clear();
}
}
impl<T, U> Default for ServiceBuilder<T, U>
where
T: ProtocolMeta<U> + Send + Sync + 'static,
U: Decoder<Item = bytes::BytesMut> + Encoder<Item = bytes::Bytes> + Send + 'static,
<U as Decoder>::Error: error::Error + Into<io::Error>,
<U as Encoder>::Error: error::Error + Into<io::Error>,
{
fn default() -> Self {
ServiceBuilder {
inner: HashMap::new(),
key_pair: None,
forever: false,
phantom: PhantomData,
}
}
}
| forever |
signalr_group_action.rs | use crate::{
rpc::{typed_data::Data, TypedData},
signalr::GroupAction,
FromVec,
};
use serde_derive::{Deserialize, Serialize};
use serde_json::{to_string, to_value, Value};
/// Represents the SignalR group action output binding.
///
/// The following binding attributes are supported:
///
/// | Name | Description |
/// |--------------|------------------------------------------------------------------------------------------------------------------------------|
/// | `name` | The name of the parameter being bound. |
/// | `hub_name` | The name of the SignalR hub that will receive the group action. |
/// | `connection` | The name of the app setting that contains the SignalR Service connection string. Defaults to `AzureSignalRConnectionString`. |
///
/// # Examples
///
/// This example implements an HTTP-triggered Azure Function that adds a user to a group:
///
/// ```rust
/// use azure_functions::{
/// bindings::{HttpRequest, SignalRGroupAction},
/// func,
/// signalr::GroupAction,
/// };
///
/// #[func]
/// #[binding(name = "req", auth_level = "anonymous", methods = "post")]
/// #[binding(name = "$return", hub_name = "chat", connection = "myconnection")]
/// pub fn add_to_group(req: HttpRequest) -> SignalRGroupAction {
/// SignalRGroupAction {
/// user_id: req.query_params().get("user").unwrap().to_owned(),
/// group_name: req.query_params().get("group").unwrap().to_owned(),
/// action: GroupAction::Add,
/// }
/// }
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct SignalRGroupAction {
/// The name of the group to operate on.
pub group_name: String,
/// The user id to operate on.
pub user_id: String,
/// The action to take.
pub action: GroupAction,
}
#[doc(hidden)]
impl Into<TypedData> for SignalRGroupAction {
fn into(self) -> TypedData {
TypedData {
data: Some(Data::Json(
to_string(&self).expect("failed to convert SignalR group action to JSON string"),
)),
}
}
}
#[doc(hidden)]
impl FromVec<SignalRGroupAction> for TypedData {
fn from_vec(vec: Vec<SignalRGroupAction>) -> Self {
TypedData {
data: Some(Data::Json(
Value::Array(vec.into_iter().map(|a| to_value(a).unwrap()).collect()).to_string(),
)),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn | () {
let json = to_string(&SignalRGroupAction {
group_name: "foo".to_owned(),
user_id: "bar".to_owned(),
action: GroupAction::Add,
})
.unwrap();
assert_eq!(json, r#"{"groupName":"foo","userId":"bar","action":"add"}"#);
}
#[test]
fn it_converts_to_typed_data() {
let action = SignalRGroupAction {
group_name: "foo".to_owned(),
user_id: "bar".to_owned(),
action: GroupAction::Remove,
};
let data: TypedData = action.into();
assert_eq!(
data.data,
Some(Data::Json(
r#"{"groupName":"foo","userId":"bar","action":"remove"}"#.to_string()
))
);
}
}
| it_serializes_to_json |
bgp_upderr_nbr_bag.pb.go | /*
Copyright 2019 Cisco Systems
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: bgp_upderr_nbr_bag.proto
package cisco_ios_xr_ipv4_bgp_oper_bgp_instances_instance_instance_active_vrfs_vrf_update_inbound_error_neighbors_update_inbound_error_neighbor
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
type BgpUpderrNbrBag_KEYS struct {
InstanceName string `protobuf:"bytes,1,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty"`
VrfName string `protobuf:"bytes,2,opt,name=vrf_name,json=vrfName,proto3" json:"vrf_name,omitempty"`
NeighborAddress string `protobuf:"bytes,3,opt,name=neighbor_address,json=neighborAddress,proto3" json:"neighbor_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpUpderrNbrBag_KEYS) Reset() { *m = BgpUpderrNbrBag_KEYS{} }
func (m *BgpUpderrNbrBag_KEYS) String() string { return proto.CompactTextString(m) }
func (*BgpUpderrNbrBag_KEYS) ProtoMessage() {}
func (*BgpUpderrNbrBag_KEYS) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{0}
}
func (m *BgpUpderrNbrBag_KEYS) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpUpderrNbrBag_KEYS.Unmarshal(m, b)
}
func (m *BgpUpderrNbrBag_KEYS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpUpderrNbrBag_KEYS.Marshal(b, m, deterministic)
}
func (m *BgpUpderrNbrBag_KEYS) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpUpderrNbrBag_KEYS.Merge(m, src)
}
func (m *BgpUpderrNbrBag_KEYS) XXX_Size() int {
return xxx_messageInfo_BgpUpderrNbrBag_KEYS.Size(m)
}
func (m *BgpUpderrNbrBag_KEYS) XXX_DiscardUnknown() {
xxx_messageInfo_BgpUpderrNbrBag_KEYS.DiscardUnknown(m)
}
var xxx_messageInfo_BgpUpderrNbrBag_KEYS proto.InternalMessageInfo
func (m *BgpUpderrNbrBag_KEYS) GetInstanceName() string {
if m != nil {
return m.InstanceName
}
return ""
}
func (m *BgpUpderrNbrBag_KEYS) GetVrfName() string {
if m != nil {
return m.VrfName
}
return ""
}
func (m *BgpUpderrNbrBag_KEYS) GetNeighborAddress() string {
if m != nil {
return m.NeighborAddress
}
return ""
}
type BgpL2VpnAddrT struct {
L2VpnAddress []uint32 `protobuf:"varint,1,rep,packed,name=l2vpn_address,json=l2vpnAddress,proto3" json:"l2vpn_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpL2VpnAddrT) Reset() { *m = BgpL2VpnAddrT{} }
func (m *BgpL2VpnAddrT) String() string { return proto.CompactTextString(m) }
func (*BgpL2VpnAddrT) ProtoMessage() {}
func (*BgpL2VpnAddrT) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{1}
}
func (m *BgpL2VpnAddrT) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpL2VpnAddrT.Unmarshal(m, b)
}
func (m *BgpL2VpnAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpL2VpnAddrT.Marshal(b, m, deterministic)
}
func (m *BgpL2VpnAddrT) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpL2VpnAddrT.Merge(m, src)
}
func (m *BgpL2VpnAddrT) XXX_Size() int {
return xxx_messageInfo_BgpL2VpnAddrT.Size(m)
}
func (m *BgpL2VpnAddrT) XXX_DiscardUnknown() {
xxx_messageInfo_BgpL2VpnAddrT.DiscardUnknown(m)
}
var xxx_messageInfo_BgpL2VpnAddrT proto.InternalMessageInfo
func (m *BgpL2VpnAddrT) GetL2VpnAddress() []uint32 {
if m != nil {
return m.L2VpnAddress
}
return nil
}
type BgpL2VpnMspwAddrT struct {
L2VpnAddress []uint32 `protobuf:"varint,1,rep,packed,name=l2vpn_address,json=l2vpnAddress,proto3" json:"l2vpn_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpL2VpnMspwAddrT) Reset() { *m = BgpL2VpnMspwAddrT{} }
func (m *BgpL2VpnMspwAddrT) String() string { return proto.CompactTextString(m) }
func (*BgpL2VpnMspwAddrT) ProtoMessage() {}
func (*BgpL2VpnMspwAddrT) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{2}
}
func (m *BgpL2VpnMspwAddrT) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpL2VpnMspwAddrT.Unmarshal(m, b)
}
func (m *BgpL2VpnMspwAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpL2VpnMspwAddrT.Marshal(b, m, deterministic)
}
func (m *BgpL2VpnMspwAddrT) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpL2VpnMspwAddrT.Merge(m, src)
}
func (m *BgpL2VpnMspwAddrT) XXX_Size() int {
return xxx_messageInfo_BgpL2VpnMspwAddrT.Size(m)
}
func (m *BgpL2VpnMspwAddrT) XXX_DiscardUnknown() {
xxx_messageInfo_BgpL2VpnMspwAddrT.DiscardUnknown(m)
}
var xxx_messageInfo_BgpL2VpnMspwAddrT proto.InternalMessageInfo
func (m *BgpL2VpnMspwAddrT) GetL2VpnAddress() []uint32 {
if m != nil {
return m.L2VpnAddress
}
return nil
}
| type BgpIpv4SrpolicyAddrT struct {
Ipv4SrpolicyAddress []uint32 `protobuf:"varint,1,rep,packed,name=ipv4_srpolicy_address,json=ipv4SrpolicyAddress,proto3" json:"ipv4_srpolicy_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpIpv4SrpolicyAddrT) Reset() { *m = BgpIpv4SrpolicyAddrT{} }
func (m *BgpIpv4SrpolicyAddrT) String() string { return proto.CompactTextString(m) }
func (*BgpIpv4SrpolicyAddrT) ProtoMessage() {}
func (*BgpIpv4SrpolicyAddrT) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{3}
}
func (m *BgpIpv4SrpolicyAddrT) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpIpv4SrpolicyAddrT.Unmarshal(m, b)
}
func (m *BgpIpv4SrpolicyAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpIpv4SrpolicyAddrT.Marshal(b, m, deterministic)
}
func (m *BgpIpv4SrpolicyAddrT) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpIpv4SrpolicyAddrT.Merge(m, src)
}
func (m *BgpIpv4SrpolicyAddrT) XXX_Size() int {
return xxx_messageInfo_BgpIpv4SrpolicyAddrT.Size(m)
}
func (m *BgpIpv4SrpolicyAddrT) XXX_DiscardUnknown() {
xxx_messageInfo_BgpIpv4SrpolicyAddrT.DiscardUnknown(m)
}
var xxx_messageInfo_BgpIpv4SrpolicyAddrT proto.InternalMessageInfo
func (m *BgpIpv4SrpolicyAddrT) GetIpv4SrpolicyAddress() []uint32 {
if m != nil {
return m.Ipv4SrpolicyAddress
}
return nil
}
type BgpIpv6SrpolicyAddrT struct {
Ipv6SrpolicyAddress []uint32 `protobuf:"varint,1,rep,packed,name=ipv6_srpolicy_address,json=ipv6SrpolicyAddress,proto3" json:"ipv6_srpolicy_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpIpv6SrpolicyAddrT) Reset() { *m = BgpIpv6SrpolicyAddrT{} }
func (m *BgpIpv6SrpolicyAddrT) String() string { return proto.CompactTextString(m) }
func (*BgpIpv6SrpolicyAddrT) ProtoMessage() {}
func (*BgpIpv6SrpolicyAddrT) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{4}
}
func (m *BgpIpv6SrpolicyAddrT) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpIpv6SrpolicyAddrT.Unmarshal(m, b)
}
func (m *BgpIpv6SrpolicyAddrT) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpIpv6SrpolicyAddrT.Marshal(b, m, deterministic)
}
func (m *BgpIpv6SrpolicyAddrT) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpIpv6SrpolicyAddrT.Merge(m, src)
}
func (m *BgpIpv6SrpolicyAddrT) XXX_Size() int {
return xxx_messageInfo_BgpIpv6SrpolicyAddrT.Size(m)
}
func (m *BgpIpv6SrpolicyAddrT) XXX_DiscardUnknown() {
xxx_messageInfo_BgpIpv6SrpolicyAddrT.DiscardUnknown(m)
}
var xxx_messageInfo_BgpIpv6SrpolicyAddrT proto.InternalMessageInfo
func (m *BgpIpv6SrpolicyAddrT) GetIpv6SrpolicyAddress() []uint32 {
if m != nil {
return m.Ipv6SrpolicyAddress
}
return nil
}
type BgpAddrtype struct {
Afi string `protobuf:"bytes,1,opt,name=afi,proto3" json:"afi,omitempty"`
Ipv4Address string `protobuf:"bytes,2,opt,name=ipv4_address,json=ipv4Address,proto3" json:"ipv4_address,omitempty"`
Ipv4McastAddress string `protobuf:"bytes,3,opt,name=ipv4_mcast_address,json=ipv4McastAddress,proto3" json:"ipv4_mcast_address,omitempty"`
Ipv4LabelAddress string `protobuf:"bytes,4,opt,name=ipv4_label_address,json=ipv4LabelAddress,proto3" json:"ipv4_label_address,omitempty"`
Ipv4TunnelAddress string `protobuf:"bytes,5,opt,name=ipv4_tunnel_address,json=ipv4TunnelAddress,proto3" json:"ipv4_tunnel_address,omitempty"`
Ipv4MdtAddress string `protobuf:"bytes,6,opt,name=ipv4mdt_address,json=ipv4mdtAddress,proto3" json:"ipv4mdt_address,omitempty"`
Ipv4VpnAddress string `protobuf:"bytes,7,opt,name=ipv4vpn_address,json=ipv4vpnAddress,proto3" json:"ipv4vpn_address,omitempty"`
Ipv4VpnaMcastddress string `protobuf:"bytes,8,opt,name=ipv4vpna_mcastddress,json=ipv4vpnaMcastddress,proto3" json:"ipv4vpna_mcastddress,omitempty"`
Ipv6Address string `protobuf:"bytes,9,opt,name=ipv6_address,json=ipv6Address,proto3" json:"ipv6_address,omitempty"`
Ipv6McastAddress string `protobuf:"bytes,10,opt,name=ipv6_mcast_address,json=ipv6McastAddress,proto3" json:"ipv6_mcast_address,omitempty"`
Ipv6LabelAddress string `protobuf:"bytes,11,opt,name=ipv6_label_address,json=ipv6LabelAddress,proto3" json:"ipv6_label_address,omitempty"`
Ipv6VpnAddress string `protobuf:"bytes,12,opt,name=ipv6vpn_address,json=ipv6vpnAddress,proto3" json:"ipv6vpn_address,omitempty"`
Ipv6VpnMcastAddress string `protobuf:"bytes,13,opt,name=ipv6vpn_mcast_address,json=ipv6vpnMcastAddress,proto3" json:"ipv6vpn_mcast_address,omitempty"`
L2VpnvplsAddress *BgpL2VpnAddrT `protobuf:"bytes,14,opt,name=l2vpnvpls_address,json=l2vpnvplsAddress,proto3" json:"l2vpnvpls_address,omitempty"`
RtConstraintAddress string `protobuf:"bytes,15,opt,name=rt_constraint_address,json=rtConstraintAddress,proto3" json:"rt_constraint_address,omitempty"`
Ipv6MvpnAddress string `protobuf:"bytes,16,opt,name=ipv6mvpn_address,json=ipv6mvpnAddress,proto3" json:"ipv6mvpn_address,omitempty"`
Ipv4MvpnAddress string `protobuf:"bytes,17,opt,name=ipv4mvpn_address,json=ipv4mvpnAddress,proto3" json:"ipv4mvpn_address,omitempty"`
L2VpnEvpnAddress string `protobuf:"bytes,18,opt,name=l2vpn_evpn_address,json=l2vpnEvpnAddress,proto3" json:"l2vpn_evpn_address,omitempty"`
LsLsAddress string `protobuf:"bytes,19,opt,name=ls_ls_address,json=lsLsAddress,proto3" json:"ls_ls_address,omitempty"`
L2VpnMspwAddress *BgpL2VpnMspwAddrT `protobuf:"bytes,20,opt,name=l2vpn_mspw_address,json=l2vpnMspwAddress,proto3" json:"l2vpn_mspw_address,omitempty"`
Ipv4FlowspecAddress string `protobuf:"bytes,21,opt,name=ipv4_flowspec_address,json=ipv4FlowspecAddress,proto3" json:"ipv4_flowspec_address,omitempty"`
Ipv6FlowspecAddress string `protobuf:"bytes,22,opt,name=ipv6_flowspec_address,json=ipv6FlowspecAddress,proto3" json:"ipv6_flowspec_address,omitempty"`
Ipv4VpnFlowspecAddress string `protobuf:"bytes,23,opt,name=ipv4vpn_flowspec_address,json=ipv4vpnFlowspecAddress,proto3" json:"ipv4vpn_flowspec_address,omitempty"`
Ipv6VpnFlowspecAddress string `protobuf:"bytes,24,opt,name=ipv6vpn_flowspec_address,json=ipv6vpnFlowspecAddress,proto3" json:"ipv6vpn_flowspec_address,omitempty"`
Ipv4SrPolicyAddress *BgpIpv4SrpolicyAddrT `protobuf:"bytes,25,opt,name=ipv4sr_policy_address,json=ipv4srPolicyAddress,proto3" json:"ipv4sr_policy_address,omitempty"`
Ipv6SrPolicyAddress *BgpIpv6SrpolicyAddrT `protobuf:"bytes,26,opt,name=ipv6sr_policy_address,json=ipv6srPolicyAddress,proto3" json:"ipv6sr_policy_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpAddrtype) Reset() { *m = BgpAddrtype{} }
func (m *BgpAddrtype) String() string { return proto.CompactTextString(m) }
func (*BgpAddrtype) ProtoMessage() {}
func (*BgpAddrtype) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{5}
}
func (m *BgpAddrtype) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpAddrtype.Unmarshal(m, b)
}
func (m *BgpAddrtype) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpAddrtype.Marshal(b, m, deterministic)
}
func (m *BgpAddrtype) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpAddrtype.Merge(m, src)
}
func (m *BgpAddrtype) XXX_Size() int {
return xxx_messageInfo_BgpAddrtype.Size(m)
}
func (m *BgpAddrtype) XXX_DiscardUnknown() {
xxx_messageInfo_BgpAddrtype.DiscardUnknown(m)
}
var xxx_messageInfo_BgpAddrtype proto.InternalMessageInfo
func (m *BgpAddrtype) GetAfi() string {
if m != nil {
return m.Afi
}
return ""
}
func (m *BgpAddrtype) GetIpv4Address() string {
if m != nil {
return m.Ipv4Address
}
return ""
}
func (m *BgpAddrtype) GetIpv4McastAddress() string {
if m != nil {
return m.Ipv4McastAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4LabelAddress() string {
if m != nil {
return m.Ipv4LabelAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4TunnelAddress() string {
if m != nil {
return m.Ipv4TunnelAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4MdtAddress() string {
if m != nil {
return m.Ipv4MdtAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4VpnAddress() string {
if m != nil {
return m.Ipv4VpnAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4VpnaMcastddress() string {
if m != nil {
return m.Ipv4VpnaMcastddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6Address() string {
if m != nil {
return m.Ipv6Address
}
return ""
}
func (m *BgpAddrtype) GetIpv6McastAddress() string {
if m != nil {
return m.Ipv6McastAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6LabelAddress() string {
if m != nil {
return m.Ipv6LabelAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6VpnAddress() string {
if m != nil {
return m.Ipv6VpnAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6VpnMcastAddress() string {
if m != nil {
return m.Ipv6VpnMcastAddress
}
return ""
}
func (m *BgpAddrtype) GetL2VpnvplsAddress() *BgpL2VpnAddrT {
if m != nil {
return m.L2VpnvplsAddress
}
return nil
}
func (m *BgpAddrtype) GetRtConstraintAddress() string {
if m != nil {
return m.RtConstraintAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6MvpnAddress() string {
if m != nil {
return m.Ipv6MvpnAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4MvpnAddress() string {
if m != nil {
return m.Ipv4MvpnAddress
}
return ""
}
func (m *BgpAddrtype) GetL2VpnEvpnAddress() string {
if m != nil {
return m.L2VpnEvpnAddress
}
return ""
}
func (m *BgpAddrtype) GetLsLsAddress() string {
if m != nil {
return m.LsLsAddress
}
return ""
}
func (m *BgpAddrtype) GetL2VpnMspwAddress() *BgpL2VpnMspwAddrT {
if m != nil {
return m.L2VpnMspwAddress
}
return nil
}
func (m *BgpAddrtype) GetIpv4FlowspecAddress() string {
if m != nil {
return m.Ipv4FlowspecAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6FlowspecAddress() string {
if m != nil {
return m.Ipv6FlowspecAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4VpnFlowspecAddress() string {
if m != nil {
return m.Ipv4VpnFlowspecAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv6VpnFlowspecAddress() string {
if m != nil {
return m.Ipv6VpnFlowspecAddress
}
return ""
}
func (m *BgpAddrtype) GetIpv4SrPolicyAddress() *BgpIpv4SrpolicyAddrT {
if m != nil {
return m.Ipv4SrPolicyAddress
}
return nil
}
func (m *BgpAddrtype) GetIpv6SrPolicyAddress() *BgpIpv6SrpolicyAddrT {
if m != nil {
return m.Ipv6SrPolicyAddress
}
return nil
}
type BgpTimespec struct {
Seconds uint32 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
Nanoseconds uint32 `protobuf:"varint,2,opt,name=nanoseconds,proto3" json:"nanoseconds,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpTimespec) Reset() { *m = BgpTimespec{} }
func (m *BgpTimespec) String() string { return proto.CompactTextString(m) }
func (*BgpTimespec) ProtoMessage() {}
func (*BgpTimespec) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{6}
}
func (m *BgpTimespec) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpTimespec.Unmarshal(m, b)
}
func (m *BgpTimespec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpTimespec.Marshal(b, m, deterministic)
}
func (m *BgpTimespec) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpTimespec.Merge(m, src)
}
func (m *BgpTimespec) XXX_Size() int {
return xxx_messageInfo_BgpTimespec.Size(m)
}
func (m *BgpTimespec) XXX_DiscardUnknown() {
xxx_messageInfo_BgpTimespec.DiscardUnknown(m)
}
var xxx_messageInfo_BgpTimespec proto.InternalMessageInfo
func (m *BgpTimespec) GetSeconds() uint32 {
if m != nil {
return m.Seconds
}
return 0
}
func (m *BgpTimespec) GetNanoseconds() uint32 {
if m != nil {
return m.Nanoseconds
}
return 0
}
type BgpUpderrResetDataBag struct {
UpdateErrorResetReason string `protobuf:"bytes,1,opt,name=update_error_reset_reason,json=updateErrorResetReason,proto3" json:"update_error_reset_reason,omitempty"`
UpdateErrorResetNotificationCode uint32 `protobuf:"varint,2,opt,name=update_error_reset_notification_code,json=updateErrorResetNotificationCode,proto3" json:"update_error_reset_notification_code,omitempty"`
UpdateErrorResetNotificationSubCode uint32 `protobuf:"varint,3,opt,name=update_error_reset_notification_sub_code,json=updateErrorResetNotificationSubCode,proto3" json:"update_error_reset_notification_sub_code,omitempty"`
UpdateErrorResetNotificationData []byte `protobuf:"bytes,4,opt,name=update_error_reset_notification_data,json=updateErrorResetNotificationData,proto3" json:"update_error_reset_notification_data,omitempty"`
UpdateErrorResetNotificationDataLength uint32 `protobuf:"varint,5,opt,name=update_error_reset_notification_data_length,json=updateErrorResetNotificationDataLength,proto3" json:"update_error_reset_notification_data_length,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpUpderrResetDataBag) Reset() { *m = BgpUpderrResetDataBag{} }
func (m *BgpUpderrResetDataBag) String() string { return proto.CompactTextString(m) }
func (*BgpUpderrResetDataBag) ProtoMessage() {}
func (*BgpUpderrResetDataBag) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{7}
}
func (m *BgpUpderrResetDataBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpUpderrResetDataBag.Unmarshal(m, b)
}
func (m *BgpUpderrResetDataBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpUpderrResetDataBag.Marshal(b, m, deterministic)
}
func (m *BgpUpderrResetDataBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpUpderrResetDataBag.Merge(m, src)
}
func (m *BgpUpderrResetDataBag) XXX_Size() int {
return xxx_messageInfo_BgpUpderrResetDataBag.Size(m)
}
func (m *BgpUpderrResetDataBag) XXX_DiscardUnknown() {
xxx_messageInfo_BgpUpderrResetDataBag.DiscardUnknown(m)
}
var xxx_messageInfo_BgpUpderrResetDataBag proto.InternalMessageInfo
func (m *BgpUpderrResetDataBag) GetUpdateErrorResetReason() string {
if m != nil {
return m.UpdateErrorResetReason
}
return ""
}
func (m *BgpUpderrResetDataBag) GetUpdateErrorResetNotificationCode() uint32 {
if m != nil {
return m.UpdateErrorResetNotificationCode
}
return 0
}
func (m *BgpUpderrResetDataBag) GetUpdateErrorResetNotificationSubCode() uint32 {
if m != nil {
return m.UpdateErrorResetNotificationSubCode
}
return 0
}
func (m *BgpUpderrResetDataBag) GetUpdateErrorResetNotificationData() []byte {
if m != nil {
return m.UpdateErrorResetNotificationData
}
return nil
}
func (m *BgpUpderrResetDataBag) GetUpdateErrorResetNotificationDataLength() uint32 {
if m != nil {
return m.UpdateErrorResetNotificationDataLength
}
return 0
}
type BgpUpderrElemBag struct {
UpdateAttributeFlags uint32 `protobuf:"varint,1,opt,name=update_attribute_flags,json=updateAttributeFlags,proto3" json:"update_attribute_flags,omitempty"`
UpdateAttributeCode uint32 `protobuf:"varint,2,opt,name=update_attribute_code,json=updateAttributeCode,proto3" json:"update_attribute_code,omitempty"`
UpdateAttributeLength uint32 `protobuf:"varint,3,opt,name=update_attribute_length,json=updateAttributeLength,proto3" json:"update_attribute_length,omitempty"`
UpdateErrorData []byte `protobuf:"bytes,4,opt,name=update_error_data,json=updateErrorData,proto3" json:"update_error_data,omitempty"`
UpdateErrorDataLength uint32 `protobuf:"varint,5,opt,name=update_error_data_length,json=updateErrorDataLength,proto3" json:"update_error_data_length,omitempty"`
UpdateErrorAction string `protobuf:"bytes,6,opt,name=update_error_action,json=updateErrorAction,proto3" json:"update_error_action,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpUpderrElemBag) Reset() { *m = BgpUpderrElemBag{} }
func (m *BgpUpderrElemBag) String() string { return proto.CompactTextString(m) }
func (*BgpUpderrElemBag) ProtoMessage() {}
func (*BgpUpderrElemBag) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{8}
}
func (m *BgpUpderrElemBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpUpderrElemBag.Unmarshal(m, b)
}
func (m *BgpUpderrElemBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpUpderrElemBag.Marshal(b, m, deterministic)
}
func (m *BgpUpderrElemBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpUpderrElemBag.Merge(m, src)
}
func (m *BgpUpderrElemBag) XXX_Size() int {
return xxx_messageInfo_BgpUpderrElemBag.Size(m)
}
func (m *BgpUpderrElemBag) XXX_DiscardUnknown() {
xxx_messageInfo_BgpUpderrElemBag.DiscardUnknown(m)
}
var xxx_messageInfo_BgpUpderrElemBag proto.InternalMessageInfo
func (m *BgpUpderrElemBag) GetUpdateAttributeFlags() uint32 {
if m != nil {
return m.UpdateAttributeFlags
}
return 0
}
func (m *BgpUpderrElemBag) GetUpdateAttributeCode() uint32 {
if m != nil {
return m.UpdateAttributeCode
}
return 0
}
func (m *BgpUpderrElemBag) GetUpdateAttributeLength() uint32 {
if m != nil {
return m.UpdateAttributeLength
}
return 0
}
func (m *BgpUpderrElemBag) GetUpdateErrorData() []byte {
if m != nil {
return m.UpdateErrorData
}
return nil
}
func (m *BgpUpderrElemBag) GetUpdateErrorDataLength() uint32 {
if m != nil {
return m.UpdateErrorDataLength
}
return 0
}
func (m *BgpUpderrElemBag) GetUpdateErrorAction() string {
if m != nil {
return m.UpdateErrorAction
}
return ""
}
type BgpUpderrMsgBag struct {
UpdateErrorFinalAction string `protobuf:"bytes,1,opt,name=update_error_final_action,json=updateErrorFinalAction,proto3" json:"update_error_final_action,omitempty"`
UpdateMessageTimestamp *BgpTimespec `protobuf:"bytes,2,opt,name=update_message_timestamp,json=updateMessageTimestamp,proto3" json:"update_message_timestamp,omitempty"`
UpdateAttributeDiscardCount uint32 `protobuf:"varint,3,opt,name=update_attribute_discard_count,json=updateAttributeDiscardCount,proto3" json:"update_attribute_discard_count,omitempty"`
UpdateMessageResetData *BgpUpderrResetDataBag `protobuf:"bytes,4,opt,name=update_message_reset_data,json=updateMessageResetData,proto3" json:"update_message_reset_data,omitempty"`
UpdateErrorElement []*BgpUpderrElemBag `protobuf:"bytes,5,rep,name=update_error_element,json=updateErrorElement,proto3" json:"update_error_element,omitempty"`
UpdateErrorNlriAddressFamily string `protobuf:"bytes,6,opt,name=update_error_nlri_address_family,json=updateErrorNlriAddressFamily,proto3" json:"update_error_nlri_address_family,omitempty"`
UpdateErrorNlriString string `protobuf:"bytes,7,opt,name=update_error_nlri_string,json=updateErrorNlriString,proto3" json:"update_error_nlri_string,omitempty"`
UpdateErrorNlriStringTruncated bool `protobuf:"varint,8,opt,name=update_error_nlri_string_truncated,json=updateErrorNlriStringTruncated,proto3" json:"update_error_nlri_string_truncated,omitempty"`
UpdateMessageData []uint32 `protobuf:"varint,9,rep,packed,name=update_message_data,json=updateMessageData,proto3" json:"update_message_data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpUpderrMsgBag) Reset() { *m = BgpUpderrMsgBag{} }
func (m *BgpUpderrMsgBag) String() string { return proto.CompactTextString(m) }
func (*BgpUpderrMsgBag) ProtoMessage() {}
func (*BgpUpderrMsgBag) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{9}
}
func (m *BgpUpderrMsgBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpUpderrMsgBag.Unmarshal(m, b)
}
func (m *BgpUpderrMsgBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpUpderrMsgBag.Marshal(b, m, deterministic)
}
func (m *BgpUpderrMsgBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpUpderrMsgBag.Merge(m, src)
}
func (m *BgpUpderrMsgBag) XXX_Size() int {
return xxx_messageInfo_BgpUpderrMsgBag.Size(m)
}
func (m *BgpUpderrMsgBag) XXX_DiscardUnknown() {
xxx_messageInfo_BgpUpderrMsgBag.DiscardUnknown(m)
}
var xxx_messageInfo_BgpUpderrMsgBag proto.InternalMessageInfo
func (m *BgpUpderrMsgBag) GetUpdateErrorFinalAction() string {
if m != nil {
return m.UpdateErrorFinalAction
}
return ""
}
func (m *BgpUpderrMsgBag) GetUpdateMessageTimestamp() *BgpTimespec {
if m != nil {
return m.UpdateMessageTimestamp
}
return nil
}
func (m *BgpUpderrMsgBag) GetUpdateAttributeDiscardCount() uint32 {
if m != nil {
return m.UpdateAttributeDiscardCount
}
return 0
}
func (m *BgpUpderrMsgBag) GetUpdateMessageResetData() *BgpUpderrResetDataBag {
if m != nil {
return m.UpdateMessageResetData
}
return nil
}
func (m *BgpUpderrMsgBag) GetUpdateErrorElement() []*BgpUpderrElemBag {
if m != nil {
return m.UpdateErrorElement
}
return nil
}
func (m *BgpUpderrMsgBag) GetUpdateErrorNlriAddressFamily() string {
if m != nil {
return m.UpdateErrorNlriAddressFamily
}
return ""
}
func (m *BgpUpderrMsgBag) GetUpdateErrorNlriString() string {
if m != nil {
return m.UpdateErrorNlriString
}
return ""
}
func (m *BgpUpderrMsgBag) GetUpdateErrorNlriStringTruncated() bool {
if m != nil {
return m.UpdateErrorNlriStringTruncated
}
return false
}
func (m *BgpUpderrMsgBag) GetUpdateMessageData() []uint32 {
if m != nil {
return m.UpdateMessageData
}
return nil
}
type BgpUpderrNbrBag struct {
UpdateVrfName string `protobuf:"bytes,50,opt,name=update_vrf_name,json=updateVrfName,proto3" json:"update_vrf_name,omitempty"`
UpdateNeighborAddress *BgpAddrtype `protobuf:"bytes,51,opt,name=update_neighbor_address,json=updateNeighborAddress,proto3" json:"update_neighbor_address,omitempty"`
UpdateErrorHandlingAvoidReset bool `protobuf:"varint,52,opt,name=update_error_handling_avoid_reset,json=updateErrorHandlingAvoidReset,proto3" json:"update_error_handling_avoid_reset,omitempty"`
TotalUpdateMessageCount uint32 `protobuf:"varint,53,opt,name=total_update_message_count,json=totalUpdateMessageCount,proto3" json:"total_update_message_count,omitempty"`
UpdateMalformedMessageCount uint32 `protobuf:"varint,54,opt,name=update_malformed_message_count,json=updateMalformedMessageCount,proto3" json:"update_malformed_message_count,omitempty"`
FirstUpdateMalformedTimestamp *BgpTimespec `protobuf:"bytes,55,opt,name=first_update_malformed_timestamp,json=firstUpdateMalformedTimestamp,proto3" json:"first_update_malformed_timestamp,omitempty"`
LastUpdateMalformedTimestamp *BgpTimespec `protobuf:"bytes,56,opt,name=last_update_malformed_timestamp,json=lastUpdateMalformedTimestamp,proto3" json:"last_update_malformed_timestamp,omitempty"`
LastUpdateMalformedAge uint32 `protobuf:"varint,57,opt,name=last_update_malformed_age,json=lastUpdateMalformedAge,proto3" json:"last_update_malformed_age,omitempty"`
UpdateMemoryAllocationFailCount uint32 `protobuf:"varint,58,opt,name=update_memory_allocation_fail_count,json=updateMemoryAllocationFailCount,proto3" json:"update_memory_allocation_fail_count,omitempty"`
FirstUpdateMemoryAllocationFailTimestamp *BgpTimespec `protobuf:"bytes,59,opt,name=first_update_memory_allocation_fail_timestamp,json=firstUpdateMemoryAllocationFailTimestamp,proto3" json:"first_update_memory_allocation_fail_timestamp,omitempty"`
LastUpdateMemoryAllocationFailTimestamp *BgpTimespec `protobuf:"bytes,60,opt,name=last_update_memory_allocation_fail_timestamp,json=lastUpdateMemoryAllocationFailTimestamp,proto3" json:"last_update_memory_allocation_fail_timestamp,omitempty"`
LastUpdateMemoryAllocationFailAge uint32 `protobuf:"varint,61,opt,name=last_update_memory_allocation_fail_age,json=lastUpdateMemoryAllocationFailAge,proto3" json:"last_update_memory_allocation_fail_age,omitempty"`
UpdateErrorHandlingResetCount uint32 `protobuf:"varint,62,opt,name=update_error_handling_reset_count,json=updateErrorHandlingResetCount,proto3" json:"update_error_handling_reset_count,omitempty"`
FirstUpdateErrorHandlingResetTimestamp *BgpTimespec `protobuf:"bytes,63,opt,name=first_update_error_handling_reset_timestamp,json=firstUpdateErrorHandlingResetTimestamp,proto3" json:"first_update_error_handling_reset_timestamp,omitempty"`
LastErrorHandlingResetTimestamp *BgpTimespec `protobuf:"bytes,64,opt,name=last_error_handling_reset_timestamp,json=lastErrorHandlingResetTimestamp,proto3" json:"last_error_handling_reset_timestamp,omitempty"`
LastErrorHandlingResetAge uint32 `protobuf:"varint,65,opt,name=last_error_handling_reset_age,json=lastErrorHandlingResetAge,proto3" json:"last_error_handling_reset_age,omitempty"`
UpdateErrorMessage []*BgpUpderrMsgBag `protobuf:"bytes,66,rep,name=update_error_message,json=updateErrorMessage,proto3" json:"update_error_message,omitempty"`
UpdateErrorMessageListCount uint32 `protobuf:"varint,67,opt,name=update_error_message_list_count,json=updateErrorMessageListCount,proto3" json:"update_error_message_list_count,omitempty"`
UpdateAttributeDiscardCount uint32 `protobuf:"varint,68,opt,name=update_attribute_discard_count,json=updateAttributeDiscardCount,proto3" json:"update_attribute_discard_count,omitempty"`
EstablishmentTotalUpdateMessageCount uint32 `protobuf:"varint,69,opt,name=establishment_total_update_message_count,json=establishmentTotalUpdateMessageCount,proto3" json:"establishment_total_update_message_count,omitempty"`
EstablishmentActionCount []uint32 `protobuf:"varint,70,rep,packed,name=establishment_action_count,json=establishmentActionCount,proto3" json:"establishment_action_count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BgpUpderrNbrBag) Reset() { *m = BgpUpderrNbrBag{} }
func (m *BgpUpderrNbrBag) String() string { return proto.CompactTextString(m) }
func (*BgpUpderrNbrBag) ProtoMessage() {}
func (*BgpUpderrNbrBag) Descriptor() ([]byte, []int) {
return fileDescriptor_17e34e92c3784e38, []int{10}
}
func (m *BgpUpderrNbrBag) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BgpUpderrNbrBag.Unmarshal(m, b)
}
func (m *BgpUpderrNbrBag) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BgpUpderrNbrBag.Marshal(b, m, deterministic)
}
func (m *BgpUpderrNbrBag) XXX_Merge(src proto.Message) {
xxx_messageInfo_BgpUpderrNbrBag.Merge(m, src)
}
func (m *BgpUpderrNbrBag) XXX_Size() int {
return xxx_messageInfo_BgpUpderrNbrBag.Size(m)
}
func (m *BgpUpderrNbrBag) XXX_DiscardUnknown() {
xxx_messageInfo_BgpUpderrNbrBag.DiscardUnknown(m)
}
var xxx_messageInfo_BgpUpderrNbrBag proto.InternalMessageInfo
func (m *BgpUpderrNbrBag) GetUpdateVrfName() string {
if m != nil {
return m.UpdateVrfName
}
return ""
}
func (m *BgpUpderrNbrBag) GetUpdateNeighborAddress() *BgpAddrtype {
if m != nil {
return m.UpdateNeighborAddress
}
return nil
}
func (m *BgpUpderrNbrBag) GetUpdateErrorHandlingAvoidReset() bool {
if m != nil {
return m.UpdateErrorHandlingAvoidReset
}
return false
}
func (m *BgpUpderrNbrBag) GetTotalUpdateMessageCount() uint32 {
if m != nil {
return m.TotalUpdateMessageCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetUpdateMalformedMessageCount() uint32 {
if m != nil {
return m.UpdateMalformedMessageCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetFirstUpdateMalformedTimestamp() *BgpTimespec {
if m != nil {
return m.FirstUpdateMalformedTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastUpdateMalformedTimestamp() *BgpTimespec {
if m != nil {
return m.LastUpdateMalformedTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastUpdateMalformedAge() uint32 {
if m != nil {
return m.LastUpdateMalformedAge
}
return 0
}
func (m *BgpUpderrNbrBag) GetUpdateMemoryAllocationFailCount() uint32 {
if m != nil {
return m.UpdateMemoryAllocationFailCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetFirstUpdateMemoryAllocationFailTimestamp() *BgpTimespec {
if m != nil {
return m.FirstUpdateMemoryAllocationFailTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastUpdateMemoryAllocationFailTimestamp() *BgpTimespec {
if m != nil {
return m.LastUpdateMemoryAllocationFailTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastUpdateMemoryAllocationFailAge() uint32 {
if m != nil {
return m.LastUpdateMemoryAllocationFailAge
}
return 0
}
func (m *BgpUpderrNbrBag) GetUpdateErrorHandlingResetCount() uint32 {
if m != nil {
return m.UpdateErrorHandlingResetCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetFirstUpdateErrorHandlingResetTimestamp() *BgpTimespec {
if m != nil {
return m.FirstUpdateErrorHandlingResetTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastErrorHandlingResetTimestamp() *BgpTimespec {
if m != nil {
return m.LastErrorHandlingResetTimestamp
}
return nil
}
func (m *BgpUpderrNbrBag) GetLastErrorHandlingResetAge() uint32 {
if m != nil {
return m.LastErrorHandlingResetAge
}
return 0
}
func (m *BgpUpderrNbrBag) GetUpdateErrorMessage() []*BgpUpderrMsgBag {
if m != nil {
return m.UpdateErrorMessage
}
return nil
}
func (m *BgpUpderrNbrBag) GetUpdateErrorMessageListCount() uint32 {
if m != nil {
return m.UpdateErrorMessageListCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetUpdateAttributeDiscardCount() uint32 {
if m != nil {
return m.UpdateAttributeDiscardCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetEstablishmentTotalUpdateMessageCount() uint32 {
if m != nil {
return m.EstablishmentTotalUpdateMessageCount
}
return 0
}
func (m *BgpUpderrNbrBag) GetEstablishmentActionCount() []uint32 {
if m != nil {
return m.EstablishmentActionCount
}
return nil
}
func init() {
proto.RegisterType((*BgpUpderrNbrBag_KEYS)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_upderr_nbr_bag_KEYS")
proto.RegisterType((*BgpL2VpnAddrT)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_l2vpn_addr_t")
proto.RegisterType((*BgpL2VpnMspwAddrT)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_l2vpn_mspw_addr_t")
proto.RegisterType((*BgpIpv4SrpolicyAddrT)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_ipv4_srpolicy_addr_t")
proto.RegisterType((*BgpIpv6SrpolicyAddrT)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_ipv6_srpolicy_addr_t")
proto.RegisterType((*BgpAddrtype)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_addrtype")
proto.RegisterType((*BgpTimespec)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_timespec")
proto.RegisterType((*BgpUpderrResetDataBag)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_upderr_reset_data_bag")
proto.RegisterType((*BgpUpderrElemBag)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_upderr_elem_bag")
proto.RegisterType((*BgpUpderrMsgBag)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_upderr_msg_bag")
proto.RegisterType((*BgpUpderrNbrBag)(nil), "cisco_ios_xr_ipv4_bgp_oper.bgp.instances.instance.instance_active.vrfs.vrf.update_inbound_error_neighbors.update_inbound_error_neighbor.bgp_upderr_nbr_bag")
}
func init() { proto.RegisterFile("bgp_upderr_nbr_bag.proto", fileDescriptor_17e34e92c3784e38) }
var fileDescriptor_17e34e92c3784e38 = []byte{
// 1656 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0x37,
0x16, 0xc7, 0xc4, 0x9b, 0xc4, 0xa1, 0xed, 0xd8, 0x1e, 0xff, 0x93, 0xb3, 0xf9, 0xa3, 0xc8, 0x81,
0xe3, 0x6c, 0xb2, 0x02, 0xd6, 0xf1, 0x4e, 0x92, 0x4d, 0x76, 0x37, 0x5e, 0xff, 0x41, 0x90, 0xb5,
0x8d, 0x5d, 0xd9, 0x09, 0x50, 0xf4, 0x40, 0x50, 0x1a, 0x4a, 0x1e, 0x60, 0x34, 0x23, 0x90, 0x94,
0x52, 0xdf, 0x0b, 0xf4, 0x13, 0x14, 0x45, 0xd1, 0xa2, 0x40, 0x7b, 0xec, 0xb9, 0x1f, 0xa1, 0x40,
0x2f, 0x6d, 0xda, 0x53, 0x6f, 0x6d, 0x2f, 0xfd, 0x06, 0x45, 0xd1, 0x6b, 0xc1, 0x47, 0x72, 0x86,
0x33, 0x1a, 0xcb, 0xe9, 0x6d, 0x2e, 0x81, 0xcc, 0xf7, 0x7b, 0x4f, 0xef, 0xf7, 0x7e, 0x8f, 0x8f,
0xa4, 0x82, 0x2a, 0xcd, 0x4e, 0x0f, 0xf7, 0x7b, 0x3e, 0x65, 0x0c, 0x47, 0x4d, 0x86, 0x9b, 0xa4,
0x53, 0xef, 0xb1, 0x58, 0xc4, 0xee, 0x7b, 0x4e, 0x2b, 0xe0, 0xad, 0x18, 0x07, 0x31, 0xc7, 0xef,
0x30, 0x1c, 0xf4, 0x06, 0x1b, 0x58, 0x82, 0xe3, 0x1e, 0x65, 0xf5, 0x66, 0xa7, 0x57, 0x0f, 0x22,
0x2e, 0x48, 0xd4, 0xa2, 0x3c, 0xf9, 0x94, 0x7c, 0xc0, 0xa4, 0x25, 0x82, 0x01, 0xad, 0x0f, 0x58,
0x9b, 0xcb, 0x7f, 0xea, 0xfd, 0x9e, 0x4f, 0x04, 0xc5, 0x41, 0xd4, 0x8c, 0xfb, 0x91, 0x8f, 0x29,
0x63, 0x31, 0xc3, 0x11, 0x0d, 0x3a, 0xc7, 0xcd, 0x98, 0xf1, 0xd1, 0xe6, 0xda, 0xbb, 0x0e, 0x5a,
0x1a, 0x4e, 0x13, 0xff, 0x77, 0xe7, 0xad, 0x43, 0x77, 0x05, 0x4d, 0x25, 0x5f, 0x1a, 0x91, 0x2e,
0xad, 0x38, 0x55, 0x67, 0xed, 0x52, 0x63, 0xd2, 0x2c, 0x1e, 0x90, 0x2e, 0x75, 0x97, 0xd1, 0xf8,
0x80, 0xb5, 0x95, 0xfd, 0x1c, 0xd8, 0x2f, 0x0e, 0x58, 0x1b, 0x4c, 0x77, 0xd0, 0x8c, 0xf9, 0x1e,
0x4c, 0x7c, 0x9f, 0x51, 0xce, 0x2b, 0x63, 0x00, 0x99, 0x36, 0xeb, 0x9b, 0x6a, 0xb9, 0xf6, 0x00,
0xcd, 0xc8, 0x2c, 0xc2, 0xf5, 0x41, 0x2f, 0x02, 0x2c, 0x16, 0xf2, 0xeb, 0xd3, 0xbf, 0xa5, 0xaf,
0x53, 0x1d, 0x5b, 0x9b, 0x6a, 0x4c, 0xc2, 0xa2, 0x71, 0x7c, 0x82, 0x16, 0x52, 0xc7, 0x2e, 0xef,
0xbd, 0xfa, 0x43, 0xde, 0x07, 0x4a, 0x23, 0xa8, 0x3f, 0x67, 0xbd, 0x38, 0x0c, 0x5a, 0x27, 0x26,
0xc0, 0x3a, 0x5a, 0x18, 0x5e, 0x4f, 0x03, 0xcd, 0x49, 0xe3, 0xa1, 0xb6, 0x0d, 0xc7, 0xf3, 0x4e,
0x89, 0xe7, 0x8d, 0x8a, 0xe7, 0xe5, 0xe3, 0x7d, 0x71, 0x19, 0x4d, 0xca, 0x80, 0x12, 0x2a, 0x4e,
0x7a, 0xd4, 0x9d, 0x41, 0x63, 0xa4, 0x1d, 0x68, 0x21, 0xe4, 0x47, 0xf7, 0x26, 0x9a, 0x84, 0x34,
0x4d, 0x34, 0xa5, 0xc1, 0x84, 0x5c, 0xd3, 0x51, 0xdc, 0x7b, 0xc8, 0x05, 0x48, 0xb7, 0x45, 0xb8,
0xc8, 0x29, 0x31, 0x23, 0x2d, 0xfb, 0xd2, 0x90, 0x47, 0x87, 0xa4, 0x49, 0xc3, 0x04, 0xfd, 0xa7,
0x14, 0xbd, 0x27, 0x0d, 0x06, 0x5d, 0x47, 0x50, 0x08, 0x2c, 0xfa, 0x51, 0x64, 0xc1, 0xcf, 0x03,
0x7c, 0x56, 0x9a, 0x8e, 0xc0, 0x62, 0xf0, 0xb7, 0xd1, 0xb4, 0x5c, 0xec, 0xfa, 0x69, 0x22, 0x17,
0x00, 0x7b, 0x59, 0x2f, 0xe7, 0x80, 0xb6, 0x82, 0x17, 0x53, 0x60, 0xaa, 0xa1, 0xfb, 0x37, 0x34,
0xaf, 0x57, 0x88, 0x62, 0xa8, 0xd1, 0xe3, 0x80, 0x9e, 0x33, 0xb6, 0xfd, 0xd4, 0xa4, 0x6b, 0xe6,
0x25, 0x81, 0x2f, 0x25, 0x35, 0xf3, 0xb2, 0x55, 0xf0, 0x72, 0x35, 0x43, 0x49, 0x15, 0xbc, 0x82,
0x9a, 0x79, 0xb9, 0x9a, 0x4d, 0xa4, 0xe8, 0x4c, 0xcd, 0x14, 0x35, 0xcf, 0xa6, 0x36, 0x99, 0x50,
0xf3, 0x2c, 0x6a, 0xba, 0x65, 0xa0, 0xb5, 0x33, 0x79, 0x4c, 0x25, 0xdc, 0xa4, 0x31, 0x93, 0xca,
0x57, 0x0e, 0x9a, 0x85, 0x1e, 0x1f, 0xf4, 0x42, 0x9e, 0x38, 0x5c, 0xae, 0x3a, 0x6b, 0x13, 0xeb,
0x1f, 0x3a, 0xf5, 0x92, 0xcc, 0x9d, 0x7a, 0x7e, 0xb7, 0x37, 0x66, 0x92, 0xa4, 0x2d, 0xf6, 0x4c,
0xe0, 0x56, 0x1c, 0x71, 0xc1, 0x48, 0x10, 0xa5, 0xec, 0xa7, 0x15, 0x7b, 0x26, 0xb6, 0x12, 0x9b,
0xf1, 0xb9, 0x83, 0xa0, 0xdc, 0x5d, 0xbb, 0xb6, 0x33, 0x6a, 0xe4, 0x98, 0xf5, 0x2c, 0x74, 0x23,
0x03, 0x9d, 0x4d, 0xa0, 0x1b, 0x36, 0xf4, 0x1e, 0x72, 0x55, 0xae, 0xd4, 0x06, 0xbb, 0x4a, 0x5e,
0xb0, 0xec, 0x58, 0xe8, 0x1a, 0x9a, 0x0a, 0x39, 0xb6, 0x8a, 0x3f, 0xa7, 0xda, 0x2b, 0xe4, 0x7b,
0x09, 0xb7, 0x6f, 0x1c, 0x13, 0x32, 0x99, 0x59, 0x12, 0x39, 0x0f, 0x32, 0x7d, 0x52, 0x46, 0x99,
0xac, 0xd9, 0xaa, 0x39, 0xef, 0xf3, 0xde, 0xab, 0x6c, 0xa7, 0x6e, 0xe0, 0x76, 0x18, 0xbf, 0xe2,
0x3d, 0xda, 0x4a, 0x18, 0x2d, 0xa4, 0xbb, 0x70, 0x57, 0xdb, 0x72, 0xdd, 0x3d, 0xec, 0xb3, 0x98,
0x76, 0x77, 0xde, 0xe7, 0x21, 0xaa, 0x98, 0xa9, 0x30, 0xe4, 0xb6, 0x04, 0x6e, 0x8b, 0xda, 0x5e,
0xec, 0xe9, 0x15, 0x7a, 0x56, 0x12, 0x4f, 0xaf, 0xc0, 0xf3, 0x7b, 0x47, 0x91, 0xe3, 0x0c, 0xe7,
0x26, 0xf7, 0x32, 0xc8, 0xf5, 0x69, 0xb9, 0xe4, 0x2a, 0x3a, 0xcc, 0x94, 0x00, 0x9c, 0xfd, 0xcf,
0x3e, 0x5d, 0x0c, 0x31, 0x6f, 0x98, 0xd8, 0x95, 0x92, 0x12, 0xf3, 0x0a, 0x89, 0x79, 0x39, 0x62,
0xb5, 0xe7, 0xea, 0xd4, 0x14, 0x41, 0x97, 0x4a, 0x21, 0xdd, 0x0a, 0xba, 0xc8, 0x69, 0x2b, 0x8e,
0x7c, 0x0e, 0x27, 0xe7, 0x54, 0xc3, 0xfc, 0xe9, 0x56, 0xd1, 0x44, 0x44, 0xa2, 0xd8, 0x58, 0xcf,
0x81, 0xd5, 0x5e, 0xaa, 0x7d, 0x3e, 0x86, 0x96, 0xad, 0x0b, 0x12, 0xa3, 0x9c, 0x0a, 0xec, 0x13,
0x41, 0xe4, 0x3d, 0xc9, 0x7d, 0x84, 0x96, 0x75, 0xe2, 0x2a, 0x61, 0x65, 0x66, 0x94, 0xf0, 0x38,
0xd2, 0xa7, 0xf4, 0xa2, 0x02, 0xec, 0x48, 0x7b, 0x43, 0x9a, 0x1b, 0x60, 0x75, 0x0f, 0xd0, 0xad,
0x02, 0xd7, 0x28, 0x16, 0x41, 0x3b, 0x68, 0x11, 0x11, 0xc4, 0x11, 0x6e, 0xc5, 0x3e, 0xd5, 0x39,
0x55, 0xf3, 0x51, 0x0e, 0x2c, 0xe0, 0x56, 0xec, 0x53, 0xf7, 0x05, 0x5a, 0x3b, 0x2b, 0x1e, 0xef,
0x37, 0x55, 0xcc, 0x31, 0x88, 0xb9, 0x32, 0x2a, 0xe6, 0x61, 0xbf, 0x09, 0x61, 0xdf, 0x20, 0x4d,
0x59, 0x0d, 0xb8, 0x20, 0x4c, 0x8e, 0x4e, 0x73, 0x9b, 0x08, 0xe2, 0xbe, 0x8d, 0xee, 0xbe, 0x49,
0x3c, 0x1c, 0xd2, 0xa8, 0x23, 0x8e, 0xe1, 0x22, 0x31, 0xd5, 0x58, 0x3d, 0x2b, 0xec, 0x1e, 0xa0,
0x6b, 0x5f, 0x9f, 0x43, 0x73, 0x96, 0x58, 0x34, 0xa4, 0x5d, 0x90, 0x69, 0x03, 0x69, 0x15, 0x30,
0x11, 0x82, 0x05, 0xcd, 0xbe, 0xa0, 0xb8, 0x1d, 0x92, 0x8e, 0xe9, 0x87, 0x79, 0x65, 0xdd, 0x34,
0xc6, 0x5d, 0x69, 0x93, 0x03, 0x6a, 0xc8, 0xcb, 0x92, 0x64, 0x2e, 0xe7, 0x04, 0xe5, 0xf2, 0xd0,
0xd2, 0x90, 0x8f, 0xa6, 0xa2, 0x8a, 0xbe, 0x90, 0xf3, 0x52, 0x99, 0xbb, 0x7f, 0x41, 0xb3, 0x99,
0xb2, 0x58, 0x35, 0x9d, 0xb6, 0xc8, 0x43, 0x09, 0x1f, 0xa0, 0xca, 0x10, 0x36, 0x5b, 0xaf, 0x85,
0x9c, 0x8b, 0xfe, 0x92, 0x3a, 0x9a, 0xcb, 0x38, 0xca, 0x2d, 0x1a, 0x47, 0xfa, 0x02, 0x36, 0x6b,
0xf9, 0x6c, 0x82, 0xa1, 0xf6, 0xc1, 0x38, 0x72, 0xad, 0x72, 0x76, 0x79, 0xa7, 0xb0, 0xe9, 0xdb,
0x41, 0x44, 0x42, 0x13, 0x6c, 0xb8, 0xe9, 0x77, 0xa5, 0x59, 0x45, 0x74, 0xbf, 0x73, 0x92, 0xdc,
0xbb, 0x94, 0x73, 0xd2, 0xa1, 0x6a, 0x97, 0x0a, 0xd2, 0xed, 0x41, 0x59, 0x27, 0xd6, 0xdf, 0x2f,
0xd7, 0xd4, 0x31, 0x43, 0xc4, 0x50, 0xda, 0x57, 0x69, 0x1f, 0x99, 0xac, 0xdd, 0x2d, 0x74, 0x7d,
0x48, 0x71, 0x3f, 0xe0, 0x2d, 0xc2, 0x7c, 0xdc, 0x8a, 0xfb, 0x91, 0xd0, 0xc2, 0xff, 0x39, 0x27,
0xfc, 0xb6, 0xc2, 0x6c, 0x49, 0x88, 0xfb, 0xa3, 0x93, 0xd4, 0xd4, 0xd4, 0x25, 0x9d, 0x34, 0xd0,
0x07, 0x13, 0xeb, 0x9f, 0x95, 0xab, 0x30, 0x85, 0x03, 0x31, 0x57, 0x25, 0xd8, 0xab, 0xd0, 0xb3,
0xdf, 0x3a, 0x68, 0x3e, 0xd3, 0x34, 0x72, 0x6f, 0xd2, 0x48, 0x54, 0xce, 0x57, 0xc7, 0xd6, 0x26,
0xd6, 0x3f, 0x2e, 0x25, 0x37, 0x33, 0x3f, 0x1a, 0xae, 0xd5, 0xce, 0x3b, 0x2a, 0x71, 0x77, 0x17,
0x55, 0x33, 0x84, 0xa2, 0x90, 0x05, 0xe6, 0x00, 0xc5, 0x6d, 0xd2, 0x0d, 0xc2, 0x13, 0xbd, 0xb3,
0xae, 0x5a, 0xde, 0x07, 0x21, 0x0b, 0xf4, 0x31, 0xb5, 0x0b, 0x98, 0xa1, 0xdd, 0x0c, 0x71, 0xb8,
0x60, 0x41, 0xd4, 0xd1, 0x2f, 0x9e, 0x85, 0x9c, 0xff, 0x21, 0x18, 0xdd, 0xe7, 0xa8, 0x76, 0x9a,
0x23, 0x16, 0xac, 0x1f, 0xb5, 0x88, 0xa0, 0x3e, 0x3c, 0x83, 0xc6, 0x1b, 0xd7, 0x0b, 0x43, 0x1c,
0x19, 0x94, 0x35, 0x19, 0x4c, 0xfb, 0x41, 0xe3, 0x5d, 0x82, 0xa7, 0xe9, 0x6c, 0x46, 0x53, 0x29,
0x67, 0xed, 0xcb, 0x85, 0xcc, 0x64, 0xd0, 0x3f, 0x1b, 0xb8, 0xab, 0x48, 0x0f, 0x2b, 0x9c, 0xfc,
0x26, 0xb0, 0x0e, 0x14, 0xa6, 0xd4, 0xf2, 0x4b, 0xfd, 0xcb, 0xc0, 0x6b, 0x27, 0x19, 0x93, 0x43,
0xbf, 0x10, 0xdc, 0x2f, 0xe3, 0x14, 0x30, 0x0f, 0x70, 0xa3, 0xc5, 0x41, 0xf6, 0xf7, 0x0b, 0xf7,
0x19, 0xba, 0x99, 0xd1, 0xe2, 0x98, 0x44, 0x7e, 0x28, 0x85, 0x20, 0x83, 0x38, 0xf0, 0xd5, 0x26,
0xa9, 0x6c, 0x80, 0x14, 0xd7, 0x2c, 0x29, 0x9e, 0x69, 0xd8, 0xa6, 0x44, 0xc1, 0x66, 0x71, 0x1f,
0xa3, 0x2b, 0x22, 0x16, 0x24, 0xc4, 0x39, 0x3d, 0xd4, 0x28, 0xf9, 0x3b, 0x8c, 0x92, 0x25, 0x40,
0xbc, 0xb0, 0x55, 0x51, 0x63, 0x24, 0x9d, 0x45, 0x5d, 0x12, 0xb6, 0x63, 0xd6, 0xa5, 0x7e, 0x2e,
0x80, 0x67, 0xcf, 0xa2, 0x7d, 0x03, 0xca, 0x04, 0xf9, 0xc9, 0x41, 0xd5, 0x76, 0xc0, 0xb8, 0xc0,
0x43, 0xb1, 0xd2, 0x59, 0xfd, 0xa0, 0xd4, 0xb3, 0xfa, 0x1a, 0xe4, 0xff, 0x22, 0x4b, 0x32, 0x1d,
0xd9, 0x3f, 0x38, 0xe8, 0x46, 0x48, 0x46, 0x13, 0x7c, 0x58, 0x6a, 0x82, 0x57, 0x65, 0xfa, 0xa7,
0xf2, 0x7b, 0x84, 0x96, 0x8b, 0xe9, 0x91, 0x0e, 0xad, 0x3c, 0x82, 0x0e, 0x58, 0x2c, 0x08, 0xb0,
0xd9, 0xa1, 0xee, 0x1e, 0x5a, 0x49, 0x1a, 0xaf, 0x1b, 0xb3, 0x13, 0x4c, 0xc2, 0x30, 0xd6, 0xf7,
0xb2, 0x36, 0x09, 0x42, 0xdd, 0x46, 0xff, 0x80, 0x20, 0x37, 0xcc, 0x60, 0x90, 0xc8, 0xcd, 0x04,
0xb8, 0x4b, 0x82, 0x50, 0xb5, 0xd2, 0x6f, 0x0e, 0xfa, 0x6b, 0xb6, 0x95, 0x8a, 0x83, 0xa6, 0x65,
0x7f, 0x5c, 0xea, 0xb2, 0xaf, 0xd9, 0x7d, 0x55, 0xc0, 0x3a, 0x95, 0xe0, 0x57, 0x07, 0xdd, 0xcb,
0x68, 0x70, 0x16, 0xf1, 0x27, 0xa5, 0x26, 0x7e, 0xdb, 0x6a, 0x97, 0x91, 0xbc, 0xff, 0x8f, 0x56,
0xdf, 0x80, 0xb6, 0xec, 0xc3, 0x7f, 0x42, 0x0b, 0xdd, 0x1c, 0x1d, 0x58, 0xb6, 0xe4, 0xa9, 0xb3,
0x55, 0x5d, 0x3d, 0x54, 0x43, 0xfe, 0x0b, 0xa2, 0x15, 0xcd, 0x56, 0x18, 0xab, 0xaa, 0x1d, 0x7f,
0x71, 0xd0, 0xdd, 0x4c, 0x3b, 0x16, 0x06, 0x4c, 0x35, 0xf9, 0x77, 0xa9, 0x35, 0x59, 0xb5, 0x9a,
0x71, 0x98, 0x71, 0x2a, 0xc9, 0xcf, 0x0e, 0x5a, 0x01, 0x4d, 0xce, 0x60, 0xfb, 0xb4, 0xd4, 0x6c,
0x61, 0x60, 0x8f, 0xa2, 0xf9, 0x14, 0x5d, 0x3b, 0x9d, 0xa5, 0x6c, 0xb8, 0x4d, 0x68, 0x91, 0xe5,
0xe2, 0x38, 0xb2, 0xd1, 0x5e, 0xe7, 0xef, 0xa8, 0xfa, 0xe8, 0xac, 0xfc, 0x07, 0xee, 0xa8, 0x1f,
0x95, 0xf2, 0x8e, 0xaa, 0x1f, 0x65, 0x99, 0x2b, 0xaa, 0x3e, 0xce, 0xdd, 0x6d, 0x74, 0xa3, 0x88,
0x0f, 0x0e, 0x03, 0x6e, 0xf6, 0xcd, 0x96, 0x7d, 0x1f, 0xb0, 0x9d, 0xf7, 0x02, 0x2e, 0xf2, 0x97,
0x8a, 0xd3, 0x1e, 0x38, 0xdb, 0x67, 0x3f, 0x70, 0x5e, 0xa2, 0x35, 0xa9, 0x53, 0x33, 0x0c, 0xf8,
0xb1, 0xbc, 0x3e, 0xe3, 0x11, 0x97, 0x9c, 0x1d, 0x08, 0x77, 0x2b, 0x83, 0x3f, 0x3a, 0xe5, 0xc6,
0xf3, 0x04, 0x5d, 0xc9, 0xc6, 0x55, 0xcf, 0x50, 0x1d, 0x69, 0x17, 0xee, 0xaf, 0x95, 0x0c, 0x42,
0xbd, 0x44, 0xc1, 0xbb, 0x79, 0x01, 0xfe, 0x3b, 0xee, 0xfe, 0xef, 0x01, 0x00, 0x00, 0xff, 0xff,
0xb1, 0x31, 0x49, 0x89, 0xaa, 0x1b, 0x00, 0x00,
} | |
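The getters above follow the standard protoc-gen-go pattern: each is safe to call on a nil receiver and returns the field's zero value, so callers can chain accessors without nil checks. A minimal sketch of that pattern on hypothetical Timespec/Msg stand-ins (not the generated types themselves):

package main

import "fmt"

// Hypothetical stand-ins for the generated message types.
type Timespec struct{ Seconds uint32 }

func (t *Timespec) GetSeconds() uint32 {
	if t != nil {
		return t.Seconds
	}
	return 0
}

type Msg struct{ Stamp *Timespec }

func (m *Msg) GetStamp() *Timespec {
	if m != nil {
		return m.Stamp
	}
	return nil
}

func main() {
	var m *Msg                             // nil message
	fmt.Println(m.GetStamp().GetSeconds()) // prints 0; no panic anywhere in the chain
}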
graphql-client.ts |
export class GraphQLError extends Error {
errors: any
data: any
variables: any
query: string
constructor(result: AxiosResponse<any>, query: string, variables?: any) {
const message = result.data.errors.map((e: { message: string }) => e.message).join('\n')
super(message)
this.errors = result.data.errors
this.data = result.data.data
this.query = query
this.variables = variables
}
}
export class ResourceConflictError extends GraphQLError {
}
export class GraphQLClient {
endpoint: string;
headers: any;
constructor(endpoint: string, headers?: any) {
this.endpoint = endpoint
this.headers = headers || {}
}
async execute(query: string, variables?: any) {
const data = JSON.stringify({
query: query,
variables: variables || {},
})
const result = await axios.post(this.endpoint, data, {
headers: this.headers,
})
    // Seeing weird behavior when creating an application that results in
    // both a result and an (inaccurate) error
if (result.data.errors) {
throw new GraphQLError(result, query, variables)
}
return result.data
}
} | import axios, { AxiosResponse } from 'axios' |
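A minimal usage sketch of the client above; the endpoint, token, query and fetchUser helper are hypothetical, and only execute and GraphQLError from this file are assumed.

// Hypothetical endpoint, token and query, for illustration only.
const client = new GraphQLClient('https://example.com/graphql', {
  Authorization: 'Bearer <token>',
})

async function fetchUser(id: string) {
  const query = `query User($id: ID!) { user(id: $id) { id name } }`
  try {
    // execute returns the raw response body, i.e. { data, errors? }
    const body = await client.execute(query, { id })
    return body.data.user
  } catch (err) {
    if (err instanceof GraphQLError) {
      console.error('GraphQL errors:', err.errors)
    }
    throw err
  }
}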
|
force.py | """
==========
Javascript
==========
Example of writing JSON format graph data and using the D3 Javascript library to produce an HTML/Javascript drawing.
"""
# Author: Aric Hagberg <[email protected]>
# Copyright (C) 2011-2018 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import json
import flask
import networkx as nx
from networkx.readwrite import json_graph
G = nx.barbell_graph(6, 3)
# this d3 example uses the name attribute for the mouse-hover value,
# so add a name to each node
for n in G:
G.nodes[n]['name'] = n
# write json formatted data
d = json_graph.node_link_data(G) # node-link format to serialize
# write json
json.dump(d, open('force/force.json', 'w'))
print('Wrote node-link JSON data to force/force.json')
# Serve the file over http to allow for cross origin requests
app = flask.Flask(__name__, static_folder="force")
@app.route('/')
def static_proxy():
|
print('\nGo to http://localhost:8000 to see the example\n')
app.run(port=8000)
| return app.send_static_file('force.html') |
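As a quick reference for the data being written above, a standalone sketch of the node-link dict produced by json_graph.node_link_data (key names per networkx's node-link convention; exact contents depend on the graph, and the 'name' attribute added above rides along on each node):

import networkx as nx
from networkx.readwrite import json_graph

H = nx.path_graph(2)
print(json_graph.node_link_data(H))
# {'directed': False, 'multigraph': False, 'graph': {},
#  'nodes': [{'id': 0}, {'id': 1}],
#  'links': [{'source': 0, 'target': 1}]}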
rezbuild.py | from build_util import build_directory_recurse, check_visible
def | (source_path, build_path, install_path, targets):
    # the normal requirements 'build_util' and 'floob' should be visible
check_visible('anti', 'build_util')
check_visible('anti', 'floob')
import floob
floob.hello()
try:
import loco
raise Exception('loco should not be here')
except ImportError:
        print('caught expected ImportError: loco should not be available')
# Copyright 2013-2016 Allan Johns.
#
# This library is free software: you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library. If not, see <http://www.gnu.org/licenses/>.
| build |
rst2latex.py | #!c:\users\reiya\pycharmprojects\lineworks\venv\scripts\python.exe
# $Id: rst2latex.py 5905 2009-04-16 12:04:49Z milde $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing LaTeX.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '') | pass
from docutils.core import publish_cmdline
description = ('Generates LaTeX documents from standalone reStructuredText '
'sources. '
'Reads from <source> (default is stdin) and writes to '
'<destination> (default is stdout). See '
'<http://docutils.sourceforge.net/docs/user/latex.html> for '
'the full reference.')
publish_cmdline(writer_name='latex', description=description) | except: |
gtp.py | # GTP for Trojan-go
# Helper file
import re
def pre_engine(s):
s = re.sub("[^\t\n -~]", "", s)
s = s.split("#")[0]
s = s.replace("\t", " ")
return s
def pre_controller(s):
s = re.sub("[^\t\n -~]", "", s)
s = s.replace("\t", " ")
return s
def gtp_boolean(b):
return "true" if b else "false"
def gtp_list(l):
return "\n".join(l)
def gtp_color(color):
    # "B"/"W" chosen from the several spellings GTP accepts (cf. parse_color)
return {BLACK: "B", WHITE: "W"}[color]
def gtp_vertex(vertex):
if vertex == PASS:
return "pass"
elif vertex == RESIGN:
return "resign"
else:
x, y = vertex
return "{}{}".format("ABCDEFGHJKLMNOPQRSTYVWYZ"[x - 1], y)
def gtp_move(color, vertex):
return " ".join([gtp_color(color), gtp_vertex(vertex)])
def parse_message(message):
message = pre_engine(message).strip()
first, rest = (message.split(" ", 1) + [None])[:2]
if first.isdigit():
message_id = int(first)
if rest is not None:
command, arguments = (rest.split(" ", 1) + [None])[:2]
else:
command, arguments = None, None
else:
message_id = None
command, arguments = first, rest
return message_id, command, arguments
WHITE = -1
BLACK = +1
EMPTY = 0
PASS = (0, 0)
RESIGN = "resign"
def parse_color(color):
if color.lower() in ["b", "black"]:
return BLACK
elif color.lower() in ["w", "white"]:
return WHITE
else:
return False
def parse_vertex(vertex_string):
    # Translate a GTP vertex string (e.g. "D4" or "pass") into board coordinates
if vertex_string is None:
return False
elif vertex_string.lower() == "pass":
return PASS
elif len(vertex_string) > 1:
x = "abcdefghjklmnopqrstuvwxyz".find(vertex_string[0].lower()) + 1
if x == 0:
return False
if vertex_string[1:].isdigit():
y = int(vertex_string[1:])
else:
return False
else:
return False
return (x, y)
def parse_move(move_string):
color_string, vertex_string = (move_string.split(" ") + [None])[:2]
color = parse_color(color_string)
if color is False:
return False
vertex = parse_vertex(vertex_string)
if vertex is False:
return False
return color, vertex
MIN_BOARD_SIZE = 7
MAX_BOARD_SIZE = 19
def format_success(message_id, response=None):
if response is None:
response = ""
else:
response = " {}".format(response)
if message_id:
return "={}{}\n\n".format(message_id, response)
else:
return "={}\n\n".format(response)
def format_error(message_id, response):
if response:
response = " {}".format(response)
if message_id:
return "?{}{}\n\n".format(message_id, response)
else:
return "?{}\n\n".format(response)
# Not used
class Engine(object):
def __init__(self, game_obj, name="gtp (python library)", version="0.2"):
self.size = 19
self.komi = 6.5
self._game = game_obj
self._game.clear()
self._name = name
self._version = version
self.disconnect = False
self.known_commands = [
field[4:] for field in dir(self) if field.startswith("cmd_")]
def send(self, message):
message_id, command, arguments = parse_message(message)
if command in self.known_commands:
try:
return format_success(
message_id, getattr(self, "cmd_" + command)(arguments))
except ValueError as exception:
return format_error(message_id, exception.args[0])
else:
return format_error(message_id, "unknown command")
def vertex_in_range(self, vertex):
if vertex == PASS:
return True
if 1 <= vertex[0] <= self.size and 1 <= vertex[1] <= self.size:
return True
else:
return False
# commands
def cmd_protocol_version(self, arguments):
return 2
def cmd_name(self, arguments):
return self._name
def cmd_version(self, arguments):
return self._version
def cmd_known_command(self, arguments):
return gtp_boolean(arguments in self.known_commands)
def cmd_list_commands(self, arguments):
return gtp_list(self.known_commands)
def cmd_quit(self, arguments):
self.disconnect = True
def cmd_boardsize(self, arguments):
if arguments.isdigit():
size = int(arguments)
if MIN_BOARD_SIZE <= size <= MAX_BOARD_SIZE:
self.size = size
self._game.set_size(size)
else:
raise ValueError("unacceptable size")
else:
raise ValueError("non digit size")
def cmd_clear_board(self, arguments):
self._game.clear()
def cmd_komi(self, arguments):
try:
komi = float(arguments)
self.komi = komi
self._game.set_komi(komi)
except ValueError:
raise ValueError("syntax error")
def cmd_play(self, arguments):
move = parse_move(arguments)
if move:
color, vertex = move
if self.vertex_in_range(vertex):
if self._game.make_move(color, vertex):
return
raise ValueError("illegal move")
def cmd_genmove(self, arguments):
c = parse_color(arguments)
if c:
move = self._game.get_move(c)
self._game.make_move(c, move)
return gtp_vertex(move)
else:
raise ValueError("unknown player: {}".format(arguments))
# Not used
class MinimalGame(object):
def __init__(self, size=19, komi=6.5):
self.size = size
        self.komi = komi
self.board = [EMPTY] * (self.size * self.size)
def _flatten(self, vertex):
(x, y) = vertex
return (x - 1) * self.size + (y - 1)
def clear(self):
self.board = [EMPTY] * (self.size * self.size)
def make_move(self, color, vertex):
        # no legality check other than the space being empty,
        # no side-effects beyond placing the stone
if vertex == PASS:
return True # noop
idx = self._flatten(vertex)
if self.board[idx] == EMPTY:
self.board[idx] = color
return True
else:
|
def set_size(self, n):
self.size = n
self.clear()
def set_komi(self, k):
self.komi = k
def get_move(self, color):
# pass every time. At least it's legal
return (0, 0)
| return False |
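A short sketch of driving the Engine with the MinimalGame stub above; it uses only names defined in this file, and the command strings follow the GTP framing that parse_message and format_success implement:

engine = Engine(MinimalGame())
print(engine.send("1 protocol_version"), end="")  # "=1 2" plus a blank line
print(engine.send("2 boardsize 9"), end="")       # "=2"
print(engine.send("3 play B D4"), end="")         # "=3"
print(engine.send("4 genmove W"), end="")         # "=4 pass" (MinimalGame always passes)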
received_packet_history.go | package ackhandler
import (
"github.com/xyproto/quic/internal/protocol"
"github.com/xyproto/quic/internal/utils"
"github.com/xyproto/quic/internal/wire"
)
// The receivedPacketHistory tracks which packet numbers have already been received.
// It generates ACK ranges which can be used to assemble an ACK frame.
// It does not store packet contents.
type receivedPacketHistory struct {
ranges *utils.PacketIntervalList
deletedBelow protocol.PacketNumber
}
func newReceivedPacketHistory() *receivedPacketHistory {
return &receivedPacketHistory{
ranges: utils.NewPacketIntervalList(),
}
}
// ReceivedPacket registers a packet with PacketNumber p and updates the ranges
func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) {
// ignore delayed packets, if we already deleted the range
if p < h.deletedBelow {
return
}
h.addToRanges(p)
h.maybeDeleteOldRanges()
}
func (h *receivedPacketHistory) addToRanges(p protocol.PacketNumber) {
if h.ranges.Len() == 0 {
h.ranges.PushBack(utils.PacketInterval{Start: p, End: p})
return
}
for el := h.ranges.Back(); el != nil; el = el.Prev() {
// p already included in an existing range. Nothing to do here
if p >= el.Value.Start && p <= el.Value.End {
return
}
var rangeExtended bool
if el.Value.End == p-1 { // extend a range at the end
rangeExtended = true
el.Value.End = p
} else if el.Value.Start == p+1 { // extend a range at the beginning
rangeExtended = true
el.Value.Start = p
}
		// if a range was extended (at either end), it may now be possible to merge it with an adjacent range
if rangeExtended {
prev := el.Prev()
if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges
prev.Value.End = el.Value.End
h.ranges.Remove(el)
return
}
			return // if the two ranges were not merged, we're done here
}
// create a new range at the end
if p > el.Value.End {
h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el)
return
}
}
// create a new range at the beginning
h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front())
}
// Delete old ranges if we're tracking more than 500 of them.
// This is a DoS defense against a peer that sends us too many gaps.
func (h *receivedPacketHistory) maybeDeleteOldRanges() {
for h.ranges.Len() > protocol.MaxNumAckRanges {
h.ranges.Remove(h.ranges.Front())
}
}
// DeleteBelow deletes all entries below (but not including) p
func (h *receivedPacketHistory) DeleteBelow(p protocol.PacketNumber) {
if p < h.deletedBelow {
return
}
h.deletedBelow = p
nextEl := h.ranges.Front()
for el := h.ranges.Front(); nextEl != nil; el = nextEl {
nextEl = el.Next()
if el.Value.End < p { // delete a whole range
h.ranges.Remove(el)
} else if p > el.Value.Start && p <= el.Value.End | else { // no ranges affected. Nothing to do
return
}
}
}
// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame
func (h *receivedPacketHistory) GetAckRanges() []wire.AckRange {
if h.ranges.Len() == 0 {
return nil
}
ackRanges := make([]wire.AckRange, h.ranges.Len())
i := 0
for el := h.ranges.Back(); el != nil; el = el.Prev() {
ackRanges[i] = wire.AckRange{Smallest: el.Value.Start, Largest: el.Value.End}
i++
}
return ackRanges
}
func (h *receivedPacketHistory) GetHighestAckRange() wire.AckRange {
ackRange := wire.AckRange{}
if h.ranges.Len() > 0 {
r := h.ranges.Back().Value
ackRange.Smallest = r.Start
ackRange.Largest = r.End
}
return ackRange
}
| {
el.Value.Start = p
return
} |
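A standalone sketch of the interval bookkeeping ReceivedPacket performs; it deliberately avoids the internal quic packages, using a plain sorted slice in place of utils.PacketIntervalList. Receiving 1, 2 and 4 leaves the two disjoint ranges [1,2] and [4,4], which GetAckRanges would report highest-first.

package main

import "fmt"

// interval mirrors utils.PacketInterval: an inclusive range of packet numbers.
type interval struct{ start, end uint64 }

// add inserts p into an ascending slice of disjoint intervals, extending or
// merging neighbors where possible (the same idea as addToRanges above).
func add(rs []interval, p uint64) []interval {
	for i := range rs {
		if p >= rs[i].start && p <= rs[i].end {
			return rs // already covered
		}
		if rs[i].end+1 == p { // extend a range at the end
			rs[i].end = p
			if i+1 < len(rs) && rs[i+1].start == p+1 { // merge with the next range
				rs[i].end = rs[i+1].end
				return append(rs[:i+1], rs[i+2:]...)
			}
			return rs
		}
		if rs[i].start == p+1 { // extend a range at the beginning
			rs[i].start = p
			return rs
		}
		if p < rs[i].start { // new range before this one
			rs = append(rs, interval{})
			copy(rs[i+1:], rs[i:])
			rs[i] = interval{p, p}
			return rs
		}
	}
	return append(rs, interval{p, p}) // new range at the end
}

func main() {
	var rs []interval
	for _, p := range []uint64{1, 2, 4} {
		rs = add(rs, p)
	}
	fmt.Println(rs) // [{1 2} {4 4}] -> ACK ranges [4,4], [1,2] highest-first
}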
cmpConst.go | // run
// Code generated by gen/cmpConstGen.go. DO NOT EDIT.
package main
import (
"fmt"
"reflect"
"runtime"
)
// result records the expected outcome for the values left of, equal to, and right of the tested constant's index.
type result struct{ l, e, r bool }
var (
eq = result{l: false, e: true, r: false}
ne = result{l: true, e: false, r: true}
lt = result{l: true, e: false, r: false}
le = result{l: true, e: true, r: false}
gt = result{l: false, e: false, r: true}
ge = result{l: false, e: true, r: true}
)
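// Example of the scheme: for the constant 127 (idx 3 in uint64_vals below),
// the function x < 127 must be true for values left of index 3, false at it,
// and false to its right, i.e. exactly the lt result {l: true, e: false, r: false}.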
// uint64 tests
var uint64_vals = []uint64{
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
32768,
65534,
65535,
65536,
2147483646,
2147483647,
2147483648,
4278190080,
4294967294,
4294967295,
4294967296,
1095216660480,
9223372036854775806,
9223372036854775807,
9223372036854775808,
18374686479671623680,
18446744073709551614,
18446744073709551615,
}
func lt_0_uint64(x uint64) bool { return x < 0 }
func le_0_uint64(x uint64) bool { return x <= 0 }
func gt_0_uint64(x uint64) bool { return x > 0 }
func ge_0_uint64(x uint64) bool { return x >= 0 }
func eq_0_uint64(x uint64) bool { return x == 0 }
func ne_0_uint64(x uint64) bool { return x != 0 }
func lt_1_uint64(x uint64) bool { return x < 1 }
func le_1_uint64(x uint64) bool { return x <= 1 }
func gt_1_uint64(x uint64) bool { return x > 1 }
func ge_1_uint64(x uint64) bool { return x >= 1 }
func eq_1_uint64(x uint64) bool { return x == 1 }
func ne_1_uint64(x uint64) bool { return x != 1 }
func lt_126_uint64(x uint64) bool { return x < 126 }
func le_126_uint64(x uint64) bool { return x <= 126 }
func gt_126_uint64(x uint64) bool { return x > 126 }
func ge_126_uint64(x uint64) bool { return x >= 126 }
func eq_126_uint64(x uint64) bool { return x == 126 }
func ne_126_uint64(x uint64) bool { return x != 126 }
func lt_127_uint64(x uint64) bool { return x < 127 }
func le_127_uint64(x uint64) bool { return x <= 127 }
func gt_127_uint64(x uint64) bool { return x > 127 }
func ge_127_uint64(x uint64) bool { return x >= 127 }
func eq_127_uint64(x uint64) bool { return x == 127 }
func ne_127_uint64(x uint64) bool { return x != 127 }
func lt_128_uint64(x uint64) bool { return x < 128 }
func le_128_uint64(x uint64) bool { return x <= 128 }
func gt_128_uint64(x uint64) bool { return x > 128 }
func ge_128_uint64(x uint64) bool { return x >= 128 }
func eq_128_uint64(x uint64) bool { return x == 128 }
func ne_128_uint64(x uint64) bool { return x != 128 }
func lt_254_uint64(x uint64) bool { return x < 254 }
func le_254_uint64(x uint64) bool { return x <= 254 }
func gt_254_uint64(x uint64) bool { return x > 254 }
func ge_254_uint64(x uint64) bool { return x >= 254 }
func eq_254_uint64(x uint64) bool { return x == 254 }
func ne_254_uint64(x uint64) bool { return x != 254 }
func lt_255_uint64(x uint64) bool { return x < 255 }
func le_255_uint64(x uint64) bool { return x <= 255 }
func gt_255_uint64(x uint64) bool { return x > 255 }
func ge_255_uint64(x uint64) bool { return x >= 255 }
func eq_255_uint64(x uint64) bool { return x == 255 }
func ne_255_uint64(x uint64) bool { return x != 255 }
func lt_256_uint64(x uint64) bool { return x < 256 }
func le_256_uint64(x uint64) bool { return x <= 256 }
func gt_256_uint64(x uint64) bool { return x > 256 }
func ge_256_uint64(x uint64) bool { return x >= 256 }
func eq_256_uint64(x uint64) bool { return x == 256 }
func ne_256_uint64(x uint64) bool { return x != 256 }
func lt_32766_uint64(x uint64) bool { return x < 32766 }
func le_32766_uint64(x uint64) bool { return x <= 32766 }
func gt_32766_uint64(x uint64) bool { return x > 32766 }
func ge_32766_uint64(x uint64) bool { return x >= 32766 }
func eq_32766_uint64(x uint64) bool { return x == 32766 }
func ne_32766_uint64(x uint64) bool { return x != 32766 }
func lt_32767_uint64(x uint64) bool { return x < 32767 }
func le_32767_uint64(x uint64) bool { return x <= 32767 }
func gt_32767_uint64(x uint64) bool { return x > 32767 }
func ge_32767_uint64(x uint64) bool { return x >= 32767 }
func eq_32767_uint64(x uint64) bool { return x == 32767 }
func ne_32767_uint64(x uint64) bool { return x != 32767 }
func lt_32768_uint64(x uint64) bool { return x < 32768 }
func le_32768_uint64(x uint64) bool { return x <= 32768 }
func gt_32768_uint64(x uint64) bool { return x > 32768 }
func ge_32768_uint64(x uint64) bool { return x >= 32768 }
func eq_32768_uint64(x uint64) bool { return x == 32768 }
func ne_32768_uint64(x uint64) bool { return x != 32768 }
func lt_65534_uint64(x uint64) bool { return x < 65534 }
func le_65534_uint64(x uint64) bool { return x <= 65534 }
func gt_65534_uint64(x uint64) bool { return x > 65534 }
func ge_65534_uint64(x uint64) bool { return x >= 65534 }
func eq_65534_uint64(x uint64) bool { return x == 65534 }
func ne_65534_uint64(x uint64) bool { return x != 65534 }
func lt_65535_uint64(x uint64) bool { return x < 65535 }
func le_65535_uint64(x uint64) bool { return x <= 65535 }
func gt_65535_uint64(x uint64) bool { return x > 65535 }
func ge_65535_uint64(x uint64) bool { return x >= 65535 }
func eq_65535_uint64(x uint64) bool { return x == 65535 }
func ne_65535_uint64(x uint64) bool { return x != 65535 }
func lt_65536_uint64(x uint64) bool { return x < 65536 }
func le_65536_uint64(x uint64) bool { return x <= 65536 }
func gt_65536_uint64(x uint64) bool { return x > 65536 }
func ge_65536_uint64(x uint64) bool { return x >= 65536 }
func eq_65536_uint64(x uint64) bool { return x == 65536 }
func ne_65536_uint64(x uint64) bool { return x != 65536 }
func lt_2147483646_uint64(x uint64) bool { return x < 2147483646 }
func le_2147483646_uint64(x uint64) bool { return x <= 2147483646 }
func gt_2147483646_uint64(x uint64) bool { return x > 2147483646 }
func ge_2147483646_uint64(x uint64) bool { return x >= 2147483646 }
func eq_2147483646_uint64(x uint64) bool { return x == 2147483646 }
func ne_2147483646_uint64(x uint64) bool { return x != 2147483646 }
func lt_2147483647_uint64(x uint64) bool { return x < 2147483647 }
func le_2147483647_uint64(x uint64) bool { return x <= 2147483647 }
func gt_2147483647_uint64(x uint64) bool { return x > 2147483647 }
func ge_2147483647_uint64(x uint64) bool { return x >= 2147483647 }
func eq_2147483647_uint64(x uint64) bool { return x == 2147483647 }
func ne_2147483647_uint64(x uint64) bool { return x != 2147483647 }
func lt_2147483648_uint64(x uint64) bool { return x < 2147483648 }
func le_2147483648_uint64(x uint64) bool { return x <= 2147483648 }
func gt_2147483648_uint64(x uint64) bool { return x > 2147483648 }
func ge_2147483648_uint64(x uint64) bool { return x >= 2147483648 }
func eq_2147483648_uint64(x uint64) bool { return x == 2147483648 }
func ne_2147483648_uint64(x uint64) bool { return x != 2147483648 }
func lt_4278190080_uint64(x uint64) bool { return x < 4278190080 }
func le_4278190080_uint64(x uint64) bool { return x <= 4278190080 }
func gt_4278190080_uint64(x uint64) bool { return x > 4278190080 }
func ge_4278190080_uint64(x uint64) bool { return x >= 4278190080 }
func eq_4278190080_uint64(x uint64) bool { return x == 4278190080 }
func ne_4278190080_uint64(x uint64) bool { return x != 4278190080 }
func lt_4294967294_uint64(x uint64) bool { return x < 4294967294 }
func le_4294967294_uint64(x uint64) bool { return x <= 4294967294 }
func gt_4294967294_uint64(x uint64) bool { return x > 4294967294 }
func ge_4294967294_uint64(x uint64) bool { return x >= 4294967294 }
func eq_4294967294_uint64(x uint64) bool { return x == 4294967294 }
func ne_4294967294_uint64(x uint64) bool { return x != 4294967294 }
func lt_4294967295_uint64(x uint64) bool { return x < 4294967295 }
func le_4294967295_uint64(x uint64) bool { return x <= 4294967295 }
func gt_4294967295_uint64(x uint64) bool { return x > 4294967295 }
func ge_4294967295_uint64(x uint64) bool { return x >= 4294967295 }
func eq_4294967295_uint64(x uint64) bool { return x == 4294967295 }
func ne_4294967295_uint64(x uint64) bool { return x != 4294967295 }
func lt_4294967296_uint64(x uint64) bool { return x < 4294967296 }
func le_4294967296_uint64(x uint64) bool { return x <= 4294967296 }
func gt_4294967296_uint64(x uint64) bool { return x > 4294967296 }
func ge_4294967296_uint64(x uint64) bool { return x >= 4294967296 }
func eq_4294967296_uint64(x uint64) bool { return x == 4294967296 }
func ne_4294967296_uint64(x uint64) bool { return x != 4294967296 }
func lt_1095216660480_uint64(x uint64) bool { return x < 1095216660480 }
func le_1095216660480_uint64(x uint64) bool { return x <= 1095216660480 }
func gt_1095216660480_uint64(x uint64) bool { return x > 1095216660480 }
func ge_1095216660480_uint64(x uint64) bool { return x >= 1095216660480 }
func eq_1095216660480_uint64(x uint64) bool { return x == 1095216660480 }
func ne_1095216660480_uint64(x uint64) bool { return x != 1095216660480 }
func lt_9223372036854775806_uint64(x uint64) bool { return x < 9223372036854775806 }
func le_9223372036854775806_uint64(x uint64) bool { return x <= 9223372036854775806 }
func gt_9223372036854775806_uint64(x uint64) bool { return x > 9223372036854775806 }
func ge_9223372036854775806_uint64(x uint64) bool { return x >= 9223372036854775806 }
func eq_9223372036854775806_uint64(x uint64) bool { return x == 9223372036854775806 }
func ne_9223372036854775806_uint64(x uint64) bool { return x != 9223372036854775806 }
func lt_9223372036854775807_uint64(x uint64) bool { return x < 9223372036854775807 }
func le_9223372036854775807_uint64(x uint64) bool { return x <= 9223372036854775807 }
func gt_9223372036854775807_uint64(x uint64) bool { return x > 9223372036854775807 }
func ge_9223372036854775807_uint64(x uint64) bool { return x >= 9223372036854775807 }
func eq_9223372036854775807_uint64(x uint64) bool { return x == 9223372036854775807 }
func ne_9223372036854775807_uint64(x uint64) bool { return x != 9223372036854775807 }
func lt_9223372036854775808_uint64(x uint64) bool { return x < 9223372036854775808 }
func le_9223372036854775808_uint64(x uint64) bool { return x <= 9223372036854775808 }
func gt_9223372036854775808_uint64(x uint64) bool { return x > 9223372036854775808 }
func ge_9223372036854775808_uint64(x uint64) bool { return x >= 9223372036854775808 }
func eq_9223372036854775808_uint64(x uint64) bool { return x == 9223372036854775808 }
func ne_9223372036854775808_uint64(x uint64) bool { return x != 9223372036854775808 }
func lt_18374686479671623680_uint64(x uint64) bool { return x < 18374686479671623680 }
func le_18374686479671623680_uint64(x uint64) bool { return x <= 18374686479671623680 }
func gt_18374686479671623680_uint64(x uint64) bool { return x > 18374686479671623680 }
func ge_18374686479671623680_uint64(x uint64) bool { return x >= 18374686479671623680 }
func eq_18374686479671623680_uint64(x uint64) bool { return x == 18374686479671623680 }
func ne_18374686479671623680_uint64(x uint64) bool { return x != 18374686479671623680 }
func lt_18446744073709551614_uint64(x uint64) bool { return x < 18446744073709551614 }
func le_18446744073709551614_uint64(x uint64) bool { return x <= 18446744073709551614 }
func gt_18446744073709551614_uint64(x uint64) bool { return x > 18446744073709551614 }
func ge_18446744073709551614_uint64(x uint64) bool { return x >= 18446744073709551614 }
func eq_18446744073709551614_uint64(x uint64) bool { return x == 18446744073709551614 }
func ne_18446744073709551614_uint64(x uint64) bool { return x != 18446744073709551614 }
func lt_18446744073709551615_uint64(x uint64) bool { return x < 18446744073709551615 }
func le_18446744073709551615_uint64(x uint64) bool { return x <= 18446744073709551615 }
func gt_18446744073709551615_uint64(x uint64) bool { return x > 18446744073709551615 }
func ge_18446744073709551615_uint64(x uint64) bool { return x >= 18446744073709551615 }
func eq_18446744073709551615_uint64(x uint64) bool { return x == 18446744073709551615 }
func ne_18446744073709551615_uint64(x uint64) bool { return x != 18446744073709551615 }
var uint64_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(uint64) bool
}{
{idx: 0, exp: lt, fn: lt_0_uint64},
{idx: 0, exp: le, fn: le_0_uint64},
{idx: 0, exp: gt, fn: gt_0_uint64},
{idx: 0, exp: ge, fn: ge_0_uint64},
{idx: 0, exp: eq, fn: eq_0_uint64},
{idx: 0, exp: ne, fn: ne_0_uint64},
{idx: 1, exp: lt, fn: lt_1_uint64},
{idx: 1, exp: le, fn: le_1_uint64},
{idx: 1, exp: gt, fn: gt_1_uint64},
{idx: 1, exp: ge, fn: ge_1_uint64},
{idx: 1, exp: eq, fn: eq_1_uint64},
{idx: 1, exp: ne, fn: ne_1_uint64},
{idx: 2, exp: lt, fn: lt_126_uint64},
{idx: 2, exp: le, fn: le_126_uint64},
{idx: 2, exp: gt, fn: gt_126_uint64},
{idx: 2, exp: ge, fn: ge_126_uint64},
{idx: 2, exp: eq, fn: eq_126_uint64},
{idx: 2, exp: ne, fn: ne_126_uint64},
{idx: 3, exp: lt, fn: lt_127_uint64},
{idx: 3, exp: le, fn: le_127_uint64},
{idx: 3, exp: gt, fn: gt_127_uint64},
{idx: 3, exp: ge, fn: ge_127_uint64},
{idx: 3, exp: eq, fn: eq_127_uint64},
{idx: 3, exp: ne, fn: ne_127_uint64},
{idx: 4, exp: lt, fn: lt_128_uint64},
{idx: 4, exp: le, fn: le_128_uint64},
{idx: 4, exp: gt, fn: gt_128_uint64},
{idx: 4, exp: ge, fn: ge_128_uint64},
{idx: 4, exp: eq, fn: eq_128_uint64},
{idx: 4, exp: ne, fn: ne_128_uint64},
{idx: 5, exp: lt, fn: lt_254_uint64},
{idx: 5, exp: le, fn: le_254_uint64},
{idx: 5, exp: gt, fn: gt_254_uint64},
{idx: 5, exp: ge, fn: ge_254_uint64},
{idx: 5, exp: eq, fn: eq_254_uint64},
{idx: 5, exp: ne, fn: ne_254_uint64},
{idx: 6, exp: lt, fn: lt_255_uint64},
{idx: 6, exp: le, fn: le_255_uint64},
{idx: 6, exp: gt, fn: gt_255_uint64},
{idx: 6, exp: ge, fn: ge_255_uint64},
{idx: 6, exp: eq, fn: eq_255_uint64},
{idx: 6, exp: ne, fn: ne_255_uint64},
{idx: 7, exp: lt, fn: lt_256_uint64},
{idx: 7, exp: le, fn: le_256_uint64},
{idx: 7, exp: gt, fn: gt_256_uint64},
{idx: 7, exp: ge, fn: ge_256_uint64},
{idx: 7, exp: eq, fn: eq_256_uint64},
{idx: 7, exp: ne, fn: ne_256_uint64},
{idx: 8, exp: lt, fn: lt_32766_uint64},
{idx: 8, exp: le, fn: le_32766_uint64},
{idx: 8, exp: gt, fn: gt_32766_uint64},
{idx: 8, exp: ge, fn: ge_32766_uint64},
{idx: 8, exp: eq, fn: eq_32766_uint64},
{idx: 8, exp: ne, fn: ne_32766_uint64},
{idx: 9, exp: lt, fn: lt_32767_uint64},
{idx: 9, exp: le, fn: le_32767_uint64},
{idx: 9, exp: gt, fn: gt_32767_uint64},
{idx: 9, exp: ge, fn: ge_32767_uint64},
{idx: 9, exp: eq, fn: eq_32767_uint64},
{idx: 9, exp: ne, fn: ne_32767_uint64},
{idx: 10, exp: lt, fn: lt_32768_uint64},
{idx: 10, exp: le, fn: le_32768_uint64},
{idx: 10, exp: gt, fn: gt_32768_uint64},
{idx: 10, exp: ge, fn: ge_32768_uint64},
{idx: 10, exp: eq, fn: eq_32768_uint64},
{idx: 10, exp: ne, fn: ne_32768_uint64},
{idx: 11, exp: lt, fn: lt_65534_uint64},
{idx: 11, exp: le, fn: le_65534_uint64},
{idx: 11, exp: gt, fn: gt_65534_uint64},
{idx: 11, exp: ge, fn: ge_65534_uint64},
{idx: 11, exp: eq, fn: eq_65534_uint64},
{idx: 11, exp: ne, fn: ne_65534_uint64},
{idx: 12, exp: lt, fn: lt_65535_uint64},
{idx: 12, exp: le, fn: le_65535_uint64},
{idx: 12, exp: gt, fn: gt_65535_uint64},
{idx: 12, exp: ge, fn: ge_65535_uint64},
{idx: 12, exp: eq, fn: eq_65535_uint64},
{idx: 12, exp: ne, fn: ne_65535_uint64},
{idx: 13, exp: lt, fn: lt_65536_uint64},
{idx: 13, exp: le, fn: le_65536_uint64},
{idx: 13, exp: gt, fn: gt_65536_uint64},
{idx: 13, exp: ge, fn: ge_65536_uint64},
{idx: 13, exp: eq, fn: eq_65536_uint64},
{idx: 13, exp: ne, fn: ne_65536_uint64},
{idx: 14, exp: lt, fn: lt_2147483646_uint64},
{idx: 14, exp: le, fn: le_2147483646_uint64},
{idx: 14, exp: gt, fn: gt_2147483646_uint64},
{idx: 14, exp: ge, fn: ge_2147483646_uint64},
{idx: 14, exp: eq, fn: eq_2147483646_uint64},
{idx: 14, exp: ne, fn: ne_2147483646_uint64},
{idx: 15, exp: lt, fn: lt_2147483647_uint64},
{idx: 15, exp: le, fn: le_2147483647_uint64},
{idx: 15, exp: gt, fn: gt_2147483647_uint64},
{idx: 15, exp: ge, fn: ge_2147483647_uint64},
{idx: 15, exp: eq, fn: eq_2147483647_uint64},
{idx: 15, exp: ne, fn: ne_2147483647_uint64},
{idx: 16, exp: lt, fn: lt_2147483648_uint64},
{idx: 16, exp: le, fn: le_2147483648_uint64},
{idx: 16, exp: gt, fn: gt_2147483648_uint64},
{idx: 16, exp: ge, fn: ge_2147483648_uint64},
{idx: 16, exp: eq, fn: eq_2147483648_uint64},
{idx: 16, exp: ne, fn: ne_2147483648_uint64},
{idx: 17, exp: lt, fn: lt_4278190080_uint64},
{idx: 17, exp: le, fn: le_4278190080_uint64},
{idx: 17, exp: gt, fn: gt_4278190080_uint64},
{idx: 17, exp: ge, fn: ge_4278190080_uint64},
{idx: 17, exp: eq, fn: eq_4278190080_uint64},
{idx: 17, exp: ne, fn: ne_4278190080_uint64},
{idx: 18, exp: lt, fn: lt_4294967294_uint64},
{idx: 18, exp: le, fn: le_4294967294_uint64},
{idx: 18, exp: gt, fn: gt_4294967294_uint64},
{idx: 18, exp: ge, fn: ge_4294967294_uint64},
{idx: 18, exp: eq, fn: eq_4294967294_uint64},
{idx: 18, exp: ne, fn: ne_4294967294_uint64},
{idx: 19, exp: lt, fn: lt_4294967295_uint64},
{idx: 19, exp: le, fn: le_4294967295_uint64},
{idx: 19, exp: gt, fn: gt_4294967295_uint64},
{idx: 19, exp: ge, fn: ge_4294967295_uint64},
{idx: 19, exp: eq, fn: eq_4294967295_uint64},
{idx: 19, exp: ne, fn: ne_4294967295_uint64},
{idx: 20, exp: lt, fn: lt_4294967296_uint64},
{idx: 20, exp: le, fn: le_4294967296_uint64},
{idx: 20, exp: gt, fn: gt_4294967296_uint64},
{idx: 20, exp: ge, fn: ge_4294967296_uint64},
{idx: 20, exp: eq, fn: eq_4294967296_uint64},
{idx: 20, exp: ne, fn: ne_4294967296_uint64},
{idx: 21, exp: lt, fn: lt_1095216660480_uint64},
{idx: 21, exp: le, fn: le_1095216660480_uint64},
{idx: 21, exp: gt, fn: gt_1095216660480_uint64},
{idx: 21, exp: ge, fn: ge_1095216660480_uint64},
{idx: 21, exp: eq, fn: eq_1095216660480_uint64},
{idx: 21, exp: ne, fn: ne_1095216660480_uint64},
{idx: 22, exp: lt, fn: lt_9223372036854775806_uint64},
{idx: 22, exp: le, fn: le_9223372036854775806_uint64},
{idx: 22, exp: gt, fn: gt_9223372036854775806_uint64},
{idx: 22, exp: ge, fn: ge_9223372036854775806_uint64},
{idx: 22, exp: eq, fn: eq_9223372036854775806_uint64},
{idx: 22, exp: ne, fn: ne_9223372036854775806_uint64},
{idx: 23, exp: lt, fn: lt_9223372036854775807_uint64},
{idx: 23, exp: le, fn: le_9223372036854775807_uint64},
{idx: 23, exp: gt, fn: gt_9223372036854775807_uint64},
{idx: 23, exp: ge, fn: ge_9223372036854775807_uint64},
{idx: 23, exp: eq, fn: eq_9223372036854775807_uint64},
{idx: 23, exp: ne, fn: ne_9223372036854775807_uint64},
{idx: 24, exp: lt, fn: lt_9223372036854775808_uint64},
{idx: 24, exp: le, fn: le_9223372036854775808_uint64},
{idx: 24, exp: gt, fn: gt_9223372036854775808_uint64},
{idx: 24, exp: ge, fn: ge_9223372036854775808_uint64},
{idx: 24, exp: eq, fn: eq_9223372036854775808_uint64},
{idx: 24, exp: ne, fn: ne_9223372036854775808_uint64},
{idx: 25, exp: lt, fn: lt_18374686479671623680_uint64},
{idx: 25, exp: le, fn: le_18374686479671623680_uint64},
{idx: 25, exp: gt, fn: gt_18374686479671623680_uint64},
{idx: 25, exp: ge, fn: ge_18374686479671623680_uint64},
{idx: 25, exp: eq, fn: eq_18374686479671623680_uint64},
{idx: 25, exp: ne, fn: ne_18374686479671623680_uint64},
{idx: 26, exp: lt, fn: lt_18446744073709551614_uint64},
{idx: 26, exp: le, fn: le_18446744073709551614_uint64},
{idx: 26, exp: gt, fn: gt_18446744073709551614_uint64},
{idx: 26, exp: ge, fn: ge_18446744073709551614_uint64},
{idx: 26, exp: eq, fn: eq_18446744073709551614_uint64},
{idx: 26, exp: ne, fn: ne_18446744073709551614_uint64},
{idx: 27, exp: lt, fn: lt_18446744073709551615_uint64},
{idx: 27, exp: le, fn: le_18446744073709551615_uint64},
{idx: 27, exp: gt, fn: gt_18446744073709551615_uint64},
{idx: 27, exp: ge, fn: ge_18446744073709551615_uint64},
{idx: 27, exp: eq, fn: eq_18446744073709551615_uint64},
{idx: 27, exp: ne, fn: ne_18446744073709551615_uint64},
}
// uint32 tests
var uint32_vals = []uint32{
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
32768,
65534,
65535,
65536,
2147483646,
2147483647,
2147483648,
4278190080,
4294967294,
4294967295,
}
func lt_0_uint32(x uint32) bool { return x < 0 }
func le_0_uint32(x uint32) bool { return x <= 0 }
func gt_0_uint32(x uint32) bool { return x > 0 }
func ge_0_uint32(x uint32) bool { return x >= 0 }
func eq_0_uint32(x uint32) bool { return x == 0 }
func ne_0_uint32(x uint32) bool { return x != 0 }
func lt_1_uint32(x uint32) bool { return x < 1 }
func le_1_uint32(x uint32) bool { return x <= 1 }
func gt_1_uint32(x uint32) bool { return x > 1 }
func ge_1_uint32(x uint32) bool { return x >= 1 }
func eq_1_uint32(x uint32) bool { return x == 1 }
func ne_1_uint32(x uint32) bool { return x != 1 }
func lt_126_uint32(x uint32) bool { return x < 126 }
func le_126_uint32(x uint32) bool { return x <= 126 }
func gt_126_uint32(x uint32) bool { return x > 126 }
func ge_126_uint32(x uint32) bool { return x >= 126 }
func eq_126_uint32(x uint32) bool { return x == 126 }
func ne_126_uint32(x uint32) bool { return x != 126 }
func lt_127_uint32(x uint32) bool { return x < 127 }
func le_127_uint32(x uint32) bool { return x <= 127 }
func gt_127_uint32(x uint32) bool { return x > 127 }
func ge_127_uint32(x uint32) bool { return x >= 127 }
func eq_127_uint32(x uint32) bool { return x == 127 }
func ne_127_uint32(x uint32) bool { return x != 127 }
func lt_128_uint32(x uint32) bool { return x < 128 }
func le_128_uint32(x uint32) bool { return x <= 128 }
func gt_128_uint32(x uint32) bool { return x > 128 }
func ge_128_uint32(x uint32) bool { return x >= 128 }
func eq_128_uint32(x uint32) bool { return x == 128 }
func ne_128_uint32(x uint32) bool { return x != 128 }
func lt_254_uint32(x uint32) bool { return x < 254 }
func le_254_uint32(x uint32) bool { return x <= 254 }
func gt_254_uint32(x uint32) bool { return x > 254 }
func ge_254_uint32(x uint32) bool { return x >= 254 }
func eq_254_uint32(x uint32) bool { return x == 254 }
func ne_254_uint32(x uint32) bool { return x != 254 }
func lt_255_uint32(x uint32) bool { return x < 255 }
func le_255_uint32(x uint32) bool { return x <= 255 }
func gt_255_uint32(x uint32) bool { return x > 255 }
func ge_255_uint32(x uint32) bool { return x >= 255 }
func eq_255_uint32(x uint32) bool { return x == 255 }
func ne_255_uint32(x uint32) bool { return x != 255 }
func lt_256_uint32(x uint32) bool { return x < 256 }
func le_256_uint32(x uint32) bool { return x <= 256 }
func gt_256_uint32(x uint32) bool { return x > 256 }
func ge_256_uint32(x uint32) bool { return x >= 256 }
func eq_256_uint32(x uint32) bool { return x == 256 }
func ne_256_uint32(x uint32) bool { return x != 256 }
func lt_32766_uint32(x uint32) bool { return x < 32766 }
func le_32766_uint32(x uint32) bool { return x <= 32766 }
func gt_32766_uint32(x uint32) bool { return x > 32766 }
func ge_32766_uint32(x uint32) bool { return x >= 32766 }
func eq_32766_uint32(x uint32) bool { return x == 32766 }
func ne_32766_uint32(x uint32) bool { return x != 32766 }
func lt_32767_uint32(x uint32) bool { return x < 32767 }
func le_32767_uint32(x uint32) bool { return x <= 32767 }
func gt_32767_uint32(x uint32) bool { return x > 32767 }
func ge_32767_uint32(x uint32) bool { return x >= 32767 }
func eq_32767_uint32(x uint32) bool { return x == 32767 }
func ne_32767_uint32(x uint32) bool { return x != 32767 }
func lt_32768_uint32(x uint32) bool { return x < 32768 }
func le_32768_uint32(x uint32) bool { return x <= 32768 }
func gt_32768_uint32(x uint32) bool { return x > 32768 }
func ge_32768_uint32(x uint32) bool { return x >= 32768 }
func eq_32768_uint32(x uint32) bool { return x == 32768 }
func ne_32768_uint32(x uint32) bool { return x != 32768 }
func lt_65534_uint32(x uint32) bool { return x < 65534 }
func le_65534_uint32(x uint32) bool { return x <= 65534 }
func gt_65534_uint32(x uint32) bool { return x > 65534 }
func ge_65534_uint32(x uint32) bool { return x >= 65534 }
func eq_65534_uint32(x uint32) bool { return x == 65534 }
func ne_65534_uint32(x uint32) bool { return x != 65534 }
func lt_65535_uint32(x uint32) bool { return x < 65535 }
func le_65535_uint32(x uint32) bool { return x <= 65535 }
func gt_65535_uint32(x uint32) bool { return x > 65535 }
func ge_65535_uint32(x uint32) bool { return x >= 65535 }
func eq_65535_uint32(x uint32) bool { return x == 65535 }
func ne_65535_uint32(x uint32) bool { return x != 65535 }
func lt_65536_uint32(x uint32) bool { return x < 65536 }
func le_65536_uint32(x uint32) bool { return x <= 65536 }
func gt_65536_uint32(x uint32) bool { return x > 65536 }
func ge_65536_uint32(x uint32) bool { return x >= 65536 }
func eq_65536_uint32(x uint32) bool { return x == 65536 }
func ne_65536_uint32(x uint32) bool { return x != 65536 }
func lt_2147483646_uint32(x uint32) bool { return x < 2147483646 }
func le_2147483646_uint32(x uint32) bool { return x <= 2147483646 }
func gt_2147483646_uint32(x uint32) bool { return x > 2147483646 }
func ge_2147483646_uint32(x uint32) bool { return x >= 2147483646 }
func eq_2147483646_uint32(x uint32) bool { return x == 2147483646 }
func ne_2147483646_uint32(x uint32) bool { return x != 2147483646 }
func lt_2147483647_uint32(x uint32) bool { return x < 2147483647 }
func le_2147483647_uint32(x uint32) bool { return x <= 2147483647 }
func gt_2147483647_uint32(x uint32) bool { return x > 2147483647 }
func ge_2147483647_uint32(x uint32) bool { return x >= 2147483647 }
func eq_2147483647_uint32(x uint32) bool { return x == 2147483647 }
func ne_2147483647_uint32(x uint32) bool { return x != 2147483647 }
func lt_2147483648_uint32(x uint32) bool { return x < 2147483648 }
func le_2147483648_uint32(x uint32) bool { return x <= 2147483648 }
func gt_2147483648_uint32(x uint32) bool { return x > 2147483648 }
func ge_2147483648_uint32(x uint32) bool { return x >= 2147483648 }
func eq_2147483648_uint32(x uint32) bool { return x == 2147483648 }
func ne_2147483648_uint32(x uint32) bool { return x != 2147483648 }
func lt_4278190080_uint32(x uint32) bool { return x < 4278190080 }
func le_4278190080_uint32(x uint32) bool { return x <= 4278190080 }
func gt_4278190080_uint32(x uint32) bool { return x > 4278190080 }
func ge_4278190080_uint32(x uint32) bool { return x >= 4278190080 }
func eq_4278190080_uint32(x uint32) bool { return x == 4278190080 }
func ne_4278190080_uint32(x uint32) bool { return x != 4278190080 }
func lt_4294967294_uint32(x uint32) bool { return x < 4294967294 }
func le_4294967294_uint32(x uint32) bool { return x <= 4294967294 }
func gt_4294967294_uint32(x uint32) bool { return x > 4294967294 }
func ge_4294967294_uint32(x uint32) bool { return x >= 4294967294 }
func eq_4294967294_uint32(x uint32) bool { return x == 4294967294 }
func ne_4294967294_uint32(x uint32) bool { return x != 4294967294 }
func lt_4294967295_uint32(x uint32) bool { return x < 4294967295 }
func le_4294967295_uint32(x uint32) bool { return x <= 4294967295 }
func gt_4294967295_uint32(x uint32) bool { return x > 4294967295 }
func ge_4294967295_uint32(x uint32) bool { return x >= 4294967295 }
func eq_4294967295_uint32(x uint32) bool { return x == 4294967295 }
func ne_4294967295_uint32(x uint32) bool { return x != 4294967295 }
var uint32_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(uint32) bool
}{
{idx: 0, exp: lt, fn: lt_0_uint32},
{idx: 0, exp: le, fn: le_0_uint32},
{idx: 0, exp: gt, fn: gt_0_uint32},
{idx: 0, exp: ge, fn: ge_0_uint32},
{idx: 0, exp: eq, fn: eq_0_uint32},
{idx: 0, exp: ne, fn: ne_0_uint32},
{idx: 1, exp: lt, fn: lt_1_uint32},
{idx: 1, exp: le, fn: le_1_uint32},
{idx: 1, exp: gt, fn: gt_1_uint32},
{idx: 1, exp: ge, fn: ge_1_uint32},
{idx: 1, exp: eq, fn: eq_1_uint32},
{idx: 1, exp: ne, fn: ne_1_uint32},
{idx: 2, exp: lt, fn: lt_126_uint32},
{idx: 2, exp: le, fn: le_126_uint32},
{idx: 2, exp: gt, fn: gt_126_uint32},
{idx: 2, exp: ge, fn: ge_126_uint32},
{idx: 2, exp: eq, fn: eq_126_uint32},
{idx: 2, exp: ne, fn: ne_126_uint32},
{idx: 3, exp: lt, fn: lt_127_uint32},
{idx: 3, exp: le, fn: le_127_uint32},
{idx: 3, exp: gt, fn: gt_127_uint32},
{idx: 3, exp: ge, fn: ge_127_uint32},
{idx: 3, exp: eq, fn: eq_127_uint32},
{idx: 3, exp: ne, fn: ne_127_uint32},
{idx: 4, exp: lt, fn: lt_128_uint32},
{idx: 4, exp: le, fn: le_128_uint32},
{idx: 4, exp: gt, fn: gt_128_uint32},
{idx: 4, exp: ge, fn: ge_128_uint32},
{idx: 4, exp: eq, fn: eq_128_uint32},
{idx: 4, exp: ne, fn: ne_128_uint32},
{idx: 5, exp: lt, fn: lt_254_uint32},
{idx: 5, exp: le, fn: le_254_uint32},
{idx: 5, exp: gt, fn: gt_254_uint32},
{idx: 5, exp: ge, fn: ge_254_uint32},
{idx: 5, exp: eq, fn: eq_254_uint32},
{idx: 5, exp: ne, fn: ne_254_uint32},
{idx: 6, exp: lt, fn: lt_255_uint32},
{idx: 6, exp: le, fn: le_255_uint32},
{idx: 6, exp: gt, fn: gt_255_uint32},
{idx: 6, exp: ge, fn: ge_255_uint32},
{idx: 6, exp: eq, fn: eq_255_uint32},
{idx: 6, exp: ne, fn: ne_255_uint32},
{idx: 7, exp: lt, fn: lt_256_uint32},
{idx: 7, exp: le, fn: le_256_uint32},
{idx: 7, exp: gt, fn: gt_256_uint32},
{idx: 7, exp: ge, fn: ge_256_uint32},
{idx: 7, exp: eq, fn: eq_256_uint32},
{idx: 7, exp: ne, fn: ne_256_uint32},
{idx: 8, exp: lt, fn: lt_32766_uint32},
{idx: 8, exp: le, fn: le_32766_uint32},
{idx: 8, exp: gt, fn: gt_32766_uint32},
{idx: 8, exp: ge, fn: ge_32766_uint32},
{idx: 8, exp: eq, fn: eq_32766_uint32},
{idx: 8, exp: ne, fn: ne_32766_uint32},
{idx: 9, exp: lt, fn: lt_32767_uint32},
{idx: 9, exp: le, fn: le_32767_uint32},
{idx: 9, exp: gt, fn: gt_32767_uint32},
{idx: 9, exp: ge, fn: ge_32767_uint32},
{idx: 9, exp: eq, fn: eq_32767_uint32},
{idx: 9, exp: ne, fn: ne_32767_uint32},
{idx: 10, exp: lt, fn: lt_32768_uint32},
{idx: 10, exp: le, fn: le_32768_uint32},
{idx: 10, exp: gt, fn: gt_32768_uint32},
{idx: 10, exp: ge, fn: ge_32768_uint32},
{idx: 10, exp: eq, fn: eq_32768_uint32},
{idx: 10, exp: ne, fn: ne_32768_uint32},
{idx: 11, exp: lt, fn: lt_65534_uint32},
{idx: 11, exp: le, fn: le_65534_uint32},
{idx: 11, exp: gt, fn: gt_65534_uint32},
{idx: 11, exp: ge, fn: ge_65534_uint32},
{idx: 11, exp: eq, fn: eq_65534_uint32},
{idx: 11, exp: ne, fn: ne_65534_uint32},
{idx: 12, exp: lt, fn: lt_65535_uint32},
{idx: 12, exp: le, fn: le_65535_uint32},
{idx: 12, exp: gt, fn: gt_65535_uint32},
{idx: 12, exp: ge, fn: ge_65535_uint32},
{idx: 12, exp: eq, fn: eq_65535_uint32},
{idx: 12, exp: ne, fn: ne_65535_uint32},
{idx: 13, exp: lt, fn: lt_65536_uint32},
{idx: 13, exp: le, fn: le_65536_uint32},
{idx: 13, exp: gt, fn: gt_65536_uint32},
{idx: 13, exp: ge, fn: ge_65536_uint32},
{idx: 13, exp: eq, fn: eq_65536_uint32},
{idx: 13, exp: ne, fn: ne_65536_uint32},
{idx: 14, exp: lt, fn: lt_2147483646_uint32},
{idx: 14, exp: le, fn: le_2147483646_uint32},
{idx: 14, exp: gt, fn: gt_2147483646_uint32},
{idx: 14, exp: ge, fn: ge_2147483646_uint32},
{idx: 14, exp: eq, fn: eq_2147483646_uint32},
{idx: 14, exp: ne, fn: ne_2147483646_uint32},
{idx: 15, exp: lt, fn: lt_2147483647_uint32},
{idx: 15, exp: le, fn: le_2147483647_uint32},
{idx: 15, exp: gt, fn: gt_2147483647_uint32},
{idx: 15, exp: ge, fn: ge_2147483647_uint32},
{idx: 15, exp: eq, fn: eq_2147483647_uint32},
{idx: 15, exp: ne, fn: ne_2147483647_uint32},
{idx: 16, exp: lt, fn: lt_2147483648_uint32},
{idx: 16, exp: le, fn: le_2147483648_uint32},
{idx: 16, exp: gt, fn: gt_2147483648_uint32},
{idx: 16, exp: ge, fn: ge_2147483648_uint32},
{idx: 16, exp: eq, fn: eq_2147483648_uint32},
{idx: 16, exp: ne, fn: ne_2147483648_uint32},
{idx: 17, exp: lt, fn: lt_4278190080_uint32},
{idx: 17, exp: le, fn: le_4278190080_uint32},
{idx: 17, exp: gt, fn: gt_4278190080_uint32},
{idx: 17, exp: ge, fn: ge_4278190080_uint32},
{idx: 17, exp: eq, fn: eq_4278190080_uint32},
{idx: 17, exp: ne, fn: ne_4278190080_uint32},
{idx: 18, exp: lt, fn: lt_4294967294_uint32},
{idx: 18, exp: le, fn: le_4294967294_uint32},
{idx: 18, exp: gt, fn: gt_4294967294_uint32},
{idx: 18, exp: ge, fn: ge_4294967294_uint32},
{idx: 18, exp: eq, fn: eq_4294967294_uint32},
{idx: 18, exp: ne, fn: ne_4294967294_uint32},
{idx: 19, exp: lt, fn: lt_4294967295_uint32},
{idx: 19, exp: le, fn: le_4294967295_uint32},
{idx: 19, exp: gt, fn: gt_4294967295_uint32},
{idx: 19, exp: ge, fn: ge_4294967295_uint32},
{idx: 19, exp: eq, fn: eq_4294967295_uint32},
{idx: 19, exp: ne, fn: ne_4294967295_uint32},
}
// uint16 tests
var uint16_vals = []uint16{
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
32768,
65534,
65535,
}
func lt_0_uint16(x uint16) bool { return x < 0 }
func le_0_uint16(x uint16) bool { return x <= 0 }
func gt_0_uint16(x uint16) bool { return x > 0 }
func ge_0_uint16(x uint16) bool { return x >= 0 }
func eq_0_uint16(x uint16) bool { return x == 0 }
func ne_0_uint16(x uint16) bool { return x != 0 }
func lt_1_uint16(x uint16) bool { return x < 1 }
func le_1_uint16(x uint16) bool { return x <= 1 }
func gt_1_uint16(x uint16) bool { return x > 1 }
func ge_1_uint16(x uint16) bool { return x >= 1 }
func eq_1_uint16(x uint16) bool { return x == 1 }
func ne_1_uint16(x uint16) bool { return x != 1 }
func lt_126_uint16(x uint16) bool { return x < 126 }
func le_126_uint16(x uint16) bool { return x <= 126 }
func gt_126_uint16(x uint16) bool { return x > 126 }
func ge_126_uint16(x uint16) bool { return x >= 126 }
func eq_126_uint16(x uint16) bool { return x == 126 }
func ne_126_uint16(x uint16) bool { return x != 126 }
func lt_127_uint16(x uint16) bool { return x < 127 }
func le_127_uint16(x uint16) bool { return x <= 127 }
func gt_127_uint16(x uint16) bool { return x > 127 }
func ge_127_uint16(x uint16) bool { return x >= 127 }
func eq_127_uint16(x uint16) bool { return x == 127 }
func ne_127_uint16(x uint16) bool { return x != 127 }
func lt_128_uint16(x uint16) bool { return x < 128 }
func le_128_uint16(x uint16) bool { return x <= 128 }
func gt_128_uint16(x uint16) bool { return x > 128 }
func ge_128_uint16(x uint16) bool { return x >= 128 }
func eq_128_uint16(x uint16) bool { return x == 128 }
func ne_128_uint16(x uint16) bool { return x != 128 }
func lt_254_uint16(x uint16) bool { return x < 254 }
func le_254_uint16(x uint16) bool { return x <= 254 }
func gt_254_uint16(x uint16) bool { return x > 254 }
func ge_254_uint16(x uint16) bool { return x >= 254 }
func eq_254_uint16(x uint16) bool { return x == 254 }
func ne_254_uint16(x uint16) bool { return x != 254 }
func lt_255_uint16(x uint16) bool { return x < 255 }
func le_255_uint16(x uint16) bool { return x <= 255 }
func gt_255_uint16(x uint16) bool { return x > 255 }
func ge_255_uint16(x uint16) bool { return x >= 255 }
func eq_255_uint16(x uint16) bool { return x == 255 }
func ne_255_uint16(x uint16) bool { return x != 255 }
func lt_256_uint16(x uint16) bool { return x < 256 }
func le_256_uint16(x uint16) bool { return x <= 256 }
func gt_256_uint16(x uint16) bool { return x > 256 }
func ge_256_uint16(x uint16) bool { return x >= 256 }
func eq_256_uint16(x uint16) bool { return x == 256 }
func ne_256_uint16(x uint16) bool { return x != 256 }
func lt_32766_uint16(x uint16) bool { return x < 32766 }
func le_32766_uint16(x uint16) bool { return x <= 32766 }
func gt_32766_uint16(x uint16) bool { return x > 32766 }
func ge_32766_uint16(x uint16) bool { return x >= 32766 }
func eq_32766_uint16(x uint16) bool { return x == 32766 }
func ne_32766_uint16(x uint16) bool { return x != 32766 }
func lt_32767_uint16(x uint16) bool { return x < 32767 }
func le_32767_uint16(x uint16) bool { return x <= 32767 }
func gt_32767_uint16(x uint16) bool { return x > 32767 }
func ge_32767_uint16(x uint16) bool { return x >= 32767 }
func eq_32767_uint16(x uint16) bool { return x == 32767 }
func ne_32767_uint16(x uint16) bool { return x != 32767 }
func lt_32768_uint16(x uint16) bool { return x < 32768 }
func le_32768_uint16(x uint16) bool { return x <= 32768 }
func gt_32768_uint16(x uint16) bool { return x > 32768 }
func ge_32768_uint16(x uint16) bool { return x >= 32768 }
func eq_32768_uint16(x uint16) bool { return x == 32768 }
func ne_32768_uint16(x uint16) bool { return x != 32768 }
func lt_65534_uint16(x uint16) bool { return x < 65534 }
func le_65534_uint16(x uint16) bool { return x <= 65534 }
func gt_65534_uint16(x uint16) bool { return x > 65534 }
func ge_65534_uint16(x uint16) bool { return x >= 65534 }
func eq_65534_uint16(x uint16) bool { return x == 65534 }
func ne_65534_uint16(x uint16) bool { return x != 65534 }
func lt_65535_uint16(x uint16) bool { return x < 65535 }
func le_65535_uint16(x uint16) bool { return x <= 65535 }
func gt_65535_uint16(x uint16) bool { return x > 65535 }
func ge_65535_uint16(x uint16) bool { return x >= 65535 }
func eq_65535_uint16(x uint16) bool { return x == 65535 }
func ne_65535_uint16(x uint16) bool { return x != 65535 }
var uint16_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(uint16) bool
}{
{idx: 0, exp: lt, fn: lt_0_uint16},
{idx: 0, exp: le, fn: le_0_uint16},
{idx: 0, exp: gt, fn: gt_0_uint16},
{idx: 0, exp: ge, fn: ge_0_uint16},
{idx: 0, exp: eq, fn: eq_0_uint16},
{idx: 0, exp: ne, fn: ne_0_uint16},
{idx: 1, exp: lt, fn: lt_1_uint16},
{idx: 1, exp: le, fn: le_1_uint16},
{idx: 1, exp: gt, fn: gt_1_uint16},
{idx: 1, exp: ge, fn: ge_1_uint16},
{idx: 1, exp: eq, fn: eq_1_uint16},
{idx: 1, exp: ne, fn: ne_1_uint16},
{idx: 2, exp: lt, fn: lt_126_uint16},
{idx: 2, exp: le, fn: le_126_uint16},
{idx: 2, exp: gt, fn: gt_126_uint16},
{idx: 2, exp: ge, fn: ge_126_uint16},
{idx: 2, exp: eq, fn: eq_126_uint16},
{idx: 2, exp: ne, fn: ne_126_uint16},
{idx: 3, exp: lt, fn: lt_127_uint16},
{idx: 3, exp: le, fn: le_127_uint16},
{idx: 3, exp: gt, fn: gt_127_uint16},
{idx: 3, exp: ge, fn: ge_127_uint16},
{idx: 3, exp: eq, fn: eq_127_uint16},
{idx: 3, exp: ne, fn: ne_127_uint16},
{idx: 4, exp: lt, fn: lt_128_uint16},
{idx: 4, exp: le, fn: le_128_uint16},
{idx: 4, exp: gt, fn: gt_128_uint16},
{idx: 4, exp: ge, fn: ge_128_uint16},
{idx: 4, exp: eq, fn: eq_128_uint16},
{idx: 4, exp: ne, fn: ne_128_uint16},
{idx: 5, exp: lt, fn: lt_254_uint16},
{idx: 5, exp: le, fn: le_254_uint16},
{idx: 5, exp: gt, fn: gt_254_uint16},
{idx: 5, exp: ge, fn: ge_254_uint16},
{idx: 5, exp: eq, fn: eq_254_uint16},
{idx: 5, exp: ne, fn: ne_254_uint16},
{idx: 6, exp: lt, fn: lt_255_uint16},
{idx: 6, exp: le, fn: le_255_uint16},
{idx: 6, exp: gt, fn: gt_255_uint16},
{idx: 6, exp: ge, fn: ge_255_uint16},
{idx: 6, exp: eq, fn: eq_255_uint16},
{idx: 6, exp: ne, fn: ne_255_uint16},
{idx: 7, exp: lt, fn: lt_256_uint16},
{idx: 7, exp: le, fn: le_256_uint16},
{idx: 7, exp: gt, fn: gt_256_uint16},
{idx: 7, exp: ge, fn: ge_256_uint16},
{idx: 7, exp: eq, fn: eq_256_uint16},
{idx: 7, exp: ne, fn: ne_256_uint16},
{idx: 8, exp: lt, fn: lt_32766_uint16},
{idx: 8, exp: le, fn: le_32766_uint16},
{idx: 8, exp: gt, fn: gt_32766_uint16},
{idx: 8, exp: ge, fn: ge_32766_uint16},
{idx: 8, exp: eq, fn: eq_32766_uint16},
{idx: 8, exp: ne, fn: ne_32766_uint16},
{idx: 9, exp: lt, fn: lt_32767_uint16},
{idx: 9, exp: le, fn: le_32767_uint16},
{idx: 9, exp: gt, fn: gt_32767_uint16},
{idx: 9, exp: ge, fn: ge_32767_uint16},
{idx: 9, exp: eq, fn: eq_32767_uint16},
{idx: 9, exp: ne, fn: ne_32767_uint16},
{idx: 10, exp: lt, fn: lt_32768_uint16},
{idx: 10, exp: le, fn: le_32768_uint16},
{idx: 10, exp: gt, fn: gt_32768_uint16},
{idx: 10, exp: ge, fn: ge_32768_uint16},
{idx: 10, exp: eq, fn: eq_32768_uint16},
{idx: 10, exp: ne, fn: ne_32768_uint16},
{idx: 11, exp: lt, fn: lt_65534_uint16},
{idx: 11, exp: le, fn: le_65534_uint16},
{idx: 11, exp: gt, fn: gt_65534_uint16},
{idx: 11, exp: ge, fn: ge_65534_uint16},
{idx: 11, exp: eq, fn: eq_65534_uint16},
{idx: 11, exp: ne, fn: ne_65534_uint16},
{idx: 12, exp: lt, fn: lt_65535_uint16},
{idx: 12, exp: le, fn: le_65535_uint16},
{idx: 12, exp: gt, fn: gt_65535_uint16},
{idx: 12, exp: ge, fn: ge_65535_uint16},
{idx: 12, exp: eq, fn: eq_65535_uint16},
{idx: 12, exp: ne, fn: ne_65535_uint16},
}
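// A hedged sketch of the driver these tables appear to be written for (the
// driver itself is not part of this excerpt, and the exact shape of the
// result type is assumed): each entry's fn is applied to every value in the
// corresponding vals slice, and the outcome is checked against the relation
// named by exp, evaluated between that value and vals[idx]:
//
//	for _, tst := range uint16_tests {
//		c := uint16_vals[tst.idx] // the constant baked into tst.fn
//		for _, v := range uint16_vals {
//			want := expect(tst.exp, v, c) // hypothetical helper deriving the expected bool
//			if got := tst.fn(v); got != want {
//				panic("comparison folded incorrectly")
//			}
//		}
//	}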
// uint8 tests
var uint8_vals = []uint8{
0,
1,
126,
127,
128,
254,
255,
}
func lt_0_uint8(x uint8) bool { return x < 0 }
func le_0_uint8(x uint8) bool { return x <= 0 }
func gt_0_uint8(x uint8) bool { return x > 0 }
func ge_0_uint8(x uint8) bool { return x >= 0 }
func eq_0_uint8(x uint8) bool { return x == 0 }
func ne_0_uint8(x uint8) bool { return x != 0 }
func lt_1_uint8(x uint8) bool { return x < 1 }
func le_1_uint8(x uint8) bool { return x <= 1 }
func gt_1_uint8(x uint8) bool { return x > 1 }
func ge_1_uint8(x uint8) bool { return x >= 1 }
func eq_1_uint8(x uint8) bool { return x == 1 }
func ne_1_uint8(x uint8) bool { return x != 1 }
func lt_126_uint8(x uint8) bool { return x < 126 }
func le_126_uint8(x uint8) bool { return x <= 126 }
func gt_126_uint8(x uint8) bool { return x > 126 }
func ge_126_uint8(x uint8) bool { return x >= 126 }
func eq_126_uint8(x uint8) bool { return x == 126 }
func ne_126_uint8(x uint8) bool { return x != 126 }
func lt_127_uint8(x uint8) bool { return x < 127 }
func le_127_uint8(x uint8) bool { return x <= 127 }
func gt_127_uint8(x uint8) bool { return x > 127 }
func ge_127_uint8(x uint8) bool { return x >= 127 }
func eq_127_uint8(x uint8) bool { return x == 127 }
func ne_127_uint8(x uint8) bool { return x != 127 }
func lt_128_uint8(x uint8) bool { return x < 128 }
func le_128_uint8(x uint8) bool { return x <= 128 }
func gt_128_uint8(x uint8) bool { return x > 128 }
func ge_128_uint8(x uint8) bool { return x >= 128 }
func eq_128_uint8(x uint8) bool { return x == 128 }
func ne_128_uint8(x uint8) bool { return x != 128 }
func lt_254_uint8(x uint8) bool { return x < 254 }
func le_254_uint8(x uint8) bool { return x <= 254 }
func gt_254_uint8(x uint8) bool { return x > 254 }
func ge_254_uint8(x uint8) bool { return x >= 254 }
func eq_254_uint8(x uint8) bool { return x == 254 }
func ne_254_uint8(x uint8) bool { return x != 254 }
func lt_255_uint8(x uint8) bool { return x < 255 }
func le_255_uint8(x uint8) bool { return x <= 255 }
func gt_255_uint8(x uint8) bool { return x > 255 }
func ge_255_uint8(x uint8) bool { return x >= 255 }
func eq_255_uint8(x uint8) bool { return x == 255 }
func ne_255_uint8(x uint8) bool { return x != 255 }
var uint8_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(uint8) bool
}{
{idx: 0, exp: lt, fn: lt_0_uint8},
{idx: 0, exp: le, fn: le_0_uint8},
{idx: 0, exp: gt, fn: gt_0_uint8},
{idx: 0, exp: ge, fn: ge_0_uint8},
{idx: 0, exp: eq, fn: eq_0_uint8},
{idx: 0, exp: ne, fn: ne_0_uint8},
{idx: 1, exp: lt, fn: lt_1_uint8},
{idx: 1, exp: le, fn: le_1_uint8},
{idx: 1, exp: gt, fn: gt_1_uint8},
{idx: 1, exp: ge, fn: ge_1_uint8},
{idx: 1, exp: eq, fn: eq_1_uint8},
{idx: 1, exp: ne, fn: ne_1_uint8},
{idx: 2, exp: lt, fn: lt_126_uint8},
{idx: 2, exp: le, fn: le_126_uint8},
{idx: 2, exp: gt, fn: gt_126_uint8},
{idx: 2, exp: ge, fn: ge_126_uint8},
{idx: 2, exp: eq, fn: eq_126_uint8},
{idx: 2, exp: ne, fn: ne_126_uint8},
{idx: 3, exp: lt, fn: lt_127_uint8},
{idx: 3, exp: le, fn: le_127_uint8},
{idx: 3, exp: gt, fn: gt_127_uint8},
{idx: 3, exp: ge, fn: ge_127_uint8},
{idx: 3, exp: eq, fn: eq_127_uint8},
{idx: 3, exp: ne, fn: ne_127_uint8},
{idx: 4, exp: lt, fn: lt_128_uint8},
{idx: 4, exp: le, fn: le_128_uint8},
{idx: 4, exp: gt, fn: gt_128_uint8},
{idx: 4, exp: ge, fn: ge_128_uint8},
{idx: 4, exp: eq, fn: eq_128_uint8},
{idx: 4, exp: ne, fn: ne_128_uint8},
{idx: 5, exp: lt, fn: lt_254_uint8},
{idx: 5, exp: le, fn: le_254_uint8},
{idx: 5, exp: gt, fn: gt_254_uint8},
{idx: 5, exp: ge, fn: ge_254_uint8},
{idx: 5, exp: eq, fn: eq_254_uint8},
{idx: 5, exp: ne, fn: ne_254_uint8},
{idx: 6, exp: lt, fn: lt_255_uint8},
{idx: 6, exp: le, fn: le_255_uint8},
{idx: 6, exp: gt, fn: gt_255_uint8},
{idx: 6, exp: ge, fn: ge_255_uint8},
{idx: 6, exp: eq, fn: eq_255_uint8},
{idx: 6, exp: ne, fn: ne_255_uint8},
}
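// For uint8 the interesting edges are 0, the 126/127/128 triple around the
// int8 boundary, and 254/255: comparisons such as x > 255 or x >= 0 have
// compile-time-known answers, while x > 127 is plausibly here to catch
// signed/unsigned confusion when byte compares are lowered.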
// int64 tests
var int64_vals = []int64{
-9223372036854775808,
-9223372036854775807,
-2147483649,
-2147483648,
-2147483647,
-32769,
-32768,
-32767,
-129,
-128,
-127,
-1,
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
32768,
65534,
65535,
65536,
2147483646,
2147483647,
2147483648,
4278190080,
4294967294,
4294967295,
4294967296,
1095216660480,
9223372036854775806,
9223372036854775807,
}
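// The int64 constants cover the signed and unsigned boundaries of every
// narrower width (±127/±128, ±32767/±32768, 255/256, 65535/65536, and the
// int32/uint32 limits), plus 4278190080 (0xFF000000) and 1095216660480
// (0xFF00000000), which presumably probe immediates that only fit particular
// instruction encodings.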
func lt_neg9223372036854775808_int64(x int64) bool { return x < -9223372036854775808 }
func le_neg9223372036854775808_int64(x int64) bool { return x <= -9223372036854775808 }
func gt_neg9223372036854775808_int64(x int64) bool { return x > -9223372036854775808 }
func ge_neg9223372036854775808_int64(x int64) bool { return x >= -9223372036854775808 }
func eq_neg9223372036854775808_int64(x int64) bool { return x == -9223372036854775808 }
func ne_neg9223372036854775808_int64(x int64) bool { return x != -9223372036854775808 }
func lt_neg9223372036854775807_int64(x int64) bool { return x < -9223372036854775807 }
func le_neg9223372036854775807_int64(x int64) bool { return x <= -9223372036854775807 }
func gt_neg9223372036854775807_int64(x int64) bool { return x > -9223372036854775807 }
func ge_neg9223372036854775807_int64(x int64) bool { return x >= -9223372036854775807 }
func eq_neg9223372036854775807_int64(x int64) bool { return x == -9223372036854775807 }
func ne_neg9223372036854775807_int64(x int64) bool { return x != -9223372036854775807 }
func lt_neg2147483649_int64(x int64) bool { return x < -2147483649 }
func le_neg2147483649_int64(x int64) bool { return x <= -2147483649 }
func gt_neg2147483649_int64(x int64) bool { return x > -2147483649 }
func ge_neg2147483649_int64(x int64) bool { return x >= -2147483649 }
func eq_neg2147483649_int64(x int64) bool { return x == -2147483649 }
func ne_neg2147483649_int64(x int64) bool { return x != -2147483649 }
func lt_neg2147483648_int64(x int64) bool { return x < -2147483648 }
func le_neg2147483648_int64(x int64) bool { return x <= -2147483648 }
func gt_neg2147483648_int64(x int64) bool { return x > -2147483648 }
func ge_neg2147483648_int64(x int64) bool { return x >= -2147483648 }
func eq_neg2147483648_int64(x int64) bool { return x == -2147483648 }
func ne_neg2147483648_int64(x int64) bool { return x != -2147483648 }
func lt_neg2147483647_int64(x int64) bool { return x < -2147483647 }
func le_neg2147483647_int64(x int64) bool { return x <= -2147483647 }
func gt_neg2147483647_int64(x int64) bool { return x > -2147483647 }
func ge_neg2147483647_int64(x int64) bool { return x >= -2147483647 }
func eq_neg2147483647_int64(x int64) bool { return x == -2147483647 }
func ne_neg2147483647_int64(x int64) bool { return x != -2147483647 }
func lt_neg32769_int64(x int64) bool { return x < -32769 }
func le_neg32769_int64(x int64) bool { return x <= -32769 }
func gt_neg32769_int64(x int64) bool { return x > -32769 }
func ge_neg32769_int64(x int64) bool { return x >= -32769 }
func eq_neg32769_int64(x int64) bool { return x == -32769 }
func ne_neg32769_int64(x int64) bool { return x != -32769 }
func lt_neg32768_int64(x int64) bool { return x < -32768 }
func le_neg32768_int64(x int64) bool { return x <= -32768 }
func gt_neg32768_int64(x int64) bool { return x > -32768 }
func ge_neg32768_int64(x int64) bool { return x >= -32768 }
func eq_neg32768_int64(x int64) bool { return x == -32768 }
func ne_neg32768_int64(x int64) bool { return x != -32768 }
func lt_neg32767_int64(x int64) bool { return x < -32767 }
func le_neg32767_int64(x int64) bool { return x <= -32767 }
func gt_neg32767_int64(x int64) bool { return x > -32767 }
func ge_neg32767_int64(x int64) bool { return x >= -32767 }
func eq_neg32767_int64(x int64) bool { return x == -32767 }
func ne_neg32767_int64(x int64) bool { return x != -32767 }
func lt_neg129_int64(x int64) bool { return x < -129 }
func le_neg129_int64(x int64) bool { return x <= -129 }
func gt_neg129_int64(x int64) bool { return x > -129 }
func ge_neg129_int64(x int64) bool { return x >= -129 }
func eq_neg129_int64(x int64) bool { return x == -129 }
func ne_neg129_int64(x int64) bool { return x != -129 }
func lt_neg128_int64(x int64) bool { return x < -128 }
func le_neg128_int64(x int64) bool { return x <= -128 }
func gt_neg128_int64(x int64) bool { return x > -128 }
func ge_neg128_int64(x int64) bool { return x >= -128 }
func eq_neg128_int64(x int64) bool { return x == -128 }
func ne_neg128_int64(x int64) bool { return x != -128 }
func lt_neg127_int64(x int64) bool { return x < -127 }
func le_neg127_int64(x int64) bool { return x <= -127 }
func gt_neg127_int64(x int64) bool { return x > -127 }
func ge_neg127_int64(x int64) bool { return x >= -127 }
func eq_neg127_int64(x int64) bool { return x == -127 }
func ne_neg127_int64(x int64) bool { return x != -127 }
func lt_neg1_int64(x int64) bool { return x < -1 }
func le_neg1_int64(x int64) bool { return x <= -1 }
func gt_neg1_int64(x int64) bool { return x > -1 }
func ge_neg1_int64(x int64) bool { return x >= -1 }
func eq_neg1_int64(x int64) bool { return x == -1 }
func ne_neg1_int64(x int64) bool { return x != -1 }
func lt_0_int64(x int64) bool { return x < 0 }
func le_0_int64(x int64) bool { return x <= 0 }
func gt_0_int64(x int64) bool { return x > 0 }
func ge_0_int64(x int64) bool { return x >= 0 }
func eq_0_int64(x int64) bool { return x == 0 }
func ne_0_int64(x int64) bool { return x != 0 }
func lt_1_int64(x int64) bool { return x < 1 }
func le_1_int64(x int64) bool { return x <= 1 }
func gt_1_int64(x int64) bool { return x > 1 }
func ge_1_int64(x int64) bool { return x >= 1 }
func eq_1_int64(x int64) bool { return x == 1 }
func ne_1_int64(x int64) bool { return x != 1 }
func lt_126_int64(x int64) bool { return x < 126 }
func le_126_int64(x int64) bool { return x <= 126 }
func gt_126_int64(x int64) bool { return x > 126 }
func ge_126_int64(x int64) bool { return x >= 126 }
func eq_126_int64(x int64) bool { return x == 126 }
func ne_126_int64(x int64) bool { return x != 126 }
func lt_127_int64(x int64) bool { return x < 127 }
func le_127_int64(x int64) bool { return x <= 127 }
func gt_127_int64(x int64) bool { return x > 127 }
func ge_127_int64(x int64) bool { return x >= 127 }
func eq_127_int64(x int64) bool { return x == 127 }
func ne_127_int64(x int64) bool { return x != 127 }
func lt_128_int64(x int64) bool { return x < 128 }
func le_128_int64(x int64) bool { return x <= 128 }
func gt_128_int64(x int64) bool { return x > 128 }
func ge_128_int64(x int64) bool { return x >= 128 }
func eq_128_int64(x int64) bool { return x == 128 }
func ne_128_int64(x int64) bool { return x != 128 }
func lt_254_int64(x int64) bool { return x < 254 }
func le_254_int64(x int64) bool { return x <= 254 }
func gt_254_int64(x int64) bool { return x > 254 }
func ge_254_int64(x int64) bool { return x >= 254 }
func eq_254_int64(x int64) bool { return x == 254 }
func ne_254_int64(x int64) bool { return x != 254 }
func lt_255_int64(x int64) bool { return x < 255 }
func le_255_int64(x int64) bool { return x <= 255 }
func gt_255_int64(x int64) bool { return x > 255 }
func ge_255_int64(x int64) bool { return x >= 255 }
func eq_255_int64(x int64) bool { return x == 255 }
func ne_255_int64(x int64) bool { return x != 255 }
func lt_256_int64(x int64) bool { return x < 256 }
func le_256_int64(x int64) bool { return x <= 256 }
func gt_256_int64(x int64) bool { return x > 256 }
func ge_256_int64(x int64) bool { return x >= 256 }
func eq_256_int64(x int64) bool { return x == 256 }
func ne_256_int64(x int64) bool { return x != 256 }
func lt_32766_int64(x int64) bool { return x < 32766 }
func le_32766_int64(x int64) bool { return x <= 32766 }
func gt_32766_int64(x int64) bool { return x > 32766 }
func ge_32766_int64(x int64) bool { return x >= 32766 }
func eq_32766_int64(x int64) bool { return x == 32766 }
func ne_32766_int64(x int64) bool { return x != 32766 }
func lt_32767_int64(x int64) bool { return x < 32767 }
func le_32767_int64(x int64) bool { return x <= 32767 }
func gt_32767_int64(x int64) bool { return x > 32767 }
func ge_32767_int64(x int64) bool { return x >= 32767 }
func eq_32767_int64(x int64) bool { return x == 32767 }
func ne_32767_int64(x int64) bool { return x != 32767 }
func lt_32768_int64(x int64) bool { return x < 32768 }
func le_32768_int64(x int64) bool { return x <= 32768 }
func gt_32768_int64(x int64) bool { return x > 32768 }
func ge_32768_int64(x int64) bool { return x >= 32768 }
func eq_32768_int64(x int64) bool { return x == 32768 }
func ne_32768_int64(x int64) bool { return x != 32768 }
func lt_65534_int64(x int64) bool { return x < 65534 }
func le_65534_int64(x int64) bool { return x <= 65534 }
func gt_65534_int64(x int64) bool { return x > 65534 }
func ge_65534_int64(x int64) bool { return x >= 65534 }
func eq_65534_int64(x int64) bool { return x == 65534 }
func ne_65534_int64(x int64) bool { return x != 65534 }
func lt_65535_int64(x int64) bool { return x < 65535 }
func le_65535_int64(x int64) bool { return x <= 65535 }
func gt_65535_int64(x int64) bool { return x > 65535 }
func ge_65535_int64(x int64) bool { return x >= 65535 }
func eq_65535_int64(x int64) bool { return x == 65535 }
func ne_65535_int64(x int64) bool { return x != 65535 }
func lt_65536_int64(x int64) bool { return x < 65536 }
func le_65536_int64(x int64) bool { return x <= 65536 }
func gt_65536_int64(x int64) bool { return x > 65536 }
func ge_65536_int64(x int64) bool { return x >= 65536 }
func eq_65536_int64(x int64) bool { return x == 65536 }
func ne_65536_int64(x int64) bool { return x != 65536 }
func lt_2147483646_int64(x int64) bool { return x < 2147483646 }
func le_2147483646_int64(x int64) bool { return x <= 2147483646 }
func gt_2147483646_int64(x int64) bool { return x > 2147483646 }
func ge_2147483646_int64(x int64) bool { return x >= 2147483646 }
func eq_2147483646_int64(x int64) bool { return x == 2147483646 }
func ne_2147483646_int64(x int64) bool { return x != 2147483646 }
func lt_2147483647_int64(x int64) bool { return x < 2147483647 }
func le_2147483647_int64(x int64) bool { return x <= 2147483647 }
func gt_2147483647_int64(x int64) bool { return x > 2147483647 }
func ge_2147483647_int64(x int64) bool { return x >= 2147483647 }
func eq_2147483647_int64(x int64) bool { return x == 2147483647 }
func ne_2147483647_int64(x int64) bool { return x != 2147483647 }
func lt_2147483648_int64(x int64) bool { return x < 2147483648 }
func le_2147483648_int64(x int64) bool { return x <= 2147483648 }
func gt_2147483648_int64(x int64) bool { return x > 2147483648 }
func ge_2147483648_int64(x int64) bool { return x >= 2147483648 }
func eq_2147483648_int64(x int64) bool { return x == 2147483648 }
func ne_2147483648_int64(x int64) bool { return x != 2147483648 }
func lt_4278190080_int64(x int64) bool { return x < 4278190080 }
func le_4278190080_int64(x int64) bool { return x <= 4278190080 }
func gt_4278190080_int64(x int64) bool { return x > 4278190080 }
func ge_4278190080_int64(x int64) bool { return x >= 4278190080 }
func eq_4278190080_int64(x int64) bool { return x == 4278190080 }
func ne_4278190080_int64(x int64) bool { return x != 4278190080 }
func lt_4294967294_int64(x int64) bool { return x < 4294967294 }
func le_4294967294_int64(x int64) bool { return x <= 4294967294 }
func gt_4294967294_int64(x int64) bool { return x > 4294967294 }
func ge_4294967294_int64(x int64) bool { return x >= 4294967294 }
func eq_4294967294_int64(x int64) bool { return x == 4294967294 }
func ne_4294967294_int64(x int64) bool { return x != 4294967294 }
func lt_4294967295_int64(x int64) bool { return x < 4294967295 }
func le_4294967295_int64(x int64) bool { return x <= 4294967295 }
func gt_4294967295_int64(x int64) bool { return x > 4294967295 }
func ge_4294967295_int64(x int64) bool { return x >= 4294967295 }
func eq_4294967295_int64(x int64) bool { return x == 4294967295 }
func ne_4294967295_int64(x int64) bool { return x != 4294967295 }
func lt_4294967296_int64(x int64) bool { return x < 4294967296 }
func le_4294967296_int64(x int64) bool { return x <= 4294967296 }
func gt_4294967296_int64(x int64) bool { return x > 4294967296 }
func ge_4294967296_int64(x int64) bool { return x >= 4294967296 }
func eq_4294967296_int64(x int64) bool { return x == 4294967296 }
func ne_4294967296_int64(x int64) bool { return x != 4294967296 }
func lt_1095216660480_int64(x int64) bool { return x < 1095216660480 }
func le_1095216660480_int64(x int64) bool { return x <= 1095216660480 }
func gt_1095216660480_int64(x int64) bool { return x > 1095216660480 }
func ge_1095216660480_int64(x int64) bool { return x >= 1095216660480 }
func eq_1095216660480_int64(x int64) bool { return x == 1095216660480 }
func ne_1095216660480_int64(x int64) bool { return x != 1095216660480 }
func lt_9223372036854775806_int64(x int64) bool { return x < 9223372036854775806 }
func le_9223372036854775806_int64(x int64) bool { return x <= 9223372036854775806 }
func gt_9223372036854775806_int64(x int64) bool { return x > 9223372036854775806 }
func ge_9223372036854775806_int64(x int64) bool { return x >= 9223372036854775806 }
func eq_9223372036854775806_int64(x int64) bool { return x == 9223372036854775806 }
func ne_9223372036854775806_int64(x int64) bool { return x != 9223372036854775806 }
func lt_9223372036854775807_int64(x int64) bool { return x < 9223372036854775807 }
func le_9223372036854775807_int64(x int64) bool { return x <= 9223372036854775807 }
func gt_9223372036854775807_int64(x int64) bool { return x > 9223372036854775807 }
func ge_9223372036854775807_int64(x int64) bool { return x >= 9223372036854775807 }
func eq_9223372036854775807_int64(x int64) bool { return x == 9223372036854775807 }
func ne_9223372036854775807_int64(x int64) bool { return x != 9223372036854775807 }
var int64_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(int64) bool
}{
{idx: 0, exp: lt, fn: lt_neg9223372036854775808_int64},
{idx: 0, exp: le, fn: le_neg9223372036854775808_int64},
{idx: 0, exp: gt, fn: gt_neg9223372036854775808_int64},
{idx: 0, exp: ge, fn: ge_neg9223372036854775808_int64},
{idx: 0, exp: eq, fn: eq_neg9223372036854775808_int64},
{idx: 0, exp: ne, fn: ne_neg9223372036854775808_int64},
{idx: 1, exp: lt, fn: lt_neg9223372036854775807_int64},
{idx: 1, exp: le, fn: le_neg9223372036854775807_int64},
{idx: 1, exp: gt, fn: gt_neg9223372036854775807_int64},
{idx: 1, exp: ge, fn: ge_neg9223372036854775807_int64},
{idx: 1, exp: eq, fn: eq_neg9223372036854775807_int64},
{idx: 1, exp: ne, fn: ne_neg9223372036854775807_int64},
{idx: 2, exp: lt, fn: lt_neg2147483649_int64},
{idx: 2, exp: le, fn: le_neg2147483649_int64},
{idx: 2, exp: gt, fn: gt_neg2147483649_int64},
{idx: 2, exp: ge, fn: ge_neg2147483649_int64},
{idx: 2, exp: eq, fn: eq_neg2147483649_int64},
{idx: 2, exp: ne, fn: ne_neg2147483649_int64},
{idx: 3, exp: lt, fn: lt_neg2147483648_int64},
{idx: 3, exp: le, fn: le_neg2147483648_int64},
{idx: 3, exp: gt, fn: gt_neg2147483648_int64},
{idx: 3, exp: ge, fn: ge_neg2147483648_int64},
{idx: 3, exp: eq, fn: eq_neg2147483648_int64},
{idx: 3, exp: ne, fn: ne_neg2147483648_int64},
{idx: 4, exp: lt, fn: lt_neg2147483647_int64},
{idx: 4, exp: le, fn: le_neg2147483647_int64},
{idx: 4, exp: gt, fn: gt_neg2147483647_int64},
{idx: 4, exp: ge, fn: ge_neg2147483647_int64},
{idx: 4, exp: eq, fn: eq_neg2147483647_int64},
{idx: 4, exp: ne, fn: ne_neg2147483647_int64},
{idx: 5, exp: lt, fn: lt_neg32769_int64},
{idx: 5, exp: le, fn: le_neg32769_int64},
{idx: 5, exp: gt, fn: gt_neg32769_int64},
{idx: 5, exp: ge, fn: ge_neg32769_int64},
{idx: 5, exp: eq, fn: eq_neg32769_int64},
{idx: 5, exp: ne, fn: ne_neg32769_int64},
{idx: 6, exp: lt, fn: lt_neg32768_int64},
{idx: 6, exp: le, fn: le_neg32768_int64},
{idx: 6, exp: gt, fn: gt_neg32768_int64},
{idx: 6, exp: ge, fn: ge_neg32768_int64},
{idx: 6, exp: eq, fn: eq_neg32768_int64},
{idx: 6, exp: ne, fn: ne_neg32768_int64},
{idx: 7, exp: lt, fn: lt_neg32767_int64},
{idx: 7, exp: le, fn: le_neg32767_int64},
{idx: 7, exp: gt, fn: gt_neg32767_int64},
{idx: 7, exp: ge, fn: ge_neg32767_int64},
{idx: 7, exp: eq, fn: eq_neg32767_int64},
{idx: 7, exp: ne, fn: ne_neg32767_int64},
{idx: 8, exp: lt, fn: lt_neg129_int64},
{idx: 8, exp: le, fn: le_neg129_int64},
{idx: 8, exp: gt, fn: gt_neg129_int64},
{idx: 8, exp: ge, fn: ge_neg129_int64},
{idx: 8, exp: eq, fn: eq_neg129_int64},
{idx: 8, exp: ne, fn: ne_neg129_int64},
{idx: 9, exp: lt, fn: lt_neg128_int64},
{idx: 9, exp: le, fn: le_neg128_int64},
{idx: 9, exp: gt, fn: gt_neg128_int64},
{idx: 9, exp: ge, fn: ge_neg128_int64},
{idx: 9, exp: eq, fn: eq_neg128_int64},
{idx: 9, exp: ne, fn: ne_neg128_int64},
{idx: 10, exp: lt, fn: lt_neg127_int64},
{idx: 10, exp: le, fn: le_neg127_int64},
{idx: 10, exp: gt, fn: gt_neg127_int64},
{idx: 10, exp: ge, fn: ge_neg127_int64},
{idx: 10, exp: eq, fn: eq_neg127_int64},
{idx: 10, exp: ne, fn: ne_neg127_int64},
{idx: 11, exp: lt, fn: lt_neg1_int64},
{idx: 11, exp: le, fn: le_neg1_int64},
{idx: 11, exp: gt, fn: gt_neg1_int64},
{idx: 11, exp: ge, fn: ge_neg1_int64},
{idx: 11, exp: eq, fn: eq_neg1_int64},
{idx: 11, exp: ne, fn: ne_neg1_int64},
{idx: 12, exp: lt, fn: lt_0_int64},
{idx: 12, exp: le, fn: le_0_int64},
{idx: 12, exp: gt, fn: gt_0_int64},
{idx: 12, exp: ge, fn: ge_0_int64},
{idx: 12, exp: eq, fn: eq_0_int64},
{idx: 12, exp: ne, fn: ne_0_int64},
{idx: 13, exp: lt, fn: lt_1_int64},
{idx: 13, exp: le, fn: le_1_int64},
{idx: 13, exp: gt, fn: gt_1_int64},
{idx: 13, exp: ge, fn: ge_1_int64},
{idx: 13, exp: eq, fn: eq_1_int64},
{idx: 13, exp: ne, fn: ne_1_int64},
{idx: 14, exp: lt, fn: lt_126_int64},
{idx: 14, exp: le, fn: le_126_int64},
{idx: 14, exp: gt, fn: gt_126_int64},
{idx: 14, exp: ge, fn: ge_126_int64},
{idx: 14, exp: eq, fn: eq_126_int64},
{idx: 14, exp: ne, fn: ne_126_int64},
{idx: 15, exp: lt, fn: lt_127_int64},
{idx: 15, exp: le, fn: le_127_int64},
{idx: 15, exp: gt, fn: gt_127_int64},
{idx: 15, exp: ge, fn: ge_127_int64},
{idx: 15, exp: eq, fn: eq_127_int64},
{idx: 15, exp: ne, fn: ne_127_int64},
{idx: 16, exp: lt, fn: lt_128_int64},
{idx: 16, exp: le, fn: le_128_int64},
{idx: 16, exp: gt, fn: gt_128_int64},
{idx: 16, exp: ge, fn: ge_128_int64},
{idx: 16, exp: eq, fn: eq_128_int64},
{idx: 16, exp: ne, fn: ne_128_int64},
{idx: 17, exp: lt, fn: lt_254_int64},
{idx: 17, exp: le, fn: le_254_int64},
{idx: 17, exp: gt, fn: gt_254_int64},
{idx: 17, exp: ge, fn: ge_254_int64},
{idx: 17, exp: eq, fn: eq_254_int64},
{idx: 17, exp: ne, fn: ne_254_int64},
{idx: 18, exp: lt, fn: lt_255_int64},
{idx: 18, exp: le, fn: le_255_int64},
{idx: 18, exp: gt, fn: gt_255_int64},
{idx: 18, exp: ge, fn: ge_255_int64},
{idx: 18, exp: eq, fn: eq_255_int64},
{idx: 18, exp: ne, fn: ne_255_int64},
{idx: 19, exp: lt, fn: lt_256_int64},
{idx: 19, exp: le, fn: le_256_int64},
{idx: 19, exp: gt, fn: gt_256_int64},
{idx: 19, exp: ge, fn: ge_256_int64},
{idx: 19, exp: eq, fn: eq_256_int64},
{idx: 19, exp: ne, fn: ne_256_int64},
{idx: 20, exp: lt, fn: lt_32766_int64},
{idx: 20, exp: le, fn: le_32766_int64},
{idx: 20, exp: gt, fn: gt_32766_int64},
{idx: 20, exp: ge, fn: ge_32766_int64},
{idx: 20, exp: eq, fn: eq_32766_int64},
{idx: 20, exp: ne, fn: ne_32766_int64},
{idx: 21, exp: lt, fn: lt_32767_int64},
{idx: 21, exp: le, fn: le_32767_int64},
{idx: 21, exp: gt, fn: gt_32767_int64},
{idx: 21, exp: ge, fn: ge_32767_int64},
{idx: 21, exp: eq, fn: eq_32767_int64},
{idx: 21, exp: ne, fn: ne_32767_int64},
{idx: 22, exp: lt, fn: lt_32768_int64},
{idx: 22, exp: le, fn: le_32768_int64},
{idx: 22, exp: gt, fn: gt_32768_int64},
{idx: 22, exp: ge, fn: ge_32768_int64},
{idx: 22, exp: eq, fn: eq_32768_int64},
{idx: 22, exp: ne, fn: ne_32768_int64},
{idx: 23, exp: lt, fn: lt_65534_int64},
{idx: 23, exp: le, fn: le_65534_int64},
{idx: 23, exp: gt, fn: gt_65534_int64},
{idx: 23, exp: ge, fn: ge_65534_int64},
{idx: 23, exp: eq, fn: eq_65534_int64},
{idx: 23, exp: ne, fn: ne_65534_int64},
{idx: 24, exp: lt, fn: lt_65535_int64},
{idx: 24, exp: le, fn: le_65535_int64},
{idx: 24, exp: gt, fn: gt_65535_int64},
{idx: 24, exp: ge, fn: ge_65535_int64},
{idx: 24, exp: eq, fn: eq_65535_int64},
{idx: 24, exp: ne, fn: ne_65535_int64},
{idx: 25, exp: lt, fn: lt_65536_int64},
{idx: 25, exp: le, fn: le_65536_int64},
{idx: 25, exp: gt, fn: gt_65536_int64},
{idx: 25, exp: ge, fn: ge_65536_int64},
{idx: 25, exp: eq, fn: eq_65536_int64},
{idx: 25, exp: ne, fn: ne_65536_int64},
{idx: 26, exp: lt, fn: lt_2147483646_int64},
{idx: 26, exp: le, fn: le_2147483646_int64},
{idx: 26, exp: gt, fn: gt_2147483646_int64},
{idx: 26, exp: ge, fn: ge_2147483646_int64},
{idx: 26, exp: eq, fn: eq_2147483646_int64},
{idx: 26, exp: ne, fn: ne_2147483646_int64},
{idx: 27, exp: lt, fn: lt_2147483647_int64},
{idx: 27, exp: le, fn: le_2147483647_int64},
{idx: 27, exp: gt, fn: gt_2147483647_int64},
{idx: 27, exp: ge, fn: ge_2147483647_int64},
{idx: 27, exp: eq, fn: eq_2147483647_int64},
{idx: 27, exp: ne, fn: ne_2147483647_int64},
{idx: 28, exp: lt, fn: lt_2147483648_int64},
{idx: 28, exp: le, fn: le_2147483648_int64},
{idx: 28, exp: gt, fn: gt_2147483648_int64},
{idx: 28, exp: ge, fn: ge_2147483648_int64},
{idx: 28, exp: eq, fn: eq_2147483648_int64},
{idx: 28, exp: ne, fn: ne_2147483648_int64},
{idx: 29, exp: lt, fn: lt_4278190080_int64},
{idx: 29, exp: le, fn: le_4278190080_int64},
{idx: 29, exp: gt, fn: gt_4278190080_int64},
{idx: 29, exp: ge, fn: ge_4278190080_int64},
{idx: 29, exp: eq, fn: eq_4278190080_int64},
{idx: 29, exp: ne, fn: ne_4278190080_int64},
{idx: 30, exp: lt, fn: lt_4294967294_int64},
{idx: 30, exp: le, fn: le_4294967294_int64},
{idx: 30, exp: gt, fn: gt_4294967294_int64},
{idx: 30, exp: ge, fn: ge_4294967294_int64},
{idx: 30, exp: eq, fn: eq_4294967294_int64},
{idx: 30, exp: ne, fn: ne_4294967294_int64},
{idx: 31, exp: lt, fn: lt_4294967295_int64},
{idx: 31, exp: le, fn: le_4294967295_int64},
{idx: 31, exp: gt, fn: gt_4294967295_int64},
{idx: 31, exp: ge, fn: ge_4294967295_int64},
{idx: 31, exp: eq, fn: eq_4294967295_int64},
{idx: 31, exp: ne, fn: ne_4294967295_int64},
{idx: 32, exp: lt, fn: lt_4294967296_int64},
{idx: 32, exp: le, fn: le_4294967296_int64},
{idx: 32, exp: gt, fn: gt_4294967296_int64},
{idx: 32, exp: ge, fn: ge_4294967296_int64},
{idx: 32, exp: eq, fn: eq_4294967296_int64},
{idx: 32, exp: ne, fn: ne_4294967296_int64},
{idx: 33, exp: lt, fn: lt_1095216660480_int64},
{idx: 33, exp: le, fn: le_1095216660480_int64},
{idx: 33, exp: gt, fn: gt_1095216660480_int64},
{idx: 33, exp: ge, fn: ge_1095216660480_int64},
{idx: 33, exp: eq, fn: eq_1095216660480_int64},
{idx: 33, exp: ne, fn: ne_1095216660480_int64},
{idx: 34, exp: lt, fn: lt_9223372036854775806_int64},
{idx: 34, exp: le, fn: le_9223372036854775806_int64},
{idx: 34, exp: gt, fn: gt_9223372036854775806_int64},
{idx: 34, exp: ge, fn: ge_9223372036854775806_int64},
{idx: 34, exp: eq, fn: eq_9223372036854775806_int64},
{idx: 34, exp: ne, fn: ne_9223372036854775806_int64},
{idx: 35, exp: lt, fn: lt_9223372036854775807_int64},
{idx: 35, exp: le, fn: le_9223372036854775807_int64},
{idx: 35, exp: gt, fn: gt_9223372036854775807_int64},
{idx: 35, exp: ge, fn: ge_9223372036854775807_int64},
{idx: 35, exp: eq, fn: eq_9223372036854775807_int64},
{idx: 35, exp: ne, fn: ne_9223372036854775807_int64},
}
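// As with the unsigned tables, the extremes are deliberately degenerate:
// x < -9223372036854775808 is always false and x <= 9223372036854775807 is
// always true for an int64, so a folding compiler must reduce them to
// constants while handling the MinInt64 literal correctly (its positive
// counterpart is not representable in int64).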
// int32 tests
var int32_vals = []int32{
-2147483648,
-2147483647,
-32769,
-32768,
-32767,
-129,
-128,
-127,
-1,
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
32768,
65534,
65535,
65536,
2147483646,
2147483647,
}
func lt_neg2147483648_int32(x int32) bool { return x < -2147483648 }
func le_neg2147483648_int32(x int32) bool { return x <= -2147483648 }
func gt_neg2147483648_int32(x int32) bool { return x > -2147483648 }
func ge_neg2147483648_int32(x int32) bool { return x >= -2147483648 }
func eq_neg2147483648_int32(x int32) bool { return x == -2147483648 }
func ne_neg2147483648_int32(x int32) bool { return x != -2147483648 }
func lt_neg2147483647_int32(x int32) bool { return x < -2147483647 }
func le_neg2147483647_int32(x int32) bool { return x <= -2147483647 }
func gt_neg2147483647_int32(x int32) bool { return x > -2147483647 }
func ge_neg2147483647_int32(x int32) bool { return x >= -2147483647 }
func eq_neg2147483647_int32(x int32) bool { return x == -2147483647 }
func ne_neg2147483647_int32(x int32) bool { return x != -2147483647 }
func lt_neg32769_int32(x int32) bool { return x < -32769 }
func le_neg32769_int32(x int32) bool { return x <= -32769 }
func gt_neg32769_int32(x int32) bool { return x > -32769 }
func ge_neg32769_int32(x int32) bool { return x >= -32769 }
func eq_neg32769_int32(x int32) bool { return x == -32769 }
func ne_neg32769_int32(x int32) bool { return x != -32769 }
func lt_neg32768_int32(x int32) bool { return x < -32768 }
func le_neg32768_int32(x int32) bool { return x <= -32768 }
func gt_neg32768_int32(x int32) bool { return x > -32768 }
func ge_neg32768_int32(x int32) bool { return x >= -32768 }
func eq_neg32768_int32(x int32) bool { return x == -32768 }
func ne_neg32768_int32(x int32) bool { return x != -32768 }
func lt_neg32767_int32(x int32) bool { return x < -32767 }
func le_neg32767_int32(x int32) bool { return x <= -32767 }
func gt_neg32767_int32(x int32) bool { return x > -32767 }
func ge_neg32767_int32(x int32) bool { return x >= -32767 }
func eq_neg32767_int32(x int32) bool { return x == -32767 }
func ne_neg32767_int32(x int32) bool { return x != -32767 }
func lt_neg129_int32(x int32) bool { return x < -129 }
func le_neg129_int32(x int32) bool { return x <= -129 }
func gt_neg129_int32(x int32) bool { return x > -129 }
func ge_neg129_int32(x int32) bool { return x >= -129 }
func eq_neg129_int32(x int32) bool { return x == -129 }
func ne_neg129_int32(x int32) bool { return x != -129 }
func lt_neg128_int32(x int32) bool { return x < -128 }
func le_neg128_int32(x int32) bool { return x <= -128 }
func gt_neg128_int32(x int32) bool { return x > -128 }
func ge_neg128_int32(x int32) bool { return x >= -128 }
func eq_neg128_int32(x int32) bool { return x == -128 }
func ne_neg128_int32(x int32) bool { return x != -128 }
func lt_neg127_int32(x int32) bool { return x < -127 }
func le_neg127_int32(x int32) bool { return x <= -127 }
func gt_neg127_int32(x int32) bool { return x > -127 }
func ge_neg127_int32(x int32) bool { return x >= -127 }
func eq_neg127_int32(x int32) bool { return x == -127 }
func ne_neg127_int32(x int32) bool { return x != -127 }
func lt_neg1_int32(x int32) bool { return x < -1 }
func le_neg1_int32(x int32) bool { return x <= -1 }
func gt_neg1_int32(x int32) bool { return x > -1 }
func ge_neg1_int32(x int32) bool { return x >= -1 }
func eq_neg1_int32(x int32) bool { return x == -1 }
func ne_neg1_int32(x int32) bool { return x != -1 }
func lt_0_int32(x int32) bool { return x < 0 }
func le_0_int32(x int32) bool { return x <= 0 }
func gt_0_int32(x int32) bool { return x > 0 }
func ge_0_int32(x int32) bool { return x >= 0 }
func eq_0_int32(x int32) bool { return x == 0 }
func ne_0_int32(x int32) bool { return x != 0 }
func lt_1_int32(x int32) bool { return x < 1 }
func le_1_int32(x int32) bool { return x <= 1 }
func gt_1_int32(x int32) bool { return x > 1 }
func ge_1_int32(x int32) bool { return x >= 1 }
func eq_1_int32(x int32) bool { return x == 1 }
func ne_1_int32(x int32) bool { return x != 1 }
func lt_126_int32(x int32) bool { return x < 126 }
func le_126_int32(x int32) bool { return x <= 126 }
func gt_126_int32(x int32) bool { return x > 126 }
func ge_126_int32(x int32) bool { return x >= 126 }
func eq_126_int32(x int32) bool { return x == 126 }
func ne_126_int32(x int32) bool { return x != 126 }
func lt_127_int32(x int32) bool { return x < 127 }
func le_127_int32(x int32) bool { return x <= 127 }
func gt_127_int32(x int32) bool { return x > 127 }
func ge_127_int32(x int32) bool { return x >= 127 }
func eq_127_int32(x int32) bool { return x == 127 }
func ne_127_int32(x int32) bool { return x != 127 }
func lt_128_int32(x int32) bool { return x < 128 }
func le_128_int32(x int32) bool { return x <= 128 }
func gt_128_int32(x int32) bool { return x > 128 }
func ge_128_int32(x int32) bool { return x >= 128 }
func eq_128_int32(x int32) bool { return x == 128 }
func ne_128_int32(x int32) bool { return x != 128 }
func lt_254_int32(x int32) bool { return x < 254 }
func le_254_int32(x int32) bool { return x <= 254 }
func gt_254_int32(x int32) bool { return x > 254 }
func ge_254_int32(x int32) bool { return x >= 254 }
func eq_254_int32(x int32) bool { return x == 254 }
func ne_254_int32(x int32) bool { return x != 254 }
func lt_255_int32(x int32) bool { return x < 255 }
func le_255_int32(x int32) bool { return x <= 255 }
func gt_255_int32(x int32) bool { return x > 255 }
func ge_255_int32(x int32) bool { return x >= 255 }
func eq_255_int32(x int32) bool { return x == 255 }
func ne_255_int32(x int32) bool { return x != 255 }
func lt_256_int32(x int32) bool { return x < 256 }
func le_256_int32(x int32) bool { return x <= 256 }
func gt_256_int32(x int32) bool { return x > 256 }
func ge_256_int32(x int32) bool { return x >= 256 }
func eq_256_int32(x int32) bool { return x == 256 }
func ne_256_int32(x int32) bool { return x != 256 }
func lt_32766_int32(x int32) bool { return x < 32766 }
func le_32766_int32(x int32) bool { return x <= 32766 }
func gt_32766_int32(x int32) bool { return x > 32766 }
func ge_32766_int32(x int32) bool { return x >= 32766 }
func eq_32766_int32(x int32) bool { return x == 32766 }
func ne_32766_int32(x int32) bool { return x != 32766 }
func lt_32767_int32(x int32) bool { return x < 32767 }
func le_32767_int32(x int32) bool { return x <= 32767 }
func gt_32767_int32(x int32) bool { return x > 32767 }
func ge_32767_int32(x int32) bool { return x >= 32767 }
func eq_32767_int32(x int32) bool { return x == 32767 }
func ne_32767_int32(x int32) bool { return x != 32767 }
func lt_32768_int32(x int32) bool { return x < 32768 }
func le_32768_int32(x int32) bool { return x <= 32768 }
func gt_32768_int32(x int32) bool { return x > 32768 }
func ge_32768_int32(x int32) bool { return x >= 32768 }
func eq_32768_int32(x int32) bool { return x == 32768 }
func ne_32768_int32(x int32) bool { return x != 32768 }
func lt_65534_int32(x int32) bool { return x < 65534 }
func le_65534_int32(x int32) bool { return x <= 65534 }
func gt_65534_int32(x int32) bool { return x > 65534 }
func ge_65534_int32(x int32) bool { return x >= 65534 }
func eq_65534_int32(x int32) bool { return x == 65534 }
func ne_65534_int32(x int32) bool { return x != 65534 }
func lt_65535_int32(x int32) bool { return x < 65535 }
func le_65535_int32(x int32) bool { return x <= 65535 }
func gt_65535_int32(x int32) bool { return x > 65535 }
func ge_65535_int32(x int32) bool { return x >= 65535 }
func eq_65535_int32(x int32) bool { return x == 65535 }
func ne_65535_int32(x int32) bool { return x != 65535 }
func lt_65536_int32(x int32) bool { return x < 65536 }
func le_65536_int32(x int32) bool { return x <= 65536 }
func gt_65536_int32(x int32) bool { return x > 65536 }
func ge_65536_int32(x int32) bool { return x >= 65536 }
func eq_65536_int32(x int32) bool { return x == 65536 }
func ne_65536_int32(x int32) bool { return x != 65536 }
func lt_2147483646_int32(x int32) bool { return x < 2147483646 }
func le_2147483646_int32(x int32) bool { return x <= 2147483646 }
func gt_2147483646_int32(x int32) bool { return x > 2147483646 }
func ge_2147483646_int32(x int32) bool { return x >= 2147483646 }
func eq_2147483646_int32(x int32) bool { return x == 2147483646 }
func ne_2147483646_int32(x int32) bool { return x != 2147483646 }
func lt_2147483647_int32(x int32) bool { return x < 2147483647 }
func le_2147483647_int32(x int32) bool { return x <= 2147483647 }
func gt_2147483647_int32(x int32) bool { return x > 2147483647 }
func ge_2147483647_int32(x int32) bool { return x >= 2147483647 }
func eq_2147483647_int32(x int32) bool { return x == 2147483647 }
func ne_2147483647_int32(x int32) bool { return x != 2147483647 }
var int32_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(int32) bool
}{
{idx: 0, exp: lt, fn: lt_neg2147483648_int32},
{idx: 0, exp: le, fn: le_neg2147483648_int32},
{idx: 0, exp: gt, fn: gt_neg2147483648_int32},
{idx: 0, exp: ge, fn: ge_neg2147483648_int32},
{idx: 0, exp: eq, fn: eq_neg2147483648_int32},
{idx: 0, exp: ne, fn: ne_neg2147483648_int32},
{idx: 1, exp: lt, fn: lt_neg2147483647_int32},
{idx: 1, exp: le, fn: le_neg2147483647_int32},
{idx: 1, exp: gt, fn: gt_neg2147483647_int32},
{idx: 1, exp: ge, fn: ge_neg2147483647_int32},
{idx: 1, exp: eq, fn: eq_neg2147483647_int32},
{idx: 1, exp: ne, fn: ne_neg2147483647_int32},
{idx: 2, exp: lt, fn: lt_neg32769_int32},
{idx: 2, exp: le, fn: le_neg32769_int32},
{idx: 2, exp: gt, fn: gt_neg32769_int32},
{idx: 2, exp: ge, fn: ge_neg32769_int32},
{idx: 2, exp: eq, fn: eq_neg32769_int32},
{idx: 2, exp: ne, fn: ne_neg32769_int32},
{idx: 3, exp: lt, fn: lt_neg32768_int32},
{idx: 3, exp: le, fn: le_neg32768_int32},
{idx: 3, exp: gt, fn: gt_neg32768_int32},
{idx: 3, exp: ge, fn: ge_neg32768_int32},
{idx: 3, exp: eq, fn: eq_neg32768_int32},
{idx: 3, exp: ne, fn: ne_neg32768_int32},
{idx: 4, exp: lt, fn: lt_neg32767_int32},
{idx: 4, exp: le, fn: le_neg32767_int32},
{idx: 4, exp: gt, fn: gt_neg32767_int32},
{idx: 4, exp: ge, fn: ge_neg32767_int32},
{idx: 4, exp: eq, fn: eq_neg32767_int32},
{idx: 4, exp: ne, fn: ne_neg32767_int32},
{idx: 5, exp: lt, fn: lt_neg129_int32},
{idx: 5, exp: le, fn: le_neg129_int32},
{idx: 5, exp: gt, fn: gt_neg129_int32},
{idx: 5, exp: ge, fn: ge_neg129_int32},
{idx: 5, exp: eq, fn: eq_neg129_int32},
{idx: 5, exp: ne, fn: ne_neg129_int32},
{idx: 6, exp: lt, fn: lt_neg128_int32},
{idx: 6, exp: le, fn: le_neg128_int32},
{idx: 6, exp: gt, fn: gt_neg128_int32},
{idx: 6, exp: ge, fn: ge_neg128_int32},
{idx: 6, exp: eq, fn: eq_neg128_int32},
{idx: 6, exp: ne, fn: ne_neg128_int32},
{idx: 7, exp: lt, fn: lt_neg127_int32},
{idx: 7, exp: le, fn: le_neg127_int32},
{idx: 7, exp: gt, fn: gt_neg127_int32},
{idx: 7, exp: ge, fn: ge_neg127_int32},
{idx: 7, exp: eq, fn: eq_neg127_int32},
{idx: 7, exp: ne, fn: ne_neg127_int32},
{idx: 8, exp: lt, fn: lt_neg1_int32},
{idx: 8, exp: le, fn: le_neg1_int32},
{idx: 8, exp: gt, fn: gt_neg1_int32},
{idx: 8, exp: ge, fn: ge_neg1_int32},
{idx: 8, exp: eq, fn: eq_neg1_int32},
{idx: 8, exp: ne, fn: ne_neg1_int32},
{idx: 9, exp: lt, fn: lt_0_int32},
{idx: 9, exp: le, fn: le_0_int32},
{idx: 9, exp: gt, fn: gt_0_int32},
{idx: 9, exp: ge, fn: ge_0_int32},
{idx: 9, exp: eq, fn: eq_0_int32},
{idx: 9, exp: ne, fn: ne_0_int32},
{idx: 10, exp: lt, fn: lt_1_int32},
{idx: 10, exp: le, fn: le_1_int32},
{idx: 10, exp: gt, fn: gt_1_int32},
{idx: 10, exp: ge, fn: ge_1_int32},
{idx: 10, exp: eq, fn: eq_1_int32},
{idx: 10, exp: ne, fn: ne_1_int32},
{idx: 11, exp: lt, fn: lt_126_int32},
{idx: 11, exp: le, fn: le_126_int32},
{idx: 11, exp: gt, fn: gt_126_int32},
{idx: 11, exp: ge, fn: ge_126_int32},
{idx: 11, exp: eq, fn: eq_126_int32},
{idx: 11, exp: ne, fn: ne_126_int32},
{idx: 12, exp: lt, fn: lt_127_int32},
{idx: 12, exp: le, fn: le_127_int32},
{idx: 12, exp: gt, fn: gt_127_int32},
{idx: 12, exp: ge, fn: ge_127_int32},
{idx: 12, exp: eq, fn: eq_127_int32},
{idx: 12, exp: ne, fn: ne_127_int32},
{idx: 13, exp: lt, fn: lt_128_int32},
{idx: 13, exp: le, fn: le_128_int32},
{idx: 13, exp: gt, fn: gt_128_int32},
{idx: 13, exp: ge, fn: ge_128_int32},
{idx: 13, exp: eq, fn: eq_128_int32},
{idx: 13, exp: ne, fn: ne_128_int32},
{idx: 14, exp: lt, fn: lt_254_int32},
{idx: 14, exp: le, fn: le_254_int32},
{idx: 14, exp: gt, fn: gt_254_int32},
{idx: 14, exp: ge, fn: ge_254_int32},
{idx: 14, exp: eq, fn: eq_254_int32},
{idx: 14, exp: ne, fn: ne_254_int32},
{idx: 15, exp: lt, fn: lt_255_int32},
{idx: 15, exp: le, fn: le_255_int32},
{idx: 15, exp: gt, fn: gt_255_int32},
{idx: 15, exp: ge, fn: ge_255_int32},
{idx: 15, exp: eq, fn: eq_255_int32},
{idx: 15, exp: ne, fn: ne_255_int32},
{idx: 16, exp: lt, fn: lt_256_int32},
{idx: 16, exp: le, fn: le_256_int32},
{idx: 16, exp: gt, fn: gt_256_int32},
{idx: 16, exp: ge, fn: ge_256_int32},
{idx: 16, exp: eq, fn: eq_256_int32},
{idx: 16, exp: ne, fn: ne_256_int32},
{idx: 17, exp: lt, fn: lt_32766_int32},
{idx: 17, exp: le, fn: le_32766_int32},
{idx: 17, exp: gt, fn: gt_32766_int32},
{idx: 17, exp: ge, fn: ge_32766_int32},
{idx: 17, exp: eq, fn: eq_32766_int32},
{idx: 17, exp: ne, fn: ne_32766_int32},
{idx: 18, exp: lt, fn: lt_32767_int32},
{idx: 18, exp: le, fn: le_32767_int32},
{idx: 18, exp: gt, fn: gt_32767_int32},
{idx: 18, exp: ge, fn: ge_32767_int32},
{idx: 18, exp: eq, fn: eq_32767_int32},
{idx: 18, exp: ne, fn: ne_32767_int32},
{idx: 19, exp: lt, fn: lt_32768_int32},
{idx: 19, exp: le, fn: le_32768_int32},
{idx: 19, exp: gt, fn: gt_32768_int32},
{idx: 19, exp: ge, fn: ge_32768_int32},
{idx: 19, exp: eq, fn: eq_32768_int32},
{idx: 19, exp: ne, fn: ne_32768_int32},
{idx: 20, exp: lt, fn: lt_65534_int32},
{idx: 20, exp: le, fn: le_65534_int32},
{idx: 20, exp: gt, fn: gt_65534_int32},
{idx: 20, exp: ge, fn: ge_65534_int32},
{idx: 20, exp: eq, fn: eq_65534_int32},
{idx: 20, exp: ne, fn: ne_65534_int32},
{idx: 21, exp: lt, fn: lt_65535_int32},
{idx: 21, exp: le, fn: le_65535_int32},
{idx: 21, exp: gt, fn: gt_65535_int32},
{idx: 21, exp: ge, fn: ge_65535_int32},
{idx: 21, exp: eq, fn: eq_65535_int32},
{idx: 21, exp: ne, fn: ne_65535_int32},
{idx: 22, exp: lt, fn: lt_65536_int32},
{idx: 22, exp: le, fn: le_65536_int32},
{idx: 22, exp: gt, fn: gt_65536_int32},
{idx: 22, exp: ge, fn: ge_65536_int32},
{idx: 22, exp: eq, fn: eq_65536_int32},
{idx: 22, exp: ne, fn: ne_65536_int32},
{idx: 23, exp: lt, fn: lt_2147483646_int32},
{idx: 23, exp: le, fn: le_2147483646_int32},
{idx: 23, exp: gt, fn: gt_2147483646_int32},
{idx: 23, exp: ge, fn: ge_2147483646_int32},
{idx: 23, exp: eq, fn: eq_2147483646_int32},
{idx: 23, exp: ne, fn: ne_2147483646_int32},
{idx: 24, exp: lt, fn: lt_2147483647_int32},
{idx: 24, exp: le, fn: le_2147483647_int32},
{idx: 24, exp: gt, fn: gt_2147483647_int32},
{idx: 24, exp: ge, fn: ge_2147483647_int32},
{idx: 24, exp: eq, fn: eq_2147483647_int32},
{idx: 24, exp: ne, fn: ne_2147483647_int32},
}
// int16 tests
var int16_vals = []int16{
-32768,
-32767,
-129,
-128,
-127,
-1,
0,
1,
126,
127,
128,
254,
255,
256,
32766,
32767,
}
func lt_neg32768_int16(x int16) bool { return x < -32768 }
func le_neg32768_int16(x int16) bool { return x <= -32768 }
func gt_neg32768_int16(x int16) bool { return x > -32768 }
func ge_neg32768_int16(x int16) bool { return x >= -32768 }
func eq_neg32768_int16(x int16) bool { return x == -32768 }
func ne_neg32768_int16(x int16) bool { return x != -32768 }
func lt_neg32767_int16(x int16) bool { return x < -32767 }
func le_neg32767_int16(x int16) bool { return x <= -32767 }
func gt_neg32767_int16(x int16) bool { return x > -32767 }
func ge_neg32767_int16(x int16) bool { return x >= -32767 }
func eq_neg32767_int16(x int16) bool { return x == -32767 }
func ne_neg32767_int16(x int16) bool { return x != -32767 }
func lt_neg129_int16(x int16) bool { return x < -129 }
func le_neg129_int16(x int16) bool { return x <= -129 }
func gt_neg129_int16(x int16) bool { return x > -129 }
func ge_neg129_int16(x int16) bool { return x >= -129 }
func eq_neg129_int16(x int16) bool { return x == -129 }
func ne_neg129_int16(x int16) bool { return x != -129 }
func lt_neg128_int16(x int16) bool { return x < -128 }
func le_neg128_int16(x int16) bool { return x <= -128 }
func gt_neg128_int16(x int16) bool { return x > -128 }
func ge_neg128_int16(x int16) bool { return x >= -128 }
func eq_neg128_int16(x int16) bool { return x == -128 }
func ne_neg128_int16(x int16) bool { return x != -128 }
func lt_neg127_int16(x int16) bool { return x < -127 }
func le_neg127_int16(x int16) bool { return x <= -127 }
func gt_neg127_int16(x int16) bool { return x > -127 }
func ge_neg127_int16(x int16) bool { return x >= -127 }
func eq_neg127_int16(x int16) bool { return x == -127 }
func ne_neg127_int16(x int16) bool { return x != -127 }
func lt_neg1_int16(x int16) bool { return x < -1 }
func le_neg1_int16(x int16) bool { return x <= -1 }
func gt_neg1_int16(x int16) bool { return x > -1 }
func ge_neg1_int16(x int16) bool { return x >= -1 }
func eq_neg1_int16(x int16) bool { return x == -1 }
func ne_neg1_int16(x int16) bool { return x != -1 }
func lt_0_int16(x int16) bool { return x < 0 }
func le_0_int16(x int16) bool { return x <= 0 }
func gt_0_int16(x int16) bool { return x > 0 }
func ge_0_int16(x int16) bool { return x >= 0 }
func eq_0_int16(x int16) bool { return x == 0 }
func ne_0_int16(x int16) bool { return x != 0 }
func lt_1_int16(x int16) bool { return x < 1 }
func le_1_int16(x int16) bool { return x <= 1 }
func gt_1_int16(x int16) bool { return x > 1 }
func ge_1_int16(x int16) bool { return x >= 1 }
func eq_1_int16(x int16) bool { return x == 1 }
func ne_1_int16(x int16) bool { return x != 1 }
func lt_126_int16(x int16) bool { return x < 126 }
func le_126_int16(x int16) bool { return x <= 126 }
func gt_126_int16(x int16) bool { return x > 126 }
func ge_126_int16(x int16) bool { return x >= 126 }
func eq_126_int16(x int16) bool { return x == 126 }
func ne_126_int16(x int16) bool { return x != 126 }
func lt_127_int16(x int16) bool { return x < 127 }
func le_127_int16(x int16) bool { return x <= 127 }
func gt_127_int16(x int16) bool { return x > 127 }
func ge_127_int16(x int16) bool { return x >= 127 }
func eq_127_int16(x int16) bool { return x == 127 }
func ne_127_int16(x int16) bool { return x != 127 }
func lt_128_int16(x int16) bool { return x < 128 }
func le_128_int16(x int16) bool { return x <= 128 }
func gt_128_int16(x int16) bool { return x > 128 }
func ge_128_int16(x int16) bool { return x >= 128 }
func eq_128_int16(x int16) bool { return x == 128 }
func ne_128_int16(x int16) bool { return x != 128 }
func lt_254_int16(x int16) bool { return x < 254 }
func le_254_int16(x int16) bool { return x <= 254 }
func gt_254_int16(x int16) bool { return x > 254 }
func ge_254_int16(x int16) bool { return x >= 254 }
func eq_254_int16(x int16) bool { return x == 254 }
func ne_254_int16(x int16) bool { return x != 254 }
func lt_255_int16(x int16) bool { return x < 255 }
func le_255_int16(x int16) bool { return x <= 255 }
func gt_255_int16(x int16) bool { return x > 255 }
func ge_255_int16(x int16) bool { return x >= 255 }
func eq_255_int16(x int16) bool { return x == 255 }
func ne_255_int16(x int16) bool { return x != 255 }
func lt_256_int16(x int16) bool { return x < 256 }
func le_256_int16(x int16) bool { return x <= 256 }
func gt_256_int16(x int16) bool { return x > 256 }
func ge_256_int16(x int16) bool { return x >= 256 }
func eq_256_int16(x int16) bool { return x == 256 }
func ne_256_int16(x int16) bool { return x != 256 }
func lt_32766_int16(x int16) bool { return x < 32766 }
func le_32766_int16(x int16) bool { return x <= 32766 }
func gt_32766_int16(x int16) bool { return x > 32766 }
func ge_32766_int16(x int16) bool { return x >= 32766 }
func eq_32766_int16(x int16) bool { return x == 32766 }
func ne_32766_int16(x int16) bool { return x != 32766 }
func lt_32767_int16(x int16) bool { return x < 32767 }
func le_32767_int16(x int16) bool { return x <= 32767 }
func gt_32767_int16(x int16) bool { return x > 32767 }
func ge_32767_int16(x int16) bool { return x >= 32767 }
func eq_32767_int16(x int16) bool { return x == 32767 }
func ne_32767_int16(x int16) bool { return x != 32767 }
var int16_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(int16) bool
}{
{idx: 0, exp: lt, fn: lt_neg32768_int16},
{idx: 0, exp: le, fn: le_neg32768_int16},
{idx: 0, exp: gt, fn: gt_neg32768_int16},
{idx: 0, exp: ge, fn: ge_neg32768_int16},
{idx: 0, exp: eq, fn: eq_neg32768_int16},
{idx: 0, exp: ne, fn: ne_neg32768_int16},
{idx: 1, exp: lt, fn: lt_neg32767_int16},
{idx: 1, exp: le, fn: le_neg32767_int16},
{idx: 1, exp: gt, fn: gt_neg32767_int16},
{idx: 1, exp: ge, fn: ge_neg32767_int16},
{idx: 1, exp: eq, fn: eq_neg32767_int16},
{idx: 1, exp: ne, fn: ne_neg32767_int16},
{idx: 2, exp: lt, fn: lt_neg129_int16},
{idx: 2, exp: le, fn: le_neg129_int16},
{idx: 2, exp: gt, fn: gt_neg129_int16},
{idx: 2, exp: ge, fn: ge_neg129_int16},
{idx: 2, exp: eq, fn: eq_neg129_int16},
{idx: 2, exp: ne, fn: ne_neg129_int16},
{idx: 3, exp: lt, fn: lt_neg128_int16},
{idx: 3, exp: le, fn: le_neg128_int16},
{idx: 3, exp: gt, fn: gt_neg128_int16},
{idx: 3, exp: ge, fn: ge_neg128_int16},
{idx: 3, exp: eq, fn: eq_neg128_int16},
{idx: 3, exp: ne, fn: ne_neg128_int16},
{idx: 4, exp: lt, fn: lt_neg127_int16},
{idx: 4, exp: le, fn: le_neg127_int16},
{idx: 4, exp: gt, fn: gt_neg127_int16},
{idx: 4, exp: ge, fn: ge_neg127_int16},
{idx: 4, exp: eq, fn: eq_neg127_int16},
{idx: 4, exp: ne, fn: ne_neg127_int16},
{idx: 5, exp: lt, fn: lt_neg1_int16},
{idx: 5, exp: le, fn: le_neg1_int16},
{idx: 5, exp: gt, fn: gt_neg1_int16},
{idx: 5, exp: ge, fn: ge_neg1_int16},
{idx: 5, exp: eq, fn: eq_neg1_int16},
{idx: 5, exp: ne, fn: ne_neg1_int16},
{idx: 6, exp: lt, fn: lt_0_int16},
{idx: 6, exp: le, fn: le_0_int16},
{idx: 6, exp: gt, fn: gt_0_int16},
{idx: 6, exp: ge, fn: ge_0_int16},
{idx: 6, exp: eq, fn: eq_0_int16},
{idx: 6, exp: ne, fn: ne_0_int16},
{idx: 7, exp: lt, fn: lt_1_int16},
{idx: 7, exp: le, fn: le_1_int16},
{idx: 7, exp: gt, fn: gt_1_int16},
{idx: 7, exp: ge, fn: ge_1_int16},
{idx: 7, exp: eq, fn: eq_1_int16},
{idx: 7, exp: ne, fn: ne_1_int16},
{idx: 8, exp: lt, fn: lt_126_int16},
{idx: 8, exp: le, fn: le_126_int16},
{idx: 8, exp: gt, fn: gt_126_int16},
{idx: 8, exp: ge, fn: ge_126_int16},
{idx: 8, exp: eq, fn: eq_126_int16},
{idx: 8, exp: ne, fn: ne_126_int16},
{idx: 9, exp: lt, fn: lt_127_int16},
{idx: 9, exp: le, fn: le_127_int16},
{idx: 9, exp: gt, fn: gt_127_int16},
{idx: 9, exp: ge, fn: ge_127_int16},
{idx: 9, exp: eq, fn: eq_127_int16},
{idx: 9, exp: ne, fn: ne_127_int16},
{idx: 10, exp: lt, fn: lt_128_int16},
{idx: 10, exp: le, fn: le_128_int16},
{idx: 10, exp: gt, fn: gt_128_int16},
{idx: 10, exp: ge, fn: ge_128_int16},
{idx: 10, exp: eq, fn: eq_128_int16},
{idx: 10, exp: ne, fn: ne_128_int16},
{idx: 11, exp: lt, fn: lt_254_int16},
{idx: 11, exp: le, fn: le_254_int16},
{idx: 11, exp: gt, fn: gt_254_int16},
{idx: 11, exp: ge, fn: ge_254_int16},
{idx: 11, exp: eq, fn: eq_254_int16},
{idx: 11, exp: ne, fn: ne_254_int16},
{idx: 12, exp: lt, fn: lt_255_int16},
{idx: 12, exp: le, fn: le_255_int16},
{idx: 12, exp: gt, fn: gt_255_int16},
{idx: 12, exp: ge, fn: ge_255_int16},
{idx: 12, exp: eq, fn: eq_255_int16},
{idx: 12, exp: ne, fn: ne_255_int16},
{idx: 13, exp: lt, fn: lt_256_int16},
{idx: 13, exp: le, fn: le_256_int16},
{idx: 13, exp: gt, fn: gt_256_int16},
{idx: 13, exp: ge, fn: ge_256_int16},
{idx: 13, exp: eq, fn: eq_256_int16},
{idx: 13, exp: ne, fn: ne_256_int16},
{idx: 14, exp: lt, fn: lt_32766_int16},
{idx: 14, exp: le, fn: le_32766_int16},
{idx: 14, exp: gt, fn: gt_32766_int16},
{idx: 14, exp: ge, fn: ge_32766_int16},
{idx: 14, exp: eq, fn: eq_32766_int16},
{idx: 14, exp: ne, fn: ne_32766_int16},
{idx: 15, exp: lt, fn: lt_32767_int16},
{idx: 15, exp: le, fn: le_32767_int16},
{idx: 15, exp: gt, fn: gt_32767_int16},
{idx: 15, exp: ge, fn: ge_32767_int16},
{idx: 15, exp: eq, fn: eq_32767_int16},
{idx: 15, exp: ne, fn: ne_32767_int16},
}
// int8 tests
var int8_vals = []int8{
-128,
-127,
-1,
0,
1,
126,
127,
}
func lt_neg128_int8(x int8) bool { return x < -128 }
func le_neg128_int8(x int8) bool { return x <= -128 }
func gt_neg128_int8(x int8) bool { return x > -128 }
func ge_neg128_int8(x int8) bool { return x >= -128 }
func eq_neg128_int8(x int8) bool { return x == -128 }
func ne_neg128_int8(x int8) bool { return x != -128 }
func lt_neg127_int8(x int8) bool { return x < -127 }
func le_neg127_int8(x int8) bool { return x <= -127 }
func gt_neg127_int8(x int8) bool { return x > -127 }
func ge_neg127_int8(x int8) bool { return x >= -127 }
func eq_neg127_int8(x int8) bool { return x == -127 }
func ne_neg127_int8(x int8) bool { return x != -127 }
func lt_neg1_int8(x int8) bool { return x < -1 }
func le_neg1_int8(x int8) bool { return x <= -1 }
func gt_neg1_int8(x int8) bool { return x > -1 }
func ge_neg1_int8(x int8) bool { return x >= -1 }
func eq_neg1_int8(x int8) bool { return x == -1 }
func ne_neg1_int8(x int8) bool { return x != -1 }
func lt_0_int8(x int8) bool { return x < 0 }
func le_0_int8(x int8) bool { return x <= 0 }
func gt_0_int8(x int8) bool { return x > 0 }
func ge_0_int8(x int8) bool { return x >= 0 }
func eq_0_int8(x int8) bool { return x == 0 }
func ne_0_int8(x int8) bool { return x != 0 }
func lt_1_int8(x int8) bool { return x < 1 }
func le_1_int8(x int8) bool { return x <= 1 }
func gt_1_int8(x int8) bool { return x > 1 }
func ge_1_int8(x int8) bool { return x >= 1 }
func eq_1_int8(x int8) bool { return x == 1 }
func ne_1_int8(x int8) bool { return x != 1 }
func lt_126_int8(x int8) bool { return x < 126 }
func le_126_int8(x int8) bool { return x <= 126 }
func gt_126_int8(x int8) bool { return x > 126 }
func ge_126_int8(x int8) bool { return x >= 126 }
func eq_126_int8(x int8) bool { return x == 126 }
func ne_126_int8(x int8) bool { return x != 126 }
func lt_127_int8(x int8) bool { return x < 127 }
func le_127_int8(x int8) bool { return x <= 127 }
func gt_127_int8(x int8) bool { return x > 127 }
func ge_127_int8(x int8) bool { return x >= 127 }
func eq_127_int8(x int8) bool { return x == 127 }
func ne_127_int8(x int8) bool { return x != 127 }
var int8_tests = []struct {
idx int // index of the constant used
exp result // expected results
fn func(int8) bool
}{
{idx: 0, exp: lt, fn: lt_neg128_int8},
{idx: 0, exp: le, fn: le_neg128_int8},
{idx: 0, exp: gt, fn: gt_neg128_int8},
{idx: 0, exp: ge, fn: ge_neg128_int8},
{idx: 0, exp: eq, fn: eq_neg128_int8},
{idx: 0, exp: ne, fn: ne_neg128_int8},
{idx: 1, exp: lt, fn: lt_neg127_int8},
{idx: 1, exp: le, fn: le_neg127_int8},
{idx: 1, exp: gt, fn: gt_neg127_int8},
{idx: 1, exp: ge, fn: ge_neg127_int8},
{idx: 1, exp: eq, fn: eq_neg127_int8},
{idx: 1, exp: ne, fn: ne_neg127_int8},
{idx: 2, exp: lt, fn: lt_neg1_int8},
{idx: 2, exp: le, fn: le_neg1_int8},
{idx: 2, exp: gt, fn: gt_neg1_int8},
{idx: 2, exp: ge, fn: ge_neg1_int8},
{idx: 2, exp: eq, fn: eq_neg1_int8},
{idx: 2, exp: ne, fn: ne_neg1_int8},
{idx: 3, exp: lt, fn: lt_0_int8},
{idx: 3, exp: le, fn: le_0_int8},
{idx: 3, exp: gt, fn: gt_0_int8},
{idx: 3, exp: ge, fn: ge_0_int8},
{idx: 3, exp: eq, fn: eq_0_int8},
{idx: 3, exp: ne, fn: ne_0_int8},
{idx: 4, exp: lt, fn: lt_1_int8},
{idx: 4, exp: le, fn: le_1_int8},
{idx: 4, exp: gt, fn: gt_1_int8},
{idx: 4, exp: ge, fn: ge_1_int8},
{idx: 4, exp: eq, fn: eq_1_int8},
{idx: 4, exp: ne, fn: ne_1_int8},
{idx: 5, exp: lt, fn: lt_126_int8},
{idx: 5, exp: le, fn: le_126_int8},
{idx: 5, exp: gt, fn: gt_126_int8},
{idx: 5, exp: ge, fn: ge_126_int8},
{idx: 5, exp: eq, fn: eq_126_int8},
{idx: 5, exp: ne, fn: ne_126_int8},
{idx: 6, exp: lt, fn: lt_127_int8},
{idx: 6, exp: le, fn: le_127_int8},
{idx: 6, exp: gt, fn: gt_127_int8},
{idx: 6, exp: ge, fn: ge_127_int8},
{idx: 6, exp: eq, fn: eq_127_int8},
{idx: 6, exp: ne, fn: ne_127_int8},
}
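// Illustrative reading of the tables above (the result type and the lt/le/gt/
// ge/eq/ne expectations are defined earlier in this file and assumed to hold
// the expected booleans for values below, equal to, and above the constant):
// for {idx: 3, exp: lt, fn: lt_0_int8}, int8_vals[3] == 0, so lt_0_int8 should
// return true for int8_vals[0..2] (all < 0) and false for int8_vals[3..] (>= 0).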
func main() {
for i, test := range uint64_tests {
for j, x := range uint64_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=uint64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range uint32_tests {
for j, x := range uint32_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=uint32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range uint16_tests {
for j, x := range uint16_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=uint16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range uint8_tests {
for j, x := range uint8_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=uint8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range int64_tests {
for j, x := range int64_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=int64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range int32_tests {
for j, x := range int32_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=int32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range int16_tests {
for j, x := range int16_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=int16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
for i, test := range int8_tests {
for j, x := range int8_vals {
want := test.exp.l
if j == test.idx {
want = test.exp.e
} else if j > test.idx {
want = test.exp.r
}
if test.fn(x) != want {
fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
msg := fmt.Sprintf("test failed: %v(%v) != %v [type=int8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
panic(msg)
}
}
}
}
| ne_127_uint8 |
root.go | package cmd
import (
"io"
"os"
"github.com/giantswarm/microerror"
"github.com/giantswarm/micrologger"
"github.com/spf13/cobra"
"github.com/giantswarm/crsync/cmd/sync"
)
const (
name = "crsync"
description = "CLI tool to sync images between registries"
)
type Config struct {
Logger micrologger.Logger
Stderr io.Writer
Stdout io.Writer
}
func | (config Config) (*cobra.Command, error) {
if config.Logger == nil {
return nil, microerror.Maskf(invalidConfigError, "%T.Logger must not be empty", config)
}
if config.Stderr == nil {
config.Stderr = os.Stderr
}
if config.Stdout == nil {
config.Stdout = os.Stdout
}
var err error
var syncCmd *cobra.Command
{
c := sync.Config{
Logger: config.Logger,
Stderr: config.Stderr,
Stdout: config.Stdout,
}
syncCmd, err = sync.New(c)
if err != nil {
return nil, microerror.Mask(err)
}
}
f := &flag{}
r := &runner{
flag: f,
logger: config.Logger,
stderr: config.Stderr,
stdout: config.Stdout,
}
c := &cobra.Command{
Use: name,
Short: description,
Long: description,
PersistentPreRunE: r.PersistentPreRun,
RunE: r.Run,
SilenceUsage: true,
}
f.Init(c)
c.AddCommand(syncCmd)
return c, nil
}
| New |
multiple.py | # -*- coding: utf-8 -*-
"""
module for mul and mulfix class: fund combination management
"""
import logging
import pandas as pd
from pyecharts import options as opts
from pyecharts.charts import Pie, ThemeRiver
from xalpha.cons import convert_date, myround, yesterdaydash, yesterdayobj
from xalpha.evaluate import evaluate
from xalpha.exceptions import FundTypeError, TradeBehaviorError
from xalpha.record import record, irecord
from xalpha.indicator import indicator
from xalpha.info import cashinfo, fundinfo, mfundinfo, get_fund_holdings
from xalpha.trade import (
bottleneck,
trade,
turnoverrate,
vtradevolume,
xirrcal,
itrade,
vtradecost,
)
from xalpha.universal import get_fund_type, ttjjcode, get_rt, get_industry_fromxq
import xalpha.universal as xu
logger = logging.getLogger(__name__)
class mul:
"""
multiple fund positions manage class
:param fundtradeobj: list of trade obj which you want to analyse together
:param status: the status table of trade, all code in this table would be considered.
one must provide one of the two parameters; if both are offered, status will be overlooked.
It can be an on-exchange bookkeeping DataFrame, or a record object.
:param istatus: on-exchange trading statement, can also be an irecord object.
If provided, on-exchange and off-exchange trades are summarized and shown together. This option only guarantees that the ``combsummary`` method works; other methods of the ``mul`` class are not guaranteed to be available.
:param property: Dict[fundcode, property_number]. Meaning of the property number:
int. 1: fund purchases round shares down instead of rounding half-up (such funds really do exist==). 2: dividends are reinvested by default (0 means cash dividends by default). 4: redemptions are settled by net value (for now only money-market funds are supported; value-based funds redeemed by share cannot be handled exactly). Add up the desired property values, similar to the xwr permission bits on *nix.
:param fetch: boolean, when the fetch option is enabled, the info class will try fetching from local files first during init
:param save: boolean, when the save option is enabled, info classes automatically save themselves to files
:param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database
:param form: string, the format of IO, options including: 'csv','sql'
"""
def __init__(
self,
*fundtradeobj,
status=None,
istatus=None,
property=None,
fetch=False,
save=False,
path="",
form="csv"
):
if isinstance(status, record):
if not property:
property = getattr(status, "property", {})
status = status.status
elif not property:
property = {}
self.is_in = False
if fundtradeobj:
for t in fundtradeobj:
if isinstance(t, itrade):
self.is_in = True
break
else:
fundtradeobj = []
# warning: not a very good way to automatically generate these fund objs
# because some funds may use round_down for share calculation, i.e., label=2 must be given
# unless you are sure corresponding funds are added to the droplist
fundcodelist = [f.code for f in fundtradeobj]
if status is not None:
for code in status.columns:
if code == "date":
continue
# r1, d2, v4 p = r+d+v
if code in fundcodelist:
continue
p = property.get(code, 0)
round_label = p % 2
dividend_label = ((p - round_label) / 2) % 2
value_label = ((p - round_label - dividend_label) / 4) % 2
try:
fundtradeobj.append(
trade(
fundinfo(
code,
round_label=round_label,
dividend_label=dividend_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
except FundTypeError:
fundtradeobj.append(
trade(
mfundinfo(
code,
round_label=round_label,
value_label=value_label,
fetch=fetch,
save=save,
path=path,
form=form,
),
status,
)
)
if istatus is not None:
self.is_in = True
if isinstance(istatus, irecord):
istatus = istatus.status
for code in istatus.code.unique():
if code not in fundcodelist and not code.startswith("#"):
fundtradeobj.append(itrade(code, istatus))
self.fundtradeobj = tuple(fundtradeobj)
self.totcftable = self._mergecftb()
def tot(self, prop="基金现值", date=yesterdayobj()):
"""
sum of all the values from one prop of fund daily report,
of course, many of the props make no sense to sum
:param prop: string defined in the daily report dict,
typical one is 'currentvalue' or 'originalpurchase'
"""
res = 0
for fund in self.fundtradeobj:
res += fund.dailyreport().iloc[0][prop]
return res
def combsummary(self, date=yesterdayobj()):
"""
brief report table of every fund and the combination investment
:param date: string or obj of date, show info of the date given
:returns: empty dict if nothing is remaining that date
dict of various data on the trade positions
"""
date = convert_date(date)
columns = [
"基金名称",
"基金代码",
"当日净值",
"单位成本",
"持有份额",
"基金现值",
"基金总申购",
"历史最大占用",
"基金持有成本",
"基金分红与赎回",
"换手率",
"基金收益总额",
"投资收益率",
]
summarydf = pd.DataFrame([], columns=columns)
for fund in self.fundtradeobj:
summarydf = summarydf.append(
fund.dailyreport(date), ignore_index=True, sort=True
)
tname = "总计"
tcode = "total"
tunitvalue = float("NaN")
tunitcost = float("NaN")
tholdshare = float("NaN")
tcurrentvalue = summarydf["基金现值"].sum()
tpurchase = summarydf["基金总申购"].sum()
tbtnk = bottleneck(self.totcftable[self.totcftable["date"] <= date])
tcost = summarydf["基金持有成本"].sum()
toutput = summarydf["基金分红与赎回"].sum()
tturnover = turnoverrate(self.totcftable[self.totcftable["date"] <= date], date)
# this is the turnover rate of the whole system against the outside world, not between components within the system
tearn = summarydf["基金收益总额"].sum()
trate = round(tearn / tbtnk * 100, 4)
trow = pd.DataFrame(
[
[
tname,
tcode,
tunitvalue,
tunitcost,
tholdshare,
tcurrentvalue,
tpurchase,
tbtnk,
tcost,
toutput,
tturnover,
tearn,
trate,
]
],
columns=columns,
)
summarydf = summarydf.append(trow, ignore_index=True, sort=True)
return summarydf[columns].sort_values(by="基金现值", ascending=False)
summary = combsummary
def _mergecftb(self):
"""
merge the different cftable for different funds into one table
"""
dtlist = []
for fund in self.fundtradeobj:
dtlist2 = []
for _, row in fund.cftable.iterrows():
dtlist2.append((row["date"], row["cash"]))
dtlist.extend(dtlist2)
nndtlist = set([item[0] for item in dtlist])
nndtlist = sorted(list(nndtlist), key=lambda x: x)
reslist = []
for date in nndtlist:
reslist.append(sum([item[1] for item in dtlist if item[0] == date]))
df = pd.DataFrame(data={"date": nndtlist, "cash": reslist})
df = df[df["cash"] != 0]
df = df.reset_index(drop=True)
return df
def xirrrate(self, date=yesterdayobj(), startdate=None, guess=0.01):
"""
xirr rate evaluation of the whole investment combination
:param date: string or obj of datetime, the virtually sell-all date
:param startdate: string or obj of datetime, the beginning date of calculation, default from first buy
"""
return xirrcal(self.totcftable, self.fundtradeobj, date, startdate, guess)
def evaluation(self, start=None):
"""
give the evaluation object to analyze fund properties themselves instead of trades
:returns: :class:`xalpha.evaluate.evaluate` object, with referenced funds the same as funds
we invested
"""
if self.is_in:
raise NotImplementedError()
case = evaluate(
*[fundtrade.aim for fundtrade in self.fundtradeobj], start=start
)
return case
def get_stock_holdings(
self, year=None, season=None, date=yesterdayobj(), threhold=100
):
"""
Get the aggregated underlying stock holdings of the whole fund portfolio, with look-through details.
:param year: year of the fund quarterly report to use
:param season: quarter of the fund quarterly report to use
:param date: defaults to yesterday
:param threhold: defaults to 100. Underlying stocks worth less than 100 yuan are omitted from the final result.
:return: pd.DataFrame column: name, code, value, ratio
"""
d = {}
if year is None or season is None:
rd = convert_date(date) - pd.Timedelta(days=120)
if not year:
year = rd.year
if not season:
season = int((rd.month - 0.1) / 3) + 1
logger.debug("use %s, %s for fund report" % (year, season))
for f in self.fundtradeobj:
if isinstance(f, itrade):
if f.get_type() == "股票":
code = f.code
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if code.startswith("SH") or code.startswith("SZ"):
stock = code
d[stock] = d.get(stock, 0) + value
elif code == "mf":
continue
else:
df = get_fund_holdings(code, year, season)
if df is None:
continue
for _, row in df.iterrows():
stock = row["code"]
stock = ttjjcode(stock)
d[stock] = d.get(stock, 0) + row["ratio"] / 100 * value
# print("%s has %s contribution from %s" %(stock, row["ratio"] / 100 * value, f.name))
l = []
for code, value in sorted(d.items(), key=lambda item: -item[1]):
if value >= threhold:
try:
name = get_rt(code)["name"]
except:
name = code
l.append([name, code, value])
fdf = pd.DataFrame(l, columns=["name", "code", "value"])
fdf["ratio"] = fdf["value"] / fdf["value"].sum()
return fdf
def get_portfolio(self, date=yesterdayobj()):
"""
Get the concrete asset-class allocation of the portfolio's underlying assets.
:param date:
:return: Dict[str, float]. Dict mapping stock, bond and cash to their total values.
"""
d = {"stock": 0, "bond": 0, "cash": 0}
date = convert_date(date)
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
d["stock"] += value
continue
elif f.get_type() in ["可转债", "债券"]:
d["bond"] += value
continue
elif f.get_type() == "货币基金":
d["cash"] += value
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
d["cash"] += value
continue
if get_fund_type(code) == "货币基金":
d["cash"] += value
continue
df = xu.get_daily("pt-F" + code, end=date.strftime("%Y%m%d"))
if df is None or len(df) == 0:
logger.warning("empty portfolio info for %s" % code)
continue
row = df.iloc[-1]
if row["bond_ratio"] + row["stock_ratio"] < 10: # 联接基金
d["stock"] += (
(100 - row["bond_ratio"] - row["cash_ratio"]) * value / 100
)
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
else:
d["stock"] += row["stock_ratio"] * value / 100
d["bond"] += row["bond_ratio"] * value / 100
d["cash"] += row["cash_ratio"] * value / 100
return d
get_portfolio_holdings = get_portfolio
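# Illustrative return shape (numbers are hypothetical): get_portfolio() yields
# something like {"stock": 52000.0, "bond": 31000.0, "cash": 17000.0},
# i.e. the total value per asset class rather than ratios.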
def get_industry(self, date=yesterdayobj()):
"""
Get industry weights of the fund portfolio's holdings; underlying non-A-share holdings are not supported yet.
:param date:
:return: Dict
"""
# TODO: hard-code a dict to merge some second-level industries
d = {}
date = convert_date(date)
rd = date - pd.Timedelta(days=120)
year = rd.year
season = int((rd.month - 0.1) / 3) + 1
for f in self.fundtradeobj:
value = f.briefdailyreport(date).get("currentvalue", 0)
if value > 0:
if isinstance(f, itrade):
if f.get_type() == "股票":
industry = get_industry_fromxq(f.code).get("industryname", "")
if industry.strip():
d[industry] = d.get(industry, 0) + value
continue
elif f.get_type() in ["可转债", "债券", "货币基金"]:
# simplified for now: convertible bonds are not attributed to the industry of their underlying stock
continue
elif f.get_type() == "场内基金":
code = f.code[2:]
else:
continue
else:
code = f.code
if code == "mf":
continue
if get_fund_type(code) == "货币基金":
continue
## the following handles funds that hold stocks
## using fundinfo here is a bit wasteful, but keep the implementation simple for now
fobj = fundinfo(code)
industry_dict = fobj.get_industry_holdings(year=year, season=season)
if industry_dict is None:
continue
## the industry ratios need to be scaled here
sv = sum([v for _, v in industry_dict.items()])
if sv < 1.0:
# only a tiny fraction of the holdings have industry info
continue
stock_ratio = fobj.get_portfolio_holdings(date.strftime("%Y%m%d"))[
"stock_ratio"
]
scale = stock_ratio / sv
logger.debug("industry scaling factor: %s", scale)
for k, v in industry_dict.items():
if k.strip():
d[k] = d.get(k, 0) + value * v / 100 * scale
return d
get_industry_holdings = get_industry
def v_positions(self, date=yesterdayobj(), rendered=True):
"""
pie chart visualization of positions ratio in combination
"""
sdata = sorted(
[
(fob.name, fob.briefdailyreport(date).get("currentvalue", 0))
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_category_positions(self, date=yesterdayobj(), rendered=True):
"""
Pie chart of asset allocation, aggregated by major asset class.
:param date:
:param rendered: bool. Defaults to True for notebooks; set rendered=False to return a plain pyecharts object.
:return:
"""
d = {}
for f in self.fundtradeobj:
if isinstance(f, itrade):
t = f.get_type()
if t == "场内基金":
t = get_fund_type(f.code[2:])
elif f.code == "mf":
t = "货币基金"
else:
t = get_fund_type(f.code)
if t == "其他":
logger.warning(
"%s has category others which should be double checked" % f.code
)
d[t] = d.get(t, 0) + f.briefdailyreport(date).get("currentvalue", 0)
sdata = sorted([(k, round(v, 2)) for k, v in d.items()])
pie = Pie()
pie.add(
series_name="总值占比",
data_pair=sdata,
label_opts=opts.LabelOpts(is_show=False, position="center"),
).set_global_opts(
legend_opts=opts.LegendOpts(
pos_left="left", type_="scroll", orient="vertical"
)
).set_series_opts(
tooltip_opts=opts.TooltipOpts(
trigger="item", formatter="{a} <br/>{b}: {c} ({d}%)"
),
)
if rendered:
return pie.render_notebook()
else:
return pie
def v_positions_history(self, end=yesterdaydash(), rendered=True):
"""
river chart visualization of positions ratio history
use text size to avoid legend overlap in some sense, e.g. legend_text_size=8
"""
start = self.totcftable.iloc[0].date
times = pd.date_range(start, end)
tdata = []
for date in times:
sdata = sorted(
[
(date, fob.briefdailyreport(date).get("currentvalue", 0), fob.name,)
for fob in self.fundtradeobj
],
key=lambda x: x[1],
reverse=True,
)
tdata.extend(sdata)
tr = ThemeRiver()
tr.add(
series_name=[foj.name for foj in self.fundtradeobj],
data=tdata,
label_opts=opts.LabelOpts(is_show=False),
singleaxis_opts=opts.SingleAxisOpts(type_="time", pos_bottom="10%"),
)
if rendered:
return tr.render_notebook()
else:
return tr
def v_tradevolume(self, freq="D", rendered=True):
"""
visualization on trade summary of the funds combination
:param freq: one character string, frequency label, now supporting D for date,
W for week and M for month, namely the trade volume is shown based on the time unit
:returns: ``pyecharts.Bar()``
"""
return vtradevolume(self.totcftable, freq=freq, rendered=rendered)
class mulfix(mul, indicator):
"""
introduce cash to make a closed investment system, where netvalue analysis can be applied
namely the totcftable only has one row at the very beginning
:param fundtradeobj: trade obj to be include
:param status: status table; if no trade obj is provided, it will include all funds
based on the codes in the status table
:param property: Dict[fundcode, property_number]. Meaning of the property number:
int. 1: fund purchases round shares down instead of rounding half-up (such funds really do exist==). 2: dividends are reinvested by default (0 means cash dividends by default). 4: redemptions are settled by net value.
:param fetch: boolean, when the fetch option is enabled, the info class will try fetching from local files first during init
:param save: boolean, when the save option is enabled, info classes automatically save themselves to files
:param path: string, the file path prefix of IO, or object or engine from sqlalchemy to connect sql database
:param form: string, the format of IO, options including: 'csv','sql'
:param totmoney: positive float, the total money as the input at the beginning
:param cashobj: cashinfo object, which is designed to balance the cash in and out
"""
def __init__(
self,
*fundtradeobj,
status=None,
istatus=None,
property=None,
fetch=False,
save=False,
path="",
form="csv",
totmoney=100000,
cashobj=None
):
super().__init__(
*fundtradeobj,
status=status,
istatus=istatus,
property=property,
fetch=fetch,
save=save,
path=path, | cashobj = cashinfo()
self.totmoney = totmoney
nst = mulfix._vcash(totmoney, self.totcftable, cashobj)
cashtrade = trade(cashobj, nst)
# super().__init__(*self.fundtradeobj, cashtrade)
self.cashobj = cashobj
self.fundtradeobj = list(self.fundtradeobj)
self.fundtradeobj.append(cashtrade)
self.fundtradeobj = tuple(self.fundtradeobj)
btnk = bottleneck(self.totcftable)
if btnk > totmoney:
raise TradeBehaviorError("the initial total cash is too low")
self.totcftable = pd.DataFrame(
data={"date": [nst.iloc[0].date], "cash": [-totmoney]}
)
@staticmethod
def _vcash(totmoney, totcftable, cashobj):
"""
return a virtual status table with a mf(cash) column based on the given total money and cftable
"""
cashl = []
cashl.append(totmoney + totcftable.iloc[0].cash)
for i in range(len(totcftable) - 1):
date = totcftable.iloc[i + 1].date
delta = totcftable.iloc[i + 1].cash
if delta < 0:
cashl.append(
myround(
delta
/ cashobj.price[cashobj.price["date"] <= date].iloc[-1].netvalue
)
)
else:
cashl.append(delta)
datadict = {"date": totcftable.loc[:, "date"], "mf": cashl}
return pd.DataFrame(data=datadict)
def unitvalue(self, date=yesterdayobj()):
"""
:returns: float, the unit value of the whole investment combination
"""
date = convert_date(date)
res = 0
for fund in self.fundtradeobj:
res += fund.briefdailyreport(date).get("currentvalue", 0)
return res / self.totmoney
def v_tradecost(self, threhold=0, date=yesterdayobj(), rendered=True):
if getattr(self, "price", None) is None:
raise ValueError("Please generate price table by ``bcmkset()`` first")
cftable = self.fundtradeobj[-1].cftable[1:]
cftable = cftable[abs(cftable["cash"]) > threhold]
cftable["cash"] = -cftable["cash"]
return vtradecost(self, cftable, end=date, rendered=rendered)
class imul(mul):
def __init__(self, *fundtradeobj, status=None, istatus=None):
"""
Class for analyzing an on-exchange investment portfolio.
:param fundtradeobj: itrade objects.
:param status: on-exchange-format bookkeeping DataFrame, or an irecord object.
"""
if not fundtradeobj:
fundtradeobj = []
if status is None:
status = istatus
if isinstance(status, irecord):
status = status.status
fundcodelist = [f.code for f in fundtradeobj]
if status is not None:
for code in status.code.unique():
if code not in fundcodelist and not code.startswith("#"):
fundtradeobj.append(itrade(code, status))
self.fundtradeobj = tuple(fundtradeobj)
self.totcftable = self._mergecftb()
self.is_in = True
Mul = mul
MulFix = mulfix
IMul = imul | form=form
)
if cashobj is None: |
crawl_urls.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import time
import requests
import multiprocessing
import argparse
from lxml import html
from urllib.parse import urljoin
from urllib.parse import urlparse
from fake_useragent import UserAgent
from lxml.etree import ParserError
from lxml.etree import XMLSyntaxError
from requests.exceptions import Timeout
from requests.exceptions import InvalidURL
from requests.exceptions import InvalidSchema
from requests.exceptions import MissingSchema
from requests.exceptions import ConnectionError
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ContentDecodingError
from requests.exceptions import TooManyRedirects
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from pymongo.errors import AutoReconnect
from pymongo.errors import WriteError
from pymongo.errors import CursorNotFound
from idna.core import IDNAError
from datetime import datetime
def check_mail(url):
return re.match(r'\b[\w.+-]+?@[-_\w]+[.]+[-_.\w]+\b', url)
def connect(host):
return MongoClient('mongodb://{}:27017'.format(host))
def retrieve_domains(db_ip_data, skip, limit):
return db_ip_data.dns.find({'domain_crawled': {'$exists': False}})[limit - skip:limit]
def | (db_ip_data, domain):
try:
res = db_ip_data.dns.update_one({'domain': domain}, {'$set': {'domain_crawled': datetime.utcnow()}}, upsert=False)
if res.modified_count > 0:
print('INFO: domain {} crawled and updated with {} documents'.format(domain, res.modified_count))
except DuplicateKeyError:
pass
def add_urls(db_url_data, db_ip_data, url, domain):
try:
post = {'url': url.lower(), 'created': datetime.utcnow()}
post_id = db_url_data.url.insert_one(post).inserted_id
print(u'INFO: the url {} was added with the id {}'.format(url, post_id))
update_data(db_ip_data, domain)
except AutoReconnect:
time.sleep(30)
except (DuplicateKeyError, WriteError) as e:
print(e)
def get_urls(db, ua, url):
try:
headers = {'User-Agent': ua.chrome}
res = requests.get('http://{}'.format(url), timeout=1, headers=headers)
content = res.text
except (Timeout, ConnectionError, TooManyRedirects):
return None
except (IDNAError, InvalidURL, InvalidSchema, MissingSchema, ContentDecodingError, ChunkedEncodingError):
return None
try:
doc = html.document_fromstring(content)
except (ValueError, ParserError, XMLSyntaxError):
return None
links = doc.xpath('//a/@href')
base_url = 'http://{}'.format(url)
url_set = set()
for link in links:
link = link.lower().strip()
if link.startswith('#') or link.startswith('+') or link.startswith('tel:') or link.startswith('javascript:') or link.startswith('mailto:'):
continue
elif link.startswith('/'):
link = urljoin(base_url, link)
elif link.startswith('?'):
link = urljoin(base_url, link)
elif link.startswith('..'):
link = urljoin(base_url, link.replace('..', ''))
if urlparse(link).netloc:
url_set.add(link)
print(url_set)
return url_set
def worker(host, skip, limit):
client = connect(host)
db_url_data = client.url_data
db_ip_data = client.ip_data
ua = UserAgent()
try:
domains = retrieve_domains(db_ip_data, limit, skip)
except CursorNotFound:
client.close()
return
for domain in domains:
print(u'INFO: the domain {} is being processed'.format(domain['domain']))
links = get_urls(db_ip_data, ua, domain['domain'])
if links is not None and len(links) > 0:
for link in links:
add_urls(db_url_data, db_ip_data, link, domain['domain'])
client.close()
return
def argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--worker', help='set worker count', type=int, required=True)
parser.add_argument('--host', help='set the host', type=str, required=True)
args = parser.parse_args()
return args
if __name__ == '__main__':
args = argparser()
client = connect(args.host)
db = client.ip_data
jobs = []
threads = args.worker
amount = round(db.dns.estimated_document_count() / (threads + 50000))
limit = amount
print(limit, amount)
for f in range(threads):
j = multiprocessing.Process(target=worker, args=(args.host, limit, amount))
jobs.append(j)
j.start()
limit = limit + amount
for j in jobs:
client.close()
j.join()
print('exitcode = {}'.format(j.exitcode))
| update_data |
DigraphDFS.ts | import {isMarked, StringMap} from '../common/AuxiliaryTypes'
import {CommonGraphAPI} from '../common/CommonGraphAPI'
import {ContainersBuilders} from '../../'
import {DigraphAPI} from './DigraphAPI'
import Vertex = CommonGraphAPI.Vertex
import Digraph = DigraphAPI.Digraph
import DigraphSearch = DigraphAPI.DigraphSearch
import VertexVisitor = CommonGraphAPI.VertexVisitor
import newStack = ContainersBuilders.newStack
/**
* Implementation of Depth First Search.
*/
export class DigraphDFS<V> implements DigraphSearch<V> {
search(dg: Digraph<V>, source: Vertex<V>, visitor: VertexVisitor<V>): void {
const dfs = new DFS(dg, source, visitor)
dfs.search()
}
}
class | <V> {
private marked = new StringMap<boolean>()
constructor(private G: Digraph<V>, private source: Vertex<V>, private visitor: VertexVisitor<V>) {
}
search(): void {
this.dfs(this.G, this.source)
}
private dfs(G: Digraph<V>, source: Vertex<V>): void {
const s = newStack<Vertex<V>>()
this.marked.set(source.key, true)
s.push(source)
while (!s.isEmpty()) {
const v = s.pop()
this.visitor(v)
G.adjacent(v).forEach(w => {
if (!isMarked(w, this.marked)) {
this.marked.set(w.key, true)
s.push(w)
}
})
}
}
}
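// Illustrative usage (dg and source are assumed to come from the DigraphAPI
// builders; the names here are hypothetical): collect vertices in visit order.
// const visited: Vertex<string>[] = []
// new DigraphDFS<string>().search(dg, source, v => visited.push(v))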
| DFS |
r.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import inspect
from spack.directives import extends
from spack.package import PackageBase, run_after
class RPackage(PackageBase):
"""Specialized class for packages that are built using R.
| This class provides a single phase that can be overridden:
1. :py:meth:`~.RPackage.install`
It has sensible defaults, and for many packages the only thing
necessary will be to add dependencies
"""
phases = ['install']
maintainers = ['glennpj']
#: This attribute is used in UI queries that need to know the build
#: system base class
build_system_class = 'RPackage'
extends('r')
def configure_args(self):
"""Arguments to pass to install via ``--configure-args``."""
return []
def configure_vars(self):
"""Arguments to pass to install via ``--configure-vars``."""
return []
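# Illustrative override in a hypothetical downstream package (not part of this
# base class): returning extra flags here makes install() pass them along as
# --configure-args to `R CMD INSTALL`.
# class RFoo(RPackage):
#     def configure_args(self):
#         return ['--no-test-load']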
def install(self, spec, prefix):
"""Installs an R package."""
config_args = self.configure_args()
config_vars = self.configure_vars()
args = [
'CMD',
'INSTALL'
]
if config_args:
args.append('--configure-args={0}'.format(' '.join(config_args)))
if config_vars:
args.append('--configure-vars={0}'.format(' '.join(config_vars)))
args.extend([
'--library={0}'.format(self.module.r_lib_dir),
self.stage.source_path
])
inspect.getmodule(self).R(*args)
# Check that self.prefix is there after installation
run_after('install')(PackageBase.sanity_check_prefix) | For more information on the R build system, see:
https://stat.ethz.ch/R-manual/R-devel/library/utils/html/INSTALL.html
|
bytes_format_modulo.py | # This test requires CPython3.5
print(b"%%" % ())
print(b"=%d=" % 1)
print(b"=%d=%d=" % (1, 2))
print(b"=%s=" % b"str")
print(b"=%r=" % b"str") |
print("PASS") |
|
opentsdb.go | package opentsdb
import (
"fmt"
"log"
"net"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/yevheniir/telegraf-fork"
"github.com/yevheniir/telegraf-fork/plugins/outputs"
)
var (
allowedChars = regexp.MustCompile(`[^a-zA-Z0-9-_./\p{L}]`)
hypenChars = strings.NewReplacer(
"@", "-",
"*", "-",
`%`, "-",
"#", "-",
"$", "-")
defaultHttpPath = "/api/put"
defaultSeperator = "_"
)
type OpenTSDB struct {
Prefix string
Host string
Port int
HttpBatchSize int // deprecated httpBatchSize form in 1.8
HttpPath string
Debug bool
Separator string
}
var sampleConfig = `
## prefix for metrics keys
prefix = "my.specific.prefix."
## DNS name of the OpenTSDB server
## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the
## telnet API. "http://opentsdb.example.com" will use the Http API.
host = "opentsdb.example.com"
## Port of the OpenTSDB server
port = 4242
## Number of data points to send to OpenTSDB in Http requests.
## Not used with telnet API.
http_batch_size = 50
## URI Path for Http requests to OpenTSDB.
## Used in cases where OpenTSDB is located behind a reverse proxy.
http_path = "/api/put"
## Debug true - Prints OpenTSDB communication
debug = false
## Separator separates measurement name from field
separator = "_"
`
func ToLineFormat(tags map[string]string) string {
tagsArray := make([]string, len(tags))
index := 0
for k, v := range tags {
tagsArray[index] = fmt.Sprintf("%s=%s", k, v)
index++
}
sort.Strings(tagsArray)
return strings.Join(tagsArray, " ")
}
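// Example (illustrative): ToLineFormat(map[string]string{"host": "a", "dc": "us"})
// returns "dc=us host=a": tags rendered as key=value pairs, sorted and space-separated.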
func (o *OpenTSDB) Connect() error {
if !strings.HasPrefix(o.Host, "http") && !strings.HasPrefix(o.Host, "tcp") {
o.Host = "tcp://" + o.Host
}
// Test Connection to OpenTSDB Server
u, err := url.Parse(o.Host)
if err != nil {
return fmt.Errorf("Error in parsing host url: %s", err.Error())
}
uri := fmt.Sprintf("%s:%d", u.Host, o.Port)
tcpAddr, err := net.ResolveTCPAddr("tcp", uri)
if err != nil {
return fmt.Errorf("OpenTSDB TCP address cannot be resolved: %s", err)
}
connection, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return fmt.Errorf("OpenTSDB Telnet connect fail: %s", err)
}
defer connection.Close()
return nil
}
func (o *OpenTSDB) Write(metrics []telegraf.Metric) error {
if len(metrics) == 0 {
return nil
}
u, err := url.Parse(o.Host)
if err != nil {
return fmt.Errorf("Error in parsing host url: %s", err.Error())
}
if u.Scheme == "" || u.Scheme == "tcp" | else if u.Scheme == "http" || u.Scheme == "https" {
return o.WriteHttp(metrics, u)
} else {
return fmt.Errorf("Unknown scheme in host parameter.")
}
}
func (o *OpenTSDB) WriteHttp(metrics []telegraf.Metric, u *url.URL) error {
http := openTSDBHttp{
Host: u.Host,
Port: o.Port,
Scheme: u.Scheme,
User: u.User,
BatchSize: o.HttpBatchSize,
Path: o.HttpPath,
Debug: o.Debug,
}
for _, m := range metrics {
now := m.Time().UnixNano() / 1000000000
tags := cleanTags(m.Tags())
for fieldName, value := range m.Fields() {
switch value.(type) {
case int64:
case uint64:
case float64:
default:
log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
continue
}
metric := &HttpMetric{
Metric: sanitize(fmt.Sprintf("%s%s%s%s",
o.Prefix, m.Name(), o.Separator, fieldName)),
Tags: tags,
Timestamp: now,
Value: value,
}
if err := http.sendDataPoint(metric); err != nil {
return err
}
}
}
if err := http.flush(); err != nil {
return err
}
return nil
}
func (o *OpenTSDB) WriteTelnet(metrics []telegraf.Metric, u *url.URL) error {
// Send Data with telnet / socket communication
uri := fmt.Sprintf("%s:%d", u.Host, o.Port)
tcpAddr, _ := net.ResolveTCPAddr("tcp", uri)
connection, err := net.DialTCP("tcp", nil, tcpAddr)
if err != nil {
return fmt.Errorf("OpenTSDB: Telnet connect fail")
}
defer connection.Close()
for _, m := range metrics {
now := m.Time().UnixNano() / 1000000000
tags := ToLineFormat(cleanTags(m.Tags()))
for fieldName, value := range m.Fields() {
switch value.(type) {
case int64:
case uint64:
case float64:
default:
log.Printf("D! OpenTSDB does not support metric value: [%s] of type [%T].\n", value, value)
continue
}
metricValue, buildError := buildValue(value)
if buildError != nil {
log.Printf("E! OpenTSDB: %s\n", buildError.Error())
continue
}
messageLine := fmt.Sprintf("put %s %v %s %s\n",
sanitize(fmt.Sprintf("%s%s%s%s", o.Prefix, m.Name(), o.Separator, fieldName)),
now, metricValue, tags)
_, err := connection.Write([]byte(messageLine))
if err != nil {
return fmt.Errorf("OpenTSDB: Telnet writing error %s", err.Error())
}
}
}
return nil
}
func cleanTags(tags map[string]string) map[string]string {
tagSet := make(map[string]string, len(tags))
for k, v := range tags {
val := sanitize(v)
if val != "" {
tagSet[sanitize(k)] = val
}
}
return tagSet
}
func buildValue(v interface{}) (string, error) {
var retv string
switch p := v.(type) {
case int64:
retv = IntToString(int64(p))
case uint64:
retv = UIntToString(uint64(p))
case float64:
retv = FloatToString(float64(p))
default:
return retv, fmt.Errorf("unexpected type %T with value %v for OpenTSDB", v, v)
}
return retv, nil
}
func IntToString(input_num int64) string {
return strconv.FormatInt(input_num, 10)
}
func UIntToString(input_num uint64) string {
return strconv.FormatUint(input_num, 10)
}
func FloatToString(input_num float64) string {
return strconv.FormatFloat(input_num, 'f', 6, 64)
}
func (o *OpenTSDB) SampleConfig() string {
return sampleConfig
}
func (o *OpenTSDB) Description() string {
return "Configuration for OpenTSDB server to send metrics to"
}
func (o *OpenTSDB) Close() error {
return nil
}
func sanitize(value string) string {
// Apply special hyphenation rules to preserve backwards compatibility
value = hypenChars.Replace(value)
// Replace any remaining illegal chars
return allowedChars.ReplaceAllLiteralString(value, "_")
}
func init() {
outputs.Add("opentsdb", func() telegraf.Output {
return &OpenTSDB{
HttpPath: defaultHttpPath,
Separator: defaultSeperator,
}
})
}
| {
return o.WriteTelnet(metrics, u)
} |
gralt_benchmarks.py | # Copyright 2021 The ParallelAccel Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
Test the speed of GRALTool on standard benchmark acyclic_graphs.
This is deprecated code and is included for reference. New benchmarks should use the
Benchmark and BenchmarkSuite models.
"""
import json
import os
import time
import benchmarks.acyclic_graphs.benchmark_acyclic_graphs as acyclic_graphs
from benchmarks.acyclic_graphs import pbaxisum
import benchmarks.gralt.settings as settings
import linear_algebra
import tensorflow as tf
import grapal_tool as gralt
sample_subgraph = gralt.subgraphs.Sample()
expectation_subgraph = gralt.subgraphs.Expectation()
state_subgraph = gralt.subgraphs.State()
def exp_and_grad_call(
acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t, num_samples_t):
|
call_dict = {
"samples": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: sample_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
repetitions=num_samples_t),
"exp": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t),
"exp_and_grad": exp_and_grad_call,
"state": lambda acyclic_graph_t, symbol_names_t, symbol_values_t, ops_t,
num_samples_t: state_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t),
}
get_num_samples_dict = {
"samples": lambda settings_dict:
tf.constant([settings_dict["num_samples"]]),
"exp": lambda settings_dict: tf.constant([0]),
"exp_and_grad": lambda settings_dict: tf.constant([0]),
"state": lambda settings_dict: tf.constant([0]),
}
get_ops_dict = {
"samples": lambda discretes: tf.constant(""),
"exp": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"exp_and_grad": lambda discretes:
gralt.convert_to_tensor([[pbaxisum.get_random_prob_basis_axis_sum(discretes)]]),
"state": lambda discretes: tf.constant(""),
}
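# Illustrative dispatch (reading of the tables above): for sim_type == "exp",
# call_dict["exp"] computes expectation values only, get_num_samples_dict["exp"]
# returns tf.constant([0]) since sampling is unused, and get_ops_dict["exp"]
# builds a random observable for the given discretes.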
def run_gralt_benchmarks(
min_subgraphs, max_subgraphs, skip_subgraphs, min_discretes, max_discretes, iterations,
num_samples, rounding_digits, acyclic_graph_type, sim_type, rel_save_dir,
save_dir_prefix=os.getcwd()):
if acyclic_graph_type == "approxopt":
acyclic_graph_builder = acyclic_graphs.approxopt
elif acyclic_graph_type == "hea":
acyclic_graph_builder = acyclic_graphs.hea
else:
raise ValueError(acyclic_graph_type + " is not a valid type of test acyclic_graph.")
if sim_type in {"samples", "exp", "exp_and_grad", "state"}:
call_subgraph = call_dict[sim_type]
get_num_samples = get_num_samples_dict[sim_type]
get_ops = get_ops_dict[sim_type]
else:
raise ValueError(sim_type + " is not a valid simulation types.")
# Save settings.
full_save_dir = os.path.join(save_dir_prefix, rel_save_dir)
settings.set_settings(
min_subgraphs=min_subgraphs,
max_subgraphs=max_subgraphs,
skip_subgraphs=skip_subgraphs,
min_discretes=min_discretes,
max_discretes=max_discretes,
iterations=iterations,
num_samples=num_samples,
rounding_digits=rounding_digits,
acyclic_graph_type=acyclic_graph_type,
sim_type=sim_type,
full_save_dir=full_save_dir
)
settings_dict = settings.load_settings(full_save_dir)
# Run benchmarks.
num_samples_t = get_num_samples(settings_dict)
for q in range(settings_dict["min_discretes"], settings_dict["max_discretes"] + 1):
print(f"Current discrete size: {q}")
benchmarks_dict = dict()
discretes = linear_algebra.GridSpace.rect(1, q)
ops_t = get_ops(discretes)
for l in range(
settings_dict["min_subgraphs"], settings_dict["max_subgraphs"] + 1,
settings_dict["skip_subgraphs"]):
print(f"Current number of subgraphs: {l}")
benchmarks_dict[l] = {}
acyclic_graph, symbols = acyclic_graph_builder(discretes, l, acyclic_graph_type)
is_acyclic_graph_compiled = False
symbol_names_t = tf.constant([str(s) for s in symbols])
for r in range(settings_dict["iterations"]):
symbol_values_t = tf.random.uniform(
[1, len(symbols)], minval=-2.0, maxval=2.0)
start = time.time()
if not is_acyclic_graph_compiled:
compiled_acyclic_graph = gralt.convert_to_tensor([acyclic_graph])
is_acyclic_graph_compiled = True
result = call_subgraph(
compiled_acyclic_graph, symbol_names_t, symbol_values_t,
ops_t, num_samples_t)
stop = time.time()
this_runtime = round(stop - start, rounding_digits)
if r == 0:
# First run is special because it considers the compilation time
benchmarks_dict[l]["initial"] = this_runtime
benchmarks_dict[l]["remaining"] = []
print("initial runtime of {} seconds".format(this_runtime))
else:
print("subsequent runtime of {} seconds".format(this_runtime))
benchmarks_dict[l]["remaining"].append(this_runtime)
benchmarks_dict[l]["depth"] = len(acyclic_graph)
# Checkpoint the benchmarks after each discrete number.
benchmarks_filename = "benchmarks_dict_{}.json".format(q)
benchmarks_data_file = os.path.join(full_save_dir, benchmarks_filename)
with open(benchmarks_data_file, 'w') as datafile:
json.dump(benchmarks_dict, datafile)
| with tf.GradientTape() as g:
g.watch(symbol_values_t)
exp = expectation_subgraph(
acyclic_graph_t, symbol_names=symbol_names_t, symbol_values=symbol_values_t,
operators=ops_t)
grad = g.gradient(exp, symbol_values_t)
return exp, grad |
parser_test.go | package main
import (
"fmt"
"testing"
)
func TestParse(t *testing.T) |
func TestParseDefinition(t *testing.T) {
code := `(begin
(define (double x) (+ x x))
(define b 5)
(+ (double 2) b))`
ats, _ := Parse(code)
except := `(begin (define (double x) (+ x x)) (define b 5) (+ (double 2) b))`
if fmt.Sprintf("%v", ats) != except {
t.Error("Parse Error")
t.Log("ats: ", ats, "except: ", except)
}
}
| {
code := `(+ (* 1 1) (f "xx" "yy"))`
ats, _ := Parse(code)
except := `(+ (* 1 1) (f "xx" "yy"))`
if fmt.Sprintf("%v", ats) != except {
t.Error("Parse Error")
t.Log("ats: ", ats, "except: ", except)
}
} |